text (string, lengths 4 to 1.02M) | meta (dict)
---|---|
from __future__ import print_function
import unittest
from lunisolar import ChineseDate
class LunarDateTestCases(unittest.TestCase):
def setUp(self):
self.moon_landing = ChineseDate.from_gregorian(1969, 7, 20)
self.july_fourth = ChineseDate.from_chinese(2009, 5, 12, True)
def test_construction_from_gregorian(self):
moon_landing_gdate = self.moon_landing.gregorian_date
self.assertEqual(moon_landing_gdate.year, 1969)
self.assertEqual(moon_landing_gdate.month, 7)
self.assertEqual(moon_landing_gdate.day, 20)
moon_landing_cdate = self.moon_landing.chinese_date
self.assertEqual(moon_landing_cdate.year, 1969)
self.assertEqual(moon_landing_cdate.month, 6)
self.assertEqual(moon_landing_cdate.day, 7)
self.assertEqual(moon_landing_cdate.is_leap_month, False)
def test_construction_from_chinese(self):
july_fourth_gdate = self.july_fourth.gregorian_date
self.assertEqual(july_fourth_gdate.year, 2009)
self.assertEqual(july_fourth_gdate.month, 7)
self.assertEqual(july_fourth_gdate.day, 4)
july_fourth_cdate = self.july_fourth.chinese_date
self.assertEqual(july_fourth_cdate.year, 2009)
self.assertEqual(july_fourth_cdate.month, 5)
self.assertEqual(july_fourth_cdate.day, 12)
self.assertEqual(july_fourth_cdate.is_leap_month, True)
def test_properties(self):
moon_landing = self.moon_landing
self.assertEqual(moon_landing.year, 1969)
self.assertEqual(moon_landing.month, 6)
self.assertEqual(moon_landing.day, 7)
self.assertEqual(moon_landing.is_leap_month, False)
def test_heavenly_stem(self):
self.assertEqual(self.moon_landing._stem, 5)
self.assertEqual(self.july_fourth._stem, 5)
print("test_heavenly_stem passed!")
def test_earthly_branch(self):
self.assertEqual(self.moon_landing._branch, 9)
self.assertEqual(self.july_fourth._branch, 1)
print("test_earthly_branch passed!")
def test_compare_chinese_date_with_builtin_date(self):
from datetime import date
old_date = date(1969, 7, 19)
self.assertTrue(old_date < self.moon_landing)
self.assertFalse(old_date > self.moon_landing)
self.assertTrue(old_date <= self.moon_landing)
self.assertFalse(old_date >= self.moon_landing)
self.assertFalse(old_date == self.moon_landing)
self.assertTrue(old_date != self.moon_landing)
self.assertFalse(self.moon_landing < old_date)
self.assertTrue(self.moon_landing > old_date)
self.assertFalse(self.moon_landing <= old_date)
self.assertTrue(self.moon_landing >= old_date)
self.assertFalse(self.moon_landing == old_date)
self.assertTrue(self.moon_landing != old_date)
def test_compare_chinese_date_with_chinese_date(self):
earlier = self.moon_landing
later = self.july_fourth
self.assertTrue(earlier < later)
self.assertFalse(earlier > later)
self.assertTrue(earlier <= later)
self.assertFalse(earlier >= later)
self.assertFalse(earlier == later)
self.assertTrue(earlier != later)
self.assertFalse(later < earlier)
self.assertTrue(later > earlier)
self.assertFalse(later <= earlier)
self.assertTrue(later >= earlier)
self.assertFalse(later == earlier)
self.assertTrue(later != earlier)
def test_show_full_zodiac_name(self):
self.assertEqual(self.moon_landing.show_zodiac_full(),
"Year of the Earth Rooster")
self.assertEqual(self.july_fourth.show_zodiac_full(show_element=False),
"Year of the Ox")
| {
"content_hash": "21d51ca9697de892a753f595817c7e74",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 79,
"avg_line_length": 41.71111111111111,
"alnum_prop": 0.6672882258923815,
"repo_name": "yen223/lunisolar",
"id": "fc98b6afe8d53d179f0d7102ff54ee941a1c2eb8",
"size": "3754",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/test_chinese_calendar.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "213493"
},
{
"name": "Ruby",
"bytes": "3789"
}
],
"symlink_target": ""
} |
class Pure:
def __init__(self):
pass
@staticmethod
def filter(node):
new_node = node.copy(lambda x: x.pure())
return new_node.before([]).after([]) | {
"content_hash": "f3b62ed96dc113a11e504d6e88f9c9f3",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 48,
"avg_line_length": 22.875,
"alnum_prop": 0.546448087431694,
"repo_name": "diNard/Saw",
"id": "87e6218cd053e35c12ae547672ef25b914732b27",
"size": "183",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "saw/filters/pure.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "54587"
}
],
"symlink_target": ""
} |
class Prime:
"""Provides methods to obtain prime numbers and use them."""
def __init__(self):
pass
# Cache for already calculated prime numbers
cache = [2, 3]
@staticmethod
def get_prime(index):
"""
Returns the prime number at the given index. The index starts with 0.
:param index: The index of the requested prime number.
:return: The prime number at position index.
"""
while len(Prime.cache) < index + 1:
Prime.append_next_to_cache()
return Prime.cache[index]
@staticmethod
def append_next_to_cache():
"""
Calculates the next prime number which is not in the cache.
:return: The added prime number.
"""
num = Prime.cache[len(Prime.cache) - 1] + 2
while True:
flag = True
for p in Prime.cache:
if num % p == 0:
num += 1
flag = False
break
if flag:
Prime.cache.append(num)
return num
@staticmethod
def get_prime_factors(num):
"""
Returns the prime factors of the given number.
:param num: The number to split in prime factors.
:return: An array of prime factors of num.
"""
if num <= 1:
raise ValueError("num must be > 1")
index = 0
result = []
while num > 1:
prime = Prime.get_prime(index)
if num % prime == 0:
result.append(prime)
num /= prime
else:
index += 1
return result
@staticmethod
def get_greatest_common_divisor(a, b):
"""
Calculates the greatest common divisor
:param a: The first number.
:param b: The second number.
:return: The greatest common divisor of a and b.
"""
a_factors = Prime.get_prime_factors(a) if a > 1 else Prime.get_prime_factors(-a) if a < -1 else [a]
b_factors = Prime.get_prime_factors(b) if b > 1 else Prime.get_prime_factors(-b) if b < -1 else [b]
q = 1
for f in a_factors:
if f in b_factors:
b_factors.remove(f)
q *= f
return q
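# Minimal usage sketch of the helpers above; the expected values in the
# comments follow directly from the definitions and are illustrative only.
if __name__ == "__main__":
    print(Prime.get_prime(5))                            # 13
    print(Prime.get_prime_factors(360))                  # [2, 2, 2, 3, 3, 5]
    print(Prime.get_greatest_common_divisor(48, 36))     # 12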
| {
"content_hash": "b6638753e86b7e6d4784549859fd86ad",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 107,
"avg_line_length": 29.487179487179485,
"alnum_prop": 0.5134782608695653,
"repo_name": "Koopakiller/School",
"id": "88adb7d1671e57627ab3c6c79b68273701b78d76",
"size": "2370",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "NLA/serie1/prime.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "86659"
},
{
"name": "Pascal",
"bytes": "2813"
},
{
"name": "PowerShell",
"bytes": "7420"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from sqlalchemy.orm import joinedload, subqueryload_all
from changes.api.base import APIView
from changes.models import Job, JobPhase, JobStep
class JobPhaseIndexAPIView(APIView):
def get(self, job_id):
job = Job.query.options(
subqueryload_all(Job.phases),
joinedload('project', innerjoin=True),
).get(job_id)
if job is None:
return '', 404
phase_list = list(JobPhase.query.options(
subqueryload_all(JobPhase.steps, JobStep.node),
subqueryload_all(JobPhase.steps, JobStep.logsources)
).filter(
JobPhase.job_id == job.id,
).order_by(JobPhase.date_started.asc(), JobPhase.date_created.asc()))
context = []
for phase, phase_data in zip(phase_list, self.serialize(phase_list)):
phase_data['steps'] = []
for step, step_data in zip(phase.steps, self.serialize(list(phase.steps))):
step_data['logSources'] = self.serialize(list(step.logsources))
phase_data['steps'].append(step_data)
context.append(phase_data)
return self.respond(context, serialize=False)
| {
"content_hash": "cd3239b0d60985063a6c9fef4891998f",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 87,
"avg_line_length": 36.75757575757576,
"alnum_prop": 0.6240725474031328,
"repo_name": "bowlofstew/changes",
"id": "8e3e655f62e2b010ec19f1d73054f0e709d3648b",
"size": "1213",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "changes/api/jobphase_index.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "87142"
},
{
"name": "HTML",
"bytes": "137437"
},
{
"name": "JavaScript",
"bytes": "385108"
},
{
"name": "Makefile",
"bytes": "6212"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "1546048"
},
{
"name": "Shell",
"bytes": "868"
}
],
"symlink_target": ""
} |
import collections
import StringIO
TupleToken = collections.namedtuple('Token', ['value'])
class Token(TupleToken):
def __repr__(self):
return "{0}(value={1})".format(type(self).__name__, self.value)
TupleNoValueToken = collections.namedtuple('Token', [])
class NoValueToken(TupleNoValueToken):
value = None
def __repr__(self):
return "{0}()".format(type(self).__name__)
class KeywordToken(Token):
pass
class TypeToken(Token):
pass
class NameToken(Token):
pass
class ColonToken(NoValueToken):
value = ":"
pass
class SemiColonToken(NoValueToken):
value = ";"
pass
class CommaToken(NoValueToken):
value = ","
pass
class StarToken(NoValueToken):
value = "*"
pass
class PlusToken(NoValueToken):
value = "+"
pass
class OpenBracketToken(NoValueToken):
value = "{"
pass
class CloseBracketToken(NoValueToken):
value = "}"
pass
class OpenSquareBracketToken(NoValueToken):
value = "["
pass
class CloseSquareBracketToken(NoValueToken):
value = "]"
pass
class OpenParenthesisToken(NoValueToken):
value = "("
pass
class CloseParenthesisToken(NoValueToken):
value = ")"
pass
class SharpToken(NoValueToken):
value = "#"
pass
class EqualToken(NoValueToken):
value = "="
class NewLineToken(NoValueToken):
value = "\n"
class Lexer(object):
keywords = ["typedef", "struct", "enum", "union", "const"]
token_chr = {"*" : StarToken, "[" : OpenSquareBracketToken, "]" : CloseSquareBracketToken,
"{" : OpenBracketToken, "}" : CloseBracketToken, ";" : SemiColonToken,
"," : CommaToken, "(" : OpenParenthesisToken, ")" : CloseParenthesisToken, "#" : SharpToken, "=" : EqualToken,
":": ColonToken, "+": PlusToken}
def __init__(self, code, newlinetoken=False):
self.code = code
self.newlinetoken = newlinetoken
def split_line(self, line):
return line.strip().split()
def is_keyword(self, word):
return word in self.keywords
def split_word(self, word):
"""Slit a dummy name with all token_chr"""
queue = [None, word]
for name in iter(queue.pop, None):
if name in self.token_chr:
yield self.token_chr[name]()
continue
if not any(spec_chr in name for spec_chr in self.token_chr):
yield NameToken(name)
continue
new_tokens = [name]
for spec_chr in self.token_chr:
new_tokens = list(new_tokens[0].partition(spec_chr)) + new_tokens[1:]
queue.extend(reversed([x for x in new_tokens if x]))
def __iter__(self):
for line in self.code.split("\n"):
for word in self.split_line(line):
if self.is_keyword(word):
yield KeywordToken(word)
continue
for tok in self.split_word(word):
yield tok
if self.newlinetoken:
yield NewLineToken()
class ParsingError(Exception):
pass
class Parser(object):
def __init__(self, data):
self.lexer = iter(Lexer(self.initial_processing(data)))
self.peek_token = None
def assert_keyword(self, expected_keyword, n=None):
if n is None:
n = self.assert_token_type(KeywordToken)
if n.value != expected_keyword:
raise ParsingError("Expected Keyword {0} got {1} instead".format(expected_keyword, n.value))
return n
def assert_token_type(self, expected_type, n=None):
if n is None:
n = self.next_token()
if type(n) != expected_type:
raise ParsingError("Expected type {0} and got {1} instead".format(expected_type.__name__, n))
return n
def assert_argument_io_info(self):
io_info = self.assert_token_type(NameToken)
if io_info.value not in self.known_io_info_type:
raise ParsingError("Was expection IO_INFO got {0} instead".format(winapi))
return io_info
def promote_to_type(self, token):
self.assert_token_type(NameToken, token)
return TypeToken(token.value)
def promote_to_int(self, token):
self.assert_token_type(NameToken, token)
try:
return int(token.value)
except ValueError:
return int(token.value, 0)
def next_token(self):
if self.peek_token is not None:
res = self.peek_token
self.peek_token = None
return res
return next(self.lexer, None)
def peek(self):
if self.peek_token is None:
self.peek_token = self.next_token()
return self.peek_token
def parse(self):
raise NotImplementedError("Parser.parse()")
def initial_processing(self, data):
# https://gcc.gnu.org/onlinedocs/cpp/Initial-processing.html#Initial-processing
# Step 1 -> use correct end of line + add last \n if not existing
data = data.replace("\r\n", "\n")
if not data.endswith("\n"):
data = data + "\n"
# Step 2: Trigraphs: intentionally not handled
pass
# Step 3: Line merge !
data = data.replace("\\\n", "")
# Step 4 Remove comments:
ins = StringIO.StringIO(data)
outs = StringIO.StringIO()
in_str = False
res = []
while ins.tell() != len(data):
c = ins.read(1)
if ins.tell() == len(data):
outs.write(c)
break
if not in_str and c == "/":
nc = ins.read(1)
if nc == "/":
while c != "\n":
c = ins.read(1)
outs.write(c)
continue
elif nc == "*":
while c != "*" or nc != "/":
c = nc
nc = ins.read(1)
if not nc:
raise ValueError("Unmatched */")
outs.write(" ")
continue
else:
outs.write(c)
ins.seek(ins.tell() - 1)
continue
# TODO: escape in str
elif c == '"':
in_str = not in_str
outs.write(c)
outs.seek(0)
return outs.read()
#KNOWN_TYPE = ["BYTE", "USHORT", "DWORD", "PVOID", "ULONG", "HANDLE", "PWSTR"]
#
#def validate_structs(structs):
# by_name = dict([(struct.name, struct) for struct in structs])
# if len(by_name) != len(structs):
# raise ValueError('2 structs with the same name')
#
# for struct in structs:
# for name, value in struct.typedef.items():
# by_name[name] = value
#
# for struct in structs:
# for field_type, field_name, nb_rep in struct.fields:
# if field_type.name not in KNOWN_TYPE:
# print("non standard type : {0}".format(field_type))
# if field_type.name not in by_name:
# print("UNKNOW TYPE {0}".format(field_type))
#
# return structs
#
#
#
#data = open("winfunc.txt", "r").read()
#
#
#def dbg_lexer(data):
# for i in Lexer(data).token_generation():
# print i
#
#def dbg_parser(data):
# return Parser(data).parse()
#
#def dbg_validate(data):
# return validate_structs(Parser(data).parse())
#
#
#def tst(x):
# print("=== TEST FOR <{0}> === ".format(x))
# g = Lexer("").split_word(x)
# for i in g:
# print (i)
#
#x = dbg_parser(data)
#for i in x:
# print i
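#
# Minimal usage sketch (illustrative; the sample declaration and the helper
# name are made up): tokenize a small C-style typedef with the Lexer above
# and print each token's repr.
def dbg_demo_lexer():
    code = "typedef struct {DWORD size; PVOID data;} MY_STRUCT;"
    for tok in Lexer(code):
        print(tok)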
| {
"content_hash": "a81927e0e295757900d226e4647c76e5",
"timestamp": "",
"source": "github",
"line_count": 273,
"max_line_length": 131,
"avg_line_length": 28,
"alnum_prop": 0.543563579277865,
"repo_name": "hakril/PythonForWindows",
"id": "b8429424972f435619b8bb3316040eeb982dc202",
"size": "7644",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ctypes_generation/simpleparser.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "4087889"
}
],
"symlink_target": ""
} |
import collections
import datetime
import hashlib
import logging
import re
import socket
import time
import xmlrpclib
import redis
import requests
from celery.task import task
from django.conf import settings
from django.db import transaction
from django.utils.timezone import now
from crate.pypi.utils.lock import Lock
from crate.web.packages.models import Package, ReleaseFile, TroveClassifier, DownloadDelta
from crate.pypi.models import PyPIIndexPage, PyPIDownloadChange
from crate.pypi.processor import PyPIPackage
logger = logging.getLogger(__name__)
INDEX_URL = "http://pypi.python.org/pypi"
SERVERKEY_URL = "http://pypi.python.org/serverkey"
SERVERKEY_KEY = "crate:pypi:serverkey"
CLASSIFIER_URL = "http://pypi.python.org/pypi?%3Aaction=list_classifiers"
PYPI_SINCE_KEY = "crate:pypi:since"
def process(name, version, timestamp, action, matches):
package = PyPIPackage(name, version)
package.process()
def remove(name, version, timestamp, action, matches):
package = PyPIPackage(name, version)
package.delete()
def remove_file(name, version, timestamp, action, matches):
package = PyPIPackage(name, version)
package.remove_files(*matches.groups())
@task
def bulk_process(name, version, timestamp, action, matches):
package = PyPIPackage(name)
package.process(bulk=True)
@task
def bulk_synchronize():
pypi = xmlrpclib.ServerProxy(INDEX_URL)
names = set()
for package in pypi.list_packages():
names.add(package)
bulk_process.delay(package, None, None, None, None)
for package in Package.objects.exclude(name__in=names):
package.delete()
@task
def synchronize(since=None):
with Lock("synchronize", expires=60 * 5, timeout=30):
datastore = redis.StrictRedis(**dict([(x.lower(), y) for x, y in settings.REDIS[settings.PYPI_DATASTORE].items()]))
if since is None:
s = datastore.get(PYPI_SINCE_KEY)
if s is not None:
since = int(float(s)) - 30
current = time.mktime(datetime.datetime.utcnow().timetuple())
pypi = xmlrpclib.ServerProxy(INDEX_URL)
headers = datastore.hgetall(SERVERKEY_KEY + ":headers")
sig = requests.get(SERVERKEY_URL, headers=headers, prefetch=True)
if not sig.status_code == 304:
sig.raise_for_status()
if sig.content != datastore.get(SERVERKEY_KEY):
logger.error("Key Rollover Detected")
pypi_key_rollover.delay()
datastore.set(SERVERKEY_KEY, sig.content)
datastore.hmset(SERVERKEY_KEY + ":headers", {"If-Modified-Since": sig.headers["Last-Modified"]})
if since is None: # @@@ Should we do this for more than just initial?
bulk_synchronize.delay()
else:
logger.info("[SYNCING] Changes since %s" % since)
changes = pypi.changelog(since)
for name, version, timestamp, action in changes:
line_hash = hashlib.sha256(u":".join([unicode(x) for x in (name, version, timestamp, action)]).encode("utf-8")).hexdigest()
logdata = {"action": action, "name": name, "version": version, "timestamp": timestamp, "hash": line_hash}
if not datastore.exists("crate:pypi:changelog:%s" % line_hash):
logger.debug("[PROCESS] %(name)s %(version)s %(timestamp)s %(action)s" % logdata)
logger.debug("[HASH] %(name)s %(version)s %(hash)s" % logdata)
dispatch = collections.OrderedDict([
(re.compile("^create$"), process),
(re.compile("^new release$"), process),
(re.compile("^add [\w\d\.]+ file .+$"), process),
(re.compile("^remove$"), remove),
(re.compile("^remove file (.+)$"), remove_file),
(re.compile("^update [\w]+(, [\w]+)*$"), process),
#(re.compile("^docupdate$"), docupdate), # @@@ Do Something
#(re.compile("^add (Owner|Maintainer) .+$"), add_user_role), # @@@ Do Something
#(re.compile("^remove (Owner|Maintainer) .+$"), remove_user_role), # @@@ Do Something
])
# Dispatch Based on the action
for pattern, func in dispatch.iteritems():
matches = pattern.search(action)
if matches is not None:
func(name, version, timestamp, action, matches)
break
else:
logger.warn("[UNHANDLED] %(name)s %(version)s %(timestamp)s %(action)s" % logdata)
datastore.setex("crate:pypi:changelog:%s" % line_hash, 2629743, datetime.datetime.utcnow().isoformat())
else:
logger.debug("[SKIP] %(name)s %(version)s %(timestamp)s %(action)s" % logdata)
logger.debug("[HASH] %(name)s %(version)s %(hash)s" % logdata)
datastore.set(PYPI_SINCE_KEY, current)
@task
def synchronize_troves():
resp = requests.get(CLASSIFIER_URL)
resp.raise_for_status()
current_troves = set(TroveClassifier.objects.all().values_list("trove", flat=True))
new_troves = set([x.strip() for x in resp.content.splitlines()]) - current_troves
with transaction.commit_on_success():
for classifier in new_troves:
TroveClassifier.objects.get_or_create(trove=classifier)
@task
def synchronize_downloads():
for package in Package.objects.all().order_by("downloads_synced_on").prefetch_related("releases", "releases__files")[:150]:
Package.objects.filter(pk=package.pk).update(downloads_synced_on=now())
for release in package.releases.all():
update_download_counts.delay(package.name, release.version, dict([(x.filename, x.pk) for x in release.files.all()]))
@task
def update_download_counts(package_name, version, files, index=None):
try:
pypi = xmlrpclib.ServerProxy(INDEX_URL)
downloads = pypi.release_downloads(package_name, version)
for filename, download_count in downloads:
if filename in files:
with transaction.commit_on_success():
for releasefile in ReleaseFile.objects.filter(pk=files[filename]).select_for_update():
old = releasefile.downloads
releasefile.downloads = download_count
releasefile.save()
change = releasefile.downloads - old
if change:
PyPIDownloadChange.objects.create(file=releasefile, change=change)
except socket.error:
logger.exception("[DOWNLOAD SYNC] Network Error")
@task
def pypi_key_rollover():
datastore = redis.StrictRedis(**dict([(x.lower(), y) for x, y in settings.REDIS[settings.PYPI_DATASTORE].items()]))
sig = requests.get(SERVERKEY_URL, prefetch=True)
sig.raise_for_status()
datastore.set(SERVERKEY_KEY, sig.content)
for package in Package.objects.all():
fetch_server_key.delay(package.name)
@task
def fetch_server_key(package):
p = PyPIPackage(package)
p.verify_and_sync_pages()
@task
def refresh_pypi_package_index_cache():
r = requests.get("http://pypi.python.org/simple/", prefetch=True)
PyPIIndexPage.objects.create(content=r.content)
@task
def integrate_download_deltas():
with Lock("pypi-integrate-downloads", expires=60 * 5, timeout=30):
count = 0
for d in PyPIDownloadChange.objects.filter(integrated=False)[:1000]:
with transaction.commit_on_success():
dd, c = DownloadDelta.objects.get_or_create(file=d.file, date=d.created.date(), defaults={"delta": d.change})
if not c:
DownloadDelta.objects.filter(pk=dd.pk).select_for_update()
dd.delta += d.change
dd.save()
PyPIDownloadChange.objects.filter(pk=d.pk).update(integrated=True)
count += 1
return count
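# Minimal illustration of the changelog dispatch used inside synchronize():
# actions are matched against ordered regexes and routed to handler functions.
# The helper name and the sample action string are illustrative only.
def _demo_dispatch(action="remove file example-1.0.tar.gz"):
    dispatch = collections.OrderedDict([
        (re.compile("^create$"), process),
        (re.compile("^remove file (.+)$"), remove_file),
    ])
    for pattern, func in dispatch.iteritems():
        matches = pattern.search(action)
        if matches is not None:
            return func.__name__, matches.groups()
    return None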
| {
"content_hash": "ff906354491a89bd83ce0b010939f8bd",
"timestamp": "",
"source": "github",
"line_count": 228,
"max_line_length": 139,
"avg_line_length": 35.82017543859649,
"alnum_prop": 0.6090363658626179,
"repo_name": "crateio/crate.pypi",
"id": "821a7c2e990ec5d1f349dce2e9dab7a0b8296d0d",
"size": "8167",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crate/pypi/tasks.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "178344"
}
],
"symlink_target": ""
} |
import numpy as np
from filter import movingaverage
def gentrends(x, window=1/3.0, charts=True):
"""
Returns a Pandas dataframe with support and resistance lines, along with the
slopes of the max and min trendlines.
:param x: One-dimensional data set
:param window: How long the trendlines should be. If window < 1, then it
will be taken as a percentage of the size of the data
:param charts: Boolean value saying whether to print chart to screen
"""
import numpy as np
import pandas as pd
x = np.array(x)
if window < 1:
window = int(window * len(x))
max1 = np.where(x == max(x))[0][0] # find the index of the abs max
min1 = np.where(x == min(x))[0][0] # find the index of the abs min
# First the max
if max1 + window > len(x):
max2 = max(x[0:(max1 - window)])
else:
max2 = max(x[(max1 + window):])
# Now the min
if min1 - window < 0:
min2 = min(x[(min1 + window):])
else:
min2 = min(x[0:(min1 - window)])
# Now find the indices of the secondary extrema
max2 = np.where(x == max2)[0][0] # find the index of the 2nd max
min2 = np.where(x == min2)[0][0] # find the index of the 2nd min
# Create & extend the lines
maxslope = (x[max1] - x[max2]) / (max1 - max2) # slope between max points
minslope = (x[min1] - x[min2]) / (min1 - min2) # slope between min points
a_max = x[max1] - (maxslope * max1) # y-intercept for max trendline
a_min = x[min1] - (minslope * min1) # y-intercept for min trendline
b_max = x[max1] + (maxslope * (len(x) - max1)) # extend to last data pt
b_min = x[min1] + (minslope * (len(x) - min1)) # extend to last data point
maxline = np.linspace(a_max, b_max, len(x)) # Y values between max's
minline = np.linspace(a_min, b_min, len(x)) # Y values between min's
# OUTPUT
trends = np.transpose(np.array((x, maxline, minline)))
trends = pd.DataFrame(trends, index=np.arange(0, len(x)),
columns=['Data', 'Max Line', 'Min Line'])
if charts is True:
from matplotlib.pyplot import plot, grid, show, figure
figure()
plot(trends)
grid()
show()
return trends, maxslope, minslope
def segtrends(x, segments=2, charts=True, window=7):
"""
Turns the minitrend logic into an iterative process that is more easily
adapted to simple trading systems; allows backtesting functionality.
:param x: One-dimensional data set
:param segments: Number of segments to split the data into
:param window: How long the trendlines should be. If window < 1, then it
will be taken as a percentage of the size of the data
:param charts: Boolean value saying whether to print chart to screen
"""
import numpy as np
y = np.array(x)
n=len(y)
movy = movingaverage(y, window)
# Implement trendlines and Find the indexes of these maxima in the data
segments = int(segments)
maxima = np.ones(segments)
minima = np.ones(segments)
x_maxima = np.ones(segments)
x_minima = np.ones(segments)
segsize = int(len(y)/segments)
for i in range(1, segments+1):
ind2 = i*segsize
ind1 = ind2 - segsize
seg = y[ind1:ind2]
maxima[i-1] = max(seg)
minima[i-1] = min(seg)
x_maxima[i-1] = ind1 + (np.where(seg == maxima[i-1])[0][0])
x_minima[i-1] = ind1 + (np.where(seg == minima[i-1])[0][0])
if charts:
import matplotlib.pyplot as plt
plt.plot(y)
plt.grid(True)
for i in range(0, segments-1):
maxslope = (maxima[i+1] - maxima[i]) / (x_maxima[i+1] - x_maxima[i])
a_max = maxima[i] - (maxslope * x_maxima[i])
b_max = maxima[i] + (maxslope * (len(y) - x_maxima[i]))
maxline = np.linspace(a_max, b_max, len(y))
minslope = (minima[i+1] - minima[i]) / (x_minima[i+1] - x_minima[i])
a_min = minima[i] - (minslope * x_minima[i])
b_min = minima[i] + (minslope * (len(y) - x_minima[i]))
minline = np.linspace(a_min, b_min, len(y))
if charts:
#plt.plot(maxline, 'g')
#plt.plot(minline, 'r')
pass
if charts:
plt.plot(range(n), movy, 'b')
plt.plot(x_maxima, maxima, 'g')
plt.plot(x_minima, minima, 'r')
plt.show()
# OUTPUT
return x_maxima, maxima, x_minima, minima
def minitrends(x, window=20, charts=True):
"""
Turns the minitrend logic into an iterative process that is more easily
adapted to simple trading systems; allows backtesting functionality.
:param x: One-dimensional data set
:param window: How long the trendlines should be. If window < 1, then it
will be taken as a percentage of the size of the data
:param charts: Boolean value saying whether to print chart to screen
"""
import numpy as np
y = np.array(x)
if window < 1: # if window is given as fraction of data length
window = float(window)
window = int(window * len(y))
x = np.arange(0, len(y))
dy = y[window:] - y[:-window]
crit = dy[:-1] * dy[1:] < 0
# Find whether max's or min's
maxi = (y[x[crit]] - y[x[crit] + window] > 0) & \
(y[x[crit]] - y[x[crit] - window] > 0) * 1
mini = (y[x[crit]] - y[x[crit] + window] < 0) & \
(y[x[crit]] - y[x[crit] - window] < 0) * 1
maxi = maxi.astype(float)
mini = mini.astype(float)
maxi[maxi == 0] = np.nan
mini[mini == 0] = np.nan
xmax = x[crit] * maxi
xmax = xmax[~np.isnan(xmax)]
xmax = xmax.astype(int)
xmin = x[crit] * mini
xmin = xmin[~np.isnan(xmin)]
xmin = xmin.astype(int)
# See if better max or min in region
yMax = np.array([])
xMax = np.array([])
for i in xmax:
indx = np.where(xmax == i)[0][0] + 1
try:
Y = y[i:xmax[indx]]
yMax = np.append(yMax, Y.max())
xMax = np.append(xMax, np.where(y == yMax[-1])[0][0])
except:
pass
yMin = np.array([])
xMin = np.array([])
for i in xmin:
indx = np.where(xmin == i)[0][0] + 1
try:
Y = y[i:xmin[indx]]
yMin = np.append(yMin, Y.min())
xMin = np.append(xMin, np.where(y == yMin[-1])[0][0])
except:
pass
if y[-1] > yMax[-1]:
yMax = np.append(yMax, y[-1])
xMax = np.append(xMax, x[-1])
if y[0] not in yMax:
yMax = np.insert(yMax, 0, y[0])
xMax = np.insert(xMax, 0, x[0])
if y[-1] < yMin[-1]:
yMin = np.append(yMin, y[-1])
xMin = np.append(xMin, x[-1])
if y[0] not in yMin:
yMin = np.insert(yMin, 0, y[0])
xMin = np.insert(xMin, 0, x[0])
# Plot results if desired
if charts is True:
from matplotlib.pyplot import plot, show, grid
plot(x, y)
plot(xMax, yMax, '-o')
plot(xMin, yMin, '-o')
grid(True)
show()
# Return arrays of critical points
return xMax, yMax, xMin, yMin
def iterlines(x, window=30, charts=True):
"""
Turns the minitrend logic into an iterative process that is more easily
adapted to simple trading systems; allows backtesting functionality.
:param x: One-dimensional data set
:param window: How long the trendlines should be. If window < 1, then it
will be taken as a percentage of the size of the data
:param charts: Boolean value saying whether to print chart to screen
"""
import numpy as np
x = np.array(x)
n = len(x)
if window < 1:
window = int(window * n)
sigs = np.zeros(n, dtype=float)
i = window
while i != n:
if x[i] > max(x[i-window:i]): sigs[i] = 1
elif x[i] < min(x[i-window:i]): sigs[i] = -1
i += 1
xmin = np.where(sigs == -1.0)[0]
xmax = np.where(sigs == 1.0)[0]
ymin = x[xmin]
ymax = x[xmax]
if charts is True:
from matplotlib.pyplot import plot, grid, show
plot(x)
plot(xmin, ymin, 'ro')
plot(xmax, ymax, 'go')
grid(True)
show()
return sigs
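# Minimal usage sketch (illustrative): iterlines() only needs a one-dimensional
# series, so a synthetic random walk is enough; charts=False keeps matplotlib
# out of the run. The helper and variable names here are made up.
def _demo_iterlines():
    prices = np.cumsum(np.random.randn(300)) + 100.0
    sigs = iterlines(prices, window=30, charts=False)
    return int((sigs == 1).sum()), int((sigs == -1).sum())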
| {
"content_hash": "c23105567f5370dd7761b000641085b6",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 79,
"avg_line_length": 32.926530612244896,
"alnum_prop": 0.563530432626751,
"repo_name": "fraka6/trading-with-python",
"id": "f7265a0b69f2307625509d7efc09f1872f2f80c4",
"size": "8067",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "util/trendy.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "220333"
},
{
"name": "Shell",
"bytes": "1082"
}
],
"symlink_target": ""
} |
import logging
import uuid
from functools import partial
from itertools import (
count,
dropwhile,
)
from pgshovel.replication.validation import TRANSACTION_START_EVENT_TYPES
from pgshovel.utilities.protobuf import get_oneof_value
logger = logging.getLogger(__name__)
class FormattedBatchIdentifier(object):
def __init__(self, batch_identifier):
self.batch_identifier = batch_identifier
def __str__(self):
return '{node.hex}/{id}'.format(
node=uuid.UUID(bytes=self.batch_identifier.node),
id=self.batch_identifier.id,
)
class FormattedSnapshot(object):
def __init__(self, snapshot, max=3):
self.snapshot = snapshot
self.max = max
def __str__(self):
active = self.snapshot.active
return '{snapshot.min}:{snapshot.max}:[{active}{truncated}]'.format(
snapshot=self.snapshot,
active=','.join(map(str, active[:self.max])),
truncated='' if len(active) <= self.max else ',+{0}...'.format(len(active) - self.max),
)
class UnableToPrimeError(Exception):
"""
Raised when an attempt to prime is made, but the priming failed after the
max number of messages.
"""
pass
def is_start_batch_operation(message):
"""
Is the ``message`` the start operation of a batch of mutations.
"""
value = get_oneof_value(message.batch_operation, 'operation')
return type(value) in TRANSACTION_START_EVENT_TYPES
def prime_until(check, max_messages, stream):
"""
Attempts to "prime" the ``stream`` by dropping any messages for which
``check`` evaluates to ``False``, up until ``max_messages``.
"""
attempts = count(start=1)
def still_checking((offset, message)):
attempt = next(attempts)
if check(message):
return False # Stop checking
elif attempt > max_messages:
raise UnableToPrimeError # We've dropped too many messages
elif attempt % 25 == 0:
# Log progress but keep dropping messages; without an explicit return here
# the predicate would evaluate falsy and priming would stop prematurely.
logger.info('Still priming %r.' % stream)
return True # Still checking
return dropwhile(still_checking, stream)
prime_for_batch_start = partial(prime_until, is_start_batch_operation)
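# Minimal illustration of prime_until(): it works on any iterable of
# (offset, message) pairs, so a plain check function and a small in-memory
# stream are enough to show the dropwhile behaviour. Names are illustrative.
def _demo_prime_until():
    fake_stream = enumerate(['noise', 'noise', 'start', 'payload'])
    primed = prime_until(lambda message: message == 'start', 10, fake_stream)
    return list(primed)  # [(2, 'start'), (3, 'payload')]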
| {
"content_hash": "455e93658913daeb9165ab9c1d87ccf7",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 99,
"avg_line_length": 27.925,
"alnum_prop": 0.6374216651745748,
"repo_name": "disqus/pgshovel",
"id": "817b8cf0e0f80ce1ad73765c5ae3ff447f1b094e",
"size": "2234",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/main/python/pgshovel/streams/utilities.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1040"
},
{
"name": "Protocol Buffer",
"bytes": "5956"
},
{
"name": "Python",
"bytes": "143749"
},
{
"name": "Shell",
"bytes": "758"
}
],
"symlink_target": ""
} |
from .scatter_type import ScatterPlotType, NScannedNPlotType
from .scatter_type import DocsExaminedPlotType, DurationLineType
from .event_type import EventPlotType, RSStatePlotType
from .range_type import RangePlotType
from .histogram_type import HistogramPlotType
from .connchurn_type import ConnectionChurnPlotType
| {
"content_hash": "35013c29696758c9bc228f411d2ae663",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 64,
"avg_line_length": 52.833333333333336,
"alnum_prop": 0.8675078864353313,
"repo_name": "rueckstiess/mtools",
"id": "5acaaf7c3cbec1b9fdd4808394364d543967feee",
"size": "317",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "mtools/mplotqueries/plottypes/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "672"
},
{
"name": "Python",
"bytes": "548352"
}
],
"symlink_target": ""
} |
import os
from qrl.core.ChainManager import ChainManager
from qrl.core.GenesisBlock import GenesisBlock
from qrl.core.State import State
from pyqryptonight.pyqryptonight import PoWHelper, StringToUInt256, UInt256ToString
def main():
persistent_state = State()
chain_manager = ChainManager(state=persistent_state)
chain_manager.load(GenesisBlock())
ph = PoWHelper()
difficulty = StringToUInt256('5000')
filename = os.path.expanduser("~/crypto/qryptonight/modeling/blockdata.csv")
with open(filename, 'w') as f:
f.write("i,timestamp,prev_timestamp,delta,difficulty,target\n")
prev_timestamp = None
for i in range(chain_manager.height):
block = chain_manager.get_block_by_number(i)
if i == 0:
prev_timestamp = block.blockheader.timestamp
continue
target = ph.getTarget(difficulty)
delta = block.blockheader.timestamp - prev_timestamp
outs = "{},{},{},{},{},{}\n".format(i,
block.blockheader.timestamp,
prev_timestamp,
delta,
UInt256ToString(difficulty),
UInt256ToString(target))
f.write(outs)
difficulty = ph.getDifficulty(block.blockheader.timestamp, prev_timestamp, difficulty)
difficulty = StringToUInt256(str(max(2, int(UInt256ToString(difficulty)))))
prev_timestamp = block.blockheader.timestamp
if __name__ == '__main__':
main()
| {
"content_hash": "5c59c2547e5dbbe42f84d7225fb6f043",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 98,
"avg_line_length": 35.765957446808514,
"alnum_prop": 0.570493753718025,
"repo_name": "theQRL/QRL",
"id": "140ead4f7a0cd4cbbde332a01b33244744567286",
"size": "1681",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/qrl/tools/modeling/extract_timing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "185833"
},
{
"name": "Python",
"bytes": "1938166"
},
{
"name": "Shell",
"bytes": "2126"
}
],
"symlink_target": ""
} |
__author__ = 'greghines'
import numpy as np
import matplotlib.pyplot as plt
import csv
import sys
import os
import pymongo
client = pymongo.MongoClient()
db = client['plankton_2015-01-01']
classification_collection = db["plankton_classifications"]
subject_collection = db["plankton_subjects"]
user_collection = db["plankton_users"]
finished_subjects = []
for subject in subject_collection.find({"state":"complete"}):
zooniverse_id = subject["zooniverse_id"]
finished_subjects.append((zooniverse_id,subject["updated_at"]))
finished_subjects.sort(key=lambda x:x[1],reverse=True)
print len(finished_subjects)
count = 0
for zooniverse_id,date in finished_subjects[:1000]:
users_l = []
for classification in classification_collection.find({"subjects.zooniverse_id":zooniverse_id}):
if "user_name" in classification:
users_l.append(classification["user_name"])
else:
users_l.append(classification["user_ip"])
if not(len(users_l) == len(list(set(users_l)))):
count += 1
print count | {
"content_hash": "5c530873b69b24dac7c88993247a57ee",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 99,
"avg_line_length": 31.029411764705884,
"alnum_prop": 0.7023696682464455,
"repo_name": "zooniverse/aggregation",
"id": "8c03b3d0e576d77c6b185e09f096f8fa249ada40",
"size": "1077",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "experimental/plankton/repeats.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "723"
},
{
"name": "Python",
"bytes": "2184451"
},
{
"name": "Scala",
"bytes": "629"
},
{
"name": "Shell",
"bytes": "190"
}
],
"symlink_target": ""
} |
'''
Multipoint communication service protocol (T.125)
'''
import logging
import ptypes
from ptypes import *
from protocol import gcc, ber, per
ptypes.setbyteorder(ptypes.config.byteorder.bigendian)
### MCS protocol data (BER Encoding)
class Protocol(ber.Protocol.copy(recurse=True)):
pass
class Element(ber.Element):
Protocol = Protocol
Protocol.default = Element
class Packet(Element):
byteorder = ptypes.config.byteorder.bigendian
# FIXME: this number is actually encoded with PER-ALIGNED
class Result(pbinary.enum):
length, _values_ = 4, [
('rt-successful', 0),
('rt-domain-merging', 1),
('rt-domain-not-hierarchical', 2),
('rt-no-such-channel', 3),
('rt-no-such-domain', 4),
('rt-no-such-user', 5),
('rt-not-admitted', 6),
('rt-other-user-id', 7),
('rt-parameters-unacceptable', 8),
('rt-token-not-available', 9),
('rt-token-not-possessed', 10),
('rt-too-many-channels', 11),
('rt-too-many-tokens', 12),
('rt-too-many-users', 13),
('rt-unspecified-failure', 14),
]
def summary(self):
return "{:s}({:d})".format(self.str(), self.int())
class Reason(pbinary.enum):
length, _values_ = 3, [
('rn-domain-disconnect', 0),
('rn-provider-initiated', 1),
('rn-token-purged', 2),
('rn-user-requested', 3),
('rn-channel-purged', 4),
]
class TokenStatus(ber.ENUMERATED):
_values_ = [
('notInUse', 0),
('selfGrabbed', 1),
('otherGrabbed', 2),
('selfInhibited', 3),
('otherInhibited', 4),
('selfRecipient', 5),
('selfGiving', 6),
('otherGiving', 7),
]
class DataPriority(ber.ENUMERATED):
_values_ = [
('top', 0),
('high', 1),
('medium', 2),
('low', 3),
]
class DomainParameters(ber.SEQUENCE):
_fields_ = [
(ber.INTEGER, 'maxChannelIds'),
(ber.INTEGER, 'maxUserIds'),
(ber.INTEGER, 'maxTokenIds'),
(ber.INTEGER, 'numPriorities'),
(ber.INTEGER, 'minThroughput'),
(ber.INTEGER, 'maxHeight'),
(ber.INTEGER, 'maxMCSPDUsize'),
(ber.INTEGER, 'protocolVersion'),
]
@Protocol.Application.define
class ConnectInitial(ber.SEQUENCE):
tag = 101
_fields_ = [
(ber.OCTET_STRING, 'callingDomainSelector'),
(ber.OCTET_STRING, 'calledDomainSelector'),
(ber.BOOLEAN, 'upwardFlag'),
(DomainParameters, 'targetParameters'),
(DomainParameters, 'minimumParameters'),
(DomainParameters, 'maximumParameters'),
(ber.OCTET_STRING, 'userData'),
]
@Protocol.Application.define
class ConnectResponse(ber.SEQUENCE):
tag = 102
class Result(ber.OCTET_STRING):
def str(self):
res = self.cast(Result, width=8 * self.size())
return res.str()
def int(self):
res = self.cast(Result, width=8 * self.size())
return res.int()
def summary(self):
return "{:s}({:d})".format(self.str(), self.int())
# FIXME: is this right?
_fields_ = [
(Result, 'result'),
(ber.INTEGER, 'calledConnectId'),
(DomainParameters, 'domainParameters'),
(ber.OCTET_STRING, 'userData'),
]
@Protocol.Application.define
class ConnectAdditional(ber.SEQUENCE):
tag = 103
_fields_ = [
(ber.INTEGER, 'calledConnectId'),
(DataPriority, 'dataPriority'),
]
@Protocol.Application.define
class ConnectResult(ber.SEQUENCE):
tag = 104
_fields_ = [
(ber.OCTET_STRING, 'result'),
]
### DomainMCSPDU
class DomainMCSPDU(ptype.definition):
cache = {}
class Choice(pbinary.enum):
length, _values_ = 6, [
('plumbDomainIndication', 0),
('erectDomainRequest', 1),
('mergeChannelsRequest', 2),
('mergeChannelsConfirm', 3),
('purgeChannelsIndication', 4),
('mergeTokensRequest', 5),
('mergeTokensConfirm', 6),
('purgeTokensIndication', 7),
('disconnectProviderUltimatum', 8),
('rejectMCSPDUUltimatum', 9),
('attachUserRequest', 10),
('attachUserConfirm', 11),
('detachUserRequest', 12),
('detachUserIndication', 13),
('channelJoinRequest', 14),
('channelJoinConfirm', 15),
('channelLeaveRequest', 16),
('channelConveneRequest', 17),
('channelConveneConfirm', 18),
('channelDisbandRequest', 19),
('channelDisbandIndication', 20),
('channelAdmitRequest', 21),
('channelAdmitIndication', 22),
('channelExpelRequest', 23),
('channelExpelIndication', 24),
('sendDataRequest', 25), # Each of these cases are handled by HandleAllSendDataPDUs
('sendDataIndication', 26), #
('uniformSendDataRequest', 27), #
('uniformSendDataIndication', 28), #
('tokenGrabRequest', 29),
('tokenGrabConfirm', 30),
('tokenInhibitRequest', 31),
('tokenInhibitConfirm', 32),
('tokenGiveRequest', 33),
('tokenGiveIndication', 34),
('tokenGiveResponse', 35),
('tokenGiveConfirm', 36),
('tokenPleaseRequest', 37),
('tokenPleaseIndication', 38),
('tokenReleaseRequest', 39),
('tokenReleaseConfirm', 40),
('tokenTestRequest', 41),
('tokenTestConfirm', 42),
]
Header = Choice
### Main PDU
class PDU(pstruct.type):
'''
MCS packet
'''
@pbinary.bigendian
class _header(pbinary.struct):
def __value(self):
res = self['choice']
res = DomainMCSPDU.lookup(res, 0)
return getattr(res, 'Header', 0)
_fields_ = [
(DomainMCSPDU.Header, 'choice'),
(__value, 'value'),
]
def __value(self):
res = self['header'].li
return DomainMCSPDU.get(res['choice'], ptype.undefined, __header__=res.item('value'))
_fields_ = [
(_header, 'header'),
(__value, 'value'),
]
def alloc(self, **fields):
# Check if the caller is allocating the 'value' field
if 'value' in fields and not isinstance(fields['value'], dict):
res = fields['value']
# If so, then copy its Header type into the 'header' field
hdr = fields.setdefault('header', {})
if isinstance(hdr, dict) and hasattr(res, 'Header'):
hdr.setdefault('value', res.Header)
elif isinstance(hdr, ptype.base) and hasattr(res, 'Header'):
hdr['value'] = res.Header().a
res.__header__ = hdr['value']
elif ptypes.istype(res) and not hasattr(res, 'Header'):
logging.warning("Unable to map .__header__ attribute for {:s} due to missing .Header attribute for value {:s}".format(self.classname(), res.typename()))
# Now we can finally allocate our instance
res = super(PDU, self).alloc(**fields)
# If there is currently no '__header__' attribute, then explicitly assign one
if not hasattr(res['value'], '__header__'):
res['value'].__header__ = res['header'].item('value')
return res
### DomainMCSPDU definitions
@DomainMCSPDU.define
class PlumbDomainIndication(pstruct.type):
type = 0
_fields_ = [
(dyn.clone(ber.INTEGER, length=2), 'heightLimit'),
]
def summary(self):
return "heightLimit={:d}".format(self['heightLimit'].int())
@DomainMCSPDU.define
class ErectDomainRequest(pstruct.type):
type = 1
_fields_ = [
(per.INTEGER, 'subHeight'),
(per.INTEGER, 'subInterval'),
]
def summary(self):
return "subHeight={:s} subInterval={:s}".format(self['subHeight'].summary(), self['subInterval'].summary())
@DomainMCSPDU.define
class DisconnectProviderUltimatum(ptype.undefined):
type = 8
Header = Reason
def __getitem__(self, name):
if name.lower() == 'reason':
return self.__header__
raise KeyError(name)
def summary(self):
return "reference(reason)={:s}".format(self['reason'].summary())
def set(self, **fields):
if 'reason' in fields:
self['reason'].set(fields.pop('reason'))
return super(DisconnectProviderUltimatum, self).set(**fields)
def details(self):
return "[{:x}] <reference {:s} 'reason'> {:s}".format(self.getoffset(), self['reason'].classname(), self['reason'].summary()) + '\n'
def repr(self):
return self.details()
class Diagnostic(pbinary.enum):
length, _values_ = 4, [
('dc-inconsistent-merge', 0),
('dc-forbidden-PDU-downward', 1),
('dc-forbidden-PDU-upward', 2),
('dc-invalid-BER-encoding', 3),
('dc-invalid-PER-encoding', 4),
('dc-misrouted-user', 5),
('dc-unrequested-confirm', 6),
('dc-wrong-transport-priority', 7),
('dc-channel-id-conflict', 8),
('dc-token-id-conflict', 9),
('dc-not-user-id-channel', 10),
('dc-too-many-channels', 11),
('dc-too-many-tokens', 12),
('dc-too-many-users', 13),
]
@DomainMCSPDU.define
class RejectMCSPDUUltimatum(pstruct.type):
type = 9
Header = Diagnostic
_fields_ = [
(gcc.LengthDeterminant, 'length'),
(lambda self: dyn.clone(ber.OCTET_STRING, length=self['length'].li.int()), 'initialOctets'),
]
def __field__(self, name):
if name.lower() == 'diagnostic':
return self.__header__
return super(RejectMCSPDUUltimatum, self).__field__(name)
def summary(self):
return "reference(diagnostic)={:s} initialOctets={:s}".format(self['diagnostic'].summary(), self['initialOctets'].summary())
@DomainMCSPDU.define
class AttachUserRequest(ptype.undefined):
type = 10
@DomainMCSPDU.define
class AttachUserConfirm(pstruct.type):
type = 11
class Header(pbinary.flags):
_fields_ = [
(1, 'initiatorQ'),
(Result, 'result'),
]
def __initiator(self):
res = self.__header__
return gcc.UserId if res['initiatorQ'] else dyn.clone(gcc.UserId, length=0)
_fields_ = [
(__initiator, 'initiator'),
]
def __field__(self, name):
if name.lower() == 'result':
return self.__header__.item('result')
return super(AttachUserConfirm, self).__field__(name)
def summary(self):
if self.__header__['initiatorQ']:
return "reference(result)={:s} initiator={:s}".format(self['result'].summary(), self['initiator'].summary())
return "reference(result)={:s}".format(self['result'].summary())
@DomainMCSPDU.define
class DetachUserRequest(pstruct.type):
type = 12
Header = Reason
_fields_ = [
(gcc.LengthDeterminant, 'count'),
(lambda self: dyn.array(gcc.UserId, self['count'].li.int()), 'userIds'),
]
def __field__(self, name):
if name.lower() == 'reason':
return self.__header__
return super(DetachUserRequest, self).__field__(name)
def summary(self):
res = self['userIds']
return "reference(reason)={:s} userIds=[{:s}]".format(self['reason'].summary(), ', '.join(item.summary() for item in res))
def alloc(self, **fields):
res = super(DetachUserRequest, self).alloc(**fields)
return res if 'count' in fields else res.set(count=len(res['userIds']))
def set(self, **fields):
if 'reason' in fields:
self['reason'].set(fields.pop('reason'))
return super(DetachUserRequest, self).set(**fields)
@DomainMCSPDU.define
class DetachUserIndication(DetachUserRequest):
type = 13
@DomainMCSPDU.define
class ChannelJoinRequest(pstruct.type):
type = 14
_fields_ = [
(gcc.UserId, 'initiator'),
(gcc.ChannelId, 'channelId'),
]
def summary(self):
return "initiator={:s} channelId={:s}".format(self['initiator'].summary(), self['channelId'].summary())
@DomainMCSPDU.define
class ChannelJoinConfirm(pstruct.type):
type = 15
class Header(pbinary.flags):
_fields_ = [
(1, 'channelIdQ'),
(Result, 'result'),
]
def __channelId(self):
res = self.__header__
return gcc.ChannelId if res['channelIdQ'] else dyn.clone(gcc.ChannelId, length=0)
_fields_ = [
(gcc.UserId, 'initiator'),
(gcc.ChannelId, 'requested'),
(__channelId, 'channelId'),
]
def __field__(self, name):
if name.lower() == 'result':
return self.__header__.item('result')
return super(ChannelJoinConfirm, self).__field__(name)
def summary(self):
if self.__header__['channelIdQ']:
return "reference(result)={:s} initiator={:s} requested={:s} channelId={:s}".format(self['result'].summary(), self['initiator'].summary(), self['requested'].summary(), self['channelId'].summary())
return "reference(result)={:s} initiator={:s} requested={:s}".format(self['result'].summary(), self['initiator'].summary(), self['requested'].summary())
@DomainMCSPDU.define
class ChannelLeaveRequest(pstruct.type):
type = 16
_fields_ = [
(gcc.LengthDeterminant, 'count'),
(lambda self: dyn.array(gcc.ChannelId, self['count'].li.int()), 'channelIds'),
]
def alloc(self, **fields):
res = super(ChannelLeaveRequest, self).alloc(**fields)
return res if 'count' in fields else res.set(count=len(res['channelIds']))
def summary(self):
return "({:d}) [{:s}]".format(self['count'].int(), ', '.join(ch.summary() for ch in self['channelIds']))
@DomainMCSPDU.define
class ChannelConveneRequest(pstruct.type):
type = 17
_fields_ = [
(gcc.UserId, 'initiator'),
]
def summary(self):
return "initiator={:s}".format(self['initiator'].summary())
@DomainMCSPDU.define
class ChannelDisbandRequest(pstruct.type):
type = 19
_fields_ = [
(gcc.UserId, 'initiator'),
(gcc.ChannelId, 'channelId'),
]
def summary(self):
return "initiator={:s} channelId={:s}".format(self['initiator'].summary(), self['channelId'].summary())
@DomainMCSPDU.define
class ChannelDisbandIndication(pstruct.type):
type = 20
_fields_ = [
(gcc.ChannelId, 'channelId'),
]
def summary(self):
return "channelId={:s}".format(self['channelId'].summary())
class DataPriority(pbinary.enum):
length, _values_ = 2, [
('top', 0),
('high', 1),
('medium', 2),
('low', 3),
]
class Segmentation(pbinary.integer):
def blockbits(self):
return 2
class SendDataPDU(pstruct.type):
@pbinary.bigendian
class _priority_segmentation(pbinary.struct):
_fields_ = [
(DataPriority, 'dataPriority'),
(Segmentation, 'segmentation'),
]
class _length_userData(pstruct.type):
_fields_ = [
(gcc.LengthDeterminant, 'length'),
(lambda self: dyn.block(self['length'].li.int()), 'data'),
]
_fields_ = [
(gcc.UserId, 'initiator'),
(gcc.ChannelId, 'channelId'),
(_priority_segmentation, 'dataAttributes'),
(_length_userData, 'userData'),
]
@DomainMCSPDU.define
class SendDataRequest(SendDataPDU):
type = 25
@DomainMCSPDU.define
class SendDataIndication(SendDataPDU):
type = 26
@DomainMCSPDU.define
class UniformSendDataRequest(SendDataRequest):
type = 27
@DomainMCSPDU.define
class UniformSendDataIndication(SendDataIndication):
type = 28
| {
"content_hash": "a9ccadcea6e8ffe3a47123c4304b2c74",
"timestamp": "",
"source": "github",
"line_count": 523,
"max_line_length": 208,
"avg_line_length": 30.261950286806883,
"alnum_prop": 0.5764832248688949,
"repo_name": "arizvisa/syringe",
"id": "7ee3664aedebf1ca2130d0b66ad01ac5bb12ad6e",
"size": "15827",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "template/protocol/mcs.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "22844"
},
{
"name": "C",
"bytes": "11095"
},
{
"name": "HTML",
"bytes": "1761"
},
{
"name": "Makefile",
"bytes": "1228"
},
{
"name": "Perl",
"bytes": "9176"
},
{
"name": "Python",
"bytes": "4312979"
},
{
"name": "Shell",
"bytes": "171"
},
{
"name": "XQuery",
"bytes": "1884"
},
{
"name": "XSLT",
"bytes": "10518"
}
],
"symlink_target": ""
} |
import csv
import re
import os
from os.path import join
from datetime import datetime
# app
import database
class CSVMongo(object):
def __init__(self, date_format=None, **kwargs):
self.date_format = date_format
def _has_pattern_date(self, value,
dt_pattern=r"[\d]{4}-[\d]{2}-[\d]{2} [\d]{2}:[\d]{2}:[\d]{2}"):
"""
Checks whether a string value matches a date pattern and, if it does,
converts it to a datetime object; returns None if no match occurred.
:param dt_pattern: a raw regex string used to match the date format.
:param value: a string, probably containing a formatted date, that will be
converted to a date/datetime if it matches.
"""
re_date_format = re.compile(dt_pattern) # date pattern
res = re_date_format.match(value)
if res:
return self._convert_str_to_datetime(value)
def _convert_str_to_datetime(self, value, str_format='%Y-%m-%d %H:%M:%S'):
"""
Converts a datetime string into a datetime object.
:param value: the string value to convert.
:param str_format: the desired date/datetime format.
"""
date_formated = datetime.strptime(value, str_format)
return date_formated
class DocumentSpekDump(CSVMongo):
def __init__(self, date_format=None, **kwargs):
super(DocumentSpekDump, self).__init__(date_format, **kwargs)
self.document = kwargs
self.date_format = r"[\d]{4}-[\d]{2}-[\d]{2} [\d]{2}:[\d]{2}:[\d]{2}"
def _get_csv_spekx(self, workdir, file_pattern=r".csv",
ignorecase=True):
""" Analysis all files in workdir that match to file_pattern and returns
a list with the results.
:param workdir: absolute path to directory that contains the files. If
None current directory is workdir
:param file_pattern: a regex pattern that combines with all files
:param ignorecase: if True file_pattern can be CSV or csv, etc.
"""
if workdir is None:
workdir = os.path.curdir
else:
assert os.path.isabs(workdir)
csvfiles = []
# Flags must be given to re.compile(); the second positional argument of a
# compiled pattern's match() is the start position, not a flag.
rex = re.compile(file_pattern, re.IGNORECASE if ignorecase else 0)
walkobj = os.walk(workdir).next()
dirname = walkobj[0]
all_files = walkobj[2]
for f in all_files:
resx = rex.match(f)
if resx:
absfile = join(dirname, f)
csvfiles.append(absfile)
return csvfiles
def get_tickets(self, workdir):
"""
Returns a list of DocumentSpekDump objects, one per ticket/line found in
the CSV files.
"""
csvfiles = self._get_csv_spekx(workdir,
file_pattern=r"[\d]+_[\d]+_[\d]+_[\d]+_[\d]+_[\d]+_CSV")
documents = []
for csvfile in csvfiles:
with open(csvfile) as csvcurrent:
dict_reader = csv.DictReader(csvcurrent)
for document in dict_reader:
# Fixes encoding...
for k, v in document.items():
if v is not None:
v = v.decode('iso-8859-1').encode('utf-8')
date_checked = self._has_pattern_date(v) # pattern default
if date_checked:
document[k] = date_checked
spk = DocumentSpekDump(**document)
documents.append(spk)
return documents
def register_tickets(self, workdir, id_field="ACIONAMENTO"):
"""
Saves all tickets (or lines) returned by get_tickets to the database.
This is a shorthand that avoids a for loop such as:
dumps = instance.get_tickets(workdir)
for doc in dumps:
doc.save(id_field)
Instead you can simply call:
instance.register_tickets(workdir)
Args:
workdir: directory that contains all the CSV files (one level deep)
id_field: field to index, e.g. in MailChimp it is the email; in
a ticketing system it can be the ticket id
Returns:
quantity of documents saved
"""
# TODO: support multiple indexes
documents = self.get_tickets(workdir)
count = 0
for doc in documents:
doc.save(id_field)
count += 1
return count
def save(self, id_field):
"""
Upserts the document: inserts it if it does not exist, otherwise updates it.
"""
if self.document:
return database.SpekDumpDAO().save(self.document, id_field)
else:
raise TypeError("save() require that instance has the document "
"attribute loaded.")
| {
"content_hash": "6af9f850b0ddb7c18350cafa7dc2c33e",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 82,
"avg_line_length": 32.126666666666665,
"alnum_prop": 0.5470014525835235,
"repo_name": "horacioibrahim/spekdump",
"id": "8fd2dfb06f8835957684cea45ef3cfbe190d9349",
"size": "5099",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spekdump/spekdumps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22145"
}
],
"symlink_target": ""
} |
import sys
from vulkan import *
from PySide2 import (QtGui, QtCore)
validationLayers = [
'VK_LAYER_LUNARG_standard_validation'
]
deviceExtensions = [
VK_KHR_SWAPCHAIN_EXTENSION_NAME
]
enableValidationLayers = True
class InstanceProcAddr(object):
T = None
def __init__(self, func):
self.__func = func
def __call__(self, *args, **kwargs):
funcName = self.__func.__name__
func = InstanceProcAddr.procfunc(funcName)
if func:
return func(*args, **kwargs)
else:
return VK_ERROR_EXTENSION_NOT_PRESENT
@staticmethod
def procfunc(funcName):
return vkGetInstanceProcAddr(InstanceProcAddr.T, funcName)
class DeviceProcAddr(InstanceProcAddr):
@staticmethod
def procfunc(funcName):
return vkGetDeviceProcAddr(InstanceProcAddr.T, funcName)
# instance ext functions
@InstanceProcAddr
def vkCreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator):
pass
@InstanceProcAddr
def vkDestroyDebugReportCallbackEXT(instance, pCreateInfo, pAllocator):
pass
@InstanceProcAddr
def vkCreateWin32SurfaceKHR(instance, pCreateInfo, pAllocator):
pass
@InstanceProcAddr
def vkDestroySurfaceKHR(instance, surface, pAllocator):
pass
@InstanceProcAddr
def vkGetPhysicalDeviceSurfaceSupportKHR(physicalDevice, queueFamilyIndex, surface):
pass
@InstanceProcAddr
def vkGetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice, surface):
pass
@InstanceProcAddr
def vkGetPhysicalDeviceSurfaceFormatsKHR(physicalDevice, surface):
pass
@InstanceProcAddr
def vkGetPhysicalDeviceSurfacePresentModesKHR(physicalDevice, surface):
pass
# device ext functions
@DeviceProcAddr
def vkCreateSwapchainKHR(device, pCreateInfo, pAllocator):
pass
@DeviceProcAddr
def vkDestroySwapchainKHR(device, swapchain, pAllocator):
pass
@DeviceProcAddr
def vkGetSwapchainImagesKHR(device, swapchain):
pass
def debugCallback(*args):
print('DEBUG: {} {}'.format(args[5], args[6]))
return 0
class Win32misc(object):
@staticmethod
def getInstance(hWnd):
from cffi import FFI as _FFI
_ffi = _FFI()
_ffi.cdef('long __stdcall GetWindowLongA(void* hWnd, int nIndex);')
_lib = _ffi.dlopen('User32.dll')
return _lib.GetWindowLongA(_ffi.cast('void*', hWnd), -6) # GWL_HINSTANCE
class QueueFamilyIndices(object):
def __init__(self):
self.graphicsFamily = -1
self.presentFamily = -1
@property
def isComplete(self):
return self.graphicsFamily >= 0 and self.presentFamily >= 0
class SwapChainSupportDetails(object):
def __init__(self):
self.capabilities = None
self.formats = None
self.presentModes = None
class HelloTriangleApplication(QtGui.QWindow):
def __init__(self):
super(HelloTriangleApplication, self).__init__()
self.setWidth(1280)
self.setHeight(720)
self.setTitle("Vulkan Python - PySide2")
# self.setSurfaceType(self.OpenGLSurface)
self.__instance = None
        self.__callback = None
self.__surface = None
self.__physicalDevice = None
self.__device = None
self.__graphicQueue = None
self.__presentQueue = None
self.__swapChain = None
self.__swapChainImages = []
self.__swapChainImageFormat = None
self.__swapChainExtent = None
self.__swapChainImageViews = []
self.__renderpass = None
self.__pipelineLayout = None
self.__indices = QueueFamilyIndices()
self.initVulkan()
def __del__(self):
if self.__renderpass:
vkDestroyRenderPass(self.__device, self.__renderpass, None)
if self.__pipelineLayout:
vkDestroyPipelineLayout(self.__device, self.__pipelineLayout, None)
if self.__swapChainImageViews:
            for imv in self.__swapChainImageViews:
                vkDestroyImageView(self.__device, imv, None)
if self.__swapChain:
vkDestroySwapchainKHR(self.__device, self.__swapChain, None)
if self.__device:
vkDestroyDevice(self.__device, None)
        if self.__callback:
            vkDestroyDebugReportCallbackEXT(self.__instance, self.__callback, None)
if self.__surface:
vkDestroySurfaceKHR(self.__instance, self.__surface, None)
if self.__instance:
vkDestroyInstance(self.__instance, None)
print('instance destroyed')
def initVulkan(self):
        self.__createInstance()
self.__setupDebugCallback()
self.__createSurface()
self.__pickPhysicalDevice()
self.__createLogicalDevice()
self.__createSwapChain()
self.__createImageViews()
self.__createRenderPass()
self.__createGraphicsPipeline()
    def __createInstance(self):
if enableValidationLayers and not self.__checkValidationLayerSupport():
raise Exception("validation layers requested, but not available!")
appInfo = VkApplicationInfo(
# sType=VK_STRUCTURE_TYPE_APPLICATION_INFO,
pApplicationName='Python VK',
applicationVersion=VK_MAKE_VERSION(1, 0, 0),
pEngineName='pyvulkan',
engineVersion=VK_MAKE_VERSION(1, 0, 0),
apiVersion=VK_API_VERSION
)
        extensions = self.__getRequiredExtensions()
if enableValidationLayers:
instanceInfo = VkInstanceCreateInfo(
pApplicationInfo=appInfo,
# enabledLayerCount=len(validationLayers),
ppEnabledLayerNames=validationLayers,
# enabledExtensionCount=len(extenstions),
                ppEnabledExtensionNames=extensions
)
else:
instanceInfo = VkInstanceCreateInfo(
pApplicationInfo=appInfo,
enabledLayerCount=0,
# enabledExtensionCount=len(extenstions),
                ppEnabledExtensionNames=extensions
)
self.__instance = vkCreateInstance(instanceInfo, None)
InstanceProcAddr.T = self.__instance
def __setupDebugCallback(self):
if not enableValidationLayers:
return
createInfo = VkDebugReportCallbackCreateInfoEXT(
flags=VK_DEBUG_REPORT_WARNING_BIT_EXT | VK_DEBUG_REPORT_ERROR_BIT_EXT,
pfnCallback=debugCallback
)
        self.__callback = vkCreateDebugReportCallbackEXT(self.__instance, createInfo, None)
def __createSurface(self):
if sys.platform == 'win32':
hwnd = self.winId()
hinstance = Win32misc.getInstance(hwnd)
createInfo = VkWin32SurfaceCreateInfoKHR(
hinstance=hinstance,
hwnd=hwnd
)
self.__surface = vkCreateWin32SurfaceKHR(self.__instance, createInfo, None)
# elif sys.platform == 'linux':
# pass
def __pickPhysicalDevice(self):
physicalDevices = vkEnumeratePhysicalDevices(self.__instance)
for device in physicalDevices:
if self.__isDeviceSuitable(device):
self.__physicalDevice = device
break
        assert self.__physicalDevice is not None
def __createLogicalDevice(self):
self.__indices = self.__findQueueFamilies(self.__physicalDevice)
uniqueQueueFamilies = {}.fromkeys([self.__indices.graphicsFamily, self.__indices.presentFamily])
queueCreateInfos = []
for i in uniqueQueueFamilies:
queueCreateInfo = VkDeviceQueueCreateInfo(
queueFamilyIndex=i,
queueCount=1,
pQueuePriorities=[1.0]
)
queueCreateInfos.append(queueCreateInfo)
deviceFeatures = VkPhysicalDeviceFeatures()
if enableValidationLayers:
createInfo = VkDeviceCreateInfo(
# queueCreateInfoCount=len(queueCreateInfos),
pQueueCreateInfos=queueCreateInfos,
# enabledExtensionCount=len(deviceExtensions),
ppEnabledExtensionNames=deviceExtensions,
# enabledLayerCount=len(validationLayers),
ppEnabledLayerNames=validationLayers,
pEnabledFeatures=deviceFeatures
)
else:
createInfo = VkDeviceCreateInfo(
                # queueCreateInfoCount=len(queueCreateInfos),
                pQueueCreateInfos=queueCreateInfos,
# enabledExtensionCount=len(deviceExtensions),
ppEnabledExtensionNames=deviceExtensions,
enabledLayerCount=0,
pEnabledFeatures=deviceFeatures
)
self.__device = vkCreateDevice(self.__physicalDevice, createInfo, None)
DeviceProcAddr.T = self.__device
self.__graphicQueue = vkGetDeviceQueue(self.__device, self.__indices.graphicsFamily, 0)
self.__presentQueue = vkGetDeviceQueue(self.__device, self.__indices.presentFamily, 0)
def __createSwapChain(self):
swapChainSupport = self.__querySwapChainSupport(self.__physicalDevice)
surfaceFormat = self.__chooseSwapSurfaceFormat(swapChainSupport.formats)
presentMode = self.__chooseSwapPresentMode(swapChainSupport.presentModes)
extent = self.__chooseSwapExtent(swapChainSupport.capabilities)
imageCount = swapChainSupport.capabilities.minImageCount + 1
if swapChainSupport.capabilities.maxImageCount > 0 and imageCount > swapChainSupport.capabilities.maxImageCount:
imageCount = swapChainSupport.capabilities.maxImageCount
indices = self.__findQueueFamilies(self.__physicalDevice)
queueFamily = {}.fromkeys([indices.graphicsFamily, indices.presentFamily])
queueFamilies = list(queueFamily.keys())
if len(queueFamilies) > 1:
createInfo = VkSwapchainCreateInfoKHR(
surface=self.__surface,
minImageCount=imageCount,
imageFormat=surfaceFormat.format,
imageColorSpace=surfaceFormat.colorSpace,
imageExtent=extent,
imageArrayLayers=1,
imageUsage=VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
# queueFamilyIndexCount=len(queueFamilies),
pQueueFamilyIndices=queueFamilies,
imageSharingMode=VK_SHARING_MODE_CONCURRENT,
preTransform=swapChainSupport.capabilities.currentTransform,
compositeAlpha=VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR,
presentMode=presentMode,
clipped=True
)
else:
createInfo = VkSwapchainCreateInfoKHR(
surface=self.__surface,
minImageCount=imageCount,
imageFormat=surfaceFormat.format,
imageColorSpace=surfaceFormat.colorSpace,
imageExtent=extent,
imageArrayLayers=1,
imageUsage=VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
# queueFamilyIndexCount=len(queueFamilies),
pQueueFamilyIndices=queueFamilies,
imageSharingMode=VK_SHARING_MODE_EXCLUSIVE,
preTransform=swapChainSupport.capabilities.currentTransform,
compositeAlpha=VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR,
presentMode=presentMode,
clipped=True
)
self.__swapChain = vkCreateSwapchainKHR(self.__device, createInfo, None)
        assert self.__swapChain is not None
self.__swapChainImages = vkGetSwapchainImagesKHR(self.__device, self.__swapChain)
self.__swapChainImageFormat = surfaceFormat.format
self.__swapChainExtent = extent
def __createImageViews(self):
self.__swapChainImageViews = []
for i, image in enumerate(self.__swapChainImages):
ssr = VkImageSubresourceRange(
VK_IMAGE_ASPECT_COLOR_BIT,
0, 1, 0, 1
)
createInfo = VkImageViewCreateInfo(
image=image,
viewType=VK_IMAGE_VIEW_TYPE_2D,
format=self.__swapChainImageFormat,
components=[VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY,
VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY],
subresourceRange=ssr
)
self.__swapChainImageViews.append(vkCreateImageView(self.__device, createInfo, None))
def __createRenderPass(self):
colorAttachment = VkAttachmentDescription(
format=self.__swapChainImageFormat,
samples=VK_SAMPLE_COUNT_1_BIT,
loadOp=VK_ATTACHMENT_LOAD_OP_CLEAR,
storeOp=VK_ATTACHMENT_STORE_OP_STORE,
stencilLoadOp=VK_ATTACHMENT_LOAD_OP_DONT_CARE,
stencilStoreOp=VK_ATTACHMENT_STORE_OP_DONT_CARE,
initialLayout=VK_IMAGE_LAYOUT_UNDEFINED,
finalLayout=VK_IMAGE_LAYOUT_PRESENT_SRC_KHR
)
colorAttachmentRef = VkAttachmentReference(
0,
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
)
subpass = VkSubpassDescription(
pipelineBindPoint=VK_PIPELINE_BIND_POINT_GRAPHICS,
pColorAttachments=[colorAttachmentRef]
)
renderPassInfo = VkRenderPassCreateInfo(
pAttachments=[colorAttachment],
pSubpasses=[subpass]
)
self.__renderpass = vkCreateRenderPass(self.__device, renderPassInfo, None)
def __createGraphicsPipeline(self):
vertexShaderMode = self.__createShaderModule('shader/vert.spv')
fragmentShaderMode = self.__createShaderModule('shader/frag.spv')
vertexShaderStageInfo = VkPipelineShaderStageCreateInfo(
stage=VK_SHADER_STAGE_VERTEX_BIT,
module=vertexShaderMode,
pName='main'
)
fragmentShaderStageInfo = VkPipelineShaderStageCreateInfo(
stage=VK_SHADER_STAGE_FRAGMENT_BIT,
module=fragmentShaderMode,
pName='main'
)
shaderStageInfos = [vertexShaderStageInfo, fragmentShaderStageInfo]
vertexInputInfo = VkPipelineVertexInputStateCreateInfo(
vertexBindingDescriptionCount=0,
vertexAttributeDescriptionCount=0
)
inputAssembly = VkPipelineInputAssemblyStateCreateInfo(
topology=VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST,
primitiveRestartEnable=False
)
viewport = VkViewport(0.0, 0.0,
float(self.__swapChainExtent.width),
float(self.__swapChainExtent.height),
0.0, 1.0)
scissor = VkRect2D([0, 0], self.__swapChainExtent)
viewportStage = VkPipelineViewportStateCreateInfo(
viewportCount=1,
pViewports=viewport,
scissorCount=1,
pScissors=scissor
)
rasterizer = VkPipelineRasterizationStateCreateInfo(
depthClampEnable=False,
rasterizerDiscardEnable=False,
polygonMode=VK_POLYGON_MODE_FILL,
lineWidth=1.0,
cullMode=VK_CULL_MODE_BACK_BIT,
frontFace=VK_FRONT_FACE_CLOCKWISE,
depthBiasEnable=False
)
multisampling = VkPipelineMultisampleStateCreateInfo(
sampleShadingEnable=False,
rasterizationSamples=VK_SAMPLE_COUNT_1_BIT
)
colorBlendAttachment = VkPipelineColorBlendAttachmentState(
colorWriteMask=VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT,
blendEnable=False
)
colorBending = VkPipelineColorBlendStateCreateInfo(
logicOpEnable=False,
logicOp=VK_LOGIC_OP_COPY,
attachmentCount=1,
pAttachments=colorBlendAttachment,
blendConstants=[0.0, 0.0, 0.0, 0.0]
)
pipelineLayoutInfo = VkPipelineLayoutCreateInfo(
setLayoutCount=0,
pushConstantRangeCount=0
)
self.__pipelineLayout = vkCreatePipelineLayout(self.__device, pipelineLayoutInfo, None)
vkDestroyShaderModule(self.__device, vertexShaderMode, None)
vkDestroyShaderModule(self.__device, fragmentShaderMode, None)
def __createShaderModule(self, shaderFile):
with open(shaderFile, 'rb') as sf:
code = sf.read()
createInfo = VkShaderModuleCreateInfo(
codeSize=len(code),
pCode=code
)
return vkCreateShaderModule(self.__device, createInfo, None)
def __chooseSwapSurfaceFormat(self, formats):
if len(formats) == 1 and formats[0].format == VK_FORMAT_UNDEFINED:
return [VK_FORMAT_B8G8R8_UNORM, VK_COLOR_SPACE_SRGB_NONLINEAR_KHR]
for i in formats:
if i.format == VK_FORMAT_B8G8R8_UNORM and i.colorSpace == VK_COLOR_SPACE_SRGB_NONLINEAR_KHR:
return i
return formats[0]
def __chooseSwapPresentMode(self, presentModes):
bestMode = VK_PRESENT_MODE_FIFO_KHR
for i in presentModes:
if i == VK_PRESENT_MODE_FIFO_KHR:
return i
elif i == VK_PRESENT_MODE_MAILBOX_KHR:
return i
elif i == VK_PRESENT_MODE_IMMEDIATE_KHR:
return i
return bestMode
def __chooseSwapExtent(self, capabilities):
width = max(capabilities.minImageExtent.width, min(capabilities.maxImageExtent.width, self.width()))
height = max(capabilities.minImageExtent.height, min(capabilities.maxImageExtent.height, self.height()))
return VkExtent2D(width, height)
def __querySwapChainSupport(self, device):
detail = SwapChainSupportDetails()
detail.capabilities = vkGetPhysicalDeviceSurfaceCapabilitiesKHR(device, self.__surface)
detail.formats = vkGetPhysicalDeviceSurfaceFormatsKHR(device, self.__surface)
detail.presentModes = vkGetPhysicalDeviceSurfacePresentModesKHR(device, self.__surface)
return detail
def __isDeviceSuitable(self, device):
indices = self.__findQueueFamilies(device)
extensionsSupported = self.__checkDeviceExtensionSupport(device)
swapChainAdequate = False
if extensionsSupported:
swapChainSupport = self.__querySwapChainSupport(device)
swapChainAdequate = (swapChainSupport.formats is not None) and (swapChainSupport.presentModes is not None)
return indices.isComplete and extensionsSupported and swapChainAdequate
def __checkDeviceExtensionSupport(self, device):
availableExtensions = vkEnumerateDeviceExtensionProperties(device, None)
aen = [i.extensionName for i in availableExtensions]
for i in deviceExtensions:
if i not in aen:
return False
return True
def __findQueueFamilies(self, device):
indices = QueueFamilyIndices()
familyProperties = vkGetPhysicalDeviceQueueFamilyProperties(device)
for i, prop in enumerate(familyProperties):
if prop.queueCount > 0 and prop.queueFlags & VK_QUEUE_GRAPHICS_BIT:
indices.graphicsFamily = i
presentSupport = vkGetPhysicalDeviceSurfaceSupportKHR(device, i, self.__surface)
if prop.queueCount > 0 and presentSupport:
indices.presentFamily = i
if indices.isComplete:
break
return indices
def __getRequiredExtensions(self):
        extensions = [e.extensionName for e in vkEnumerateInstanceExtensionProperties(None)]
if enableValidationLayers:
            extensions.append(VK_EXT_DEBUG_REPORT_EXTENSION_NAME)
        return extensions
def __checkValidationLayerSupport(self):
availableLayers = vkEnumerateInstanceLayerProperties()
for layer in validationLayers:
layerfound = False
for layerProp in availableLayers:
if layer == layerProp.layerName:
layerfound = True
break
            if not layerfound:
                return False
        return True
if __name__ == '__main__':
import sys
app = QtGui.QGuiApplication(sys.argv)
win = HelloTriangleApplication()
win.show()
    def cleanup():
global win
del win
    app.aboutToQuit.connect(cleanup)
sys.exit(app.exec_())
| {
"content_hash": "8da123089ce7466083c049a2442ffb72",
"timestamp": "",
"source": "github",
"line_count": 623,
"max_line_length": 133,
"avg_line_length": 32.93258426966292,
"alnum_prop": 0.6342545206414193,
"repo_name": "mackst/vulkan-tutorial",
"id": "00983060c6542186304d58608fb931bd3e0e4070",
"size": "20542",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "11_render_passes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "GLSL",
"bytes": "3586"
},
{
"name": "Python",
"bytes": "804643"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from .views import TrackingView
urlpatterns = [
url(r'^$', TrackingView.as_view(), name='tracker'),
]
| {
"content_hash": "622a87724de96774e191811a739c1fd7",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 55,
"avg_line_length": 23.333333333333332,
"alnum_prop": 0.7,
"repo_name": "alex20465/open-scriptorium",
"id": "4ee87fdee137a3fff9a174dbdd0ca7ea7069461d",
"size": "167",
"binary": false,
"copies": "1",
"ref": "refs/heads/prototype",
"path": "apps/interaction_tracker/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "100"
},
{
"name": "HTML",
"bytes": "35574"
},
{
"name": "JavaScript",
"bytes": "6398"
},
{
"name": "Python",
"bytes": "146203"
},
{
"name": "Shell",
"bytes": "850"
}
],
"symlink_target": ""
} |
import random
import unittest
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
        self.seq = list(range(10))
def test_shuffle(self):
"""
Make sure shuffled sequence does not lose elements.
"""
random.shuffle(self.seq)
self.seq.sort()
        self.assertEqual(self.seq, list(range(10)))
def test_one(self):
self.assertEqual(1, 1)
def test_two(self):
self.assertNotEqual(1, 2)
if __name__ == '__main__':
#unittest.main()
suite = unittest.TestLoader().loadTestsFromTestCase(TestSequenceFunctions)
unittest.TextTestRunner(verbosity=2).run(suite)
| {
"content_hash": "f166cc1bd9f2779b1662b57972f67865",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 78,
"avg_line_length": 23.77777777777778,
"alnum_prop": 0.6339563862928349,
"repo_name": "alexisbellido/programming-in-python",
"id": "8914b5991a29780c75a650019cfc45ea3f58ca39",
"size": "642",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "basic_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "13070"
}
],
"symlink_target": ""
} |
"""3-tier example."""
import patterns.threetier as tiers
class DataTier(tiers.DataTier):
"""Data tier."""
def __init__(self, dictionary):
"""Initialize with dictionary."""
self.dictionary = dictionary
def store(self, key, data):
"""Store some data."""
self.dictionary[key] = data
def retrieve(self, key):
"""Get some data."""
return self.dictionary[key]
class LogicTier(tiers.LogicTier):
"""Logic Tier."""
def process_and_load(self, *args, **kwargs):
"""Load."""
return super().process_and_load(*args, **kwargs)
def process_and_store(self, *args, **kwargs):
"""Store."""
return super().process_and_store(*args, **kwargs)
class PresentationTier(tiers.PresentationTier):
"""Presentation tier."""
def __init__(self):
"""Initialize the tier."""
self.logic_tier = LogicTier(DataTier({}))
def interact(self):
"""Interact with user."""
super().interact()
pres_tier = PresentationTier()
while True:
pres_tier.interact()
| {
"content_hash": "fbffbfb2c5cbc6706621de883ddb84aa",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 57,
"avg_line_length": 20.923076923076923,
"alnum_prop": 0.5873161764705882,
"repo_name": "jmanuel1/patterns",
"id": "237bd5a335c6fd5a2c7bb74b3a914d555d6914d3",
"size": "1088",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/threetier.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10190"
}
],
"symlink_target": ""
} |
import os
import random
import base64
from errno import ECONNRESET
from socket import error as SocketError
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse # NOQA
from circuits.web.headers import Headers
from circuits.protocols.http import HTTP
from circuits.core.handlers import handler
from circuits.net.sockets import TCPClient
from circuits.web.client import NotConnected
from circuits.core.components import BaseComponent
from circuits.net.events import connect, write, close
from circuits.protocols.websocket import WebSocketCodec
class WebSocketClient(BaseComponent):
"""
An RFC 6455 compliant WebSocket client component. Upon receiving a
:class:`circuits.web.client.Connect` event, the component tries to
establish the connection to the server in a two stage process. First, a
:class:`circuits.net.events.connect` event is sent to a child
:class:`~.sockets.TCPClient`. When the TCP connection has been established,
the HTTP request for opening the WebSocket is sent to the server.
A failure in this setup process is signaled by raising an
:class:`~.client.NotConnected` exception.
When the server accepts the request, the WebSocket connection is
established and can be used very much like an ordinary socket
by handling :class:`~.net.events.read` events on and sending
:class:`~.net.events.write` events to the channel
specified as the ``wschannel`` parameter of the constructor. Firing
a :class:`~.net.events.close` event on that channel closes the
connection in an orderly fashion (i.e. as specified by the
WebSocket protocol).
"""
channel = "wsclient"
def __init__(self, url, channel=channel, wschannel="ws", headers=None):
"""
:param url: the URL to connect to.
:param channel: the channel used by this component
:param wschannel: the channel used for the actual WebSocket
communication (read, write, close events)
:param headers: additional headers to be passed with the
WebSocket setup HTTP request
"""
super(WebSocketClient, self).__init__(channel=channel)
self._url = url
self._headers = headers or {}
self._response = None
self._pending = 0
self._wschannel = wschannel
self._transport = TCPClient(channel=self.channel).register(self)
HTTP(channel=self.channel).register(self._transport)
@handler("ready")
def _on_ready(self, event, *args, **kwargs):
p = urlparse(self._url)
if not p.hostname:
raise ValueError("URL must be absolute")
self._host = p.hostname
if p.scheme == "ws":
self._secure = False
self._port = p.port or 80
elif p.scheme == "wss":
self._secure = True
self._port = p.port or 443
else:
raise NotConnected()
self._resource = p.path or "/"
if p.query:
self._resource += "?" + p.query
self.fire(connect(self._host, self._port, self._secure),
self._transport)
@handler("connected")
def _on_connected(self, host, port):
headers = Headers([(k, v) for k, v in self._headers.items()])
# Clients MUST include Host header in HTTP/1.1 requests (RFC 2616)
if not "Host" in headers:
headers["Host"] = self._host \
+ (":" + str(self._port)) if self._port else ""
headers["Upgrade"] = "websocket"
headers["Connection"] = "Upgrade"
try:
sec_key = os.urandom(16)
except NotImplementedError:
sec_key = "".join([chr(random.randint(0, 255)) for i in range(16)])
headers[
"Sec-WebSocket-Key"] = base64.b64encode(sec_key).decode("latin1")
headers["Sec-WebSocket-Version"] = "13"
command = "GET %s HTTP/1.1" % self._resource
message = "%s\r\n%s" % (command, headers)
self._pending += 1
self.fire(write(message.encode('utf-8')), self._transport)
return True
@handler("response")
def _on_response(self, response):
self._response = response
self._pending -= 1
if response.headers.get("Connection") == "Close" \
or response.status != 101:
self.fire(close(), self._transport)
raise NotConnected()
WebSocketCodec(
data=response.body.read(), channel=self._wschannel).register(self)
@handler("error", priority=10)
def _on_error(self, event, error, *args, **kwargs):
# For HTTP 1.1 we leave the connection open. If the peer closes
# it after some time and we have no pending request, that's OK.
if isinstance(error, SocketError) and error.args[0] == ECONNRESET \
and self._pending == 0:
event.stop()
def close(self):
if self._transport is not None:
self._transport.close()
@property
def connected(self):
return getattr(self._transport, "connected", False) \
if hasattr(self, "_transport") else False
| {
"content_hash": "a8662c469b0d248e1e82f8e3f8575397",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 79,
"avg_line_length": 38.45522388059702,
"alnum_prop": 0.6312827479138367,
"repo_name": "nizox/circuits",
"id": "2078f398656bcdba83ea5c3134dfe3d342afdd32",
"size": "5153",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "circuits/web/websockets/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9"
},
{
"name": "Python",
"bytes": "646627"
}
],
"symlink_target": ""
} |
from sklearn.metrics import coverage_error, accuracy_score
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import hamming_loss
from sklearn import metrics
from collections import Counter
import math
import numpy as np
def patk(predictions, labels):
pak = np.zeros(3)
K = np.array([1, 3, 5])
for i in range(predictions.shape[0]):
pos = np.argsort(-predictions[i, :])
y = labels[i, :]
y = y[pos]
for j in range(3):
k = K[j]
pak[j] += (np.sum(y[:k]) / k)
pak = pak / predictions.shape[0]
return pak
def cm_precision_recall(prediction, truth):
"""Evaluate confusion matrix, precision and recall for given set of labels and predictions
    Args:
prediction: a vector with predictions
truth: a vector with class labels
Returns:
cm: confusion matrix
precision: precision score
recall: recall score"""
confusion_matrix = Counter()
positives = [1]
binary_truth = [x in positives for x in truth]
binary_prediction = [x in positives for x in prediction]
for t, p in zip(binary_truth, binary_prediction):
confusion_matrix[t, p] += 1
cm = np.array([confusion_matrix[True, True], confusion_matrix[False, False], confusion_matrix[False, True],
confusion_matrix[True, False]])
# print cm
precision = (cm[0] / (cm[0] + cm[2] + 0.000001))
recall = (cm[0] / (cm[0] + cm[3] + 0.000001))
return cm, precision, recall
def bipartition_scores(labels, predictions):
""" Computes bipartitation metrics for a given multilabel predictions and labels
Args:
logits: Logits tensor, float - [batch_size, NUM_LABELS].
labels: Labels tensor, int32 - [batch_size, NUM_LABELS].
Returns:
bipartiation: an array with micro_precision, micro_recall, micro_f1,macro_precision, macro_recall, macro_f1"""
sum_cm = np.zeros((4))
macro_precision = 0
macro_recall = 0
for i in range(labels.shape[1]):
truth = labels[:, i]
prediction = predictions[:, i]
cm, precision, recall = cm_precision_recall(prediction, truth)
sum_cm += cm
macro_precision += precision
macro_recall += recall
macro_precision = macro_precision / labels.shape[1]
macro_recall = macro_recall / labels.shape[1]
# print(macro_recall, macro_precision)
macro_f1 = 2 * (macro_precision) * (macro_recall) / (macro_precision + macro_recall + 0.000001)
micro_precision = sum_cm[0] / (sum_cm[0] + sum_cm[2] + 0.000001)
micro_recall = sum_cm[0] / (sum_cm[0] + sum_cm[3] + 0.000001)
micro_f1 = 2 * (micro_precision) * (micro_recall) / (micro_precision + micro_recall + 0.000001)
    bipartition = np.asarray([micro_precision, micro_recall, micro_f1, macro_precision, macro_recall, macro_f1])
    return bipartition
def BAE(labels, predictions):
abs_error = (1 - predictions) * labels # consider error only for true classes
freq = np.sum(labels, axis=0) + 1e-15 # count the frequency of each label
num_labels = np.shape(labels)[1]
bae = np.sum(np.sum(abs_error, axis=0) / freq) / num_labels
# print(bae, np.sum(abs_error, axis=0), freq, num_labels)
return bae
def evaluate(predictions, labels, threshold):
"""Evaluate the quality of the logits at predicting the label.
Args:
logits: Logits tensor, float - [batch_size, NUM_LABELS].
labels: Labels tensor, int32 - [batch_size], with values in the
range [0, NUM_LABELS).
Returns:
A scalar int32 tensor with the number of examples (out of batch_size)
that were predicted correctly.
"""
predictions, labels = np.array(predictions), np.array(labels)
shape = predictions.shape
# if dimensions equals 3, reshape it to 2 dimensions
if len(shape) > 2:
predictions = np.reshape(predictions, (shape[0] * shape[1], shape[2]))
labels = np.reshape(labels, (shape[0] * shape[1], shape[2]))
assert predictions.shape == labels.shape, "Shapes: %s, %s" % (predictions.shape, labels.shape,)
accuracy = accuracy_score(np.argmax(labels, axis=1), np.argmax(predictions, axis=1))
cross_entropy = -np.mean(labels * np.log(predictions + .0000000001))
if threshold:
for i in range(predictions.shape[0]):
predictions[i, :][predictions[i, :] >= threshold] = 1
predictions[i, :][predictions[i, :] < threshold] = 0
else:
# TOP K
for i in range(predictions.shape[0]):
k = np.sum(labels[i])
pos = predictions[i].argsort()
predictions[i].fill(0)
predictions[i][pos[-int(k):]] = 1
bae = BAE(labels, predictions) # calculate BAE with probabilities, not with binarized predictions
# labels = labels.astype(int)
coverage = coverage_error(labels, predictions)
# print(labels[:10], predictions[:10])
average_precision = label_ranking_average_precision_score(labels, predictions)
ranking_loss = label_ranking_loss(labels, predictions)
pak = patk(predictions, labels)
ham_loss = hamming_loss(labels, predictions)
micro_precision, micro_recall, micro_f1, macro_precision, macro_recall, macro_f1 = bipartition_scores(labels,
predictions)
performance = np.asarray(
[coverage, average_precision, ranking_loss, micro_f1, macro_f1, micro_precision, micro_recall, macro_precision,
macro_recall, pak[0], pak[1], pak[2], ham_loss, bae, cross_entropy, accuracy])
# print ("Performance: " , performance)
return performance
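# --- Illustrative sketch (added for clarity; not part of the original module).
# It runs evaluate() on a tiny random multilabel problem so the layout of the
# returned 16-element performance vector can be inspected; the sizes and the
# 0.5 threshold are arbitrary.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    toy_labels = rng.randint(0, 2, size=(8, 5))
    toy_labels[:, 0] = 1                      # ensure every sample has a positive label
    toy_predictions = rng.uniform(size=(8, 5))
    perf = evaluate(toy_predictions, toy_labels, threshold=0.5)
    # perf = [coverage, avg. precision, ranking loss, micro F1, macro F1, ...]
    print("coverage=%.3f micro_f1=%.3f accuracy=%.3f" % (perf[0], perf[3], perf[-1]))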
| {
"content_hash": "50472ba03662df3eaf8a76987400d065",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 120,
"avg_line_length": 40.485915492957744,
"alnum_prop": 0.6376761175856671,
"repo_name": "yashchandak/GNN",
"id": "9a33d4b7a4f55c3436369caac5d2c27bad30367a",
"size": "5749",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Sample_Run/path_attn_Q/Eval_Calculate_Performance.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1036599"
},
{
"name": "Shell",
"bytes": "14131"
}
],
"symlink_target": ""
} |
"""MQTT gateway module."""
import logging
import time
import uuid
import json
import asyncio
from tornado.ioloop import PeriodicCallback
from gmqtt import Client as MQTTClient
from pyaiot.gateway.common import Node, GatewayBase
logger = logging.getLogger("pyaiot.gw.mqtt")
MQTT_HOST = 'localhost'
MQTT_PORT = 1886
MAX_TIME = 120
class MQTTGateway(GatewayBase):
"""Gateway application for MQTT nodes on a network."""
PROTOCOL = "MQTT"
def __init__(self, keys, options):
self.host = options.mqtt_host
self.port = options.mqtt_port
self.max_time = options.max_time
self.options = options
self.node_mapping = {} # map node id to its uuid (TODO: FIXME)
super().__init__(keys, options)
# Connect to the MQTT broker
self.mqtt_client = MQTTClient("client-id")
self.mqtt_client.on_connect = self.on_connect
self.mqtt_client.on_message = self.on_message
self.mqtt_client.on_disconnect = self.on_disconnect
self.mqtt_client.on_subscribe = self.on_subscribe
asyncio.get_event_loop().create_task(self.start())
# Start the node cleanup task
PeriodicCallback(self.check_dead_nodes, 1000).start()
PeriodicCallback(self.request_alive, 30000).start()
logger.info('MQTT gateway application started')
async def start(self):
await self.mqtt_client.connect('{}:{}'.format(self.host, self.port))
def on_connect(self, client, flags, rc, properties):
self.mqtt_client.subscribe('node/check', 1)
def on_message(self, client, topic, payload, qos, properties):
try:
data = json.loads(payload)
except Exception:
# Skip data if not valid
return
logger.debug("Received message from node: {} => {}"
.format(topic, data))
if topic.endswith("/check"):
asyncio.get_event_loop().create_task(
self.handle_node_check(data))
elif topic.endswith("/resources"):
asyncio.get_event_loop().create_task(
self.handle_node_resources(topic, data))
else:
self.handle_node_update(topic, data)
def on_disconnect(self, client, packet, exc=None):
print('Disconnected')
def on_subscribe(self, client, mid, qos, properties):
print('SUBSCRIBED')
def close(self):
loop = asyncio.get_event_loop()
loop.run_until_complete(self._disconnect())
async def _disconnect(self):
for node in self.nodes:
await self._disconnect_from_node(node)
await self.mqtt_client.disconnect()
async def discover_node(self, node):
discover_topic = 'gateway/{}/discover'.format(node.resources['id'])
await self.mqtt_client.publish(discover_topic, "resources", qos=1)
logger.debug("Published '{}' to topic: {}"
.format("resources", discover_topic))
def update_node_resource(self, node, endpoint, payload):
node_id = node.resources['id']
asyncio.get_event_loop().create_task(self.mqtt_client.publish(
'gateway/{}/{}/set'.format(node_id, endpoint), payload, qos=1))
async def handle_node_check(self, data):
"""Handle alive message received from coap node."""
node_id = data['id']
if node_id not in self.node_mapping:
node = Node(str(uuid.uuid4()), id=node_id)
self.node_mapping.update({node_id: node.uid})
resources_topic = 'node/{}/resources'.format(node_id)
await self.mqtt_client.subscribe(resources_topic, 1)
logger.debug("Subscribed to topic: {}".format(resources_topic))
self.add_node(node)
else:
# The node simply sent a check message to notify that it's still
# online.
node = self.get_node(self.node_mapping[node_id])
node.update_last_seen()
async def handle_node_resources(self, topic, data):
"""Process resources published by a node."""
node_id = topic.split("/")[1]
if node_id not in self.node_mapping:
return
for resource in data:
await self.mqtt_client.subscribe(
'node/{}/{}'.format(node_id, resource), 1
)
await self.mqtt_client.publish('gateway/{}/discover'
.format(node_id), "values", qos=1)
def handle_node_update(self, topic_name, data):
"""Handle CoAP post message sent from coap node."""
_, node_id, resource = topic_name.split("/")
value = data['value']
if self.node_mapping[node_id] not in self.nodes:
return
node = self.get_node(self.node_mapping[node_id])
self.forward_data_from_node(node, resource, value)
def request_alive(self):
"""Publish a request to trigger a check publish from nodes."""
logger.debug("Request check message from all MQTT nodes")
asyncio.get_event_loop().create_task(
self.mqtt_client.publish('gateway/check', '', qos=1))
def check_dead_nodes(self):
"""Check and remove nodes that are not alive anymore."""
to_remove = [node for node in self.nodes.values()
if int(time.time()) > node.last_seen + self.max_time]
for node in to_remove:
logger.info("Removing inactive node {}".format(node.uid))
asyncio.get_event_loop().create_task(
self._disconnect_from_node(node))
self.node_mapping.pop(node.resources['id'])
self.remove_node(node)
async def _disconnect_from_node(self, node):
node_id = node.resources['id']
await self.mqtt_client.unsubscribe(
['node/{}/resource'.format(node_id)])
for resource in node.resources:
await self.mqtt_client.unsubscribe(
['node/{}/{}'.format(node_id, resource)])
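# --- Illustrative sketch (added for clarity; not part of the original module).
# Topic contract implemented by the gateway above, seen from the node side:
#   node    -> gateway : 'node/check'                        {"id": <node-id>}
#   gateway -> node    : 'gateway/<node-id>/discover'        "resources" | "values"
#   node    -> gateway : 'node/<node-id>/resources'          ["temperature", ...]
#   node    -> gateway : 'node/<node-id>/<resource>'         {"value": ...}
#   gateway -> node    : 'gateway/<node-id>/<resource>/set'  new value
# The coroutine below fakes such a node. It assumes gmqtt's Client.connect(host,
# port) signature and its synchronous publish(); a real node would also wait for
# the 'discover' requests before publishing its resources and values.
async def _fake_node(node_id='node-1234'):
    client = MQTTClient(node_id)
    await client.connect(MQTT_HOST, MQTT_PORT)
    client.publish('node/check', json.dumps({'id': node_id}), qos=1)
    client.publish('node/{}/resources'.format(node_id),
                   json.dumps(['temperature']), qos=1)
    client.publish('node/{}/temperature'.format(node_id),
                   json.dumps({'value': 23.5}), qos=1)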
| {
"content_hash": "517003719aa6036baaf04e3599d83b8c",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 76,
"avg_line_length": 35.975903614457835,
"alnum_prop": 0.6026456798392499,
"repo_name": "pyaiot/pyaiot",
"id": "4631cd250a366aa54d6aa5b3b7af86c9594edd13",
"size": "7515",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyaiot/gateway/mqtt/gateway.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "6526"
},
{
"name": "Dockerfile",
"bytes": "959"
},
{
"name": "HTML",
"bytes": "49483"
},
{
"name": "JavaScript",
"bytes": "39285"
},
{
"name": "Makefile",
"bytes": "3372"
},
{
"name": "Python",
"bytes": "132843"
},
{
"name": "Shell",
"bytes": "5735"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from PIL import VERSION, PILLOW_VERSION, _plugins
import warnings
class _imaging_not_installed:
# module placeholder
def __getattr__(self, id):
raise ImportError("The _imaging C module is not installed")
try:
# give Tk a chance to set up the environment, in case we're
# using an _imaging module linked against libtcl/libtk (use
# __import__ to hide this from naive packagers; we don't really
# depend on Tk unless ImageTk is used, and that module already
# imports Tkinter)
__import__("FixTk")
except ImportError:
pass
try:
# If the _imaging C module is not present, you can still use
# the "open" function to identify files, but you cannot load
# them. Note that other modules should not refer to _imaging
# directly; import Image and use the Image.core variable instead.
from PIL import _imaging as core
if PILLOW_VERSION != getattr(core, 'PILLOW_VERSION', None):
raise ImportError("The _imaging extension was built for another "
" version of Pillow or PIL")
except ImportError as v:
core = _imaging_not_installed()
# Explanations for ways that we know we might have an import error
if str(v).startswith("Module use of python"):
# The _imaging C module is present, but not compiled for
# the right version (windows only). Print a warning, if
# possible.
warnings.warn(
"The _imaging extension was built for another version "
"of Python.",
RuntimeWarning
)
elif str(v).startswith("The _imaging extension"):
warnings.warn(str(v), RuntimeWarning)
elif "Symbol not found: _PyUnicodeUCS2_FromString" in str(v):
warnings.warn(
"The _imaging extension was built for Python with UCS2 support; "
"recompile PIL or build Python --without-wide-unicode. ",
RuntimeWarning
)
elif "Symbol not found: _PyUnicodeUCS4_FromString" in str(v):
warnings.warn(
"The _imaging extension was built for Python with UCS4 support; "
"recompile PIL or build Python --with-wide-unicode. ",
RuntimeWarning
)
# Fail here anyway. Don't let people run with a mostly broken Pillow.
raise
try:
import builtins
except ImportError:
import __builtin__
builtins = __builtin__
from PIL import ImageMode
from PIL._binary import i8, o8
from PIL._util import isPath, isStringType
import os, sys
# type stuff
import collections
import numbers
# works everywhere, win for pypy, not cpython
USE_CFFI_ACCESS = hasattr(sys, 'pypy_version_info')
try:
import cffi
    HAS_CFFI = True
except ImportError:
    HAS_CFFI = False
def isImageType(t):
"""
Checks if an object is an image object.
.. warning::
This function is for internal use only.
:param t: object to check if it's an image
:returns: True if the object is an image
"""
return hasattr(t, "im")
#
# Debug level
DEBUG = 0
#
# Constants (also defined in _imagingmodule.c!)
NONE = 0
# transpose
FLIP_LEFT_RIGHT = 0
FLIP_TOP_BOTTOM = 1
ROTATE_90 = 2
ROTATE_180 = 3
ROTATE_270 = 4
# transforms
AFFINE = 0
EXTENT = 1
PERSPECTIVE = 2
QUAD = 3
MESH = 4
# resampling filters
NONE = 0
NEAREST = 0
ANTIALIAS = 1 # 3-lobed lanczos
LINEAR = BILINEAR = 2
CUBIC = BICUBIC = 3
# dithers
NONE = 0
NEAREST = 0
ORDERED = 1 # Not yet implemented
RASTERIZE = 2 # Not yet implemented
FLOYDSTEINBERG = 3 # default
# palettes/quantizers
WEB = 0
ADAPTIVE = 1
MEDIANCUT = 0
MAXCOVERAGE = 1
FASTOCTREE = 2
# categories
NORMAL = 0
SEQUENCE = 1
CONTAINER = 2
if hasattr(core, 'DEFAULT_STRATEGY'):
DEFAULT_STRATEGY = core.DEFAULT_STRATEGY
FILTERED = core.FILTERED
HUFFMAN_ONLY = core.HUFFMAN_ONLY
RLE = core.RLE
FIXED = core.FIXED
# --------------------------------------------------------------------
# Registries
ID = []
OPEN = {}
MIME = {}
SAVE = {}
EXTENSION = {}
# --------------------------------------------------------------------
# Modes supported by this version
_MODEINFO = {
# NOTE: this table will be removed in future versions. use
# getmode* functions or ImageMode descriptors instead.
# official modes
"1": ("L", "L", ("1",)),
"L": ("L", "L", ("L",)),
"I": ("L", "I", ("I",)),
"F": ("L", "F", ("F",)),
"P": ("RGB", "L", ("P",)),
"RGB": ("RGB", "L", ("R", "G", "B")),
"RGBX": ("RGB", "L", ("R", "G", "B", "X")),
"RGBA": ("RGB", "L", ("R", "G", "B", "A")),
"CMYK": ("RGB", "L", ("C", "M", "Y", "K")),
"YCbCr": ("RGB", "L", ("Y", "Cb", "Cr")),
"LAB": ("RGB", "L", ("L", "A", "B")),
# Experimental modes include I;16, I;16L, I;16B, RGBa, BGR;15, and
# BGR;24. Use these modes only if you know exactly what you're
# doing...
}
if sys.byteorder == 'little':
_ENDIAN = '<'
else:
_ENDIAN = '>'
_MODE_CONV = {
# official modes
"1": ('|b1', None), # broken
"L": ('|u1', None),
"I": (_ENDIAN + 'i4', None),
"F": (_ENDIAN + 'f4', None),
"P": ('|u1', None),
"RGB": ('|u1', 3),
"RGBX": ('|u1', 4),
"RGBA": ('|u1', 4),
"CMYK": ('|u1', 4),
"YCbCr": ('|u1', 3),
"LAB": ('|u1', 3), # UNDONE - unsigned |u1i1i1
# I;16 == I;16L, and I;32 == I;32L
"I;16": ('<u2', None),
"I;16B": ('>u2', None),
"I;16L": ('<u2', None),
"I;16S": ('<i2', None),
"I;16BS": ('>i2', None),
"I;16LS": ('<i2', None),
"I;32": ('<u4', None),
"I;32B": ('>u4', None),
"I;32L": ('<u4', None),
"I;32S": ('<i4', None),
"I;32BS": ('>i4', None),
"I;32LS": ('<i4', None),
}
def _conv_type_shape(im):
shape = im.size[1], im.size[0]
typ, extra = _MODE_CONV[im.mode]
if extra is None:
return shape, typ
else:
return shape+(extra,), typ
MODES = sorted(_MODEINFO.keys())
# raw modes that may be memory mapped. NOTE: if you change this, you
# may have to modify the stride calculation in map.c too!
_MAPMODES = ("L", "P", "RGBX", "RGBA", "CMYK", "I;16", "I;16L", "I;16B")
def getmodebase(mode):
"""
Gets the "base" mode for given mode. This function returns "L" for
images that contain grayscale data, and "RGB" for images that
contain color data.
:param mode: Input mode.
:returns: "L" or "RGB".
:exception KeyError: If the input mode was not a standard mode.
"""
return ImageMode.getmode(mode).basemode
def getmodetype(mode):
"""
Gets the storage type mode. Given a mode, this function returns a
single-layer mode suitable for storing individual bands.
:param mode: Input mode.
:returns: "L", "I", or "F".
:exception KeyError: If the input mode was not a standard mode.
"""
return ImageMode.getmode(mode).basetype
def getmodebandnames(mode):
"""
Gets a list of individual band names. Given a mode, this function returns
a tuple containing the names of individual bands (use
    :py:meth:`~PIL.Image.getmodetype` to get the mode used to store each
    individual band).
:param mode: Input mode.
:returns: A tuple containing band names. The length of the tuple
gives the number of bands in an image of the given mode.
:exception KeyError: If the input mode was not a standard mode.
"""
return ImageMode.getmode(mode).bands
def getmodebands(mode):
"""
Gets the number of individual bands for this mode.
:param mode: Input mode.
:returns: The number of bands in this mode.
:exception KeyError: If the input mode was not a standard mode.
"""
return len(ImageMode.getmode(mode).bands)
# --------------------------------------------------------------------
# Helpers
_initialized = 0
def preinit():
"Explicitly load standard file format drivers."
global _initialized
if _initialized >= 1:
return
try:
from PIL import BmpImagePlugin
except ImportError:
pass
try:
from PIL import GifImagePlugin
except ImportError:
pass
try:
from PIL import JpegImagePlugin
except ImportError:
pass
try:
from PIL import PpmImagePlugin
except ImportError:
pass
try:
from PIL import PngImagePlugin
except ImportError:
pass
# try:
# import TiffImagePlugin
# except ImportError:
# pass
_initialized = 1
def init():
"""
Explicitly initializes the Python Imaging Library. This function
loads all available file format drivers.
"""
global _initialized
if _initialized >= 2:
return 0
for plugin in _plugins:
try:
if DEBUG:
print ("Importing %s"%plugin)
__import__("PIL.%s"%plugin, globals(), locals(), [])
except ImportError:
if DEBUG:
print("Image: failed to import", end=' ')
print(plugin, ":", sys.exc_info()[1])
if OPEN or SAVE:
_initialized = 2
return 1
# --------------------------------------------------------------------
# Codec factories (used by tobytes/frombytes and ImageFile.load)
def _getdecoder(mode, decoder_name, args, extra=()):
# tweak arguments
if args is None:
args = ()
elif not isinstance(args, tuple):
args = (args,)
try:
# get decoder
decoder = getattr(core, decoder_name + "_decoder")
# print(decoder, mode, args + extra)
return decoder(mode, *args + extra)
except AttributeError:
raise IOError("decoder %s not available" % decoder_name)
def _getencoder(mode, encoder_name, args, extra=()):
# tweak arguments
if args is None:
args = ()
elif not isinstance(args, tuple):
args = (args,)
try:
# get encoder
encoder = getattr(core, encoder_name + "_encoder")
# print(encoder, mode, args + extra)
return encoder(mode, *args + extra)
except AttributeError:
raise IOError("encoder %s not available" % encoder_name)
# --------------------------------------------------------------------
# Simple expression analyzer
def coerce_e(value):
return value if isinstance(value, _E) else _E(value)
class _E:
def __init__(self, data):
self.data = data
def __add__(self, other):
return _E((self.data, "__add__", coerce_e(other).data))
def __mul__(self, other):
return _E((self.data, "__mul__", coerce_e(other).data))
def _getscaleoffset(expr):
stub = ["stub"]
data = expr(_E(stub)).data
try:
(a, b, c) = data # simplified syntax
if (a is stub and b == "__mul__" and isinstance(c, numbers.Number)):
return c, 0.0
if (a is stub and b == "__add__" and isinstance(c, numbers.Number)):
return 1.0, c
except TypeError: pass
try:
((a, b, c), d, e) = data # full syntax
if (a is stub and b == "__mul__" and isinstance(c, numbers.Number) and
d == "__add__" and isinstance(e, numbers.Number)):
return c, e
except TypeError: pass
raise ValueError("illegal expression")
# --------------------------------------------------------------------
# Implementation wrapper
class Image:
"""
This class represents an image object. To create
:py:class:`~PIL.Image.Image` objects, use the appropriate factory
functions. There's hardly ever any reason to call the Image constructor
directly.
* :py:func:`~PIL.Image.open`
* :py:func:`~PIL.Image.new`
* :py:func:`~PIL.Image.frombytes`
"""
format = None
format_description = None
def __init__(self):
# FIXME: take "new" parameters / other image?
# FIXME: turn mode and size into delegating properties?
self.im = None
self.mode = ""
self.size = (0, 0)
self.palette = None
self.info = {}
self.category = NORMAL
self.readonly = 0
self.pyaccess = None
def _new(self, im):
new = Image()
new.im = im
new.mode = im.mode
new.size = im.size
new.palette = self.palette
if im.mode == "P" and not new.palette:
from PIL import ImagePalette
new.palette = ImagePalette.ImagePalette()
try:
new.info = self.info.copy()
except AttributeError:
# fallback (pre-1.5.2)
new.info = {}
for k, v in self.info:
new.info[k] = v
return new
_makeself = _new # compatibility
def _copy(self):
self.load()
self.im = self.im.copy()
self.pyaccess = None
self.readonly = 0
def _dump(self, file=None, format=None):
import tempfile, os
if not file:
f, file = tempfile.mkstemp(format or '')
os.close(f)
self.load()
if not format or format == "PPM":
self.im.save_ppm(file)
else:
            if not file.endswith(format):
file = file + "." + format
self.save(file, format)
return file
def __repr__(self):
return "<%s.%s image mode=%s size=%dx%d at 0x%X>" % (
self.__class__.__module__, self.__class__.__name__,
self.mode, self.size[0], self.size[1],
id(self)
)
def __getattr__(self, name):
if name == "__array_interface__":
# numpy array interface support
new = {}
shape, typestr = _conv_type_shape(self)
new['shape'] = shape
new['typestr'] = typestr
new['data'] = self.tobytes()
return new
raise AttributeError(name)
def tobytes(self, encoder_name="raw", *args):
"""
Return image as a bytes object
:param encoder_name: What encoder to use. The default is to
use the standard "raw" encoder.
:param args: Extra arguments to the encoder.
:rtype: A bytes object.
"""
# may pass tuple instead of argument list
if len(args) == 1 and isinstance(args[0], tuple):
args = args[0]
if encoder_name == "raw" and args == ():
args = self.mode
self.load()
# unpack data
e = _getencoder(self.mode, encoder_name, args)
e.setimage(self.im)
bufsize = max(65536, self.size[0] * 4) # see RawEncode.c
data = []
while True:
l, s, d = e.encode(bufsize)
data.append(d)
if s:
break
if s < 0:
raise RuntimeError("encoder error %d in tobytes" % s)
return b"".join(data)
# Declare tostring as alias to tobytes
def tostring(self, *args, **kw):
warnings.warn(
'tostring() is deprecated. Please call tobytes() instead.',
DeprecationWarning,
stacklevel=2,
)
return self.tobytes(*args, **kw)
def tobitmap(self, name="image"):
"""
Returns the image converted to an X11 bitmap.
.. note:: This method only works for mode "1" images.
:param name: The name prefix to use for the bitmap variables.
:returns: A string containing an X11 bitmap.
:raises ValueError: If the mode is not "1"
"""
self.load()
if self.mode != "1":
raise ValueError("not a bitmap")
data = self.tobytes("xbm")
return b"".join([("#define %s_width %d\n" % (name, self.size[0])).encode('ascii'),
("#define %s_height %d\n"% (name, self.size[1])).encode('ascii'),
("static char %s_bits[] = {\n" % name).encode('ascii'), data, b"};"])
def frombytes(self, data, decoder_name="raw", *args):
"""
Loads this image with pixel data from a bytes object.
This method is similar to the :py:func:`~PIL.Image.frombytes` function,
but loads data into this image instead of creating a new image object.
"""
# may pass tuple instead of argument list
if len(args) == 1 and isinstance(args[0], tuple):
args = args[0]
# default format
if decoder_name == "raw" and args == ():
args = self.mode
# unpack data
d = _getdecoder(self.mode, decoder_name, args)
d.setimage(self.im)
s = d.decode(data)
if s[0] >= 0:
raise ValueError("not enough image data")
if s[1] != 0:
raise ValueError("cannot decode image data")
def fromstring(self, *args, **kw):
"""Deprecated alias to frombytes.
.. deprecated:: 2.0
"""
warnings.warn('fromstring() is deprecated. Please call frombytes() instead.', DeprecationWarning)
return self.frombytes(*args, **kw)
def load(self):
"""
Allocates storage for the image and loads the pixel data. In
normal cases, you don't need to call this method, since the
Image class automatically loads an opened image when it is
accessed for the first time.
:returns: An image access object.
"""
if self.im and self.palette and self.palette.dirty:
# realize palette
self.im.putpalette(*self.palette.getdata())
self.palette.dirty = 0
self.palette.mode = "RGB"
self.palette.rawmode = None
if "transparency" in self.info:
if isinstance(self.info["transparency"], int):
self.im.putpalettealpha(self.info["transparency"], 0)
else:
self.im.putpalettealphas(self.info["transparency"])
self.palette.mode = "RGBA"
if self.im:
if HAS_CFFI and USE_CFFI_ACCESS:
if self.pyaccess:
return self.pyaccess
from PIL import PyAccess
self.pyaccess = PyAccess.new(self, self.readonly)
if self.pyaccess:
return self.pyaccess
return self.im.pixel_access(self.readonly)
def verify(self):
"""
Verifies the contents of a file. For data read from a file, this
method attempts to determine if the file is broken, without
actually decoding the image data. If this method finds any
problems, it raises suitable exceptions. If you need to load
the image after using this method, you must reopen the image
file.
"""
pass
def convert(self, mode=None, matrix=None, dither=None,
palette=WEB, colors=256):
"""
Returns a converted copy of this image. For the "P" mode, this
method translates pixels through the palette. If mode is
omitted, a mode is chosen so that all information in the image
and the palette can be represented without a palette.
The current version supports all possible conversions between
"L", "RGB" and "CMYK." The **matrix** argument only supports "L"
and "RGB".
When translating a color image to black and white (mode "L"),
the library uses the ITU-R 601-2 luma transform::
L = R * 299/1000 + G * 587/1000 + B * 114/1000
The default method of converting a greyscale ("L") or "RGB"
image into a bilevel (mode "1") image uses Floyd-Steinberg
dither to approximate the original image luminosity levels. If
dither is NONE, all non-zero values are set to 255 (white). To
use other thresholds, use the :py:meth:`~PIL.Image.Image.point`
method.
:param mode: The requested mode.
:param matrix: An optional conversion matrix. If given, this
should be 4- or 16-tuple containing floating point values.
:param dither: Dithering method, used when converting from
mode "RGB" to "P" or from "RGB" or "L" to "1".
Available methods are NONE or FLOYDSTEINBERG (default).
:param palette: Palette to use when converting from mode "RGB"
to "P". Available palettes are WEB or ADAPTIVE.
:param colors: Number of colors to use for the ADAPTIVE palette.
Defaults to 256.
:rtype: :py:class:`~PIL.Image.Image`
:returns: An :py:class:`~PIL.Image.Image` object.
"""
if not mode:
# determine default mode
if self.mode == "P":
self.load()
if self.palette:
mode = self.palette.mode
else:
mode = "RGB"
else:
return self.copy()
self.load()
if matrix:
# matrix conversion
if mode not in ("L", "RGB"):
raise ValueError("illegal conversion")
im = self.im.convert_matrix(mode, matrix)
return self._new(im)
if mode == "P" and self.mode == "RGBA":
return self.quantize(colors)
trns = None
delete_trns = False
# transparency handling
if "transparency" in self.info and self.info['transparency'] is not None:
if self.mode in ('L', 'RGB') and mode == 'RGBA':
# Use transparent conversion to promote from transparent
# color to an alpha channel.
return self._new(self.im.convert_transparent(
mode, self.info['transparency']))
elif self.mode in ('L', 'RGB', 'P') and mode in ('L', 'RGB', 'P'):
t = self.info['transparency']
if isinstance(t, bytes):
# Dragons. This can't be represented by a single color
                    warnings.warn('Palette images with Transparency expressed ' +
                                  'in bytes should be converted to RGBA images')
delete_trns = True
else:
# get the new transparency color.
# use existing conversions
trns_im = Image()._new(core.new(self.mode, (1,1)))
if self.mode == 'P':
trns_im.putpalette(self.palette)
trns_im.putpixel((0,0), t)
if mode in ('L','RGB'):
trns_im = trns_im.convert(mode)
else:
# can't just retrieve the palette number, got to do it
# after quantization.
trns_im = trns_im.convert('RGB')
trns = trns_im.getpixel((0,0))
if mode == "P" and palette == ADAPTIVE:
im = self.im.quantize(colors)
new = self._new(im)
from PIL import ImagePalette
new.palette = ImagePalette.raw("RGB", new.im.getpalette("RGB"))
if delete_trns:
# This could possibly happen if we requantize to fewer colors.
# The transparency would be totally off in that case.
del(new.info['transparency'])
if trns is not None:
try:
new.info['transparency'] = new.palette.getcolor(trns)
except:
# if we can't make a transparent color, don't leave the old
# transparency hanging around to mess us up.
del(new.info['transparency'])
warnings.warn("Couldn't allocate palette entry for transparency")
return new
# colorspace conversion
if dither is None:
dither = FLOYDSTEINBERG
try:
im = self.im.convert(mode, dither)
except ValueError:
try:
# normalize source image and try again
im = self.im.convert(getmodebase(self.mode))
im = im.convert(mode, dither)
except KeyError:
raise ValueError("illegal conversion")
new_im = self._new(im)
if delete_trns:
            # crash fail if we leave a bytes transparency in an rgb/l mode.
            del(new_im.info['transparency'])
if trns is not None:
if new_im.mode == 'P':
try:
new_im.info['transparency'] = new_im.palette.getcolor(trns)
except:
del(new_im.info['transparency'])
warnings.warn("Couldn't allocate palette entry for transparency")
else:
new_im.info['transparency'] = trns
return new_im
def quantize(self, colors=256, method=None, kmeans=0, palette=None):
# methods:
# 0 = median cut
# 1 = maximum coverage
# 2 = fast octree
# NOTE: this functionality will be moved to the extended
# quantizer interface in a later version of PIL.
self.load()
if method is None:
# defaults:
method = 0
if self.mode == 'RGBA':
method = 2
if self.mode == 'RGBA' and method != 2:
# Caller specified an invalid mode.
raise ValueError('Fast Octree (method == 2) is the ' +
                             'only valid method for quantizing RGBA images')
if palette:
# use palette from reference image
palette.load()
if palette.mode != "P":
raise ValueError("bad mode for palette image")
if self.mode != "RGB" and self.mode != "L":
raise ValueError(
"only RGB or L mode images can be quantized to a palette"
)
im = self.im.convert("P", 1, palette.im)
return self._makeself(im)
im = self.im.quantize(colors, method, kmeans)
return self._new(im)
def copy(self):
"""
Copies this image. Use this method if you wish to paste things
into an image, but still retain the original.
:rtype: :py:class:`~PIL.Image.Image`
:returns: An :py:class:`~PIL.Image.Image` object.
"""
self.load()
im = self.im.copy()
return self._new(im)
def crop(self, box=None):
"""
Returns a rectangular region from this image. The box is a
4-tuple defining the left, upper, right, and lower pixel
coordinate.
This is a lazy operation. Changes to the source image may or
may not be reflected in the cropped image. To break the
connection, call the :py:meth:`~PIL.Image.Image.load` method on
the cropped copy.
:param box: The crop rectangle, as a (left, upper, right, lower)-tuple.
:rtype: :py:class:`~PIL.Image.Image`
:returns: An :py:class:`~PIL.Image.Image` object.
"""
self.load()
if box is None:
return self.copy()
# lazy operation
return _ImageCrop(self, box)
def draft(self, mode, size):
"""
NYI
Configures the image file loader so it returns a version of the
image that as closely as possible matches the given mode and
size. For example, you can use this method to convert a color
JPEG to greyscale while loading it, or to extract a 128x192
version from a PCD file.
Note that this method modifies the :py:class:`~PIL.Image.Image` object
in place. If the image has already been loaded, this method has no
effect.
:param mode: The requested mode.
:param size: The requested size.
"""
pass
def _expand(self, xmargin, ymargin=None):
if ymargin is None:
ymargin = xmargin
self.load()
return self._new(self.im.expand(xmargin, ymargin, 0))
def filter(self, filter):
"""
Filters this image using the given filter. For a list of
available filters, see the :py:mod:`~PIL.ImageFilter` module.
:param filter: Filter kernel.
:returns: An :py:class:`~PIL.Image.Image` object. """
self.load()
if isinstance(filter, collections.Callable):
filter = filter()
if not hasattr(filter, "filter"):
raise TypeError("filter argument should be ImageFilter.Filter instance or class")
if self.im.bands == 1:
return self._new(filter.filter(self.im))
# fix to handle multiband images since _imaging doesn't
ims = []
for c in range(self.im.bands):
ims.append(self._new(filter.filter(self.im.getband(c))))
return merge(self.mode, ims)
def getbands(self):
"""
Returns a tuple containing the name of each band in this image.
For example, **getbands** on an RGB image returns ("R", "G", "B").
:returns: A tuple containing band names.
:rtype: tuple
"""
return ImageMode.getmode(self.mode).bands
def getbbox(self):
"""
Calculates the bounding box of the non-zero regions in the
image.
:returns: The bounding box is returned as a 4-tuple defining the
left, upper, right, and lower pixel coordinate. If the image
is completely empty, this method returns None.
"""
self.load()
return self.im.getbbox()
def getcolors(self, maxcolors=256):
"""
Returns a list of colors used in this image.
:param maxcolors: Maximum number of colors. If this number is
exceeded, this method returns None. The default limit is
256 colors.
:returns: An unsorted list of (count, pixel) values.
"""
self.load()
if self.mode in ("1", "L", "P"):
h = self.im.histogram()
out = []
for i in range(256):
if h[i]:
out.append((h[i], i))
if len(out) > maxcolors:
return None
return out
return self.im.getcolors(maxcolors)
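    # Editor's sketch: getcolors() returns None once maxcolors is exceeded, so the
    # result needs a None check (hypothetical image `im`) --
    #     colors = im.getcolors(maxcolors=1 << 16)
    #     if colors is not None:
    #         count, value = max(colors)             # most frequent pixel value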
def getdata(self, band = None):
"""
Returns the contents of this image as a sequence object
containing pixel values. The sequence object is flattened, so
that values for line one follow directly after the values of
line zero, and so on.
Note that the sequence object returned by this method is an
internal PIL data type, which only supports certain sequence
operations. To convert it to an ordinary sequence (e.g. for
printing), use **list(im.getdata())**.
:param band: What band to return. The default is to return
all bands. To return a single band, pass in the index
value (e.g. 0 to get the "R" band from an "RGB" image).
:returns: A sequence-like object.
"""
self.load()
if band is not None:
return self.im.getband(band)
return self.im # could be abused
def getextrema(self):
"""
        Gets the minimum and maximum pixel values for each band in
the image.
:returns: For a single-band image, a 2-tuple containing the
minimum and maximum pixel value. For a multi-band image,
a tuple containing one 2-tuple for each band.
"""
self.load()
if self.im.bands > 1:
extrema = []
for i in range(self.im.bands):
extrema.append(self.im.getband(i).getextrema())
return tuple(extrema)
return self.im.getextrema()
def getim(self):
"""
Returns a capsule that points to the internal image memory.
:returns: A capsule object.
"""
self.load()
return self.im.ptr
def getpalette(self):
"""
Returns the image palette as a list.
:returns: A list of color values [r, g, b, ...], or None if the
image has no palette.
"""
self.load()
try:
if bytes is str:
return [i8(c) for c in self.im.getpalette()]
else:
return list(self.im.getpalette())
except ValueError:
return None # no palette
def getpixel(self, xy):
"""
Returns the pixel value at a given position.
:param xy: The coordinate, given as (x, y).
:returns: The pixel value. If the image is a multi-layer image,
this method returns a tuple.
"""
self.load()
if self.pyaccess:
return self.pyaccess.getpixel(xy)
return self.im.getpixel(xy)
def getprojection(self):
"""
Get projection to x and y axes
:returns: Two sequences, indicating where there are non-zero
pixels along the X-axis and the Y-axis, respectively.
"""
self.load()
x, y = self.im.getprojection()
return [i8(c) for c in x], [i8(c) for c in y]
def histogram(self, mask=None, extrema=None):
"""
Returns a histogram for the image. The histogram is returned as
a list of pixel counts, one for each pixel value in the source
image. If the image has more than one band, the histograms for
all bands are concatenated (for example, the histogram for an
"RGB" image contains 768 values).
A bilevel image (mode "1") is treated as a greyscale ("L") image
by this method.
If a mask is provided, the method returns a histogram for those
parts of the image where the mask image is non-zero. The mask
image must have the same size as the image, and be either a
bi-level image (mode "1") or a greyscale image ("L").
:param mask: An optional mask.
:returns: A list containing pixel counts.
"""
self.load()
if mask:
mask.load()
return self.im.histogram((0, 0), mask.im)
if self.mode in ("I", "F"):
if extrema is None:
extrema = self.getextrema()
return self.im.histogram(extrema)
return self.im.histogram()
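    # Editor's sketch: splitting the concatenated histogram described above into
    # per-band counts for an "RGB" image (hypothetical image `im`) --
    #     h = im.histogram()                         # 768 counts for "RGB"
    #     r, g, b = h[0:256], h[256:512], h[512:768]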
def offset(self, xoffset, yoffset=None):
"""
.. deprecated:: 2.0
.. note:: New code should use :py:func:`PIL.ImageChops.offset`.
Returns a copy of the image where the data has been offset by the given
distances. Data wraps around the edges. If **yoffset** is omitted, it
is assumed to be equal to **xoffset**.
:param xoffset: The horizontal distance.
:param yoffset: The vertical distance. If omitted, both
distances are set to the same value.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
if warnings:
warnings.warn(
"'offset' is deprecated; use 'ImageChops.offset' instead",
DeprecationWarning, stacklevel=2
)
from PIL import ImageChops
return ImageChops.offset(self, xoffset, yoffset)
def paste(self, im, box=None, mask=None):
"""
Pastes another image into this image. The box argument is either
a 2-tuple giving the upper left corner, a 4-tuple defining the
left, upper, right, and lower pixel coordinate, or None (same as
(0, 0)). If a 4-tuple is given, the size of the pasted image
must match the size of the region.
If the modes don't match, the pasted image is converted to the mode of
this image (see the :py:meth:`~PIL.Image.Image.convert` method for
details).
        Instead of an image, the source can be an integer or tuple
containing pixel values. The method then fills the region
with the given color. When creating RGB images, you can
also use color strings as supported by the ImageColor module.
If a mask is given, this method updates only the regions
indicated by the mask. You can use either "1", "L" or "RGBA"
images (in the latter case, the alpha band is used as mask).
Where the mask is 255, the given image is copied as is. Where
the mask is 0, the current value is preserved. Intermediate
values can be used for transparency effects.
Note that if you paste an "RGBA" image, the alpha band is
ignored. You can work around this by using the same image as
both source image and mask.
:param im: Source image or pixel value (integer or tuple).
:param box: An optional 4-tuple giving the region to paste into.
If a 2-tuple is used instead, it's treated as the upper left
corner. If omitted or None, the source is pasted into the
upper left corner.
If an image is given as the second argument and there is no
third, the box defaults to (0, 0), and the second argument
is interpreted as a mask image.
:param mask: An optional mask image.
"""
if isImageType(box) and mask is None:
# abbreviated paste(im, mask) syntax
mask = box; box = None
if box is None:
# cover all of self
box = (0, 0) + self.size
if len(box) == 2:
            # upper left corner given; get size from image or mask
if isImageType(im):
size = im.size
elif isImageType(mask):
size = mask.size
else:
# FIXME: use self.size here?
raise ValueError(
"cannot determine region size; use 4-item box"
)
box = box + (box[0]+size[0], box[1]+size[1])
if isStringType(im):
from PIL import ImageColor
im = ImageColor.getcolor(im, self.mode)
elif isImageType(im):
im.load()
if self.mode != im.mode:
if self.mode != "RGB" or im.mode not in ("RGBA", "RGBa"):
# should use an adapter for this!
im = im.convert(self.mode)
im = im.im
self.load()
if self.readonly:
self._copy()
if mask:
mask.load()
self.im.paste(im, box, mask.im)
else:
self.im.paste(im, box)
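    # Editor's sketch of the box and mask conventions documented above
    # (hypothetical images `im`, `tile` and an "L" mode `mask`) --
    #     im.paste(tile, (10, 10))                   # 2-tuple: upper left corner
    #     im.paste((255, 0, 0), (0, 0, 50, 50))      # fill a region with a colour
    #     im.paste(tile, (0, 0), mask)               # copy only where mask is non-zero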
def point(self, lut, mode=None):
"""
Maps this image through a lookup table or function.
        :param lut: A lookup table, containing 256 (or 65536 if
self.mode=="I" and mode == "L") values per band in the
image. A function can be used instead, it should take a
single argument. The function is called once for each
possible pixel value, and the resulting table is applied to
all bands of the image.
:param mode: Output mode (default is same as input). In the
current version, this can only be used if the source image
has mode "L" or "P", and the output has mode "1" or the
source image mode is "I" and the output mode is "L".
:returns: An :py:class:`~PIL.Image.Image` object.
"""
self.load()
if isinstance(lut, ImagePointHandler):
return lut.point(self)
if callable(lut):
# if it isn't a list, it should be a function
if self.mode in ("I", "I;16", "F"):
# check if the function can be used with point_transform
# UNDONE wiredfool -- I think this prevents us from ever doing
# a gamma function point transform on > 8bit images.
scale, offset = _getscaleoffset(lut)
return self._new(self.im.point_transform(scale, offset))
# for other modes, convert the function to a table
lut = [lut(i) for i in range(256)] * self.im.bands
if self.mode == "F":
# FIXME: _imaging returns a confusing error message for this case
raise ValueError("point operation not supported for this mode")
return self._new(self.im.point(lut, mode))
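    # Editor's sketch: a gamma-style point transform on an 8-bit image, using the
    # callable form described above (hypothetical image `im`) --
    #     gamma = 2.2
    #     out = im.point(lambda v: int(255 * (v / 255.0) ** (1.0 / gamma)))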
def putalpha(self, alpha):
"""
Adds or replaces the alpha layer in this image. If the image
does not have an alpha layer, it's converted to "LA" or "RGBA".
The new layer must be either "L" or "1".
:param alpha: The new alpha layer. This can either be an "L" or "1"
image having the same size as this image, or an integer or
other color value.
"""
self.load()
if self.readonly:
self._copy()
if self.mode not in ("LA", "RGBA"):
# attempt to promote self to a matching alpha mode
try:
mode = getmodebase(self.mode) + "A"
try:
self.im.setmode(mode)
self.pyaccess = None
except (AttributeError, ValueError):
# do things the hard way
im = self.im.convert(mode)
if im.mode not in ("LA", "RGBA"):
raise ValueError # sanity check
self.im = im
self.pyaccess = None
self.mode = self.im.mode
except (KeyError, ValueError):
raise ValueError("illegal image mode")
if self.mode == "LA":
band = 1
else:
band = 3
if isImageType(alpha):
# alpha layer
if alpha.mode not in ("1", "L"):
raise ValueError("illegal image mode")
alpha.load()
if alpha.mode == "1":
alpha = alpha.convert("L")
else:
# constant alpha
try:
self.im.fillband(band, alpha)
except (AttributeError, ValueError):
# do things the hard way
alpha = new("L", self.size, alpha)
else:
return
self.im.putband(alpha.im, band)
def putdata(self, data, scale=1.0, offset=0.0):
"""
Copies pixel data to this image. This method copies data from a
sequence object into the image, starting at the upper left
corner (0, 0), and continuing until either the image or the
sequence ends. The scale and offset values are used to adjust
the sequence values: **pixel = value*scale + offset**.
:param data: A sequence object.
:param scale: An optional scale value. The default is 1.0.
:param offset: An optional offset value. The default is 0.0.
"""
self.load()
if self.readonly:
self._copy()
self.im.putdata(data, scale, offset)
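    # Editor's sketch of the pixel = value*scale + offset rule stated above --
    #     im = new("L", (4, 1))
    #     im.putdata([0, 1, 2, 3], scale=10, offset=5)   # stores 5, 15, 25, 35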
def putpalette(self, data, rawmode="RGB"):
"""
Attaches a palette to this image. The image must be a "P" or
"L" image, and the palette sequence must contain 768 integer
values, where each group of three values represent the red,
green, and blue values for the corresponding pixel
index. Instead of an integer sequence, you can use an 8-bit
string.
:param data: A palette sequence (either a list or a string).
"""
from PIL import ImagePalette
if self.mode not in ("L", "P"):
raise ValueError("illegal image mode")
self.load()
if isinstance(data, ImagePalette.ImagePalette):
palette = ImagePalette.raw(data.rawmode, data.palette)
else:
if not isinstance(data, bytes):
if bytes is str:
data = "".join(chr(x) for x in data)
else:
data = bytes(data)
palette = ImagePalette.raw(rawmode, data)
self.mode = "P"
self.palette = palette
self.palette.mode = "RGB"
self.load() # install new palette
def putpixel(self, xy, value):
"""
Modifies the pixel at the given position. The color is given as
a single numerical value for single-band images, and a tuple for
multi-band images.
Note that this method is relatively slow. For more extensive changes,
use :py:meth:`~PIL.Image.Image.paste` or the :py:mod:`~PIL.ImageDraw`
module instead.
See:
* :py:meth:`~PIL.Image.Image.paste`
* :py:meth:`~PIL.Image.Image.putdata`
* :py:mod:`~PIL.ImageDraw`
:param xy: The pixel coordinate, given as (x, y).
:param value: The pixel value.
"""
self.load()
if self.readonly:
self._copy()
self.pyaccess = None
self.load()
if self.pyaccess:
return self.pyaccess.putpixel(xy,value)
return self.im.putpixel(xy, value)
def resize(self, size, resample=NEAREST):
"""
Returns a resized copy of this image.
:param size: The requested size in pixels, as a 2-tuple:
(width, height).
:param resample: An optional resampling filter. This can be
one of :py:attr:`PIL.Image.NEAREST` (use nearest neighbour),
:py:attr:`PIL.Image.BILINEAR` (linear interpolation in a 2x2
environment), :py:attr:`PIL.Image.BICUBIC` (cubic spline
interpolation in a 4x4 environment), or
:py:attr:`PIL.Image.ANTIALIAS` (a high-quality downsampling filter).
If omitted, or if the image has mode "1" or "P", it is
           set to :py:attr:`PIL.Image.NEAREST`.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
if resample not in (NEAREST, BILINEAR, BICUBIC, ANTIALIAS):
raise ValueError("unknown resampling filter")
self.load()
if self.mode in ("1", "P"):
resample = NEAREST
if self.mode == 'RGBA':
return self.convert('RGBa').resize(size, resample).convert('RGBA')
if resample == ANTIALIAS:
# requires stretch support (imToolkit & PIL 1.1.3)
try:
im = self.im.stretch(size, resample)
except AttributeError:
raise ValueError("unsupported resampling filter")
else:
im = self.im.resize(size, resample)
return self._new(im)
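    # Editor's sketch: a high-quality downscale with the ANTIALIAS filter noted
    # above (hypothetical image `im`) --
    #     w, h = im.size
    #     smaller = im.resize((w // 2, h // 2), ANTIALIAS)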
def rotate(self, angle, resample=NEAREST, expand=0):
"""
Returns a rotated copy of this image. This method returns a
copy of this image, rotated the given number of degrees counter
clockwise around its centre.
:param angle: In degrees counter clockwise.
        :param resample: An optional resampling filter. This can be
one of :py:attr:`PIL.Image.NEAREST` (use nearest neighbour),
:py:attr:`PIL.Image.BILINEAR` (linear interpolation in a 2x2
environment), or :py:attr:`PIL.Image.BICUBIC`
(cubic spline interpolation in a 4x4 environment).
If omitted, or if the image has mode "1" or "P", it is
           set to :py:attr:`PIL.Image.NEAREST`.
:param expand: Optional expansion flag. If true, expands the output
image to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the
input image.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
if expand:
import math
angle = -angle * math.pi / 180
matrix = [
math.cos(angle), math.sin(angle), 0.0,
-math.sin(angle), math.cos(angle), 0.0
]
def transform(x, y, matrix=matrix):
(a, b, c, d, e, f) = matrix
return a*x + b*y + c, d*x + e*y + f
# calculate output size
w, h = self.size
xx = []
yy = []
for x, y in ((0, 0), (w, 0), (w, h), (0, h)):
x, y = transform(x, y)
xx.append(x)
yy.append(y)
w = int(math.ceil(max(xx)) - math.floor(min(xx)))
h = int(math.ceil(max(yy)) - math.floor(min(yy)))
# adjust center
x, y = transform(w / 2.0, h / 2.0)
matrix[2] = self.size[0] / 2.0 - x
matrix[5] = self.size[1] / 2.0 - y
return self.transform((w, h), AFFINE, matrix, resample)
if resample not in (NEAREST, BILINEAR, BICUBIC):
raise ValueError("unknown resampling filter")
self.load()
if self.mode in ("1", "P"):
resample = NEAREST
return self._new(self.im.rotate(angle, resample))
def save(self, fp, format=None, **params):
"""
Saves this image under the given filename. If no format is
specified, the format to use is determined from the filename
extension, if possible.
Keyword options can be used to provide additional instructions
to the writer. If a writer doesn't recognise an option, it is
silently ignored. The available options are described later in
this handbook.
You can use a file object instead of a filename. In this case,
you must always specify the format. The file object must
implement the **seek**, **tell**, and **write**
methods, and be opened in binary mode.
:param file: File name or file object.
:param format: Optional format override. If omitted, the
format to use is determined from the filename extension.
If a file object was used instead of a filename, this
parameter should always be used.
:param options: Extra parameters to the image writer.
:returns: None
:exception KeyError: If the output format could not be determined
from the file name. Use the format option to solve this.
:exception IOError: If the file could not be written. The file
may have been created, and may contain partial data.
"""
if isPath(fp):
filename = fp
else:
if hasattr(fp, "name") and isPath(fp.name):
filename = fp.name
else:
filename = ""
# may mutate self!
self.load()
self.encoderinfo = params
self.encoderconfig = ()
preinit()
ext = os.path.splitext(filename)[1].lower()
if not format:
try:
format = EXTENSION[ext]
except KeyError:
init()
try:
format = EXTENSION[ext]
except KeyError:
raise KeyError(ext) # unknown extension
try:
save_handler = SAVE[format.upper()]
except KeyError:
init()
save_handler = SAVE[format.upper()] # unknown format
if isPath(fp):
fp = builtins.open(fp, "wb")
close = 1
else:
close = 0
try:
save_handler(self, fp, filename)
finally:
# do what we can to clean up
if close:
fp.close()
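    # Editor's sketch of the two save() calling conventions documented above --
    #     im.save("out.png")                         # format inferred from ".png"
    #     buf = io.BytesIO()                         # assumes `import io`
    #     im.save(buf, format="JPEG")                # file object: format is required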
def seek(self, frame):
"""
Seeks to the given frame in this sequence file. If you seek
beyond the end of the sequence, the method raises an
**EOFError** exception. When a sequence file is opened, the
library automatically seeks to frame 0.
Note that in the current version of the library, most sequence
        formats only allow you to seek to the next frame.
See :py:meth:`~PIL.Image.Image.tell`.
:param frame: Frame number, starting at 0.
:exception EOFError: If the call attempts to seek beyond the end
of the sequence.
"""
# overridden by file handlers
if frame != 0:
raise EOFError
def show(self, title=None, command=None):
"""
Displays this image. This method is mainly intended for
debugging purposes.
On Unix platforms, this method saves the image to a temporary
PPM file, and calls the **xv** utility.
On Windows, it saves the image to a temporary BMP file, and uses
the standard BMP display utility to show it (usually Paint).
:param title: Optional title to use for the image window,
where possible.
:param command: command used to show the image
"""
_show(self, title=title, command=command)
def split(self):
"""
Split this image into individual bands. This method returns a
tuple of individual image bands from an image. For example,
splitting an "RGB" image creates three new images each
containing a copy of one of the original bands (red, green,
blue).
:returns: A tuple containing bands.
"""
self.load()
if self.im.bands == 1:
ims = [self.copy()]
else:
ims = []
for i in range(self.im.bands):
ims.append(self._new(self.im.getband(i)))
return tuple(ims)
def tell(self):
"""
Returns the current frame number. See :py:meth:`~PIL.Image.Image.seek`.
:returns: Frame number, starting with 0.
"""
return 0
def thumbnail(self, size, resample=NEAREST):
"""
Make this image into a thumbnail. This method modifies the
image to contain a thumbnail version of itself, no larger than
the given size. This method calculates an appropriate thumbnail
size to preserve the aspect of the image, calls the
:py:meth:`~PIL.Image.Image.draft` method to configure the file reader
(where applicable), and finally resizes the image.
Note that the bilinear and bicubic filters in the current
version of PIL are not well-suited for thumbnail generation.
You should use :py:attr:`PIL.Image.ANTIALIAS` unless speed is much more
important than quality.
Also note that this function modifies the :py:class:`~PIL.Image.Image`
object in place. If you need to use the full resolution image as well, apply
this method to a :py:meth:`~PIL.Image.Image.copy` of the original image.
:param size: Requested size.
:param resample: Optional resampling filter. This can be one
of :py:attr:`PIL.Image.NEAREST`, :py:attr:`PIL.Image.BILINEAR`,
:py:attr:`PIL.Image.BICUBIC`, or :py:attr:`PIL.Image.ANTIALIAS`
(best quality). If omitted, it defaults to
:py:attr:`PIL.Image.NEAREST` (this will be changed to ANTIALIAS in a
future version).
:returns: None
"""
# FIXME: the default resampling filter will be changed
# to ANTIALIAS in future versions
# preserve aspect ratio
x, y = self.size
if x > size[0]: y = int(max(y * size[0] / x, 1)); x = int(size[0])
if y > size[1]: x = int(max(x * size[1] / y, 1)); y = int(size[1])
size = x, y
if size == self.size:
return
self.draft(None, size)
self.load()
try:
im = self.resize(size, resample)
except ValueError:
if resample != ANTIALIAS:
raise
im = self.resize(size, NEAREST) # fallback
self.im = im.im
self.mode = im.mode
self.size = size
self.readonly = 0
self.pyaccess = None
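    # Editor's sketch: thumbnail a copy so the full-resolution image survives, as
    # recommended above (hypothetical image `im`) --
    #     thumb = im.copy()
    #     thumb.thumbnail((128, 128), ANTIALIAS)     # modifies thumb in place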
    # FIXME: the different transform methods need further explanation
# instead of bloating the method docs, add a separate chapter.
def transform(self, size, method, data=None, resample=NEAREST, fill=1):
"""
Transforms this image. This method creates a new image with the
given size, and the same mode as the original, and copies data
to the new image using the given transform.
:param size: The output size.
:param method: The transformation method. This is one of
:py:attr:`PIL.Image.EXTENT` (cut out a rectangular subregion),
:py:attr:`PIL.Image.AFFINE` (affine transform),
:py:attr:`PIL.Image.PERSPECTIVE` (perspective transform),
:py:attr:`PIL.Image.QUAD` (map a quadrilateral to a rectangle), or
:py:attr:`PIL.Image.MESH` (map a number of source quadrilaterals
in one operation).
:param data: Extra data to the transformation method.
:param resample: Optional resampling filter. It can be one of
:py:attr:`PIL.Image.NEAREST` (use nearest neighbour),
:py:attr:`PIL.Image.BILINEAR` (linear interpolation in a 2x2
environment), or :py:attr:`PIL.Image.BICUBIC` (cubic spline
interpolation in a 4x4 environment). If omitted, or if the image
has mode "1" or "P", it is set to :py:attr:`PIL.Image.NEAREST`.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
if self.mode == 'RGBA':
return self.convert('RGBa').transform(size, method, data, resample, fill).convert('RGBA')
if isinstance(method, ImageTransformHandler):
return method.transform(size, self, resample=resample, fill=fill)
if hasattr(method, "getdata"):
# compatibility w. old-style transform objects
method, data = method.getdata()
if data is None:
raise ValueError("missing method data")
im = new(self.mode, size, None)
if method == MESH:
# list of quads
for box, quad in data:
im.__transformer(box, self, QUAD, quad, resample, fill)
else:
im.__transformer((0, 0)+size, self, method, data, resample, fill)
return im
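    # Editor's sketch: EXTENT maps a source rectangle onto the whole output image,
    # i.e. a crop-and-scale in one step (hypothetical image `im`) --
    #     zoomed = im.transform((200, 200), EXTENT, (10, 10, 60, 60), BILINEAR)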
def __transformer(self, box, image, method, data,
resample=NEAREST, fill=1):
# FIXME: this should be turned into a lazy operation (?)
w = box[2]-box[0]
h = box[3]-box[1]
if method == AFFINE:
# change argument order to match implementation
data = (data[2], data[0], data[1],
data[5], data[3], data[4])
elif method == EXTENT:
# convert extent to an affine transform
x0, y0, x1, y1 = data
xs = float(x1 - x0) / w
ys = float(y1 - y0) / h
method = AFFINE
data = (x0 + xs/2, xs, 0, y0 + ys/2, 0, ys)
elif method == PERSPECTIVE:
# change argument order to match implementation
data = (data[2], data[0], data[1],
data[5], data[3], data[4],
data[6], data[7])
elif method == QUAD:
# quadrilateral warp. data specifies the four corners
# given as NW, SW, SE, and NE.
nw = data[0:2]; sw = data[2:4]; se = data[4:6]; ne = data[6:8]
x0, y0 = nw; As = 1.0 / w; At = 1.0 / h
data = (x0, (ne[0]-x0)*As, (sw[0]-x0)*At,
(se[0]-sw[0]-ne[0]+x0)*As*At,
y0, (ne[1]-y0)*As, (sw[1]-y0)*At,
(se[1]-sw[1]-ne[1]+y0)*As*At)
else:
raise ValueError("unknown transformation method")
if resample not in (NEAREST, BILINEAR, BICUBIC):
raise ValueError("unknown resampling filter")
image.load()
self.load()
if image.mode in ("1", "P"):
resample = NEAREST
self.im.transform2(box, image.im, method, data, resample, fill)
def transpose(self, method):
"""
Transpose image (flip or rotate in 90 degree steps)
:param method: One of :py:attr:`PIL.Image.FLIP_LEFT_RIGHT`,
:py:attr:`PIL.Image.FLIP_TOP_BOTTOM`, :py:attr:`PIL.Image.ROTATE_90`,
:py:attr:`PIL.Image.ROTATE_180`, or :py:attr:`PIL.Image.ROTATE_270`.
:returns: Returns a flipped or rotated copy of this image.
"""
self.load()
im = self.im.transpose(method)
return self._new(im)
# --------------------------------------------------------------------
# Lazy operations
class _ImageCrop(Image):
def __init__(self, im, box):
Image.__init__(self)
x0, y0, x1, y1 = box
if x1 < x0:
x1 = x0
if y1 < y0:
y1 = y0
self.mode = im.mode
self.size = x1-x0, y1-y0
self.__crop = x0, y0, x1, y1
self.im = im.im
def load(self):
# lazy evaluation!
if self.__crop:
self.im = self.im.crop(self.__crop)
self.__crop = None
if self.im:
return self.im.pixel_access(self.readonly)
# FIXME: future versions should optimize crop/paste
# sequences!
# --------------------------------------------------------------------
# Abstract handlers.
class ImagePointHandler:
# used as a mixin by point transforms (for use with im.point)
pass
class ImageTransformHandler:
# used as a mixin by geometry transforms (for use with im.transform)
pass
# --------------------------------------------------------------------
# Factories
#
# Debugging
def _wedge():
"Create greyscale wedge (for debugging only)"
return Image()._new(core.wedge("L"))
def new(mode, size, color=0):
"""
Creates a new image with the given mode and size.
:param mode: The mode to use for the new image.
:param size: A 2-tuple, containing (width, height) in pixels.
:param color: What color to use for the image. Default is black.
If given, this should be a single integer or floating point value
for single-band modes, and a tuple for multi-band modes (one value
per band). When creating RGB images, you can also use color
strings as supported by the ImageColor module. If the color is
None, the image is not initialised.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
if color is None:
# don't initialize
return Image()._new(core.new(mode, size))
if isStringType(color):
# css3-style specifier
from PIL import ImageColor
color = ImageColor.getcolor(color, mode)
return Image()._new(core.fill(mode, size, color))
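# Editor's sketch (not part of the original module): a throwaway helper showing the
# colour forms accepted by new(); it is defined here for illustration and never called.
def _new_usage_sketch():
    a = new("RGB", (16, 16), (255, 0, 0))    # one value per band
    b = new("RGB", (16, 16), "yellow")       # css3-style colour name, via ImageColor
    c = new("L", (16, 16), None)             # uninitialised image memory
    return a, b, c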
def frombytes(mode, size, data, decoder_name="raw", *args):
"""
Creates a copy of an image memory from pixel data in a buffer.
In its simplest form, this function takes three arguments
(mode, size, and unpacked pixel data).
You can also use any pixel decoder supported by PIL. For more
information on available decoders, see the section
**Writing Your Own File Decoder**.
Note that this function decodes pixel data only, not entire images.
If you have an entire image in a string, wrap it in a
:py:class:`~io.BytesIO` object, and use :py:func:`~PIL.Image.open` to load
it.
:param mode: The image mode.
:param size: The image size.
:param data: A byte buffer containing raw data for the given mode.
:param decoder_name: What decoder to use.
:param args: Additional parameters for the given decoder.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
# may pass tuple instead of argument list
if len(args) == 1 and isinstance(args[0], tuple):
args = args[0]
if decoder_name == "raw" and args == ():
args = mode
im = new(mode, size)
im.frombytes(data, decoder_name, args)
return im
def fromstring(*args, **kw):
"""Deprecated alias to frombytes.
.. deprecated:: 2.0
"""
warnings.warn(
'fromstring() is deprecated. Please call frombytes() instead.',
DeprecationWarning,
stacklevel=2
)
return frombytes(*args, **kw)
def frombuffer(mode, size, data, decoder_name="raw", *args):
"""
Creates an image memory referencing pixel data in a byte buffer.
This function is similar to :py:func:`~PIL.Image.frombytes`, but uses data
in the byte buffer, where possible. This means that changes to the
    original buffer object are reflected in this image. Not all modes can
share memory; supported modes include "L", "RGBX", "RGBA", and "CMYK".
Note that this function decodes pixel data only, not entire images.
If you have an entire image file in a string, wrap it in a
**BytesIO** object, and use :py:func:`~PIL.Image.open` to load it.
In the current version, the default parameters used for the "raw" decoder
    differ from those used for :py:func:`~PIL.Image.fromstring`. This is a
bug, and will probably be fixed in a future release. The current release
issues a warning if you do this; to disable the warning, you should provide
the full set of parameters. See below for details.
:param mode: The image mode.
:param size: The image size.
:param data: A bytes or other buffer object containing raw
data for the given mode.
:param decoder_name: What decoder to use.
:param args: Additional parameters for the given decoder. For the
default encoder ("raw"), it's recommended that you provide the
full set of parameters::
frombuffer(mode, size, data, "raw", mode, 0, 1)
:returns: An :py:class:`~PIL.Image.Image` object.
    .. versionadded:: 1.1.4
    """
# may pass tuple instead of argument list
if len(args) == 1 and isinstance(args[0], tuple):
args = args[0]
if decoder_name == "raw":
if args == ():
if warnings:
warnings.warn(
"the frombuffer defaults may change in a future release; "
"for portability, change the call to read:\n"
" frombuffer(mode, size, data, 'raw', mode, 0, 1)",
RuntimeWarning, stacklevel=2
)
args = mode, 0, -1 # may change to (mode, 0, 1) post-1.1.6
if args[0] in _MAPMODES:
im = new(mode, (1,1))
im = im._new(
core.map_buffer(data, size, decoder_name, None, 0, args)
)
im.readonly = 1
return im
return frombytes(mode, size, data, decoder_name, args)
def fromarray(obj, mode=None):
"""
Creates an image memory from an object exporting the array interface
(using the buffer protocol).
If obj is not contiguous, then the tobytes method is called
and :py:func:`~PIL.Image.frombuffer` is used.
:param obj: Object with array interface
:param mode: Mode to use (will be determined from type if None)
:returns: An image memory.
.. versionadded:: 1.1.6
"""
arr = obj.__array_interface__
shape = arr['shape']
ndim = len(shape)
try:
strides = arr['strides']
except KeyError:
strides = None
if mode is None:
try:
typekey = (1, 1) + shape[2:], arr['typestr']
mode, rawmode = _fromarray_typemap[typekey]
except KeyError:
# print typekey
raise TypeError("Cannot handle this data type")
else:
rawmode = mode
if mode in ["1", "L", "I", "P", "F"]:
ndmax = 2
elif mode == "RGB":
ndmax = 3
else:
ndmax = 4
if ndim > ndmax:
raise ValueError("Too many dimensions: %d > %d." % (ndim, ndmax))
size = shape[1], shape[0]
if strides is not None:
if hasattr(obj, 'tobytes'):
obj = obj.tobytes()
else:
obj = obj.tostring()
return frombuffer(mode, size, obj, "raw", rawmode, 0, 1)
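# Editor's sketch (not part of the original module): round-tripping a numpy array,
# assuming numpy is installed in the caller's environment.
def _fromarray_usage_sketch():
    import numpy
    arr = numpy.zeros((10, 20, 3), dtype=numpy.uint8)   # rows x cols x bands
    im = fromarray(arr)                                  # -> 20x10 "RGB" image
    return im.size == (20, 10)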
_fromarray_typemap = {
# (shape, typestr) => mode, rawmode
# first two members of shape are set to one
# ((1, 1), "|b1"): ("1", "1"), # broken
((1, 1), "|u1"): ("L", "L"),
((1, 1), "|i1"): ("I", "I;8"),
((1, 1), "<i2"): ("I", "I;16"),
((1, 1), ">i2"): ("I", "I;16B"),
((1, 1), "<i4"): ("I", "I;32"),
((1, 1), ">i4"): ("I", "I;32B"),
((1, 1), "<f4"): ("F", "F;32F"),
((1, 1), ">f4"): ("F", "F;32BF"),
((1, 1), "<f8"): ("F", "F;64F"),
((1, 1), ">f8"): ("F", "F;64BF"),
((1, 1, 3), "|u1"): ("RGB", "RGB"),
((1, 1, 4), "|u1"): ("RGBA", "RGBA"),
}
# shortcuts
_fromarray_typemap[((1, 1), _ENDIAN + "i4")] = ("I", "I")
_fromarray_typemap[((1, 1), _ENDIAN + "f4")] = ("F", "F")
def open(fp, mode="r"):
"""
Opens and identifies the given image file.
This is a lazy operation; this function identifies the file, but the
actual image data is not read from the file until you try to process
the data (or call the :py:meth:`~PIL.Image.Image.load` method).
See :py:func:`~PIL.Image.new`.
:param file: A filename (string) or a file object. The file object
must implement :py:meth:`~file.read`, :py:meth:`~file.seek`, and
:py:meth:`~file.tell` methods, and be opened in binary mode.
:param mode: The mode. If given, this argument must be "r".
:returns: An :py:class:`~PIL.Image.Image` object.
:exception IOError: If the file cannot be found, or the image cannot be
opened and identified.
"""
if mode != "r":
raise ValueError("bad mode %r" % mode)
if isPath(fp):
filename = fp
fp = builtins.open(fp, "rb")
else:
filename = ""
prefix = fp.read(16)
preinit()
for i in ID:
try:
factory, accept = OPEN[i]
if not accept or accept(prefix):
fp.seek(0)
return factory(fp, filename)
except (SyntaxError, IndexError, TypeError):
#import traceback
#traceback.print_exc()
pass
if init():
for i in ID:
try:
factory, accept = OPEN[i]
if not accept or accept(prefix):
fp.seek(0)
return factory(fp, filename)
except (SyntaxError, IndexError, TypeError):
#import traceback
#traceback.print_exc()
pass
raise IOError("cannot identify image file %r"
% (filename if filename else fp))
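# Editor's sketch (not part of the original module): the lazy open/load protocol
# described above, for a hypothetical file name.
def _open_usage_sketch(path="example.png"):
    im = open(path)      # identifies the file; raises IOError if it cannot
    im.load()            # actually decodes the pixel data
    return im.format, im.size, im.mode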
#
# Image processing.
def alpha_composite(im1, im2):
"""
Alpha composite im2 over im1.
:param im1: The first image.
:param im2: The second image. Must have the same mode and size as
the first image.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
im1.load()
im2.load()
return im1._new(core.alpha_composite(im1.im, im2.im))
def blend(im1, im2, alpha):
"""
Creates a new image by interpolating between two input images, using
a constant alpha.::
out = image1 * (1.0 - alpha) + image2 * alpha
:param im1: The first image.
:param im2: The second image. Must have the same mode and size as
the first image.
:param alpha: The interpolation alpha factor. If alpha is 0.0, a
copy of the first image is returned. If alpha is 1.0, a copy of
the second image is returned. There are no restrictions on the
alpha value. If necessary, the result is clipped to fit into
the allowed output range.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
im1.load()
im2.load()
return im1._new(core.blend(im1.im, im2.im, alpha))
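# Editor's sketch (not part of the original module): a 50/50 cross-fade using the
# interpolation formula quoted in blend()'s docstring.
def _blend_usage_sketch():
    a = new("L", (8, 8), 0)
    b = new("L", (8, 8), 200)
    return blend(a, b, 0.5)   # every pixel becomes 0 * 0.5 + 200 * 0.5 = 100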
def composite(image1, image2, mask):
"""
Create composite image by blending images using a transparency mask.
:param image1: The first image.
:param image2: The second image. Must have the same mode and
size as the first image.
    :param mask: A mask image. This image can have mode
"1", "L", or "RGBA", and must have the same size as the
other two images.
"""
image = image2.copy()
image.paste(image1, None, mask)
return image
def eval(image, *args):
"""
Applies the function (which should take one argument) to each pixel
in the given image. If the image has more than one band, the same
function is applied to each band. Note that the function is
evaluated once for each possible pixel value, so you cannot use
random components or other generators.
:param image: The input image.
:param function: A function object, taking one integer argument.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
return image.point(args[0])
def merge(mode, bands):
"""
Merge a set of single band images into a new multiband image.
:param mode: The mode to use for the output image.
:param bands: A sequence containing one single-band image for
each band in the output image. All bands must have the
same size.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
if getmodebands(mode) != len(bands) or "*" in mode:
raise ValueError("wrong number of bands")
for im in bands[1:]:
if im.mode != getmodetype(mode):
raise ValueError("mode mismatch")
if im.size != bands[0].size:
raise ValueError("size mismatch")
im = core.new(mode, bands[0].size)
for i in range(getmodebands(mode)):
bands[i].load()
im.putband(bands[i].im, i)
return bands[0]._new(im)
# --------------------------------------------------------------------
# Plugin registry
def register_open(id, factory, accept=None):
"""
Register an image file plugin. This function should not be used
in application code.
:param id: An image format identifier.
:param factory: An image file factory method.
:param accept: An optional function that can be used to quickly
reject images having another format.
"""
id = id.upper()
ID.append(id)
OPEN[id] = factory, accept
def register_mime(id, mimetype):
"""
Registers an image MIME type. This function should not be used
in application code.
:param id: An image format identifier.
:param mimetype: The image MIME type for this format.
"""
MIME[id.upper()] = mimetype
def register_save(id, driver):
"""
Registers an image save function. This function should not be
used in application code.
:param id: An image format identifier.
:param driver: A function to save images in this format.
"""
SAVE[id.upper()] = driver
def register_extension(id, extension):
"""
Registers an image extension. This function should not be
used in application code.
:param id: An image format identifier.
:param extension: An extension used for this format.
"""
EXTENSION[extension.lower()] = id.upper()
# --------------------------------------------------------------------
# Simple display support. User code may override this.
def _show(image, **options):
# override me, as necessary
_showxv(image, **options)
def _showxv(image, title=None, **options):
from PIL import ImageShow
ImageShow.show(image, title, **options)
| {
"content_hash": "68cb85cec464d79dfb55603c64ba7fa1",
"timestamp": "",
"source": "github",
"line_count": 2260,
"max_line_length": 105,
"avg_line_length": 33.15486725663717,
"alnum_prop": 0.5632857333511277,
"repo_name": "SurfasJones/icecream-info",
"id": "18d2c8267cf188f761771d645cb0294de3165439",
"size": "75652",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "icecream/lib/python2.7/site-packages/PIL/Image.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "288937"
},
{
"name": "JavaScript",
"bytes": "589933"
},
{
"name": "PHP",
"bytes": "2274"
},
{
"name": "Python",
"bytes": "18137514"
},
{
"name": "Ruby",
"bytes": "990"
},
{
"name": "Shell",
"bytes": "10274"
},
{
"name": "TeX",
"bytes": "56626"
},
{
"name": "XSLT",
"bytes": "5122"
}
],
"symlink_target": ""
} |
"""Remove the contents of the file system on a MicroPython device.
Usage:
microcleanfs PORT [options]
Options:
-f --force Remove ALL files from filesystem w/o preserving excludes
"""
import traceback
from typing import List
import sys
import time
from ampy.files import Files
from ampy.pyboard import Pyboard, PyboardError
from docopt import docopt
def main(args: List[str]) -> None:
opts = docopt(__doc__, argv=args)
port = opts['PORT']
force = opts['--force']
print('Connecting to {}'.format(port), file=sys.stderr)
board = Pyboard(port)
files = Files(board)
# Specifying subdirectories DOES NOT work as they will be deleted when the
# parent directory is deleted. Specifying top level directories DOES work.
exclude_files = ['boot.py']
print('Removing the contents of the file system')
wait_for_board()
for name in files.ls(long_format=False):
if force or name not in exclude_files:
try:
files.rm(name)
except (RuntimeError, PyboardError):
try:
files.rmdir(name)
except (RuntimeError, PyboardError):
print('Unknown Error removing file {}'.format(name),
file=sys.stderr)
print('Done')
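# Editor's sketch (not part of the original script): invoking main() directly with a
# hypothetical serial port, mirroring the docopt usage string above --
#     main(['/dev/ttyUSB0', '--force'])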
def wait_for_board() -> None:
"""Wait for some ESP8266 devices to become ready for REPL commands."""
time.sleep(0.5)
if __name__ == '__main__':
try:
main(sys.argv[1:])
exitcode = 0
except:
traceback.print_exc()
exitcode = 1
finally:
input('Press ENTER to continue')
sys.exit(exitcode)
| {
"content_hash": "7bd119e0073e164bf6413518feb3f7db",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 78,
"avg_line_length": 26.015625,
"alnum_prop": 0.6132132132132132,
"repo_name": "vlasovskikh/intellij-micropython",
"id": "f661ea621bc0c34189ad03d44029648bbaf9823c",
"size": "2248",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/microcleanfs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "135"
},
{
"name": "Kotlin",
"bytes": "59123"
},
{
"name": "Python",
"bytes": "859927"
}
],
"symlink_target": ""
} |
"""Pibooth base states.
"""
import traceback
from pibooth.utils import LOGGER, BlockConsoleHandler
class StateMachine(object):
def __init__(self, plugins_manager, configuration, application, window):
self.states = set()
self.failsafe_state = None
self.active_state = None
# Share the application to manage between states
self.app = application
self.win = window
self.cfg = configuration
self.pm = plugins_manager
def add_state(self, name):
"""Add a state to the internal dictionary.
"""
self.states.add(name)
def add_failsafe_state(self, name):
"""Add a state that will be call in case of exception.
"""
self.failsafe_state = name
self.states.add(name)
def remove_state(self, name):
"""Remove a state to the internal dictionary.
"""
self.states.discard(name)
if name == self.failsafe_state:
self.failsafe_state = None
def process(self, events):
"""Let the current state do it's thing.
"""
# Only continue if there is an active state
if self.active_state is None:
return
try:
# Perform the actions of the active state
hook = getattr(self.pm.hook, 'state_{}_do'.format(self.active_state))
hook(cfg=self.cfg, app=self.app, win=self.win, events=events)
# Check conditions to activate the next state
hook = getattr(self.pm.hook, 'state_{}_validate'.format(self.active_state))
new_state_name = hook(cfg=self.cfg, app=self.app, win=self.win, events=events)
except Exception as ex:
if self.failsafe_state and self.active_state != self.failsafe_state:
LOGGER.error(str(ex))
if BlockConsoleHandler.is_debug():
traceback.print_exc()
new_state_name = self.failsafe_state
else:
raise
if new_state_name is not None:
self.set_state(new_state_name)
def set_state(self, state_name):
"""Change state machine's active state
"""
try:
# Perform any exit actions of the current state
if self.active_state is not None:
hook = getattr(self.pm.hook, 'state_{}_exit'.format(self.active_state))
hook(cfg=self.cfg, app=self.app, win=self.win)
except Exception as ex:
if self.failsafe_state and self.active_state != self.failsafe_state:
LOGGER.error(str(ex))
if BlockConsoleHandler.is_debug():
traceback.print_exc()
state_name = self.failsafe_state
else:
raise
if state_name not in self.states:
raise ValueError('"{}" not in registered states...'.format(state_name))
# Switch to the new state and perform its entry actions
LOGGER.debug("Activate state '%s'", state_name)
self.active_state = state_name
try:
hook = getattr(self.pm.hook, 'state_{}_enter'.format(self.active_state))
hook(cfg=self.cfg, app=self.app, win=self.win)
except Exception as ex:
if self.failsafe_state and self.active_state != self.failsafe_state:
LOGGER.error(str(ex))
if BlockConsoleHandler.is_debug():
traceback.print_exc()
self.set_state(self.failsafe_state)
else:
raise
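# Editor's sketch (not part of pibooth): typical wiring of the machine, assuming an
# already-configured plugin manager `pm`, config `cfg`, application `app`, window
# `win` and a pygame-style event list --
#     machine = StateMachine(pm, cfg, app, win)
#     machine.add_state('wait')
#     machine.add_failsafe_state('failsafe')
#     machine.set_state('wait')
#     while True:
#         machine.process(events)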
| {
"content_hash": "bf7e171dd67e5658339fcc3c2d8a0fae",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 90,
"avg_line_length": 35.7,
"alnum_prop": 0.5719887955182072,
"repo_name": "werdeil/pibooth",
"id": "2e5c420c20d68778a5464209e06dfb2a389e6137",
"size": "3595",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pibooth/states.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "198628"
},
{
"name": "Shell",
"bytes": "1980"
}
],
"symlink_target": ""
} |
from __future__ import print_function, unicode_literals
import re
from wee_slack import linkify_text
def test_linkifytext_does_partial_html_entity_encoding(team):
text = linkify_text("& < > ' \"", team)
    assert text == "&amp; &lt; &gt; ' \""
def test_linkifytext_names_with_paranthesis(team):
text = linkify_text("@JohnDoe(jdoe): my test message", team)
assert text == "@JohnDoe(jdoe): my test message"
def test_linkifytext_names_with_accents(team):
text = linkify_text("@ÁrvíztűrőTükörfúrógép(atukorfurogep): my test message", team)
assert text == "@ÁrvíztűrőTükörfúrógép(atukorfurogep): my test message"
def test_linkifytext_formatting_characters(team):
text = linkify_text("\x02\x1Dmy test message\x1D\x02", team)
assert text == "*_my test message_*"
def test_linkifytext_with_many_paranthesis(team):
text = linkify_text("@k(o(v)a)())s: my(( test) message", team)
assert text == "@k(o(v)a)())s: my(( test) message"
def test_linkifytext_names_with_apostrophe(team):
text = linkify_text("@O'Connor: my test message", team)
assert text == "@O'Connor: my test message"
def test_linkifytext_names_with_subgroup_notification(team):
subteam = team.subteams["TGX0ALBK3"]
message = "This is a message for a subteam"
text = linkify_text("{}: {}".format(subteam.handle, message), team)
assert text == "<!subteam^{}|{}>: {}".format(
subteam.identifier, subteam.handle, message
)
def test_linkifytext_at_channel(team):
text = linkify_text("@channel: my test message", team)
assert text == "<!channel>: my test message"
def test_linkifytext_at_everyone(team):
text = linkify_text("@everyone: my test message", team)
assert text == "<!everyone>: my test message"
def test_linkifytext_at_group(team):
text = linkify_text("@group: my test message", team)
assert text == "<!group>: my test message"
def test_linkifytext_at_here(team):
text = linkify_text("@here: my test message", team)
assert text == "<!here>: my test message"
def test_linkifytext_channel(team, channel_general):
channel_name = re.sub(r"^[#&]", "", channel_general.name)
text = linkify_text("#{}: my test message".format(channel_name), team)
assert text == "<#{}|{}>: my test message".format(channel_general.id, channel_name)
def test_linkifytext_not_private_using_hash(team, channel_private):
channel_name = re.sub(r"^[#&]", "", channel_private.name)
text = linkify_text("#{}: my test message".format(channel_name), team)
assert text == "#{}: my test message".format(channel_name)
def test_linkifytext_not_private_using_ampersand(team, channel_private):
channel_name = re.sub(r"^[#&]", "", channel_private.name)
text = linkify_text("&{}: my test message".format(channel_name), team)
assert text == "&{}: my test message".format(channel_name)
def test_linkifytext_not_dm(team, channel_dm):
text = linkify_text("#{}: my test message".format(channel_dm.name), team)
assert text == "#{}: my test message".format(channel_dm.name)
def test_linkifytext_not_mpdm(team, channel_mpdm):
text = linkify_text("#{}: my test message".format(channel_mpdm.name), team)
assert text == "#{}: my test message".format(channel_mpdm.name)
| {
"content_hash": "8c8f377dda61ac230d94d693f487d16a",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 87,
"avg_line_length": 30.40740740740741,
"alnum_prop": 0.6674786845310596,
"repo_name": "rawdigits/wee-slack",
"id": "60bf92354cddae25e3c8a6619809bef6379b2a46",
"size": "3327",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "_pytest/test_linkifytext.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "100439"
}
],
"symlink_target": ""
} |
from config import Config
import tensorflow as tf
import functools
from util.model import Model, conv2d
def get_model(name):
name = functools.partial('{}-{}'.format, name)
self_pos = tf.placeholder(Config.dtype, Config.data_shape, name='self_pos')
self_ability = tf.placeholder(Config.dtype, Config.data_shape, name='self_ability')
enemy_pos = tf.placeholder(Config.dtype, Config.data_shape, name='enemy_pos')
input_label = tf.placeholder(Config.dtype, Config.label_shape, name='input_label')
x = tf.concat(3, [self_pos, self_ability, enemy_pos], name=name('input_concat'))
y = input_label
nl = tf.nn.tanh
def conv_pip(name, x):
name = functools.partial('{}_{}'.format, name)
x = conv2d(name('0'), x, Config.data_shape[3]*2, kernel=3, stride=1, nl=nl)
x = conv2d(name('1'), x, Config.data_shape[3], kernel=3, stride=1, nl=nl)
return x
pred = conv_pip(name('conv0'), x)
for layer in range(5):
pred_branch = tf.concat(3, [pred,x], name=name('concate%d'%layer))
pred += conv_pip(name('conv%d'%(layer+1)), pred_branch)
x = tf.tanh(pred, name=name('control_tanh'))
z = tf.mul(tf.exp(x), self_ability)
z_sum = tf.reduce_sum(z, reduction_indices=[1,2,3], name=name('partition_function')) # partition function
    # cross-entropy -sum(y * log(p)) rewritten as -sum(x * y) + log(Z) (log-sum-exp form)
loss = -tf.reduce_sum(tf.mul(x, y), reduction_indices=[1,2,3]) + tf.log(z_sum)
z_sum = tf.reshape(z_sum, [-1, 1, 1, 1])
pred = tf.div(z, z_sum, name=name('predict'))
return Model([self_pos, self_ability, enemy_pos], input_label, loss, pred, debug=z)
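# Editor's note (not in the original file): the loss built above is a masked softmax
# cross-entropy in log-sum-exp form.  With mask m = self_ability,
#     p = m * exp(x) / sum(m * exp(x)),   loss = -sum(x * y) + log(sum(m * exp(x)))
# which equals -sum(y * log(p)) whenever y is a one-hot label on a legal (m == 1) move.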
if __name__=='__main__':
model = get_model('test')
sess = tf.InteractiveSession()
sess.run(tf.initialize_all_variables())
import numpy as np
x_data = np.random.randint(2, size=[3,100,9,10,16]).astype('float32')
y_data = np.random.randint(2, size=[100,9,10,16]).astype('float32')
input_dict = {}
for var, data in zip(model.inputs, x_data):
input_dict[var] = data
input_dict[model.label] = y_data
loss_val = model.loss.eval(feed_dict=input_dict)
pred_val = model.pred.eval(feed_dict=input_dict)
print(loss_val)
# print(pred_val)
pred_val = pred_val.reshape(pred_val.shape[0], -1)
    assert all(abs(pred_val.sum(axis=1) - 1.0) < 1e-6)
print('model test OK')
| {
"content_hash": "e9aa383ef8e047c5279d3eb96e757fd9",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 109,
"avg_line_length": 35.484848484848484,
"alnum_prop": 0.6323654995730145,
"repo_name": "milkpku/BetaElephant",
"id": "ff490f714e1ed199bf56f972f293df504eb65f52",
"size": "2477",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "policy_experiment/policy.resNet/model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "51805"
},
{
"name": "C++",
"bytes": "233080"
},
{
"name": "Makefile",
"bytes": "387"
},
{
"name": "PureBasic",
"bytes": "207514"
},
{
"name": "Python",
"bytes": "258307"
},
{
"name": "Shell",
"bytes": "616"
}
],
"symlink_target": ""
} |
from numpy import inf, hstack, invert
from pyface.confirmation_dialog import confirm
from pyface.constant import YES
# ============= enthought library imports =======================
from traits.api import Bool, List
from pychron.core.helpers.iterfuncs import groupby_group_id
from pychron.core.progress import progress_loader
from pychron.options.options_manager import (
BlanksOptionsManager,
ICFactorOptionsManager,
IsotopeEvolutionOptionsManager,
FluxOptionsManager,
DefineEquilibrationOptionsManager,
)
from pychron.options.views.views import view
from pychron.pipeline.editors.define_equilibration_editor import (
DefineEquilibrationResultsEditor,
)
from pychron.pipeline.editors.flux_results_editor import (
FluxResultsEditor,
BracketingFluxResultsEditor,
)
from pychron.pipeline.editors.results_editor import IsoEvolutionResultsEditor
from pychron.pipeline.nodes.figure import FigureNode
from pychron.pipeline.results.define_equilibration import DefineEquilibrationResult
from pychron.pipeline.results.iso_evo import IsoEvoResult
from pychron.pipeline.state import get_detector_set, get_isotope_pairs_set
from pychron.pychron_constants import NULL_STR
class RefitException(BaseException):
pass
class FitNode(FigureNode):
use_save_node = Bool(True)
_fits = List
_keys = List
# has_save_node = False
def _set_additional_options(self, state):
pass
def _set_saveable(self, state):
ps = self.plotter_options.get_saveable_aux_plots()
state.saveable_keys = [p.name for p in ps]
state.saveable_fits = [p.fit for p in ps]
def _get_valid_unknowns(self, unks):
if self.plotter_options.analysis_types:
unks = [
u
for u in unks
if not u.is_omitted()
and u.analysis_type in self.plotter_options.analysis_types
]
return unks
def check_refit(self, unks):
unks = self._get_valid_unknowns(unks)
for ui in unks:
try:
if self._check_refit(ui):
break
except RefitException:
return False
else:
if confirm(None, self._refit_message) == YES:
return True
def _check_refit(self, ai):
pass
class FitReferencesNode(FitNode):
basename = None
auto_set_items = False
def run(self, state):
po = self.plotter_options
self._fits = list(reversed([pi for pi in po.get_saveable_aux_plots()]))
self._keys = [fi.name for fi in self._fits]
unks = self._get_valid_unknowns(state.unknowns)
if self.check_refit(unks):
return
super(FitReferencesNode, self).run(state)
if state.canceled:
return
# self.plotter_options.set_detectors(state.union_detectors)
if state.references:
for i, (gid, refs) in enumerate(groupby_group_id(state.references)):
if i == 0:
editor = self.editor
else:
editor = self._editor_factory()
state.editors.append(editor)
unks = [u for u in unks if u.group_id == gid]
editor.set_items(unks, compress=False)
# if self.plotter_options.use_restricted_references:
# refas = self._get_reference_analysis_types()
# if refas:
# refs = [r for r in refs if r.analysis_type in refas]
editor.set_references(list(refs))
self._set_saveable(state)
self._set_additional_options(state)
self.editor.force_update(force=True)
def _get_reference_analysis_types(self):
return []
class FitBlanksNode(FitReferencesNode):
editor_klass = "pychron.pipeline.plot.editors.blanks_editor,BlanksEditor"
plotter_options_manager_klass = BlanksOptionsManager
name = "Fit Blanks"
_refit_message = "The selected Isotopes have already been fit for blanks. Would you like to skip refitting?"
def _check_refit(self, ai):
for k in self._keys:
i = ai.get_isotope(k)
if not i.blank.reviewed:
return True
def _get_reference_analysis_types(self):
return ["blank_{}".format(a) for a in self.plotter_options.analysis_types]
def _options_view_default(self):
return view("Blanks Options")
def _configure_hook(self):
pom = self.plotter_options_manager
if self.unknowns:
unk = self.unknowns[0]
names = unk.isotope_keys
if names:
names = [NULL_STR] + names
pom.set_names(names)
atypes = list({a.analysis_type for a in self.unknowns})
pom.set_analysis_types(atypes)
if self.references:
atypes = list({a.analysis_type for a in self.references})
pom.set_reference_types(atypes)
ATTRS = ("numerator", "denominator", "standard_ratio", "analysis_type")
class FitICFactorNode(FitReferencesNode):
editor_klass = (
"pychron.pipeline.plot.editors.intercalibration_factor_editor,"
"IntercalibrationFactorEditor"
)
plotter_options_manager_klass = ICFactorOptionsManager
name = "Fit ICFactor"
predefined = List
_refit_message = "The selected IC Factors have already been fit. Would you like to skip refitting?"
def _editor_factory(self):
e = super(FitICFactorNode, self)._editor_factory()
a = self.plotter_options.aux_plots[0]
e.references_name = a.analysis_type
return e
def _get_reference_analysis_types(self):
return ["air", "cocktail"]
def _options_view_default(self):
return view("ICFactor Options")
def _configure_hook(self):
udets = get_isotope_pairs_set(self.unknowns)
rdets = get_isotope_pairs_set(self.references)
dets = list(udets.union(rdets))
udets = get_detector_set(self.unknowns)
rdets = get_detector_set(self.references)
dets.extend(list(udets.union(rdets)))
self.plotter_options_manager.set_detectors(dets)
def _set_additional_options(self, state):
state.delete_existing_icfactors = self.plotter_options.delete_existing
state.use_source_correction = self.plotter_options.use_source_correction
def _set_saveable(self, state):
super(FitICFactorNode, self)._set_saveable(state)
ps = self.plotter_options.get_saveable_aux_plots()
state.saveable_keys = [p.denominator for p in ps]
state.standard_ratios = [p.standard_ratio for p in ps]
def _check_refit(self, ai):
for k in self._keys:
num, dem = k.split("/")
i = ai.get_isotope(detector=dem)
if i is not None:
if not i.ic_factor_reviewed:
return True
else:
from pyface.message_dialog import warning
warning(
None,
"Data for detector {} is missing from {}".format(dem, ai.record_id),
)
raise RefitException()
def load(self, nodedict):
pom = self.plotter_options_manager
if pom.selected_options.name == "Default":
try:
fits = nodedict["fits"]
except KeyError as e:
return
self.plotter_options = pom.selected_options
self.plotter_options.set_aux_plots(fits)
def _to_template(self, d):
d["fits"] = [
{
"numerator": a.numerator,
"denominator": a.denominatior,
"standard_ratio": a.standard_ratio,
"analysis_type": a.analysis_type,
}
for a in self.plotter_options.aux_plots
]
class FitIsotopeEvolutionNode(FitNode):
editor_klass = (
"pychron.pipeline.plot.editors.isotope_evolution_editor,"
"IsotopeEvolutionEditor"
)
plotter_options_manager_klass = IsotopeEvolutionOptionsManager
name = "Fit IsoEvo"
use_plotting = False
_refit_message = "The selected Isotope Evolutions have already been fit. Would you like to skip refitting?"
def _check_refit(self, analysis):
for k in self._keys:
i = analysis.get_isotope(k)
if i is None:
i = analysis.get_isotope(detector=k)
if i is None:
print('invalid isotope "{}"'.format(k), analysis.isotope_keys)
continue
if not i.reviewed:
return True
def _options_view_default(self):
return view("Iso Evo Options")
def _configure_hook(self):
pom = self.plotter_options_manager
if self.unknowns:
unk = self.unknowns[0]
names = unk.isotope_keys
if names:
dets = unk.detector_keys
if dets:
names.extend(dets)
names.insert(0, NULL_STR)
pom.set_names(names)
atypes = list({a.analysis_type for a in self.unknowns})
pom.set_analysis_types(atypes)
def run(self, state):
super(FitIsotopeEvolutionNode, self).run(state)
po = self.plotter_options
self._fits = list(reversed([pi for pi in po.get_saveable_aux_plots()]))
self._keys = [fi.name for fi in self._fits]
unks = self._get_valid_unknowns(state.unknowns)
if unks:
if self.check_refit(unks):
return
fs = progress_loader(unks, self._assemble_result, threshold=1, step=10)
if self.editor:
self.editor.analysis_groups = [(ai,) for ai in unks]
self._set_saveable(state)
if fs:
e = IsoEvolutionResultsEditor(fs, self._fits)
state.editors.append(e)
def _assemble_result(self, xi, prog, i, n):
if prog:
prog.change_message("Load raw data {}".format(xi.record_id))
fits = self._fits
xi.load_raw_data(self._keys)
xi.set_fits(fits)
isotopes = xi.isotopes
for f in fits:
k = f.name
if k in isotopes:
iso = isotopes[k]
else:
iso = xi.get_isotope(detector=k, kind="baseline")
if iso:
i, e = iso.value, iso.error
try:
pe = abs(e / i * 100)
except ZeroDivisionError:
pe = inf
                smart_filter_coefficients = f.get_filter_coefficients()
                smart_filter_threshold = None
                smart_filter_goodness = None
                if smart_filter_coefficients:
                    smart_filter_threshold = f.smart_filter_values(i)
                    smart_filter_goodness = e < smart_filter_threshold
goodness_threshold = f.goodness_threshold
int_err_goodness = None
if goodness_threshold:
int_err_goodness = bool(pe < goodness_threshold)
signal_to_baseline_threshold = f.signal_to_baseline_goodness
signal_to_baseline_percent_threshold = (
f.signal_to_baseline_percent_goodness
)
signal_to_baseline_goodness = None
signal_to_baseline = 0
if hasattr(iso, "baseline"):
bs = iso.baseline.error
signal_to_baseline = abs(bs / i * 100)
if (
signal_to_baseline_threshold
and signal_to_baseline_percent_threshold
):
if signal_to_baseline > signal_to_baseline_threshold:
signal_to_baseline_goodness = bool(
pe < signal_to_baseline_percent_threshold
)
slope = iso.get_slope()
slope_goodness = None
slope_threshold = None
if f.slope_goodness:
if f.slope_goodness_intensity < i:
slope_threshold = f.slope_goodness
slope_goodness = bool(slope < 0 or slope < slope_threshold)
outliers = iso.noutliers()
outliers_threshold = None
outlier_goodness = None
if f.outlier_goodness:
outliers_threshold = f.outlier_goodness
outlier_goodness = bool(outliers < f.outlier_goodness)
curvature_goodness = None
curvature = 0
curvature_threshold = None
if f.curvature_goodness:
curvature = iso.get_curvature(f.curvature_goodness_at)
curvature_threshold = f.curvature_goodness
curvature_goodness = curvature < curvature_threshold
nstr = str(iso.n)
if iso.noutliers():
nstr = "{}({})".format(iso.n - iso.noutliers(), nstr)
rsquared = iso.rsquared_adj
rsquared_goodness = None
rsquared_threshold = 0
if f.rsquared_goodness:
rsquared_threshold = f.rsquared_goodness
rsquared_goodness = rsquared > rsquared_threshold
if hasattr(iso, "blank"):
signal_to_blank = iso.blank.value / iso.value * 100
else:
signal_to_blank = 0
signal_to_blank_goodness = None
signal_to_blank_threshold = 0
if f.signal_to_blank_goodness:
signal_to_blank_threshold = f.signal_to_blank_goodness
signal_to_blank_goodness = (
signal_to_blank < signal_to_blank_threshold
)
yield IsoEvoResult(
analysis=xi,
isotope_obj=iso,
nstr=nstr,
intercept_value=i,
intercept_error=e,
normalized_error=e * iso.n ** 0.5,
percent_error=pe,
int_err=pe,
int_err_threshold=goodness_threshold,
int_err_goodness=int_err_goodness,
slope=slope,
slope_threshold=slope_threshold,
slope_goodness=slope_goodness,
outlier=outliers,
outlier_threshold=outliers_threshold,
outlier_goodness=outlier_goodness,
curvature=curvature,
curvature_threshold=curvature_threshold,
curvature_goodness=curvature_goodness,
rsquared=rsquared,
rsquared_threshold=rsquared_threshold,
rsquared_goodness=rsquared_goodness,
signal_to_blank=signal_to_blank,
signal_to_blank_threshold=signal_to_blank_threshold,
signal_to_blank_goodness=signal_to_blank_goodness,
signal_to_baseline=signal_to_baseline,
signal_to_baseline_goodness=signal_to_baseline_goodness,
signal_to_baseline_threshold=signal_to_baseline_threshold,
signal_to_baseline_percent_threshold=signal_to_baseline_percent_threshold,
smart_filter_goodness=smart_filter_goodness,
smart_filter_threshold=smart_filter_threshold,
smart_filter=e,
regression_str=iso.regressor.tostring(),
fit=iso.fit,
isotope=k,
)
class DefineEquilibrationNode(FitNode):
name = "Define Equilibration"
plotter_options_manager_klass = DefineEquilibrationOptionsManager
use_plotting = False
_refit_message = "The selected Equilibrations have already been fit. Would you like to skip refitting?"
def _configure_hook(self):
pom = self.plotter_options_manager
if self.unknowns:
unk = self.unknowns[0]
names = unk.isotope_keys
names.insert(0, NULL_STR)
pom.set_names(names)
def run(self, state):
super(DefineEquilibrationNode, self).run(state)
po = self.plotter_options
self._fits = list(reversed([pi for pi in po.get_saveable_aux_plots()]))
self._keys = [fi.name for fi in self._fits]
unks = state.unknowns
fs = progress_loader(unks, self._assemble_result, threshold=1, step=10)
self._set_saveable(state)
if fs:
e = DefineEquilibrationResultsEditor(fs, options=po)
state.editors.append(e)
def _set_saveable(self, state):
ps = self.plotter_options.get_saveable_aux_plots()
state.saveable_keys = [p.name for p in ps]
state.saveable_fits = [p.equilibration_time for p in ps]
def _assemble_result(self, xi, prog, i, n):
fits = self._fits
xi.load_raw_data(self._keys)
delay = xi.admit_delay
isotopes = xi.isotopes
ks = []
eqs = []
for fi in fits:
k = fi.name
if k in isotopes:
iso = isotopes[k]
# recombine sniff and isotope data
xs = hstack((iso.sniff.xs, iso.xs))
ys = hstack((iso.sniff.ys, iso.ys))
# ex = eval('x', {'x': xs})
ex = xs < fi.equilibration_time - delay
iex = invert(ex)
# split data based on trunc criteria
sniff_xs = xs[ex]
iso_xs = xs[iex]
sniff_ys = ys[ex]
iso_ys = ys[iex]
iso.sniff.xs = sniff_xs
iso.sniff.ys = sniff_ys
iso.xs = iso_xs
iso.ys = iso_ys
ks.append(k)
eqs.append("{}({})".format(k, fi.equilibration_time))
if ks:
yield DefineEquilibrationResult(
analysis=xi, isotopes=ks, equilibration_times=",".join(eqs)
)
class FitFluxNode(FitNode):
name = "Fit Flux"
editor_klass = FluxResultsEditor
plotter_options_manager_klass = FluxOptionsManager
def _editor_factory(self):
if self.plotter_options.model_kind == "Bracketing":
klass = BracketingFluxResultsEditor
else:
klass = FluxResultsEditor
editor = klass()
editor.plotter_options = self.plotter_options
return editor
def _options_view_default(self):
return view("Flux Options")
def run(self, state):
super(FitFluxNode, self).run(state)
editor = self.editor
if not editor:
state.canceled = True
return
self.name = "Fit Flux {}".format(state.irradiation, state.level)
geom = state.geometry
monitors = state.unknowns
if monitors:
po = self.plotter_options
# lk = po.lambda_k
# state.decay_constants = {'lambda_k_total': lk, 'lambda_k_total_error': 0}
# state.error_calc_method = po.
# state.flux_fit = po.
state.flux_options = po
editor.plotter_options = po
editor.geometry = geom
editor.irradiation = state.irradiation
editor.level = state.level
editor.holder = state.holder
editor.set_positions(monitors, state.unknown_positions)
# state.saveable_irradiation_positions = editor.monitor_positions + state.unknown_positions
state.monitor_positions = editor.monitor_positions
editor.predict_values()
editor.name = "Flux: {}{}".format(state.irradiation, state.level)
# ============= EOF =============================================
| {
"content_hash": "7795a5e0ac22a9352d1eb12733b8594d",
"timestamp": "",
"source": "github",
"line_count": 578,
"max_line_length": 112,
"avg_line_length": 34.62975778546713,
"alnum_prop": 0.5564548361310951,
"repo_name": "NMGRL/pychron",
"id": "d5caf8753d96e2e8f4cc33e8dcf830dbfc00651a",
"size": "20750",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "pychron/pipeline/nodes/fit.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "128"
},
{
"name": "C++",
"bytes": "3706"
},
{
"name": "CSS",
"bytes": "263"
},
{
"name": "Cython",
"bytes": "1692"
},
{
"name": "Fortran",
"bytes": "455875"
},
{
"name": "HTML",
"bytes": "46796"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Processing",
"bytes": "11421"
},
{
"name": "Python",
"bytes": "10773692"
},
{
"name": "Shell",
"bytes": "1003"
}
],
"symlink_target": ""
} |
from platformio.platforms.base import BasePlatform
class AtmelsamPlatform(BasePlatform):
"""
Atmel | SMART offers Flash- based ARM products based on the ARM
Cortex-M0+, Cortex-M3 and Cortex-M4 architectures, ranging from 8KB
to 2MB of Flash including a rich peripheral and feature mix.
http://www.atmel.com/products/microcontrollers/arm/default.aspx
"""
PACKAGES = {
"toolchain-gccarmnoneeabi": {
"alias": "toolchain",
"default": True
},
"ldscripts": {
"default": True
},
"framework-arduinosam": {
"default": True
},
"tool-bossac": {
"alias": "uploader",
"default": True
}
}
def get_name(self):
return "Atmel SAM"
| {
"content_hash": "8fbc56f65d8abef56b3d3f1570018d9f",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 71,
"avg_line_length": 22.333333333333332,
"alnum_prop": 0.5621890547263682,
"repo_name": "TimJay/platformio",
"id": "e11d98a431a4d3f6f5bb10a02f9d3f8c538de9dd",
"size": "879",
"binary": false,
"copies": "9",
"ref": "refs/heads/develop",
"path": "platformio/platforms/atmelsam.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "20151"
},
{
"name": "C",
"bytes": "14270"
},
{
"name": "C++",
"bytes": "57106"
},
{
"name": "JavaScript",
"bytes": "465"
},
{
"name": "PowerShell",
"bytes": "2900"
},
{
"name": "Processing",
"bytes": "1536"
},
{
"name": "Python",
"bytes": "200199"
},
{
"name": "QMake",
"bytes": "365"
},
{
"name": "Shell",
"bytes": "1897"
}
],
"symlink_target": ""
} |
"""
pymc3.blocking
Classes for working with subsets of parameters.
"""
import numpy as np
import collections
__all__ = ['ArrayOrdering', 'DictToArrayBijection', 'DictToVarBijection']
VarMap = collections.namedtuple('VarMap', 'var, slc, shp')
# TODO Classes and methods need to be fully documented.
class ArrayOrdering(object):
"""
An ordering for an array space
"""
def __init__(self, vars):
self.vmap = []
dim = 0
for var in vars:
slc = slice(dim, dim + var.dsize)
self.vmap.append(VarMap(str(var), slc, var.dshape))
dim += var.dsize
self.dimensions = dim
class DictToArrayBijection(object):
"""
A mapping between a dict space and an array space
"""
def __init__(self, ordering, dpoint):
self.ordering = ordering
self.dpt = dpoint
def map(self, dpt):
"""
Maps value from dict space to array space
Parameters
----------
dpt : dict
"""
apt = np.empty(self.ordering.dimensions)
for var, slc, _ in self.ordering.vmap:
apt[slc] = dpt[var].ravel()
return apt
def rmap(self, apt):
"""
Maps value from array space to dict space
Parameters
----------
apt : array
"""
dpt = self.dpt.copy()
for var, slc, shp in self.ordering.vmap:
dpt[var] = apt[slc].reshape(shp)
return dpt
def mapf(self, f):
"""
function f : DictSpace -> T to ArraySpace -> T
Parameters
----------
f : dict -> T
Returns
-------
f : array -> T
"""
return Compose(f, self.rmap)
class DictToVarBijection(object):
"""
A mapping between a dict space and the array space for one element within the dict space
"""
def __init__(self, var, idx, dpoint):
self.var = str(var)
self.idx = idx
self.dpt = dpoint
def map(self, dpt):
return dpt[self.var][self.idx]
def rmap(self, apt):
dpt = self.dpt.copy()
dvar = dpt[self.var].copy()
dvar[self.idx] = apt
dpt[self.var] = dvar
return dpt
def mapf(self, f):
return Compose(f, self.rmap)
class Compose(object):
"""
Compose two functions in a pickleable way
"""
def __init__(self, fa, fb):
self.fa = fa
self.fb = fb
def __call__(self, x):
return self.fa(self.fb(x))
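if __name__ == '__main__':
    # Illustrative sketch, not part of the original module: pack a dict of
    # arrays into one flat vector and back again.  _FakeVar is a minimal
    # stand-in for a pymc3 variable; only the attributes ArrayOrdering
    # actually uses (dsize, dshape, __str__) are assumed here.
    class _FakeVar(object):
        def __init__(self, name, shape):
            self.name = name
            self.dshape = shape
            self.dsize = int(np.prod(shape))
        def __str__(self):
            return self.name
    mu = _FakeVar('mu', (2,))
    sigma = _FakeVar('sigma', (1,))
    ordering = ArrayOrdering([mu, sigma])
    start = {'mu': np.array([0.0, 1.0]), 'sigma': np.array([2.0])}
    bij = DictToArrayBijection(ordering, start)
    flat = bij.map(start)          # array([0., 1., 2.])
    roundtrip = bij.rmap(flat)     # back to {'mu': ..., 'sigma': ...}
    print(flat)
    print(roundtrip)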
| {
"content_hash": "8ae8220a77d212277f17ca7b874cf27d",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 92,
"avg_line_length": 21.19327731092437,
"alnum_prop": 0.5329103885804917,
"repo_name": "MCGallaspy/pymc3",
"id": "8283789c1db9073fff60d4a00e25cf1db6ed3189",
"size": "2522",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymc3/blocking.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "344749"
},
{
"name": "Shell",
"bytes": "194"
}
],
"symlink_target": ""
} |
"""
Scheduler queues
"""
import marshal
import pickle
from os import PathLike
from pathlib import Path
from typing import Union
from queuelib import queue
from scrapy.utils.deprecate import create_deprecated_class
from scrapy.utils.request import request_from_dict
def _with_mkdir(queue_class):
class DirectoriesCreated(queue_class):
def __init__(self, path: Union[str, PathLike], *args, **kwargs):
dirname = Path(path).parent
if not dirname.exists():
dirname.mkdir(parents=True, exist_ok=True)
super().__init__(path, *args, **kwargs)
return DirectoriesCreated
def _serializable_queue(queue_class, serialize, deserialize):
class SerializableQueue(queue_class):
def push(self, obj):
s = serialize(obj)
super().push(s)
def pop(self):
s = super().pop()
if s:
return deserialize(s)
def peek(self):
"""Returns the next object to be returned by :meth:`pop`,
but without removing it from the queue.
Raises :exc:`NotImplementedError` if the underlying queue class does
not implement a ``peek`` method, which is optional for queues.
"""
try:
s = super().peek()
except AttributeError as ex:
raise NotImplementedError("The underlying queue class does not implement 'peek'") from ex
if s:
return deserialize(s)
return SerializableQueue
def _scrapy_serialization_queue(queue_class):
class ScrapyRequestQueue(queue_class):
def __init__(self, crawler, key):
self.spider = crawler.spider
super().__init__(key)
@classmethod
def from_crawler(cls, crawler, key, *args, **kwargs):
return cls(crawler, key)
def push(self, request):
request = request.to_dict(spider=self.spider)
return super().push(request)
def pop(self):
request = super().pop()
if not request:
return None
return request_from_dict(request, spider=self.spider)
def peek(self):
"""Returns the next object to be returned by :meth:`pop`,
but without removing it from the queue.
Raises :exc:`NotImplementedError` if the underlying queue class does
not implement a ``peek`` method, which is optional for queues.
"""
request = super().peek()
if not request:
return None
return request_from_dict(request, spider=self.spider)
return ScrapyRequestQueue
def _scrapy_non_serialization_queue(queue_class):
class ScrapyRequestQueue(queue_class):
@classmethod
def from_crawler(cls, crawler, *args, **kwargs):
return cls()
def peek(self):
"""Returns the next object to be returned by :meth:`pop`,
but without removing it from the queue.
Raises :exc:`NotImplementedError` if the underlying queue class does
not implement a ``peek`` method, which is optional for queues.
"""
try:
s = super().peek()
except AttributeError as ex:
raise NotImplementedError("The underlying queue class does not implement 'peek'") from ex
return s
return ScrapyRequestQueue
def _pickle_serialize(obj):
try:
return pickle.dumps(obj, protocol=4)
# Both pickle.PicklingError and AttributeError can be raised by pickle.dump(s)
# TypeError is raised from parsel.Selector
except (pickle.PicklingError, AttributeError, TypeError) as e:
raise ValueError(str(e)) from e
_PickleFifoSerializationDiskQueue = _serializable_queue(
_with_mkdir(queue.FifoDiskQueue),
_pickle_serialize,
pickle.loads
)
_PickleLifoSerializationDiskQueue = _serializable_queue(
_with_mkdir(queue.LifoDiskQueue),
_pickle_serialize,
pickle.loads
)
_MarshalFifoSerializationDiskQueue = _serializable_queue(
_with_mkdir(queue.FifoDiskQueue),
marshal.dumps,
marshal.loads
)
_MarshalLifoSerializationDiskQueue = _serializable_queue(
_with_mkdir(queue.LifoDiskQueue),
marshal.dumps,
marshal.loads
)
# public queue classes
PickleFifoDiskQueue = _scrapy_serialization_queue(_PickleFifoSerializationDiskQueue)
PickleLifoDiskQueue = _scrapy_serialization_queue(_PickleLifoSerializationDiskQueue)
MarshalFifoDiskQueue = _scrapy_serialization_queue(_MarshalFifoSerializationDiskQueue)
MarshalLifoDiskQueue = _scrapy_serialization_queue(_MarshalLifoSerializationDiskQueue)
FifoMemoryQueue = _scrapy_non_serialization_queue(queue.FifoMemoryQueue)
LifoMemoryQueue = _scrapy_non_serialization_queue(queue.LifoMemoryQueue)
# deprecated queue classes
_subclass_warn_message = "{cls} inherits from deprecated class {old}"
_instance_warn_message = "{cls} is deprecated"
PickleFifoDiskQueueNonRequest = create_deprecated_class(
name="PickleFifoDiskQueueNonRequest",
new_class=_PickleFifoSerializationDiskQueue,
subclass_warn_message=_subclass_warn_message,
instance_warn_message=_instance_warn_message,
)
PickleLifoDiskQueueNonRequest = create_deprecated_class(
name="PickleLifoDiskQueueNonRequest",
new_class=_PickleLifoSerializationDiskQueue,
subclass_warn_message=_subclass_warn_message,
instance_warn_message=_instance_warn_message,
)
MarshalFifoDiskQueueNonRequest = create_deprecated_class(
name="MarshalFifoDiskQueueNonRequest",
new_class=_MarshalFifoSerializationDiskQueue,
subclass_warn_message=_subclass_warn_message,
instance_warn_message=_instance_warn_message,
)
MarshalLifoDiskQueueNonRequest = create_deprecated_class(
name="MarshalLifoDiskQueueNonRequest",
new_class=_MarshalLifoSerializationDiskQueue,
subclass_warn_message=_subclass_warn_message,
instance_warn_message=_instance_warn_message,
)
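if __name__ == '__main__':
    # Illustrative sketch, not part of scrapy itself: round-trip an arbitrary
    # picklable object through the pickle-backed FIFO disk queue built above.
    # Only the queuelib push/pop/close API already used in this module is
    # assumed; the temporary directory is created just for the demo.
    import shutil
    import tempfile
    tmpdir = tempfile.mkdtemp()
    q = _PickleFifoSerializationDiskQueue(str(Path(tmpdir) / 'demo-queue'))
    q.push({'url': 'http://example.com', 'priority': 0})
    assert q.pop() == {'url': 'http://example.com', 'priority': 0}
    q.close()
    shutil.rmtree(tmpdir)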
| {
"content_hash": "7742b8d60dd18b25fa2c4674216f8827",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 105,
"avg_line_length": 32.47027027027027,
"alnum_prop": 0.6717163309472283,
"repo_name": "scrapy/scrapy",
"id": "2fa84fc008f1dd720e5d238993a194396f18986a",
"size": "6007",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scrapy/squeues.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "3237"
},
{
"name": "Python",
"bytes": "2021119"
},
{
"name": "Roff",
"bytes": "2010"
},
{
"name": "Shell",
"bytes": "259"
}
],
"symlink_target": ""
} |
"""
This file was automatically generated.
"""
import six
class users(object):
def __init__(self, zap):
self.zap = zap
def users_list(self, contextid=None):
params = {}
if contextid is not None:
params['contextId'] = contextid
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/usersList/', params)))
def get_user_by_id(self, contextid=None, userid=None):
params = {}
if contextid is not None:
params['contextId'] = contextid
if userid is not None:
params['userId'] = userid
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getUserById/', params)))
def get_authentication_credentials_config_params(self, contextid):
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationCredentialsConfigParams/', {'contextId': contextid})))
def get_authentication_credentials(self, contextid, userid):
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationCredentials/', {'contextId': contextid, 'userId': userid})))
def new_user(self, contextid, name, apikey=''):
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/newUser/', {'contextId': contextid, 'name': name, 'apikey': apikey})))
def remove_user(self, contextid, userid, apikey=''):
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/removeUser/', {'contextId': contextid, 'userId': userid, 'apikey': apikey})))
def set_user_enabled(self, contextid, userid, enabled, apikey=''):
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setUserEnabled/', {'contextId': contextid, 'userId': userid, 'enabled': enabled, 'apikey': apikey})))
def set_user_name(self, contextid, userid, name, apikey=''):
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setUserName/', {'contextId': contextid, 'userId': userid, 'name': name, 'apikey': apikey})))
def set_authentication_credentials(self, contextid, userid, authcredentialsconfigparams=None, apikey=''):
params = {'contextId': contextid, 'userId': userid, 'apikey': apikey}
if authcredentialsconfigparams is not None:
params['authCredentialsConfigParams'] = authcredentialsconfigparams
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setAuthenticationCredentials/', params)))
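# Example usage (illustrative only, not part of the generated client).  It
# assumes a ZAP proxy is running locally and that the package exposes the
# usual ZAPv2 entry point; the context id, user name and API key below are
# made up for the demo:
#
#     from zapv2 import ZAPv2
#     zap = ZAPv2(apikey='changeme')
#     uid = zap.users.new_user(contextid='1', name='test-user', apikey='changeme')
#     zap.users.set_user_enabled(contextid='1', userid=uid, enabled='true', apikey='changeme')
#     print(zap.users.users_list(contextid='1'))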
| {
"content_hash": "767a306234143db87e3b59ad769aedc3",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 188,
"avg_line_length": 52.44897959183673,
"alnum_prop": 0.6805447470817121,
"repo_name": "Woolworths/zap-api-python",
"id": "658fd53f9cb2ffdeb26c24283c163b32d86acfdf",
"size": "3283",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/zapv2/users.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "152889"
}
],
"symlink_target": ""
} |
def main():
'''
ansible oc module for version
'''
module = AnsibleModule(
argument_spec=dict(
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
version=dict(default=True, type='bool'),
state=dict(default='list', type='str',
choices=['list']),
debug=dict(default=False, type='bool'),
),
)
oc_version = OCVersion(module.params['kubeconfig'],
module.params['debug'])
state = module.params['state']
if state == 'list':
#pylint: disable=protected-access
result = oc_version.get()
module.exit_json(changed=False, result=result)
if __name__ == '__main__':
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. This are required
from ansible.module_utils.basic import *
main()
| {
"content_hash": "534e4dc31da2cdd84db547e57d6c037e",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 94,
"avg_line_length": 30.548387096774192,
"alnum_prop": 0.587117212249208,
"repo_name": "andrewklau/openshift-tools",
"id": "1695b1898856120767453607828f494d407fd3bd",
"size": "1004",
"binary": false,
"copies": "5",
"ref": "refs/heads/prod",
"path": "ansible/roles/lib_openshift_3.2/build/ansible/oc_version.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "107055"
},
{
"name": "CSS",
"bytes": "588"
},
{
"name": "Groovy",
"bytes": "6322"
},
{
"name": "HTML",
"bytes": "14650"
},
{
"name": "JavaScript",
"bytes": "229"
},
{
"name": "PHP",
"bytes": "35735"
},
{
"name": "Python",
"bytes": "6700383"
},
{
"name": "Shell",
"bytes": "581246"
},
{
"name": "Vim script",
"bytes": "1836"
}
],
"symlink_target": ""
} |
import json
from abc import ABCMeta, abstractmethod
import numpy as np
from pychemia.utils.computing import deep_unicode
from numbers import Integral, Real
class PyChemiaJsonable(object):
"""
Abstract base class specifying how to convert objects from/to dictionaries.
PyChemiaJsonable objects must implement a to_dict property and a from_dict static method.
"""
__metaclass__ = ABCMeta
@property
@abstractmethod
def to_dict(self):
"""
A JSON representation of an object.
"""
pass
@classmethod
def from_dict(cls, json_dict):
"""
This implements a default from_dict method which supports all
classes that simply try to recreate an object using the keys
as arguments for the creation of the new object.
        :param json_dict: Recreate an object from its serialized form
:return:
"""
argstring = ''
for key in json_dict:
argstring += key + '=' + str(json_dict[key]) + ', '
argstring = argstring[:-2]
print(str(cls) + '(' + argstring + ')')
return eval(str(cls) + '(' + argstring + ')')
@property
def to_json(self):
"""
Returns a json string representation of the object.
"""
return json.dumps(self)
def save_to_file(self, filename):
"""
Writes the json representation to a file.
:param filename: (str) Filename for the json that will be created
"""
with open(filename, "w") as f:
json.dump(self, f)
def generic_serializer(value):
"""
A generic serializer for very common values
:param value:
:return:
"""
value = deep_unicode(value)
if value is None:
return None
    elif isinstance(value, str):
        # check strings before the generic __iter__ branch so that (Python 3)
        # strings are returned whole instead of as lists of characters
        return value
    elif isinstance(value, dict):
        new_value = {}
        for i in value:
            new_value[i] = generic_serializer(value[i])
        return new_value
    elif hasattr(value, '__iter__'):
        return [generic_serializer(element) for element in value]
elif isinstance(value, Integral):
return int(value)
elif isinstance(value, Real):
return float(value)
elif isinstance(value, np.integer):
return int(value)
elif isinstance(value, np.float):
return float(value)
else:
raise ValueError("Could not serialize this: %s of type: %s" % (value, type(value)))
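if __name__ == '__main__':
    # Illustrative sketch, not part of the original module: turn numpy scalars
    # and nested containers into plain JSON-friendly Python types.  The sample
    # keys are made up; the dict key order in the printed result may vary.
    sample = {'energy': np.float64(-1.5),
              'natoms': np.int64(8),
              'cell': np.eye(2)}
    print(generic_serializer(sample))
    # e.g. {'energy': -1.5, 'natoms': 8, 'cell': [[1.0, 0.0], [0.0, 1.0]]}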
| {
"content_hash": "d92af01ce6b8e0208252a2294f36daef",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 93,
"avg_line_length": 28.558139534883722,
"alnum_prop": 0.6054560260586319,
"repo_name": "MaterialsDiscovery/PyChemia",
"id": "acedf80d032e2059dbdb29b615f0a97930d58019",
"size": "2457",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pychemia/utils/serializer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1390398"
},
{
"name": "Shell",
"bytes": "325"
}
],
"symlink_target": ""
} |
import re
class WSGITeamMiddleware():
def __init__(self, application):
self.app = application
    def __call__(self, environ, start_response):
m = re.match(r"(/teams/([\w-]+))(.*)", environ["PATH_INFO"])
if m:
environ["SCRIPT_NAME"] = m.group(1)
environ["PATH_INFO"] = m.group(3)
environ["pinax.team"] = m.group(2)
        return self.app(environ, start_response)
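if __name__ == "__main__":
    # Illustrative sketch, not part of the original module: exercise the
    # middleware with a fake WSGI app to show how a /teams/<slug>/ URL is
    # split into SCRIPT_NAME / PATH_INFO and the slug exposed as
    # "pinax.team".  The team slug and path below are made up for the demo.
    def fake_app(environ, start_response):
        return [environ["SCRIPT_NAME"], environ["PATH_INFO"], environ["pinax.team"]]
    app = WSGITeamMiddleware(fake_app)
    print(app({"PATH_INFO": "/teams/red-team/dashboard/"}, None))
    # -> ['/teams/red-team', '/dashboard/', 'red-team']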
| {
"content_hash": "35d0e4e4fef081968d01c95c6e611563",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 68,
"avg_line_length": 28.8,
"alnum_prop": 0.5486111111111112,
"repo_name": "pinax/pinax-teams",
"id": "43567d75cc6b9b08af322c186eaa85628c37edd0",
"size": "518",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pinax/teams/wsgi_middleware.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "74"
},
{
"name": "Makefile",
"bytes": "90"
},
{
"name": "Python",
"bytes": "75910"
}
],
"symlink_target": ""
} |
from msrest.serialization import Model
class CaptureDescription(Model):
"""Properties to configure capture description for eventhub.
:param enabled: A value that indicates whether capture description is
enabled.
:type enabled: bool
:param encoding: Enumerates the possible values for the encoding format of
capture description. Possible values include: 'Avro', 'AvroDeflate'
:type encoding: str or
~azure.mgmt.servicebus.models.EncodingCaptureDescription
:param interval_in_seconds: The time window allows you to set the
     frequency with which the capture to Azure Blobs will happen, value should
     be between 60 and 900 seconds
:type interval_in_seconds: int
:param size_limit_in_bytes: The size window defines the amount of data
     built up in your Event Hub before a capture operation, value should be
between 10485760 and 524288000 bytes
:type size_limit_in_bytes: int
:param destination: Properties of Destination where capture will be
stored. (Storage Account, Blob Names)
:type destination: ~azure.mgmt.servicebus.models.Destination
"""
_validation = {
'interval_in_seconds': {'maximum': 900, 'minimum': 60},
'size_limit_in_bytes': {'maximum': 524288000, 'minimum': 10485760},
}
_attribute_map = {
'enabled': {'key': 'enabled', 'type': 'bool'},
'encoding': {'key': 'encoding', 'type': 'EncodingCaptureDescription'},
'interval_in_seconds': {'key': 'intervalInSeconds', 'type': 'int'},
'size_limit_in_bytes': {'key': 'sizeLimitInBytes', 'type': 'int'},
'destination': {'key': 'destination', 'type': 'Destination'},
}
def __init__(self, enabled=None, encoding=None, interval_in_seconds=None, size_limit_in_bytes=None, destination=None):
super(CaptureDescription, self).__init__()
self.enabled = enabled
self.encoding = encoding
self.interval_in_seconds = interval_in_seconds
self.size_limit_in_bytes = size_limit_in_bytes
self.destination = destination
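if __name__ == '__main__':
    # Illustrative sketch, not part of the generated SDK code: construct a
    # capture description with values that satisfy the _validation bounds
    # above (60-900 seconds, 10485760-524288000 bytes).  A real request would
    # normally also provide `destination`; the values here are made up.
    capture = CaptureDescription(enabled=True,
                                 encoding='Avro',
                                 interval_in_seconds=300,
                                 size_limit_in_bytes=314572800)
    print("{} every {} seconds".format(capture.encoding, capture.interval_in_seconds))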
| {
"content_hash": "ab33d4b51e064b23a28d5e0d11557368",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 122,
"avg_line_length": 44.891304347826086,
"alnum_prop": 0.6799031476997579,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "db8c8e2aca756cacca6e50d4b96157899327f785",
"size": "2539",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-servicebus/azure/mgmt/servicebus/models/capture_description.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Ship()
result.template = "object/ship/shared_freighterlight_tier4.iff"
result.attribute_template_id = -1
result.stfName("","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
	return result
| {
"content_hash": "168e062add3d1e773ebb3aed12c98e97",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 64,
"avg_line_length": 20.923076923076923,
"alnum_prop": 0.6727941176470589,
"repo_name": "obi-two/Rebelion",
"id": "cf94417c4653540c0cdcc8d7551d9c3943dfaa61",
"size": "417",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/ship/shared_freighterlight_tier4.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
import datetime
from django.db import models
from django.utils import timezone
# each model is its own table in the database
# this one has 3 columns: question, pub_date, id
class Poll(models.Model):
question = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
def __unicode__(self):
return self.question
def was_published_recently(self):
return self.pub_date >= timezone.now() - datetime.timedelta(days=1)
was_published_recently.admin_order_field = 'pub_date'
was_published_recently.boolean = True
was_published_recently.short_description = 'Published recently?'
# this one has 4 columns: poll, choice_text, votes, id
class Choice(models.Model):
poll = models.ForeignKey(Poll)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
    def __unicode__(self):  # __unicode__ is called whenever the object is printed, same as .toString
return self.choice_text
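# Example usage from `python manage.py shell` (illustrative only; the import
# path depends on how the app is installed in the project):
#
#     from polls.models import Poll
#     from django.utils import timezone
#     p = Poll.objects.create(question="What's new?", pub_date=timezone.now())
#     p.choice_set.create(choice_text="Not much", votes=0)
#     p.was_published_recently()   # True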
| {
"content_hash": "e96dead6327ebd782c48613366b4adaa",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 94,
"avg_line_length": 31.129032258064516,
"alnum_prop": 0.7098445595854922,
"repo_name": "Irrialite/YouTune",
"id": "7568e70d30a1b08d22003089bef036b7d20c0c55",
"size": "965",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "youtune/polls/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "252952"
},
{
"name": "JavaScript",
"bytes": "679962"
},
{
"name": "Python",
"bytes": "75866"
}
],
"symlink_target": ""
} |
"""Helpers class for accessing system information."""
from __future__ import unicode_literals
import os
import subprocess
import time
import uuid
def ReadDMI(name):
"""Reads a DMI value from the /sys filesystem.
Args:
name (str): the name of the DMI value to read.
Returns:
str: the DMI value, or None if not available.
"""
dmi_value = None
dmi_path = os.path.join('/sys/class/dmi/id/', name)
try:
with open(dmi_path, 'r') as d_f:
dmi_value = d_f.read().strip()
except IOError:
pass
return dmi_value
def GetChassisSerial():
"""Gets the system's chassis serial number.
Returns:
str: the serial number.
"""
return ReadDMI('chassis_serial')
def GetMachineUUID():
"""Gets the system's product UUID.
Returns:
str: the product UUID.
"""
return ReadDMI('product_uuid')
def GetRandomUUID():
"""Generates a random UUID.
Returns:
str: the UUID.
"""
return str(uuid.uuid4())
def GetIdentifier():
"""Gets an identifier for the machine.
It first tries to use the machine's serial number, then the machine's UUID,
and defaults to a random UUID.
Returns:
str: the identifier.
"""
identifier = (GetChassisSerial() or
GetMachineUUID() or
GetRandomUUID())
return identifier
def GetUdevadmInfo(device_name):
"""Uses udevadm to pull metadata for a device.
Args:
device_name(str): the name of the device. ie: 'sda'
Returns:
dict: a dictionary of udev properties.
"""
device_metadata = {}
udevadm_path = Which('udevadm')
cmd = [udevadm_path, 'info', '--query', 'property', '--name', device_name]
udevadm_output = subprocess.check_output(cmd).decode()
device_metadata['udevadm_text_output'] = udevadm_output
for line in udevadm_output.split('\n'):
try:
key, value = line.strip().split('=', 1)
device_metadata[key] = value
except ValueError:
pass
return device_metadata
def GetTime():
"""Returns the current time as a iso string."""
return time.strftime('%Y%m%d-%H%M%S', time.gmtime())
def Which(cmd):
"""Searches for a binary in the current PATH environment variable.
Args:
cmd(str): the binary to search for.
Returns:
str: the first found path to a binary with the same name, or None.
"""
path_list = os.environ.get('PATH', os.defpath).split(os.pathsep)
for directory in path_list:
name = os.path.join(directory, cmd)
if os.path.isdir(name):
continue
if os.path.exists(name) and os.access(name, os.F_OK | os.X_OK):
return name
return None
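if __name__ == '__main__':
  # Illustrative sketch, not part of the original module: print the host
  # information these helpers can gather.  On machines without /sys/class/dmi
  # the DMI reads return None and GetIdentifier falls back to a random UUID.
  print('identifier : {0!s}'.format(GetIdentifier()))
  print('time       : {0!s}'.format(GetTime()))
  print('udevadm at : {0!s}'.format(Which('udevadm')))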
| {
"content_hash": "d7012efe4ab8c67ae6634a4b1a74f92d",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 77,
"avg_line_length": 22.408695652173915,
"alnum_prop": 0.653861078773768,
"repo_name": "google/GiftStick",
"id": "b51c7f64b76c69472a82fce9e6989ee41c64d90b",
"size": "3176",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "auto_forensicate/hostinfo.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "138685"
},
{
"name": "Shell",
"bytes": "50578"
}
],
"symlink_target": ""
} |
"""This module provides an interface to cutadapt with a set of commonly
used adapters for trimming
"""
from bipy.utils import flatten_options, append_stem, flatten, which
import subprocess
import os
from bcbio.utils import safe_makedir, file_exists
import sh
import yaml
import bcbio.provenance.do as do
# adapter sequences for various commonly used systems
ADAPTERS = {}
ADAPTERS["illumina"] = [
["ACACTCTTTCCCTACACGACGCTCTTCCGATCT", "-a", "ill_pe_adapter1"],
["TGTGAGAAAGGGATGTGCTGCGAGAAGGCTAG", "-a", "ill_pe_adapter1_rc"],
["GATCGGAAGAGCGGTTCAGCAGGAATGCCGAG", "-a", "ill_pe_adapter2"],
["TCTAGCCTTCTCGCCAAGTCGTCCTTACGGCTC", "-a", "ill_pe_adapter2_rc"]]
ADAPTERS["nextera"] = [
["AATGATACGGCGACCACCGAGATCTACACGCCTCCCTCGCGCCATCAG", "-a",
"nex_pe_adapter1"],
["CTGATGGCGCGAGGGAGGCGTGTAGATCTCGGTGGTCGCCGTATCATT", "-a",
"nex_pe_adapter1_rc"],
["CAAGCAGAAGACGGCATACGAGATCGGTCTGCCTTGCCAGCCCGCTCAG",
"-a", "nex_pe_adapter2_nobc"],
["CTGAGCGGGCTGGCAAGGCAGACCGATCTCGTATGCCGTCTTCTGCTTG",
"-a", "nex_pe_adapter2_nobc_rc"],
["CTGATGGCGCGAGGGAGGCGTGTAGATCTCGGTGGTCGCCGTATCATTCTGTCTCTTATACACATCT",
"-a", "nex_transposon_pe_adapter1_rc"],
["AGATGTGTATAAGAGACAGAATGATACGGCGACCACCGAGATCTACACGCCTCCCTCGCGCCATCAG",
"-a", "nex_transposon_pe_adapter1"],
["AGATGTGTATAAGAGACAGCAAGCAGAAGACGGCATACGAGATCGGTCTGCCTTGCCAGCCCGCTCAG",
"-a", "nex_tranposon_pe_adapter2"]]
ADAPTERS["polya"] = [
["AAAAAAAAAAAAAAAAAAAAAAAAAAA", "-a", "polyA tail"],
["TTTTTTTTTTTTTTTTTTTTTTTTTTT", "-a", "polyT tail"]]
ADAPTERS["iontorrent"] = [
["CCACTACGCCTCCGCTTTCCTCTCTATGGGCAGTCGGTGAT", "-a",
"ion_5_prime_adapter"],
["CTGAGTCGGAGACACGCAGGGATGAGATGG", "-a", "3_prime_adapter"],
["ATCACCGACTGCCCATAGAGAGGAAAGCGGAGGCGTAGTGG", "-a",
"5_prime_adapter_rc"],
["CCATCTCATCCCTGCGTGTCTCCGACTCAG", "-a", "3_prime_adapter_rc"]]
TRUSEQ_BARCODES = {"ATCACG": 1, "AGTCAA": 13, "ACTGAT": 25, "CGGAAT": 37,
"CGATGT": 2, "AGTTCC": 14, "ATGAGC": 26, "CTAGCT": 38,
"TTAGGC": 3, "ATGTCA": 15, "ATTCCT": 27, "CTATAC": 39,
"TGACCA": 4, "CCGTCC": 16, "CAAAAG": 28, "CTCAGA": 40,
"ACAGTG": 5, "GTAGAG": 17, "CAACTA": 29, "GACGAC": 41,
"GCCAAT": 6, "GTCCGC": 18, "CACCGG": 30, "TAATCG": 42,
"CAGATC": 7, "GTGAAA": 19, "CACGAT": 31, "TACAGC": 43,
"ACTTGA": 8, "GTGGCC": 20, "CACTCA": 32, "TATAAT": 44,
"GATCAG": 9, "GTTTCG": 21, "CAGGCG": 33, "TCATTC": 45,
"TAGCTT": 10, "CGTACG": 22, "CATGGC": 34, "TCCCGA": 46,
"GGCTAC": 11, "GAGTGG": 23, "CATTTT": 35, "TCGAAG": 47,
"CTTGTA": 12, "GGTAGC": 24, "CCAACA": 36, "TCGGCA": 48}
VALID_TRUSEQ_RNASEQ = {k: v for (k, v) in TRUSEQ_BARCODES.items() if v < 13}
TRUSEQ_PREFIX = "GATCGGAAGAGCACACGTCTGAACTCCAGTCAC"
def truseq_barcode_lookup(barcode, small=False):
"""
looks up a truseq adapter sequence by inserting the barcode in the
correct sequence. throws an exception if the barcode does not match
known barcodes
"""
prefix = "AGATCGGAAGAGCACACGTCTGAACTCCAGTCAC"
suffix = "ATCTCGTATGCCGTCTTCTGCTTG"
if small:
raise NotImplementedError("Small RNA barcodes not implemented. Need "
"to check to make sure the prefix and "
"suffix sequences are the same as the "
"RNA-seq barcodes.")
if barcode not in VALID_TRUSEQ_RNASEQ:
raise ValueError("Barcode not found in TruSeq barcodes. Might need "
"to implement v1 and v2 versions.")
return prefix + barcode + suffix
def _get_adapter(adapter):
return [adapter[1], adapter[0]]
def _get_platform_adapters(platform):
platform_adapters = ADAPTERS.get(platform, [])
adapters = map(_get_adapter, platform_adapters)
return adapters
def _parse(config):
# handle the adapters, defaulting to illumina and a poly-a trimmer
# if none are provided
adapters = []
adapters += flatten(map(_get_adapter,
config.get("adapters", [])))
# add built in platform if available
platform = config.get("platform", None)
if platform:
adapters += flatten(map(_get_platform_adapters,
[p for p in platform if p in ADAPTERS]))
# default to illumina and poly A
if not adapters:
adapters += flatten(map(_get_platform_adapters,
[p for p in ["illumina", "polya"]]))
arguments = []
arguments += adapters
# grab everything else
arguments += config.get("options", [])
return map(str, list(flatten(arguments)))
def run(in_file, stage_config, config):
arguments = [stage_config["program"]]
arguments += _parse(stage_config)
results_dir = config["dir"].get("results", None)
if results_dir:
out_dir = os.path.join(results_dir, "cutadapt")
safe_makedir(out_dir)
out_file = os.path.join(out_dir,
os.path.basename(append_stem(in_file,
"trimmed")))
else:
out_file = append_stem(in_file, "trimmed")
if file_exists(out_file):
return out_file
arguments.extend(["--output", out_file, in_file])
do.run(arguments, "Running cutadapt on %s." % (in_file),
None)
return out_file
def _common_prefix(first, second):
    # return the longest common prefix of the two sequences
    for i, (x, y) in enumerate(zip(first, second)):
        if x != y:
            return first[:i]
    return first[:min(len(first), len(second))]
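if __name__ == "__main__":
    # Illustrative sketch, not part of the original module: a TruSeq adapter
    # is just prefix + 6 bp barcode + suffix, so looking up the first RNA-seq
    # barcode (ATCACG) yields a 64 bp sequence that starts with the prefix
    # hard-coded in truseq_barcode_lookup.
    adapter = truseq_barcode_lookup("ATCACG")
    print(adapter)
    assert "ATCACG" in adapter
    assert len(adapter) == 64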
| {
"content_hash": "0ff5a444f9184bfeb13f283808fbf3de",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 77,
"avg_line_length": 40.70503597122302,
"alnum_prop": 0.6101095793566631,
"repo_name": "roryk/bipy",
"id": "92684cbcce719786619a9a32150916b47201686a",
"size": "5658",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bipy/toolbox/cutadapt_tool.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "3428"
},
{
"name": "Python",
"bytes": "205527"
}
],
"symlink_target": ""
} |
"""
Interface Vistrails - TreeLayoutLW to align version trees.
Originally written by Lauro D. Lins.
"""
from __future__ import division
from tree_layout import TreeLW, NodeLW, TreeLayoutLW
from vistrails.core.data_structures.point import Point
################################################################################
class NodeVistrailsTreeLayoutLW(object):
"""
Preserving the interface that version_view
expects with the DotNode.
"""
def __init__(self):
""" DotNode() -> DotNode
Initialize DotNode as a data structure holding geometry info
"""
self.p = Point(0,0)
self.height = 0.0
self.width = 0.0
self.id = 0
def move(self, x, y):
""" move(x: float, y: float) -> None
"""
self.p.x = self.p.x + x
self.p.y = self.p.y + y
class VistrailsTreeLayoutLW(object):
"""
    Layout of the version tree computed with TreeLayoutLW (replacing the old
    Dotty-based DotLayout) which will be used and parsed by the version tree view
"""
def __init__(self, text_width_f, text_height, text_horizontal_margin,
text_vertical_margin):
""" DotLayout() -> DotLayout()
Initialize DotNode as a data structure holding graph structure
"""
self.text_width_f = text_width_f
self.text_height = text_height
self.text_horizontal_margin = text_horizontal_margin
self.text_vertical_margin = text_vertical_margin
self.nodes = {}
self.height = 0.0
self.scale = 0.0
self.width = 0.0
def generateTreeLW(self, vistrail, graph):
""" output_vistrail_graph(f: str) -> None
Using vistrail and graph to generate layout
"""
# create list of nodes
X = set()
# include the root manually
nodes = [(0,"")]
X.add(0)
# include the tagged nodes
for id, tag_name in vistrail.get_tagMap().iteritems():
if id in graph.vertices:
nodes.append((id, tag_name))
X.add(id)
# mount list of edges (parent, child).
# preserving the order given by
# "graph.edges_from()".
edges = []
for id in graph.vertices:
froom = graph.edges_from(id)
for (first,second) in froom:
# print "arc %d -> %d" % (id, first)
edges.append((id,first))
if id not in X:
# nodes.append((id," "))
nodes.append((id, vistrail.get_description(id)))
X.add(id)
if first not in X:
# nodes.append((first," "))
nodes.append((first, vistrail.get_description(first)))
X.add(first)
# get widths and heights for the nodes
empty_width = self.text_horizontal_margin + self.text_width_f(" " * 5)
# default height for all nodes
height = self.text_height + self.text_vertical_margin
# create an empty tree
tree = TreeLW()
# create map from id to tree node
mapTreeNodes = {}
# add the remaining nodes
for id, tag in nodes:
width = self.text_horizontal_margin + self.text_width_f(tag)
width = max(width, empty_width)
# print "add node to the tree %d %s" % (id, tag)
mapTreeNodes[id] = tree.addNode(None,width,height,(id,tag))
# preserve the order of the edges
# to add the children to their parents
for (parentId, childId) in edges:
# print "add arc into tree %d -> %d" % (parentId, childId)
parent = mapTreeNodes[parentId]
child = mapTreeNodes[childId]
# if child.parent is not None:
# print "child already has a parent!!! %d -> %d" % (parentId, childId)
# raise ValueError("Node already has a parent")
tree.changeParentOfNodeWithNoParent(parent, child)
# return the tree
return tree
def layout_from(self, vistrail, graph):
""" layout_from(vistrail: VisTrail, graph: Graph) -> None
        Take a graph from the VisTrail version tree and use TreeLayoutLW to lay it out
"""
tree = self.generateTreeLW(vistrail, graph)
min_horizontal_separation = 20
min_vertical_separation = 50
layout = TreeLayoutLW(tree, TreeLayoutLW.TOP,
min_horizontal_separation,
min_vertical_separation)
# prepare the result
self.nodes = {}
for v in tree.nodes:
id, tag = v.object
newNode = NodeVistrailsTreeLayoutLW()
newNode.p = Point(v.x, v.y)
newNode.width = v.width
newNode.height = v.height
newNode.id = id
# newNode.label = tag
self.nodes[id] = newNode
# keep track of the bounding box
# of the whole tree
(minx, miny, width, height) = tree.boundingBox()
self.scale = 0.0
self.width = width
self.height = height
def move_node(self, id, x, y):
""" move_node(id: int, x: float, y: float) -> None
"""
self.nodes[id].move(x,y)
def add_node(self, id, node):
""" add_node(id: int, node: NodeVistrailsTreeLayoutLW) -> None
"""
self.nodes[id] = node
| {
"content_hash": "7003f13b340efa8046e4568ec8395d06",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 86,
"avg_line_length": 32.02923976608187,
"alnum_prop": 0.5316779258718276,
"repo_name": "hjanime/VisTrails",
"id": "cdd1edbb696d3a37ceed2e2e0d03cd162c1d8575",
"size": "7390",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vistrails/core/layout/version_tree_layout.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1421"
},
{
"name": "Inno Setup",
"bytes": "19550"
},
{
"name": "Makefile",
"bytes": "768"
},
{
"name": "Mako",
"bytes": "66613"
},
{
"name": "PHP",
"bytes": "49302"
},
{
"name": "Python",
"bytes": "19803915"
},
{
"name": "R",
"bytes": "782836"
},
{
"name": "Ruby",
"bytes": "875"
},
{
"name": "Shell",
"bytes": "35024"
},
{
"name": "TeX",
"bytes": "145333"
},
{
"name": "XSLT",
"bytes": "1090"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('report', '0003_auto_20151015_1921'),
]
operations = [
migrations.AlterField(
model_name='report',
name='client',
field=models.CharField(
default=None, max_length=40, null=True, verbose_name='Zg\u0142aszaj\u0105cy', blank=True
),
),
migrations.AlterField(
model_name='report',
name='created_at',
field=models.DateTimeField(auto_now_add=True, verbose_name='Utworzone'),
),
migrations.AlterField(
model_name='report',
name='description',
field=models.TextField(verbose_name='Opis'),
),
migrations.AlterField(
model_name='report',
name='resolved_at',
field=models.DateTimeField(null=True, verbose_name='Rozpatrzone', blank=True),
),
migrations.AlterField(
model_name='report',
name='resolved_by',
field=models.ForeignKey(
verbose_name='Rozpatrzone przez',
blank=True,
to=settings.AUTH_USER_MODEL,
null=True,
on_delete=models.CASCADE,
),
),
]
| {
"content_hash": "54956c16ed7da86ac7abaca3c4591b8b",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 104,
"avg_line_length": 30.622222222222224,
"alnum_prop": 0.5355587808417998,
"repo_name": "KlubJagiellonski/pola-backend",
"id": "93eb50267cf1c1fa8c76a70d1372dc57ef8f7b53",
"size": "1378",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pola/report/migrations/0004_auto_20151031_0721.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "150005"
},
{
"name": "Dockerfile",
"bytes": "7027"
},
{
"name": "HTML",
"bytes": "106428"
},
{
"name": "JavaScript",
"bytes": "370094"
},
{
"name": "Procfile",
"bytes": "111"
},
{
"name": "Python",
"bytes": "418208"
},
{
"name": "SCSS",
"bytes": "32864"
},
{
"name": "Shell",
"bytes": "24862"
}
],
"symlink_target": ""
} |
'''
Example code for adaptive filtering and thresholding.
'''
import re
import os
# OpenCV
import numpy as np
import cv2
# Histogram and plotting
import matplotlib.pyplot as plt
import matplotlib as mpl
# ----------------------------------------------------------------
def screenshot_filename(title):
'''Get the filename for the screenshot file based on the title.'''
return "../screenshots/binarization %s.jpg" % title.replace(':', '')
def save(title, img):
fname = screenshot_filename(title)
print "Writing %s..." % fname
cv2.imwrite(fname, img)
def show(title, img):
'''Show the image.
As a side-effect saves the image for use in the presentation.'''
cv2.imshow(title, img)
save(title, img)
# ----------------------------------------------------------------
def otsu_threshold(img):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    optimal_threshold, binarized_otsu = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
print optimal_threshold
ret_val, binarized = cv2.threshold(gray, optimal_threshold, 255, cv2.THRESH_BINARY)
return binarized
# ----------------------------------------------------------------
def adaptive_threshold(img, block_size=7, c=7):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
binarized = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY,
blockSize=block_size, C=c)
return binarized
# ----------------------------------------------------------------
ADAPTIVE_THRESHOLD_WINDOW = 'Adaptive Threshold'
def put_text(img, text, pos, col=(0,160,0)):
'''Mutate image, writing the text at the given position.'''
font_face = cv2.FONT_HERSHEY_SIMPLEX
font_scale = 1
cv2.putText(img, text, pos, font_face, font_scale, col)
return img
def interactive_adaptive_threshold(img):
cv2.namedWindow(ADAPTIVE_THRESHOLD_WINDOW)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def update_callback(*arg):
block_size = cv2.getTrackbarPos('Block size', ADAPTIVE_THRESHOLD_WINDOW) + 3
block_size |= 1 # must be odd
c = cv2.getTrackbarPos('C', ADAPTIVE_THRESHOLD_WINDOW) - 100
brightest = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, block_size, c)
coloured = cv2.cvtColor(brightest, cv2.COLOR_GRAY2BGR)
put_text(coloured, 'Block size = %d' % block_size, (0,50))
put_text(coloured, 'C = %d' % c, (0,100))
cv2.imshow(ADAPTIVE_THRESHOLD_WINDOW, coloured)
pass
cv2.createTrackbar('Block size', ADAPTIVE_THRESHOLD_WINDOW, 5-3, 100, update_callback)
cv2.createTrackbar('C', ADAPTIVE_THRESHOLD_WINDOW, 105, 200, update_callback)
update_callback()
# ----------------------------------------------------------------
def transform_perspective(img, source_quad, dsize):
'''
Transform the perspective so the selected quadrilateral is mapped to a rectangle.
This maps image regions to how they would have looked photographed straight on.
'''
points_by_x = sorted([tuple(pt) for pt in source_quad])
leftmost = points_by_x[0:2:1]
rightmost = points_by_x[2:4:1]
top_left, bottom_left = sorted(leftmost, key=lambda pt: pt[1])
top_right, bottom_right = sorted(rightmost, key=lambda pt: pt[1])
corners = np.array([top_left, top_right, bottom_right, bottom_left]).astype('float32')
width, height = dsize
target = np.array([(0,0), (width,0), (width, height), (0,height)]).astype('float32')
mpt = cv2.getPerspectiveTransform(corners, target)
return cv2.warpPerspective(img, mpt, (width, height), flags=cv2.INTER_CUBIC)
# ----------------------------------------------------------------
def show_grayscale_histogram(img):
'''Show a histogram of the grayscale intensity of the image.'''
fig = plt.figure()
fig.add_subplot(111, axisbg='#660000')
n, bins, patches = plt.hist(img.flatten(), 256, normed=True)
# Colour histogram bins according to the grayscale value of the pixel
cmap = mpl.cm.gray
b_max = float(max(bins))
for b,patch in zip(bins, patches):
# scale bins to 0-1.0 for colour map look-up
c = cmap(b/b_max)
patch.set_color(c)
plt.title('Histogram of grayscale intensity')
plt.show(block=False)
fname = screenshot_filename('Histogram').replace('.jpg', '.png')
print "Saving %s..." % fname
plt.savefig(fname)
# ----------------------------------------------------------------
if __name__ =='__main__':
raw = cv2.imread('../images/books/Microserfs_p87_2.jpg')
#page_contour = np.array([[70, 59], [402, 52], [403, 535], [ 66, 526]]) * (3264/640)
# make it a bit wider to show the gradients
page_contour = np.array([[40, 59], [402, 52], [403, 535], [ 36, 526]]) * (3264/640)
page = transform_perspective(raw, page_contour, (480,640))
cv2.imshow('Page only', page)
small = page
show_grayscale_histogram(small)
interactive_adaptive_threshold(small)
gray = cv2.cvtColor(small, cv2.COLOR_BGR2GRAY)
ret_val, t_mid = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY)
cv2.imshow('Threshold mid-point', t_mid)
otsu = otsu_threshold(small)
    cv2.imshow('Threshold OTSU', otsu)
adaptive = adaptive_threshold(small)
cv2.imshow('Adaptive', adaptive)
print "Press any key..."
cv2.waitKey()
cv2.destroyAllWindows()
| {
"content_hash": "89c14ac5c95bf52a2f2613ab415aaaab",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 118,
"avg_line_length": 37.682758620689654,
"alnum_prop": 0.6052342606149341,
"repo_name": "mjul/making-the-computer-see-ndc-2014",
"id": "dbfe49ba39a99307dcbc51332b635009e781fc2b",
"size": "5487",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/binarization.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "62903"
}
],
"symlink_target": ""
} |
from cherrypy.test import test
test.prefer_parent_path()
import cherrypy
script_names = ["", "/path/to/myapp"]
def setup_server():
# Set up site
cherrypy.config.update({
'environment': 'test_suite',
'tools.proxy.on': True,
'tools.proxy.base': 'www.mydomain.test',
})
# Set up application
class Root:
def __init__(self, sn):
# Calculate a URL outside of any requests.
self.thisnewpage = cherrypy.url("/this/new/page", script_name=sn)
def pageurl(self):
return self.thisnewpage
pageurl.exposed = True
def index(self):
raise cherrypy.HTTPRedirect('dummy')
index.exposed = True
def remoteip(self):
return cherrypy.request.remote.ip
remoteip.exposed = True
def xhost(self):
raise cherrypy.HTTPRedirect('blah')
xhost.exposed = True
xhost._cp_config = {'tools.proxy.local': 'X-Host',
'tools.trailing_slash.extra': True,
}
def base(self):
return cherrypy.request.base
base.exposed = True
def ssl(self):
return cherrypy.request.base
ssl.exposed = True
ssl._cp_config = {'tools.proxy.scheme': 'X-Forwarded-Ssl'}
def newurl(self):
return ("Browse to <a href='%s'>this page</a>."
% cherrypy.url("/this/new/page"))
newurl.exposed = True
for sn in script_names:
cherrypy.tree.mount(Root(sn), sn)
from cherrypy.test import helper
class ProxyTest(helper.CPWebCase):
def testProxy(self):
self.getPage("/")
self.assertHeader('Location',
"%s://www.mydomain.test%s/dummy" %
(self.scheme, self.prefix()))
# Test X-Forwarded-Host (Apache 1.3.33+ and Apache 2)
self.getPage("/", headers=[('X-Forwarded-Host', 'http://www.example.test')])
self.assertHeader('Location', "http://www.example.test/dummy")
self.getPage("/", headers=[('X-Forwarded-Host', 'www.example.test')])
self.assertHeader('Location', "%s://www.example.test/dummy" % self.scheme)
# Test X-Forwarded-For (Apache2)
self.getPage("/remoteip",
headers=[('X-Forwarded-For', '192.168.0.20')])
self.assertBody("192.168.0.20")
self.getPage("/remoteip",
headers=[('X-Forwarded-For', '67.15.36.43, 192.168.0.20')])
self.assertBody("192.168.0.20")
# Test X-Host (lighttpd; see https://trac.lighttpd.net/trac/ticket/418)
self.getPage("/xhost", headers=[('X-Host', 'www.example.test')])
self.assertHeader('Location', "%s://www.example.test/blah" % self.scheme)
# Test X-Forwarded-Proto (lighttpd)
self.getPage("/base", headers=[('X-Forwarded-Proto', 'https')])
self.assertBody("https://www.mydomain.test")
# Test X-Forwarded-Ssl (webfaction?)
self.getPage("/ssl", headers=[('X-Forwarded-Ssl', 'on')])
self.assertBody("https://www.mydomain.test")
# Test cherrypy.url()
for sn in script_names:
# Test the value inside requests
self.getPage(sn + "/newurl")
self.assertBody("Browse to <a href='%s://www.mydomain.test" % self.scheme
+ sn + "/this/new/page'>this page</a>.")
self.getPage(sn + "/newurl", headers=[('X-Forwarded-Host',
'http://www.example.test')])
self.assertBody("Browse to <a href='http://www.example.test"
+ sn + "/this/new/page'>this page</a>.")
# Test the value outside requests
port = ""
if self.scheme == "http" and self.PORT != 80:
port = ":%s" % self.PORT
elif self.scheme == "https" and self.PORT != 443:
port = ":%s" % self.PORT
host = self.HOST
if host in ('0.0.0.0', '::'):
import socket
host = socket.gethostname()
expected = ("%s://%s%s%s/this/new/page"
% (self.scheme, host, port, sn))
self.getPage(sn + "/pageurl")
self.assertBody(expected)
# Test trailing slash (see http://www.cherrypy.org/ticket/562).
self.getPage("/xhost/", headers=[('X-Host', 'www.example.test')])
self.assertHeader('Location', "%s://www.example.test/xhost"
% self.scheme)
if __name__ == '__main__':
setup_server()
helper.testmain()
| {
"content_hash": "0d787ef3bcb8e28f29cff7eaa3e2c0d1",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 85,
"avg_line_length": 37.553030303030305,
"alnum_prop": 0.5021182166633045,
"repo_name": "cread/ec2id",
"id": "410058433f15be55121182c374e6532eaa199f6e",
"size": "4957",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cherrypy/test/test_proxy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "807550"
},
{
"name": "Shell",
"bytes": "223"
}
],
"symlink_target": ""
} |
from testtools import matchers
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import test
CONF = config.CONF
class VolumesGetTestJSON(base.BaseV2ComputeTest):
@classmethod
def resource_setup(cls):
super(VolumesGetTestJSON, cls).resource_setup()
cls.client = cls.volumes_extensions_client
if not CONF.service_available.cinder:
skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
raise cls.skipException(skip_msg)
@test.attr(type='smoke')
def test_volume_create_get_delete(self):
# CREATE, GET, DELETE Volume
volume = None
v_name = data_utils.rand_name('Volume-%s-') % self._interface
metadata = {'Type': 'work'}
# Create volume
resp, volume = self.client.create_volume(size=1,
display_name=v_name,
metadata=metadata)
self.addCleanup(self.delete_volume, volume['id'])
self.assertEqual(200, resp.status)
self.assertIn('id', volume)
self.assertIn('displayName', volume)
self.assertEqual(volume['displayName'], v_name,
"The created volume name is not equal "
"to the requested name")
self.assertTrue(volume['id'] is not None,
"Field volume id is empty or not found.")
# Wait for Volume status to become ACTIVE
self.client.wait_for_volume_status(volume['id'], 'available')
# GET Volume
resp, fetched_volume = self.client.get_volume(volume['id'])
self.assertEqual(200, resp.status)
# Verification of details of fetched Volume
self.assertEqual(v_name,
fetched_volume['displayName'],
'The fetched Volume is different '
'from the created Volume')
self.assertEqual(volume['id'],
fetched_volume['id'],
'The fetched Volume is different '
'from the created Volume')
self.assertThat(fetched_volume['metadata'].items(),
matchers.ContainsAll(metadata.items()),
'The fetched Volume metadata misses data '
'from the created Volume')
| {
"content_hash": "9157bee948b51e5c4925c8edbfcdb746",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 79,
"avg_line_length": 42.01724137931034,
"alnum_prop": 0.567090685268773,
"repo_name": "afaheem88/tempest_neutron",
"id": "d4414273b50e56d4b10776192d651b283bd0263d",
"size": "3073",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tempest/api/compute/volumes/test_volumes_get.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2778383"
},
{
"name": "Shell",
"bytes": "8560"
}
],
"symlink_target": ""
} |
import numpy as np
def calculate_weights(labels, num_classes):
'''
Calculate class balancing weights for unary potentials
Parameters
----------
labels: numpy array
Array with ground truth labels
num_classes: numpy array
Number of classes in the dataset
Returns
-------
label_weights: numpy array
Image of weights
weights: numpy array
Per class weights
'''
height = labels.shape[0]
width = labels.shape[1]
class_hist = np.zeros(num_classes, dtype=np.int64)
for i in range(height):
for j in range(width):
if labels[i, j] >= 0 and labels[i, j] < num_classes:
class_hist[labels[i, j]] += 1
num_labels = class_hist.sum()
weights = np.zeros(num_classes, dtype=np.float32)
max_wgt = 1000
for i in range(num_classes):
if class_hist[i] > 0:
weights[i] = min(max_wgt, 1.0 / (class_hist[i] / num_labels))
else:
weights[i] = 0
label_weights = np.zeros((height, width), dtype=np.float32)
for i in range(height):
for j in range(width):
cidx = labels[i, j]
if cidx >= 0 and cidx < num_classes:
label_weights[i, j] = weights[cidx]
return label_weights, weights
def calculate_weights_binary(weights, labels_pairwise, decoding, num_classes):
'''
Calculate class balancing weights for binary potentials
Parameters
----------
weights: numpy array
Per class weights for unary potentials
labels_pairwise: numpy array
Array with ground truth labels for binary potentials
decoding: dict
dict that maps index => (label, label)
num_classes: numpy array
Number of classes in the dataset
Returns
-------
ret: numpy array
Image of weights
'''
ret = np.zeros(labels_pairwise.shape, dtype=np.float32)
for i in range(ret.shape[0]):
cidx1, cidx2 = decoding.get(labels_pairwise[i], (-1, -1))
if cidx1 >= 0 and cidx1 < num_classes and cidx2 >= 0 and cidx2 < num_classes:
ret[i] = min(100, weights[cidx1] * weights[cidx2])
return ret
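# Illustrative usage sketch (not part of the original module): the label map,
# class count, and values below are made up purely to show how the unary
# weights are obtained. Assumes Python 3 division semantics.
if __name__ == '__main__':
    demo_labels = np.array([[0, 0, 1],
                            [1, 2, 2],
                            [2, 2, 2]])
    demo_label_weights, demo_weights = calculate_weights(demo_labels, num_classes=3)
    # Per-class weights are inversely proportional to class frequency.
    print('per-class weights: %s' % demo_weights)
    print('per-pixel weight image:\n%s' % demo_label_weights)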
| {
"content_hash": "26fa8c51109c9de012e49131fbdfb8dd",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 85,
"avg_line_length": 25.788235294117648,
"alnum_prop": 0.5948905109489051,
"repo_name": "Vaan5/piecewisecrf",
"id": "2e062889b369b3030a08e8394422df69fed6a12b",
"size": "2192",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "piecewisecrf/datasets/helpers/weights_generator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1971"
},
{
"name": "C++",
"bytes": "81218"
},
{
"name": "Makefile",
"bytes": "219"
},
{
"name": "Python",
"bytes": "315847"
}
],
"symlink_target": ""
} |
import collections
import os
import re
import time
import sys
import click
from netCDF4 import Dataset
import numpy as np
import numpy.ma as ma
import osr
import rasterio
import pdb
from .. import geotools
from .. import utils
def adbc_conv(ss):
    # Convert HYDE archive names like '1990AD_pop.zip' or '1000BC_pop.zip'
    # into signed years (BC years become negative).
    m = re.match(r'(\d+)(ad|bc)_pop\.zip', ss, re.IGNORECASE)
    if m:
        y = int(m.group(1))
        if m.group(2).lower() == 'bc':
            y *= -1
        return y
    return None
def get_years(hdir):
return sorted(tuple(filter(lambda p: p is not None, map(adbc_conv,
os.listdir(hdir)))))
@click.command()
@click.option('--version', type=click.Choice(('32', '31_final')),
default='32',
help='Which version of HYDE to convert to NetCDF (default: 3.2)')
@click.option('--outdir', type=click.Path(file_okay=False),
default='/out/hyde',
help='Output directory (default: /out/hyde)')
@click.option('--start-year', type=int, default=0,
help='Start from given year (default: 0AD)')
def main(version, outdir, start_year):
oname = os.path.join(outdir, 'hyde-%s.nc' % version)
variables = tuple([(layer, 'f4', 'ppl/km^2', -9999, 'time')
for layer in utils.hyde_variables()])
years = tuple(filter(lambda yy: yy >= start_year,
get_years(utils.hyde_dir(version))))
with Dataset(oname, 'w') as out:
with rasterio.open(utils.hyde_area()) as area_ds:
init_nc(out, area_ds, years, variables)
with click.progressbar(years, length=len(years)) as bar:
for year in bar:
idx = years.index(year)
for variable in utils.hyde_variables():
with rasterio.open(utils.hyde_raw(version, year, variable)) as ds:
data = ds.read(1, masked=True)
out.variables[variable][idx, :, :] = data
def init_nc(dst_ds, src_ds, steps, variables):
# Set attributes
dst_ds.setncattr('Conventions', u'CF-1.5')
dst_ds.setncattr('GDAL', u'GDAL 1.11.3, released 2015/09/16')
# Create dimensions
dst_ds.createDimension('time', len(steps))
dst_ds.createDimension('lat', src_ds.height)
dst_ds.createDimension('lon', src_ds.width)
# Create variables
times = dst_ds.createVariable("time", "f8", ("time"), zlib=True,
least_significant_digit=3)
latitudes = dst_ds.createVariable("lat", "f4", ("lat"), zlib=True,
least_significant_digit = 3)
longitudes = dst_ds.createVariable("lon", "f4", ("lon"), zlib=True,
least_significant_digit=3)
crs = dst_ds.createVariable('crs', "S1", ())
# Add metadata
dst_ds.history = "Created at " + time.ctime(time.time())
dst_ds.source = "hyde2nc.py"
latitudes.units = "degrees_north"
latitudes.long_name = 'latitude'
longitudes.units = "degrees_east"
longitudes.long_name = "longitude"
times.units = "years since 0000-01-01 00:00:00.0"
times.calendar = "gregorian"
times.standard_name = "time"
times.axis = 'T'
# Assign data to variables
ul = src_ds.affine * (0.5, 0.5)
lr = src_ds.affine * (src_ds.width - 0.5, src_ds.height - 0.5)
latitudes[:] = np.linspace(ul[1], lr[1], src_ds.height)
longitudes[:] = np.linspace(ul[0], lr[0], src_ds.width)
times[:] = steps
srs = osr.SpatialReference()
srs.ImportFromWkt(geotools.WGS84_WKT)
src_trans = src_ds.affine.to_gdal()
crs.grid_mapping_name = 'latitude_longitude'
crs.spatial_ref = srs.ExportToWkt()
crs.GetTransform = ' '.join(tuple(map(str, src_trans)))
# FIXME: Attribute getters don't work in python3 or GDAL2
crs.longitude_of_prime_meridian = geotools.srs_get_prime_meridian(srs)
crs.semi_major_axis = geotools.srs_get_semi_major(srs)
crs.inverse_flattening = geotools.srs_get_inv_flattening(srs)
for name, dtype, units, fill, dimension in variables:
dst_data = dst_ds.createVariable(name, dtype,
(dimension, "lat","lon"), zlib = True,
least_significant_digit = 4,
fill_value = fill)
dst_data.units = units
dst_data.grid_mapping = 'crs'
if __name__ == '__main__':
main()
click.echo('done')
| {
"content_hash": "7a3165e994d7528b1f20c0157094fbbd",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 79,
"avg_line_length": 35.932203389830505,
"alnum_prop": 0.6047169811320755,
"repo_name": "ricardog/raster-project",
"id": "ad56f5dea09cc34df48635d76efc6acf6b4c78dc",
"size": "4264",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "projections/scripts/hyde2nc.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "296"
},
{
"name": "Dockerfile",
"bytes": "509"
},
{
"name": "HTML",
"bytes": "3896"
},
{
"name": "JavaScript",
"bytes": "6062"
},
{
"name": "Jupyter Notebook",
"bytes": "58981"
},
{
"name": "Makefile",
"bytes": "12383"
},
{
"name": "Python",
"bytes": "488587"
},
{
"name": "R",
"bytes": "61424"
},
{
"name": "Shell",
"bytes": "5573"
}
],
"symlink_target": ""
} |
import functools
import logging
import os
import subprocess
import lockfile
from oslotest import base as test_base
from six import moves
from six.moves.urllib import parse
import sqlalchemy
import sqlalchemy.exc
from oslo.db.openstack.common.gettextutils import _LE
from oslo.db.sqlalchemy import utils
LOG = logging.getLogger(__name__)
def _have_mysql(user, passwd, database):
present = os.environ.get('TEST_MYSQL_PRESENT')
if present is None:
return utils.is_backend_avail(backend='mysql',
user=user,
passwd=passwd,
database=database)
return present.lower() in ('', 'true')
def _have_postgresql(user, passwd, database):
present = os.environ.get('TEST_POSTGRESQL_PRESENT')
if present is None:
return utils.is_backend_avail(backend='postgres',
user=user,
passwd=passwd,
database=database)
return present.lower() in ('', 'true')
def _set_db_lock(lock_path=None, lock_prefix=None):
def decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
try:
path = lock_path or os.environ.get("OSLO_LOCK_PATH")
lock = lockfile.FileLock(os.path.join(path, lock_prefix))
with lock:
LOG.debug('Got lock "%s"' % f.__name__)
return f(*args, **kwargs)
finally:
LOG.debug('Lock released "%s"' % f.__name__)
return wrapper
return decorator
class BaseMigrationTestCase(test_base.BaseTestCase):
"""Base class fort testing of migration utils."""
def __init__(self, *args, **kwargs):
super(BaseMigrationTestCase, self).__init__(*args, **kwargs)
self.DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
'test_migrations.conf')
# Test machines can set the TEST_MIGRATIONS_CONF variable
# to override the location of the config file for migration testing
self.CONFIG_FILE_PATH = os.environ.get('TEST_MIGRATIONS_CONF',
self.DEFAULT_CONFIG_FILE)
self.test_databases = {}
self.migration_api = None
def setUp(self):
super(BaseMigrationTestCase, self).setUp()
# Load test databases from the config file. Only do this
# once. No need to re-run this on each test...
LOG.debug('config_path is %s' % self.CONFIG_FILE_PATH)
if os.path.exists(self.CONFIG_FILE_PATH):
cp = moves.configparser.RawConfigParser()
try:
cp.read(self.CONFIG_FILE_PATH)
defaults = cp.defaults()
for key, value in defaults.items():
self.test_databases[key] = value
except moves.configparser.ParsingError as e:
self.fail("Failed to read test_migrations.conf config "
"file. Got error: %s" % e)
else:
self.fail("Failed to find test_migrations.conf config "
"file.")
self.engines = {}
for key, value in self.test_databases.items():
self.engines[key] = sqlalchemy.create_engine(value)
# We start each test case with a completely blank slate.
self._reset_databases()
def tearDown(self):
# We destroy the test data store between each test case,
# and recreate it, which ensures that we have no side-effects
# from the tests
self._reset_databases()
super(BaseMigrationTestCase, self).tearDown()
def execute_cmd(self, cmd=None):
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
output = process.communicate()[0]
LOG.debug(output)
self.assertEqual(0, process.returncode,
"Failed to run: %s\n%s" % (cmd, output))
def _reset_pg(self, conn_pieces):
(user,
password,
database,
host) = utils.get_db_connection_info(conn_pieces)
os.environ['PGPASSWORD'] = password
os.environ['PGUSER'] = user
# note(boris-42): We must create and drop database, we can't
# drop database which we have connected to, so for such
# operations there is a special database template1.
sqlcmd = ("psql -w -U %(user)s -h %(host)s -c"
" '%(sql)s' -d template1")
sql = ("drop database if exists %s;") % database
droptable = sqlcmd % {'user': user, 'host': host, 'sql': sql}
self.execute_cmd(droptable)
sql = ("create database %s;") % database
createtable = sqlcmd % {'user': user, 'host': host, 'sql': sql}
self.execute_cmd(createtable)
os.unsetenv('PGPASSWORD')
os.unsetenv('PGUSER')
@_set_db_lock(lock_prefix='migration_tests-')
def _reset_databases(self):
for key, engine in self.engines.items():
conn_string = self.test_databases[key]
conn_pieces = parse.urlparse(conn_string)
engine.dispose()
if conn_string.startswith('sqlite'):
# We can just delete the SQLite database, which is
# the easiest and cleanest solution
db_path = conn_pieces.path.strip('/')
if os.path.exists(db_path):
os.unlink(db_path)
# No need to recreate the SQLite DB. SQLite will
# create it for us if it's not there...
elif conn_string.startswith('mysql'):
# We can execute the MySQL client to destroy and re-create
# the MYSQL database, which is easier and less error-prone
# than using SQLAlchemy to do this via MetaData...trust me.
(user, password, database, host) = \
utils.get_db_connection_info(conn_pieces)
sql = ("drop database if exists %(db)s; "
"create database %(db)s;") % {'db': database}
cmd = ("mysql -u \"%(user)s\" -p\"%(password)s\" -h %(host)s "
"-e \"%(sql)s\"") % {'user': user, 'password': password,
'host': host, 'sql': sql}
self.execute_cmd(cmd)
elif conn_string.startswith('postgresql'):
self._reset_pg(conn_pieces)
class WalkVersionsMixin(object):
def _walk_versions(self, engine=None, snake_walk=False, downgrade=True):
# Determine latest version script from the repo, then
# upgrade from 1 through to the latest, with no data
# in the databases. This just checks that the schema itself
# upgrades successfully.
# Place the database under version control
self.migration_api.version_control(engine, self.REPOSITORY,
self.INIT_VERSION)
self.assertEqual(self.INIT_VERSION,
self.migration_api.db_version(engine,
self.REPOSITORY))
LOG.debug('latest version is %s' % self.REPOSITORY.latest)
versions = range(self.INIT_VERSION + 1, self.REPOSITORY.latest + 1)
for version in versions:
# upgrade -> downgrade -> upgrade
self._migrate_up(engine, version, with_data=True)
if snake_walk:
downgraded = self._migrate_down(
engine, version - 1, with_data=True)
if downgraded:
self._migrate_up(engine, version)
if downgrade:
# Now walk it back down to 0 from the latest, testing
# the downgrade paths.
for version in reversed(versions):
# downgrade -> upgrade -> downgrade
downgraded = self._migrate_down(engine, version - 1)
if snake_walk and downgraded:
self._migrate_up(engine, version)
self._migrate_down(engine, version - 1)
def _migrate_down(self, engine, version, with_data=False):
try:
self.migration_api.downgrade(engine, self.REPOSITORY, version)
except NotImplementedError:
# NOTE(sirp): some migrations, namely release-level
# migrations, don't support a downgrade.
return False
self.assertEqual(
version, self.migration_api.db_version(engine, self.REPOSITORY))
# NOTE(sirp): `version` is what we're downgrading to (i.e. the 'target'
# version). So if we have any downgrade checks, they need to be run for
# the previous (higher numbered) migration.
if with_data:
post_downgrade = getattr(
self, "_post_downgrade_%03d" % (version + 1), None)
if post_downgrade:
post_downgrade(engine)
return True
def _migrate_up(self, engine, version, with_data=False):
"""migrate up to a new version of the db.
We allow for data insertion and post checks at every
migration version with special _pre_upgrade_### and
_check_### functions in the main test.
"""
# NOTE(sdague): try block is here because it's impossible to debug
# where a failed data migration happens otherwise
try:
if with_data:
data = None
pre_upgrade = getattr(
self, "_pre_upgrade_%03d" % version, None)
if pre_upgrade:
data = pre_upgrade(engine)
self.migration_api.upgrade(engine, self.REPOSITORY, version)
self.assertEqual(version,
self.migration_api.db_version(engine,
self.REPOSITORY))
if with_data:
check = getattr(self, "_check_%03d" % version, None)
if check:
check(engine, data)
except Exception:
LOG.error(_LE("Failed to migrate to version %s on engine %s") %
(version, engine))
raise
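# Illustrative sketch (not part of the original module): a concrete test case
# mixing in WalkVersionsMixin usually supplies per-version hooks named after
# the migration number. The hook bodies and the version number 002 below are
# hypothetical; they only show how _walk_versions() picks the hooks up.
class ExampleWalkVersionsTestCase(BaseMigrationTestCase, WalkVersionsMixin):
    def _pre_upgrade_002(self, engine):
        # Insert fixture data before migration 002 runs; the return value is
        # handed to _check_002() as `data`.
        return {'expected_rows': 1}
    def _check_002(self, engine, data):
        # Verify schema/data after migration 002 has been applied.
        self.assertEqual(1, data['expected_rows'])
    def _post_downgrade_002(self, engine):
        # Verify state after downgrading back below migration 002.
        pass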
| {
"content_hash": "b10eb40d0b92f48ed6c1e75806a6e3d4",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 79,
"avg_line_length": 41.047430830039524,
"alnum_prop": 0.5506018295618681,
"repo_name": "vikt0rs/oslo.db",
"id": "661b0a7e6bbe60f51666fab90a655d69364e279a",
"size": "11058",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "oslo/db/sqlalchemy/test_migrations.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "260000"
}
],
"symlink_target": ""
} |
"""Support for Genius Hub sensor devices."""
from datetime import timedelta
from typing import Any, Dict
from homeassistant.const import DEVICE_CLASS_BATTERY
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
import homeassistant.util.dt as dt_util
from . import DOMAIN, GeniusDevice, GeniusEntity
GH_STATE_ATTR = "batteryLevel"
GH_LEVEL_MAPPING = {
"error": "Errors",
"warning": "Warnings",
"information": "Information",
}
async def async_setup_platform(
hass: HomeAssistantType, config: ConfigType, async_add_entities, discovery_info=None
) -> None:
"""Set up the Genius Hub sensor entities."""
if discovery_info is None:
return
broker = hass.data[DOMAIN]["broker"]
sensors = [
GeniusBattery(broker, d, GH_STATE_ATTR)
for d in broker.client.device_objs
if GH_STATE_ATTR in d.data["state"]
]
issues = [GeniusIssue(broker, i) for i in list(GH_LEVEL_MAPPING)]
async_add_entities(sensors + issues, update_before_add=True)
class GeniusBattery(GeniusDevice):
"""Representation of a Genius Hub sensor."""
def __init__(self, broker, device, state_attr) -> None:
"""Initialize the sensor."""
super().__init__(broker, device)
self._state_attr = state_attr
self._name = f"{device.type} {device.id}"
@property
def icon(self) -> str:
"""Return the icon of the sensor."""
if "_state" in self._device.data: # only for v3 API
interval = timedelta(
seconds=self._device.data["_state"].get("wakeupInterval", 30 * 60)
)
if self._last_comms < dt_util.utcnow() - interval * 3:
return "mdi:battery-unknown"
battery_level = self._device.data["state"][self._state_attr]
if battery_level == 255:
return "mdi:battery-unknown"
if battery_level < 40:
return "mdi:battery-alert"
icon = "mdi:battery"
if battery_level <= 95:
icon += f"-{int(round(battery_level / 10 - 0.01)) * 10}"
return icon
@property
def device_class(self) -> str:
"""Return the device class of the sensor."""
return DEVICE_CLASS_BATTERY
@property
def unit_of_measurement(self) -> str:
"""Return the unit of measurement of the sensor."""
return "%"
@property
def state(self) -> str:
"""Return the state of the sensor."""
level = self._device.data["state"][self._state_attr]
return level if level != 255 else 0
class GeniusIssue(GeniusEntity):
"""Representation of a Genius Hub sensor."""
def __init__(self, broker, level) -> None:
"""Initialize the sensor."""
super().__init__()
self._hub = broker.client
self._unique_id = f"{broker.hub_uid}_{GH_LEVEL_MAPPING[level]}"
self._name = f"GeniusHub {GH_LEVEL_MAPPING[level]}"
self._level = level
self._issues = []
@property
def state(self) -> str:
"""Return the number of issues."""
return len(self._issues)
@property
def device_state_attributes(self) -> Dict[str, Any]:
"""Return the device state attributes."""
return {f"{self._level}_list": self._issues}
async def async_update(self) -> None:
"""Process the sensor's state data."""
self._issues = [
i["description"] for i in self._hub.issues if i["level"] == self._level
]
| {
"content_hash": "2ed501f879ab7ab0e1ac4a9961273f4b",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 88,
"avg_line_length": 29.803418803418804,
"alnum_prop": 0.6013765414396329,
"repo_name": "qedi-r/home-assistant",
"id": "bd73c700e65471862dba7a486589c283e5bd5b65",
"size": "3487",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "homeassistant/components/geniushub/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18564720"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
} |
from .cartridges import NromCartridge
from .rom_file import RomFile
class CartridgeFactory:
cartridge_map = {
0: NromCartridge,
}
@classmethod
def create(cls, filename):
rom_file = RomFile.load(filename)
return cls.cartridge_map[rom_file.header.mapper](rom_file)
| {
"content_hash": "13458b409a985aabcd7627ed4d24d32f",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 66,
"avg_line_length": 21.928571428571427,
"alnum_prop": 0.6775244299674267,
"repo_name": "Hexadorsimal/pynes",
"id": "d8682331d31e6986253e9253f42750334561bd20",
"size": "307",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nes/bus/devices/cartridge/cartridge_factory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42305"
}
],
"symlink_target": ""
} |
pytest_plugins = "pytester",
def test_passing_expect(testdir):
testdir.makepyfile(
"""
def test_func(expect):
expect(1 == 1)
""")
result = testdir.runpytest()
assert '1 passed' in result.stdout.str()
def test_failing_expect(testdir):
testdir.makepyfile(
"""
def test_func(expect):
expect(1 == 2)
""")
result = testdir.runpytest()
assert '1 failed' in result.stdout.str()
assert 'Failed Expectations:1' in result.stdout.str()
def test_passing_expect_doesnt_cloak_assert(testdir):
testdir.makepyfile(
"""
def test_func(expect):
expect(1 == 1)
assert 1 == 2
""")
result = testdir.runpytest()
assert '1 failed' in result.stdout.str()
assert 'AssertionError' in result.stdout.str()
def test_failing_expect_doesnt_cloak_assert(testdir):
testdir.makepyfile(
"""
def test_func(expect):
expect(1 == 2)
assert 1 == 2
""")
result = testdir.runpytest()
assert '1 failed' in result.stdout.str()
assert 'AssertionError' in result.stdout.str()
assert 'Failed Expectations:1' in result.stdout.str()
def test_msg_is_in_output(testdir):
testdir.makepyfile(
"""
def test_func(expect):
a = 1
b = 2
expect(a == b, 'a:%s b:%s' % (a,b))
""")
result = testdir.runpytest()
assert '1 failed' in result.stdout.str()
assert 'Failed Expectations:1' in result.stdout.str()
assert 'a:1 b:2' in result.stdout.str()
| {
"content_hash": "78e0f6be58f00bc443f25cd811d524c9",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 57,
"avg_line_length": 27.23728813559322,
"alnum_prop": 0.568139390168015,
"repo_name": "okken/pytest-expect",
"id": "330b093842932e70782d746b520dccde163b0d86",
"size": "1607",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_expect.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3929"
}
],
"symlink_target": ""
} |
"""Support for custom shell commands to turn a switch on/off."""
import logging
import subprocess
import voluptuous as vol
from homeassistant.components.switch import (
ENTITY_ID_FORMAT,
PLATFORM_SCHEMA,
SwitchEntity,
)
from homeassistant.const import (
CONF_COMMAND_OFF,
CONF_COMMAND_ON,
CONF_COMMAND_STATE,
CONF_FRIENDLY_NAME,
CONF_SWITCHES,
CONF_VALUE_TEMPLATE,
)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
SWITCH_SCHEMA = vol.Schema(
{
vol.Optional(CONF_COMMAND_OFF, default="true"): cv.string,
vol.Optional(CONF_COMMAND_ON, default="true"): cv.string,
vol.Optional(CONF_COMMAND_STATE): cv.string,
vol.Optional(CONF_FRIENDLY_NAME): cv.string,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_SWITCHES): cv.schema_with_slug_keys(SWITCH_SCHEMA)}
)
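# Illustrative configuration sketch (entity name and commands are placeholders,
# not taken from the original source). In configuration.yaml the schema above
# corresponds to something like:
#
#   switch:
#     - platform: command_line
#       switches:
#         kitchen_light:
#           command_on: switch_command on kitchen
#           command_off: switch_command off kitchen
#           command_state: switch_command state kitchen
#           value_template: '{{ value == "1" }}'
#           friendly_name: Kitchen light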
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Find and return switches controlled by shell commands."""
devices = config.get(CONF_SWITCHES, {})
switches = []
for object_id, device_config in devices.items():
value_template = device_config.get(CONF_VALUE_TEMPLATE)
if value_template is not None:
value_template.hass = hass
switches.append(
CommandSwitch(
hass,
object_id,
device_config.get(CONF_FRIENDLY_NAME, object_id),
device_config.get(CONF_COMMAND_ON),
device_config.get(CONF_COMMAND_OFF),
device_config.get(CONF_COMMAND_STATE),
value_template,
)
)
if not switches:
_LOGGER.error("No switches added")
return False
add_entities(switches)
class CommandSwitch(SwitchEntity):
"""Representation a switch that can be toggled using shell commands."""
def __init__(
self,
hass,
object_id,
friendly_name,
command_on,
command_off,
command_state,
value_template,
):
"""Initialize the switch."""
self._hass = hass
self.entity_id = ENTITY_ID_FORMAT.format(object_id)
self._name = friendly_name
self._state = False
self._command_on = command_on
self._command_off = command_off
self._command_state = command_state
self._value_template = value_template
@staticmethod
def _switch(command):
"""Execute the actual commands."""
_LOGGER.info("Running command: %s", command)
success = subprocess.call(command, shell=True) == 0 # nosec # shell by design
if not success:
_LOGGER.error("Command failed: %s", command)
return success
@staticmethod
def _query_state_value(command):
"""Execute state command for return value."""
_LOGGER.info("Running state command: %s", command)
try:
return_value = subprocess.check_output(
command, shell=True # nosec # shell by design
)
return return_value.strip().decode("utf-8")
except subprocess.CalledProcessError:
_LOGGER.error("Command failed: %s", command)
@staticmethod
def _query_state_code(command):
"""Execute state command for return code."""
_LOGGER.info("Running state command: %s", command)
return subprocess.call(command, shell=True) == 0 # nosec # shell by design
@property
def should_poll(self):
"""Only poll if we have state command."""
return self._command_state is not None
@property
def name(self):
"""Return the name of the switch."""
return self._name
@property
def is_on(self):
"""Return true if device is on."""
return self._state
@property
def assumed_state(self):
"""Return true if we do optimistic updates."""
return self._command_state is None
def _query_state(self):
"""Query for state."""
if not self._command_state:
_LOGGER.error("No state command specified")
return
if self._value_template:
return CommandSwitch._query_state_value(self._command_state)
return CommandSwitch._query_state_code(self._command_state)
def update(self):
"""Update device state."""
if self._command_state:
payload = str(self._query_state())
if self._value_template:
payload = self._value_template.render_with_possible_json_value(payload)
self._state = payload.lower() == "true"
def turn_on(self, **kwargs):
"""Turn the device on."""
if CommandSwitch._switch(self._command_on) and not self._command_state:
self._state = True
self.schedule_update_ha_state()
def turn_off(self, **kwargs):
"""Turn the device off."""
if CommandSwitch._switch(self._command_off) and not self._command_state:
self._state = False
self.schedule_update_ha_state()
| {
"content_hash": "4c8fb643d6465a01d81a57efba6a814a",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 87,
"avg_line_length": 30.341176470588234,
"alnum_prop": 0.6033346258239628,
"repo_name": "mKeRix/home-assistant",
"id": "7f62970b63997fe316523a348c345a8945690db1",
"size": "5158",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "homeassistant/components/command_line/switch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1466026"
},
{
"name": "Python",
"bytes": "4770710"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "12407"
}
],
"symlink_target": ""
} |
import argparse
from datetime import date
from modules.scrape_edgar_index import EDGAR_INDEX
from modules.scrape_edgar_forms import EDGAR_INDEX_FORMS
from generic.generic_meta_enrich import GENERIC_META_ENRICH
from generic.logger import LOGGER
from enrich_modules.compute_symbology import TO_SYMBOLOGY
def main():
parser = argparse.ArgumentParser(description='Scrape EDGAR indices')
parser.add_argument('--log-file',
type=str,
dest='log_file',
action='store',
required=True)
parser.add_argument('--index',
type=str,
dest='index',
action="store")
parser.add_argument('--min-year',
type=int,
dest='min_year',
action="store",
default=2011)
parser.add_argument('--max-year',
type=int,
dest='max_year',
action="store",
default=int(date.today().year))
parser.add_argument('--most-recent',
dest='most_recent',
action="store_true")
parser.add_argument("--back-fill",
action='store_true')
parser.add_argument('--date',
type=str,
dest='date',
action="store")
parser.add_argument("--start-date",
type=str,
action='store',
required=True)
parser.add_argument("--end-date",
type=str,
action='store',
default=date.today().strftime('%Y-%m-%d'))
parser.add_argument("--section",
type=str,
action='store',
default='both')
parser.add_argument("--form-types",
type=str,
action='store',
required=True)
parser.add_argument('--expected',
type=str,
dest='expected',
action="store")
parser.add_argument('--config-path',
type=str,
action='store',
default='../config.json')
parser.add_argument('--last-week',
dest='last_week',
action="store_true")
args = parser.parse_args()
logger = LOGGER('scrape_edgar', args.log_file).create_parent()
ei = EDGAR_INDEX(args, 'scrape_edgar')
eif = EDGAR_INDEX_FORMS(args, 'scrape_edgar')
gme = GENERIC_META_ENRICH(args, 'scrape_edgar')
ts = TO_SYMBOLOGY(args, 'scrape_edgar')
logger.info('[EDGAR]|begin indexing')
doc_count = ei.main()
gme.main(doc_count, 'edgar_index_cat')
logger.info('[EDGAR]|indexing ended')
logger.info('[EDGARFORMS]|begin indexing')
doc_count = eif.main()
gme.main(doc_count, 'edgar_forms_cat')
logger.info('[EDGARFORMS]|forms indexing ended')
ts.update_symbology('edgar')
if __name__ == "__main__":
main()
| {
"content_hash": "76f30ce5ea6922ff9bb71d592bb37fc2",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 72,
"avg_line_length": 35.78021978021978,
"alnum_prop": 0.4781941031941032,
"repo_name": "gophronesis/ernest",
"id": "5e39ceabd22ea3248ce5835e8a544d3bd2148308",
"size": "3282",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scrape/scrape-edgar.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "OpenEdge ABL",
"bytes": "23601"
},
{
"name": "Python",
"bytes": "484174"
},
{
"name": "R",
"bytes": "12430"
},
{
"name": "Shell",
"bytes": "21985"
}
],
"symlink_target": ""
} |
import struct
from shockabsorber.model.sections import Section, SectionMap
from shockabsorber.loader.util import SeqBuffer
def create_section_map(f, loader_context):
return find_and_read_section(f, "mmap", loader_context)
def find_and_read_section(f, tag_to_find, loader_context):
while True:
xheader = f.read(8)
buf = SeqBuffer(xheader, loader_context.is_little_endian)
tag = buf.readTag()
[size] = buf.unpack('>i', '<i')
print(" tag=%s" % tag)
if tag==tag_to_find:
blob = f.read(size)
return parse_mmap_section(blob, f, loader_context)
else:
f.seek(size, 1)
def parse_mmap_section(blob, file, loader_context):
buf = SeqBuffer(blob, loader_context.is_little_endian)
[v1,v2,nElems,nUsed,junkPtr,v3,freePtr] = buf.unpack('>HHiiiii', '<HHiiiii')
print("mmap header: %s" % [v1,v2,nElems,nUsed,junkPtr,v3,freePtr])
sections = []
for i in range(nUsed):
tag = buf.readTag()
[size, offset, w1,w2, link] = buf.unpack('>IIhhi', '<IIhhi')
#print("mmap entry: %s" % [tag, size, offset, w1,w2, link])
if tag=="free" or tag=="junk":
section = NullSection(tag)
else:
section = SectionImpl(tag, size, offset, file, loader_context)
sections.append(section)
return SectionMap(sections)
class SectionImpl(Section): #------------------------------
def __init__(self,tag,size,offset, file, loader_context):
Section.__init__(self,tag,size)
self.offset = offset
self.file = file
self.loader_context = loader_context
def read_bytes(self):
file = self.file
file.seek(self.offset)
xheader = file.read(8)
buf = SeqBuffer(xheader, self.loader_context.is_little_endian)
tag = buf.readTag()
#[dummy_size] = buf.unpack('>i', '<i')
if tag != self.tag:
raise Exception("section header is actually %s, not %s as expected" % (tag, self.tag))
return file.read(self.size)
#--------------------------------------------------
class NullSection(Section): #------------------------------
def __init__(self,tag):
Section.__init__(self,tag,-1)
def read_bytes(self):
return None
#--------------------------------------------------
| {
"content_hash": "ed3d02abda957ce5b2e9b7137d3966f8",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 98,
"avg_line_length": 35.95384615384615,
"alnum_prop": 0.5614035087719298,
"repo_name": "Brian151/OpenShockwave",
"id": "f1597155ff0f57bde7300f0d83332e0f777e8ca9",
"size": "2401",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/imports/shockabsorber/loader/dxr_envelope.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "776"
},
{
"name": "HTML",
"bytes": "61707"
},
{
"name": "JavaScript",
"bytes": "795"
}
],
"symlink_target": ""
} |
"""Aid in submitting information to MusicBrainz.
This plugin allows the user to print track information in a format that is
parseable by the MusicBrainz track parser [1]. Programmatic submitting is not
implemented by MusicBrainz yet.
[1] http://wiki.musicbrainz.org/History:How_To_Parse_Track_Listings
"""
from __future__ import division, absolute_import, print_function
from beets.autotag import Recommendation
from beets.plugins import BeetsPlugin
from beets.ui.commands import PromptChoice
from beetsplug.info import print_data
class MBSubmitPlugin(BeetsPlugin):
def __init__(self):
super(MBSubmitPlugin, self).__init__()
self.config.add({
'format': u'$track. $title - $artist ($length)',
'threshold': 'medium',
})
# Validate and store threshold.
self.threshold = self.config['threshold'].as_choice({
'none': Recommendation.none,
'low': Recommendation.low,
'medium': Recommendation.medium,
'strong': Recommendation.strong
})
self.register_listener('before_choose_candidate',
self.before_choose_candidate_event)
def before_choose_candidate_event(self, session, task):
if task.rec <= self.threshold:
return [PromptChoice(u'p', u'Print tracks', self.print_tracks)]
def print_tracks(self, session, task):
for i in task.items:
print_data(None, i, self.config['format'].as_str())
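# Illustrative output sketch (track values are hypothetical): with the default
# format string above, print_tracks() emits one line per item, e.g.
#   1. Some Title - Some Artist (3:41)
# in the form expected by the MusicBrainz track parser referenced in the
# module docstring.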
| {
"content_hash": "9f50abcc7ddbb88ec9dd76cbf58be3d5",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 77,
"avg_line_length": 33.333333333333336,
"alnum_prop": 0.6533333333333333,
"repo_name": "artemutin/beets",
"id": "58b357dd3375ce8cefcad921693b0f355ec4d607",
"size": "2188",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "beetsplug/mbsubmit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2951"
},
{
"name": "HTML",
"bytes": "3307"
},
{
"name": "JavaScript",
"bytes": "85950"
},
{
"name": "Python",
"bytes": "1839671"
},
{
"name": "Shell",
"bytes": "7413"
}
],
"symlink_target": ""
} |
from django.core.management.base import BaseCommand, CommandError
from data_collection.models import *
from extraction.models import FirmTrainingSet
from django.conf import settings
from nanospider import Spider
from mlscrape import PageClassifier, ElementClassifier
import os, json
class Command(BaseCommand):
help = 'Trains on a firm\'s site'
def add_arguments(self, parser):
parser.add_argument('firm_id', nargs='+', type=int)
def handle(self, *args, **options):
from nanospider.spider import Spider
scrape_dir = os.path.join(settings.MODEL_DIR, 'scrape')
model_dir = os.path.join(settings.MODEL_DIR, 'model')
for data_dir in (scrape_dir, model_dir):
if not os.path.exists(data_dir):
os.mkdir(data_dir)
for firm_id in options['firm_id']:
try:
firm = Firm.objects.get(pk=firm_id)
except Firm.DoesNotExist:
raise CommandError('Firm "%s" does not exist' % firm_id)
self.stdout.write("Training for firm \"%s\"\n" % firm.name)
self.train_page_classifier(firm)
self.train_element_classifier(firm)
self.stdout.write('Successfully trained models for firm "%s"' % firm.name)
def train_page_classifier(self, firm):
fts = FirmTrainingSet.get_for_firm(firm)
spider = Spider(firm.domain, os.path.join(settings.MODEL_DIR, 'scrape', str(fts.id) + ".db"), workers=4, retry_attempts=2)
self.stdout.write("Page classifier:\n")
model = PageClassifier(os.path.join(settings.MODEL_DIR, 'model', str(fts.id) + "_page.tgm"))
self.stdout.write('Retrieving page features...\n')
bio_pages = set()
non_bio_pages = set()
for view_log in ViewLog.objects.filter(firm=firm, session__user__collectionsettings__is_test_user=False):
bio_pages.update(view_log.bio_pages)
non_bio_pages.update(view_log.non_bio_pages)
for cat, pages in (('bio', bio_pages), ('nonbio', non_bio_pages)):
for url in pages:
self.stdout.write(' * ' + url)
try:
page = spider.get(url)
except:
continue
if (page.text.strip()):
model.add_page(page, cat)
self.stdout.write('Done (processed %s pages).\n' % (len(bio_pages) + len(non_bio_pages)))
self.stdout.write('Training... ')
model.train()
model.save()
self.stdout.write('done.\n')
self.stdout.write('Testing classifier on training data...\n')
total_count = 0
correct_count = 0
for cat, pages in (('bio', bio_pages), ('nonbio', non_bio_pages)):
for url in pages:
try:
page = spider.get(url)
except:
continue
if not page.text.strip():
continue
prediction = str(model.predict(page))
print url, json.dumps(cat), json.dumps(prediction), "true" if cat == prediction else "false"
total_count += 1
correct_count += 1 if cat == prediction else 0
self.stdout.write('Accuracy: %s%%; ' % int(round(100 * float(correct_count) / total_count)))
if float(correct_count) / total_count >= 0.8:
self.stdout.write('continuing...\n')
fts.page_classifier_trained = True
fts.save()
else:
self.stdout.write('giving up.\n')
raise CommandError('Page classifier training failed.')
def train_element_classifier(self, firm):
fts = FirmTrainingSet.get_for_firm(firm)
spider = Spider(firm.domain, os.path.join(settings.MODEL_DIR, 'scrape', str(fts.id) + ".db"), workers=4, retry_attempts=2)
self.stdout.write("Element classifier:\n")
model = ElementClassifier(os.path.join(settings.MODEL_DIR, 'model', str(fts.id) + "_element.tgm"))
self.stdout.write('Retrieving element features... ')
for bio_page in BioPage.objects.filter(firm=firm, session__user__collectionsettings__is_test_user=False):
xpaths = []
if len(bio_page.data['people']) > 1:
raise CommandError('Extracting multiple people from a bio page is currently unsupported.')
person = bio_page.data['people'][0]
xpaths.append((person['name']['container_xpath'], 'name'))
for bio in person['bio']:
xpaths.append((bio['container_xpath'], 'bio'))
try:
page = spider.get(bio_page.url)
except:
continue
if (page.text.strip()):
model.add_page(page, xpaths)
self.stdout.write('Training... ')
model.train()
model.save()
self.stdout.write('done.\n')
self.stdout.write('Testing classifier on training data...\n')
total_count = 0
correct_count = 0
for bio_page in BioPage.objects.filter(firm=firm, session__user__collectionsettings__is_test_user=False):
xpaths = []
for person in bio_page.data['people']:
xpaths.append((person['name']['container_xpath'], 'name'))
for bio in person['bio']:
xpaths.append((bio['container_xpath'], 'bio'))
page = spider.get(bio_page.url)
results = model.test_xpaths(page, xpaths)
for result in results:
total_count += 1
if result['expected_label'] == result['got_label']:
correct_count += 1
self.stdout.write('Accuracy: %s%%; ' % int(round(100 * float(correct_count) / total_count)))
if float(correct_count) / total_count >= 0.8:
self.stdout.write('continuing...\n')
fts.element_classifier_trained = True
fts.save()
else:
self.stdout.write('giving up.\n')
raise CommandError('Element classifier training failed.')
| {
"content_hash": "595c156e7f64bcbb0a7bd839c7c13ebf",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 130,
"avg_line_length": 38.82802547770701,
"alnum_prop": 0.5680774278215223,
"repo_name": "sunlightlabs/hanuman",
"id": "59b41b07a8e82e3b4d58692c3015a752b6f1b866",
"size": "6096",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "extraction/management/commands/train.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2447"
},
{
"name": "HTML",
"bytes": "25832"
},
{
"name": "JavaScript",
"bytes": "32886"
},
{
"name": "Python",
"bytes": "43098"
}
],
"symlink_target": ""
} |
from inputs.bibleIn import BibleInput
from inputs.tldrnewsIn import TLDRNewsInput
from inputs.weatherIn import WUnderGroundInput as WeatherIn
from processors.bibleProc import BibleProc
from processors.tldrnewsProc import TLDRNewsProc
from processors.weatherProc import WeatherProc
from outputs import textOut
from outputs import htmlOut
from outputs import pdfOut
kindle = ""
def run():
data = []
print("fectching bible")
data.append(BibleProc().consume(BibleInput().fetch()))
print("done. Fetching news")
data.append(TLDRNewsProc().consume(TLDRNewsInput().fetch()))
print("done. Fetching weather")
data.append(WeatherProc().consume(WeatherIn().fetch()))
print("done. outputing")
textOut.put(data,kindle+"dailyNews.txt")
htmlOut.put(data,kindle+"dailyNews.html")
pdfOut.put(data,kindle+"dailyNews")
print("Network complete")
if __name__ == "__main__":
run()
| {
"content_hash": "2f1f7004e076766d43874b5f5b9dd827",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 61,
"avg_line_length": 29.2,
"alnum_prop": 0.769406392694064,
"repo_name": "Narcolapser/Little-News-Processor",
"id": "19f6db097304e0f7b7aee7c3ee4c59a37ab87f32",
"size": "876",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Screw up/secondNetwork.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "72699"
}
],
"symlink_target": ""
} |
"""spielberg_proj URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
]
| {
"content_hash": "cfe55bcfaa9b0dbbfc26f92d969e677e",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 79,
"avg_line_length": 36.666666666666664,
"alnum_prop": 0.7012987012987013,
"repo_name": "carlmjohnson/spielberg",
"id": "76fa7ee98b5fbf48fee0b228f9b9225f7fd77d81",
"size": "770",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spielberg_proj/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6767"
}
],
"symlink_target": ""
} |
"""
Display service status for Icinga2.
Configuration parameters:
base_url: the base url to the icinga-web2 services list (default '')
ca: (default True)
cache_timeout: how often the data should be updated (default 60)
disable_acknowledge: enable or disable counting of acknowledged
service problems (default False)
format: define a format string like "CRITICAL: %d"
(default '{status_name}: {count}')
password: password to authenticate against the icinga-web2 interface
(default '')
status: set the status you want to obtain
(0=OK,1=WARNING,2=CRITICAL,3=UNKNOWN)
(default 0)
url_parameters: (default '?service_state={service_state}&format=json')
user: username to authenticate against the icinga-web2 interface
(default '')
@author Ben Oswald <ben.oswald@root-space.de>
@license BSD License <https://opensource.org/licenses/BSD-2-Clause>
@source https://github.com/nazco/i3status-modules
"""
import requests
STATUS_NAMES = {0: 'OK', 1: 'WARNING', 2: 'CRITICAL', 3: 'UNKNOWN'}
STRING_NOT_CONFIGURED = 'not configured'
class Py3status:
"""
"""
# available configuration parameters
base_url = ''
ca = True
cache_timeout = 60
disable_acknowledge = False
format = '{status_name}: {count}'
password = ''
status = 0
url_parameters = "?service_state={service_state}&format=json"
user = ''
def post_config_hook(self):
if not self.base_url:
raise Exception(STRING_NOT_CONFIGURED)
def get_status(self):
response = {
'cached_until': self.py3.time_in(self.cache_timeout),
'full_text': self.py3.safe_format(
self.format,
dict(
status_name=STATUS_NAMES.get(self.status, "INVALID STATUS"),
count=self._query_service_count(self.status)))
}
return response
def _query_service_count(self, state):
url_parameters = self.url_parameters
if self.disable_acknowledge:
url_parameters = url_parameters + "&service_handled=0"
result = requests.get(
self.base_url + url_parameters.format(service_state=state),
auth=(self.user, self.password),
verify=self.ca)
return len(result.json())
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
| {
"content_hash": "0aff54af12f46ad8ae66d664a13e62ce",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 80,
"avg_line_length": 32.23376623376623,
"alnum_prop": 0.6273166800966962,
"repo_name": "alexoneill/py3status",
"id": "cd95a9ca58366c5b2ac1e23df5d7035fd5c41239",
"size": "2506",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py3status/modules/icinga2.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "814174"
}
],
"symlink_target": ""
} |
"""Test Motor by testing that Synchro, a fake PyMongo implementation built on
top of Motor, passes the same unittests as PyMongo.
This program monkey-patches sys.modules, so run it alone, rather than as part
of a larger test suite.
"""
import importlib
import importlib.abc
import importlib.machinery
import sys
import nose
from nose.config import Config
from nose.plugins import Plugin
from nose.plugins.manager import PluginManager
from nose.plugins.skip import Skip
from nose.plugins.xunit import Xunit
from nose.selector import Selector
import synchro
excluded_modules = [
# Exclude some PyMongo tests that can't be applied to Synchro.
"test.test_examples",
"test.test_threads",
"test.test_pooling",
"test.test_saslprep",
# Complex PyMongo-specific mocking.
"test.test_replica_set_reconfig",
# Accesses PyMongo internals.
"test.test_retryable_writes",
# Accesses PyMongo internals. Tested directly in Motor.
"test.test_session",
# Deprecated in PyMongo, removed in Motor 2.0.
"test.test_gridfs",
# Skip mypy tests.
"test.test_mypy",
]
excluded_tests = [
# Motor's reprs aren't the same as PyMongo's.
"*.test_repr",
"TestClient.test_unix_socket",
# Motor extends the handshake metadata.
"ClientUnitTest.test_metadata",
# Lazy-connection tests require multithreading; we test concurrent
# lazy connection directly.
"TestClientLazyConnect.*",
# Motor doesn't support forking or threading.
"*.test_interrupt_signal",
"TestSCRAM.test_scram_threaded",
"TestGSSAPI.test_gssapi_threaded",
"TestCursor.test_concurrent_close",
# These are in test_gridfs_bucket.
"TestGridfs.test_threaded_reads",
"TestGridfs.test_threaded_writes",
# Can't do MotorCollection(name, create=True), Motor constructors do no I/O.
"TestCollection.test_create",
# Requires indexing / slicing cursors, which Motor doesn't do, see MOTOR-84.
"TestCollection.test_min_query",
"TestCursor.test_clone",
"TestCursor.test_clone_empty",
"TestCursor.test_getitem_numeric_index",
"TestCursor.test_getitem_slice_index",
"TestCursor.test_tailable",
"TestRawBatchCursor.test_get_item",
"TestRawBatchCommandCursor.test_get_item",
# No context-manager protocol for MotorCursor.
"TestCursor.test_with_statement",
# Motor's cursors initialize lazily.
"TestRawBatchCommandCursor.test_monitoring",
# Can't iterate a GridOut in Motor.
"TestGridFile.test_iterator",
"TestGridfs.test_missing_length_iter",
# No context-manager protocol for MotorGridIn, and can't set attrs.
"TestGridFile.test_context_manager",
"TestGridFile.test_grid_in_default_opts",
"TestGridFile.test_set_after_close",
# GridOut always connects lazily in Motor.
"TestGridFile.test_grid_out_lazy_connect",
"TestGridfs.test_gridfs_lazy_connect", # In test_gridfs_bucket.
# Complex PyMongo-specific mocking.
"*.test_wire_version",
"TestClient.test_heartbeat_frequency_ms",
"TestExhaustCursor.*",
"TestHeartbeatMonitoring.*",
"TestMongoClientFailover.*",
"TestMongosLoadBalancing.*",
"TestSSL.test_system_certs_config_error",
"TestCMAP.test_cmap_wait_queue_timeout_must_aggressively_timeout_threads_enqueued_longer_than_waitQueueTimeoutMS",
# Accesses PyMongo internals.
"TestClient.test_close_kills_cursors",
"TestClient.test_stale_getmore",
"TestClient.test_direct_connection",
"TestCollection.test_aggregation_cursor",
"TestCommandAndReadPreference.*",
"TestCommandMonitoring.test_get_more_failure",
"TestCommandMonitoring.test_sensitive_commands",
"TestCursor.test_allow_disk_use",
"TestCursor.test_close_kills_cursor_synchronously",
"TestCursor.test_delete_not_initialized",
"TestGridFile.test_grid_out_cursor_options",
"TestGridFile.test_survive_cursor_not_found",
"TestMaxStaleness.test_last_write_date",
"TestSelections.test_bool",
"TestCollation.*",
"TestCollection.test_find_one_and_write_concern",
"TestCollection.test_write_error_text_handling",
"TestBinary.test_uuid_queries",
"TestCursor.test_comment",
"TestCursor.test_where",
"TestGridfs.test_gridfs_find",
# Tests that use "authenticate" or "logoout", removed in Motor 2.0.
"TestSASLPlain.test_sasl_plain_bad_credentials",
"TestSCRAM.test_scram",
"TestSCRAMSHA1.test_scram_sha1",
# Uses "collection_names", deprecated in PyMongo, removed in Motor 2.0.
"TestSingleSecondaryOk.test_reads_from_secondary",
# Slow.
"TestDatabase.test_list_collection_names",
# MOTOR-425 these tests fail with duplicate key errors.
"TestClusterChangeStreamsWCustomTypes.*",
"TestCollectionChangeStreamsWCustomTypes.*",
"TestDatabaseChangeStreamsWCustomTypes.*",
# Tests that use warnings.catch_warnings which don't show up in Motor.
"TestCursor.test_min_max_without_hint",
# TODO: MOTOR-606
"TestTransactionsConvenientAPI.*",
"TestTransactions.test_create_collection",
# Motor's change streams need Python 3.5 to support async iteration but
# these change streams tests spawn threads which don't work without an
# IO loop.
"*.test_next_blocks",
"*.test_aggregate_cursor_blocks",
# Can't run these tests because they use threads.
"*.test_ignore_stale_connection_errors",
"*.test_pool_paused_error_is_retryable",
# Needs synchro.GridFS class, see MOTOR-609.
"TestTransactions.test_gridfs_does_not_support_transactions",
# PYTHON-3228 _tmp_session should validate session input
"*.test_helpers_with_let",
# Relies on comment being in the method signatures, which would force use
# to rewrite much of AgnosticCollection.
"*.test_collection_helpers",
"*.test_database_helpers",
"*.test_client_helpers",
# This test is too slow given all of the wrapping logic.
"*.test_transaction_starts_with_batched_write",
# This test is too flaky given all the wrapping logic.
"TestProse.test_load_balancing",
# This feature is going away in PyMongo 5
"*.test_iteration",
# MD5 is deprecated
"*.test_md5",
# Causes a deadlock.
"TestFork.*",
# Also causes a deadlock.
"TestClientSimple.test_fork",
# These methods are picked up by nose despite not being a unittest.
"TestRewrapWithSeparateClientEncryption.run_test",
"TestCustomEndpoint.run_test_expected_success",
"TestDataKeyDoubleEncryption.run_test",
# Motor does not support CSOT.
"TestCsotGridfsFind.*",
# These tests are failing right now.
"TestUnifiedFindShutdownError.test_Concurrent_shutdown_error_on_find",
"TestUnifiedInsertShutdownError.test_Concurrent_shutdown_error_on_insert",
"TestUnifiedPoolClearedError.test_PoolClearedError_does_not_mark_server_unknown",
]
excluded_modules_matched = set()
excluded_tests_matched = set()
class SynchroNosePlugin(Plugin):
name = "synchro"
def __init__(self, *args, **kwargs):
# We need a standard Nose selector in order to filter out methods that
# don't match TestSuite.test_*
self.selector = Selector(config=None)
super().__init__(*args, **kwargs)
def configure(self, options, conf):
super().configure(options, conf)
self.enabled = True
def wantModule(self, module):
# Depending on PYTHONPATH, Motor's direct tests may be imported - don't
# run them now.
if module.__name__.startswith("test.test_motor_"):
return False
for module_name in excluded_modules:
if module_name.endswith("*"):
if module.__name__.startswith(module_name.rstrip("*")):
# E.g., test_motor_cursor matches "test_motor_*".
excluded_modules_matched.add(module_name)
return False
elif module.__name__ == module_name:
excluded_modules_matched.add(module_name)
return False
return True
def wantFunction(self, fn):
# PyMongo's test generators run at import time; tell Nose not to run
# them as unittests.
if fn.__name__ in (
"test_cases",
"create_spec_test",
"create_test",
"create_tests",
"create_connection_string_test",
"create_document_test",
"create_operation_test",
"create_selection_tests",
"generate_test_classes",
):
return False
def wantClass(self, cls):
# PyMongo's test generator classes run at import time; tell Nose not
# to run them as unittests.
if cls.__name__ in ("TestCreator",):
return False
def wantMethod(self, method):
# Run standard Nose checks on name, like "does it start with test_"?
if not self.selector.matches(method.__name__):
return False
if method.__name__ in ("run_test_ops", "maybe_skip_test"):
return False
for excluded_name in excluded_tests:
classname = method.__self__.__class__.__name__
# Should we exclude this method's whole TestCase?
suite_name, method_name = excluded_name.split(".")
suite_matches = suite_name in [classname, "*"]
# Should we exclude this particular method?
method_matches = method.__name__ == method_name or method_name == "*"
if suite_matches and method_matches:
excluded_tests_matched.add(excluded_name)
return False
return True
class SynchroModuleFinder(importlib.abc.MetaPathFinder):
def __init__(self):
self._loader = SynchroModuleLoader()
def find_spec(self, fullname, path, target=None):
if self._loader.patch_spec(fullname):
return importlib.machinery.ModuleSpec(fullname, self._loader)
# Let regular module search continue.
return None
class SynchroModuleLoader(importlib.abc.Loader):
def patch_spec(self, fullname):
parts = fullname.split(".")
if parts[-1] in ("gridfs", "pymongo"):
# E.g. "import pymongo"
return True
elif len(parts) >= 2 and parts[-2] in ("gridfs", "pymongo"):
# E.g. "import pymongo.mongo_client"
return True
return False
def exec_module(self, module):
pass
def create_module(self, spec):
if self.patch_spec(spec.name):
return synchro
# Let regular module search continue.
return None
if __name__ == "__main__":
try:
# Enable the fault handler to dump the traceback of each running
# thread
# after a segfault.
import faulthandler
faulthandler.enable()
# Dump the tracebacks of all threads after 25 minutes.
if hasattr(faulthandler, "dump_traceback_later"):
faulthandler.dump_traceback_later(25 * 60)
except ImportError:
pass
# Monkey-patch all pymongo's unittests so they think Synchro is the
# real PyMongo.
sys.meta_path[0:0] = [SynchroModuleFinder()]
# Delete the cached pymongo/gridfs modules so that SynchroModuleFinder will
# be invoked in Python 3, see
# https://docs.python.org/3/reference/import.html#import-hooks
for n in [
"pymongo",
"pymongo.collection",
"pymongo.client_session",
"pymongo.command_cursor",
"pymongo.change_stream",
"pymongo.cursor",
"pymongo.encryption",
"pymongo.encryption_options",
"pymongo.mongo_client",
"pymongo.database",
"gridfs",
"gridfs.grid_file",
]:
sys.modules.pop(n)
if "--check-exclude-patterns" in sys.argv:
check_exclude_patterns = True
sys.argv.remove("--check-exclude-patterns")
else:
check_exclude_patterns = False
success = nose.run(
config=Config(plugins=PluginManager()), addplugins=[SynchroNosePlugin(), Skip(), Xunit()]
)
if not success:
sys.exit(1)
if check_exclude_patterns:
unused_module_pats = set(excluded_modules) - excluded_modules_matched
assert not unused_module_pats, "Unused module patterns: %s" % (unused_module_pats,)
unused_test_pats = set(excluded_tests) - excluded_tests_matched
assert not unused_test_pats, "Unused test patterns: %s" % (unused_test_pats,)
| {
"content_hash": "7ca64064a3291c8bf39922d6f28c0293",
"timestamp": "",
"source": "github",
"line_count": 346,
"max_line_length": 118,
"avg_line_length": 36.0606936416185,
"alnum_prop": 0.661296786086399,
"repo_name": "mongodb/motor",
"id": "3b241d90a12dc6051df516d2033300a60e48dd48",
"size": "13056",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "synchro/synchrotest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "607021"
},
{
"name": "Shell",
"bytes": "3406"
}
],
"symlink_target": ""
} |
def orderBy(sortlist, orderby=[], desc=[]):
'''orderBy(sortlist, orderby, desc) >> List
sortlist: list to be sorted
orderby: list of field indexes
desc: list of field indexes that are to be sorted descending'''
orderby.reverse()
for i in orderby:
sortlist.sort(lambda x, y: cmp(*[(x[i], y[i]), (y[i], x[i])][i in desc]))
return sortlist
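# Illustrative usage (hypothetical rows; the recipe relies on Python 2's cmp()
# and list.sort(cmp) interface):
#   rows = [(1, 'b'), (1, 'a'), (2, 'a')]
#   orderBy(rows, orderby=[0, 1], desc=[0])
#   # -> [(2, 'a'), (1, 'a'), (1, 'b')]  (column 0 descending, then column 1)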
| {
"content_hash": "bb2e857052bd98f43a1303845612e230",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 81,
"avg_line_length": 42.111111111111114,
"alnum_prop": 0.6174142480211082,
"repo_name": "ActiveState/code",
"id": "561688e52db1786827c1a51d92db38d5ee3d5e7a",
"size": "379",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/305321_SQLlike_ORDER/recipe-305321.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
} |
"""Tests for the histogram plugin summary generation functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorboard.plugins.histogram import metadata
from tensorboard.plugins.histogram import summary
class SummaryTest(tf.test.TestCase):
def setUp(self):
super(SummaryTest, self).setUp()
tf.reset_default_graph()
np.random.seed(0)
self.gaussian = np.random.normal(size=[500])
def pb_via_op(self, summary_op, feed_dict=None):
actual_pbtxt = tf.Session().run(summary_op, feed_dict=feed_dict or {})
actual_proto = tf.Summary()
actual_proto.ParseFromString(actual_pbtxt)
return actual_proto
def compute_and_check_summary_pb(self,
name='nemo',
data=None,
bucket_count=None,
display_name=None,
description=None,
data_tensor=None,
bucket_count_tensor=None,
feed_dict=None):
"""Use both `op` and `pb` to get a summary, asserting equality.
Returns:
a `Summary` protocol buffer
"""
if data is None:
data = self.gaussian
if data_tensor is None:
data_tensor = tf.constant(data)
if bucket_count_tensor is None:
bucket_count_tensor = bucket_count
op = summary.op(name, data_tensor, bucket_count=bucket_count_tensor,
display_name=display_name, description=description)
pb = summary.pb(name, data, bucket_count=bucket_count,
display_name=display_name, description=description)
pb_via_op = self.pb_via_op(op, feed_dict=feed_dict)
self.assertProtoEquals(pb, pb_via_op)
return pb
def test_metadata(self):
# We're going to assume that the basic metadata is handled the same
# across all data cases (unless explicitly changed).
pb = self.compute_and_check_summary_pb(name='widgets')
self.assertEqual(len(pb.value), 1)
self.assertEqual(pb.value[0].tag, 'widgets/histogram_summary')
summary_metadata = pb.value[0].metadata
self.assertEqual(summary_metadata.display_name, 'widgets')
self.assertEqual(summary_metadata.summary_description, '')
plugin_data = summary_metadata.plugin_data
self.assertEqual(plugin_data.plugin_name, metadata.PLUGIN_NAME)
parsed = metadata.parse_plugin_metadata(plugin_data.content)
self.assertEqual(metadata.PROTO_VERSION, parsed.version)
def test_explicit_display_name_and_description(self):
display_name = 'Widget metrics'
description = 'Tracks widget production; *units*: MacGuffins/hr'
pb = self.compute_and_check_summary_pb(name='widgets',
display_name=display_name,
description=description)
summary_metadata = pb.value[0].metadata
self.assertEqual(summary_metadata.display_name, display_name)
self.assertEqual(summary_metadata.summary_description, description)
plugin_data = summary_metadata.plugin_data
self.assertEqual(plugin_data.plugin_name, metadata.PLUGIN_NAME)
parsed = metadata.parse_plugin_metadata(plugin_data.content)
self.assertEqual(metadata.PROTO_VERSION, parsed.version)
def test_empty_input(self):
pb = self.compute_and_check_summary_pb('nothing_to_see_here', [])
buckets = tf.make_ndarray(pb.value[0].tensor)
np.testing.assert_allclose(buckets, np.array([]).reshape((0, 3)))
def test_empty_input_of_high_rank(self):
pb = self.compute_and_check_summary_pb('move_along', [[[], []], [[], []]])
buckets = tf.make_ndarray(pb.value[0].tensor)
np.testing.assert_allclose(buckets, np.array([]).reshape((0, 3)))
def test_singleton_input(self):
pb = self.compute_and_check_summary_pb('twelve', [12])
buckets = tf.make_ndarray(pb.value[0].tensor)
np.testing.assert_allclose(buckets, np.array([[11.5, 12.5, 1]]))
def test_input_with_all_same_values(self):
pb = self.compute_and_check_summary_pb('twelven', [12, 12, 12])
buckets = tf.make_ndarray(pb.value[0].tensor)
np.testing.assert_allclose(buckets, np.array([[11.5, 12.5, 3]]))
def test_normal_input(self):
bucket_count = 44
pb = self.compute_and_check_summary_pb(data=self.gaussian.reshape((5, -1)),
bucket_count=bucket_count)
buckets = tf.make_ndarray(pb.value[0].tensor)
self.assertEqual(buckets[:, 0].min(), self.gaussian.min())
self.assertEqual(buckets[:, 1].max(), self.gaussian.max())
self.assertEqual(buckets[:, 2].sum(), self.gaussian.size)
np.testing.assert_allclose(buckets[1:, 0], buckets[:-1, 1])
def test_when_shape_not_statically_known(self):
placeholder = tf.placeholder(tf.float64, shape=None)
reshaped = self.gaussian.reshape((25, -1))
self.compute_and_check_summary_pb(data=reshaped,
data_tensor=placeholder,
feed_dict={placeholder: reshaped})
# The proto-equality check is all we need.
def test_when_bucket_count_not_statically_known(self):
placeholder = tf.placeholder(tf.int32, shape=())
bucket_count = 44
pb = self.compute_and_check_summary_pb(
bucket_count=bucket_count,
bucket_count_tensor=placeholder,
feed_dict={placeholder: bucket_count})
buckets = tf.make_ndarray(pb.value[0].tensor)
self.assertEqual(buckets.shape, (bucket_count, 3))
if __name__ == '__main__':
tf.test.main()
| {
"content_hash": "b0b428d3a52c5f12aad31f70c3572878",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 79,
"avg_line_length": 42.44776119402985,
"alnum_prop": 0.6404711673699015,
"repo_name": "ioeric/tensorboard",
"id": "39f1f51351a89a1c28ba63e796415c0a3377732e",
"size": "6401",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorboard/plugins/histogram/summary_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "727010"
},
{
"name": "Java",
"bytes": "26959"
},
{
"name": "JavaScript",
"bytes": "3438"
},
{
"name": "Protocol Buffer",
"bytes": "9258"
},
{
"name": "Python",
"bytes": "1079129"
},
{
"name": "Shell",
"bytes": "7322"
},
{
"name": "TypeScript",
"bytes": "834655"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from . import restcall
from . import cbLogs
def registerUser(system, authenticatedUser, email, password):
newUserCredentials = {
"email": email,
"password": password
}
cbLogs.info("Registering", email + "...")
url = authenticatedUser.url
    # We allow the authenticatedUser to be a developer, device, or user. However, there is a developer-specific endpoint for creating users, so we need some logic here to build the correct URL.
if "/api/v/1/user" not in url and "/api/v/2/devices" not in url:
url += "/admin/user/" + system.systemKey
else:
url += "/reg"
resp = restcall.post(url, headers=authenticatedUser.headers, data=newUserCredentials, sslVerify=system.sslVerify)
try:
newUser = User(system, email, password)
newUser.token = str(resp["user_token"])
newUser.headers["ClearBlade-UserToken"] = newUser.token
cbLogs.info("Successfully registered", email + "as a user!")
return newUser
except TypeError:
cbLogs.error(email, "already exists as a user on this system.")
exit(-1)
class AnonUser(object):
def __init__(self, system):
self.headers = {
"Content-Type": "application/json",
"Accept": "application/json",
"ClearBlade-SystemKey": system.systemKey,
"ClearBlade-SystemSecret": system.systemSecret
}
self.system = system
self.url = system.url + "/api/v/1/user"
self.token = ""
def authenticate(self):
self.headers.pop("ClearBlade-UserToken", None)
try:
cbLogs.info("Authenticating", self.credentials["email"], "as a user...")
resp = restcall.post(self.url + "/auth", headers=self.headers, data=self.credentials, sslVerify=self.system.sslVerify)
except AttributeError:
cbLogs.info("Authenticating as anonymous...")
resp = restcall.post(self.url + "/anon", headers=self.headers, sslVerify=self.system.sslVerify)
self.token = str(resp["user_token"])
self.headers["ClearBlade-UserToken"] = self.token
if self not in self.system.users:
self.system.users.append(self)
cbLogs.info("Successfully authenticated!")
def logout(self):
restcall.post(self.url + "/logout", headers=self.headers, sslVerify=self.system.sslVerify)
if self in self.system.users:
self.system.users.remove(self)
try:
cbLogs.info(self.credentials["email"], "has been logged out.")
except AttributeError:
cbLogs.info("Anonymous user has been logged out.")
def checkAuth(self):
resp = restcall.post(self.url + "/checkauth", headers=self.headers, silent=True, sslVerify=self.system.sslVerify)
try:
return resp["is_authenticated"]
except TypeError:
return False
class User(AnonUser):
def __init__(self, system, email, password):
super(User, self).__init__(system)
self.credentials = {
"email": email,
"password": password
}
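# Editor's note -- hedged usage sketch, not part of the SDK source. Assuming a
# `system` object has already been constructed elsewhere:
#   anon = AnonUser(system)
#   anon.authenticate()                        # POSTs to <system.url>/api/v/1/user/anon
#   user = User(system, "a@b.com", "secret")
#   user.authenticate()                        # POSTs to .../auth with the credentials
#   if user.checkAuth(): ...                   # True while the token is valid
#   user.logout()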
| {
"content_hash": "3a411ebeda1e88ed9e13df2a61c4d486",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 188,
"avg_line_length": 39.78481012658228,
"alnum_prop": 0.6264715240216354,
"repo_name": "ClearBlade/Python-API",
"id": "0cfe55b5a0d937b1f52da65d4de119b7c3c6deb9",
"size": "3143",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "clearblade/Users.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "48900"
}
],
"symlink_target": ""
} |
from scrapy.item import Item, Field
class Kanji(Item):
"""Single kanji item"""
character = Field()
| {
"content_hash": "be87820bd591037ed689c784ca4e89ec",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 35,
"avg_line_length": 21.6,
"alnum_prop": 0.6666666666666666,
"repo_name": "Xifax/suzu-web",
"id": "c1573a12cfb23aa9cd8c16bc6af356fb5e455fd7",
"size": "241",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/hebi/items.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "34163"
},
{
"name": "CoffeeScript",
"bytes": "10429"
},
{
"name": "JavaScript",
"bytes": "1468"
},
{
"name": "Makefile",
"bytes": "699"
},
{
"name": "Python",
"bytes": "65617"
}
],
"symlink_target": ""
} |
import json
import os
import subprocess
from time import sleep
import boto3
from boto3 import Session
def main():
print("Getting InstanceTypes from all regions")
regions = []
regions.extend(Session().get_available_regions("ec2"))
regions.extend(Session().get_available_regions("ec2", partition_name="aws-us-gov"))
regions.extend(Session().get_available_regions("ec2", partition_name="aws-cn"))
print("Found " + str(len(regions)) + " regions")
instances = []
for region in regions:
try:
ec2 = boto3.client("ec2", region_name=region)
offerings = ec2.describe_instance_types()
instances.extend(offerings["InstanceTypes"])
next_token = offerings.get("NextToken", "")
while next_token:
offerings = ec2.describe_instance_types(
NextToken=next_token
)
instances.extend(offerings["InstanceTypes"])
next_token = offerings.get("NextToken", None)
except Exception:
print("Could not fetch instance types from region:", region)
# We don't want it to look like we're DDOS'ing AWS
sleep(1)
print("Parsing data")
result = {}
for instance in instances:
result[instance.get('InstanceType')] = instance
root_dir = (
subprocess.check_output(["git", "rev-parse", "--show-toplevel"])
.decode()
.strip()
)
dest = os.path.join(root_dir, "moto/ec2/resources/instance_types.json")
print("Writing data to {0}".format(dest))
with open(dest, "w") as open_file:
json.dump(result, open_file, sort_keys=True)
if __name__ == "__main__":
main()
| {
"content_hash": "7e7a3f9e22aeabf066949d192340676b",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 87,
"avg_line_length": 31.77777777777778,
"alnum_prop": 0.6025641025641025,
"repo_name": "william-richard/moto",
"id": "d8f0b41882d759bdf1d20f445193de5dfbc269c6",
"size": "1739",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/get_instance_info.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "443"
},
{
"name": "HTML",
"bytes": "5848"
},
{
"name": "Java",
"bytes": "1688"
},
{
"name": "JavaScript",
"bytes": "756"
},
{
"name": "Makefile",
"bytes": "1213"
},
{
"name": "Python",
"bytes": "6637538"
},
{
"name": "Ruby",
"bytes": "188"
},
{
"name": "Scala",
"bytes": "782"
},
{
"name": "Shell",
"bytes": "797"
}
],
"symlink_target": ""
} |
"""Test configs for fully_connected."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_fully_connected_tests(options):
"""Make a set of tests to do fully_connected."""
test_parameters = [{
"shape1": [[3, 3]],
"shape2": [[3, 3]],
"transpose_a": [True, False],
"transpose_b": [True, False],
"constant_filter": [True, False],
"fully_quantize": [False],
"quant_16x8": [False]
}, {
"shape1": [[4, 4], [1, 4], [4]],
"shape2": [[4, 4], [4, 1], [4]],
"transpose_a": [False],
"transpose_b": [False],
"constant_filter": [True, False],
"fully_quantize": [False],
"quant_16x8": [False]
}, {
"shape1": [[40, 37]],
"shape2": [[37, 40]],
"transpose_a": [False],
"transpose_b": [False],
"constant_filter": [True, False],
"fully_quantize": [False],
"quant_16x8": [False]
}, {
"shape1": [[40, 37]],
"shape2": [[40, 37]],
"transpose_a": [False],
"transpose_b": [True],
"constant_filter": [True, False],
"fully_quantize": [False],
"quant_16x8": [False]
}, {
"shape1": [[5, 3]],
"shape2": [[5, 3]],
"transpose_a": [True],
"transpose_b": [False],
"constant_filter": [True, False],
"fully_quantize": [False],
"quant_16x8": [False]
}, {
"shape1": [[1, 3]],
"shape2": [[3, 3]],
"transpose_a": [False],
"transpose_b": [False],
"constant_filter": [True],
"fully_quantize": [True],
"quant_16x8": [False]
}, {
"shape1": [[1, 4], [4]],
"shape2": [[4, 4], [4, 1], [4]],
"transpose_a": [False],
"transpose_b": [False],
"constant_filter": [True],
"fully_quantize": [True],
"quant_16x8": [False]
}, {
"shape1": [[1, 37], [2, 37]],
"shape2": [[37, 40]],
"transpose_a": [False],
"transpose_b": [False],
"constant_filter": [True],
"fully_quantize": [True],
"quant_16x8": [False]
}, {
"shape1": [[1, 3], [2, 3]],
"shape2": [[3, 5], [3, 1]],
"transpose_a": [False],
"transpose_b": [False],
"constant_filter": [True],
"fully_quantize": [True],
"quant_16x8": [False]
}, {
"shape1": [[2, 3]],
"shape2": [[3, 5]],
"transpose_a": [False],
"transpose_b": [False],
"constant_filter": [True],
"fully_quantize": [True],
"quant_16x8": [True]
}]
if options.use_experimental_converter:
test_parameters = test_parameters + [
# Zero in input shape.
{
"shape1": [[0, 3]],
"shape2": [[3, 3]],
"transpose_a": [False],
"transpose_b": [False],
"constant_filter": [True, False],
"fully_quantize": [False],
"quant_16x8": [False]
}
]
def build_graph(parameters):
"""Build a matmul graph given `parameters`."""
input_tensor1 = tf.compat.v1.placeholder(
dtype=tf.float32, name="input1", shape=parameters["shape1"])
# Get input_tensor2 either as a placeholder or constants. Also get a list of
# the input tensors that are represented as placeholders.
if parameters["constant_filter"]:
input_tensor2 = create_tensor_data(
np.float32, parameters["shape2"], min_value=-1, max_value=1)
input_tensors = [input_tensor1]
else:
input_tensor2 = tf.compat.v1.placeholder(
dtype=tf.float32, name="input2", shape=parameters["shape2"])
input_tensors = [input_tensor1, input_tensor2]
out = tf.matmul(
input_tensor1,
input_tensor2,
transpose_a=parameters["transpose_a"],
transpose_b=parameters["transpose_b"])
return input_tensors, [out]
def build_inputs(parameters, sess, inputs, outputs):
# pylint: disable=g-doc-return-or-yield, g-doc-args
"""Build list of input values.
It either contains 1 tensor (input_values1) or
2 tensors (input_values1, input_values2) based on whether the second input
is a constant or variable input.
"""
values = [
create_tensor_data(
np.float32, shape=parameters["shape1"], min_value=-1, max_value=1)
]
if not parameters["constant_filter"]:
values.append(
create_tensor_data(
np.float32, parameters["shape2"], min_value=-1, max_value=1))
return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
expected_tf_failures=14)
| {
"content_hash": "822688ddd16d5e78000471a9373c0f22",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 80,
"avg_line_length": 30.90625,
"alnum_prop": 0.5561172901921132,
"repo_name": "sarvex/tensorflow",
"id": "141bfef6516ff2d9d44fa7ef9ee39ee7455239ac",
"size": "5634",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tensorflow/lite/testing/op_tests/fully_connected.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "148184"
},
{
"name": "C++",
"bytes": "6224499"
},
{
"name": "CSS",
"bytes": "107"
},
{
"name": "HTML",
"bytes": "650478"
},
{
"name": "Java",
"bytes": "53519"
},
{
"name": "JavaScript",
"bytes": "6659"
},
{
"name": "Jupyter Notebook",
"bytes": "777935"
},
{
"name": "Objective-C",
"bytes": "1288"
},
{
"name": "Protocol Buffer",
"bytes": "61743"
},
{
"name": "Python",
"bytes": "3474762"
},
{
"name": "Shell",
"bytes": "45640"
},
{
"name": "TypeScript",
"bytes": "283668"
}
],
"symlink_target": ""
} |
""" An MDI top-level application window. """
from __future__ import absolute_import
# Major package imports.
import wx
# Enthought library imports.
from traits.api import Bool, Instance, Int, Tuple
# Local imports.
from .application_window import ApplicationWindow
from .image_resource import ImageResource
try:
import wx.aui
AUI = True
except ImportError:
AUI = False
class MDIApplicationWindow(ApplicationWindow):
""" An MDI top-level application window.
The application window has support for a menu bar, tool bar and a status
bar (all of which are optional).
Usage: Create a sub-class of this class and override the protected
'_create_contents' method.
"""
#### 'MDIApplicationWindow' interface #####################################
# The workarea background image.
background_image = Instance(ImageResource, ImageResource('background'))
# Should we tile the workarea background image? The alternative is to
# scale it. Be warned that scaling the image allows for 'pretty' images,
# but is MUCH slower than tiling.
tile_background_image = Bool(True)
# WX HACK FIXME
# UPDATE: wx 2.6.1 does NOT fix this issue.
_wx_offset = Tuple(Int, Int)
###########################################################################
# 'MDIApplicationWindow' interface.
###########################################################################
def create_child_window(self, title=None, is_mdi=True, float=True):
""" Create a child window. """
if title is None:
title = self.title
if is_mdi:
return wx.MDIChildFrame(self.control, -1, title)
else:
if float:
style = wx.DEFAULT_FRAME_STYLE | wx.FRAME_FLOAT_ON_PARENT
else:
style = wx.DEFAULT_FRAME_STYLE
return wx.Frame(self.control, -1, title, style=style)
###########################################################################
# Protected 'Window' interface.
###########################################################################
def _create_contents(self, parent):
""" Create the contents of the MDI window. """
# Create the 'trim' widgets (menu, tool and status bars etc).
self._create_trim_widgets(self.control)
# The work-area background image (it can be tiled or scaled).
self._image = self.background_image.create_image()
self._bmp = self._image.ConvertToBitmap()
# Frame events.
#
# We respond to size events to layout windows around the MDI frame.
wx.EVT_SIZE(self.control, self._on_size)
# Client window events.
client_window = self.control.GetClientWindow()
wx.EVT_ERASE_BACKGROUND(client_window, self._on_erase_background)
self._wx_offset = client_window.GetPositionTuple()
if AUI:
# Let the AUI manager look after the frame.
self._aui_manager.SetManagedWindow(self.control)
contents = super(MDIApplicationWindow, self)._create_contents(parent)
return contents
def _create_control(self, parent):
""" Create the toolkit-specific control that represents the window. """
control = wx.MDIParentFrame(
parent, -1, self.title, style=wx.DEFAULT_FRAME_STYLE,
size=self.size, pos=self.position
)
return control
###########################################################################
# Private interface.
###########################################################################
def _tile_background_image(self, dc, width, height):
""" Tiles the background image. """
w = self._bmp.GetWidth()
h = self._bmp.GetHeight()
x = 0
while x < width:
y = 0
while y < height:
dc.DrawBitmap(self._bmp, x, y)
y = y + h
x = x + w
return
def _scale_background_image(self, dc, width, height):
""" Scales the background image. """
# Scale the image (if necessary).
image = self._image
        if image.GetWidth() != width or image.GetHeight() != height:
image = self._image.Copy()
image.Rescale(width, height)
# Convert it to a bitmap and draw it.
dc.DrawBitmap(image.ConvertToBitmap(), 0, 0)
return
##### wx event handlers ###################################################
def _on_size(self, event):
""" Called when the frame is resized. """
wx.LayoutAlgorithm().LayoutMDIFrame(self.control)
return
def _on_erase_background(self, event):
""" Called when the background of the MDI client window is erased. """
# fixme: Close order...
if self.control is None:
return
frame = self.control
dc = event.GetDC()
if not dc:
dc = wx.ClientDC(frame.GetClientWindow())
size = frame.GetClientSize()
# Currently you have two choices, tile the image or scale it. Be
# warned that scaling is MUCH slower than tiling.
if self.tile_background_image:
self._tile_background_image(dc, size.width, size.height)
else:
self._scale_background_image(dc, size.width, size.height)
| {
"content_hash": "54782c5b93903dc1d71a116ffb9b2e74",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 79,
"avg_line_length": 31.028901734104046,
"alnum_prop": 0.5458271236959762,
"repo_name": "geggo/pyface",
"id": "dee57a0b1867a7f39ac55f34c4b0d0488e618cec",
"size": "6008",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "pyface/mdi_application_window.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "896"
},
{
"name": "Python",
"bytes": "2246684"
},
{
"name": "Shell",
"bytes": "940"
}
],
"symlink_target": ""
} |
"""
This module contains generic utilities and configuration information for use
by the other submodules of the `pySMART` package.
"""
# Python built-ins
import ctypes
import os
import platform
from subprocess import Popen, PIPE
import warnings
# Configuration definitions
_min_smartctl_ver = {
'Windows' : {
'maj' : 6,
'min' : 1
},
'Linux' : {
'maj' : 5,
'min' : 42
},
'FreeBSD' : {
'maj' : 6,
'min' : 1
},
'Darwin' : {
'maj' : 6,
'min' : 1
}
}
"""
(dict of dict of int): Outer dict contains operating system names as keys.
Inner dict has keys 'maj' and 'min' with int values representing the minimum
required major and minor versions, respectively.
"""
OS = platform.system()
"""**(str):** The operating system's name, generally 'Linux' or 'Windows'"""
_req_ma, _req_mi = _min_smartctl_ver[OS]['maj'], _min_smartctl_ver[OS]['min']
"""Major and minor version requirements, parsed from the version string."""
smartctl_type = {
'ata' : 'ata',
'csmi' : 'ata',
'sas' : 'scsi',
'sat' : 'sat',
'sata' : 'ata',
'scsi': 'scsi',
'atacam': 'atacam'
}
"""
**(dict of str):** Contains actual interface types (ie: sas, csmi) as keys and
the corresponding smartctl interface type (ie: scsi, ata) as values.
"""
# Helper functions
def admin():
"""Determine whether this scrpt is running with administrative privilege.
### Returns:
* **(bool):** True if running as an administrator, False otherwise.
"""
try:
is_admin = os.getuid() == 0
except AttributeError:
is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
return is_admin
def pd_to_sd(pd):
"""
Converts a device name from Windows' physical device ID (ie: pd0) to
    Linux's sda notation. Handles up to 'pd701' = 'sdzz'.
    ### Args:
    * **pd (int):** Physical device ID number.
    ### Returns:
    * **(str):** Linux-style 'sd_' device name.
"""
try:
pd = int(pd)
except ValueError:
return None
pd2sd = {}
# Tried to build a list comprehension but Py2.6 on Linux gave syntax error
for i in range(26):
pd2sd[i] = chr(ord('a') + i)
    if pd >= 26:
first = (pd // 26) - 1
second = pd % 26
return 'sd' + pd2sd[first] + pd2sd[second]
else:
return 'sd' + pd2sd[pd]
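# Sanity check (editor's note): with the pd >= 26 boundary above,
# pd_to_sd(0) == 'sda', pd_to_sd(25) == 'sdz', pd_to_sd(26) == 'sdaa' and
# pd_to_sd(701) == 'sdzz'.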
def rescan_device_busses():
"""Force a rescan of internal storage busses under Windows"""
cmd = Popen('echo "rescan" | diskpart', shell=True,
stdout=PIPE, stderr=PIPE)
_stdout, _stderr = cmd.communicate()
def _warning_on_one_line(message, category, filename, lineno, file=None,
line=None):
"""Formats warning messages to appear on one line."""
return '%s:%s: %s: %s\n' % (filename, lineno, category.__name__, message)
warnings.formatwarning = _warning_on_one_line
def path_append():
"""Appneds the path to smartctl (OS Specific)"""
if OS == 'FreeBSD':
os.environ["PATH"] += '/sbin:/bin:/usr/sbin:/usr/bin:/usr/games:' +\
'/usr/local/sbin:/usr/local/bin:/root/bin'
path_append()
# Verify smartctl is on the system path and meets the minimum required version
cmd = Popen('smartctl --version', shell=True, stdout=PIPE, stderr=PIPE)
_stdout, _stderr = cmd.communicate()
if _stdout == '':
raise Exception(
"Required package 'smartmontools' is not installed, or 'smartctl'\n"
"component is not on the system path. Please install and try again."
"The current env path is: {0}".format(os.environ["PATH"]))
else:
for line in _stdout.split('\n'):
if 'release' in line:
_ma, _mi = line.strip().split(' ')[2].split('.')
if (int(_ma) < _req_ma or
(int(_ma) == _req_ma and int(_mi) < _req_mi)):
raise Exception(
"Installed version of smartctl [{0}.{1}] is below the "
"minimum requirement of [{2}.{3}]. Please upgrade and "
"try again.".format(_ma, _mi, _req_ma, _req_mi))
# Check for admin rights
if not admin():
warnings.warn(
"_NOT_ADMIN_: smartctl is intended to be run as administrator/root "
"and may not detect all device types, or may parse device information "
"incorrectly, if run without these permissions.")
__all__ = ['admin', 'OS', 'pd_to_sd', 'rescan_device_busses', 'smartctl_type',
'path_append']
| {
"content_hash": "7031bf05b952eec8dcbf61893b3b7916",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 79,
"avg_line_length": 31.089655172413792,
"alnum_prop": 0.5849600709849158,
"repo_name": "Hellowlol/HTPC-Manager",
"id": "47eba61b122540795d18bf798e8bbfc16f0b84ba",
"size": "5177",
"binary": false,
"copies": "2",
"ref": "refs/heads/master2",
"path": "libs/pySMART/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "352"
},
{
"name": "CSS",
"bytes": "55957"
},
{
"name": "HTML",
"bytes": "193526"
},
{
"name": "JavaScript",
"bytes": "596435"
},
{
"name": "Python",
"bytes": "4737828"
},
{
"name": "Shell",
"bytes": "5255"
}
],
"symlink_target": ""
} |
from unittest import mock
from oslo_config import cfg
from oslo_log import log
from watcher.common import clients
from watcher.common import exception
from watcher.decision_engine.datasources import grafana
from watcher.tests import base
import requests
CONF = cfg.CONF
LOG = log.getLogger(__name__)
@mock.patch.object(clients.OpenStackClients, 'nova', mock.Mock())
class TestGrafana(base.BaseTestCase):
"""Test the GrafanaHelper datasource
    Objects under test are prefixed with t_, mocked objects with m_, and
    patched objects with p_. No object under test should be created in setUp,
    since that can influence the results.
"""
def setUp(self):
super(TestGrafana, self).setUp()
self.p_conf = mock.patch.object(
grafana, 'CONF',
new_callable=mock.PropertyMock)
self.m_conf = self.p_conf.start()
self.addCleanup(self.p_conf.stop)
self.m_conf.grafana_client.token = \
"eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk=="
self.m_conf.grafana_client.base_url = "https://grafana.proxy/api/"
self.m_conf.grafana_client.project_id_map = {'host_cpu_usage': 7221}
self.m_conf.grafana_client.database_map = \
{'host_cpu_usage': 'mock_db'}
self.m_conf.grafana_client.attribute_map = \
{'host_cpu_usage': 'hostname'}
self.m_conf.grafana_client.translator_map = \
{'host_cpu_usage': 'influxdb'}
self.m_conf.grafana_client.query_map = \
{'host_cpu_usage': 'SELECT 100-{0}("{0}_value") FROM {3}.'
'cpu_percent WHERE ("host" =~ /^{1}$/ AND '
'"type_instance" =~/^idle$/ AND time > '
'(now()-{2}m)'}
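        # Judging from the template above, {0} is the aggregate function,
        # {1} the host attribute, {2} the period in minutes and {3} the
        # database; the actual substitution is performed by GrafanaHelper,
        # not by this test.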
self.m_grafana = grafana.GrafanaHelper(osc=mock.Mock())
stat_agg_patcher = mock.patch.object(
self.m_grafana, 'statistic_aggregation',
spec=grafana.GrafanaHelper.statistic_aggregation)
self.mock_aggregation = stat_agg_patcher.start()
self.addCleanup(stat_agg_patcher.stop)
self.m_compute_node = mock.Mock(
id='16a86790-327a-45f9-bc82-45839f062fdc',
hostname='example.hostname.ch'
)
self.m_instance = mock.Mock(
id='73b1ff78-aca7-404f-ac43-3ed16c1fa555',
human_id='example.hostname'
)
def test_configured(self):
"""Initialize GrafanaHelper and check if configured is true"""
t_grafana = grafana.GrafanaHelper(osc=mock.Mock())
self.assertTrue(t_grafana.configured)
def test_configured_error(self):
"""Butcher the required configuration and test if configured is false
"""
self.m_conf.grafana_client.base_url = ""
t_grafana = grafana.GrafanaHelper(osc=mock.Mock())
self.assertFalse(t_grafana.configured)
def test_configured_raise_error(self):
"""Test raising error when using improperly configured GrafanHelper
Assure that the _get_metric method raises errors if the metric is
missing from the map
"""
# Clear the METRIC_MAP of Grafana since it is a static variable that
# other tests might have set before this test runs.
grafana.GrafanaHelper.METRIC_MAP = {}
self.m_conf.grafana_client.base_url = ""
t_grafana = grafana.GrafanaHelper(osc=mock.Mock())
self.assertFalse(t_grafana.configured)
self.assertEqual({}, t_grafana.METRIC_MAP)
self.assertRaises(
exception.MetricNotAvailable,
t_grafana.get_host_cpu_usage,
self.m_compute_node
)
@mock.patch.object(requests, 'get')
def test_request_raise_error(self, m_request):
"""Test raising error when status code of request indicates problem
Assure that the _request method raises errors if the response indicates
problems.
"""
m_request.return_value = mock.Mock(status_code=404)
t_grafana = grafana.GrafanaHelper(osc=mock.Mock())
self.assertIsNone(t_grafana.get_host_cpu_usage(self.m_compute_node))
def test_no_metric_raise_error(self):
"""Test raising error when specified meter does not exist"""
t_grafana = grafana.GrafanaHelper(osc=mock.Mock())
self.assertRaises(exception.MetricNotAvailable,
t_grafana.statistic_aggregation,
self.m_compute_node,
'none existing meter', 60)
@mock.patch.object(grafana.GrafanaHelper, '_request')
def test_get_metric_raise_error(self, m_request):
"""Test raising error when endpoint unable to deliver data for metric
"""
m_request.return_value.content = "{}"
t_grafana = grafana.GrafanaHelper(osc=mock.Mock())
self.assertRaises(exception.NoSuchMetricForHost,
t_grafana.get_host_cpu_usage,
self.m_compute_node, 60)
def test_metric_builder(self):
"""Creates valid and invalid sets of configuration for metrics
Ensures that a valid metric entry can be configured even if multiple
invalid configurations exist for other metrics.
"""
self.m_conf.grafana_client.project_id_map = {
'host_cpu_usage': 7221,
'host_ram_usage': 7221,
'instance_ram_allocated': 7221,
}
self.m_conf.grafana_client.database_map = {
'host_cpu_usage': 'mock_db',
'instance_cpu_usage': 'mock_db',
'instance_ram_allocated': 'mock_db',
}
self.m_conf.grafana_client.attribute_map = {
'host_cpu_usage': 'hostname',
'host_power': 'hostname',
'instance_ram_allocated': 'human_id',
}
self.m_conf.grafana_client.translator_map = {
'host_cpu_usage': 'influxdb',
'host_inlet_temp': 'influxdb',
# validate that invalid entries don't get added
'instance_ram_usage': 'dummy',
'instance_ram_allocated': 'influxdb',
}
self.m_conf.grafana_client.query_map = {
'host_cpu_usage': 'SHOW SERIES',
'instance_ram_usage': 'SHOW SERIES',
'instance_ram_allocated': 'SHOW SERIES',
}
expected_result = {
'host_cpu_usage': {
'db': 'mock_db',
'project': 7221,
'attribute': 'hostname',
'translator': 'influxdb',
'query': 'SHOW SERIES'},
'instance_ram_allocated': {
'db': 'mock_db',
'project': 7221,
'attribute': 'human_id',
'translator': 'influxdb',
'query': 'SHOW SERIES'},
}
t_grafana = grafana.GrafanaHelper(osc=mock.Mock())
self.assertEqual(t_grafana.METRIC_MAP, expected_result)
@mock.patch.object(grafana.GrafanaHelper, '_request')
def test_statistic_aggregation(self, m_request):
m_request.return_value.content = "{ \"results\": [{ \"series\": [{ " \
"\"columns\": [\"time\",\"mean\"]," \
"\"values\": [[1552500855000, " \
"67.3550078657577]]}]}]}"
t_grafana = grafana.GrafanaHelper(osc=mock.Mock())
result = t_grafana.statistic_aggregation(
self.m_compute_node, 'compute_node', 'host_cpu_usage', 60)
self.assertEqual(result, 67.3550078657577)
def test_get_host_cpu_usage(self):
self.m_grafana.get_host_cpu_usage(self.m_compute_node, 60, 'min', 15)
self.mock_aggregation.assert_called_once_with(
self.m_compute_node, 'compute_node', 'host_cpu_usage', 60, 'min',
15)
def test_get_host_ram_usage(self):
self.m_grafana.get_host_ram_usage(self.m_compute_node, 60,
'min', 15)
self.mock_aggregation.assert_called_once_with(
self.m_compute_node, 'compute_node', 'host_ram_usage', 60, 'min',
15)
def test_get_host_outlet_temperature(self):
self.m_grafana.get_host_outlet_temp(self.m_compute_node, 60,
'min', 15)
self.mock_aggregation.assert_called_once_with(
self.m_compute_node, 'compute_node', 'host_outlet_temp', 60, 'min',
15)
def test_get_host_inlet_temperature(self):
self.m_grafana.get_host_inlet_temp(self.m_compute_node, 60,
'min', 15)
self.mock_aggregation.assert_called_once_with(
self.m_compute_node, 'compute_node', 'host_inlet_temp', 60, 'min',
15)
def test_get_host_airflow(self):
self.m_grafana.get_host_airflow(self.m_compute_node, 60,
'min', 15)
self.mock_aggregation.assert_called_once_with(
self.m_compute_node, 'compute_node', 'host_airflow', 60, 'min',
15)
def test_get_host_power(self):
self.m_grafana.get_host_power(self.m_compute_node, 60,
'min', 15)
self.mock_aggregation.assert_called_once_with(
self.m_compute_node, 'compute_node', 'host_power', 60, 'min',
15)
def test_get_instance_cpu_usage(self):
self.m_grafana.get_instance_cpu_usage(self.m_compute_node, 60,
'min', 15)
self.mock_aggregation.assert_called_once_with(
self.m_compute_node, 'instance', 'instance_cpu_usage', 60,
'min', 15)
def test_get_instance_ram_usage(self):
self.m_grafana.get_instance_ram_usage(self.m_compute_node, 60,
'min', 15)
self.mock_aggregation.assert_called_once_with(
self.m_compute_node, 'instance', 'instance_ram_usage', 60,
'min', 15)
def test_get_instance_ram_allocated(self):
self.m_grafana.get_instance_ram_allocated(self.m_compute_node, 60,
'min', 15)
self.mock_aggregation.assert_called_once_with(
self.m_compute_node, 'instance', 'instance_ram_allocated', 60,
'min', 15)
def test_get_instance_l3_cache_usage(self):
self.m_grafana.get_instance_l3_cache_usage(self.m_compute_node, 60,
'min', 15)
self.mock_aggregation.assert_called_once_with(
self.m_compute_node, 'instance', 'instance_l3_cache_usage', 60,
'min', 15)
def test_get_instance_root_disk_allocated(self):
self.m_grafana.get_instance_root_disk_size(self.m_compute_node, 60,
'min', 15)
self.mock_aggregation.assert_called_once_with(
self.m_compute_node, 'instance', 'instance_root_disk_size', 60,
'min', 15)
| {
"content_hash": "064e35518c8a3162c6241b4f01f738a2",
"timestamp": "",
"source": "github",
"line_count": 283,
"max_line_length": 79,
"avg_line_length": 39.31095406360424,
"alnum_prop": 0.5697078651685393,
"repo_name": "stackforge/watcher",
"id": "1e716d7420b28e4a5bcbe7ed78c342057be38017",
"size": "11813",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "watcher/tests/decision_engine/datasources/test_grafana_helper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "995442"
},
{
"name": "Shell",
"bytes": "9758"
}
],
"symlink_target": ""
} |
import random
import bz2
import numpy
from numpy import array, where, concatenate
from numpy import kron, ones, sqrt, sum
from os.path import exists
from esvm.mldata import convert
try:
import arff
have_arff = True
except ImportError:
have_arff = False
def create_dataset():
"""Read the file with first 100k sequences from C. elegans
and generate some easier datasets.
"""
if not have_arff:
print 'import arff failed, currently cannot create data'
return
# convert data to arff format
gen_arff('C_elegans_acc_100000.fasta.bz2','C_elegans_acc_gc.arff','C_elegans_acc_seq.arff',\
'C_elegans_acc_seq2.arff','C_elegans_acc_freq.arff',\
num_seqs=100000,subset=True,overwrite=True,normalise=False,\
max_pos=200,max_neg=2000)
print 'Convert from arff to csv and fasta'
convert('C_elegans_acc_gc.arff','C_elegans_acc_gc.csv','vec')
convert('C_elegans_acc_seq.arff','C_elegans_acc_seq.csv','seq')
convert('C_elegans_acc_freq.arff','C_elegans_acc_freq.csv','vec')
convert('C_elegans_acc_seq2.arff','C_elegans_acc_seq2.csv','mseq')
convert('C_elegans_acc_seq.arff','C_elegans_acc_seq.fa','seq')
def create_modsel():
"""Read the file with last 100k sequences from C. elegans
and generate some easier datasets.
"""
if not have_arff:
print 'import arff failed, currently cannot create data'
return
# convert data to arff format
gen_arff('C_elegans_acc_modsel.fasta.bz2','C_elegans_acc_modsel_gc.arff','C_elegans_acc_modsel_seq.arff',\
'C_elegans_acc_modsel_seq2.arff','C_elegans_acc_modsel_freq.arff',\
num_seqs=100000,subset=True,overwrite=True,normalise=False,\
max_pos=200,max_neg=2000)
print 'Convert from arff to csv and fasta'
convert('C_elegans_acc_modsel_gc.arff','C_elegans_acc_modsel_gc.csv','vec')
convert('C_elegans_acc_modsel_seq.arff','C_elegans_acc_modsel_seq.csv','seq')
convert('C_elegans_acc_modsel_freq.arff','C_elegans_acc_modsel_freq.csv','vec')
convert('C_elegans_acc_modsel_seq2.arff','C_elegans_acc_modsel_seq2.csv','mseq')
convert('C_elegans_acc_modsel_seq.arff','C_elegans_acc_modsel_seq.fa','seq')
def gen_arff(fastafilename,gcfilename,seqfilename,seq2filename,specfilename,\
num_seqs=100000,subset=False,max_pos=200,max_neg=2000,\
overwrite=False,normalise=True):
"""If data not yet created, generate 2 arff files
- containing the two dimensional GC content before and after splice site
- containing the sequence around the splice site.
"""
if (exists(gcfilename) and exists(seqfilename)) and not overwrite:
return
print 'Creating %s and %s from %s' % (gcfilename,seqfilename,fastafilename)
    if fastafilename.find('acc') != -1:
# acceptor, AG at [40:42]
window = (-40, 197, 42)
    elif fastafilename.find('don') != -1:
# donor, GT or GC at [40:42]
window = (-40, 200, 42)
else:
print "Error: Cannot determine whether donor or acceptor"
[strings, lab]=read_data(bz2.BZ2File(fastafilename), num_seqs, window)
# Only a subset of the examples are used.
if subset:
[strings, lab] = take_subset(strings, lab, max_pos, max_neg)
gcs=count_gs_and_cs(strings, (0, -window[0]), (-window[0]+2, -window[0]+2+window[2]))
seq_upstream = []
seq_downstream = []
for curstr in strings:
seq_upstream.append(curstr[0:-window[0]])
seq_downstream.append(curstr[(-window[0]+2):(-window[0]+2+window[2])])
seq_upstream = array(seq_upstream)
seq_downstream = array(seq_downstream)
spec_up = count_nt_freq(seq_upstream)
spec_down = count_nt_freq(seq_downstream)
if normalise:
gcs = normalise_features(gcs)
spec_up = normalise_features(spec_up)
spec_down = normalise_features(spec_down)
# sequence file
alist = [('label',1,[]),('sequence',0,[])]
f = open(seqfilename,'w')
arff.arffwrite(f,alist,zip(lab,strings),name=fastafilename,comment='Converted from '+fastafilename)
f.close()
# 2 sequence file
alist = [('label',1,[]),('upstream sequence',0,[]),('downstream sequence',0,[])]
f = open(seq2filename,'w')
arff.arffwrite(f,alist,zip(lab,seq_upstream,seq_downstream),\
name=fastafilename,comment='Converted from '+fastafilename)
f.close()
# gc contents
alist = [('label',1,[]),('upstream',1,[]),('downstream',1,[])]
data = []
for ix,curlab in enumerate(lab):
data.append((curlab,gcs[0,ix],gcs[1,ix]))
f = open(gcfilename,'w')
arff.arffwrite(f,alist,data,name=fastafilename,comment='Converted from '+fastafilename)
f.close()
# spectrum
alist = [('label',1,[]),\
('upA',1,[]),('upC',1,[]),('upG',1,[]),('upT',1,[]),\
('downA',1,[]),('downC',1,[]),('downG',1,[]),('downT',1,[])]
data = []
for ix,curlab in enumerate(lab):
data.append((curlab,spec_up[0,ix],spec_up[1,ix],spec_up[2,ix],spec_up[3,ix],\
spec_down[0,ix],spec_down[1,ix],spec_down[2,ix],spec_down[3,ix]))
if len(specfilename)>0:
f = open(specfilename,'w')
arff.arffwrite(f,alist,data,name=fastafilename,comment='Converted from '+fastafilename)
f.close()
def take_subset(strings, lab, max_pos=200, max_neg=2000):
"""Take a subset of the classes to the maximum numbers determined by
max_pos and max_neg
"""
random.seed(123456789)
pos_idx = where(lab>0)[0]
neg_idx = where(lab<0)[0]
num_pos = len(pos_idx)
num_neg = len(neg_idx)
assert(num_pos < num_neg)
assert(max_pos < max_neg)
max_pos = min(max_pos,num_pos)
max_neg = min(max_neg,num_neg)
neg_sub_idx = array(random.sample(neg_idx,max_neg))
assert(all(lab[neg_sub_idx]<0))
pos_sub_idx = array(random.sample(pos_idx,max_pos))
assert(all(lab[pos_sub_idx]>0))
strings = concatenate((strings[pos_sub_idx],strings[neg_sub_idx]))
lab = concatenate((lab[pos_sub_idx],lab[neg_sub_idx]))
return (strings,lab)
def balance_classes(strings, lab, max_examples=1200,ratio=5.0):
"""Take a subset of negative examples such that
    the number of examples in the negative class is limited to ratio.
Also limit the maximum number of examples.
"""
random.seed(123456789)
pos_idx = where(lab>0)[0]
neg_idx = where(lab<0)[0]
num_pos = len(pos_idx)
num_neg = len(neg_idx)
assert(num_pos < num_neg)
max_pos = int(float(max_examples)/(ratio+1.0))
if num_pos < max_pos:
max_pos = num_pos
pos_idx = pos_idx[:max_pos]
num_pos = len(pos_idx)
max_neg = int(num_pos*ratio)
if num_neg < max_neg:
max_neg = num_neg
sub_idx = array(random.sample(neg_idx,max_neg))
assert(all(lab[sub_idx]<0))
strings = concatenate((strings[pos_idx],strings[sub_idx]))
lab = concatenate((lab[pos_idx],lab[sub_idx]))
return (strings,lab)
def normalise_features(feats):
"""Normalise each feature to zero mean and unit variance.
    Assume features are stored as a matrix with one column per example.
"""
(numdim,numex) = feats.shape
M = sum(feats,axis=1)/numex
M = M.reshape(numdim,1)
M2 = sum(feats**2,axis=1)/numex
M2 = M2.reshape(numdim,1)
SD = sqrt(M2-M**2)
onevec = ones((1,numex))
feats = (feats - kron(onevec,M))/(kron(onevec,SD))
return feats
def read_data(f, num, window):
"""Read the fasta file containing splice sites."""
labels=num*[0]
strings=num*[0]
l1 = f.readline()
l2 = f.readline()
line = 0
num_alt_consensus = 0
while l1 and l2 and line<num:
consensus = l2[:-1][window[1]:window[1]+2]
if (consensus == 'AG') or (consensus == 'GT'):
if 'label=-1' in l1:
labels[line]=-1
elif 'label=1' in l1:
labels[line]=+1
else:
print "error in line %d" % line
return
strings[line] = l2[:-1][window[1]+window[0] : window[1]+window[2]]
line+=1
else:
num_alt_consensus+=1
if consensus != 'GC':
print line, consensus
l1=f.readline()
l2=f.readline()
print "Number of GC consensus sites: %d" %num_alt_consensus
    if line + num_alt_consensus != num:
print "error reading file"
return
else:
strings = strings[:line+1]
labels = labels[:line+1]
return (array(strings), array(labels, dtype=numpy.double))
def count_gs_and_cs(strings, range1, range2):
"""Count the number of G and C in the two ranges."""
num=len(strings)
gc_count=num*[(0,0)]
for i in xrange(num):
x=float(strings[i].count('G', range1[0], range1[1]) +
strings[i].count('C', range1[0], range1[1])) / abs(range1[1]-range1[0])
y=float(strings[i].count('G', range2[0], range2[1]) +
strings[i].count('C', range2[0], range2[1])) / abs(range2[1]-range2[0])
gc_count[i]=(x,y)
return array(gc_count).T
def count_nt_freq(strings):
"""Count the nucleotide frequencies"""
num = len(strings)
strlen = len(strings[0])
ntfreq = num*[(0,0,0,0)]
for ix in xrange(num):
a=float(strings[ix].count('A')) / strlen
c=float(strings[ix].count('C')) / strlen
g=float(strings[ix].count('G')) / strlen
t=float(strings[ix].count('T')) / strlen
ntfreq[ix]=(a,c,g,t)
return array(ntfreq).T
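# Example (editor's illustration): count_nt_freq(array(['ACGT'])) returns the
# 4x1 column of A/C/G/T frequencies [[0.25], [0.25], [0.25], [0.25]].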
| {
"content_hash": "7c7ca3becaa8260ef7ba1bed263ca037",
"timestamp": "",
"source": "github",
"line_count": 288,
"max_line_length": 110,
"avg_line_length": 33.19444444444444,
"alnum_prop": 0.6099372384937238,
"repo_name": "shogun-toolbox/shogun",
"id": "78322522e6b0163d72f263513003773cbd51edaf",
"size": "9560",
"binary": false,
"copies": "38",
"ref": "refs/heads/develop",
"path": "applications/easysvm/splicesites/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "64"
},
{
"name": "Batchfile",
"bytes": "615"
},
{
"name": "C",
"bytes": "12178"
},
{
"name": "C++",
"bytes": "10278013"
},
{
"name": "CMake",
"bytes": "196539"
},
{
"name": "Dockerfile",
"bytes": "2046"
},
{
"name": "GDB",
"bytes": "89"
},
{
"name": "HTML",
"bytes": "2060"
},
{
"name": "MATLAB",
"bytes": "8755"
},
{
"name": "Makefile",
"bytes": "244"
},
{
"name": "Python",
"bytes": "286749"
},
{
"name": "SWIG",
"bytes": "386485"
},
{
"name": "Shell",
"bytes": "7267"
}
],
"symlink_target": ""
} |
from django.db.models.query_utils import DeferredAttribute
from .compat import get_local_field_names
class DeferredManager(object):
def __init__(self, instance, model, deferred_attrs):
self.instance = instance
self.model = model
self.deferred_attrs = deferred_attrs
self.is_loaded = False
def load(self):
if self.is_loaded:
return
self.is_loaded = True
# We use only() instead of values() here because we want the
        # various data coercion methods (to_python(), etc.) to be called
# here.
deferred_fields = [f for f in self.model._meta.fields if f.attname in self.deferred_attrs]
query_fields = [f.name for f in deferred_fields]
obj = self.model.base_objects.filter(pk=self.instance.pk).only(*query_fields)\
.using(self.instance._state.db).get()
for f in deferred_fields:
self.setattr(f.attname, getattr(obj, f.attname))
# TODO: Recursively support properties?
def getattr(self, attname):
if attname in self.model.__dict__:
prop = self.model.__dict__[attname]
if hasattr(prop, '__get__'):
return prop.__get__(self.instance)
return self.instance.__dict__[attname]
# TODO: Recursively support properties?
def setattr(self, attname, value):
if attname in self.model.__dict__:
prop = self.model.__dict__[attname]
if hasattr(prop, '__set__'):
prop.__set__(self.instance, value)
return
self.instance.__dict__[attname] = value
class DeferredManagerAccess(object):
def __init__(self, model, deferred_attrs):
self.model = model
self.deferred_attrs = deferred_attrs
def __get__(self, instance, owner):
try:
return instance._deferred_manager_obj
except AttributeError:
instance._deferred_manager_obj = DeferredManager(instance, self.model, self.deferred_attrs)
return instance._deferred_manager_obj
# We overwrite every method of DeferredAttribute, but still need to extend it as
# otherwise isinstance() calls will fail
class LoadAllDeferredAttribute(DeferredAttribute):
def __init__(self, attr, model):
self.attr = attr
self.model = model
def __get__(self, instance, owner):
"""
Retrieves and caches the value from the datastore on the first lookup.
Returns the cached value.
"""
assert instance is not None
data = instance.__dict__
if not instance._deferred_manager.is_loaded:
# We use only() instead of values() here because we want the
            # various data coercion methods (to_python(), etc.) to be called
# here.
assert self.attr in instance._deferred_manager.deferred_attrs
instance._deferred_manager.load()
return instance._deferred_manager.getattr(self.attr)
def __set__(self, instance, value):
"""
Deferred loading attributes can be set normally (which means there will
        never be a database lookup involved).
"""
instance._deferred_manager.setattr(self.attr, value)
# This function is needed because data descriptors must be defined on a class
# object, not an instance, to have any effect.
def deferred_class_factory(model, attrs):
"""
Returns a class object that is a copy of "model" with the specified "attrs"
being replaced with DeferredAttribute objects. The "pk_value" ties the
deferred attributes to a particular instance of the model.
"""
# The app_cache wants a unique name for each model, otherwise the new class
# won't be created (we get an old one back). Therefore, we generate the
    # name using the passed in attrs. It's OK to reuse an old class if the attrs
# are identical.
# NAME CHANGED TO AVOID NAME COLLISION WITH DJANGO DEFERRED MODELS
# ("a" added)
name = "%s_aDeferred_%s" % (model.__name__, '_'.join(sorted(list(attrs))))
# type() requires a non-unicode string.
name = str(name)
try:
return model._meta.apps.get_model(model._meta.app_label, name)
except LookupError:
class Meta:
pass
setattr(Meta, "proxy", True)
setattr(Meta, "app_label", model._meta.app_label)
overrides = dict([(attr, LoadAllDeferredAttribute(attr, model))
for attr in attrs])
overrides["Meta"] = Meta
overrides["__module__"] = model.__module__
overrides["_deferred"] = True
overrides["_deferred_manager"] = DeferredManagerAccess(model, attrs)
return type(name, (model,), overrides)
# The above function is also used to unpickle model instances with deferred
# fields.
deferred_class_factory.__safe_for_unpickling__ = True
# TODO: Cache deferred models!
def deferred_child_class_factory(instance, child_model):
base_model = instance.__class__
base_fields = get_local_field_names(base_model)
child_fields = get_local_field_names(child_model)
deferred_attrs = [f for f in child_fields if f not in base_fields]
if not deferred_attrs:
return child_model
return deferred_class_factory(child_model, deferred_attrs)
def deferred_child_obj_factory(instance, child_model):
# if we already got the needed child model instance, just return it
base_model = instance.__class__
if issubclass(base_model, child_model):
return instance
# create new (deferred) instance
deferred_model = deferred_child_class_factory(instance, child_model)
deferred_obj = deferred_model()
# copy all values to new obj and
# make sure every primary key is set to 'pk'
# (this is needed to set FOO_ptr_id on child instances)
#print base_model._meta.fields
for f in base_model._meta.fields:
if f.primary_key:
# we set both "pk" and f.attname, as this way we get both
# "..._ptr_id" and id to the right value
setattr(deferred_obj, f.attname, instance.pk)
setattr(deferred_obj, 'pk', instance.pk)
else:
setattr(deferred_obj, f.attname, getattr(instance, f.attname, None))
        # old version, which does not account for __get__/__set__ attributes
#elif f.attname in instance.__dict__:
# deferred_obj.__dict__[f.attname] = instance.__dict__[f.attname]
# clone db state
# TODO: Clone more state?
deferred_obj._state.db = instance._state.db
return deferred_obj
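# Editor's sketch of the intended flow (illustrative; `Child` and `base` are
# hypothetical names, not part of this module):
#   child = deferred_child_obj_factory(base, Child)
# returns a proxy instance whose Child-only fields are LoadAllDeferredAttribute
# descriptors; the first access to any of them triggers one
# DeferredManager.load() query that fills in all deferred fields at once.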
| {
"content_hash": "93ad2a338c3f05751478ec89e25de71e",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 103,
"avg_line_length": 40.146341463414636,
"alnum_prop": 0.6359356014580801,
"repo_name": "team23/django_deferred_polymorph",
"id": "280488b56bc53dbc682866388e2a6b7b3574f0fb",
"size": "6749",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_deferred_polymorph/deferred.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "19271"
}
],
"symlink_target": ""
} |
from core.himesis import Himesis, HimesisPreConditionPatternLHS
class HM1ThenClausePart1CompleteLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HM1ThenClausePart1CompleteLHS.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HM1ThenClausePart1CompleteLHS, self).__init__(name='HM1ThenClausePart1CompleteLHS', num_nodes=3, edges=[])
# Add the edges
self.add_edges([[0, 2], [2, 1]])
# Set the graph attributes
self["mm__"] = ['MT_pre__GM2AUTOSAR_MM', 'MoTifRule']
self["MT_constraint__"] = """if PreNode('3')['associationType']=='component':
return True
return False
"""
self["name"] = """M1ThenClausePart1Complete"""
self["GUID__"] = 3840881782177697049
# Set the node attributes
self.vs[0]["MT_pre__name"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[0]["MT_subtypeMatching__"] = False
self.vs[0]["MT_pre__classtype"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[0]["MT_pivotIn__"] = """element1"""
self.vs[0]["MT_label__"] = """1"""
self.vs[0]["mm__"] = """MT_pre__CompositionType"""
self.vs[0]["MT_subtypes__"] = []
self.vs[0]["MT_dirty__"] = False
self.vs[0]["MT_pre__cardinality"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[0]["GUID__"] = 7133726603474971140
self.vs[1]["MT_pre__name"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[1]["MT_subtypeMatching__"] = False
self.vs[1]["MT_pre__classtype"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[1]["MT_label__"] = """2"""
self.vs[1]["mm__"] = """MT_pre__ComponentPrototype"""
self.vs[1]["MT_subtypes__"] = []
self.vs[1]["MT_dirty__"] = False
self.vs[1]["MT_pre__cardinality"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[1]["GUID__"] = 6199913097400833979
self.vs[2]["MT_subtypeMatching__"] = False
self.vs[2]["MT_pre__associationType"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[2]["MT_label__"] = """3"""
self.vs[2]["mm__"] = """MT_pre__directLink_T"""
self.vs[2]["MT_subtypes__"] = []
self.vs[2]["MT_dirty__"] = False
self.vs[2]["GUID__"] = 7758438897929111969
def eval_name1(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_classtype1(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_cardinality1(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_name2(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_classtype2(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_cardinality2(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_associationType3(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def constraint(self, PreNode, graph):
"""
Executable constraint code.
@param PreNode: Function taking an integer as parameter
and returns the node corresponding to that label.
"""
if PreNode('3')['associationType']=='component':
return True
return False
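        # Editorial note (added comment, not in the generated file): PreNode('3')
        # resolves to the node labelled "3" above -- the MT_pre__directLink_T
        # edge -- so this pre-condition pattern only matches when that link's
        # associationType attribute equals 'component'.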
| {
"content_hash": "dc73b396e1edbac2bea12b6f68c4a07b",
"timestamp": "",
"source": "github",
"line_count": 236,
"max_line_length": 120,
"avg_line_length": 48.28813559322034,
"alnum_prop": 0.5182520182520183,
"repo_name": "levilucio/SyVOLT",
"id": "9452d993622d6163cec99d6f469f0d01fb5b05b2",
"size": "11398",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GM2AUTOSAR_MM/Properties/positive/Himesis/HM1ThenClausePart1CompleteLHS.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "166159"
},
{
"name": "Python",
"bytes": "34207588"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
} |
n = input()
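# Note (added comment): n is the declared length of the list and is not needed
# here -- Python integers have arbitrary precision, so sum() already handles a
# "very big" total without overflow.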
nums = list(map(int, input().split()))
print(sum(nums)) | {
"content_hash": "b180ed5252c8b4ea5b2734cc13cd0926",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 38,
"avg_line_length": 22.333333333333332,
"alnum_prop": 0.6268656716417911,
"repo_name": "osamadel/Hacker-Rank",
"id": "3ea0f0cf39c21a8bf234d4adad5ea4e8b04d3ae4",
"size": "67",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Warmup/very_big_sum.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30142"
}
],
"symlink_target": ""
} |
"""
# BEGIN CLASSIC_STRATEGY_TESTS
>>> joe = Customer('John Doe', 0) # <1>
>>> ann = Customer('Ann Smith', 1100)
>>> cart = [LineItem('banana', 4, .5), # <2>
... LineItem('apple', 10, 1.5),
... LineItem('watermellon', 5, 5.0)]
>>> Order(joe, cart, FidelityPromo()) # <3>
<Order total: 42.00 due: 42.00>
>>> Order(ann, cart, FidelityPromo()) # <4>
<Order total: 42.00 due: 39.90>
>>> banana_cart = [LineItem('banana', 30, .5), # <5>
... LineItem('apple', 10, 1.5)]
>>> Order(joe, banana_cart, BulkItemPromo()) # <6>
<Order total: 30.00 due: 28.50>
>>> long_order = [LineItem(str(item_code), 1, 1.0) # <7>
... for item_code in range(10)]
>>> Order(joe, long_order, LargeOrderPromo()) # <8>
<Order total: 10.00 due: 9.30>
>>> Order(joe, cart, LargeOrderPromo())
<Order total: 42.00 due: 42.00>
# END CLASSIC_STRATEGY_TESTS
"""
# BEGIN CLASSIC_STRATEGY
from abc import ABCMeta, abstractmethod
from collections import namedtuple
Customer = namedtuple('Customer', 'name fidelity')
class LineItem:
def __init__(self, product, quantity, price):
self.product = product
self.quantity = quantity
self.price = price
def total(self):
return self.price * self.quantity
class Order: # the Context
def __init__(self, customer, cart, promotion=None):
self.customer = customer
self.cart = list(cart)
self.promotion = promotion
def total(self):
        # Name mangling stores the attribute as _Order__total, so checking for
        # '__total' never found the cached value; check the mangled name instead.
        if not hasattr(self, '_Order__total'):
            self.__total = sum(item.total() for item in self.cart)
        return self.__total
def due(self):
if self.promotion is None:
discount = 0
else:
discount = self.promotion.discount(self)
return self.total() - discount
def __repr__(self):
fmt = '<Order total: {:.2f} due: {:.2f}>'
return fmt.format(self.total(), self.due())
class Promotion(metaclass=ABCMeta): # the Strategy
@abstractmethod
def discount(self, order):
"""Return discount as an positive dollar amount"""
class FidelityPromo(Promotion): # first Concrete Strategy
"""5% discount for customers with 1000 or more fidelity points"""
def discount(self, order):
return order.total() * .05 if order.customer.fidelity >= 1000 else 0
class BulkItemPromo(Promotion): # second Concrete Strategy
"""10% discount for each LineItem with 20 or more units"""
def discount(self, order):
discount = 0
for item in order.cart:
if item.quantity >= 20:
discount += item.total() * .1
return discount
class LargeOrderPromo(Promotion): # third Concrete Strategy
"""7% discount for orders with 10 or more distinct items"""
def discount(self, order):
distinct_items = {item.product for item in order.cart}
if len(distinct_items) >= 10:
return order.total() * .07
return 0
# END CLASSIC_STRATEGY
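# --- Editorial sketch (not part of the original module) ----------------------
# The doctests above apply one concrete strategy at a time. A common follow-up
# is a "best promo" helper that tries every known Promotion and keeps the
# largest discount; the names `promos` and `best_promo_discount` below are
# illustrative assumptions, not part of the original example.
promos = [FidelityPromo(), BulkItemPromo(), LargeOrderPromo()]
def best_promo_discount(order):
    """Return the largest discount any of the known promotions offers."""
    return max(promo.discount(order) for promo in promos)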
| {
"content_hash": "7e1bc50d92b69c0b95181deaea2e63e4",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 76,
"avg_line_length": 29.601941747572816,
"alnum_prop": 0.5903574942604133,
"repo_name": "pythonprobr/oscon2014",
"id": "47d534c7fff60089bf4b9deff0c818394ed0e54f",
"size": "3117",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "strategy/classic_strategy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "50206"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from django.contrib.auth.models import Permission, User
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from .models import Band
class AdminActionsTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(username='super', password='secret', email='super@example.com')
content_type = ContentType.objects.get_for_model(Band)
Permission.objects.create(name='custom', codename='custom_band', content_type=content_type)
for user_type in ('view', 'add', 'change', 'delete', 'custom'):
username = '%suser' % user_type
user = User.objects.create_user(username=username, password='secret', is_staff=True)
permission = Permission.objects.get(codename='%s_band' % user_type, content_type=content_type)
user.user_permissions.add(permission)
setattr(cls, username, user)
def test_get_actions_respects_permissions(self):
class MockRequest:
pass
class BandAdmin(admin.ModelAdmin):
actions = ['custom_action']
@admin.action
def custom_action(modeladmin, request, queryset):
pass
def has_custom_permission(self, request):
return request.user.has_perm('%s.custom_band' % self.opts.app_label)
ma = BandAdmin(Band, admin.AdminSite())
mock_request = MockRequest()
mock_request.GET = {}
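        # Each case below configures custom_action.allowed_permissions and
        # asserts which actions get_actions() exposes: the built-in
        # delete_selected requires the delete permission, while an action with
        # no allowed_permissions attribute is offered to every staff user.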
cases = [
(None, self.viewuser, ['custom_action']),
('view', self.superuser, ['delete_selected', 'custom_action']),
('view', self.viewuser, ['custom_action']),
('add', self.adduser, ['custom_action']),
('change', self.changeuser, ['custom_action']),
('delete', self.deleteuser, ['delete_selected', 'custom_action']),
('custom', self.customuser, ['custom_action']),
]
for permission, user, expected in cases:
with self.subTest(permission=permission, user=user):
if permission is None:
if hasattr(BandAdmin.custom_action, 'allowed_permissions'):
del BandAdmin.custom_action.allowed_permissions
else:
BandAdmin.custom_action.allowed_permissions = (permission,)
mock_request.user = user
actions = ma.get_actions(mock_request)
self.assertEqual(list(actions.keys()), expected)
def test_actions_inheritance(self):
class AdminBase(admin.ModelAdmin):
actions = ['custom_action']
@admin.action
def custom_action(modeladmin, request, queryset):
pass
class AdminA(AdminBase):
pass
class AdminB(AdminBase):
actions = None
ma1 = AdminA(Band, admin.AdminSite())
action_names = [name for _, name, _ in ma1._get_base_actions()]
self.assertEqual(action_names, ['delete_selected', 'custom_action'])
# `actions = None` removes actions from superclasses.
ma2 = AdminB(Band, admin.AdminSite())
action_names = [name for _, name, _ in ma2._get_base_actions()]
self.assertEqual(action_names, ['delete_selected'])
def test_global_actions_description(self):
@admin.action(description='Site-wide admin action 1.')
def global_action_1(modeladmin, request, queryset):
pass
@admin.action
def global_action_2(modeladmin, request, queryset):
pass
admin_site = admin.AdminSite()
admin_site.add_action(global_action_1)
admin_site.add_action(global_action_2)
class BandAdmin(admin.ModelAdmin):
pass
ma = BandAdmin(Band, admin_site)
self.assertEqual(
[description for _, _, description in ma._get_base_actions()],
[
'Delete selected %(verbose_name_plural)s',
'Site-wide admin action 1.',
'Global action 2',
],
)
def test_actions_replace_global_action(self):
@admin.action(description='Site-wide admin action 1.')
def global_action_1(modeladmin, request, queryset):
pass
@admin.action(description='Site-wide admin action 2.')
def global_action_2(modeladmin, request, queryset):
pass
admin.site.add_action(global_action_1, name='custom_action_1')
admin.site.add_action(global_action_2, name='custom_action_2')
@admin.action(description='Local admin action 1.')
def custom_action_1(modeladmin, request, queryset):
pass
class BandAdmin(admin.ModelAdmin):
actions = [custom_action_1, 'custom_action_2']
@admin.action(description='Local admin action 2.')
def custom_action_2(self, request, queryset):
pass
ma = BandAdmin(Band, admin.site)
self.assertEqual(ma.check(), [])
self.assertEqual(
[
desc
for _, name, desc in ma._get_base_actions()
if name.startswith('custom_action')
],
[
'Local admin action 1.',
'Local admin action 2.',
],
)
| {
"content_hash": "d7b75bed870ce4f07c1e651889bb083b",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 117,
"avg_line_length": 37.76923076923077,
"alnum_prop": 0.5832253286428439,
"repo_name": "ar4s/django",
"id": "b61641c0c91db04411a332859891e5333eb6abf2",
"size": "5401",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "tests/modeladmin/test_actions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
""" This module returns stats about the DynamoDB table """
from datetime import datetime, timedelta
from boto.exception import JSONResponseError, BotoServerError
from retrying import retry
from dynamic_dynamodb.aws import dynamodb
from dynamic_dynamodb.log_handler import LOGGER as logger
from dynamic_dynamodb.aws.cloudwatch import (
CLOUDWATCH_CONNECTION as cloudwatch_connection)
def get_consumed_read_units_percent(
table_name, lookback_window_start=15, lookback_period=5):
""" Returns the number of consumed read units in percent
:type table_name: str
:param table_name: Name of the DynamoDB table
:type lookback_window_start: int
:param lookback_window_start: Relative start time for the CloudWatch metric
:type lookback_period: int
:param lookback_period: Number of minutes to look at
:returns: float -- Number of consumed reads as a
percentage of provisioned reads
"""
try:
metrics = __get_aws_metric(
table_name,
lookback_window_start,
lookback_period,
'ConsumedReadCapacityUnits')
except BotoServerError:
raise
if metrics:
lookback_seconds = lookback_period * 60
consumed_read_units = (
float(metrics[0]['Sum']) / float(lookback_seconds))
else:
consumed_read_units = 0
try:
table_read_units = dynamodb.get_provisioned_table_read_units(
table_name)
consumed_read_units_percent = (
float(consumed_read_units) /
float(table_read_units) * 100)
except JSONResponseError:
raise
logger.info('{0} - Consumed read units: {1:.2f}%'.format(
table_name, consumed_read_units_percent))
return consumed_read_units_percent
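# Worked example (editorial comment, values are illustrative): CloudWatch
# returns the Sum over the lookback window, so with the default 5 minute
# period a Sum of 15000 consumed read units works out to 15000 / 300 = 50
# units/s; against 100 provisioned read units that is reported as 50.00%.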
def get_throttled_read_event_count(
table_name, lookback_window_start=15, lookback_period=5):
""" Returns the number of throttled read events during a given time frame
:type table_name: str
:param table_name: Name of the DynamoDB table
:type lookback_window_start: int
:param lookback_window_start: Relative start time for the CloudWatch metric
:type lookback_period: int
:param lookback_period: Number of minutes to look at
:returns: int -- Number of throttled read events during the time period
"""
try:
metrics = __get_aws_metric(
table_name,
lookback_window_start,
lookback_period,
'ReadThrottleEvents')
except BotoServerError:
raise
if metrics:
throttled_read_events = int(metrics[0]['Sum'])
else:
throttled_read_events = 0
logger.info('{0} - Read throttle count: {1:d}'.format(
table_name, throttled_read_events))
return throttled_read_events
def get_throttled_by_provisioned_read_event_percent(
table_name, lookback_window_start=15, lookback_period=5):
""" Returns the number of throttled read events in percent
:type table_name: str
:param table_name: Name of the DynamoDB table
:type lookback_window_start: int
:param lookback_window_start: Relative start time for the CloudWatch metric
:type lookback_period: int
:param lookback_period: Number of minutes to look at
:returns: float -- Percent of throttled read events by provisioning
"""
try:
metrics = __get_aws_metric(
table_name,
lookback_window_start,
lookback_period,
'ReadThrottleEvents')
except BotoServerError:
raise
if metrics:
lookback_seconds = lookback_period * 60
throttled_read_events = (
float(metrics[0]['Sum']) / float(lookback_seconds))
else:
throttled_read_events = 0
try:
table_read_units = dynamodb.get_provisioned_table_read_units(
table_name)
throttled_by_provisioned_read_percent = (
float(throttled_read_events) /
float(table_read_units) * 100)
except JSONResponseError:
raise
logger.info('{0} - Throttled read percent by provision: {1:.2f}%'.format(
table_name, throttled_by_provisioned_read_percent))
return throttled_by_provisioned_read_percent
def get_throttled_by_consumed_read_percent(
table_name, lookback_window_start=15, lookback_period=5):
""" Returns the number of throttled read events in percent of consumption
:type table_name: str
:param table_name: Name of the DynamoDB table
:type lookback_window_start: int
:param lookback_window_start: Relative start time for the CloudWatch metric
:type lookback_period: int
:param lookback_period: Number of minutes to look at
:returns: float -- Percent of throttled read events by consumption
"""
try:
metrics1 = __get_aws_metric(
table_name,
lookback_window_start,
lookback_period,
'ConsumedReadCapacityUnits')
metrics2 = __get_aws_metric(
table_name,
lookback_window_start,
lookback_period,
'ReadThrottleEvents')
except BotoServerError:
raise
if metrics1 and metrics2:
lookback_seconds = lookback_period * 60
throttled_by_consumed_read_percent = (
(
(float(metrics2[0]['Sum']) / float(lookback_seconds)) /
(float(metrics1[0]['Sum']) / float(lookback_seconds))
) * 100)
else:
throttled_by_consumed_read_percent = 0
logger.info('{0} - Throttled read percent by consumption: {1:.2f}%'.format(
table_name, throttled_by_consumed_read_percent))
return throttled_by_consumed_read_percent
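# Editorial note: both sums above are divided by the same lookback_seconds, so
# the normalisation cancels and the result is simply
# ReadThrottleEvents / ConsumedReadCapacityUnits * 100 over the window.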
def get_consumed_write_units_percent(
table_name, lookback_window_start=15, lookback_period=5):
""" Returns the number of consumed write units in percent
:type table_name: str
:param table_name: Name of the DynamoDB table
:type lookback_window_start: int
:param lookback_window_start: Relative start time for the CloudWatch metric
:type lookback_period: int
:param lookback_period: Number of minutes to look at
:returns: float -- Number of consumed writes as a
percentage of provisioned writes
"""
try:
metrics = __get_aws_metric(
table_name,
lookback_window_start,
lookback_period,
'ConsumedWriteCapacityUnits')
except BotoServerError:
raise
if metrics:
lookback_seconds = lookback_period * 60
consumed_write_units = (
float(metrics[0]['Sum']) / float(lookback_seconds))
else:
consumed_write_units = 0
try:
table_write_units = dynamodb.get_provisioned_table_write_units(
table_name)
consumed_write_units_percent = (
float(consumed_write_units) /
float(table_write_units) * 100)
except JSONResponseError:
raise
logger.info('{0} - Consumed write units: {1:.2f}%'.format(
table_name, consumed_write_units_percent))
return consumed_write_units_percent
def get_throttled_write_event_count(
table_name, lookback_window_start=15, lookback_period=5):
""" Returns the number of throttled write events during a given time frame
:type table_name: str
:param table_name: Name of the DynamoDB table
:type lookback_window_start: int
:param lookback_window_start: Relative start time for the CloudWatch metric
:type lookback_period: int
:param lookback_period: Number of minutes to look at
:returns: int -- Number of throttled write events during the time period
"""
try:
metrics = __get_aws_metric(
table_name,
lookback_window_start,
lookback_period,
'WriteThrottleEvents')
except BotoServerError:
raise
if metrics:
throttled_write_count = int(metrics[0]['Sum'])
else:
throttled_write_count = 0
logger.info('{0} - Write throttle count: {1:d}'.format(
table_name, throttled_write_count))
return throttled_write_count
def get_throttled_by_provisioned_write_event_percent(
table_name, lookback_window_start=15, lookback_period=5):
""" Returns the number of throttled write events during a given time frame
:type table_name: str
:param table_name: Name of the DynamoDB table
:type lookback_window_start: int
:param lookback_window_start: Relative start time for the CloudWatch metric
:type lookback_period: int
:param lookback_period: Number of minutes to look at
:returns: float -- Percent of throttled write events by provisioning
"""
try:
metrics = __get_aws_metric(
table_name,
lookback_window_start,
lookback_period,
'WriteThrottleEvents')
except BotoServerError:
raise
if metrics:
lookback_seconds = lookback_period * 60
throttled_write_events = float(metrics[0]['Sum']) / float(
lookback_seconds)
else:
throttled_write_events = 0
try:
table_write_units = dynamodb.get_provisioned_table_write_units(
table_name)
throttled_by_provisioned_write_percent = (
float(throttled_write_events) /
float(table_write_units) * 100)
except JSONResponseError:
raise
logger.info('{0} - Throttled write percent by provision: {1:.2f}%'.format(
table_name, throttled_by_provisioned_write_percent))
return throttled_by_provisioned_write_percent
def get_throttled_by_consumed_write_percent(
table_name, lookback_window_start=15, lookback_period=5):
""" Returns the number of throttled write events in percent of consumption
:type table_name: str
:param table_name: Name of the DynamoDB table
:type lookback_window_start: int
:param lookback_window_start: Relative start time for the CloudWatch metric
:type lookback_period: int
:param lookback_period: Number of minutes to look at
:returns: float -- Percent of throttled write events by consumption
"""
try:
metrics1 = __get_aws_metric(
table_name,
lookback_window_start,
lookback_period,
'ConsumedWriteCapacityUnits')
metrics2 = __get_aws_metric(
table_name,
lookback_window_start,
lookback_period,
'WriteThrottleEvents')
except BotoServerError:
raise
if metrics1 and metrics2:
lookback_seconds = lookback_period * 60
throttled_by_consumed_write_percent = (
(
(float(metrics2[0]['Sum']) / float(lookback_seconds)) /
(float(metrics1[0]['Sum']) / float(lookback_seconds))
) * 100)
else:
throttled_by_consumed_write_percent = 0
logger.info(
'{0} - Throttled write percent by consumption: {1:.2f}%'.format(
table_name, throttled_by_consumed_write_percent))
return throttled_by_consumed_write_percent
@retry(
wait='exponential_sleep',
wait_exponential_multiplier=1000,
wait_exponential_max=10000,
stop_max_attempt_number=10)
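# Editorial note: the retry decorator above retries failed CloudWatch calls up
# to 10 times, sleeping with exponential backoff of roughly 1 to 10 seconds
# between attempts.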
def __get_aws_metric(table_name, lookback_window_start, lookback_period,
metric_name):
""" Returns a metric list from the AWS CloudWatch service, may return
None if no metric exists
:type table_name: str
:param table_name: Name of the DynamoDB table
:type lookback_window_start: int
    :param lookback_window_start: Relative start time for the CloudWatch metric
    :type lookback_period: int
    :param lookback_period: Length of the lookback period in minutes
:type metric_name: str
:param metric_name: Name of the metric to retrieve from CloudWatch
:returns: list -- A list of time series data for the given metric, may
be None if there was no data
"""
try:
now = datetime.utcnow()
start_time = now - timedelta(minutes=lookback_window_start)
end_time = now - timedelta(
minutes=lookback_window_start - lookback_period)
return cloudwatch_connection.get_metric_statistics(
period=lookback_period * 60,
start_time=start_time,
end_time=end_time,
metric_name=metric_name,
namespace='AWS/DynamoDB',
statistics=['Sum'],
dimensions={'TableName': table_name},
unit='Count')
except BotoServerError as error:
logger.error(
'Unknown boto error. Status: "{0}". '
'Reason: "{1}". Message: {2}'.format(
error.status,
error.reason,
error.message))
raise
| {
"content_hash": "d0c91a9d059f647428811701c071633f",
"timestamp": "",
"source": "github",
"line_count": 377,
"max_line_length": 79,
"avg_line_length": 33.80371352785146,
"alnum_prop": 0.6377903327055869,
"repo_name": "acquiachrisnagy/dynamic-dynamodb",
"id": "ce87f5778e6a41b7d17a13bf76a97eaec080001a",
"size": "12768",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "dynamic_dynamodb/statistics/table.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "152"
},
{
"name": "Python",
"bytes": "267068"
}
],
"symlink_target": ""
} |
"""Timestamp utilities."""
from __future__ import absolute_import
import datetime
class Timestamp(object):
"""Represents a Unix second timestamp with microsecond granularity.
Can be treated in common timestamp arithmetic operations as a numeric type.
Internally stores a time interval as an int of microseconds. This strategy
is necessary since floating point values lose precision when storing values,
especially after arithmetic operations (for example, 10000000 % 0.1 evaluates
to 0.0999999994448885).
"""
def __init__(self, seconds=0, micros=0):
self.micros = int(seconds * 1000000) + int(micros)
@staticmethod
def of(seconds):
"""Return the Timestamp for the given number of seconds.
If the input is already a Timestamp, the input itself will be returned.
Args:
seconds: Number of seconds as int, float or Timestamp.
Returns:
Corresponding Timestamp object.
"""
if isinstance(seconds, Duration):
raise TypeError('Can\'t interpret %s as Timestamp.' % seconds)
if isinstance(seconds, Timestamp):
return seconds
return Timestamp(seconds)
def predecessor(self):
"""Returns the largest timestamp smaller than self."""
return Timestamp(micros=self.micros - 1)
def __repr__(self):
micros = self.micros
sign = ''
if micros < 0:
sign = '-'
micros = -micros
int_part = micros / 1000000
frac_part = micros % 1000000
if frac_part:
return 'Timestamp(%s%d.%06d)' % (sign, int_part, frac_part)
return 'Timestamp(%s%d)' % (sign, int_part)
def to_utc_datetime(self):
epoch = datetime.datetime.utcfromtimestamp(0)
# We can't easily construct a datetime object from microseconds, so we
# create one at the epoch and add an appropriate timedelta interval.
return epoch + datetime.timedelta(microseconds=self.micros)
def isoformat(self):
# Append 'Z' for UTC timezone.
return self.to_utc_datetime().isoformat() + 'Z'
def __float__(self):
# Note that the returned value may have lost precision.
return float(self.micros) / 1000000
def __int__(self):
# Note that the returned value may have lost precision.
return self.micros / 1000000
def __cmp__(self, other):
# Allow comparisons between Duration and Timestamp values.
if not isinstance(other, Duration):
other = Timestamp.of(other)
return cmp(self.micros, other.micros)
def __hash__(self):
return hash(self.micros)
def __add__(self, other):
other = Duration.of(other)
return Timestamp(micros=self.micros + other.micros)
def __radd__(self, other):
return self + other
def __sub__(self, other):
other = Duration.of(other)
return Timestamp(micros=self.micros - other.micros)
def __mod__(self, other):
other = Duration.of(other)
return Duration(micros=self.micros % other.micros)
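# The sentinel values below are the extremes of a signed 64-bit microsecond
# count: -0x7fffffffffffffff - 1 == -2**63 and 0x7fffffffffffffff == 2**63 - 1.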
MIN_TIMESTAMP = Timestamp(micros=-0x7fffffffffffffff - 1)
MAX_TIMESTAMP = Timestamp(micros=0x7fffffffffffffff)
class Duration(object):
"""Represents a second duration with microsecond granularity.
Can be treated in common arithmetic operations as a numeric type.
Internally stores a time interval as an int of microseconds. This strategy
is necessary since floating point values lose precision when storing values,
especially after arithmetic operations (for example, 10000000 % 0.1 evaluates
to 0.0999999994448885).
"""
def __init__(self, seconds=0, micros=0):
self.micros = int(seconds * 1000000) + int(micros)
@staticmethod
def of(seconds):
"""Return the Duration for the given number of seconds since Unix epoch.
If the input is already a Duration, the input itself will be returned.
Args:
seconds: Number of seconds as int, float or Duration.
Returns:
Corresponding Duration object.
"""
if isinstance(seconds, Timestamp):
raise TypeError('Can\'t interpret %s as Duration.' % seconds)
if isinstance(seconds, Duration):
return seconds
return Duration(seconds)
def __repr__(self):
micros = self.micros
sign = ''
if micros < 0:
sign = '-'
micros = -micros
int_part = micros / 1000000
frac_part = micros % 1000000
if frac_part:
return 'Duration(%s%d.%06d)' % (sign, int_part, frac_part)
return 'Duration(%s%d)' % (sign, int_part)
def __float__(self):
# Note that the returned value may have lost precision.
return float(self.micros) / 1000000
def __cmp__(self, other):
# Allow comparisons between Duration and Timestamp values.
if not isinstance(other, Timestamp):
other = Duration.of(other)
return cmp(self.micros, other.micros)
def __hash__(self):
return hash(self.micros)
def __neg__(self):
return Duration(micros=-self.micros)
def __add__(self, other):
if isinstance(other, Timestamp):
return other + self
other = Duration.of(other)
return Duration(micros=self.micros + other.micros)
def __radd__(self, other):
return self + other
def __sub__(self, other):
other = Duration.of(other)
return Duration(micros=self.micros - other.micros)
def __rsub__(self, other):
return -(self - other)
def __mul__(self, other):
other = Duration.of(other)
return Duration(micros=self.micros * other.micros / 1000000)
def __rmul__(self, other):
return self * other
def __mod__(self, other):
other = Duration.of(other)
return Duration(micros=self.micros % other.micros)
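# Editorial sketch (comment only, not in the original module): the integer
# microsecond representation is what keeps the arithmetic above exact. For
# example, the float expression 10000000 % 0.1 evaluates to roughly
# 0.0999999994, whereas Duration(10000000) % Duration(0.1) yields an exact
# Duration(0) because the modulo runs on the underlying integers
# (10000000000000 % 100000 == 0).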
| {
"content_hash": "30d3dcbc3bf4d3d7f0eb61b5dda47556",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 79,
"avg_line_length": 28.942105263157895,
"alnum_prop": 0.6763047826877614,
"repo_name": "vikkyrk/incubator-beam",
"id": "8b2ccda1f16837e6f4774a73359f222d1fb050e1",
"size": "6284",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/utils/timestamp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "42732"
},
{
"name": "Java",
"bytes": "11417976"
},
{
"name": "Protocol Buffer",
"bytes": "50080"
},
{
"name": "Python",
"bytes": "2779037"
},
{
"name": "Shell",
"bytes": "45279"
}
],
"symlink_target": ""
} |
import unittest
import numpy as np
import paddle.fluid.core as core
from paddle.fluid.op import Operator
from op_test import OpTest
import paddle
import paddle.fluid as fluid
import numpy
from paddle.fluid.framework import _test_eager_guard
def calculate_momentum_by_numpy(
param,
grad,
mu,
velocity,
use_nesterov,
learning_rate,
regularization_method=None,
regularization_coeff=1.0,
):
if regularization_method == "l2_decay":
grad = grad + regularization_coeff * param
velocity_out = mu * velocity + grad
if use_nesterov:
param_out = param - (grad + velocity_out * mu) * learning_rate
else:
param_out = param - learning_rate * velocity_out
else:
velocity_out = mu * velocity + grad
if use_nesterov:
param_out = (
param - grad * learning_rate - velocity_out * mu * learning_rate
)
else:
param_out = param - learning_rate * velocity_out
return param_out, velocity_out
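# Editorial note on the reference implementation above: with l2_decay the
# gradient is regularized first (g' = g + coeff * p); the velocity update is
# v_out = mu * v + g', and the parameter step is p_out = p - lr * v_out for
# plain momentum or p_out = p - (g' + mu * v_out) * lr when use_nesterov is
# set (the two Nesterov branches above are algebraically equivalent forms).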
class TestMomentumOp1(OpTest):
def setUp(self):
self.op_type = "momentum"
self.dtype = np.float32
self.init_dtype()
param = np.random.random((123, 321)).astype(self.dtype)
grad = np.random.random((123, 321)).astype(self.dtype)
velocity = np.zeros((123, 321)).astype(self.dtype)
learning_rate = np.array([0.001]).astype(np.float32)
mu = 0.0001
use_nesterov = False
self.inputs = {
'Param': param,
'Grad': grad,
'Velocity': velocity,
'LearningRate': learning_rate,
}
self.attrs = {'mu': mu}
param_out, velocity_out = calculate_momentum_by_numpy(
param=param,
grad=grad,
mu=mu,
velocity=velocity,
use_nesterov=use_nesterov,
learning_rate=learning_rate,
)
self.outputs = {'ParamOut': param_out, 'VelocityOut': velocity_out}
def init_dtype(self):
pass
def test_check_output(self):
self.check_output()
class TestMomentumOpFp16(TestMomentumOp1):
def init_dtype(self):
self.dtype = np.float16
def test_check_output(self):
self.check_output(atol=1e-3)
class TestMomentumOp2(OpTest):
'''Test Momentum with default values for attributes'''
def setUp(self):
self.op_type = "momentum"
param = np.random.random((123, 321)).astype("float32")
grad = np.random.random((123, 321)).astype("float32")
velocity = np.zeros((123, 321)).astype("float32")
learning_rate = np.array([0.001]).astype("float32")
mu = 0.0001
use_nesterov = True
self.inputs = {
'Param': param,
'Grad': grad,
'Velocity': velocity,
'LearningRate': learning_rate,
}
self.attrs = {'mu': mu, 'use_nesterov': use_nesterov}
param_out, velocity_out = calculate_momentum_by_numpy(
param=param,
grad=grad,
mu=mu,
velocity=velocity,
use_nesterov=use_nesterov,
learning_rate=learning_rate,
)
self.outputs = {'ParamOut': param_out, 'VelocityOut': velocity_out}
def test_check_output(self):
self.check_output()
@unittest.skipIf(
not core.is_compiled_with_cuda(), "core is not compiled with CUDA"
)
class TestLarsMomentumOpWithMP(OpTest):
def setUp(self):
self.config()
self.op_type = "lars_momentum"
mu = 0.0001
lars_coeff = 0.001
lars_weight_decay = 0.0005
rescale_grad = 1.0
params = []
grads = []
velocitys = []
learning_rates = []
master_params = []
param_outs = []
velocity_outs = []
master_param_outs = []
for i in range(self.params_num):
master_param = np.random.random((123, 321)).astype("float32")
param = master_param.astype("float16")
grad = np.random.random((123, 321)).astype("float16")
velocity = np.zeros((123, 321)).astype("float32")
learning_rate = np.array([0.001]).astype("float32")
fp32_grad = grad.astype("float32")
pnorm = np.sqrt(np.square(master_param).sum())
gnorm = np.sqrt(np.square(fp32_grad).sum())
local_lr = (
learning_rate
* lars_coeff
* pnorm
/ (gnorm + lars_weight_decay * pnorm)
)
fp32_grad = fp32_grad * rescale_grad
velocity_out = mu * velocity + local_lr * (
fp32_grad + lars_weight_decay * master_param
)
p_new = master_param - velocity_out
param_out = p_new.astype("float16")
master_param_out = p_new
params.append(("SubParam_" + str(i), param))
grads.append(("SubGrad_" + str(i), grad))
velocitys.append(("SubVelocity_" + str(i), velocity))
learning_rates.append(("SubLearning_rate_" + str(i), learning_rate))
velocity_outs.append(("SubVelocity_out_" + str(i), velocity_out))
param_outs.append(("SubParam_out_" + str(i), param_out))
master_params.append(("SubMasterParam_" + str(i), master_param))
master_param_outs.append(
("SubMasterParamOut_" + str(i), master_param_out)
)
self.inputs = {
'Param': params,
'Grad': grads,
'Velocity': velocitys,
'LearningRate': learning_rates,
'MasterParam': master_params,
}
self.attrs = {
'mu': mu,
'lars_coeff': lars_coeff,
'lars_weight_decay': [lars_weight_decay],
'multi_precision': True,
'rescale_grad': rescale_grad,
}
self.outputs = {
'ParamOut': param_outs,
'VelocityOut': velocity_outs,
'MasterParamOut': master_param_outs,
}
def test_check_output(self):
paddle.enable_static()
if core.is_compiled_with_cuda():
place = fluid.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_output_with_place(place)
def config(self):
self.params_num = 1
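# Editorial note: the expected values in the multi-precision LARS test above
# follow the LARS rule computed in fp32 even though the parameters are fp16:
#   local_lr = lr * lars_coeff * ||p|| / (||g|| + weight_decay * ||p||)
#   velocity_out = mu * velocity + local_lr * (g + weight_decay * p)
#   param_out = p - velocity_out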
class TestLarsMomentumOp(OpTest):
def setUp(self):
self.config()
self.op_type = "lars_momentum"
mu = 0.0001
lars_coeff = 0.001
lars_weight_decay = 0.0005
params = []
grads = []
velocitys = []
param_outs = []
velocity_outs = []
learning_rates = []
for i in range(self.params_num):
param = np.random.random((123, 321)).astype("float32")
grad = np.random.random((123, 321)).astype("float32")
velocity = np.zeros((123, 321)).astype("float32")
learning_rate = np.array([0.001]).astype("float32")
pnorm = np.sqrt(np.square(param).sum())
gnorm = np.sqrt(np.square(grad).sum())
local_lr = (
learning_rate
* lars_coeff
* pnorm
/ (gnorm + lars_weight_decay * param)
)
velocity_out = mu * velocity + local_lr * (
grad + lars_weight_decay * param
)
param_out = param - velocity_out
params.append(("SubParam_" + str(i), param))
grads.append(("SubGrad_" + str(i), grad))
velocitys.append(("SubVelocity_" + str(i), velocity))
learning_rates.append(("SubLearning_rate_" + str(i), learning_rate))
velocity_outs.append(("SubVelocity_out_" + str(i), velocity_out))
param_outs.append(("SubParam_out_" + str(i), param_out))
self.inputs = {
'Param': params,
'Grad': grads,
'Velocity': velocitys,
'LearningRate': learning_rates,
}
self.attrs = {
'mu': mu,
'lars_coeff': lars_coeff,
'lars_weight_decay': [lars_weight_decay],
}
self.outputs = {'ParamOut': param_outs, 'VelocityOut': velocity_outs}
def test_check_output(self):
paddle.enable_static()
self.check_output()
def config(self):
self.params_num = 1
class TestSparseMomentumOp(unittest.TestCase):
def setUp(self):
self.use_nesterov = False
self.regularization_method = ""
self.regularization_coeff = 1.0
def check_with_place(self, place):
self.init_kernel()
scope = core.Scope()
# create and initialize Grad Variable
height = 10
rows = [0, 4, 7]
row_numel = 12
mu = 1.0
use_nesterov = self.use_nesterov
regularization_method = self.regularization_method
regularization_coeff = self.regularization_coeff
# create and initialize Param Variable
param = scope.var('Param').get_tensor()
param_array = np.full((height, row_numel), 5.0).astype("float32")
param.set(param_array, place)
param_out = scope.var("ParamOut").get_tensor()
param_out_array = np.full((height, row_numel), 0.0).astype("float32")
param_out.set(param_out_array, place)
grad_selected_rows = scope.var('Grad').get_selected_rows()
grad_selected_rows.set_height(height)
grad_selected_rows.set_rows(rows)
grad_np_array = np.ones((len(rows), row_numel)).astype("float32")
grad_np_array[0, 0] = 2.0
grad_np_array[2, 8] = 4.0
grad_tensor = grad_selected_rows.get_tensor()
grad_tensor.set(grad_np_array, place)
velocity = scope.var('Velocity').get_tensor()
velocity_np_array = np.ones((height, row_numel)).astype("float32")
velocity.set(velocity_np_array, place)
velocity_out = scope.var('VelocityOut').get_tensor()
velocity_out_np_array = np.full((height, row_numel), 0.0).astype(
"float32"
)
velocity_out.set(velocity_out_np_array, place)
# create and initialize LearningRate Variable
lr = scope.var('LearningRate').get_tensor()
lr_array = np.full((1), 2.0).astype("float32")
lr.set(lr_array, place)
# create and run operator
op = Operator(
"momentum",
Param='Param',
Grad='Grad',
Velocity='Velocity',
ParamOut='ParamOut',
VelocityOut='VelocityOut',
LearningRate='LearningRate',
mu=mu,
use_nesterov=use_nesterov,
regularization_method=regularization_method,
regularization_coeff=regularization_coeff,
)
op.run(scope, place)
# get and compare result
param_out_np_array = np.array(param_out)
velocity_out_np_array = np.array(velocity_out)
# TODO(dzh): add a more suitable general numpy interface
# for sparse update.
_grad_np_array = np.full((height, row_numel), 0.0).astype("float32")
for i in range(len(rows)):
_grad_np_array[rows[i]] = grad_np_array[i]
_param = param_array
_param_out, _velocity_out = calculate_momentum_by_numpy(
param=_param,
grad=_grad_np_array,
mu=mu,
velocity=velocity_np_array,
use_nesterov=use_nesterov,
learning_rate=lr_array,
regularization_method=regularization_method,
regularization_coeff=regularization_coeff,
)
self.assertTrue((_velocity_out == velocity_out_np_array).all())
self.assertTrue((_param_out == param_out_np_array).all())
def init_kernel(self):
pass
def test_sparse_momentum(self):
places = [core.CPUPlace()]
if core.is_compiled_with_cuda():
places.append(core.CUDAPlace(0))
for place in places:
self.check_with_place(place)
class TestSparseMomentumOp2(TestSparseMomentumOp):
def init_kernel(self):
self.use_nesterov = True
class TestSparseMomentumOpWithMultiPrecision(unittest.TestCase):
def setUp(self):
self.init_args()
self.regularization_method = ""
self.regularization_coeff = 1.0
def check_with_place(self, place):
scope = core.Scope()
# create and initialize Grad Variable
height = 10
rows = [0, 4, 7]
row_numel = 12
mu = 1.0
use_nesterov = self.use_nesterov
regularization_method = self.regularization_method
regularization_coeff = self.regularization_coeff
# create and initialize Param Variable
param_array = np.full((height, row_numel), 5.0).astype("float32")
param_out_array = np.full((height, row_numel), 0.0).astype("float32")
param = scope.var('Param').get_tensor()
param.set(param_array.astype("float16"), place)
param_out = scope.var("ParamOut").get_tensor()
param_out.set(param_out_array.astype("float16"), place)
master_param = scope.var('MasterParam').get_tensor()
master_param.set(param_array, place)
master_param_out = scope.var("MasterParamOut").get_tensor()
master_param_out.set(param_out_array, place)
grad_selected_rows = scope.var('Grad').get_selected_rows()
grad_selected_rows.set_height(height)
grad_selected_rows.set_rows(rows)
grad_np_array = np.ones((len(rows), row_numel)).astype("float32")
grad_np_array[0, 0] = 2.0
grad_np_array[2, 8] = 4.0
grad_tensor = grad_selected_rows.get_tensor()
grad_tensor.set(grad_np_array.astype("float16"), place)
velocity = scope.var('Velocity').get_tensor()
velocity_np_array = np.ones((height, row_numel)).astype("float32")
velocity.set(velocity_np_array, place)
velocity_out = scope.var('VelocityOut').get_tensor()
velocity_out_np_array = np.full((height, row_numel), 0.0).astype(
"float32"
)
velocity_out.set(velocity_out_np_array, place)
# create and initialize LearningRate Variable
lr = scope.var('LearningRate').get_tensor()
lr_array = np.full((1), 2.0).astype("float32")
lr.set(lr_array, place)
# create and run operator
op = Operator(
"momentum",
Param='Param',
Grad='Grad',
Velocity='Velocity',
MasterParam='MasterParam',
ParamOut='ParamOut',
VelocityOut='VelocityOut',
MasterParamOut='MasterParamOut',
LearningRate='LearningRate',
mu=mu,
use_nesterov=use_nesterov,
regularization_method=regularization_method,
regularization_coeff=regularization_coeff,
multi_precision=True,
rescale_grad=1.0,
)
op.run(scope, place)
# get and compare result
param_out_np_array = np.array(param_out)
velocity_out_np_array = np.array(velocity_out)
_grad_np_array = np.full((height, row_numel), 0.0).astype("float32")
for i in range(len(rows)):
_grad_np_array[rows[i]] = grad_np_array[i]
_param = param_array
_param_out, _velocity_out = calculate_momentum_by_numpy(
param=_param,
grad=_grad_np_array,
mu=mu,
velocity=velocity_np_array,
use_nesterov=use_nesterov,
learning_rate=lr_array,
regularization_method=regularization_method,
regularization_coeff=regularization_coeff,
)
self.assertTrue((_velocity_out == velocity_out_np_array).all())
self.assertTrue((_param_out == param_out_np_array).all())
def init_args(self):
self.use_nesterov = False
def test_sparse_momentum(self):
if core.is_compiled_with_cuda():
self.check_with_place(fluid.CUDAPlace(0))
class TestSparseMomentumOpWithMultiPrecision2(
TestSparseMomentumOpWithMultiPrecision
):
def init_args(self):
self.use_nesterov = True
class TestMomentumV2(unittest.TestCase):
def test_momentum_dygraph(self):
paddle.disable_static()
value = np.arange(26).reshape(2, 13).astype("float32")
a = paddle.to_tensor(value)
linear = paddle.nn.Linear(13, 5)
# This can be any optimizer supported by dygraph.
adam = paddle.optimizer.Momentum(
learning_rate=0.01, momentum=0.9, parameters=linear.parameters()
)
out = linear(a)
out.backward()
adam.step()
adam.clear_gradients()
def test_momentum(self):
paddle.enable_static()
place = fluid.CPUPlace()
main = fluid.Program()
with fluid.program_guard(main):
x = fluid.layers.data(name='x', shape=[13], dtype='float32')
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
y_predict = fluid.layers.fc(input=x, size=1, act=None)
cost = fluid.layers.square_error_cost(input=y_predict, label=y)
avg_cost = paddle.mean(cost)
rms_optimizer = paddle.optimizer.Momentum(
learning_rate=0.1, momentum=0.9
)
rms_optimizer.minimize(avg_cost)
fetch_list = [avg_cost]
train_reader = paddle.batch(
paddle.dataset.uci_housing.train(), batch_size=1
)
feeder = fluid.DataFeeder(place=place, feed_list=[x, y])
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
for data in train_reader():
exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list)
def test_raise_error(self):
self.assertRaises(
ValueError, paddle.optimizer.Momentum, learning_rate=None
)
self.assertRaises(ValueError, paddle.optimizer.Momentum, momentum=None)
def test_api_eager_dygraph(self):
with _test_eager_guard():
self.test_momentum_dygraph()
self.test_raise_error()
class TestMomentumOpWithDecay(OpTest):
def setUp(self):
self.op_type = "momentum"
self.dtype = np.float32
self.use_nesterov = True
self.regularization_method = 'l2_decay'
self.regularization_coeff = 0.9
self.init_config()
param = np.random.random((123, 321)).astype(self.dtype)
grad = np.random.random((123, 321)).astype(self.dtype)
velocity = np.zeros((123, 321)).astype(self.dtype)
learning_rate = np.array([0.001]).astype(np.float32)
mu = 0.0001
use_nesterov = self.use_nesterov
regularization_method = self.regularization_method
regularization_coeff = self.regularization_coeff
self.inputs = {
'Param': param,
'Grad': grad,
'Velocity': velocity,
'LearningRate': learning_rate,
}
self.attrs = {
'mu': mu,
'use_nesterov': use_nesterov,
'regularization_method': regularization_method,
'regularization_coeff': regularization_coeff,
}
grad = grad + regularization_coeff * param
param_out, velocity_out = calculate_momentum_by_numpy(
param=param,
grad=grad,
mu=mu,
velocity=velocity,
use_nesterov=use_nesterov,
learning_rate=learning_rate,
)
self.outputs = {'ParamOut': param_out, 'VelocityOut': velocity_out}
def init_config(self):
pass
def test_check_output(self):
paddle.enable_static()
self.check_output()
class TestMomentumOpWithDecayFP16(TestMomentumOpWithDecay):
def init_config(self):
self.dtype = np.float16
def test_check_output(self):
paddle.enable_static()
self.check_output(atol=1e-3)
class TestMomentumOpWithDecay2(TestMomentumOpWithDecay):
def init_config(self):
self.use_nesterov = False
class TestSparseMomentumOpWithDecay(TestSparseMomentumOp):
def setUp(self):
self.use_nesterov = False
self.regularization_method = 'l2_decay'
self.regularization_coeff = 0.9
class TestSparseMomentumOpWithDecay2(TestSparseMomentumOpWithDecay):
def init_kernel(self):
self.use_nesterov = True
class TestMomentumOpWithDecayAPI(unittest.TestCase):
def _test_momentum_dygraph_common(self, regularization):
paddle.disable_static()
inp = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32")
linear = paddle.nn.Linear(10, 10)
inp = paddle.to_tensor(inp)
out = linear(inp)
loss = paddle.mean(out)
# This can be any optimizer supported by dygraph.
momentum = paddle.fluid.contrib.optimizer.Momentum(
learning_rate=0.01,
momentum=0.9,
parameter_list=linear.parameters(),
regularization=regularization,
)
momentum.minimize(loss)
def test_momentum_dygraph_1(self):
self._test_momentum_dygraph_common(
regularization=paddle.fluid.regularizer.L2Decay(
regularization_coeff=0.1
)
)
def test_momentum_static(self):
paddle.enable_static()
place = fluid.CPUPlace()
main = fluid.Program()
with fluid.program_guard(main):
x = fluid.layers.data(name='x', shape=[13], dtype='float32')
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
y_predict = fluid.layers.fc(input=x, size=1, act=None)
cost = fluid.layers.square_error_cost(input=y_predict, label=y)
avg_cost = paddle.mean(cost)
momentum_optimizer = paddle.fluid.contrib.optimizer.Momentum(
learning_rate=0.1, momentum=0.9
)
momentum_optimizer.minimize(avg_cost)
fetch_list = [avg_cost]
train_reader = paddle.batch(
paddle.dataset.uci_housing.train(), batch_size=1
)
feeder = fluid.DataFeeder(place=place, feed_list=[x, y])
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
for data in train_reader():
exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list)
class TestFusedMomentumWithDecayAPI(unittest.TestCase):
def get_program(self, weight_attr, bias_attr=False):
main_program = paddle.static.Program()
startup_program = paddle.static.Program()
with paddle.static.program_guard(
main_program=main_program, startup_program=startup_program
):
x = paddle.static.data(name='x', shape=[10, 10])
linear = paddle.nn.Linear(
10, 10, weight_attr=weight_attr, bias_attr=bias_attr
)
out = linear(x)
loss = paddle.mean(out)
optimizer = paddle.optimizer.Momentum(
learning_rate=0.01,
momentum=0.9,
weight_decay=paddle.regularizer.L2Decay(0.5),
)
optimizer.minimize(loss)
return main_program
def test_param_has_l2decay(self):
paddle.enable_static()
weight_attr = paddle.ParamAttr(
name="weight",
initializer=paddle.nn.initializer.Constant(value=0.5),
regularizer=paddle.regularizer.L2Decay(0.1),
)
program = self.get_program(weight_attr, bias_attr=False)
ops = program.global_block().ops
self.assertEqual(ops[-1].attr('regularization_method'), 'l2_decay')
self.assertEqual(ops[-1].attr('regularization_coeff'), np.float32(0.1))
for i in range(len(ops)):
self.assertTrue('sum' not in ops[i].type)
self.assertTrue('scale' not in ops[i].type)
def test_param_has_l1decay(self):
paddle.enable_static()
weight_attr = paddle.ParamAttr(
name="weight",
initializer=paddle.nn.initializer.Constant(value=0.5),
regularizer=paddle.regularizer.L1Decay(0.1),
)
bias_attr = paddle.ParamAttr(
name="bias",
initializer=paddle.nn.initializer.Constant(value=0.0),
regularizer=None,
)
program = self.get_program(weight_attr, bias_attr)
ops = program.global_block().ops
self.assertEqual(ops[-1].type, 'momentum')
self.assertEqual(ops[-2].type, 'momentum')
self.assertEqual(ops[-3].type, 'sum')
self.assertEqual(ops[-4].type, 'scale')
self.assertEqual(ops[-5].type, 'sign')
self.assertEqual(ops[-6].type, 'matmul_v2_grad')
if 'weight' in ops[-1].input('Param'):
self.assertEqual(ops[-1].attr('regularization_method'), '')
self.assertEqual(ops[-1].attr('regularization_coeff'), 0)
if 'bias' in ops[-2].input('Param'):
self.assertEqual(ops[-2].attr('regularization_method'), 'l2_decay')
self.assertEqual(
ops[-2].attr('regularization_coeff'), np.float32(0.5)
)
def test_param_has_no_regularizer(self):
paddle.enable_static()
program = self.get_program(weight_attr=None)
ops = program.global_block().ops
self.assertEqual(ops[-1].attr('regularization_method'), 'l2_decay')
self.assertEqual(ops[-1].attr('regularization_coeff'), np.float32(0.5))
for i in range(len(ops)):
self.assertTrue('sum' not in ops[i].type)
self.assertTrue('scale' not in ops[i].type)
class TestMomentumOpVsMomentumOpWithDecayAPI(unittest.TestCase):
def __update_params(self, momentum, linear):
for i in range(10):
inp = paddle.full(
shape=[2, 2], fill_value=i, dtype='float32'
).astype("float32")
inp = paddle.to_tensor(inp)
out = linear(inp)
loss = paddle.mean(out)
loss.backward()
momentum.minimize(loss)
linear.clear_gradients()
def __test_vs(self, place=fluid.CPUPlace()):
paddle.disable_static(place=place)
linear_old = paddle.nn.Linear(
2,
2,
weight_attr=paddle.nn.initializer.Constant(value=2.0),
bias_attr=paddle.nn.initializer.Constant(value=2.0),
)
momentum_old = paddle.fluid.optimizer.Momentum(
learning_rate=0.01,
momentum=0.9,
parameter_list=linear_old.parameters(),
regularization=paddle.fluid.regularizer.L2Decay(
regularization_coeff=0.1
),
)
self.__update_params(momentum=momentum_old, linear=linear_old)
linear_new = paddle.nn.Linear(
2,
2,
weight_attr=paddle.nn.initializer.Constant(value=2.0),
bias_attr=paddle.nn.initializer.Constant(value=2.0),
)
momentum_new = paddle.fluid.contrib.optimizer.Momentum(
learning_rate=0.01,
momentum=0.9,
parameter_list=linear_new.parameters(),
regularization=paddle.fluid.regularizer.L2Decay(
regularization_coeff=0.1
),
)
self.__update_params(momentum=momentum_new, linear=linear_new)
self.assertEqual(
(linear_old.weight.numpy() == linear_new.weight.numpy()).all(),
True,
'the param weight updated by two Momentum optimizers should equal',
)
def test_vs(self, place=fluid.CPUPlace()):
places = [fluid.CPUPlace()]
if paddle.fluid.core.is_compiled_with_cuda():
places.append(fluid.CUDAPlace(0))
for place in places:
self.__test_vs(place=place)
class TestMomentumV2Group(TestMomentumV2):
def test_momentum_dygraph(self):
paddle.disable_static()
value = np.arange(26).reshape(2, 13).astype("float32")
a = paddle.to_tensor(value)
linear_1 = paddle.nn.Linear(13, 5)
linear_2 = paddle.nn.Linear(5, 3)
# This can be any optimizer supported by dygraph.
adam = paddle.optimizer.Momentum(
learning_rate=0.01,
parameters=[
{'params': linear_1.parameters()},
{
'params': linear_2.parameters(),
'weight_decay': 0.001,
'learning_rate': 0.1,
'momentum': 0.99,
},
],
weight_decay=0.1,
momentum=0.9,
)
out = linear_1(a)
out = linear_2(out)
out.backward()
adam.step()
adam.clear_gradients()
class TestMultiTensorMomentumDygraph(unittest.TestCase):
def _momentum_optimize_dygraph(
self,
place,
use_param_attr=False,
use_param_group=False,
use_amp=False,
use_multi_tensor=False,
):
paddle.disable_static()
paddle.seed(10)
paddle.set_device(place)
input = paddle.randn((5, 5))
weight_attr = paddle.ParamAttr(
learning_rate=0.5,
regularizer=paddle.regularizer.L2Decay(1.0),
trainable=True,
)
if use_param_attr:
model = paddle.nn.Linear(5, 5, weight_attr)
else:
model = paddle.nn.Linear(5, 5)
if not use_param_group:
optimizer = paddle.optimizer.Momentum(
parameters=model.parameters(),
use_multi_tensor=use_multi_tensor,
multi_precision=use_amp,
)
else:
parameters = list(model.parameters())
n = len(parameters)
optimizer = paddle.optimizer.Momentum(
parameters=[
{
'params': parameters[: int(n / 2)],
'weight_decay': 0.001,
'learning_rate': 0.1,
'momentum': 0.99,
},
{
'params': parameters[int(n / 2) :],
'weight_decay': 0.001,
'learning_rate': 0.1,
'momentum': 0.99,
},
],
use_multi_tensor=use_multi_tensor,
multi_precision=use_amp,
)
for idx in range(5):
if place == 'gpu' and use_amp:
model = paddle.amp.decorate(models=model, level='O2')
scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
if place == 'gpu' and use_amp:
with paddle.amp.auto_cast(level='O2'):
output = model(input)
loss = paddle.mean(output)
scaled = scaler.scale(loss)
scaled.backward()
scaler.step(optimizer)
optimizer.clear_grad(set_to_zero=False)
else:
output = model(input)
loss = paddle.mean(output)
# This can be any optimizer supported by dygraph.
loss.backward()
optimizer.step()
optimizer.clear_grad(set_to_zero=False)
return output, model.parameters()
def _get_places(self):
places = ['cpu']
if paddle.is_compiled_with_cuda():
places.append('gpu')
return places
def _check_with_place_amp(self, place, use_amp):
output1, params1 = self._momentum_optimize_dygraph(
place=place, use_amp=use_amp, use_multi_tensor=True
)
output2, params2 = self._momentum_optimize_dygraph(
place=place, use_amp=use_amp, use_multi_tensor=False
)
np.testing.assert_allclose(output1, output2, rtol=1e-05)
for idx in range(len(params1)):
np.testing.assert_allclose(params1[idx], params2[idx], rtol=1e-05)
    def _check_with_param_attr(self, place, use_amp):
output1, params1 = self._momentum_optimize_dygraph(
place=place,
use_amp=use_amp,
use_param_attr=True,
use_multi_tensor=True,
)
output2, params2 = self._momentum_optimize_dygraph(
place=place,
use_amp=use_amp,
use_param_attr=True,
use_multi_tensor=False,
)
np.testing.assert_allclose(output1, output2, rtol=1e-05)
for idx in range(len(params1)):
np.testing.assert_allclose(params1[idx], params2[idx], rtol=1e-05)
def _check_with_param_group(self, place, use_amp):
output1, params1 = self._momentum_optimize_dygraph(
place=place,
use_amp=use_amp,
use_param_group=True,
use_multi_tensor=True,
)
output2, params2 = self._momentum_optimize_dygraph(
place=place,
use_amp=use_amp,
use_param_group=True,
use_multi_tensor=False,
)
np.testing.assert_allclose(output1, output2, rtol=1e-05)
for idx in range(len(params1)):
np.testing.assert_allclose(params1[idx], params2[idx], rtol=1e-05)
def test_main(self):
for place in self._get_places():
use_amp_list = [True, False]
for use_amp in use_amp_list:
self._check_with_place_amp(place, use_amp)
                self._check_with_param_attr(place, use_amp)
self._check_with_param_group(place, use_amp)
def test_api_eager_dygraph(self):
with _test_eager_guard():
self.test_main()
class TestMultiTensorMomentumStatic(unittest.TestCase):
def _momentum_optimize_static(
self, place, use_amp=False, use_multi_tensor=False
):
paddle.enable_static()
paddle.seed(10)
np.random.seed(10)
if place == 'cpu':
use_amp = False
exe = paddle.static.Executor(place=place)
train_program = paddle.static.Program()
startup_program = paddle.static.Program()
optimizer = paddle.optimizer.Momentum(
multi_precision=use_amp, use_multi_tensor=use_multi_tensor
)
if use_amp:
optimizer = paddle.static.amp.decorate(
optimizer,
init_loss_scaling=128.0,
use_dynamic_loss_scaling=True,
use_pure_fp16=True,
use_fp16_guard=False,
)
with paddle.static.program_guard(train_program, startup_program):
if use_amp:
data = paddle.static.data(
shape=[2, 2], name='X', dtype='float16'
)
else:
data = paddle.static.data(
shape=[2, 2], name='X', dtype='float32'
)
hidden = paddle.static.nn.fc(x=data, size=10)
loss = paddle.mean(hidden)
optimizer.minimize(loss)
exe.run(startup_program)
if use_amp:
optimizer.amp_init(place=place, scope=paddle.static.global_scope())
x = numpy.random.random(size=(2, 2)).astype('float16')
else:
x = numpy.random.random(size=(2, 2)).astype('float32')
out = []
for idx in range(5):
(loss_data,) = exe.run(
train_program, feed={"X": x}, fetch_list=[loss.name]
)
out.append(loss_data)
return out
def _get_places(self):
places = ['cpu']
if paddle.is_compiled_with_cuda():
places.append('gpu')
return places
def _check_with_place_amp(self, place, use_amp):
output1 = self._momentum_optimize_static(
place=place, use_amp=use_amp, use_multi_tensor=True
)
output2 = self._momentum_optimize_static(
place=place, use_amp=use_amp, use_multi_tensor=False
)
for idx in range(len(output1)):
np.testing.assert_allclose(output1[idx], output2[idx], rtol=1e-05)
def test_main(self):
for place in self._get_places():
use_amp_list = [True, False]
for use_amp in use_amp_list:
self._check_with_place_amp(place, use_amp)
if __name__ == "__main__":
paddle.enable_static()
unittest.main()
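# Illustrative summary of the API under test (argument values other than the
# ones exercised above are assumptions): both test classes run the same
# training loop twice, differing only in use_multi_tensor, e.g.
#   opt = paddle.optimizer.Momentum(parameters=model.parameters(),
#                                   multi_precision=True,
#                                   use_multi_tensor=True)
# and assert that outputs/parameters (dygraph) or losses (static graph) match
# the use_multi_tensor=False run to rtol=1e-05.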
| {
"content_hash": "5c7ecff916f1ac1c6f24941854152fdc",
"timestamp": "",
"source": "github",
"line_count": 1058,
"max_line_length": 80,
"avg_line_length": 34.51984877126654,
"alnum_prop": 0.5599364766442144,
"repo_name": "luotao1/Paddle",
"id": "fd9b8b88016bd749815fc4de252e1820a7e8126f",
"size": "37135",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/tests/unittests/test_momentum_op.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "58544"
},
{
"name": "C",
"bytes": "210300"
},
{
"name": "C++",
"bytes": "36771446"
},
{
"name": "CMake",
"bytes": "903079"
},
{
"name": "Cuda",
"bytes": "5200715"
},
{
"name": "Dockerfile",
"bytes": "4361"
},
{
"name": "Go",
"bytes": "49796"
},
{
"name": "Java",
"bytes": "16630"
},
{
"name": "Jinja",
"bytes": "23852"
},
{
"name": "MLIR",
"bytes": "39982"
},
{
"name": "Python",
"bytes": "36248258"
},
{
"name": "R",
"bytes": "1332"
},
{
"name": "Shell",
"bytes": "553175"
}
],
"symlink_target": ""
} |
"""The Post class."""
from __future__ import unicode_literals, print_function, absolute_import
import io
from collections import defaultdict
import datetime
import hashlib
import json
import os
import re
import string
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin # NOQA
from . import utils
from blinker import signal
import dateutil.tz
import lxml.html
import natsort
try:
import pyphen
except ImportError:
pyphen = None
from math import ceil # for reading time feature
# for tearDown with _reload we cannot use 'from ... import' to get LocaleBorg
import nikola.utils
from .utils import (
current_time,
Functionary,
LOGGER,
LocaleBorg,
slugify,
to_datetime,
unicode_str,
demote_headers,
get_translation_candidate,
unslugify,
)
from .rc4 import rc4
__all__ = ('Post',)
TEASER_REGEXP = re.compile(r'<!--\s*TEASER_END(:(.+))?\s*-->', re.IGNORECASE)
_UPGRADE_METADATA_ADVERTISED = False
class Post(object):
"""Represent a blog post or site page."""
def __init__(
self,
source_path,
config,
destination,
use_in_feeds,
messages,
template_name,
compiler
):
"""Initialize post.
The source path is the user created post file. From it we calculate
the meta file, as well as any translations available, and
the .html fragment file path.
"""
self.config = config
self.compiler = compiler
self.compile_html = self.compiler.compile_html
self.demote_headers = self.compiler.demote_headers and self.config['DEMOTE_HEADERS']
tzinfo = self.config['__tzinfo__']
if self.config['FUTURE_IS_NOW']:
self.current_time = None
else:
self.current_time = current_time(tzinfo)
self.translated_to = set([])
self._prev_post = None
self._next_post = None
self.base_url = self.config['BASE_URL']
self.is_draft = False
self.is_private = False
self.strip_indexes = self.config['STRIP_INDEXES']
self.index_file = self.config['INDEX_FILE']
self.pretty_urls = self.config['PRETTY_URLS']
self.source_path = source_path # posts/blah.txt
self.post_name = os.path.splitext(source_path)[0] # posts/blah
# cache[\/]posts[\/]blah.html
self.base_path = os.path.join(self.config['CACHE_FOLDER'], self.post_name + ".html")
# cache/posts/blah.html
self._base_path = self.base_path.replace('\\', '/')
self.metadata_path = self.post_name + ".meta" # posts/blah.meta
self.folder = destination
self.translations = self.config['TRANSLATIONS']
self.default_lang = self.config['DEFAULT_LANG']
self.messages = messages
self.skip_untranslated = not self.config['SHOW_UNTRANSLATED_POSTS']
self._template_name = template_name
self.is_two_file = True
self.newstylemeta = True
self._reading_time = None
self._remaining_reading_time = None
self._paragraph_count = None
self._remaining_paragraph_count = None
self._dependency_file_fragment = defaultdict(list)
self._dependency_file_page = defaultdict(list)
self._dependency_uptodate_fragment = defaultdict(list)
self._dependency_uptodate_page = defaultdict(list)
self._depfile = defaultdict(list)
default_metadata, self.newstylemeta = get_meta(self, self.config['FILE_METADATA_REGEXP'], self.config['UNSLUGIFY_TITLES'])
self.meta = Functionary(lambda: None, self.default_lang)
self.meta[self.default_lang] = default_metadata
# Load internationalized metadata
for lang in self.translations:
if os.path.isfile(get_translation_candidate(self.config, self.source_path, lang)):
self.translated_to.add(lang)
if lang != self.default_lang:
meta = defaultdict(lambda: '')
meta.update(default_metadata)
_meta, _nsm = get_meta(self, self.config['FILE_METADATA_REGEXP'], self.config['UNSLUGIFY_TITLES'], lang)
self.newstylemeta = self.newstylemeta and _nsm
meta.update(_meta)
self.meta[lang] = meta
if not self.is_translation_available(self.default_lang):
# Special case! (Issue #373)
# Fill default_metadata with stuff from the other languages
for lang in sorted(self.translated_to):
default_metadata.update(self.meta[lang])
if 'date' not in default_metadata and not use_in_feeds:
# For stories we don't *really* need a date
if self.config['__invariant__']:
default_metadata['date'] = datetime.datetime(2013, 12, 31, 23, 59, 59, tzinfo=tzinfo)
else:
default_metadata['date'] = datetime.datetime.utcfromtimestamp(
os.stat(self.source_path).st_ctime).replace(tzinfo=dateutil.tz.tzutc()).astimezone(tzinfo)
# If time zone is set, build localized datetime.
self.date = to_datetime(self.meta[self.default_lang]['date'], tzinfo)
if 'updated' not in default_metadata:
default_metadata['updated'] = default_metadata.get('date', None)
self.updated = to_datetime(default_metadata['updated'])
if 'title' not in default_metadata or 'slug' not in default_metadata \
or 'date' not in default_metadata:
raise OSError("You must set a title (found '{0}'), a slug (found "
"'{1}') and a date (found '{2}')! [in file "
"{3}]".format(default_metadata.get('title', None),
default_metadata.get('slug', None),
default_metadata.get('date', None),
source_path))
if 'type' not in default_metadata:
# default value is 'text'
default_metadata['type'] = 'text'
self.publish_later = False if self.current_time is None else self.date >= self.current_time
is_draft = False
is_private = False
self._tags = {}
for lang in self.translated_to:
self._tags[lang] = natsort.natsorted(
list(set([x.strip() for x in self.meta[lang]['tags'].split(',')])),
alg=natsort.ns.F | natsort.ns.IC)
self._tags[lang] = [t for t in self._tags[lang] if t]
if 'draft' in [_.lower() for _ in self._tags[lang]]:
is_draft = True
LOGGER.debug('The post "{0}" is a draft.'.format(self.source_path))
self._tags[lang].remove('draft')
# TODO: remove in v8
if 'retired' in self._tags[lang]:
is_private = True
LOGGER.warning('The "retired" tag in post "{0}" is now deprecated and will be removed in v8. Use "private" instead.'.format(self.source_path))
self._tags[lang].remove('retired')
# end remove in v8
if 'private' in self._tags[lang]:
is_private = True
LOGGER.debug('The post "{0}" is private.'.format(self.source_path))
self._tags[lang].remove('private')
# While draft comes from the tags, it's not really a tag
self.is_draft = is_draft
self.is_private = is_private
self.is_post = use_in_feeds
self.use_in_feeds = use_in_feeds and not is_draft and not is_private \
and not self.publish_later
# Register potential extra dependencies
self.compiler.register_extra_dependencies(self)
def _get_hyphenate(self):
return bool(self.config['HYPHENATE'] or self.meta('hyphenate'))
hyphenate = property(_get_hyphenate)
def __repr__(self):
"""Provide a representation of the post object."""
# Calculate a hash that represents most data about the post
m = hashlib.md5()
# source_path modification date (to avoid reading it)
m.update(utils.unicode_str(os.stat(self.source_path).st_mtime).encode('utf-8'))
clean_meta = {}
for k, v in self.meta.items():
sub_meta = {}
clean_meta[k] = sub_meta
for kk, vv in v.items():
if vv:
sub_meta[kk] = vv
m.update(utils.unicode_str(json.dumps(clean_meta, cls=utils.CustomEncoder, sort_keys=True)).encode('utf-8'))
return '<Post: {0!r} {1}>'.format(self.source_path, m.hexdigest())
def _has_pretty_url(self, lang):
if self.pretty_urls and \
self.meta[lang].get('pretty_url', '') != 'False' and \
self.meta[lang]['slug'] != 'index':
return True
else:
return False
@property
def is_mathjax(self):
"""True if this post has the mathjax tag in the current language or is a python notebook."""
if self.compiler.name == 'ipynb':
return True
lang = nikola.utils.LocaleBorg().current_lang
if self.is_translation_available(lang):
return 'mathjax' in self.tags_for_language(lang)
# If it has math in ANY other language, enable it. Better inefficient than broken.
return 'mathjax' in self.alltags
@property
def alltags(self):
"""Return ALL the tags for this post."""
tags = []
for l in self._tags:
tags.extend(self._tags[l])
return list(set(tags))
def tags_for_language(self, lang):
"""Return tags for a given language."""
if lang in self._tags:
return self._tags[lang]
elif lang not in self.translated_to and self.skip_untranslated:
return []
elif self.default_lang in self._tags:
return self._tags[self.default_lang]
else:
return []
@property
def tags(self):
"""Return tags for the current language."""
lang = nikola.utils.LocaleBorg().current_lang
return self.tags_for_language(lang)
@property
def prev_post(self):
"""Return previous post."""
lang = nikola.utils.LocaleBorg().current_lang
rv = self._prev_post
while self.skip_untranslated:
if rv is None:
break
if rv.is_translation_available(lang):
break
rv = rv._prev_post
return rv
@prev_post.setter # NOQA
def prev_post(self, v):
"""Set previous post."""
self._prev_post = v
@property
def next_post(self):
"""Return next post."""
lang = nikola.utils.LocaleBorg().current_lang
rv = self._next_post
while self.skip_untranslated:
if rv is None:
break
if rv.is_translation_available(lang):
break
rv = rv._next_post
return rv
@next_post.setter # NOQA
def next_post(self, v):
"""Set next post."""
self._next_post = v
@property
def template_name(self):
"""Return template name for this post."""
lang = nikola.utils.LocaleBorg().current_lang
return self.meta[lang]['template'] or self._template_name
def formatted_date(self, date_format, date=None):
"""Return the formatted date as unicode."""
return utils.LocaleBorg().formatted_date(date_format, date if date else self.date)
def formatted_updated(self, date_format):
"""Return the updated date as unicode."""
return self.formatted_date(date_format, self.updated)
def title(self, lang=None):
"""Return localized title.
If lang is not specified, it defaults to the current language from
templates, as set in LocaleBorg.
"""
if lang is None:
lang = nikola.utils.LocaleBorg().current_lang
return self.meta[lang]['title']
def author(self, lang=None):
"""Return localized author or BLOG_AUTHOR if unspecified.
If lang is not specified, it defaults to the current language from
templates, as set in LocaleBorg.
"""
if lang is None:
lang = nikola.utils.LocaleBorg().current_lang
if self.meta[lang]['author']:
author = self.meta[lang]['author']
else:
author = self.config['BLOG_AUTHOR'](lang)
return author
def description(self, lang=None):
"""Return localized description."""
if lang is None:
lang = nikola.utils.LocaleBorg().current_lang
return self.meta[lang]['description']
def add_dependency(self, dependency, add='both', lang=None):
"""Add a file dependency for tasks using that post.
The ``dependency`` should be a string specifying a path, or a callable
which returns such a string or a list of strings.
The ``add`` parameter can be 'both', 'fragment' or 'page', to indicate
that this dependency shall be used
* when rendering the fragment to HTML ('fragment' and 'both'), or
* when creating a page with parts of the ``Post`` embedded, which
includes the HTML resulting from compiling the fragment ('page' or
'both').
If ``lang`` is not specified, this dependency is added for all languages.
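Example (illustrative; 'listings/hello.py' is a hypothetical path):
post.add_dependency('listings/hello.py', add='fragment')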
"""
if add not in {'fragment', 'page', 'both'}:
raise Exception("Add parameter is '{0}', but must be either 'fragment', 'page', or 'both'.".format(add))
if add == 'fragment' or add == 'both':
self._dependency_file_fragment[lang].append((type(dependency) != str, dependency))
if add == 'page' or add == 'both':
self._dependency_file_page[lang].append((type(dependency) != str, dependency))
def add_dependency_uptodate(self, dependency, is_callable=False, add='both', lang=None):
"""Add a dependency for task's ``uptodate`` for tasks using that post.
This can be for example an ``utils.config_changed`` object, or a list of
such objects.
The ``is_callable`` parameter specifies whether ``dependency`` is a
callable which generates an entry or a list of entries for the ``uptodate``
list, or whether it is an entry which can directly be added (as a single
object or a list of objects).
The ``add`` parameter can be 'both', 'fragment' or 'page', to indicate
that this dependency shall be used
* when rendering the fragment to HTML ('fragment' and 'both'), or
* when creating a page with parts of the ``Post`` embedded, which
includes the HTML resulting from compiling the fragment ('page' or
'both').
If ``lang`` is not specified, this dependency is added for all languages.
Example:
post.add_dependency_uptodate(
utils.config_changed({1: some_data}, 'uniqueid'), False, 'page')
"""
if add == 'fragment' or add == 'both':
self._dependency_uptodate_fragment[lang].append((is_callable, dependency))
if add == 'page' or add == 'both':
self._dependency_uptodate_page[lang].append((is_callable, dependency))
def register_depfile(self, dep, dest=None, lang=None):
"""Register a dependency in the dependency file."""
if not dest:
dest = self.translated_base_path(lang)
self._depfile[dest].append(dep)
@staticmethod
def write_depfile(dest, deps_list):
"""Write a depfile for a given language."""
deps_path = dest + '.dep'
if deps_list:
deps_list = [p for p in deps_list if p != dest] # Don't depend on yourself (#1671)
with io.open(deps_path, "w+", encoding="utf8") as deps_file:
deps_file.write('\n'.join(deps_list))
else:
if os.path.isfile(deps_path):
os.unlink(deps_path)
def _get_dependencies(self, deps_list):
deps = []
for dep in deps_list:
if dep[0]:
# callable
result = dep[1]()
else:
# can add directly
result = dep[1]
# if result is a list, add its contents
if type(result) == list:
deps.extend(result)
else:
deps.append(result)
return deps
def deps(self, lang):
"""Return a list of file dependencies to build this post's page."""
deps = []
if self.default_lang in self.translated_to:
deps.append(self.base_path)
deps.append(self.source_path)
if os.path.exists(self.metadata_path):
deps.append(self.metadata_path)
if lang != self.default_lang:
cand_1 = get_translation_candidate(self.config, self.source_path, lang)
cand_2 = get_translation_candidate(self.config, self.base_path, lang)
if os.path.exists(cand_1):
deps.extend([cand_1, cand_2])
cand_3 = get_translation_candidate(self.config, self.metadata_path, lang)
if os.path.exists(cand_3):
deps.append(cand_3)
deps += self._get_dependencies(self._dependency_file_page[lang])
deps += self._get_dependencies(self._dependency_file_page[None])
return sorted(deps)
def deps_uptodate(self, lang):
"""Return a list of uptodate dependencies to build this post's page.
These dependencies should be included in ``uptodate`` for the task
which generates the page.
"""
deps = []
deps += self._get_dependencies(self._dependency_uptodate_page[lang])
deps += self._get_dependencies(self._dependency_uptodate_page[None])
deps.append(utils.config_changed({1: sorted(self.compiler.config_dependencies)}, 'nikola.post.Post.deps_uptodate:compiler:' + self.source_path))
return deps
def compile(self, lang):
"""Generate the cache/ file with the compiled post."""
def wrap_encrypt(path, password):
"""Wrap a post with encryption."""
with io.open(path, 'r+', encoding='utf8') as inf:
data = inf.read() + "<!--tail-->"
data = CRYPT.substitute(data=rc4(password, data))
with io.open(path, 'w+', encoding='utf8') as outf:
outf.write(data)
dest = self.translated_base_path(lang)
if not self.is_translation_available(lang) and not self.config['SHOW_UNTRANSLATED_POSTS']:
return
# Set the language to the right thing
LocaleBorg().set_locale(lang)
self.compile_html(
self.translated_source_path(lang),
dest,
self.is_two_file)
Post.write_depfile(dest, self._depfile[dest])
signal('compiled').send({
'source': self.translated_source_path(lang),
'dest': dest,
'post': self,
})
if self.meta('password'):
# TODO: get rid of this feature one day (v8?; warning added in v7.3.0.)
LOGGER.warn("The post {0} is using the `password` attribute, which may stop working in the future.")
LOGGER.warn("Please consider switching to a more secure method of encryption.")
LOGGER.warn("More details: https://github.com/getnikola/nikola/issues/1547")
wrap_encrypt(dest, self.meta('password'))
if self.publish_later:
LOGGER.notice('{0} is scheduled to be published in the future ({1})'.format(
self.source_path, self.date))
def fragment_deps(self, lang):
"""Return a list of uptodate dependencies to build this post's fragment.
These dependencies should be included in ``uptodate`` for the task
which generates the fragment.
"""
deps = []
if self.default_lang in self.translated_to:
deps.append(self.source_path)
if os.path.isfile(self.metadata_path):
deps.append(self.metadata_path)
lang_deps = []
if lang != self.default_lang:
lang_deps = [get_translation_candidate(self.config, d, lang) for d in deps]
deps += lang_deps
deps = [d for d in deps if os.path.exists(d)]
deps += self._get_dependencies(self._dependency_file_fragment[lang])
deps += self._get_dependencies(self._dependency_file_fragment[None])
return sorted(deps)
def fragment_deps_uptodate(self, lang):
"""Return a list of file dependencies to build this post's fragment."""
deps = []
deps += self._get_dependencies(self._dependency_uptodate_fragment[lang])
deps += self._get_dependencies(self._dependency_uptodate_fragment[None])
deps.append(utils.config_changed({1: sorted(self.compiler.config_dependencies)}, 'nikola.post.Post.deps_uptodate:compiler:' + self.source_path))
return deps
def is_translation_available(self, lang):
"""Return True if the translation actually exists."""
return lang in self.translated_to
def translated_source_path(self, lang):
"""Return path to the translation's source file."""
if lang in self.translated_to:
if lang == self.default_lang:
return self.source_path
else:
return get_translation_candidate(self.config, self.source_path, lang)
elif lang != self.default_lang:
return self.source_path
else:
return get_translation_candidate(self.config, self.source_path, sorted(self.translated_to)[0])
def translated_base_path(self, lang):
"""Return path to the translation's base_path file."""
return get_translation_candidate(self.config, self.base_path, lang)
def _translated_file_path(self, lang):
"""Return path to the translation's file, or to the original."""
if lang in self.translated_to:
if lang == self.default_lang:
return self.base_path
else:
return get_translation_candidate(self.config, self.base_path, lang)
elif lang != self.default_lang:
return self.base_path
else:
return get_translation_candidate(self.config, self.base_path, sorted(self.translated_to)[0])
def text(self, lang=None, teaser_only=False, strip_html=False, show_read_more_link=True,
feed_read_more_link=False, feed_links_append_query=None):
"""Read the post file for that language and return its contents.
teaser_only=True breaks at the teaser marker and returns only the teaser.
strip_html=True removes HTML tags
show_read_more_link=False does not add the Read more... link
feed_read_more_link=True uses FEED_READ_MORE_LINK instead of INDEX_READ_MORE_LINK
lang=None uses the last used to set locale
All links in the returned HTML will be relative.
The HTML returned is a bare fragment, not a full document.
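Example (illustrative; assumes the post has an English translation):
teaser_html = post.text(lang='en', teaser_only=True)
plain_text = post.text(lang='en', strip_html=True)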
"""
if lang is None:
lang = nikola.utils.LocaleBorg().current_lang
file_name = self._translated_file_path(lang)
# Yes, we compile it and screw it.
# This may be controversial, but the user (or someone) is asking for the post text
# and the post should not just refuse to give it.
if not os.path.isfile(file_name):
self.compile(lang)
with io.open(file_name, "r", encoding="utf8") as post_file:
data = post_file.read().strip()
if self.compiler.extension() == '.php':
return data
try:
document = lxml.html.fragment_fromstring(data, "body")
except lxml.etree.ParserError as e:
# if we don't catch this, it breaks later (Issue #374)
if str(e) == "Document is empty":
return ""
# let other errors raise
raise(e)
base_url = self.permalink(lang=lang)
document.make_links_absolute(base_url)
if self.hyphenate:
hyphenate(document, lang)
try:
data = lxml.html.tostring(document.body, encoding='unicode')
except Exception:
data = lxml.html.tostring(document, encoding='unicode')
if teaser_only:
teaser_regexp = self.config.get('TEASER_REGEXP', TEASER_REGEXP)
teaser = teaser_regexp.split(data)[0]
if teaser != data:
if not strip_html and show_read_more_link:
if teaser_regexp.search(data).groups()[-1]:
teaser_text = teaser_regexp.search(data).groups()[-1]
else:
teaser_text = self.messages[lang]["Read more"]
l = self.config['FEED_READ_MORE_LINK'](lang) if feed_read_more_link else self.config['INDEX_READ_MORE_LINK'](lang)
teaser += l.format(
link=self.permalink(lang, query=feed_links_append_query),
read_more=teaser_text,
min_remaining_read=self.messages[lang]["%d min remaining to read"] % (self.remaining_reading_time),
reading_time=self.reading_time,
remaining_reading_time=self.remaining_reading_time,
paragraph_count=self.paragraph_count,
remaining_paragraph_count=self.remaining_paragraph_count)
# This closes all open tags and sanitizes the broken HTML
document = lxml.html.fromstring(teaser)
try:
data = lxml.html.tostring(document.body, encoding='unicode')
except IndexError:
data = lxml.html.tostring(document, encoding='unicode')
if data and strip_html:
try:
# Not all posts have a body. For example, you may have a page statically defined in the template that does not take content as input.
content = lxml.html.fromstring(data)
data = content.text_content().strip() # No whitespace wanted.
except lxml.etree.ParserError:
data = ""
elif data:
if self.demote_headers:
# see above
try:
document = lxml.html.fromstring(data)
demote_headers(document, self.demote_headers)
data = lxml.html.tostring(document.body, encoding='unicode')
except (lxml.etree.ParserError, IndexError):
data = lxml.html.tostring(document, encoding='unicode')
return data
@property
def reading_time(self):
"""Reading time based on length of text."""
if self._reading_time is None:
text = self.text(strip_html=True)
words_per_minute = 220
words = len(text.split())
markup = lxml.html.fromstring(self.text(strip_html=False))
embeddables = [".//img", ".//picture", ".//video", ".//audio", ".//object", ".//iframe"]
media_time = 0
for embedded in embeddables:
media_time += (len(markup.findall(embedded)) * 0.33)  # 0.33 min (about 20 seconds) per embedded item
self._reading_time = int(ceil((words / words_per_minute) + media_time)) or 1
return self._reading_time
@property
def remaining_reading_time(self):
"""Remaining reading time based on length of text (does not include teaser)."""
if self._remaining_reading_time is None:
text = self.text(teaser_only=True, strip_html=True)
words_per_minute = 220
words = len(text.split())
self._remaining_reading_time = self.reading_time - int(ceil(words / words_per_minute)) or 1
return self._remaining_reading_time
@property
def paragraph_count(self):
"""Return the paragraph count for this post."""
if self._paragraph_count is None:
# duplicated with Post.text()
lang = nikola.utils.LocaleBorg().current_lang
file_name = self._translated_file_path(lang)
with io.open(file_name, "r", encoding="utf8") as post_file:
data = post_file.read().strip()
try:
document = lxml.html.fragment_fromstring(data, "body")
except lxml.etree.ParserError as e:
# if we don't catch this, it breaks later (Issue #374)
if str(e) == "Document is empty":
return ""
# let other errors raise
raise(e)
# output is a float, for no real reason at all
self._paragraph_count = int(document.xpath('count(//p)'))
return self._paragraph_count
@property
def remaining_paragraph_count(self):
"""Return the remaining paragraph count for this post (does not include teaser)."""
if self._remaining_paragraph_count is None:
try:
# Just asking self.text() is easier here.
document = lxml.html.fragment_fromstring(self.text(teaser_only=True, show_read_more_link=False), "body")
except lxml.etree.ParserError as e:
# if we don't catch this, it breaks later (Issue #374)
if str(e) == "Document is empty":
return ""
# let other errors raise
raise(e)
self._remaining_paragraph_count = self.paragraph_count - int(document.xpath('count(//p)'))
return self._remaining_paragraph_count
def source_link(self, lang=None):
"""Return absolute link to the post's source."""
ext = self.source_ext(True)
link = "/" + self.destination_path(lang=lang, extension=ext, sep='/')
link = utils.encodelink(link)
return link
def destination_path(self, lang=None, extension='.html', sep=os.sep):
"""Destination path for this post, relative to output/.
If lang is not specified, it's the current language.
Extension is used in the path if specified.
"""
if lang is None:
lang = nikola.utils.LocaleBorg().current_lang
if self._has_pretty_url(lang):
path = os.path.join(self.translations[lang],
self.folder, self.meta[lang]['slug'], 'index' + extension)
else:
path = os.path.join(self.translations[lang],
self.folder, self.meta[lang]['slug'] + extension)
if sep != os.sep:
path = path.replace(os.sep, sep)
if path.startswith('./'):
path = path[2:]
return path
def section_color(self, lang=None):
"""Return the color of the post's section."""
slug = self.section_slug(lang)
if slug in self.config['POSTS_SECTION_COLORS'](lang):
return self.config['POSTS_SECTION_COLORS'](lang)[slug]
base = self.config['THEME_COLOR']
return utils.colorize_str_from_base_color(slug, base)
def section_link(self, lang=None):
"""Return the link to the post's section (deprecated)."""
utils.LOGGER.warning("Post.section_link is deprecated. Please use " +
"site.link('section_index', post.section_slug()) instead.")
if lang is None:
lang = nikola.utils.LocaleBorg().current_lang
slug = self.section_slug(lang)
t = os.path.normpath(self.translations[lang])
if t == '.':
t = ''
link = '/' + '/'.join(i for i in (t, slug) if i) + '/'
if not self.pretty_urls:
link = urljoin(link, self.index_file)
link = utils.encodelink(link)
return link
def section_name(self, lang=None):
"""Return the name of the post's section."""
slug = self.section_slug(lang)
if slug in self.config['POSTS_SECTION_NAME'](lang):
name = self.config['POSTS_SECTION_NAME'](lang)[slug]
else:
name = slug.replace('-', ' ').title()
return name
def section_slug(self, lang=None):
"""Return the slug for the post's section."""
if lang is None:
lang = nikola.utils.LocaleBorg().current_lang
if not self.config['POSTS_SECTION_FROM_META']:
dest = self.destination_path(lang)
if dest[-(1 + len(self.index_file)):] == os.sep + self.index_file:
dest = dest[:-(1 + len(self.index_file))]
dirname = os.path.dirname(dest)
slug = dest.split(os.sep)
if not slug or dirname == '.':
slug = self.messages[lang]["Uncategorized"]
elif lang == slug[0]:
slug = slug[1]
else:
slug = slug[0]
else:
slug = self.meta[lang]['section'].split(',')[0] if 'section' in self.meta[lang] else self.messages[lang]["Uncategorized"]
return utils.slugify(slug, lang)
def permalink(self, lang=None, absolute=False, extension='.html', query=None):
"""Return permalink for a post."""
if lang is None:
lang = nikola.utils.LocaleBorg().current_lang
# Let compilers override extension (e.g. the php compiler)
if self.compiler.extension() != '.html':
extension = self.compiler.extension()
pieces = self.translations[lang].split(os.sep)
pieces += self.folder.split(os.sep)
if self._has_pretty_url(lang):
pieces += [self.meta[lang]['slug'], 'index' + extension]
else:
pieces += [self.meta[lang]['slug'] + extension]
pieces = [_f for _f in pieces if _f and _f != '.']
link = '/' + '/'.join(pieces)
if absolute:
link = urljoin(self.base_url, link[1:])
index_len = len(self.index_file)
if self.strip_indexes and link[-(1 + index_len):] == '/' + self.index_file:
link = link[:-index_len]
if query:
link = link + "?" + query
link = utils.encodelink(link)
return link
@property
def previewimage(self, lang=None):
"""Return the previewimage path."""
if lang is None:
lang = nikola.utils.LocaleBorg().current_lang
image_path = self.meta[lang]['previewimage']
if not image_path:
return None
# This is further parsed by the template, because we don’t have access
# to the URL replacer here. (Issue #1473)
return image_path
def source_ext(self, prefix=False):
"""Return the source file extension.
If `prefix` is True, a `.src.` prefix will be added to the resulting extension
if it's equal to the destination extension.
"""
ext = os.path.splitext(self.source_path)[1]
# do not publish PHP sources
if prefix and ext == '.html':
# ext starts with a dot
return '.src' + ext
else:
return ext
# Code that fetches metadata from different places
def re_meta(line, match=None):
"""Find metadata using regular expressions."""
if match:
reStr = re.compile(r'^\.\. {0}: (.*)'.format(re.escape(match)))
else:
reStr = re.compile(r'^\.\. (.*?): (.*)')
result = reStr.findall(line.strip())
if match and result:
return (match, result[0])
elif not match and result:
return (result[0][0], result[0][1].strip())
else:
return (None,)
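# Illustrative sketch of re_meta (assumed inputs): it parses reST-style
# comment directives, e.g.
#   re_meta('.. title: Hello world')           -> ('title', 'Hello world')
#   re_meta('.. title: Hello world', 'title')  -> ('title', 'Hello world')
#   re_meta('plain text line')                 -> (None,)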
def _get_metadata_from_filename_by_regex(filename, metadata_regexp, unslugify_titles, lang):
"""Try to reed the metadata from the filename based on the given re.
This requires to use symbolic group names in the pattern.
The part to read the metadata from the filename based on a regular
expression is taken from Pelican - pelican/readers.py
"""
match = re.match(metadata_regexp, filename)
meta = {}
if match:
# .items() for py3k compat.
for key, value in match.groupdict().items():
k = key.lower().strip() # metadata must be lowercase
if k == 'title' and unslugify_titles:
meta[k] = unslugify(value, lang, discard_numbers=False)
else:
meta[k] = value
return meta
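# Illustrative sketch (hypothetical FILE_METADATA_REGEXP): a pattern using
# symbolic group names, e.g.
#   r'(?P<date>\d{4}-\d{2}-\d{2})-(?P<slug>.*)'
# matched against '2016-01-07-hello-world' yields
#   {'date': '2016-01-07', 'slug': 'hello-world'}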
def get_metadata_from_file(source_path, config=None, lang=None):
"""Extract metadata from the file itself, by parsing contents."""
try:
if lang and config:
source_path = get_translation_candidate(config, source_path, lang)
elif lang:
source_path += '.' + lang
with io.open(source_path, "r", encoding="utf-8-sig") as meta_file:
meta_data = [x.strip() for x in meta_file.readlines()]
return _get_metadata_from_file(meta_data)
except (UnicodeDecodeError, UnicodeEncodeError):
raise ValueError('Error reading {0}: Nikola only supports UTF-8 files'.format(source_path))
except Exception: # The file may not exist, for multilingual sites
return {}
re_md_title = re.compile(r'^{0}([^{0}].*)'.format(re.escape('#')))
# Assuming rst titles are going to be at least 4 chars long
# otherwise this detects things like ''' which breaks other markups.
re_rst_title = re.compile(r'^([{0}]{{4,}})'.format(re.escape(
string.punctuation)))
def _get_title_from_contents(meta_data):
"""Extract title from file contents, LAST RESOURCE."""
piece = meta_data[:]
title = None
for i, line in enumerate(piece):
if re_rst_title.findall(line) and i > 0:
title = meta_data[i - 1].strip()
break
if (re_rst_title.findall(line) and i >= 0 and
re_rst_title.findall(meta_data[i + 2])):
title = meta_data[i + 1].strip()
break
if re_md_title.findall(line):
title = re_md_title.findall(line)[0]
break
return title
def _get_metadata_from_file(meta_data):
"""Extract metadata from a post's source file."""
meta = {}
if not meta_data:
return meta
# Skip up to one empty line at the beginning (for txt2tags)
if not meta_data[0]:
meta_data = meta_data[1:]
# First, get metadata from the beginning of the file,
# up to first empty line
for i, line in enumerate(meta_data):
if not line:
break
match = re_meta(line)
if match[0]:
meta[match[0]] = match[1]
# If we have no title, try to get it from document
if 'title' not in meta:
t = _get_title_from_contents(meta_data)
if t is not None:
meta['title'] = t
return meta
def get_metadata_from_meta_file(path, config=None, lang=None):
"""Take a post path, and gets data from a matching .meta file."""
global _UPGRADE_METADATA_ADVERTISED
meta_path = os.path.splitext(path)[0] + '.meta'
if lang and config:
meta_path = get_translation_candidate(config, meta_path, lang)
elif lang:
meta_path += '.' + lang
if os.path.isfile(meta_path):
with io.open(meta_path, "r", encoding="utf8") as meta_file:
meta_data = meta_file.readlines()
# Detect new-style metadata.
newstyleregexp = re.compile(r'\.\. .*?: .*')
newstylemeta = False
for l in meta_data:
if l.strip():
if re.match(newstyleregexp, l):
newstylemeta = True
if newstylemeta:
# New-style metadata is basically the same as reading metadata from
# a 1-file post.
return get_metadata_from_file(path, config, lang), newstylemeta
else:
if not _UPGRADE_METADATA_ADVERTISED:
LOGGER.warn("Some posts on your site have old-style metadata. You should upgrade them to the new format, with support for extra fields.")
LOGGER.warn("Install the 'upgrade_metadata' plugin (with 'nikola plugin -i upgrade_metadata') and run 'nikola upgrade_metadata'.")
_UPGRADE_METADATA_ADVERTISED = True
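# Old-style .meta files are plain text with up to seven lines in this
# fixed order: title, slug, date, tags, link, description, type
# (illustrative example: "My title" / "my-title" / "2016-01-07 11:24" / ...).
# Pad to seven lines so the unpacking below always succeeds.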
while len(meta_data) < 7:
meta_data.append("")
(title, slug, date, tags, link, description, _type) = [
x.strip() for x in meta_data][:7]
meta = {}
if title:
meta['title'] = title
if slug:
meta['slug'] = slug
if date:
meta['date'] = date
if tags:
meta['tags'] = tags
if link:
meta['link'] = link
if description:
meta['description'] = description
if _type:
meta['type'] = _type
return meta, newstylemeta
elif lang:
# Metadata file doesn't exist, but this is not the default language,
# so if metadata exists for the default language, return that.
# This makes the 2-file format detection more reliable (Issue #525)
return get_metadata_from_meta_file(path, config, lang=None)
else:
return {}, True
def get_meta(post, file_metadata_regexp=None, unslugify_titles=False, lang=None):
"""Get post's meta from source.
If ``file_metadata_regexp`` is given, it will be used to try to read
metadata from the filename.
If ``unslugify_titles`` is True, the extracted title (if any) will be unslugified, as is done in galleries.
If any metadata is then found inside the file the metadata from the
file will override previous findings.
"""
meta = defaultdict(lambda: '')
try:
config = post.config
except AttributeError:
config = None
_, newstylemeta = get_metadata_from_meta_file(post.metadata_path, config, lang)
meta.update(_)
if not meta:
post.is_two_file = False
if file_metadata_regexp is not None:
meta.update(_get_metadata_from_filename_by_regex(post.source_path,
file_metadata_regexp,
unslugify_titles,
post.default_lang))
compiler_meta = {}
if getattr(post, 'compiler', None):
compiler_meta = post.compiler.read_metadata(post, file_metadata_regexp, unslugify_titles, lang)
meta.update(compiler_meta)
if not post.is_two_file and not compiler_meta:
# Meta file has precedence over file, which can contain garbage.
# Moreover, we should not talk to the file if we have compiler meta.
meta.update(get_metadata_from_file(post.source_path, config, lang))
if lang is None:
# Only perform these checks for the default language
if 'slug' not in meta:
# If no slug is found in the metadata use the filename
meta['slug'] = slugify(unicode_str(os.path.splitext(
os.path.basename(post.source_path))[0]), post.default_lang)
if 'title' not in meta:
# If no title is found, use the filename without extension
meta['title'] = os.path.splitext(
os.path.basename(post.source_path))[0]
return meta, newstylemeta
def hyphenate(dom, _lang):
"""Hyphenate a post."""
# circular import prevention
from .nikola import LEGAL_VALUES
lang = None
if pyphen is not None:
lang = LEGAL_VALUES['PYPHEN_LOCALES'].get(_lang, pyphen.language_fallback(_lang))
else:
utils.req_missing(['pyphen'], 'hyphenate texts', optional=True)
hyphenator = None
if pyphen is not None and lang is not None:
# If pyphen does exist, we tell the user when configuring the site.
# If it does not support a language, we ignore it quietly.
try:
hyphenator = pyphen.Pyphen(lang=lang)
except KeyError:
LOGGER.error("Cannot find hyphenation dictoniaries for {0} (from {1}).".format(lang, _lang))
LOGGER.error("Pyphen cannot be installed to ~/.local (pip install --user).")
if hyphenator is not None:
for tag in ('p', 'li', 'span'):
for node in dom.xpath("//%s[not(parent::pre)]" % tag):
skip_node = False
skippable_nodes = ['kbd', 'code', 'samp', 'mark', 'math', 'data', 'ruby', 'svg']
if node.getchildren():
for child in node.getchildren():
if child.tag in skippable_nodes or (child.tag == 'span' and 'math' in child.get('class', [])):
skip_node = True
elif 'math' in node.get('class', []):
skip_node = True
if not skip_node:
insert_hyphens(node, hyphenator)
return dom
def insert_hyphens(node, hyphenator):
"""Insert hyphens into a node."""
textattrs = ('text', 'tail')
if isinstance(node, lxml.etree._Entity):
# HTML entities have no .text
textattrs = ('tail',)
for attr in textattrs:
text = getattr(node, attr)
if not text:
continue
new_data = ' '.join([hyphenator.inserted(w, hyphen='\u00AD')
for w in text.split(' ')])
# Spaces are trimmed, we have to add them manually back
if text[0].isspace():
new_data = ' ' + new_data
if text[-1].isspace():
new_data += ' '
setattr(node, attr, new_data)
for child in node.iterchildren():
insert_hyphens(child, hyphenator)
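# Illustrative sketch (assumes pyphen with a dictionary for the language):
# insert_hyphens() walks the DOM and replaces each word with
# hyphenator.inserted(word, hyphen='\u00AD'), i.e. the word with soft
# hyphens (U+00AD) at the dictionary's break points, so long words can
# wrap without changing the visible text.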
CRYPT = string.Template("""\
<script>
function rc4(key, str) {
var s = [], j = 0, x, res = '';
for (var i = 0; i < 256; i++) {
s[i] = i;
}
for (i = 0; i < 256; i++) {
j = (j + s[i] + key.charCodeAt(i % key.length)) % 256;
x = s[i];
s[i] = s[j];
s[j] = x;
}
i = 0;
j = 0;
for (var y = 0; y < str.length; y++) {
i = (i + 1) % 256;
j = (j + s[i]) % 256;
x = s[i];
s[i] = s[j];
s[j] = x;
res += String.fromCharCode(str.charCodeAt(y) ^ s[(s[i] + s[j]) % 256]);
}
return res;
}
function decrypt() {
key = $$("#key").val();
crypt_div = $$("#encr")
crypted = crypt_div.html();
decrypted = rc4(key, window.atob(crypted));
if (decrypted.substr(decrypted.length - 11) == "<!--tail-->"){
crypt_div.html(decrypted);
$$("#pwform").hide();
crypt_div.show();
} else { alert("Wrong password"); };
}
</script>
<div id="encr" style="display: none;">${data}</div>
<div id="pwform">
<form onsubmit="javascript:decrypt(); return false;" class="form-inline">
<fieldset>
<legend>This post is password-protected.</legend>
<input type="password" id="key" placeholder="Type password here">
<button type="submit" class="btn">Show Content</button>
</fieldset>
</form>
</div>""")
| {
"content_hash": "2df88ae6a0a837458067c8f4685029b4",
"timestamp": "",
"source": "github",
"line_count": 1189,
"max_line_length": 159,
"avg_line_length": 39.52649285113541,
"alnum_prop": 0.5781220077877312,
"repo_name": "x1101/nikola",
"id": "9364dc05fa69048c34658a454aa39d3cc7a7a39e",
"size": "48141",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nikola/post.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18268"
},
{
"name": "JavaScript",
"bytes": "24667"
},
{
"name": "Python",
"bytes": "1088257"
},
{
"name": "Shell",
"bytes": "11088"
},
{
"name": "XSLT",
"bytes": "3619"
}
],
"symlink_target": ""
} |
import tornado.web
from handlers.base import BaseHandler
class ErrorHandler(tornado.web.ErrorHandler, BaseHandler):
"""
Generic error handler
"""
pass
| {
"content_hash": "09f4415b14160c2e3649ec33716133a6",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 58,
"avg_line_length": 18.77777777777778,
"alnum_prop": 0.7218934911242604,
"repo_name": "Lepthy/glowing-journey",
"id": "4c6264c60264cc49bb5ef015526357b2d3b68c9a",
"size": "169",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/glowing-journey/handlers/error.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "237"
},
{
"name": "HTML",
"bytes": "899"
},
{
"name": "JavaScript",
"bytes": "527"
},
{
"name": "Python",
"bytes": "7773"
},
{
"name": "Shell",
"bytes": "398"
}
],
"symlink_target": ""
} |
"""C_8_email
Revision ID: c9c1ca205749
Revises: daf5f84ca180
Create Date: 2016-01-07 11:24:10.202218
"""
# revision identifiers, used by Alembic.
revision = 'c9c1ca205749'
down_revision = 'daf5f84ca180'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('email', sa.String(length=64), nullable=True))
op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
op.create_foreign_key(None, 'users', 'roles', ['role_id'], ['id'])
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'users', type_='foreignkey')
op.drop_index(op.f('ix_users_email'), table_name='users')
op.drop_column('users', 'email')
### end Alembic commands ###
| {
"content_hash": "c7b23201ab5769f80e9b3896d52a4c16",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 83,
"avg_line_length": 29.1,
"alnum_prop": 0.6655211912943871,
"repo_name": "menghao2015/MyBlog",
"id": "e699b566f435086c7204cd9ef16f40b9ea4fec73",
"size": "873",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrations/versions/c9c1ca205749_c_8_email.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1855"
},
{
"name": "HTML",
"bytes": "14852"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "52302"
}
],
"symlink_target": ""
} |
from django.core.urlresolvers import reverse
from django.http import HttpRequest # noqa
from django.test.utils import override_settings # noqa
from mox3.mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
INDEX_URL = reverse('horizon:project:access_and_security:index')
API_URL = "horizon:project:access_and_security:api_access"
EC2_URL = reverse(API_URL + ":ec2")
OPENRC_URL = reverse(API_URL + ":openrc")
OPENRCV2_URL = reverse(API_URL + ":openrcv2")
CREDS_URL = reverse(API_URL + ":view_credentials")
RECREATE_CREDS_URL = reverse(API_URL + ":recreate_credentials")
class APIAccessTests(test.TestCase):
def test_ec2_download_view(self):
creds = self.ec2.first()
cert = self.certs.first()
self.mox.StubOutWithMock(api.keystone, "list_ec2_credentials")
self.mox.StubOutWithMock(api.nova, "get_x509_credentials")
self.mox.StubOutWithMock(api.nova, "get_x509_root_certificate")
self.mox.StubOutWithMock(api.keystone, "create_ec2_credentials")
api.keystone.list_ec2_credentials(IsA(HttpRequest), self.user.id) \
.AndReturn([])
api.nova.get_x509_credentials(IsA(HttpRequest)).AndReturn(cert)
api.nova.get_x509_root_certificate(IsA(HttpRequest)) \
.AndReturn(cert)
api.keystone.create_ec2_credentials(IsA(HttpRequest),
self.user.id,
self.tenant.id).AndReturn(creds)
self.mox.ReplayAll()
res = self.client.get(EC2_URL)
self.assertEqual(res.status_code, 200)
self.assertEqual(res['content-type'], 'application/zip')
def test_openrcv2_credentials(self):
res = self.client.get(OPENRCV2_URL)
self.assertEqual(res.status_code, 200)
openrc = 'project/access_and_security/api_access/openrc_v2.sh.template'
self.assertTemplateUsed(res, openrc)
name = 'export OS_USERNAME="{}"'.format(self.request.user.username)
t_id = 'export OS_TENANT_ID={}'.format(self.request.user.tenant_id)
domain = 'export OS_USER_DOMAIN_NAME="{}"'.format(
self.request.user.user_domain_name)
self.assertIn(name.encode('utf-8'), res.content)
self.assertIn(t_id.encode('utf-8'), res.content)
# domain content should not be present for v2
self.assertNotIn(domain.encode('utf-8'), res.content)
@override_settings(OPENSTACK_API_VERSIONS={"identity": 3})
def test_openrc_credentials(self):
res = self.client.get(OPENRC_URL)
self.assertEqual(res.status_code, 200)
openrc = 'project/access_and_security/api_access/openrc.sh.template'
self.assertTemplateUsed(res, openrc)
name = 'export OS_USERNAME="{}"'.format(self.request.user.username)
p_id = 'export OS_PROJECT_ID={}'.format(self.request.user.tenant_id)
domain = 'export OS_USER_DOMAIN_NAME="{}"'.format(
self.request.user.user_domain_name)
self.assertIn(name.encode('utf-8'), res.content)
self.assertIn(p_id.encode('utf-8'), res.content)
self.assertIn(domain.encode('utf-8'), res.content)
@test.create_stubs({api.keystone: ("list_ec2_credentials",)})
def test_credential_api(self):
certs = self.ec2.list()
api.keystone.list_ec2_credentials(IsA(HttpRequest), self.user.id) \
.AndReturn(certs)
self.mox.ReplayAll()
res = self.client.get(CREDS_URL)
self.assertEqual(res.status_code, 200)
credentials = 'project/access_and_security/api_access/credentials.html'
self.assertTemplateUsed(res, credentials)
self.assertEqual(self.user.id, res.context['openrc_creds']['user'].id)
self.assertEqual(certs[0].access,
res.context['ec2_creds']['ec2_access_key'])
@test.create_stubs({api.keystone: ("list_ec2_credentials",
"create_ec2_credentials",
"delete_user_ec2_credentials",)})
def _test_recreate_user_credentials(self, exists_credentials=True):
old_creds = self.ec2.list() if exists_credentials else []
new_creds = self.ec2.first()
api.keystone.list_ec2_credentials(
IsA(HttpRequest),
self.user.id).AndReturn(old_creds)
if exists_credentials:
api.keystone.delete_user_ec2_credentials(
IsA(HttpRequest),
self.user.id,
old_creds[0].access).AndReturn([])
api.keystone.create_ec2_credentials(
IsA(HttpRequest),
self.user.id,
self.tenant.id).AndReturn(new_creds)
self.mox.ReplayAll()
res_get = self.client.get(RECREATE_CREDS_URL)
self.assertEqual(res_get.status_code, 200)
credentials = \
'project/access_and_security/api_access/recreate_credentials.html'
self.assertTemplateUsed(res_get, credentials)
res_post = self.client.post(RECREATE_CREDS_URL)
self.assertNoFormErrors(res_post)
self.assertRedirectsNoFollow(res_post, INDEX_URL)
def test_recreate_user_credentials(self):
self._test_recreate_user_credentials()
def test_recreate_user_credentials_with_no_existing_creds(self):
self._test_recreate_user_credentials(exists_credentials=False)
| {
"content_hash": "6eeac9ec66a0fee46a008224547cc248",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 79,
"avg_line_length": 44.10569105691057,
"alnum_prop": 0.6387096774193548,
"repo_name": "yangleo/cloud-github",
"id": "bbad063738d0728520e0e5d2a4ec1c0c52e2de95",
"size": "6028",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/project/access_and_security/api_access/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "118532"
},
{
"name": "HTML",
"bytes": "533790"
},
{
"name": "JavaScript",
"bytes": "1516542"
},
{
"name": "Makefile",
"bytes": "588"
},
{
"name": "Python",
"bytes": "4933215"
},
{
"name": "Shell",
"bytes": "19775"
}
],
"symlink_target": ""
} |
from keystone.common import dependency
from keystone.common import sql
from keystone import exception
from keystone.identity.mapping_backends import mapping as identity_mapping
from keystone.identity.mapping_backends.sql import Mapping
from keystone.identity.backends import sql as model
try: from oslo_log import log
except ImportError: from keystone.openstack.common import log
LOG = log.getLogger(__name__)
@dependency.requires('identity_api', 'id_generator_api')
class IdGroupLdapMapping(Mapping):
def get_public_id(self, local_entity):
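# local_entity is expected to look like (illustrative values):
#   {'domain_id': 'default', 'local_id': 'admins',
#    'entity_type': identity_mapping.EntityType.GROUP}
# For groups, skip the generic id-mapping table and look the group up
# directly in the SQL identity backend by name and domain; its row id is
# the public id.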
if (local_entity['entity_type'] == identity_mapping.EntityType.GROUP):
LOG.debug('Trying to get public_id for group %s in %s' % (local_entity['local_id'],
local_entity['domain_id']))
try:
session = sql.get_session()
query = session.query(model.Group)
query = query.filter_by(name=local_entity['local_id'])
query = query.filter_by(domain_id=local_entity['domain_id'])
try:
group_ref = query.one()
except sql.NotFound:
raise exception.GroupNotFound(group_id=local_entity['local_id'])
group = group_ref.to_dict()
public_id = group['id']
LOG.debug('Public_id for group %s in %s is: %s ' % (local_entity['local_id'],
local_entity['domain_id'],
public_id))
return public_id
except Exception:
return None
else:
return super(Mapping, self).get_public_id(local_entity)
def get_id_mapping(self, public_id):
return super(Mapping, self).get_id_mapping(public_id)
def create_id_mapping(self, local_entity, public_id=None):
return super(Mapping, self).create_id_mapping(local_entity, public_id)
def delete_id_mapping(self, public_id):
return super(Mapping, self).delete_id_mapping(public_id)
def purge_mappings(self, purge_filter):
return super(Mapping, self).purge_mappings(purge_filter)
| {
"content_hash": "ac41dfda4486abac28f36362eb120e93",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 97,
"avg_line_length": 44.35294117647059,
"alnum_prop": 0.581343943412909,
"repo_name": "telefonicaid/fiware-keystone-spassword",
"id": "01d9fb7da70b7c14948c9f039ad9f4dceb763f0e",
"size": "2262",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/id_group_ldap.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "8882"
},
{
"name": "Python",
"bytes": "83281"
},
{
"name": "Shell",
"bytes": "47463"
}
],
"symlink_target": ""
} |
import sys
import datetime
from pymongo.errors import OperationFailure
sys.path[0:0] = [""]
try:
import unittest2 as unittest
except ImportError:
import unittest
import pymongo
from bson.tz_util import utc
from mongoengine import (
connect, register_connection,
Document, DateTimeField
)
from mongoengine.python_support import IS_PYMONGO_3
import mongoengine.connection
from mongoengine.connection import get_db, get_connection, ConnectionError
def get_tz_awareness(connection):
if not IS_PYMONGO_3:
return connection.tz_aware
else:
return connection.codec_options.tz_aware
class ConnectionTest(unittest.TestCase):
def tearDown(self):
mongoengine.connection._connection_settings = {}
mongoengine.connection._connections = {}
mongoengine.connection._dbs = {}
def test_connect(self):
"""Ensure that the connect() method works properly.
"""
connect('mongoenginetest')
conn = get_connection()
self.assertTrue(isinstance(conn, pymongo.mongo_client.MongoClient))
db = get_db()
self.assertTrue(isinstance(db, pymongo.database.Database))
self.assertEqual(db.name, 'mongoenginetest')
connect('mongoenginetest2', alias='testdb')
conn = get_connection('testdb')
self.assertTrue(isinstance(conn, pymongo.mongo_client.MongoClient))
def test_sharing_connections(self):
"""Ensure that connections are shared when the connection settings are exactly the same
"""
connect('mongoenginetest', alias='testdb1')
expected_connection = get_connection('testdb1')
connect('mongoenginetest', alias='testdb2')
actual_connection = get_connection('testdb2')
# Handle PyMongo 3+ Async Connection
if IS_PYMONGO_3:
# Ensure we are connected, throws ServerSelectionTimeoutError otherwise.
# Purposely not catching exception to fail test if thrown.
expected_connection.server_info()
self.assertEqual(expected_connection, actual_connection)
def test_connect_uri(self):
"""Ensure that the connect() method works properly with uri's
"""
c = connect(db='mongoenginetest', alias='admin')
c.admin.system.users.remove({})
c.mongoenginetest.system.users.remove({})
c.admin.add_user("admin", "password")
c.admin.authenticate("admin", "password")
c.mongoenginetest.add_user("username", "password")
if not IS_PYMONGO_3:
self.assertRaises(ConnectionError, connect, "testdb_uri_bad", host='mongodb://test:password@localhost')
connect("testdb_uri", host='mongodb://username:password@localhost/mongoenginetest')
conn = get_connection()
self.assertTrue(isinstance(conn, pymongo.mongo_client.MongoClient))
db = get_db()
self.assertTrue(isinstance(db, pymongo.database.Database))
self.assertEqual(db.name, 'mongoenginetest')
c.admin.system.users.remove({})
c.mongoenginetest.system.users.remove({})
def test_connect_uri_without_db(self):
"""Ensure that the connect() method works properly with uri's
without database_name
"""
c = connect(db='mongoenginetest', alias='admin')
c.admin.system.users.remove({})
c.mongoenginetest.system.users.remove({})
c.admin.add_user("admin", "password")
c.admin.authenticate("admin", "password")
c.mongoenginetest.add_user("username", "password")
if not IS_PYMONGO_3:
self.assertRaises(ConnectionError, connect, "testdb_uri_bad", host='mongodb://test:password@localhost')
connect("mongoenginetest", host='mongodb://localhost/')
conn = get_connection()
self.assertTrue(isinstance(conn, pymongo.mongo_client.MongoClient))
db = get_db()
self.assertTrue(isinstance(db, pymongo.database.Database))
self.assertEqual(db.name, 'mongoenginetest')
c.admin.system.users.remove({})
c.mongoenginetest.system.users.remove({})
def test_connect_uri_with_authsource(self):
"""Ensure that the connect() method works well with
the option `authSource` in URI.
This feature was introduced in MongoDB 2.4 and removed in 2.6
"""
# Create users
c = connect('mongoenginetest')
c.admin.system.users.remove({})
c.admin.add_user('username', 'password')
# Authentication fails without "authSource"
if IS_PYMONGO_3:
test_conn = connect('mongoenginetest', alias='test2',
host='mongodb://username:password@localhost/mongoenginetest')
self.assertRaises(OperationFailure, test_conn.server_info)
else:
self.assertRaises(
ConnectionError, connect, 'mongoenginetest', alias='test1',
host='mongodb://username:password@localhost/mongoenginetest'
)
self.assertRaises(ConnectionError, get_db, 'test1')
# Authentication succeeds with "authSource"
test_conn2 = connect(
'mongoenginetest', alias='test2',
host=('mongodb://username:password@localhost/'
'mongoenginetest?authSource=admin')
)
# This will fail starting from MongoDB 2.6+
# test_conn2.server_info()
db = get_db('test2')
self.assertTrue(isinstance(db, pymongo.database.Database))
self.assertEqual(db.name, 'mongoenginetest')
# Clear all users
c.admin.system.users.remove({})
def test_register_connection(self):
"""Ensure that connections with different aliases may be registered.
"""
register_connection('testdb', 'mongoenginetest2')
self.assertRaises(ConnectionError, get_connection)
conn = get_connection('testdb')
self.assertTrue(isinstance(conn, pymongo.mongo_client.MongoClient))
db = get_db('testdb')
self.assertTrue(isinstance(db, pymongo.database.Database))
self.assertEqual(db.name, 'mongoenginetest2')
def test_register_connection_defaults(self):
"""Ensure that defaults are used when the host and port are None.
"""
register_connection('testdb', 'mongoenginetest', host=None, port=None)
conn = get_connection('testdb')
self.assertTrue(isinstance(conn, pymongo.mongo_client.MongoClient))
def test_connection_kwargs(self):
"""Ensure that connection kwargs get passed to pymongo.
"""
connect('mongoenginetest', alias='t1', tz_aware=True)
conn = get_connection('t1')
self.assertTrue(get_tz_awareness(conn))
connect('mongoenginetest2', alias='t2')
conn = get_connection('t2')
self.assertFalse(get_tz_awareness(conn))
def test_datetime(self):
connect('mongoenginetest', tz_aware=True)
d = datetime.datetime(2010, 5, 5, tzinfo=utc)
class DateDoc(Document):
the_date = DateTimeField(required=True)
DateDoc.drop_collection()
DateDoc(the_date=d).save()
date_doc = DateDoc.objects.first()
self.assertEqual(d, date_doc.the_date)
def test_multiple_connection_settings(self):
connect('mongoenginetest', alias='t1', host="localhost")
connect('mongoenginetest2', alias='t2', host="127.0.0.1")
mongo_connections = mongoengine.connection._connections
self.assertEqual(len(mongo_connections.items()), 2)
self.assertTrue('t1' in mongo_connections.keys())
self.assertTrue('t2' in mongo_connections.keys())
if not IS_PYMONGO_3:
self.assertEqual(mongo_connections['t1'].host, 'localhost')
self.assertEqual(mongo_connections['t2'].host, '127.0.0.1')
else:
# Handle PyMongo 3+ Async Connection
# Ensure we are connected, throws ServerSelectionTimeoutError otherwise.
# Purposely not catching exception to fail test if thrown.
mongo_connections['t1'].server_info()
mongo_connections['t2'].server_info()
self.assertEqual(mongo_connections['t1'].address[0], 'localhost')
self.assertEqual(mongo_connections['t2'].address[0], '127.0.0.1')
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "8337a453ae21440157e7c31bc7b1c5db",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 115,
"avg_line_length": 36.25974025974026,
"alnum_prop": 0.643505253104107,
"repo_name": "9nix00/mongoengine",
"id": "4a02696af67c0b1d84ce803e477116de3e38af46",
"size": "8376",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/test_connection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1017560"
}
],
"symlink_target": ""
} |
import os
from typing import Callable, Dict, List, Tuple
_vars = {}
_var_hooks = {}
_VAR_DEFAULT_VAL = False
def _run_var_hooks(key: str, run_global_hooks: bool=False):
""" Run all of the update hooks for the specified variable """
if run_global_hooks:
hook_key = ""
else:
hook_key = key
hooks = _var_hooks.get(hook_key)
if hooks is None:
return
for hook in hooks:
hook(key, _vars.get(key))
def add_hook(key: str, hook: Callable):
""" Add a hook to call when the variable is changed """
key_up = key.upper()
hooks = _var_hooks.get(key_up)
if hooks is None:
hooks = []
_var_hooks[key_up] = hooks
hooks.append(hook)
def set(key: str, value: str, export: bool=False):
""" Set the environmental variable given by "key=value" """
key_up = key.upper()
_vars[key_up] = value
if export:
export_var(key_up)
_run_var_hooks(key_up)
_run_var_hooks(key_up, run_global_hooks=True)
def get(key: str) -> str:
""" Get the environmental variable with the name "key" """
key_up = key.upper()
value = _vars.get(key_up)
if value is None:
return _VAR_DEFAULT_VAL
return value
def print_env():
""" Print all environmental variables, values, and hooks """
for key, value in _vars.items():
print(key + "=" + value)
hooks = _var_hooks.get(key)
if hooks:
for hook in hooks:
print("`--> " + hook.__name__ + "()")
def import_sys_env():
for key, value in os.environ.items():
set(key, value)
def export_var(key: str):
value = get(key)
os.environ[key] = value
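# ---------------------------------------------------------------------------
# Added illustrative sketch -- not part of the original module.  It shows one
# plausible way to use the hook machinery above: a per-variable hook plus a
# global hook (registered under the empty key), both triggered by set().  The
# names _demo_hooks, _on_verbose and _on_any are made up for illustration.
def _demo_hooks():
    def _on_verbose(key: str, value: str):
        print("VERBOSE is now: " + str(value))
    def _on_any(key: str, value: str):
        print("env update: " + key + "=" + str(value))
    add_hook("verbose", _on_verbose)  # keys are upper-cased internally
    add_hook("", _on_any)             # empty key acts as a global hook
    set("verbose", "1")               # fires both hooks
    return get("VERBOSE")             # -> "1" (lookups are case-insensitive)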
| {
"content_hash": "a7d1bf67150c7e89d85329bbe70f4ca5",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 66,
"avg_line_length": 23.319444444444443,
"alnum_prop": 0.5783204288266826,
"repo_name": "zstuartp/AstroReduce",
"id": "2f0d750ba303c7fa4a737f976927e25b74f129f5",
"size": "3224",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "astroreduce/env.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1613"
},
{
"name": "Python",
"bytes": "52655"
},
{
"name": "Shell",
"bytes": "1667"
}
],
"symlink_target": ""
} |
import socket
import cPickle
filename = 'cached_lookups'
cached_lookups = {}
def load():
global cached_lookups
try:
cached_lookups = cPickle.load(open(filename))
except:
cached_lookups = {}
print 'loaded DNS lookups:', len(cached_lookups)
def gethostbyaddr(addr):
try:
return cached_lookups[addr]
except (KeyError):
try:
return cached_lookups.setdefault(addr,
socket.gethostbyaddr(addr))
except (socket.herror):
return cached_lookups.setdefault(addr, 'NA')
def save():
cPickle.dump(cached_lookups, open(filename, 'wb'))
print 'saved DNS lookups:', len(cached_lookups)
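# ---------------------------------------------------------------------------
# Added usage sketch -- not part of the original module.  Typical lifecycle:
# load the pickled cache once, resolve addresses (only cache misses touch the
# network), then persist the cache again.  The address below is just an
# example value.
def demo_usage():
    load()
    host = gethostbyaddr('8.8.8.8')  # cached after the first call
    save()
    return host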
| {
"content_hash": "6af888fc026a60a3e5ff30dd97639b6b",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 68,
"avg_line_length": 26.25925925925926,
"alnum_prop": 0.610719322990127,
"repo_name": "rauljim/power-pcap-analyzer",
"id": "6b3b5f9aded587d25116af59826ea12fd4bb3908",
"size": "709",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parsers/cached_lookup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
"""
Module containing utilities for NDFrame.sample() and .GroupBy.sample()
"""
from __future__ import annotations
import numpy as np
from pandas._libs import lib
from pandas._typing import FrameOrSeries
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCSeries,
)
def preprocess_weights(obj: FrameOrSeries, weights, axis: int) -> np.ndarray:
"""
Process and validate the `weights` argument to `NDFrame.sample` and
`.GroupBy.sample`.
Returns `weights` as an ndarray[np.float64], validated except for normalizing
weights (because that must be done groupwise in groupby sampling).
"""
# If a series, align with frame
if isinstance(weights, ABCSeries):
weights = weights.reindex(obj.axes[axis])
# Strings acceptable if a dataframe and axis = 0
if isinstance(weights, str):
if isinstance(obj, ABCDataFrame):
if axis == 0:
try:
weights = obj[weights]
except KeyError as err:
raise KeyError(
"String passed to weights not a valid column"
) from err
else:
raise ValueError(
"Strings can only be passed to "
"weights when sampling from rows on "
"a DataFrame"
)
else:
raise ValueError(
"Strings cannot be passed as weights when sampling from a Series."
)
if isinstance(obj, ABCSeries):
func = obj._constructor
else:
func = obj._constructor_sliced
weights = func(weights, dtype="float64")._values
if len(weights) != obj.shape[axis]:
raise ValueError("Weights and axis to be sampled must be of same length")
if lib.has_infs(weights):
raise ValueError("weight vector may not include `inf` values")
if (weights < 0).any():
raise ValueError("weight vector many not include negative values")
weights[np.isnan(weights)] = 0
return weights
def process_sampling_size(
n: int | None, frac: float | None, replace: bool
) -> int | None:
"""
Process and validate the `n` and `frac` arguments to `NDFrame.sample` and
`.GroupBy.sample`.
Returns None if `frac` should be used (variable sampling sizes), otherwise returns
the constant sampling size.
"""
# If no frac or n, default to n=1.
if n is None and frac is None:
n = 1
elif n is not None and frac is not None:
raise ValueError("Please enter a value for `frac` OR `n`, not both")
elif n is not None:
if n < 0:
raise ValueError(
"A negative number of rows requested. Please provide `n` >= 0."
)
if n % 1 != 0:
raise ValueError("Only integers accepted as `n` values")
else:
assert frac is not None # for mypy
if frac > 1 and not replace:
raise ValueError(
"Replace has to be set to `True` when "
"upsampling the population `frac` > 1."
)
if frac < 0:
raise ValueError(
"A negative number of rows requested. Please provide `frac` >= 0."
)
return n
def sample(
obj_len: int,
size: int,
replace: bool,
weights: np.ndarray | None,
random_state: np.random.RandomState | np.random.Generator,
) -> np.ndarray:
"""
Randomly sample `size` indices in `np.arange(obj_len)`
Parameters
----------
obj_len : int
The length of the indices being considered
size : int
The number of values to choose
replace : bool
Allow or disallow sampling of the same row more than once.
weights : np.ndarray[np.float64] or None
If None, equal probability weighting, otherwise weights according
to the vector normalized
random_state: np.random.RandomState or np.random.Generator
State used for the random sampling
Returns
-------
np.ndarray[np.intp]
"""
if weights is not None:
weight_sum = weights.sum()
if weight_sum != 0:
weights = weights / weight_sum
else:
raise ValueError("Invalid weights: weights sum to zero")
return random_state.choice(obj_len, size=size, replace=replace, p=weights).astype(
np.intp, copy=False
)
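# ---------------------------------------------------------------------------
# Added illustrative sketch -- not part of pandas.  It exercises the two pure
# helpers above with plain numpy inputs: process_sampling_size() validates the
# requested size and sample() performs the weighted draw (normalizing the
# weights internally).  preprocess_weights() is skipped here because it needs
# a Series/DataFrame; the ready-made float64 weights below stand in for its
# output.  All values are arbitrary examples.
def _demo_weighted_draw() -> np.ndarray:
    rng = np.random.default_rng(0)
    weights = np.array([0.1, 0.0, 0.7, 0.2], dtype="float64")
    size = process_sampling_size(n=2, frac=None, replace=False)  # -> 2
    return sample(
        obj_len=len(weights),
        size=size,
        replace=False,
        weights=weights,
        random_state=rng,
    )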
| {
"content_hash": "a51412b2b6d8588536185d7f2554f1a2",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 86,
"avg_line_length": 30.631944444444443,
"alnum_prop": 0.5923826796644752,
"repo_name": "gfyoung/pandas",
"id": "e4bad22e8e43cadbf1c02bf6a51c0aaa74f7504e",
"size": "4411",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas/core/sample.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4912"
},
{
"name": "C",
"bytes": "404689"
},
{
"name": "C++",
"bytes": "17194"
},
{
"name": "HTML",
"bytes": "551714"
},
{
"name": "Makefile",
"bytes": "574"
},
{
"name": "Python",
"bytes": "14336547"
},
{
"name": "Shell",
"bytes": "29174"
},
{
"name": "Smarty",
"bytes": "2069"
}
],
"symlink_target": ""
} |
import ibmsecurity.utilities.tools
import logging
from ibmsecurity.utilities import tools
logger = logging.getLogger(__name__)
module_uri = "/isam/felb/configuration/services"
requires_modules = None
requires_version = None
requires_model = "Appliance"
def add(isamAppliance, service_name, address, active, port, weight, secure, ssllabel, check_mode=False, force=False):
"""
Creating a server
"""
check_exist, warnings = _check_exist(isamAppliance, service_name, address, port)
if force is True or check_exist is False:
if check_mode is True:
return isamAppliance.create_return_object(changed=True, warnings=warnings)
else:
return isamAppliance.invoke_post("Creating a server",
"{0}/{1}/servers".format(module_uri, service_name),
{
"active": active,
"address": address,
"port": port,
"weight": weight,
"secure": secure,
"ssllabel": ssllabel
},
requires_version=requires_version, requires_modules=requires_modules, requires_model=requires_model)
else:
return isamAppliance.create_return_object(warnings=warnings)
def delete(isamAppliance, service_name, address, port, check_mode=False, force=False):
"""
deletes a server from specified service name
"""
check_exist, warnings = _check_exist(isamAppliance, service_name, address, port)
if force is True or check_exist is True:
if check_mode is True:
return isamAppliance.create_return_object(changed=True, warnings=warnings)
else:
id = address + ":" + str(port)
return isamAppliance.invoke_delete("Deleting a server",
"{0}/{1}/servers/{2}".format(module_uri, service_name, id),
requires_version=requires_version, requires_modules=requires_modules, requires_model=requires_model)
else:
return isamAppliance.create_return_object(warnings=warnings)
def get(isamAppliance, service_name, address, port, check_mode=False, force=False):
"""
Retrieves server from specified service name
"""
id = address + ":" + str(port)
return (isamAppliance.invoke_get("Retrieving a server", "{0}/{1}/servers/{2}".format(module_uri, service_name, id),
requires_version=requires_version, requires_modules=requires_modules, requires_model=requires_model))
def get_all(isamAppliance, service_name, check_mode=False, force=False):
"""
Retrieves a list of servers under a specified service
"""
return isamAppliance.invoke_get("Retrieving servers for a service",
"{0}/{1}/servers".format(module_uri, service_name),
requires_version=requires_version, requires_modules=requires_modules, requires_model=requires_model)
def update(isamAppliance, service_name, address, active, port, weight, secure=False, ssllabel=None, new_address=None, new_port=None, check_mode=False, force=False):
"""
Updating server
"""
id = address + ":" + str(port)
json_data = {'active': active, 'secure': secure, 'ssllabel': ssllabel, 'weight': weight}
if new_address is not None:
json_data['address'] = new_address
else:
json_data['address'] = address
if new_port is not None:
json_data['port'] = new_port
else:
json_data['port'] = port
change_required, warnings = _check_update(isamAppliance, service_name, address, port, json_data)
if force is True or change_required is True:
if check_mode is True:
return isamAppliance.create_return_object(changed=True, warnings=warnings)
else:
return isamAppliance.invoke_put("Updating a server",
"{0}/{1}/servers/{2}".format(module_uri, service_name, id),
json_data,
requires_modules=requires_modules,
requires_version=requires_version,
requires_model=requires_model)
else:
return isamAppliance.create_return_object(warnings=warnings)
def _check_update(isamAppliance, service_name, address, port, json_data):
"""
idempotency test
"""
ret_obj = get(isamAppliance, service_name, address, port)
warnings = ret_obj['warnings']
ret_data = ret_obj['data']
if 'id' in ret_data:
del ret_data['id']
else:
return False, warnings
sorted_ret_data = tools.json_sort(ret_data)
sorted_json_data = tools.json_sort(json_data)
logger.debug("Sorted Existing Data:{0}".format(sorted_ret_data))
logger.debug("Sorted Desired Data:{0}".format(sorted_json_data))
if sorted_ret_data != sorted_json_data:
return True, warnings
else:
return False, warnings
def _check_exist(isamAppliance, service_name, address, port):
"""
idempotency test for delete function
"""
id = address + ":" + str(port)
ret_obj = get_all(isamAppliance, service_name)
warnings = ret_obj['warnings']
for obj in ret_obj['data']:
if obj['id'] == id:
return True, warnings
return False, warnings
def compare(isamAppliance1, service_name1, isamAppliance2, service_name2):
"""
Compare cluster configuration between two appliances
"""
ret_obj1 = get_all(isamAppliance1, service_name1)
ret_obj2 = get_all(isamAppliance2, service_name2)
return ibmsecurity.utilities.tools.json_compare(ret_obj1, ret_obj2, deleted_keys=[])
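# ---------------------------------------------------------------------------
# Added illustrative sketch -- not part of the original module.  The
# idempotency check in _check_update() reduces to comparing the sorted-JSON
# form of the desired server definition with what the appliance already
# reports.  The two dictionaries below are made-up example payloads.
def _demo_idempotency_check():
    current = {'active': True, 'address': '10.0.0.1', 'port': 443,
               'weight': 1, 'secure': False, 'ssllabel': None}
    desired = {'address': '10.0.0.1', 'port': 443, 'weight': 1,
               'secure': False, 'ssllabel': None, 'active': True}
    # Same content, different key order: sorts equal, so no update is needed.
    return tools.json_sort(current) == tools.json_sort(desired)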
| {
"content_hash": "9800550a1e5248896807299248a2673e",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 164,
"avg_line_length": 38.1875,
"alnum_prop": 0.5839607201309329,
"repo_name": "IBM-Security/ibmsecurity",
"id": "0b2203524a72487114261e03890801b808e93e0c",
"size": "6110",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ibmsecurity/isam/base/network/felb/services/servers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1501984"
}
],
"symlink_target": ""
} |
import re
import sys
import urllib
from django.db import models
from django.db.models import Q
from django.conf import settings
from django.core import exceptions
from mint.django_rest.rbuilder import errors
from mint.django_rest.rbuilder.modellib import XObjIdModel, type_map
from mint.jobstatus import FINISHED
from xobj import xobj
import math
#import logging
#log = logging.getLogger(__name__)
filterTermMap = {
'EQUAL' : 'iexact',
'NOT_EQUAL' : 'iexact',
'LESS_THAN' : 'lt',
'LESS_THAN_OR_EQUAL' : 'lte',
'GREATER_THAN' : 'gt',
'GREATER_THAN_OR_EQUAL' : 'gte',
'LIKE' : 'contains',
'NOT_LIKE' : 'contains',
'IN' : 'in',
'NOT_IN' : 'in',
'IS_NULL' : 'isnull',
}
class Operator(object):
filterTerm = None
operator = None
description = None
arity = 2
# Variable length arguments
ARITY_VAR = object()
# This may look weird, but we need two backslashes when trying to
# match a single one, for escaping reasons
_singleBackslashRe = re.compile(r'\\')
def __init__(self, *operands):
self.operands = list(operands)
def addOperand(self, operand):
self.operands.append(operand)
def asString(self):
return "%s(%s)" % (self.filterTerm,
','.join((hasattr(x, 'asString') and x.asString() or self._quote(x))
for x in self.operands))
@classmethod
def _quote(cls, s):
s = cls._singleBackslashRe.sub(r'\\\\', s)
slen = len(s)
s = s.replace('"', r'\"')
if len(s) != slen:
# We've replaced something
s = '"%s"' % s
return s
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
if not isinstance(self, other.__class__):
return False
if len(self.operands) != len(other.operands):
return False
for ssub, osub in zip(self.operands, other.operands):
if ssub != osub:
return False
return True
def __ne__(self, other):
return not (self == other)
def castValue(self, value):
return value
class BooleanOperator(Operator):
def castValue(self, value):
if value.lower() == 'false':
return False
return True
class InOperator(Operator):
filterTerm = 'IN'
operator = 'in'
description = 'In list'
arity = Operator.ARITY_VAR
class NotInOperator(InOperator):
filterTerm = 'NOT_IN'
description = 'Not in list'
class NullOperator(BooleanOperator):
filterTerm = 'IS_NULL'
operator = 'isnull'
description = 'Is NULL'
class EqualOperator(Operator):
filterTerm = 'EQUAL'
operator = 'iexact'
description = 'Equal to'
class NotEqualOperator(EqualOperator):
filterTerm = 'NOT_EQUAL'
description = 'Not equal to'
class LessThanOperator(Operator):
filterTerm = 'LESS_THAN'
operator = 'lt'
description = 'Less than'
class LessThanEqualOperator(Operator):
filterTerm = 'LESS_THAN_OR_EQUAL'
operator = 'lte'
description = 'Less than or equal to'
class GreaterThanOperator(Operator):
filterTerm = 'GREATER_THAN'
operator = 'gt'
description = 'Greater than'
class GreaterThanEqualOperator(Operator):
filterTerm = 'GREATER_THAN_OR_EQUAL'
operator = 'gte'
description = 'Greater than or equal to'
class LikeOperator(Operator):
filterTerm = 'LIKE'
operator = 'icontains'
description = 'Like'
class NotLikeOperator(LikeOperator):
filterTerm = 'NOT_LIKE'
operator = 'icontains'
description = 'Not like'
class ContainsOperator(Operator):
filterTerm = 'CONTAINS'
operator = None
description = "Contains"
arity = Operator.ARITY_VAR
class AndOperator(Operator):
filterTerm = 'AND'
operator = None
description = "And"
arity = Operator.ARITY_VAR
class OrOperator(Operator):
filterTerm = 'OR'
operator = None
description = "Or"
arity = Operator.ARITY_VAR
def operatorFactory(operator):
return operatorMap[operator]
class Lexer(object):
"""
Class used for parsing a query tree.
The general syntax is, in BNF-like syntax:
optree ::== OPERATOR(operand[,operand*])
OPERATOR ::== (word)
operand ::== string | quotedstring | optree
string ::== (pretty obvious)
quotedstring ::== " | string | "
Strings MUST be quoted if they contain a quote (which must be escaped with
a backslash), parentheses or commas. Simple words do not have to be quoted,
as they do not break the parser. Backslashes have to be doubled up within
quotes.
Example of operands that evaluate to strings::
simple word
"quoted words"
"an embedded \"quote\" and an escaped \\ (backslash)"
Note that semicolons will have to be URL-escaped before the query is passed
in the URL.
"""
_doubleBackslash = r'\\\\'
_convertedDoubleBackslash = u'\u0560'
_escaped = re.compile(_doubleBackslash)
_unescaped = re.compile(_convertedDoubleBackslash)
# .*? means non-greedy expansion, to avoid skipping over separators
_startSep = re.compile(r'^(?P<head>.*?)(?P<sep>(\(|\)|,|(?<!\\)"))(?P<tail>.*)$')
_endQuote = re.compile(r'^(?P<head>.*?)(?P<sep>(?<!\\)")(?P<tail>.*)$')
def scan(self, s):
return self._split(s)
@classmethod
def _split(cls, code):
# The stack contains only tree nodes. Literal nodes are added as
# operands directly to the last tree node in the stack.
stack = []
# First pass: we replace all double-backslashes with a
# non-ascii unicode char, to simplify the regular expressions
# _unescape will then revert this operation
escCode = cls._escaped.sub(cls._convertedDoubleBackslash, code).strip()
# There are only 2 states to worry about.
# We look for a separator that is either ( , ) or " (unescaped,
# hence the negative look-ahead in the regex)
# If an (unescaped) quote is found, we need to find its matching
# (unescaped) quote, which is the sep == '"' case.
while escCode:
m = cls._startSep.match(escCode)
if m is None:
raise errors.InvalidData(msg="Unable to parse %s" % code)
g = m.groupdict()
head, sep, tail = g['head'], g['sep'], g['tail']
# Get rid of leading whitespaces, unless the string is
# quoted
if sep != '"':
escCode = tail.lstrip()
else:
escCode = tail
if sep == '(':
# New operator found.
op = cls._unescape(head.strip())
opFactory = operatorMap.get(op, None)
if opFactory is None:
raise errors.InvalidData(msg="Unknown operator %s" % op)
tree = opFactory()
if stack:
# Add the tree node to the parent (using the stack)
cls._addOperand(stack, tree)
# ... and we push it onto the stack
stack.append(tree)
continue
if sep == '"':
# Ignore everything but a close quote
m = cls._endQuote.match(escCode)
if m:
g = m.groupdict()
head, sep, tail = g['head'], g['sep'], g['tail']
escCode = tail.lstrip()
cls._addOperand(stack, cls._unescapeString(head))
continue
raise errors.InvalidData(msg="Closing quote not found")
if head:
cls._addOperand(stack, cls._unescape(head.strip()))
if sep == ',':
continue
assert sep == ')'
top = stack.pop()
if not stack:
if escCode != '':
raise errors.InvalidData(msg="Garbage found at the end of the expression: '%s'" % escCode)
return top
@classmethod
def _addOperand(cls, stack, child):
top = stack[-1]
assert isinstance(top, Operator)
top.addOperand(child)
@classmethod
def _unescape(cls, s):
return cls._unescaped.sub(r'\\', s).encode('ascii')
@classmethod
def _unescapeString(cls, s):
s = s.replace(r'\"', '"')
return cls._unescape(s)
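# ---------------------------------------------------------------------------
# Added usage sketch -- not part of the original module.  It parses a made-up
# filter_by expression into an operator tree and renders it back; note that
# asString() drops quotes that are not strictly needed.
def _demo_lexer_scan():
    tree = Lexer().scan('AND(EQUAL(name,foo),LIKE(description,"a phrase"))')
    # tree is an AndOperator holding an EqualOperator and a LikeOperator
    return tree.asString()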
# === BEGIN ADVANCED SEARCH ===
def _filterTerm(node, scope):
''' given a filter instance (node) produce a hash that Django understands '''
# TODO: handle NOT by teaching classes to provide the proper operands
if node.arity == 2:
(field, value) = node.operands
elif node.arity == node.ARITY_VAR:
(field, value) = node.operands[0], node.operands[1:]
else:
raise Exception("Unsupported arity %s" % node.arity)
django_operator = "%s%s__%s" % (scope, field, node.operator)
django_operator = django_operator.replace(".","__")
filt = {}
filt[django_operator] = node.castValue(value)
return filt
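# ---------------------------------------------------------------------------
# Added illustrative sketch -- not part of the original module.  A leaf node
# such as EQUAL(name,foo) maps to a single Django filter kwarg; the field and
# value below are made up, and the node is built directly instead of going
# through the Lexer.
def _demo_filter_term():
    node = EqualOperator('name', 'foo')
    # -> {'name__iexact': 'foo'}, ready to be passed to queryset.filter(**...)
    return _filterTerm(node, '')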
def _isAllLeaves(operands, and_no_repeated_terms=False):
''' Return True if no operand is complex (no nested AND/OR/CONTAINS); optionally also require that no field term is repeated. '''
for x in operands:
if isinstance(x, AndOperator) or isinstance(x, OrOperator) or isinstance(x, ContainsOperator):
return False
if and_no_repeated_terms:
# if the same term is used more than once, we can't merge the queryset together
counts = {}
for x in operands:
field = x.operands[0]
if field not in counts:
counts[field] = 1
else:
counts[field] = counts[field] + 1
counts = [ c for c in counts.values() if c > 1 ]
if len(counts):
return False
return True
def _filterTreeAnd(model, operands, scope):
''' Compute the results of a tree with AND as the root node '''
and_result = None
for (i,x) in enumerate(operands):
if (i==0):
and_result = filterTree(model, x, scope)
else:
and_result = and_result & filterTree(model, x, scope)
return and_result
def _filterTreeOr(model, operands, scope):
''' Compute the results of a tree with OR as the root node '''
def first(value, this):
value = filterTree(model, this, scope)
return value
def later(value, this):
value = value | filterTree(model, this, scope)
return value
return _reduceFirst(operands, first, later)
def _filterHasAnyNegatives(terms):
'''
Django negations must be treated specially in AND clauses to preserve the behavior where
two clauses in the same AND are implied to be related to the same object. This detects that.
'''
for x in terms:
if x.filterTerm.startswith('NOT_'):
return True
return False
def _reduceFirst(terms, first, later):
''' like python's reduce, but with special handling for the first item '''
res = None
if type(terms) != list:
terms = [ terms ]
for (i, x) in enumerate(terms):
if i == 0:
res = first(res, x)
else:
res = later(res, x)
return res
def _filterTreeAndFlat(model, terms, scope):
'''
Leaf-node AND terms are handled differently than top-level AND terms. To keep
ANDs logical when they refer to the same resource, a single filter clause is
generated when every operand is a simple leaf term. This can't be done if
negations are included without turning all negations into positives, which is currently not done.
'''
filters = {}
if not _filterHasAnyNegatives(terms):
for x in terms:
filters.update(_filterTerm(x,scope))
return model.filter(**filters)
else:
def first(value, this):
value = _filterOperator(model, this, scope)
return value
def later(value, this):
value = value & _filterOperator(model, this, scope)
return value
return _reduceFirst(terms, first, later)
def _filterOperator(model, node, scope):
''' given a filter term, generate a filter clause suitable for Django usage '''
filters = _filterTerm(node, scope)
if not node.filterTerm.startswith("NOT_"):
return model.filter(**filters)
else:
return model.filter(~Q(**filters))
def filterTree(djangoQuerySet, tree, scope=''):
result = _filterTree(djangoQuerySet, tree, scope)
return result
def _filterTree(djangoQuerySet, tree, scope=''):
''' new style advanced filtering '''
if not isinstance(tree, Operator):
raise Exception("expecting an operator")
model = getattr(djangoQuerySet, 'model', None)
if model is None:
raise Exception("filtering is not supported on non-database collections")
if isinstance(tree, ContainsOperator):
if len(tree.operands) != 2 or not isinstance(tree.operands[0], basestring):
raise Exception("invalid usage of Contains() operator")
scope = scope + tree.operands[0] + "__"
return filterTree(djangoQuerySet, tree.operands[1], scope)
elif isinstance(tree, AndOperator):
if not _isAllLeaves(tree.operands, and_no_repeated_terms=True):
return _filterTreeAnd(djangoQuerySet, tree.operands, scope)
return _filterTreeAndFlat(djangoQuerySet, tree.operands, scope)
elif isinstance(tree, OrOperator):
return _filterTreeOr(djangoQuerySet, tree.operands, scope)
return _filterOperator(djangoQuerySet, tree, scope)
# === END ADVANCED SEARCH ===
def filterDjangoQuerySet(djangoQuerySet, field, operator, value,
collection=None, queryset=None):
# a bit of a hack to deal with "eclipsed" fields where 'name' is the
# default search key but the model's real 'name' field means something
# different; but let's be honest, all of QSes are a hack :)
# example:
# Stage default search is by PROJECT name
# stage also has a name
literal=False
if field.startswith("literal:"):
field = field.replace("literal:","")
literal=True
# attempt to DWIM when asked to search on something
if not literal:
# stage search is more logical if the search key is the project name
if field == 'project_branch_stage.name' or field == 'name':
if (queryset and queryset.resource_type == 'project_branch_stage') or \
(collection and collection._xobj.tag == 'project_branch_stages'):
field = 'project.name'
# user model doesn't have a name, so point at that
if field == 'user.name' or field == 'name':
if (queryset and queryset.resource_type == 'user') or \
(collection and collection._xobj.tag == 'users'):
field = 'user.user_name'
# I think this deals with some inconsistent model relation weirdness but
# it's unclear
if field == 'rbac_permission.permission_id' or field == 'permission_id':
if (queryset and queryset.resource_type == 'grant') or \
(collection and collection._xobj.tag == 'grants'):
field = 'permission.permission_id'
# same
if field == 'name':
if (queryset and queryset.resource_type == 'grant') or \
(collection and collection._xobj.tag == 'grants'):
field = 'permission.name'
fieldName = field.split('.')[0]
if not hasattr(djangoQuerySet, 'model'):
raise Exception("filtering is not supported on non-database collections")
if fieldName not in djangoQuerySet.model._meta.get_all_field_names():
# if the model field didn't exist, try just the fieldName,
# it's possible the model was renamed and a custom query set
# is no longer accurate.
field = field.split('.', 1)[-1]
if value is None:
value = False
operatorCls = operatorFactory(operator)()
operator_name = operatorCls.operator
# TODO: see why Boolean and Int fields are not getting correct operator choices
# work around Django SQL generator bug where iexact does not
# properly quote things that look like numbers, there is no
# issue with like queries
if operator_name == 'iexact':
try:
float(value)
operator_name = 'in'
except ValueError:
# not numeric
pass
# lists can be entered separated by commas
if operator_name == 'in':
value = value.split(",")
# if things look boolean, no need for Django operator
# as it will just mess up the SQL
if operator_name == 'iexact':
lower = str(value).lower()
if lower in [ 'true', 'false' ]:
if lower == "true":
value = True
elif lower == "false":
value = False
operator_name = ''
if operator_name == "isnull":
lower = str(value).lower()
if lower in [ 'true', 'false' ]:
if lower == "true":
value = True
elif lower == "false":
value = False
# Replace all '.' with '__', to handle fields that span
# relationships
operator_name = "__%s" % operator_name
if operator_name == '__':
operator_name = ''
k = "%s%s" % (field.replace('.', '__'), operator_name)
filtDict = { k : value }
# never be able to list a deleted user account, they are
# present for admin metadata only
if queryset and queryset.resource_type == 'user':
djangoQuerySet = djangoQuerySet.filter(deleted=False)
# image querysets should not show non-successful images
if queryset and queryset.resource_type == 'image':
djangoQuerySet = djangoQuerySet.filter(status=FINISHED)
if operator.startswith('NOT_'):
qs = djangoQuerySet.filter(~Q(**filtDict))
else:
qs = djangoQuerySet.filter(**filtDict)
return qs
class UnpaginatedCollection(XObjIdModel):
class Meta:
abstract = True
class Collection(XObjIdModel):
_xobj = xobj.XObjMetadata(
attributes = {
'id' : str,
'count' : int,
'full_collection' : str,
'per_page' : str,
'start_index' : str,
'end_index' : str,
'num_pages' : str,
'next_page' : str,
'previous_page' : str,
'order_by' : str,
'filter_by' : str,
'limit' : str,
}
)
class Meta:
abstract = True
# number of total items
count = models.IntegerField()
# URL to the collection without pagination params?
full_collection = models.TextField()
# size of each page
per_page = models.IntegerField()
# where in the total list we are currently at
start_index = models.IntegerField()
# last index in the total list, ignoring pagination
end_index = models.IntegerField()
num_pages = models.IntegerField()
# page numbers
next_page = models.TextField()
previous_page = models.TextField()
# any requested user parameters:
order_by = models.TextField()
filter_by = models.TextField()
limit = models.TextField()
def get_absolute_url(self, request=None, parents=None, model=None,
paged=False, startIndex=None, full=None):
url = XObjIdModel.get_absolute_url(self, request, parents, model)
if url:
if paged:
limit = request.GET.get('limit', settings.PER_PAGE)
url += ';start_index=%s;limit=%s' % (startIndex, limit)
if self.order_by:
url += ';order_by=%s' % self.order_by
if self.filter_by:
url += ';filter_by=%s' % urllib.quote(self.filter_by, safe="[],")
return url
def _py_sort(self, modelList, param):
invert = param.startswith("-")
if param[0] in [ '+', '-', ' ' ]:
param = param[1:].strip()
# a list, not a query set
modelList = sorted(
modelList,
key=lambda f: getattr(f, param),
reverse=invert
)
return modelList
def _sortByField(key):
return lambda field: getattr(field, key, None)
def orderBy(self, request, modelList):
orderBy = request.GET.get('order_by', None)
use_python_sort = False
collected_model = type_map[self.list_fields[0]]
if orderBy:
newOrderParams = []
orderParams = orderBy.split(',')
for orderParam in orderParams:
# Ignore fields that don't exist on the model
fieldName = orderParam.split('.')[0]
if fieldName.startswith('-'):
fieldName = fieldName[1:]
# look for field name in the model's synthetic_fields dict,
# and if any field is synthetic, then sort in python
if fieldName in collected_model._meta.synthetic_fields:
use_python_sort = True
orderParam = orderParam.replace('.', '__')
newOrderParams.append(orderParam)
if hasattr(modelList, 'order_by') and not use_python_sort:
modelList = modelList.order_by(*newOrderParams)
elif use_python_sort:
for param in newOrderParams:
modelList = self._py_sort(modelList, param)
else:
param = newOrderParams[0]
modelList = self._py_sort(modelList, param)
self.order_by = orderBy
return modelList
def filterBy(self, request, modelList):
filterBy = request.GET.get('filter_by')
if filterBy and filterBy.startswith('['):
self.filter_by = filterBy
for filt in filterBy.split(']'):
if not (filt.startswith('[') or filt.startswith(',[')):
continue
filtString = filt.strip(',').strip('[').strip(']')
field, oper, value = filtString.split(',', 2)
modelList = filterDjangoQuerySet(modelList, field, oper, value, collection=self)
elif filterBy:
lexer = Lexer()
qt = lexer.scan(filterBy)
self.filter_by = qt.asString()
modelList = filterTree(modelList, qt)
return modelList
def paginate(self, request, listField, modelList):
startIndex = int(request.GET.get('start_index', 0))
self.limit = int(request.GET.get('limit', settings.PER_PAGE))
if modelList is not None:
self.count = None
if type(modelList) == list:
self.count = len(modelList)
else:
# queryset
self.count = modelList.count()
else:
modelList = []
self.count = 0
if self.limit == 0:
self.count = 0
# compute page counts and numbers (the bare index arithmetic is sketched in a helper after this class)
pageCount = 0
pageNumber = 1
stopIndex = 0
if self.limit > 0:
pageCount = int(math.ceil(self.count / float(self.limit)))
pageNumber = int(math.floor(startIndex / float(self.limit)))
stopIndex = startIndex + self.limit -1
# some somewhat confusing fenceposty stuff because we're working in ints
if pageCount == 0:
pageCount = 1
if stopIndex < 0:
stopIndex = 0
pageObjectList = []
if self.limit > 0:
pageObjectList = modelList[startIndex:(stopIndex+1)]
# Force qs evaluation here, to catch order_by errors
try:
setattr(self, listField, list(pageObjectList))
except exceptions.FieldError, e:
if e.args and e.args[0].startswith("Cannot resolve keyword"):
raise errors.InvalidFilterKey(msg=e.args[0])
raise
self.full_collection = self.get_absolute_url(request, full=True)
self.per_page = self.limit
self.start_index = startIndex
self.end_index = stopIndex
# more handling around the edges
if self.end_index >= self.count - 1:
self.end_index = self.count - 1
if self.end_index < 0:
self.end_index = 0
self.num_pages = pageCount
self._pagedId = self.get_absolute_url(request, paged=True, startIndex=startIndex)
# if there are pages left, link to a next_page
if self.end_index < self.count - 1:
nextIndex = startIndex + self.limit
self.next_page = self.get_absolute_url(request, paged=True, startIndex = nextIndex)
# if there are pages beforehand, link to them
if startIndex - self.limit >= 0:
prevIndex = startIndex - self.limit
if prevIndex < 0:
prevIndex = 0
self.previous_page = self.get_absolute_url(request, paged=True, startIndex= prevIndex)
def serialize(self, request=None, tag=None):
if tag is None:
tag = self._xobj.tag
if not self.list_fields:
return XObjIdModel.serialize(self, request, tag=tag)
# We only support one list field right now
listField = self.list_fields[0]
modelList = getattr(self, listField)
if request:
modelList = self.filterBy(request, modelList)
modelList = self.orderBy(request, modelList)
self.paginate(request, listField, modelList)
etreeModel = XObjIdModel.serialize(self, request, tag=tag)
if self._pagedId is not None:
etreeModel.attrib['id'] = self._pagedId
return etreeModel
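# ---------------------------------------------------------------------------
# Added illustrative sketch -- not part of the original module.  It isolates
# the index arithmetic used by Collection.paginate(): page count, inclusive
# stop index, and the start indices of the neighboring pages.  The default
# arguments are arbitrary example numbers.
def _demo_pagination_math(count=105, start_index=20, limit=10):
    page_count = int(math.ceil(count / float(limit))) or 1                   # -> 11
    stop_index = min(start_index + limit - 1, count - 1)                     # -> 29
    next_start = start_index + limit if stop_index < count - 1 else None     # -> 30
    prev_start = start_index - limit if start_index - limit >= 0 else None   # -> 10
    return page_count, stop_index, next_start, prev_start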
operatorMap = {}
for mod_obj in sys.modules[__name__].__dict__.values():
if hasattr(mod_obj, 'filterTerm'):
operatorMap[mod_obj.filterTerm] = mod_obj
| {
"content_hash": "2d913d63b8cc0d27cc23b3b91d4f8e99",
"timestamp": "",
"source": "github",
"line_count": 747,
"max_line_length": 110,
"avg_line_length": 34.77510040160642,
"alnum_prop": 0.5859414097085883,
"repo_name": "sassoftware/mint",
"id": "f625cf434a51951002ac2335085b2ac219054e12",
"size": "26582",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mint/django_rest/rbuilder/modellib/collections.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "50165"
},
{
"name": "Genshi",
"bytes": "58741"
},
{
"name": "HTML",
"bytes": "2814"
},
{
"name": "JavaScript",
"bytes": "11470"
},
{
"name": "Makefile",
"bytes": "92418"
},
{
"name": "NASL",
"bytes": "582"
},
{
"name": "PLpgSQL",
"bytes": "5358"
},
{
"name": "Puppet",
"bytes": "17914"
},
{
"name": "Python",
"bytes": "3239135"
},
{
"name": "Ruby",
"bytes": "9268"
},
{
"name": "Shell",
"bytes": "24834"
}
],
"symlink_target": ""
} |
import os
import init_multiprocessing # import before numpy
import numpy as np
import time
from multiprocessing import Pool, cpu_count, current_process
from analysis_basin_plotting import plot_proj_timeseries, plot_basin_occupancy_timeseries, plot_basin_step
from singlecell_class import Cell
from singlecell_constants import RUNS_FOLDER, ASYNC_BATCH, FIELD_PROTOCOL
from singlecell_data_io import run_subdir_setup, runinfo_append
from singlecell_fields import field_setup
from singlecell_simsetup import singlecell_simsetup, unpack_simsetup
# analysis settings
ANALYSIS_SUBDIR = "basin_transitions"
ANNEAL_BETA = 1.3
ANNEAL_PROTOCOL = "protocol_A"
OCC_THRESHOLD = 0.7
SPURIOUS_LIST = ["mixed"]
PROFILE_PREFIX = "profile_row_"
# analysis plotting
highlights_CLPside = {6: 'k', 8: 'blue', 7: 'red', 16: 'deeppink', 11: 'darkorchid'}
highlights_simple = {6: 'k', 8: 'blue', 10: 'steelblue'}
highlights_both = {6: 'k', 8: 'blue', 10: 'steelblue', 9: 'forestgreen', 7: 'red', 16: 'deeppink', 11: 'darkorchid'}
DEFAULT_HIGHLIGHTS = highlights_CLPside
def anneal_setup(protocol=ANNEAL_PROTOCOL):
"""
Start in basin of interest at some intermediate temperature that allows basin escape
(e.g. beta_init = 1 / T_init = 1.3)
For each trajectory:
- Modify temperature once it has left basin
(define by projection vs a strict cutoff, e.g. if projection[mem_init] < 0.6, then the trajectory is wandering)
- Modification schedule: Decrease temp each timestep (beta += beta_step) until some ceiling is reached (beta_end)
- Note currently "one timestep" is N spin flips.
- If particle re-enters basin (using same cutoff as above), reset beta to beta_init and repeat procedure.
"""
assert protocol in ["constant", "protocol_A", "protocol_B"]
anneal_dict = {'protocol': protocol,
'beta_start': ANNEAL_BETA}
if protocol == "protocol_A":
anneal_dict.update({'beta_end': 2.0,
'beta_step': 0.1,
'wandering_threshold': 0.6})
elif protocol == "protocol_B":
anneal_dict.update({'beta_end': 3.0,
'beta_step': 0.5,
'wandering_threshold': 0.6})
else:
assert protocol == "constant"
anneal_dict.update({'beta_end': ANNEAL_BETA,
'beta_step': 0.0,
'wandering_threshold': 0.6})
return anneal_dict
def anneal_iterate(proj_onto_init, beta_current, step, wandering, anneal_dict, verbose=False):
# TODO implement "acceleration" if it stays in basin or keeps re-entering basin
if proj_onto_init < anneal_dict['wandering_threshold']:
if verbose:
print "+++++++++++++++++++++++++++++++++ Wandering condition passed at step %d" % step
wandering = True
elif wandering:
if verbose:
print "+++++++++++++++++++++++++++++++++ Re-entered orig basin after wandering at step %d" % step
wandering = False
beta_current = anneal_dict['beta_start']
if wandering and beta_current < anneal_dict['beta_end']:
beta_current = beta_current + anneal_dict['beta_step']
return beta_current, wandering
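# ---------------------------------------------------------------------------
# Added illustrative sketch -- not part of the original module.  It walks a
# made-up projection trace through the annealing rule above: beta ramps from
# beta_start towards beta_end while the trajectory wanders (projection below
# the threshold) and resets to beta_start on re-entering the basin.
def _demo_anneal_schedule():
    anneal_dict = anneal_setup(protocol="protocol_A")
    beta = anneal_dict['beta_start']
    wandering = False
    fake_proj_trace = [0.9, 0.5, 0.4, 0.4, 0.8, 0.3]  # projection onto the initial basin
    betas = []
    for step, proj in enumerate(fake_proj_trace):
        beta, wandering = anneal_iterate(proj, beta, step, wandering, anneal_dict)
        betas.append(beta)
    return betas  # approx [1.3, 1.4, 1.5, 1.6, 1.3, 1.4] (up to float rounding)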
def get_init_info(init_cond, simsetup):
"""
Args:
- init_cond: np array of init state OR string memory label
Return:
- init state (Nx1 array) and init_id (str)
"""
if isinstance(init_cond, np.ndarray):
init_state = init_cond
init_id = 'specific'
else:
assert isinstance(init_cond, str)
init_state = simsetup['XI'][:, simsetup['CELLTYPE_ID'][init_cond]]
init_id = init_cond
return init_state, init_id
def save_and_plot_basinstats(io_dict, proj_data, occ_data, num_steps, ensemble, prefix='', simsetup=None,
occ_threshold=OCC_THRESHOLD, plot=True, highlights=DEFAULT_HIGHLIGHTS):
# filename prep
if prefix[-1] != '_':
prefix += '_'
# simsetup unpack for labelling plots
if simsetup is None:
simsetup = singlecell_simsetup()
memory_labels = simsetup['CELLTYPE_LABELS']
memory_id = simsetup['CELLTYPE_ID']
N, P, gene_labels, memory_labels, gene_id, celltype_id, xi, _, a_inv, intxn_matrix, _ = unpack_simsetup(simsetup)
# path setup
datapath_proj = io_dict['datadir'] + os.sep + '%sproj_timeseries.txt' % prefix
datapath_occ = io_dict['datadir'] + os.sep + '%soccupancy_timeseries.txt' % prefix
plotpath_proj = io_dict['plotdatadir'] + os.sep + '%sproj_timeseries.png' % prefix
plotpath_occ = io_dict['plotdatadir'] + os.sep + '%soccupancy_timeseries.png' % prefix
plotpath_basin_endpt = io_dict['plotdatadir'] + os.sep + '%sendpt_distro.png' % prefix
# save data to file
np.savetxt(datapath_proj, proj_data, delimiter=',', fmt='%.4f')
np.savetxt(datapath_occ, occ_data, delimiter=',', fmt='%i')
# plot and save figs
if plot:
plot_proj_timeseries(proj_data, num_steps, ensemble, memory_labels, plotpath_proj, highlights=highlights)
plot_basin_occupancy_timeseries(occ_data, num_steps, ensemble, memory_labels, occ_threshold, SPURIOUS_LIST, plotpath_occ, highlights=highlights)
plot_basin_step(occ_data[:, -1], num_steps, ensemble, memory_labels, memory_id, SPURIOUS_LIST, plotpath_basin_endpt, highlights=highlights)
return
def load_basinstats(rowdata_dir, celltype):
proj_name = "%s_proj_timeseries.txt" % celltype
occ_name = "%s_occupancy_timeseries.txt" % celltype
proj_timeseries_array = np.loadtxt(rowdata_dir + os.sep + proj_name, delimiter=',', dtype=float)
basin_occupancy_timeseries = np.loadtxt(rowdata_dir + os.sep + occ_name, delimiter=',', dtype=int)
return proj_timeseries_array, basin_occupancy_timeseries
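# ---------------------------------------------------------------------------
# Added illustrative sketch -- not part of the original module.  It shows the
# file-name convention shared by save_and_plot_basinstats() and
# load_basinstats(): "<prefix>_proj_timeseries.txt" (floats) and
# "<prefix>_occupancy_timeseries.txt" (ints), written with np.savetxt.  The
# arrays and the 'demo' prefix are made up; a temporary directory is used.
def _demo_basinstats_roundtrip():
    import tempfile
    tmpdir = tempfile.mkdtemp()
    proj = np.random.rand(3, 5)
    occ = np.random.randint(0, 10, size=(4, 5))
    np.savetxt(tmpdir + os.sep + 'demo_proj_timeseries.txt', proj, delimiter=',', fmt='%.4f')
    np.savetxt(tmpdir + os.sep + 'demo_occupancy_timeseries.txt', occ, delimiter=',', fmt='%i')
    proj_back, occ_back = load_basinstats(tmpdir, 'demo')
    return proj_back.shape, occ_back.shape  # -> ((3, 5), (4, 5))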
def fetch_from_run_info(txtpath, obj_labels):
"""
Args:
txtpath: path to standardized 'run_info.txt' file
obj_labels: list of the form ['str_to_look_for', ...] type it will be read as is defined in local dict
Returns:
list of corresponding objects (or None's if they weren't found) in the same order
"""
label_obj_map = {'ensemble': int,
'num_steps': int}
assert all([a in label_obj_map.keys() for a in obj_labels])
linelist = [line.rstrip() for line in open(txtpath)]
fetched_values = [None for _ in obj_labels]
for line in linelist:
line = line.split(',')
for idx, label in enumerate(obj_labels):
if label == line[0]:
fetched_values[idx] = label_obj_map[label](line[1])
return fetched_values
def wrapper_get_basin_stats(fn_args_dict):
np.random.seed()
if fn_args_dict['kwargs'] is not None:
return get_basin_stats(*fn_args_dict['args'], **fn_args_dict['kwargs'])
else:
return get_basin_stats(*fn_args_dict['args'])
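# ---------------------------------------------------------------------------
# Added illustrative sketch -- not part of the original module.  Pool.map()
# passes a single argument to each worker, which is why the wrapper above
# unpacks an {'args': ..., 'kwargs': ...} dict.  The miniature example below
# repeats the same packing pattern with a trivial stand-in worker; all names
# prefixed with _demo_ are made up.
def _demo_worker(start, count, scale=1):
    return [scale * i for i in range(start, start + count)]
def _demo_wrapper(fn_args_dict):
    return _demo_worker(*fn_args_dict['args'], **fn_args_dict['kwargs'])
def _demo_pool_pattern(num_processes=2, jobs_per_proc=3):
    fn_args_dict = [{'args': (i * jobs_per_proc, jobs_per_proc), 'kwargs': {'scale': 10}}
                    for i in range(num_processes)]
    pool = Pool(num_processes)
    results = pool.map(_demo_wrapper, fn_args_dict)
    pool.close()
    pool.join()
    return results  # -> [[0, 10, 20], [30, 40, 50]]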
def get_basin_stats(init_cond, init_state, init_id, ensemble, ensemble_idx, simsetup, num_steps=100,
anneal_protocol=ANNEAL_PROTOCOL, field_protocol=FIELD_PROTOCOL, occ_threshold=OCC_THRESHOLD,
async_batch=ASYNC_BATCH, verbose=False, profile=False):
if profile:
start_outer = time.time() # TODO remove
# simsetup unpack
N, _, gene_labels, memory_labels, gene_id, celltype_id, xi, _, a_inv, intxn_matrix, _ = unpack_simsetup(simsetup)
# prep applied field TODO: how to include applied field neatly
field_dict = field_setup(simsetup, protocol=field_protocol)
assert not field_dict['time_varying'] # TODO not yet supported
app_field = field_dict['app_field']
app_field_strength = field_dict['app_field_strength']
transfer_dict = {}
proj_timeseries_array = np.zeros((len(memory_labels), num_steps))
basin_occupancy_timeseries = np.zeros((len(memory_labels) + len(SPURIOUS_LIST), num_steps), dtype=int)
assert len(SPURIOUS_LIST) == 1
mixed_index = len(memory_labels) # i.e. last elem
anneal_dict = anneal_setup(protocol=anneal_protocol)
wandering = False
if profile:
start_inner = time.time() # TODO remove
for cell_idx in xrange(ensemble_idx, ensemble_idx + ensemble):
if verbose:
print "Simulating cell:", cell_idx
cell = Cell(init_state, init_id, memory_labels, gene_labels)
beta = anneal_dict['beta_start'] # reset beta to use in each trajectory
for step in xrange(num_steps):
# report on each mem proj ranked
projvec = cell.get_memories_projection(a_inv, xi)
proj_timeseries_array[:, step] += projvec
absprojvec = np.abs(projvec)
topranked = np.argmax(absprojvec)
if verbose:
print "\ncell %d step %d" % (cell_idx, step)
# print some timestep proj ranking info
if verbose:
for rank in xrange(10):
sortedmems_smalltobig = np.argsort(absprojvec)
sortedmems_bigtosmall = sortedmems_smalltobig[::-1]
topranked = sortedmems_bigtosmall[0]
ranked_mem_idx = sortedmems_bigtosmall[rank]
ranked_mem = memory_labels[ranked_mem_idx]
print rank, ranked_mem_idx, ranked_mem, projvec[ranked_mem_idx], absprojvec[ranked_mem_idx]
if projvec[topranked] > occ_threshold:
basin_occupancy_timeseries[topranked, step] += 1
else:
basin_occupancy_timeseries[mixed_index, step] += 1
""" comment out for speedup
if topranked != celltype_id[init_cond] and projvec[topranked] > occ_threshold:
if cell_idx not in transfer_dict:
transfer_dict[cell_idx] = {topranked: (step, projvec[topranked])}
else:
if topranked not in transfer_dict[cell_idx]:
transfer_dict[cell_idx] = {topranked: (step, projvec[topranked])}
"""
# annealing block
proj_onto_init = projvec[celltype_id[init_cond]]
beta, wandering = anneal_iterate(proj_onto_init, beta, step, wandering, anneal_dict, verbose=verbose)
# main call to update
if step < num_steps:
#cell.update_state(intxn_matrix, beta=beta, app_field=None, async_batch=async_batch)
cell.update_state(intxn_matrix, beta=beta, app_field=app_field, app_field_strength=app_field_strength,
async_batch=async_batch)
if profile:
end_inner = time.time()
total_time = end_inner - start_outer
if verbose:
print "TIMINGS for %s, process %s, ensemble_start %d, last job %d" % \
(init_cond, current_process(), ensemble_idx, cell_idx)
print "start outer | start inner | end --- init_time | total time"
print "%.2f | %.2f | %.2f --- %.2f | %.2f " % \
(start_outer, start_inner, end_inner, start_inner - start_outer, total_time)
else:
total_time = None
return transfer_dict, proj_timeseries_array, basin_occupancy_timeseries, total_time
def fast_basin_stats(init_cond, init_state, init_id, ensemble, num_processes, simsetup=None, num_steps=100,
occ_threshold=OCC_THRESHOLD, anneal_protocol=ANNEAL_PROTOCOL, field_protocol=FIELD_PROTOCOL,
async_batch=ASYNC_BATCH, verbose=False, profile=False):
# simsetup unpack
if simsetup is None:
simsetup = singlecell_simsetup()
# prepare fn args and kwargs for wrapper
kwargs_dict = {'num_steps': num_steps, 'anneal_protocol': anneal_protocol, 'field_protocol': field_protocol,
'occ_threshold': occ_threshold, 'async_batch': async_batch, 'verbose': verbose, 'profile': profile}
fn_args_dict = [0]*num_processes
if verbose:
print "NUM_PROCESSES:", num_processes
assert ensemble % num_processes == 0
for i in xrange(num_processes):
subensemble = ensemble / num_processes
cell_startidx = i * subensemble
if verbose:
print "process:", i, "job size:", subensemble, "runs"
fn_args_dict[i] = {'args': (init_cond, init_state, init_id, subensemble, cell_startidx, simsetup),
'kwargs': kwargs_dict}
# generate results list over workers
t0 = time.time()
pool = Pool(num_processes)
print "pooling"
results = pool.map(wrapper_get_basin_stats, fn_args_dict)
print "done"
pool.close()
pool.join()
if verbose:
print "TIMER:", time.time() - t0
# collect pooled results
summed_transfer_dict = {} # TODO remove?
summed_proj_timeseries_array = np.zeros((len(simsetup['CELLTYPE_LABELS']), num_steps))
summed_basin_occupancy_timeseries = np.zeros((len(simsetup['CELLTYPE_LABELS']) + 1, num_steps), dtype=int) # could have some spurious here too? not just last as mixed
if profile:
worker_times = np.zeros(num_processes)
else:
worker_times = None
for i, result in enumerate(results):
transfer_dict, proj_timeseries_array, basin_occupancy_timeseries, worker_time = result
summed_transfer_dict.update(transfer_dict) # TODO check
summed_proj_timeseries_array += proj_timeseries_array
summed_basin_occupancy_timeseries += basin_occupancy_timeseries
if profile:
worker_times[i] = worker_time
#check2 = np.sum(summed_basin_occupancy_timeseries, axis=0)
# normalize proj timeseries
summed_proj_timeseries_array = summed_proj_timeseries_array / ensemble # want ensemble average
return summed_transfer_dict, summed_proj_timeseries_array, summed_basin_occupancy_timeseries, worker_times
def ensemble_projection_timeseries(init_cond, ensemble, num_processes, simsetup=None, num_steps=100,
occ_threshold=OCC_THRESHOLD, anneal_protocol=ANNEAL_PROTOCOL,
field_protocol=FIELD_PROTOCOL, async_batch=ASYNC_BATCH, output=True, plot=True,
profile=False):
"""
Args:
- init_cond: np array of init state OR string memory label
- ensemble: ensemble of particles beginning at init_cond
- num_steps: how many steps to iterate (each step updates every spin once)
- occ_threshold: projection value cutoff to say state is in a basin (default: 0.7)
- anneal_protocol: define how temperature changes during simulation
What:
- Track a timeseries of: ensemble mean projection onto each memory
- Optionally plot
Return:
- timeseries of projections onto stored memories (dim p x T)
"""
# simsetup unpack
if simsetup is None:
simsetup = singlecell_simsetup()
# prep io
if output:
io_dict = run_subdir_setup(run_subfolder=ANALYSIS_SUBDIR)
else:
assert not plot
io_dict = None
# profiler setup
profile_path = RUNS_FOLDER + os.sep + PROFILE_PREFIX + "%dens_%dsteps.txt" % (ensemble, num_steps)
if profile:
time_start = time.time()
# generate initial state
init_state, init_id = get_init_info(init_cond, simsetup)
# simulate ensemble - pooled wrapper call
transfer_dict, proj_timeseries_array, basin_occupancy_timeseries, worker_times = \
fast_basin_stats(init_cond, init_state, init_id, ensemble, num_processes, simsetup=simsetup, num_steps=num_steps,
anneal_protocol=anneal_protocol, field_protocol=field_protocol, occ_threshold=occ_threshold,
async_batch=async_batch, verbose=False, profile=profile)
# save data and plot figures
if output:
save_and_plot_basinstats(io_dict, proj_timeseries_array, basin_occupancy_timeseries, num_steps, ensemble,
simsetup=simsetup, prefix=init_id, occ_threshold=occ_threshold, plot=plot)
"""
# print transfer dict
for idx in xrange(ensemble):
if idx in transfer_dict:
print idx, transfer_dict[idx], [simsetup['CELLTYPE_LABELS'][a] for a in transfer_dict[idx].keys()]
"""
if profile:
time_end = time.time()
time_total = time_end - time_start
with open(profile_path, 'a+') as f:
f.write('%d,%.2f,%.2f,%.2f\n' % (num_processes, time_total, min(worker_times), max(worker_times)))
#f.write(','.join(str(s) for s in [num_processes, time_total]) + '\n')
return proj_timeseries_array, basin_occupancy_timeseries, worker_times, io_dict
def basin_transitions(init_cond, ensemble, num_steps, beta, simsetup):
# TODO note analysis basin grid fulfills this functionality, not great spurious handling though
"""
Track jumps from basin 'i' to basin 'j' for all 'i'
Defaults:
- temperature: default is intermediate (1/BETA from singlecell_constants)
- ensemble: 10,000 cells start in basin 'i'
- time: fixed, 100 steps (option for unspecified; stop when ensemble dissipates)
Output:
- matrix of basin transition probabilities (i.e. discrete time markov chain)
Spurious basin notes:
- unclear how to identify spurious states dynamically
- suppose we define a new spurious state if, within some window of time T:
- (A) the state does not project on planned memories within some tolerance; and
- (B) the state has some self-similarity over time
- if a potential function is known (e.g. energy H(state)) then a spurious state
could be formally defined as a minimizer; however this may be numerically expensive to check
"""
"""
app_field = construct_app_field_from_genes(IPSC_CORE_GENES, num_steps)
proj_timeseries_array = np.zeros((num_steps, P))
"""
# add 1 as spurious sink dimension? this treats spurious as global sink state
basins_dim = len(simsetup['CELLTYPE_LABELS']) + 1
spurious_index = len(simsetup['CELLTYPE_LABELS'])
transition_data = np.zeros((basins_dim, basins_dim))
for idx, memory_label in enumerate(simsetup['CELLTYPE_LABELS']):
# TODO
print idx, memory_label
"""
cellstate_array, current_run_folder, data_folder, plot_lattice_folder, plot_data_folder = singlecell_sim(init_id=memory_label, iterations=num_steps, app_field=app_field, app_field_strength=10.0,
flag_burst_error=FLAG_BURST_ERRORS, flag_write=False, analysis_subdir=analysis_subdir,
plot_period=num_steps*2)
proj_timeseries_array[:, idx] = get_memory_proj_timeseries(cellstate_array, esc_idx)[:]
"""
# TODO: transition_data_row = ...
transition_data_row = 0
transition_data[idx, :] = transition_data_row
# cleanup output folders from main()
# TODO...
# save transition array and run info to file
# TODO...
# plot output
# TODO...
return transition_data
if __name__ == '__main__':
gen_basin_data = False
plot_grouped_data = False
profile = False
plot_groups_of_transitions = True
# prep simulation globals
simsetup = singlecell_simsetup()
if gen_basin_data:
# common: 'HSC' / 'Common Lymphoid Progenitor (CLP)' / 'Common Myeloid Progenitor (CMP)' /
# 'Megakaryocyte-Erythroid Progenitor (MEP)' / 'Granulocyte-Monocyte Progenitor (GMP)' / 'thymocyte DN'
# 'thymocyte - DP' / 'neutrophils' / 'monocytes - classical'
init_cond = 'macrophage' # note HSC index is 6 in mehta mems
ensemble = 128
num_steps = 100
num_proc = cpu_count() / 2 # seems best to use only physical core count (1 core ~ 3x slower than 4)
anneal_protocol = "protocol_A"
field_protocol = "miR_21" # "yamanaka" or "miR_21" or None
async_batch = True
plot = True
parallel = True
# run and time basin ensemble sim
t0 = time.time()
if parallel:
proj_timeseries_array, basin_occupancy_timeseries, worker_times, io_dict = \
ensemble_projection_timeseries(init_cond, ensemble, num_proc, num_steps=num_steps, simsetup=simsetup,
occ_threshold=OCC_THRESHOLD, anneal_protocol=anneal_protocol,
field_protocol=field_protocol, async_batch=async_batch, plot=plot)
else:
# Unparallelized for testing/profiling:
init_state, init_id = get_init_info(init_cond, simsetup)
io_dict = run_subdir_setup(run_subfolder=ANALYSIS_SUBDIR)
transfer_dict, proj_timeseries_array, basin_occupancy_timeseries, worker_times = \
get_basin_stats(init_cond, init_state, init_id, ensemble, 0, simsetup, num_steps=num_steps,
anneal_protocol=anneal_protocol, field_protocol=field_protocol,
occ_threshold=OCC_THRESHOLD, async_batch=async_batch, verbose=False, profile=True)
proj_timeseries_array = proj_timeseries_array / ensemble # ensure normalized (get basin stats won't do this)
t1 = time.time() - t0
print "Runtime:", t1
# append info to run info file TODO maybe move this INTO the function?
info_list = [['fncall', 'ensemble_projection_timeseries()'], ['init_cond', init_cond], ['ensemble', ensemble],
['num_steps', num_steps], ['num_proc', num_proc], ['anneal_protocol', anneal_protocol],
['occ_threshold', OCC_THRESHOLD], ['field_protocol', field_protocol],
['async_batch', async_batch], ['time', t1], ['time_workers', worker_times]]
runinfo_append(io_dict, info_list, multi=True)
# direct data plotting
if plot_grouped_data:
group_dir = RUNS_FOLDER
bases = ["output_335260","output_335261","output_335262","output_335264","output_335265"]
types = ["HSC","HSC","mef","mef","mef"]
labels = ["yam_1e5", "yam_0", "None", "yam_idk", "yam_1e5"]
ensemble = 10000
for i in xrange(len(bases)):
celltypes = simsetup['CELLTYPE_LABELS']
outdir = group_dir + os.sep + bases[i]
outproj = outdir + os.sep + 'proj_timeseries_%s.png' % labels[i]
outocc = outdir + os.sep + 'occ_timeseries_%s.png' % labels[i]
outend = outdir + os.sep + 'occ_endpt_%s.png' % labels[i]
# load and parse
proj_data, occ_data = load_basinstats(outdir + os.sep + 'data', types[i])
ensemble, num_steps = fetch_from_run_info(outdir + os.sep + 'run_info.txt', ['ensemble', 'num_steps'])
assert num_steps == proj_data.shape[1]
# plot
plot_proj_timeseries(proj_data, num_steps, ensemble, celltypes, outproj, highlights=highlights_CLPside)
plot_basin_occupancy_timeseries(occ_data, num_steps, ensemble, celltypes, OCC_THRESHOLD, SPURIOUS_LIST,
outocc, highlights=highlights_CLPside)
plot_basin_step(occ_data[:, -1], num_steps, ensemble, celltypes, simsetup['CELLTYPE_ID'],
SPURIOUS_LIST, outend, highlights=highlights_CLPside)
if profile:
# common: 'HSC' / 'Common Lymphoid Progenitor (CLP)' / 'Common Myeloid Progenitor (CMP)' /
# 'Megakaryocyte-Erythroid Progenitor (MEP)' / 'Granulocyte-Monocyte Progenitor (GMP)' / 'thymocyte DN'
# 'thymocyte - DP' / 'neutrophils' / 'monocytes - classical'
init_cond = 'HSC' # note HSC index is 6 in mehta mems
num_steps = 100
anneal_protocol = "protocol_A"
field_protocol = None
async_batch = ASYNC_BATCH
plot = False
ens_scaled = False
if ens_scaled:
ens_base = 16 # NETWORK_METHOD: all workers will do this many traj
proc_lists = {p: range(1,p+1) for p in [4,8,80]}
else:
ens_base = 128 # NETWORK_METHOD: divide this number amongst all workers
proc_lists = {4: [1,2,3,4],
8: [1,2,4,8], #[1,2,3,4,5,6,8],
64: [1,2,4,8,16,32,64],
80: [1,2,3,4,5,6,8,10,12,15,16,20,24,30,40,48,60,80]}
# run and time basin ensemble sim
for num_proc in proc_lists[cpu_count()]:
if ens_scaled:
ensemble = ens_base * num_proc
else:
ensemble = ens_base
print "Start timer for num_proc %d (%d ens x %d steps)" % (num_proc, ensemble, num_steps)
t0 = time.time()
proj_timeseries_array, basin_occupancy_timeseries, worker_times, io_dict = \
ensemble_projection_timeseries(init_cond, ensemble, num_proc, num_steps=num_steps, simsetup=simsetup,
occ_threshold=OCC_THRESHOLD, anneal_protocol=anneal_protocol,
field_protocol=field_protocol, async_batch=async_batch, plot=plot,
output=True, profile=True)
t1 = time.time() - t0
print "Runtime:", t1
# append info to run info file TODO maybe move this INTO the function?
info_list = [['fncall', 'ensemble_projection_timeseries()'], ['init_cond', init_cond],
['ensemble', ensemble],
['num_steps', num_steps], ['num_proc', num_proc], ['anneal_protocol', anneal_protocol],
['occ_threshold', OCC_THRESHOLD], ['field_protocol', field_protocol],
['async_batch', async_batch], ['time', t1], ['time_workers', worker_times]]
runinfo_append(io_dict, info_list, multi=True)
# plot specific directory data from basin transitions run
if plot_groups_of_transitions:
groupdir = RUNS_FOLDER + os.sep + 'single_celltype_transitions'
basedirs = ['Macrophage (A)', 'Macrophage (B)', 'Macrophage (C)', 'Macrophage (D)'] # celltype labels
subdirs = ['noField', 'mir21_1', 'mir21_2', 'mir21_3'] # field labels
for basedir in basedirs:
for subdir in subdirs:
datadir = groupdir + os.sep + basedir + os.sep + subdir + os.sep + 'data'
print "working in", datadir
# load proj data and occ data
proj_data, occ_data = load_basinstats(datadir, basedir)
ens = float(np.sum(occ_data[:, 0]))
print proj_data.shape
# setup timepoints
total_steps = proj_data.shape[1]
num_timepoints = 0
timepoints = [a*int(total_steps/num_timepoints) for a in xrange(num_timepoints)]
timepoints.append(total_steps-1)
for step in timepoints:
# sort proj and occ data at each timepoint
projvec = proj_data[:, step]
absprojvec = np.abs(projvec)
occvec = occ_data[:, step] # TODO
# print some timestep proj ranking info
print "\nRanking transitions (by proj) from %s, %s at step %d" % (basedir, subdir, step)
sortedmems_smalltobig = np.argsort(absprojvec)
sortedmems_bigtosmall = sortedmems_smalltobig[::-1]
for rank in xrange(10):
ranked_mem_idx = sortedmems_bigtosmall[rank]
ranked_mem = simsetup['CELLTYPE_LABELS'][ranked_mem_idx]
print rank, ranked_mem_idx, ranked_mem, projvec[ranked_mem_idx], absprojvec[ranked_mem_idx]
# print some timestep occ ranking info
print "\nRanking transitions (by occ) from %s, %s at step %d" % (basedir, subdir, step)
occ_labels = simsetup['CELLTYPE_LABELS'] + SPURIOUS_LIST
sortedmems_smalltobig = np.argsort(occvec)
sortedmems_bigtosmall_occ = sortedmems_smalltobig[::-1]
for rank in xrange(10):
ranked_mem_idx = sortedmems_bigtosmall_occ[rank]
ranked_mem = occ_labels[ranked_mem_idx]
print rank, ranked_mem_idx, ranked_mem, occvec[ranked_mem_idx], occvec[ranked_mem_idx] / ens
# plot sorted data with labels
outpath = groupdir + os.sep + 'occ_%s_%s_step_%d.png' % (basedir, subdir, step)
sorted_occ = [occ_data[idx, step] for idx in sortedmems_bigtosmall_occ]
sorted_labels = [occ_labels[idx] for idx in sortedmems_bigtosmall_occ]
print '\n', len(sorted_occ)
plot_basin_step(sorted_occ, step, ens, sorted_labels, simsetup['CELLTYPE_ID'], [], outpath,
highlights=None, autoscale=True, inset=True, title_add='(%s)' % subdir, init_mem=basedir)
| {
"content_hash": "76e05383ff7bb4cdbc1c16ef40334bd0",
"timestamp": "",
"source": "github",
"line_count": 589,
"max_line_length": 202,
"avg_line_length": 49.65365025466893,
"alnum_prop": 0.6024413595021542,
"repo_name": "mattsmart/biomodels",
"id": "49d1268fd00b35e17a70e8af1e724d317676381b",
"size": "29246",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "celltypes/singlecell/analysis_basin_transitions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "30519"
},
{
"name": "MATLAB",
"bytes": "44848"
},
{
"name": "Mathematica",
"bytes": "238830"
},
{
"name": "Python",
"bytes": "644776"
},
{
"name": "Shell",
"bytes": "179"
}
],
"symlink_target": ""
} |
import cv2
import numpy as np
import os
import sys
import time
#from uploadToGDrive import main_upload
#import uploadToGDrive
def readimage(filename):
print filename
image = cv2.imread(filename, cv2.IMREAD_GRAYSCALE) # reading image
if image is None:
print 'Can not find the image!'
sys.stdin.read(1)
exit(-1)
return image
#-------------------------------------------------------------------------------------
def createFolder(directory):
directory = directory + time.strftime("-%Y-%m-%d-%H-%M-%S")
if not os.path.exists(directory):
os.makedirs(directory)
print 'OK! can create directory '+directory
else:
print 'Error cannot create directory '+directory
return directory
#------------------------------------------------------------------------------------
print "sys \n"
print "\n".join(sys.argv)
filename=sys.argv[1]
foldername=sys.argv[2]
#print 'foldername '+foldername
resultDirectory = createFolder(foldername)
print 'resultDirectory = '+resultDirectory
# remove parent directory name
folderNameLength = foldername.index('/')+1
print 'folderNameLength = '+str(folderNameLength)
onlyFolderName = resultDirectory[folderNameLength:]
print 'onlyFolderName = '+onlyFolderName
image=readimage (filename)
ret, thresh = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
N = 3
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (N, N))
thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
#----------------------------------------------------------------------------------
#cv2.imshow('thresh', thresh)
cv2.imwrite('thresh.jpg',thresh)
im = cv2.imread('thresh.jpg')
imgray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
ret,thresh = cv2.threshold(imgray,100,255,0)
imageview, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
#-------------------------------5--------------------------------------------------
cnt = contours[0]
area = cv2.contourArea(cnt)
M = len(contours)
#print M
areas = [cv2.contourArea(c) for c in contours]
max_index = np.argmax(areas)
cnt=contours[max_index]
max_index=np.zeros(M)
for i in range(M):
areas = [cv2.contourArea(c) for c in contours]
if i == 0:
max_index[i] = np.argmax(areas)
else :
if max_index[i-1] == max_index[i]:
max_index[i] = np.argmax(areas)
#print max_index
#------------------------------------------------------------------------------------
height,width,channels = im.shape
print height,width
#-------------------------------------------------------------------------------------
#number=0
crop=0
pic=np.zeros(5,int)
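# Each sufficiently tall contour below is cropped with a 15 px margin,
# written to disk and run through tesseract in single-character mode
# (-psm 10, digits only); the x position of the contour decides which
# fifth of the meter display (crop1..crop5) the digit belongs to.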
for i in range(M):
x,y,w,h = cv2.boundingRect(contours[i])
if h >= 100 :
if w >= (w*0.3):
#print x,y
piccopy=np.zeros((w,h),int)
#cv2.rectangle(im,(x,y),(x+w,y+h),(0,255,0),1)
#cv2.imshow("Show",im)
crop_img = im[y-15:y+h+15, x-15:x+w+15]
if x>=0 and x<= (width*0.2):
crop=1
crop_img=np.invert(crop_img)
cv2.imwrite(resultDirectory+'/'+'crop'+str(crop)+'.tif',crop_img)
tCommand = 'tesseract '+resultDirectory+'/crop1.tif' + ' '+resultDirectory+'/crop1 -psm 10 digits'
print 'tCommand = '+tCommand
os.system(tCommand)
# cat crop1.txt to numberVariable
with open(resultDirectory+'/'+'crop'+str(crop)+'.txt', 'r') as myfile:
data=myfile.read().replace('\n', '')
print 'data crop'+str(crop)+'.txt ='+data
# insert resultDirectory crop1 to google spreadsheet
elif x>=(width*0.2)+1 and x<= (width*0.4):
crop=2
crop_img=np.invert(crop_img)
cv2.imwrite(resultDirectory+'/'+'crop'+str(crop)+'.tif',crop_img)
tCommand = 'tesseract '+resultDirectory+'/crop2.tif' + ' '+resultDirectory+'/crop2 -psm 10 digits'
print 'tCommand = '+tCommand
os.system(tCommand)
# cat crop2.txt to numberVariable
with open(resultDirectory+'/'+'crop'+str(crop)+'.txt', 'r') as myfile:
data=myfile.read().replace('\n', '')
print 'data crop'+str(crop)+'.txt ='+data
elif x>=(width*0.4)+1 and x<= (width*0.6):
crop=3
crop_img=np.invert(crop_img)
cv2.imwrite(resultDirectory+'/'+'crop'+str(crop)+'.tif',crop_img)
tCommand = 'tesseract '+resultDirectory+'/crop3.tif' + ' '+resultDirectory+'/crop3 -psm 10 digits'
print 'tCommand = '+tCommand
os.system(tCommand)
elif x>=(width*0.6)+1 and x<= (width*0.8):
crop=4
crop_img=np.invert(crop_img)
cv2.imwrite(resultDirectory+'/'+'crop'+str(crop)+'.tif',crop_img)
tCommand = 'tesseract '+resultDirectory+'/crop4.tif' + ' '+resultDirectory+'/crop4 -psm 10 digits'
print 'tCommand = '+tCommand
os.system(tCommand)
elif x>=(width*0.8)+1 and x<= width:
crop=5
cv2.imwrite(resultDirectory+'/'+'crop'+str(crop)+'.tif',crop_img)
tCommand = 'tesseract '+resultDirectory+'/crop5.tif' + ' '+resultDirectory+'/crop5 -psm 10 digits'
print 'tCommand = '+tCommand
os.system(tCommand)
#crop_img=np.invert(crop_img)
#cv2.imshow('crop '+str(crop),crop_img)
#cv2.imwrite('crop'+str(crop)+'.tif',crop_img)
#number=number+1
#--------------------------------------------------------------------------------------
exit('ohoh')
k = cv2.waitKey(0)
if k==27:
cv2.destroyAllWindows()
| {
"content_hash": "7fd8c6f463081ce0ed5db9b6a7afe5ff",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 100,
"avg_line_length": 40.2,
"alnum_prop": 0.527363184079602,
"repo_name": "DBMSRmutl/meterOCR",
"id": "272d369341e8c6f9a6b7394ae88dfc48b3f95629",
"size": "5829",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "v1-3-arg.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "21383"
},
{
"name": "Roff",
"bytes": "1698"
}
],
"symlink_target": ""
} |
import os
import numpy as np
import pytest
from pymt.printers.nc.ugrid import (
NetcdfRectilinearField,
NetcdfStructuredField,
NetcdfUnstructuredField,
)
def new_rectilinear(**kwds):
from pymt.grids import RectilinearField
ndims = kwds.pop("ndims", 1)
shape = np.random.randint(3, 101 + 1, ndims)
args = []
for size in shape:
args.append(np.cumsum((1.0 - np.random.random(size))))
return RectilinearField(*args, **kwds)
_GRID_TYPE = {
"rectilinear": NetcdfRectilinearField,
"structured": NetcdfStructuredField,
"unstructured": NetcdfUnstructuredField,
}
@pytest.mark.parametrize("ndims", (1, 2, 3))
@pytest.mark.parametrize("grid", ("rectilinear", "structured", "unstructured"))
def test_rectilinear_points(tmpdir, grid, ndims):
field = new_rectilinear(
ndims=ndims,
coordinate_names=("elevation", "latitude", "longitude"),
units=("m", "degrees_north", "degrees_east"),
)
data = np.arange(field.get_point_count())
field.add_field("air_temperature", data, centering="point", units="F")
with tmpdir.as_cwd():
_GRID_TYPE[grid]("rectilinear.nc", field)
assert os.path.isfile("rectilinear.nc")
| {
"content_hash": "84fca234f808360e29275944d3f06273",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 79,
"avg_line_length": 27.022222222222222,
"alnum_prop": 0.665296052631579,
"repo_name": "csdms/coupling",
"id": "de2764de5c46867ee1ee2afc76a84b5a21e6ae2f",
"size": "1216",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/printers/nc/test_ugrid.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "469855"
},
{
"name": "Shell",
"bytes": "1528"
}
],
"symlink_target": ""
} |
"""
Contains the player and NPC classes.
"""
import os
import numpy as np
import pygame
import logging
from Placeable import Placeable
from Item import Item
from Text import TextDialog
from FindPath import Maze
DEFAULT_HEALTH = 100
DEFAULT_FEAR = 75
DEFAULT_HATE = 25
class Person(Placeable):
"""
Base class for all characters in game.
"""
persons = []
players = []
npcs = []
def __init__(self, position, game, world, sprite, health=DEFAULT_HEALTH):
"""
@param health The health that is given at init.
@param position [x, y] the position at init.
"""
super(Person, self).__init__(position, sprite)
self.health = health
self.game = game
self.world = world
self.inventory = []
self.move_cool = 0.10 # seconds
self.move_time = np.inf
self.velocity = np.array([0,0])
self._add(self)
@classmethod
def _add(self, p):
self.persons.append(p)
def update(self):
if len(self.game.text_dialog_queue) > 0:
return
if (self.velocity != 0).any():
newpos = self.position + self.velocity
self.move(newpos)
self.move_time += self.game.dt
def move(self, newpos):
# Change facing direction
change = newpos - self.position
self.facing = int(round(np.arctan2(change[0], change[1]) / np.pi * 2))
# Check if outside bounds of map
inside_x = 0 <= newpos[0] < self.world.shape[0]
inside_y = 0 <= newpos[1] < self.world.shape[1]
# Check if new position is on walkable place
on_walkable = self.world.board[tuple(newpos)] in ('g')
if on_walkable:
for person in Person.persons + [self.game.player]:
if person == self:
continue # Cannot collide with self.
if (newpos == person.position).all():
on_walkable = False
# If new position is on water, must have boat
if self.world.board[tuple(newpos)] == 'w':
names = [item.name for item in self.inventory]
has_boat = 'Boat' in names
on_walkable = has_boat
# Only walk after certain cooldown
cooldown_passed = self.move_time > self.move_cool
# Check if step is valid, and if it is, move
if (inside_x and inside_y and on_walkable and cooldown_passed):
self.position = newpos
self.move_time = 0
return True
else:
return False
def speed_up(self, speed_vector):
"""
Changes the velocity of the player.
"""
self.velocity += speed_vector
def hurt(self, dmg):
self.health -= dmg
if self.health <= 0:
self.health = 0
self._die()
def _die(self):
self.dialog = "I died."
self.update = self._update_dead
def _update_dead(self):
# Cannot do anything when dead.
pass
def give_item(self, item):
if not isinstance(item, Item):
logging.error(
"Item given to player is not item instance."
)
return
self.inventory.append(item)
class Player(Person):
"""
Contains the player-controlled character.
"""
def __init__(self, position, game, world, health=DEFAULT_HEALTH):
super(Player, self).__init__(position, game, world, "player", health)
self.interacting_with = None
@classmethod
def _add(self, p):
self.players.append(p)
self.persons.append(p)
def give_item(self, item):
"""
Player reads pages if they are picked up.
"""
super(Player, self).give_item(item)
TextDialog("You got %s!" % item.name.lower(), self.game)
if item.name == "Page":
TextDialog(item.text, self.game)
class NPC(Person):
"""
Contains a character controlled by the game.
"""
def __init__(self,
position, game, world, health=DEFAULT_HEALTH,
dialog=None, items=[],
fear=DEFAULT_FEAR, hate=DEFAULT_HATE,
):
super(NPC, self).__init__(position, game, world, "npc", health)
self.dialog = dialog
self.fear = fear
self.hate = hate
for item in items:
self.give_item(item)
self.set_target()
self.set_path()
@classmethod
def _add(self, p):
self.persons.append(p)
self.npcs.append(p)
def next_step(self):
"""
Since the game controls this character, some algorithm should say where
it moves.
TODO
"""
water_to_left = 0
water_to_right = 0
water_up = 0
water_down = 0
for i in range(self.position[0], self.world.shape[0]):
water_to_right += 1
if self.world.board[i, self.position[1]] == 'w':
break
for i in list(reversed(range(0, self.position[0]))):
water_to_left += 1
if self.world.board[i, self.position[1]] == 'w':
break
for i in range(self.position[1], self.world.shape[1]):
water_up += 1
if self.world.board[self.position[0], i] == 'w':
break
for i in list(reversed(range(0, self.position[1]))):
water_down += 1
if self.world.board[self.position[0], i] == 'w':
break
if np.random.random() > 0.8:
right_direction = max([water_down, water_up, water_to_left,
water_to_right])
if right_direction == water_down:
return np.asarray([0, -1])
elif right_direction == water_up:
return np.asarray([0, 1])
elif right_direction == water_to_right:
return np.asarray([1, 0])
elif right_direction == water_to_left:
return np.asarray([-1, 0])
return np.asarray([0, 0])
def set_target(self):
change = (self.position - self.game.player.position)
if change.sum() <= 10 and max(self.fear, self.hate) > 50:
if self.fear > self.hate:
idealtarget = self.position + change
else:
idealtarget = self.position - change
else:
idealtarget = self.position + np.random.randint(-3, 3+1, size=2)
for r in xrange(10):
possibilities = []
for x in xrange(-r, r+1):
for y in xrange(-r, r+1):
target = idealtarget + [x, y]
inside_x = 0 <= target[0] < self.world.shape[0]
inside_y = 0 <= target[1] < self.world.shape[1]
if (inside_x and inside_y):
on_walkable = self.world.board[tuple(target)] in ('g')
if on_walkable:
possibilities.append(target)
if not possibilities:
continue
else:
self.target = possibilities[np.random.randint(0, len(possibilities))]
break
def set_path(self):
maze = Maze(
self.position,
self.target,
self.world.board == 'g',
)
self.path = maze.solve(10)
def update(self, depth=0):
"""
"""
if len(self.game.text_dialog_queue) > 0:
return False
# Only walk after certain cooldown
cooldown_passed = self.move_time > self.move_cool
if cooldown_passed:
if not self.path: # if empty or None
self.set_target()
self.set_path()
if self.path:
newpos = self.path[0]
if self.move(newpos): # Successfull move.
del self.path[0]
elif depth >= 10:
# Move was blocked by some entity.
# Clear current path and try again.
# Maxium depth to avoid potential infinite recursive loop.
self.path = []
self.update(depth+1)
return
else:
self.path = []
self.move_time = 0
else: # Else backup solution.
goal = self.next_step()
newpos = self.position + (self.velocity + goal)
# If next is water, try turn
is_water = self.world.board[tuple(newpos)] == 'w'
if is_water:
self.velocity = np.asarray([0, 0])
# If at end of world, move
at_yend = self.position[0] == (self.world.shape[0] - 1)
at_xend = self.position[1] == (self.world.shape[1] - 1)
if at_yend or at_xend:
self.velocity = np.asarray([0, 0])
if (self.velocity == [0, 0]).all():
self.speed_up(goal)
# Do the actual moving
newpos = self.position + self.velocity
super(NPC, self).move(newpos)
self.move_time += self.game.dt
def interact(self):
"""
Called when player interacts with this NPC.
"""
self.fear -= 50
if not self.dialog:
return
TextDialog(self.dialog, self.game)
self.dialog = "I have nothing more to tell you."
for i in range(len(self.inventory)):
self.game.player.give_item(self.inventory.pop(i))
| {
"content_hash": "614eb33031c1bb8fcf81e892e272b098",
"timestamp": "",
"source": "github",
"line_count": 318,
"max_line_length": 85,
"avg_line_length": 30.19496855345912,
"alnum_prop": 0.5135388460737347,
"repo_name": "benedicteb/outcast",
"id": "124f442b2c1ca5cb67b3e78f9d001a40f21772aa",
"size": "9624",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Person.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "29"
},
{
"name": "Python",
"bytes": "29627"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from django.conf import settings
import logging
def _module_logger_helper(glob):
logger_path = '.'.join([settings.SURROUND_ROOT_LOGGER_NAME, glob['__name__']])
logger = logging.getLogger(logger_path)
entities = {}
entities['logger'] = logger
for level in ['debug', 'info', 'warning', 'error', 'critical']:
entities[level] = getattr(logger, level)
return entities
def prepare_module_logger(glob):
return type('logging', (object,), _module_logger_helper(glob))
def setupModuleLogger(glob):
glob.update(_module_logger_helper(glob))
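# Illustrative usage sketch (comments only; assumes SURROUND_ROOT_LOGGER_NAME
# is defined in the Django settings). In a module one would typically write
#
#     logging = prepare_module_logger(globals())
#     logging.info('message')
#
# or, to inject debug/info/warning/error/critical directly into the module
# namespace,
#
#     setupModuleLogger(globals())
#     info('message')
#
# both of which log under '<SURROUND_ROOT_LOGGER_NAME>.<module __name__>'.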
| {
"content_hash": "f2421b8686b817a08a98ffef9f0ea1c1",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 82,
"avg_line_length": 26.782608695652176,
"alnum_prop": 0.6801948051948052,
"repo_name": "sniegu/django-surround",
"id": "6f2ad5a5ba2252cfb36a2a69ed5ed0007694d99a",
"size": "616",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "surround/django/logging.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "66947"
}
],
"symlink_target": ""
} |
from ansible.callbacks import AggregateStats
class CustomAggregateStats(AggregateStats):
"""
Holds stats about per-host activity during playbook runs.
"""
def __init__(self):
super(CustomAggregateStats, self).__init__()
self.results = {}
def compute(self, runner_results, setup=False, poll=False,
ignore_errors=False):
"""
Walk through all results and increment stats.
"""
super(CustomAggregateStats, self).compute(runner_results, setup, poll,
ignore_errors)
for (host, value) in runner_results.get('contacted', {}).iteritems():
if not host in self.results:
self.results[host] = []
if 'invocation' in value and value['invocation']['module_name'] == 'debug' and 'var' in value:
self.results[host].append(value['var'])
def summarize(self, host):
"""
Return information about a particular host
"""
summarized_info = super(CustomAggregateStats, self).summarize(host)
# Adding the info I need
if host in self.results:
summarized_info['result'] = self.results[host]
return summarized_info
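# Illustrative sketch (hypothetical host name and payload, shown only to make
# the behaviour concrete): a runner result from a ``debug`` task that set a
# ``var`` ends up under 'result' in summarize().
#
#     stats = CustomAggregateStats()
#     stats.compute({'contacted': {'web1': {
#         'invocation': {'module_name': 'debug'}, 'var': {'answer': 42}}}})
#     stats.summarize('web1')['result']   # -> [{'answer': 42}]
#
# In an actual run this object is passed as the ``stats`` argument when
# constructing ``ansible.playbook.PlayBook`` (ansible 1.x API).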
| {
"content_hash": "ca23d807eff85643a6fd0b6547f989e8",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 106,
"avg_line_length": 36.08571428571429,
"alnum_prop": 0.5835312747426762,
"repo_name": "Tech-Twnel/python-webhooks",
"id": "e781d5092159df93dc0684c2a11fa88587b4310b",
"size": "1263",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "helpers/callbacks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "7088"
},
{
"name": "Shell",
"bytes": "319"
}
],
"symlink_target": ""
} |
"""Helpers for overriding numpy functions.
We override numpy functions in `~astropy.units.Quantity.__array_function__`.
In this module, the numpy functions are split in four groups, each of
which has an associated `set` or `dict`:
1. SUBCLASS_SAFE_FUNCTIONS (set), if the numpy implementation
supports Quantity; we pass on to ndarray.__array_function__.
2. FUNCTION_HELPERS (dict), if the numpy implementation is usable
after converting quantities to arrays with suitable units,
and possibly setting units on the result.
3. DISPATCHED_FUNCTIONS (dict), if the function makes sense but
requires a Quantity-specific implementation
4. UNSUPPORTED_FUNCTIONS (set), if the function does not make sense.
For the FUNCTION_HELPERS `dict`, the value is a function that does the
unit conversion. It should take the same arguments as the numpy
function would (though one can use ``*args`` and ``**kwargs``) and
return a tuple of ``args, kwargs, unit, out``, where ``args`` and
``kwargs`` will be passed on to the numpy implementation,
``unit`` is a possible unit of the result (`None` if it should not be
converted to Quantity), and ``out`` is a possible output Quantity passed
in, which will be filled in-place.
For the DISPATCHED_FUNCTIONS `dict`, the value is a function that
implements the numpy functionality for Quantity input. It should
return a tuple of ``result, unit, out``, where ``result`` is generally
a plain array with the result, and ``unit`` and ``out`` are as above.
If unit is `None`, result gets returned directly, so one can also
return a Quantity directly using ``quantity_result, None, None``.
"""
import functools
import operator
import numpy as np
from astropy.units.core import (
UnitsError, UnitTypeError, dimensionless_unscaled)
from astropy.utils.compat import NUMPY_LT_1_18, NUMPY_LT_1_20
from astropy.utils import isiterable
# In 1.17, overrides are enabled by default, but it is still possible to
# turn them off using an environment variable. We use getattr since it
# is planned to remove that possibility in later numpy versions.
ARRAY_FUNCTION_ENABLED = getattr(np.core.overrides,
'ENABLE_ARRAY_FUNCTION', True)
SUBCLASS_SAFE_FUNCTIONS = set()
"""Functions with implementations supporting subclasses like Quantity."""
FUNCTION_HELPERS = {}
"""Functions with implementations usable with proper unit conversion."""
DISPATCHED_FUNCTIONS = {}
"""Functions for which we provide our own implementation."""
UNSUPPORTED_FUNCTIONS = set()
"""Functions that cannot sensibly be used with quantities."""
SUBCLASS_SAFE_FUNCTIONS |= {
np.shape, np.size, np.ndim,
np.reshape, np.ravel, np.moveaxis, np.rollaxis, np.swapaxes,
np.transpose, np.atleast_1d, np.atleast_2d, np.atleast_3d,
np.expand_dims, np.squeeze, np.broadcast_to, np.broadcast_arrays,
np.flip, np.fliplr, np.flipud, np.rot90,
np.argmin, np.argmax, np.argsort, np.lexsort, np.searchsorted,
np.nonzero, np.argwhere, np.flatnonzero,
np.diag_indices_from, np.triu_indices_from, np.tril_indices_from,
np.real, np.imag, np.diagonal, np.diagflat,
np.empty_like, np.zeros_like,
np.compress, np.extract, np.delete, np.trim_zeros, np.roll, np.take,
np.put, np.fill_diagonal, np.tile, np.repeat,
np.split, np.array_split, np.hsplit, np.vsplit, np.dsplit,
np.stack, np.column_stack, np.hstack, np.vstack, np.dstack,
np.amax, np.amin, np.ptp, np.sum, np.cumsum,
np.prod, np.product, np.cumprod, np.cumproduct,
np.round, np.around,
np.fix, np.angle, np.i0, np.clip,
np.isposinf, np.isneginf, np.isreal, np.iscomplex,
np.average, np.mean, np.std, np.var, np.median, np.trace,
np.nanmax, np.nanmin, np.nanargmin, np.nanargmax, np.nanmean,
np.nanmedian, np.nansum, np.nancumsum, np.nanstd, np.nanvar,
np.nanprod, np.nancumprod,
np.einsum_path, np.trapz, np.linspace,
np.sort, np.msort, np.partition, np.meshgrid,
np.common_type, np.result_type, np.can_cast, np.min_scalar_type,
np.iscomplexobj, np.isrealobj,
np.shares_memory, np.may_share_memory,
np.apply_along_axis, np.take_along_axis, np.put_along_axis,
np.linalg.cond, np.linalg.multi_dot}
if NUMPY_LT_1_18:
SUBCLASS_SAFE_FUNCTIONS |= {np.alen}
# Implemented as methods on Quantity:
# np.ediff1d is from setops, but we support it anyway; the others
# currently return NotImplementedError.
# TODO: move latter to UNSUPPORTED? Would raise TypeError instead.
SUBCLASS_SAFE_FUNCTIONS |= {np.ediff1d}
# Nonsensical for quantities.
UNSUPPORTED_FUNCTIONS |= {
np.packbits, np.unpackbits, np.unravel_index,
np.ravel_multi_index, np.ix_, np.cov, np.corrcoef,
np.busday_count, np.busday_offset, np.datetime_as_string,
np.is_busday, np.all, np.any, np.sometrue, np.alltrue}
# Could be supported if we had a natural logarithm unit.
UNSUPPORTED_FUNCTIONS |= {np.linalg.slogdet}
# The following are not just unsupported, but so unlikely to be thought
# to be supported that we ignore them in testing. (Kept in a separate
# variable so that we can check consistency in the test routine -
# test_quantity_non_ufuncs.py)
IGNORED_FUNCTIONS = {
# Deprecated
np.asscalar,
# I/O - useless for Quantity, since no way to store the unit.
np.save, np.savez, np.savetxt, np.savez_compressed,
# Polynomials
np.poly, np.polyadd, np.polyder, np.polydiv, np.polyfit, np.polyint,
np.polymul, np.polysub, np.polyval, np.roots, np.vander}
if NUMPY_LT_1_20:
# financial
IGNORED_FUNCTIONS |= {np.fv, np.ipmt, np.irr, np.mirr, np.nper,
np.npv, np.pmt, np.ppmt, np.pv, np.rate}
if NUMPY_LT_1_18:
IGNORED_FUNCTIONS |= {np.rank}
else:
IGNORED_FUNCTIONS |= {np.alen}
UNSUPPORTED_FUNCTIONS |= IGNORED_FUNCTIONS
class FunctionAssigner:
def __init__(self, assignments):
self.assignments = assignments
def __call__(self, f=None, helps=None, module=np):
"""Add a helper to a numpy function.
Normally used as a decorator.
If ``helps`` is given, it should be the numpy function helped (or an
iterable of numpy functions helped).
If ``helps`` is not given, it is assumed the function helped is the
numpy function with the same name as the decorated function.
"""
if f is not None:
if helps is None:
helps = getattr(module, f.__name__)
if not isiterable(helps):
helps = (helps,)
for h in helps:
self.assignments[h] = f
return f
elif helps is not None or module is not np:
return functools.partial(self.__call__, helps=helps, module=module)
else: # pragma: no cover
raise ValueError("function_helper requires at least one argument.")
function_helper = FunctionAssigner(FUNCTION_HELPERS)
dispatched_function = FunctionAssigner(DISPATCHED_FUNCTIONS)
@function_helper(helps={
np.copy, np.asfarray, np.real_if_close, np.sort_complex, np.resize,
np.fft.fft, np.fft.ifft, np.fft.rfft, np.fft.irfft,
np.fft.fft2, np.fft.ifft2, np.fft.rfft2, np.fft.irfft2,
np.fft.fftn, np.fft.ifftn, np.fft.rfftn, np.fft.irfftn,
np.fft.hfft, np.fft.ihfft,
np.linalg.eigvals, np.linalg.eigvalsh})
def invariant_a_helper(a, *args, **kwargs):
return (a.view(np.ndarray),) + args, kwargs, a.unit, None
@function_helper(helps={np.tril, np.triu})
def invariant_m_helper(m, *args, **kwargs):
return (m.view(np.ndarray),) + args, kwargs, m.unit, None
@function_helper(helps={np.fft.fftshift, np.fft.ifftshift})
def invariant_x_helper(x, *args, **kwargs):
return (x.view(np.ndarray),) + args, kwargs, x.unit, None
# Note that ones_like does *not* work by default (unlike zeros_like) since if
# one creates an empty array with a unit, one cannot just fill it with unity.
# Indeed, in this respect, it is a bit of an odd function for Quantity. On the
# other hand, it matches the idea that a unit is the same as the quantity with
# that unit and value of 1. Also, it used to work without __array_function__.
@function_helper
def ones_like(a, *args, **kwargs):
subok = args[2] if len(args) > 2 else kwargs.pop('subok', True)
unit = a.unit if subok else None
return (a.view(np.ndarray),) + args, kwargs, unit, None
@function_helper
def sinc(x):
from astropy.units.si import radian
try:
x = x.to_value(radian)
except UnitsError:
raise UnitTypeError("Can only apply 'sinc' function to "
"quantities with angle units")
return (x,), {}, dimensionless_unscaled, None
@dispatched_function
def unwrap(p, discont=None, axis=-1):
from astropy.units.si import radian
if discont is None:
discont = np.pi << radian
p, discont = _as_quantities(p, discont)
result = np.unwrap.__wrapped__(p.to_value(radian),
discont.to_value(radian), axis=axis)
result = radian.to(p.unit, result)
return result, p.unit, None
@function_helper
def argpartition(a, *args, **kwargs):
return (a.view(np.ndarray),) + args, kwargs, None, None
@function_helper
def full_like(a, fill_value, *args, **kwargs):
unit = a.unit if kwargs.get('subok', True) else None
return (a.view(np.ndarray),
a._to_own_unit(fill_value)) + args, kwargs, unit, None
@function_helper
def putmask(a, mask, values):
from astropy.units import Quantity
if isinstance(a, Quantity):
return (a.view(np.ndarray), mask,
a._to_own_unit(values)), {}, a.unit, None
elif isinstance(values, Quantity):
return (a, mask,
values.to_value(dimensionless_unscaled)), {}, None, None
else:
raise NotImplementedError
@function_helper
def place(arr, mask, vals):
from astropy.units import Quantity
if isinstance(arr, Quantity):
return (arr.view(np.ndarray), mask,
arr._to_own_unit(vals)), {}, arr.unit, None
elif isinstance(vals, Quantity):
return (arr, mask,
vals.to_value(dimensionless_unscaled)), {}, None, None
else:
raise NotImplementedError
@function_helper
def copyto(dst, src, *args, **kwargs):
from astropy.units import Quantity
if isinstance(dst, Quantity):
return ((dst.view(np.ndarray), dst._to_own_unit(src)) + args,
kwargs, None, None)
elif isinstance(src, Quantity):
return ((dst, src.to_value(dimensionless_unscaled)) + args,
kwargs, None, None)
else:
raise NotImplementedError
@function_helper
def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None):
nan = x._to_own_unit(nan)
if posinf is not None:
posinf = x._to_own_unit(posinf)
if neginf is not None:
neginf = x._to_own_unit(neginf)
return ((x.view(np.ndarray),),
            dict(copy=copy, nan=nan, posinf=posinf, neginf=neginf),
x.unit, None)
def _as_quantity(a):
"""Convert argument to a Quantity (or raise NotImplementedError)."""
from astropy.units import Quantity
try:
return Quantity(a, copy=False, subok=True)
except Exception:
# If we cannot convert to Quantity, we should just bail.
raise NotImplementedError
def _as_quantities(*args):
"""Convert arguments to Quantity (or raise NotImplentedError)."""
from astropy.units import Quantity
try:
return tuple(Quantity(a, copy=False, subok=True)
for a in args)
except Exception:
# If we cannot convert to Quantity, we should just bail.
raise NotImplementedError
def _quantities2arrays(*args, unit_from_first=False):
"""Convert to arrays in units of the first argument that has a unit.
If unit_from_first, take the unit of the first argument regardless
whether it actually defined a unit (e.g., dimensionless for arrays).
"""
# Turn first argument into a quantity.
q = _as_quantity(args[0])
if len(args) == 1:
return (q.value,), q.unit
# If we care about the unit being explicit, then check whether this
# argument actually had a unit, or was likely inferred.
if not unit_from_first and (q.unit is q._default_unit
and not hasattr(args[0], 'unit')):
        # Here, the argument could still be things like [10*u.one, 11.*u.one],
        # i.e., properly dimensionless. So, we only override with anything
        # that has a unit not equivalent to dimensionless (fine to let other
        # dimensionless units pass, even if explicitly given).
for arg in args[1:]:
trial = _as_quantity(arg)
if not trial.unit.is_equivalent(q.unit):
# Use any explicit unit not equivalent to dimensionless.
q = trial
break
# We use the private _to_own_unit method here instead of just
# converting everything to quantity and then do .to_value(qs0.unit)
# as we want to allow arbitrary unit for 0, inf, and nan.
try:
arrays = tuple((q._to_own_unit(arg)) for arg in args)
except TypeError:
raise NotImplementedError
return arrays, q.unit
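# Illustrative examples for _quantities2arrays (comments only, assuming
# ``import astropy.units as u``):
#     _quantities2arrays(1. * u.m, 50. * u.cm)  # -> ((1.0, 0.5), u.m)
#     _quantities2arrays(1. * u.m, 0.)          # -> ((1.0, 0.0), u.m); bare
#                                               #    0, inf, nan are accepted
# With the default unit_from_first=False, a leading plain array defers to a
# later argument that carries an explicit, non-dimensionless unit; helpers
# such as append() and insert() pass unit_from_first=True so that the first
# argument's unit always wins.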
def _iterable_helper(*args, out=None, **kwargs):
"""Convert arguments to Quantity, and treat possible 'out'."""
from astropy.units import Quantity
if out is not None:
if isinstance(out, Quantity):
kwargs['out'] = out.view(np.ndarray)
else:
# TODO: for an ndarray output, we could in principle
# try converting all Quantity to dimensionless.
raise NotImplementedError
arrays, unit = _quantities2arrays(*args)
return arrays, kwargs, unit, out
@function_helper
def concatenate(arrays, axis=0, out=None):
# TODO: make this smarter by creating an appropriately shaped
# empty output array and just filling it.
arrays, kwargs, unit, out = _iterable_helper(*arrays, out=out, axis=axis)
return (arrays,), kwargs, unit, out
@dispatched_function
def block(arrays):
# We need to override block since the numpy implementation can take two
# different paths, one for concatenation, one for creating a large empty
# result array in which parts are set. Each assumes array input and
# cannot be used directly. Since it would be very costly to inspect all
# arrays and then turn them back into a nested list, we just copy here the
# second implementation, np.core.shape_base._block_slicing, since it is
# shortest and easiest.
(arrays, list_ndim, result_ndim,
final_size) = np.core.shape_base._block_setup(arrays)
shape, slices, arrays = np.core.shape_base._block_info_recursion(
arrays, list_ndim, result_ndim)
# Here, one line of difference!
arrays, unit = _quantities2arrays(*arrays)
# Back to _block_slicing
dtype = np.result_type(*[arr.dtype for arr in arrays])
F_order = all(arr.flags['F_CONTIGUOUS'] for arr in arrays)
C_order = all(arr.flags['C_CONTIGUOUS'] for arr in arrays)
order = 'F' if F_order and not C_order else 'C'
result = np.empty(shape=shape, dtype=dtype, order=order)
for the_slice, arr in zip(slices, arrays):
result[(Ellipsis,) + the_slice] = arr
return result, unit, None
@function_helper
def choose(a, choices, out=None, **kwargs):
choices, kwargs, unit, out = _iterable_helper(*choices, out=out, **kwargs)
return (a, choices,), kwargs, unit, out
@function_helper
def select(condlist, choicelist, default=0):
choicelist, kwargs, unit, out = _iterable_helper(*choicelist)
if default != 0:
default = (1 * unit)._to_own_unit(default)
return (condlist, choicelist, default), kwargs, unit, out
@dispatched_function
def piecewise(x, condlist, funclist, *args, **kw):
from astropy.units import Quantity
# Copied implementation from numpy.lib.function_base.piecewise,
# taking care of units of function outputs.
n2 = len(funclist)
# undocumented: single condition is promoted to a list of one condition
if np.isscalar(condlist) or (
not isinstance(condlist[0], (list, np.ndarray)) and x.ndim != 0):
condlist = [condlist]
if any(isinstance(c, Quantity) for c in condlist):
raise NotImplementedError
condlist = np.array(condlist, dtype=bool)
n = len(condlist)
if n == n2 - 1: # compute the "otherwise" condition.
condelse = ~np.any(condlist, axis=0, keepdims=True)
condlist = np.concatenate([condlist, condelse], axis=0)
n += 1
elif n != n2:
raise ValueError(
f"with {n} condition(s), either {n} or {n + 1} functions are expected"
)
y = np.zeros(x.shape, x.dtype)
where = []
what = []
for k in range(n):
item = funclist[k]
if not callable(item):
where.append(condlist[k])
what.append(item)
else:
vals = x[condlist[k]]
if vals.size > 0:
where.append(condlist[k])
what.append(item(vals, *args, **kw))
what, unit = _quantities2arrays(*what)
for item, value in zip(where, what):
y[item] = value
return y, unit, None
@function_helper
def append(arr, values, *args, **kwargs):
arrays, unit = _quantities2arrays(arr, values, unit_from_first=True)
return arrays + args, kwargs, unit, None
@function_helper
def insert(arr, obj, values, *args, **kwargs):
from astropy.units import Quantity
if isinstance(obj, Quantity):
raise NotImplementedError
(arr, values), unit = _quantities2arrays(arr, values,
unit_from_first=True)
return (arr, obj, values) + args, kwargs, unit, None
@function_helper
def pad(array, pad_width, mode='constant', **kwargs):
# pad dispatches only on array, so that must be a Quantity.
for key in 'constant_values', 'end_values':
value = kwargs.pop(key, None)
if value is None:
continue
if not isinstance(value, tuple):
value = (value,)
new_value = []
for v in value:
new_value.append(
tuple(array._to_own_unit(_v) for _v in v)
if isinstance(v, tuple) else array._to_own_unit(v))
kwargs[key] = new_value
return (array.view(np.ndarray), pad_width, mode), kwargs, array.unit, None
@function_helper
def where(condition, *args):
from astropy.units import Quantity
if isinstance(condition, Quantity) or len(args) != 2:
raise NotImplementedError
args, unit = _quantities2arrays(*args)
return (condition,) + args, {}, unit, None
@function_helper(helps=({np.quantile, np.nanquantile}))
def quantile(a, q, *args, _q_unit=dimensionless_unscaled, **kwargs):
if len(args) >= 2:
out = args[1]
args = args[:1] + args[2:]
else:
out = kwargs.pop('out', None)
from astropy.units import Quantity
if isinstance(q, Quantity):
q = q.to_value(_q_unit)
(a,), kwargs, unit, out = _iterable_helper(a, out=out, **kwargs)
return (a, q) + args, kwargs, unit, out
@function_helper(helps={np.percentile, np.nanpercentile})
def percentile(a, q, *args, **kwargs):
from astropy.units import percent
return quantile(a, q, *args, _q_unit=percent, **kwargs)
@function_helper
def count_nonzero(a, *args, **kwargs):
return (a.value,) + args, kwargs, None, None
@function_helper(helps={np.isclose, np.allclose})
def close(a, b, rtol=1e-05, atol=1e-08, *args, **kwargs):
from astropy.units import Quantity
(a, b), unit = _quantities2arrays(a, b, unit_from_first=True)
# Allow number without a unit as having the unit.
atol = Quantity(atol, unit).value
return (a, b, rtol, atol) + args, kwargs, None, None
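# Illustrative example (assuming ``import astropy.units as u``): a bare atol
# is interpreted in the unit of the first argument, so
# ``np.isclose(1.01 * u.m, 100. * u.cm, atol=0.02)`` uses an absolute
# tolerance of 0.02 m and returns True.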
@function_helper
def array_equal(a1, a2):
args, unit = _quantities2arrays(a1, a2)
return args, {}, None, None
@function_helper
def array_equiv(a1, a2):
args, unit = _quantities2arrays(a1, a2)
return args, {}, None, None
@function_helper(helps={np.dot, np.outer})
def dot_like(a, b, out=None):
from astropy.units import Quantity
a, b = _as_quantities(a, b)
unit = a.unit * b.unit
if out is not None:
if not isinstance(out, Quantity):
raise NotImplementedError
return tuple(x.view(np.ndarray) for x in (a, b, out)), {}, unit, out
else:
return (a.view(np.ndarray), b.view(np.ndarray)), {}, unit, None
@function_helper(helps={np.cross, np.inner, np.vdot, np.tensordot, np.kron,
np.correlate, np.convolve})
def cross_like(a, b, *args, **kwargs):
a, b = _as_quantities(a, b)
unit = a.unit * b.unit
return (a.view(np.ndarray), b.view(np.ndarray)) + args, kwargs, unit, None
@function_helper
def einsum(subscripts, *operands, out=None, **kwargs):
from astropy.units import Quantity
if not isinstance(subscripts, str):
raise ValueError('only "subscripts" string mode supported for einsum.')
if out is not None:
if not isinstance(out, Quantity):
raise NotImplementedError
else:
kwargs['out'] = out.view(np.ndarray)
qs = _as_quantities(*operands)
unit = functools.reduce(operator.mul, (q.unit for q in qs),
dimensionless_unscaled)
arrays = tuple(q.view(np.ndarray) for q in qs)
return (subscripts,) + arrays, kwargs, unit, out
@function_helper
def bincount(x, weights=None, minlength=0):
from astropy.units import Quantity
if isinstance(x, Quantity):
raise NotImplementedError
return (x, weights.value, minlength), {}, weights.unit, None
@function_helper
def digitize(x, bins, *args, **kwargs):
arrays, unit = _quantities2arrays(x, bins, unit_from_first=True)
return arrays + args, kwargs, None, None
def _check_bins(bins, unit):
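    # Bin edges given as a Quantity array are converted to plain values in
    # ``unit``; a scalar (integer) number of bins passes through unchanged.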
from astropy.units import Quantity
check = _as_quantity(bins)
if check.ndim > 0:
return check.to_value(unit)
elif isinstance(bins, Quantity):
# bins should be an integer (or at least definitely not a Quantity).
raise NotImplementedError
else:
return bins
@function_helper
def histogram(a, bins=10, range=None, weights=None, density=None):
if weights is not None:
weights = _as_quantity(weights)
unit = weights.unit
weights = weights.value
else:
unit = None
a = _as_quantity(a)
if not isinstance(bins, str):
bins = _check_bins(bins, a.unit)
if density:
unit = (unit or 1) / a.unit
return ((a.value, bins, range), {'weights': weights, 'density': density},
(unit, a.unit), None)
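# Illustrative example (assuming ``import astropy.units as u``): for
# ``np.histogram([1., 2., 3.] * u.m, density=True)`` the counts come back
# with unit 1 / m and the bin edges with unit m, matching the
# ``(unit, a.unit)`` pair returned above.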
@function_helper(helps=np.histogram_bin_edges)
def histogram_bin_edges(a, bins=10, range=None, weights=None):
# weights is currently unused
a = _as_quantity(a)
if not isinstance(bins, str):
bins = _check_bins(bins, a.unit)
return (a.value, bins, range, weights), {}, a.unit, None
@function_helper
def histogram2d(x, y, bins=10, range=None, weights=None, density=None):
from astropy.units import Quantity
if weights is not None:
weights = _as_quantity(weights)
unit = weights.unit
weights = weights.value
else:
unit = None
x, y = _as_quantities(x, y)
try:
n = len(bins)
except TypeError:
# bins should be an integer (or at least definitely not a Quantity).
if isinstance(bins, Quantity):
raise NotImplementedError
else:
if n == 1:
raise NotImplementedError
elif n == 2 and not isinstance(bins, Quantity):
bins = [_check_bins(b, unit)
for (b, unit) in zip(bins, (x.unit, y.unit))]
else:
bins = _check_bins(bins, x.unit)
y = y.to(x.unit)
if density:
unit = (unit or 1) / x.unit / y.unit
return ((x.value, y.value, bins, range),
{'weights': weights, 'density': density},
(unit, x.unit, y.unit), None)
@function_helper
def histogramdd(sample, bins=10, range=None, weights=None, density=None):
if weights is not None:
weights = _as_quantity(weights)
unit = weights.unit
weights = weights.value
else:
unit = None
try:
# Sample is an ND-array.
_, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = _as_quantities(*sample)
sample_units = [s.unit for s in sample]
sample = [s.value for s in sample]
D = len(sample)
else:
sample = _as_quantity(sample)
sample_units = [sample.unit] * D
try:
M = len(bins)
except TypeError:
# bins should be an integer
from astropy.units import Quantity
if isinstance(bins, Quantity):
raise NotImplementedError
else:
if M != D:
raise ValueError(
'The dimension of bins must be equal to the dimension of the '
' sample x.')
bins = [_check_bins(b, unit)
for (b, unit) in zip(bins, sample_units)]
if density:
unit = functools.reduce(operator.truediv, sample_units, (unit or 1))
return ((sample, bins, range), {'weights': weights, 'density': density},
(unit, sample_units), None)
@function_helper
def diff(a, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue):
a = _as_quantity(a)
if prepend is not np._NoValue:
prepend = _as_quantity(prepend).to_value(a.unit)
if append is not np._NoValue:
append = _as_quantity(append).to_value(a.unit)
return (a.value, n, axis, prepend, append), {}, a.unit, None
@function_helper
def gradient(f, *varargs, **kwargs):
f = _as_quantity(f)
axis = kwargs.get('axis', None)
if axis is None:
n_axis = f.ndim
elif isinstance(axis, tuple):
n_axis = len(axis)
else:
n_axis = 1
if varargs:
varargs = _as_quantities(*varargs)
if len(varargs) == 1 and n_axis > 1:
varargs = varargs * n_axis
if varargs:
units = [f.unit / q.unit for q in varargs]
varargs = tuple(q.value for q in varargs)
else:
units = [f.unit] * n_axis
if len(units) == 1:
units = units[0]
return (f.value,) + varargs, kwargs, units, None
@function_helper
def logspace(start, stop, *args, **kwargs):
from astropy.units import LogQuantity, dex
if (not isinstance(start, LogQuantity) or
not isinstance(stop, LogQuantity)):
raise NotImplementedError
# Get unit from end point as for linspace.
stop = stop.to(dex(stop.unit.physical_unit))
start = start.to(stop.unit)
unit = stop.unit.physical_unit
return (start.value, stop.value) + args, kwargs, unit, None
@function_helper
def geomspace(start, stop, *args, **kwargs):
# Get unit from end point as for linspace.
(stop, start), unit = _quantities2arrays(stop, start)
return (start, stop) + args, kwargs, unit, None
@function_helper
def interp(x, xp, fp, *args, **kwargs):
from astropy.units import Quantity
(x, xp), _ = _quantities2arrays(x, xp)
if isinstance(fp, Quantity):
unit = fp.unit
fp = fp.value
else:
unit = None
return (x, xp, fp) + args, kwargs, unit, None
@function_helper
def unique(ar, return_index=False, return_inverse=False,
return_counts=False, axis=None):
unit = ar.unit
n_index = sum(bool(i) for i in
(return_index, return_inverse, return_counts))
if n_index:
unit = [unit] + n_index * [None]
return (ar.value, return_index, return_inverse, return_counts,
axis), {}, unit, None
@function_helper
def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
(ar1, ar2), unit = _quantities2arrays(ar1, ar2)
if return_indices:
unit = [unit, None, None]
return (ar1, ar2, assume_unique, return_indices), {}, unit, None
@function_helper(helps=(np.setxor1d, np.union1d, np.setdiff1d))
def twosetop(ar1, ar2, *args, **kwargs):
(ar1, ar2), unit = _quantities2arrays(ar1, ar2)
return (ar1, ar2) + args, kwargs, unit, None
@function_helper(helps=(np.isin, np.in1d))
def setcheckop(ar1, ar2, *args, **kwargs):
# This tests whether ar1 is in ar2, so we should change the unit of
# a1 to that of a2.
(ar2, ar1), unit = _quantities2arrays(ar2, ar1)
return (ar1, ar2) + args, kwargs, None, None
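# Illustrative example (assuming ``import astropy.units as u``):
# ``np.isin(1000. * u.mm, [1., 2.] * u.m)`` converts the first argument to m
# and therefore evaluates to True.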
@dispatched_function
def apply_over_axes(func, a, axes):
# Copied straight from numpy/lib/shape_base, just to omit its
# val = asarray(a); if only it had been asanyarray, or just not there
# since a is assumed to an an array in the next line...
# Which is what we do here - we can only get here if it is a Quantity.
val = a
N = a.ndim
if np.array(axes).ndim == 0:
axes = (axes,)
for axis in axes:
if axis < 0:
axis = N + axis
args = (val, axis)
res = func(*args)
if res.ndim == val.ndim:
val = res
else:
res = np.expand_dims(res, axis)
if res.ndim == val.ndim:
val = res
else:
raise ValueError("function is not returning "
"an array of the correct shape")
# Returning unit is None to signal nothing should happen to
# the output.
return val, None, None
@dispatched_function
def array_repr(arr, *args, **kwargs):
# TODO: The addition of "unit='...'" doesn't worry about line
# length. Could copy & adapt _array_repr_implementation from
# numpy.core.arrayprint.py
cls_name = arr.__class__.__name__
fake_name = '_' * len(cls_name)
fake_cls = type(fake_name, (np.ndarray,), {})
no_unit = np.array_repr(arr.view(fake_cls),
*args, **kwargs).replace(fake_name, cls_name)
unit_part = f"unit='{arr.unit}'"
pre, dtype, post = no_unit.rpartition('dtype')
if dtype:
return f"{pre}{unit_part}, {dtype}{post}", None, None
else:
return f"{no_unit[:-1]}, {unit_part})", None, None
@dispatched_function
def array_str(arr, *args, **kwargs):
# TODO: The addition of the unit doesn't worry about line length.
# Could copy & adapt _array_repr_implementation from
# numpy.core.arrayprint.py
no_unit = np.array_str(arr.value, *args, **kwargs)
return no_unit + arr._unitstr, None, None
@function_helper
def array2string(a, *args, **kwargs):
# array2string breaks on quantities as it tries to turn individual
# items into float, which works only for dimensionless. Since the
# defaults would not keep any unit anyway, this is rather pointless -
# we're better off just passing on the array view. However, one can
# also work around this by passing on a formatter (as is done in Angle).
# So, we do nothing if the formatter argument is present and has the
# relevant formatter for our dtype.
formatter = args[6] if len(args) >= 7 else kwargs.get('formatter', None)
if formatter is None:
a = a.value
else:
# See whether it covers our dtype.
from numpy.core.arrayprint import _get_format_function
with np.printoptions(formatter=formatter) as options:
try:
ff = _get_format_function(a.value, **options)
except Exception:
# Shouldn't happen, but possibly we're just not being smart
# enough, so let's pass things on as is.
pass
else:
# If the selected format function is that of numpy, we know
# things will fail
if 'numpy' in ff.__module__:
a = a.value
return (a,) + args, kwargs, None, None
@function_helper
def diag(v, *args, **kwargs):
# Function works for *getting* the diagonal, but not *setting*.
# So, override always.
return (v.value,) + args, kwargs, v.unit, None
@function_helper(module=np.linalg)
def svd(a, full_matrices=True, compute_uv=True, hermitian=False):
unit = a.unit
if compute_uv:
unit = (None, unit, None)
return ((a.view(np.ndarray), full_matrices, compute_uv, hermitian),
{}, unit, None)
def _interpret_tol(tol, unit):
from astropy.units import Quantity
return Quantity(tol, unit).value
@function_helper(module=np.linalg)
def matrix_rank(M, tol=None, *args, **kwargs):
if tol is not None:
tol = _interpret_tol(tol, M.unit)
return (M.view(np.ndarray), tol) + args, kwargs, None, None
@function_helper(helps={np.linalg.inv, np.linalg.tensorinv})
def inv(a, *args, **kwargs):
return (a.view(np.ndarray),)+args, kwargs, 1/a.unit, None
@function_helper(module=np.linalg)
def pinv(a, rcond=1e-15, *args, **kwargs):
rcond = _interpret_tol(rcond, a.unit)
return (a.view(np.ndarray), rcond) + args, kwargs, 1/a.unit, None
@function_helper(module=np.linalg)
def det(a):
return (a.view(np.ndarray),), {}, a.unit ** a.shape[-1], None
@function_helper(helps={np.linalg.solve, np.linalg.tensorsolve})
def solve(a, b, *args, **kwargs):
a, b = _as_quantities(a, b)
return ((a.view(np.ndarray), b.view(np.ndarray)) + args, kwargs,
b.unit / a.unit, None)
@function_helper(module=np.linalg)
def lstsq(a, b, rcond="warn"):
a, b = _as_quantities(a, b)
if rcond not in (None, "warn", -1):
rcond = _interpret_tol(rcond, a.unit)
return ((a.view(np.ndarray), b.view(np.ndarray), rcond), {},
(b.unit / a.unit, b.unit ** 2, None, a.unit), None)
@function_helper(module=np.linalg)
def norm(x, ord=None, *args, **kwargs):
if ord == 0:
from astropy.units import dimensionless_unscaled
unit = dimensionless_unscaled
else:
unit = x.unit
return (x.view(np.ndarray), ord)+args, kwargs, unit, None
@function_helper(module=np.linalg)
def matrix_power(a, n):
return (a.value, n), {}, a.unit ** n, None
@function_helper(module=np.linalg)
def cholesky(a):
return (a.value,), {}, a.unit ** 0.5, None
@function_helper(module=np.linalg)
def qr(a, mode='reduced'):
if mode.startswith('e'):
units = None
elif mode == 'r':
units = a.unit
else:
from astropy.units import dimensionless_unscaled
units = (dimensionless_unscaled, a.unit)
return (a.value, mode), {}, units, None
@function_helper(helps={np.linalg.eig, np.linalg.eigh})
def eig(a, *args, **kwargs):
from astropy.units import dimensionless_unscaled
return (a.value,)+args, kwargs, (a.unit, dimensionless_unscaled), None
| {
"content_hash": "a230ae8c825bba156bb1706f512aedb7",
"timestamp": "",
"source": "github",
"line_count": 1043,
"max_line_length": 82,
"avg_line_length": 33.350910834132314,
"alnum_prop": 0.6367974701739255,
"repo_name": "dhomeier/astropy",
"id": "c035a7bf34beab763b160796dafc1db68530ea98",
"size": "35008",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "astropy/units/quantity_helper/function_helpers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "10891881"
},
{
"name": "C++",
"bytes": "55147"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Lex",
"bytes": "181654"
},
{
"name": "M4",
"bytes": "18016"
},
{
"name": "Makefile",
"bytes": "51059"
},
{
"name": "Python",
"bytes": "10582251"
},
{
"name": "Shell",
"bytes": "17024"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
} |
import maya.cmds as cmds
class mirrorSelectedShapes(object):
def __init__(self):
self.sculpt_shape = sculpt_shape = ""
self.base_shape = base_shape = ""
self.cur_shapes = []
def main(self,*args):
self.cur_shapes = cmds.ls(sl=True,fl=True)
if len(self.cur_shapes)>0:
if cmds.window("popupWin",exists=1):
cmds.deleteUI("popupWin",window=1)
cmds.window("popupWin",t="Select Base Shape",wh=(160,20),rtf=1,mnb=0,mxb=0,s=0)
cmds.columnLayout()
cmds.button(l="Now Select the \"Base Shape\".",c=self.selectBaseShape)
cmds.showWindow("popupWin")
else:
cmds.confirmDialog(m="Please Select Shapes You Wish To Mirror")
def selectBaseShape(self,*args):
base_shape = cmds.ls(sl=True)[0]
if len(base_shape)>0:
for shape in self.cur_shapes:
channels = []
#Check "base_shape" and "shape" if channels are locked. If so unlock them.
base_lock_list = cmds.listAttr(base_shape,locked=True)
shape_lock_list = cmds.listAttr(shape,locked=True)
if base_lock_list:
for x in base_lock_list:
channels.append("{1}.{0}".format(x,base_shape))
elif shape_lock_list:
for x in shape_lock_list:
channels.append("{1}.{0}".format(x,shape))
if len(channels)>0:
for channel in channels:
cmds.setAttr(channel,lock=False)
self.CreateMirrorShape(base_shape,shape)
if cmds.window("popupWin",exists=1):
cmds.deleteUI("popupWin",window=1)
else:
cmds.confirmDialog(m="Please Select Base Shape Geo!")
def CreateMirrorShape(self,base_shape,sculpt_shape):
if base_shape and sculpt_shape:
if "L_" in sculpt_shape and "left" not in sculpt_shape and "right" not in sculpt_shape:
newShape="R_{0}".format(sculpt_shape.split('L_')[1])
print sculpt_shape,newShape
elif "R_" in sculpt_shape and "left" not in sculpt_shape and "right" not in sculpt_shape:
newShape="L_{0}".format(sculpt_shape.split('R_')[1])
print sculpt_shape,newShape
elif "L_" in sculpt_shape and "left" in sculpt_shape and "right" not in sculpt_shape:
newShape="R_{0}right{1}".format(sculpt_shape.split('L_')[1].split('left')[0],sculpt_shape.split('L_')[1].split('left')[1])
print sculpt_shape,newShape
elif "L_" in sculpt_shape and "left" not in sculpt_shape and "right" in sculpt_shape:
newShape="R_{0}left{1}".format(sculpt_shape.split('L_')[1].split('right')[0],sculpt_shape.split('L_')[1].split('right')[1])
print sculpt_shape,newShape
elif "R_" in sculpt_shape and "left" in sculpt_shape and "right" not in sculpt_shape:
newShape="L_{0}right{1}".format(sculpt_shape.split('R_')[1].split('left')[0],sculpt_shape.split('R_')[1].split('left')[1])
print sculpt_shape,newShape
elif "R_" in sculpt_shape and "left" not in sculpt_shape and "right" in sculpt_shape:
newShape="L_{0}left{1}".format(sculpt_shape.split('R_')[1].split('right')[0],sculpt_shape.split('R_')[1].split('right')[1])
print sculpt_shape,newShape
elif "M_" in sculpt_shape and "left" not in sculpt_shape and "right" in sculpt_shape:
newShape="{0}left{1}".format(sculpt_shape.split('right')[0],sculpt_shape.split('right')[1])
print sculpt_shape,newShape
elif "M_" in sculpt_shape and "left" in sculpt_shape and "right" not in sculpt_shape:
newShape="{0}right{1}".format(sculpt_shape.split('left')[0],sculpt_shape.split('left')[1])
print sculpt_shape,newShape
else:
newShape="Mirrored_{0}".format(sculpt_shape)
print sculpt_shape,newShape
#Create Wrap and Negative shape
cmds.duplicate(base_shape, name="baseWrap")
cmds.duplicate(base_shape, name="baseScaleNeg")
#Flip Scale
cmds.setAttr("baseScaleNeg.scaleX", -1)
            #Blend Sculpted shape to flipped shape
cmds.blendShape(sculpt_shape, 'baseScaleNeg', name='TempBlend')
#Create Wrap between wrap shape and Neg Shape
cmds.select(cl=True)
cmds.select('baseWrap')
cmds.select('baseScaleNeg', add=True)
cmds.CreateWrap()
cmds.select(cl=True)
cmds.setAttr("wrap1.exclusiveBind", 1)
            #Now turn on our Negated blendShape
cmds.setAttr("TempBlend."+sculpt_shape, 1)
#Duplicate Wrapped shape for final result
new_shape = cmds.duplicate('baseWrap', name=newShape)
if cmds.listRelatives(new_shape,p=True):
cmds.parent(new_shape,w=True)
#Clean up setup
cmds.delete('baseWrap', 'baseScaleNeg')
mirrorSelectedShapes().main()
| {
"content_hash": "7d3e96d5b70e93cc0d077c99bed94c45",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 139,
"avg_line_length": 50.42857142857143,
"alnum_prop": 0.5599622285174694,
"repo_name": "aaronfang/personal_scripts",
"id": "ddced5427588abd0e23fbb05a44ccd4115f4628b",
"size": "5295",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "af_scripts/blendshapes/mirrorSelectedShapes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mathematica",
"bytes": "319303"
},
{
"name": "Python",
"bytes": "154066"
}
],
"symlink_target": ""
} |
def extractfgiLaNTranslations(item):
"""
# 'fgiLaN translations'
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
if 'kimi no na wa' in item['tags']:
return buildReleaseMessageWithType(item, 'kimi no na wa', vol, chp, frag=frag, postfix=postfix)
if 'shuumatsu nani shitemasu ka? isogashii desu ka? sukutte moratte ii desu ka?' in item['tags']:
return buildReleaseMessageWithType(item, 'shuumatsu nani shitemasu ka? isogashii desu ka? sukutte moratte ii desu ka?', vol, chp, frag=frag, postfix=postfix)
return False
| {
"content_hash": "79977b351a5509608549aa536e99ba65",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 159,
"avg_line_length": 53.083333333333336,
"alnum_prop": 0.7362637362637363,
"repo_name": "fake-name/ReadableWebProxy",
"id": "c2c8f69ff2b0dc123b9043c546e37657c29bc5d6",
"size": "637",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractfgiLaNTranslations.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
"""
Created on Fri Aug 29 12:07:20 2014
@author: ecksjoh
"""
from pptx import Presentation
from pptx.util import Inches, Px
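# Note: Px does not appear in current python-pptx releases (pptx.util documents
# Emu, Inches, Cm, Mm and Pt), so this import presumably targets an early version
# of the library and may need adjusting on newer installs.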
img_path = 'monty-truth.png'
prs = Presentation()
blank_slide_layout = prs.slide_layouts[6]
slide = prs.slides.add_slide(blank_slide_layout)
left = top = Inches(1)
pic = slide.shapes.add_picture(img_path, left, top)
left = Inches(5)
width = Px(280)
height = int(width*1.427)
pic = slide.shapes.add_picture(img_path, left, top, width, height)
prs.save('test.pptx')
| {
"content_hash": "4679eaaa6b2d502e603d0a5ef59a674d",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 66,
"avg_line_length": 21.625,
"alnum_prop": 0.6782273603082851,
"repo_name": "DocBO/mubosym",
"id": "d605131028a4aaeaa9c805054c5ff697ab85b4ab",
"size": "544",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "muboreport/autodocument/autoslide.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "22697"
},
{
"name": "HTML",
"bytes": "82974"
},
{
"name": "JavaScript",
"bytes": "85775"
},
{
"name": "Python",
"bytes": "305741"
}
],
"symlink_target": ""
} |
from exceptions import CommandError, ParameterError, UnexpectedUserExit
from CommandLineUI import CommandLineUI
from MenuUI import MenuUI
import os
clear = lambda: os.system('clear') # small convenience so the call does not have to be typed out each time; clear() can be used anywhere to clear the current console. Works even in IDE consoles
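# Note: os.system('clear') assumes a Unix-like shell; on Windows the equivalent command is 'cls', so this helper is platform dependent.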
class UIHandler:
def __init__(self, commandHandler, familyInstance):
self.commandHandler = commandHandler
self.familyInstance = familyInstance
self.fakeInput()
def fakeInput(self):
"""
Add some expenses to make debugging easier
Input: None
Output: None
"""
self.commandHandler.execute(("add 1000 dog"), self.familyInstance.getExpensesInstance())
self.commandHandler.execute(("add 1000 pet"), self.familyInstance.getExpensesInstance())
self.commandHandler.execute(("insert 1 1999 pet"), self.familyInstance.getExpensesInstance())
self.commandHandler.execute(("insert 3 1999 dog"), self.familyInstance.getExpensesInstance())
self.commandHandler.execute(("insert 5 80 dog"), self.familyInstance.getExpensesInstance())
self.commandHandler.execute(("insert 2 1999 pet"), self.familyInstance.getExpensesInstance())
self.commandHandler.execute(("insert 14 1999 dog"), self.familyInstance.getExpensesInstance())
def launchMenu(self):
"""
UI entry point, will let the user select between a menu or command line
Input: None
Output: None
"""
print('{:*^30}'.format('Family Spending Manager'))
while True:
try:
inpt = self._handleInput("1. For haxor command line\n2. For interactive menu\n> ")
option = int(inpt)
if option == 1:
commandLineHandler = CommandLineUI(self.commandHandler, self.familyInstance)
commandLineHandler.run()
elif option == 2:
menuHandler = MenuUI(self.commandHandler, self.familyInstance)
menuHandler.run()
else:
raise CommandError('Invalid option')
except UnexpectedUserExit: # except this first so user can exit even in this part
print("User Exit - Bye Bye")
break # if this is removed, user can come back to this menu from anywhere
except (ValueError, CommandError) as ex:
print('[MainMenu]:', ex)
def _handleInput(self, msg = None):
"""
Wrapper for input, handles exit anywhere in the program
Input: msg - Optional message to show when asking for input
Output: The input from the user
Raises: UnexpectedUserExit on 'exit' input
"""
if msg is None:
inpt = input("> ")
else:
inpt = input(str(msg))
if inpt.lower().strip() == 'exit':
raise UnexpectedUserExit(0)
        return inpt
| {
"content_hash": "9b73bac04f2462f005f72fa70f734499",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 158,
"avg_line_length": 46.65151515151515,
"alnum_prop": 0.6027931146476129,
"repo_name": "Zephyrrus/ubb",
"id": "84d5cf63782a0b17369a7a564630053ef3a36510",
"size": "3079",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "YEAR 1/SEM1/FP/LAB/l2-l5/UIHandler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "96"
},
{
"name": "Assembly",
"bytes": "24190"
},
{
"name": "Batchfile",
"bytes": "80"
},
{
"name": "C",
"bytes": "504974"
},
{
"name": "C#",
"bytes": "116117"
},
{
"name": "C++",
"bytes": "406145"
},
{
"name": "CMake",
"bytes": "116836"
},
{
"name": "CSS",
"bytes": "507511"
},
{
"name": "Common Lisp",
"bytes": "4926"
},
{
"name": "Dockerfile",
"bytes": "601"
},
{
"name": "HTML",
"bytes": "774629"
},
{
"name": "Hack",
"bytes": "1348"
},
{
"name": "Java",
"bytes": "225193"
},
{
"name": "JavaScript",
"bytes": "1323357"
},
{
"name": "Kotlin",
"bytes": "80576"
},
{
"name": "M",
"bytes": "812"
},
{
"name": "MATLAB",
"bytes": "14300"
},
{
"name": "Makefile",
"bytes": "62922"
},
{
"name": "PHP",
"bytes": "26576"
},
{
"name": "PLSQL",
"bytes": "3270"
},
{
"name": "PLpgSQL",
"bytes": "73862"
},
{
"name": "Perl 6",
"bytes": "324"
},
{
"name": "Prolog",
"bytes": "5214"
},
{
"name": "Python",
"bytes": "315759"
},
{
"name": "QMake",
"bytes": "5282"
},
{
"name": "Shell",
"bytes": "4089"
},
{
"name": "TSQL",
"bytes": "79222"
},
{
"name": "XSLT",
"bytes": "1953"
},
{
"name": "Yacc",
"bytes": "1718"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import numpy as np
np.random.seed(1234) # for reproducibility?
# specifying the gpu to use
# import theano.sandbox.cuda
# theano.sandbox.cuda.use('gpu1')
import theano
import theano.tensor as T
import binary_net
import lasagne
import lasagne.objectives as LO
from pylearn2.datasets.cifar10 import CIFAR10
from collections import OrderedDict
oneHot=True
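# When oneHot is True the targets are turned into one-hot vectors rescaled to {-1, +1}
# and the final layer uses an identity nonlinearity (squared hinge loss); when False the
# targets stay integer class labels and the final layer uses softmax with categorical
# cross-entropy (see main() and buildCNN() below).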
def main():
# BN parameters
batch_size = 200
print("batch_size = " + str(batch_size))
# alpha is the exponential moving average factor
alpha = .1
print("alpha = " + str(alpha))
epsilon = 1e-4
print("epsilon = " + str(epsilon))
# BinaryOut
activation = binary_net.binary_tanh_unit
print("activation = binary_net.binary_tanh_unit")
# activation = binary_net.binary_sigmoid_unit
# print("activation = binary_net.binary_sigmoid_unit")
# BinaryConnect
binary = True
print("binary = " + str(binary))
stochastic = False
print("stochastic = " + str(stochastic))
# (-H,+H) are the two binary values
# H = "Glorot"
H = 1.
print("H = " + str(H))
# W_LR_scale = 1.
W_LR_scale = "Glorot" # "Glorot" means we are using the coefficients from Glorot's paper
print("W_LR_scale = " + str(W_LR_scale))
# Training parameters
num_epochs = 500
print("num_epochs = " + str(num_epochs))
# Decaying LR
LR_start = 0.01
print("LR_start = " + str(LR_start))
LR_fin = 0.0000003
print("LR_fin = " + str(LR_fin))
LR_decay = (LR_fin / LR_start) ** (1. / num_epochs)
print("LR_decay = " + str(LR_decay))
    # BTW, LR decay might be good for the BN moving average...
train_set_size = 45000
print("train_set_size = " + str(train_set_size))
shuffle_parts = 1
print("shuffle_parts = " + str(shuffle_parts))
print('\nLoading CIFAR-10 dataset...')
train_set = CIFAR10(which_set="train", start=0, stop=train_set_size)
valid_set = CIFAR10(which_set="train", start=train_set_size, stop=50000)
test_set = CIFAR10(which_set="test")
# bc01 format
# Inputs in the range [-1,+1]
# print("Inputs in the range [-1,+1]")
train_set.X = np.reshape(np.subtract(np.multiply(2. / 255., train_set.X), 1.), (-1, 3, 32, 32))
valid_set.X = np.reshape(np.subtract(np.multiply(2. / 255., valid_set.X), 1.), (-1, 3, 32, 32))
test_set.X = np.reshape(np.subtract(np.multiply(2. / 255., test_set.X), 1.), (-1, 3, 32, 32))
# flatten targets
train_set.y = np.hstack(train_set.y)
valid_set.y = np.hstack(valid_set.y)
test_set.y = np.hstack(test_set.y)
if oneHot:
# Onehot the targets
train_set.y = np.float32(np.eye(10)[train_set.y])
valid_set.y = np.float32(np.eye(10)[valid_set.y])
test_set.y = np.float32(np.eye(10)[test_set.y])
# for hinge loss
train_set.y = 2 * train_set.y - 1.
valid_set.y = 2 * valid_set.y - 1.
test_set.y = 2 * test_set.y - 1.
else:
train_set.y = np.int32(train_set.y)
valid_set.y = np.int32(valid_set.y)
test_set.y = np.int32(test_set.y)
#import pdb;pdb.set_trace()
print('\nBuilding the CNN...')
# Prepare Theano variables for inputs and targets
input = T.tensor4('inputs')
if oneHot: target = T.matrix('targets')
else: target = T.ivector('targets')
LR = T.scalar('LR', dtype=theano.config.floatX)
cnn = buildCNN(dataType='cifar10', networkType='cifar10', oneHot=oneHot, input=input, epsilon=epsilon, alpha=alpha, activation=activation, binary=binary, stochastic=stochastic, H=H, W_LR_scale=W_LR_scale)
train_output = lasagne.layers.get_output(cnn, deterministic=False)
# squared hinge loss
if oneHot: loss = T.mean(T.sqr(T.maximum(0., 1. - target * train_output)))
else: loss = LO.categorical_crossentropy(train_output, target); loss = loss.mean()
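    # The squared hinge loss relies on the {-1, +1} one-hot targets set up above: each class
    # score is pushed past its margin and mean(sqr(max(0, 1 - target * output))) penalises
    # violations quadratically. The cross-entropy branch instead expects softmax outputs
    # and integer labels (see the denseOut choice in buildCNN).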
# W updates
W = lasagne.layers.get_all_params(cnn, binary=True)
W_grads = binary_net.compute_grads(loss, cnn)
updates = lasagne.updates.adam(loss_or_grads=W_grads, params=W, learning_rate=LR)
updates = binary_net.clipping_scaling(updates, cnn)
# other parameters updates
params = lasagne.layers.get_all_params(cnn, trainable=True, binary=False)
    updates = OrderedDict(
        list(updates.items()) + list(lasagne.updates.adam(loss_or_grads=loss, params=params, learning_rate=LR).items()))  # list(...) keeps the concatenation valid on both Python 2 and 3
test_output = lasagne.layers.get_output(cnn, deterministic=True)
if oneHot:
test_loss = T.mean(T.sqr(T.maximum(0., 1. - target * test_output)))
test_err = T.mean(T.neq(T.argmax(test_output, axis=1), T.argmax(target, axis=1)), dtype=theano.config.floatX)
else:
test_loss = LO.categorical_crossentropy(test_output, target); test_loss = test_loss.mean()
        test_err = T.mean(T.neq(T.argmax(test_output, axis=1), target), dtype=theano.config.floatX)  # target is already a vector of class indices here
# Compile a function performing a training step on a mini-batch (by giving the updates dictionary)
# and returning the corresponding training loss:
train_fn = theano.function([input, target, LR], loss, updates=updates)
# Compile a second function computing the validation loss and accuracy:
val_fn = theano.function([input, target], [test_loss, test_err])
print('Training...')
binary_net.train(
train_fn, val_fn,
cnn,
batch_size,
LR_start, LR_decay,
num_epochs,
train_set.X, train_set.y,
valid_set.X, valid_set.y,
test_set.X, test_set.y,
shuffle_parts=shuffle_parts)
def buildCNN(networkType, dataType, oneHot, input, epsilon, alpha, activation, binary, stochastic, H, W_LR_scale):
if oneHot: denseOut = lasagne.nonlinearities.identity
else: denseOut = lasagne.nonlinearities.softmax
print(denseOut)
if dataType == 'TCDTIMIT':
nbClasses = 39
cnn = lasagne.layers.InputLayer(
shape=(None, 1, 120, 120),
input_var=input)
elif dataType == 'cifar10':
nbClasses = 10
cnn = lasagne.layers.InputLayer(
shape=(None, 3, 32, 32),
input_var=input)
if networkType == 'google':
# conv 1
cnn = binary_net.Conv2DLayer(
cnn,
binary=binary,
stochastic=stochastic,
H=H,
W_LR_scale=W_LR_scale,
num_filters=128,
filter_size=(3, 3),
pad=1,
nonlinearity=lasagne.nonlinearities.identity)
cnn = lasagne.layers.MaxPool2DLayer(cnn, pool_size=(2, 2))
cnn = lasagne.layers.BatchNormLayer(
cnn,
epsilon=epsilon,
alpha=alpha)
cnn = lasagne.layers.NonlinearityLayer(
cnn,
nonlinearity=activation)
# conv 2
cnn = binary_net.Conv2DLayer(
cnn,
binary=binary,
stochastic=stochastic,
H=H,
W_LR_scale=W_LR_scale,
num_filters=256,
filter_size=(3, 3),
stride=(2, 2),
pad=1,
nonlinearity=lasagne.nonlinearities.identity)
cnn = lasagne.layers.MaxPool2DLayer(cnn, pool_size=(2, 2))
cnn = lasagne.layers.BatchNormLayer(
cnn,
epsilon=epsilon,
alpha=alpha)
cnn = lasagne.layers.NonlinearityLayer(
cnn,
nonlinearity=activation)
# conv3
cnn = binary_net.Conv2DLayer(
cnn,
binary=binary,
stochastic=stochastic,
H=H,
W_LR_scale=W_LR_scale,
num_filters=512,
filter_size=(3, 3),
pad=1,
nonlinearity=lasagne.nonlinearities.identity)
cnn = lasagne.layers.NonlinearityLayer(
cnn,
nonlinearity=activation)
# conv 4
cnn = binary_net.Conv2DLayer(
cnn,
binary=binary,
stochastic=stochastic,
H=H,
W_LR_scale=W_LR_scale,
num_filters=512,
filter_size=(3, 3),
pad=1,
nonlinearity=lasagne.nonlinearities.identity)
cnn = lasagne.layers.NonlinearityLayer(
cnn,
nonlinearity=activation)
# conv 5
cnn = binary_net.Conv2DLayer(
cnn,
binary=binary,
stochastic=stochastic,
H=H,
W_LR_scale=W_LR_scale,
num_filters=512,
filter_size=(3, 3),
pad=1,
nonlinearity=lasagne.nonlinearities.identity)
cnn = lasagne.layers.MaxPool2DLayer(cnn, pool_size=(2, 2))
cnn = lasagne.layers.NonlinearityLayer(
cnn,
nonlinearity=activation)
# FC layer
cnn = binary_net.DenseLayer(
cnn,
binary=binary,
stochastic=stochastic,
H=H,
W_LR_scale=W_LR_scale,
nonlinearity=denseOut, # TODO was identity
num_units=nbClasses)
elif networkType == 'cifar10':
# 128C3-128C3-P2
# 128C3-128C3-P2
cnn = binary_net.Conv2DLayer(
cnn,
binary=binary,
stochastic=stochastic,
H=H,
W_LR_scale=W_LR_scale,
num_filters=128,
filter_size=(3, 3),
pad=1,
nonlinearity=lasagne.nonlinearities.identity)
cnn = lasagne.layers.BatchNormLayer(
cnn,
epsilon=epsilon,
alpha=alpha)
cnn = lasagne.layers.NonlinearityLayer(
cnn,
nonlinearity=activation)
cnn = binary_net.Conv2DLayer(
cnn,
binary=binary,
stochastic=stochastic,
H=H,
W_LR_scale=W_LR_scale,
num_filters=128,
filter_size=(3, 3),
pad=1,
nonlinearity=lasagne.nonlinearities.identity)
cnn = lasagne.layers.MaxPool2DLayer(cnn, pool_size=(2, 2))
cnn = lasagne.layers.BatchNormLayer(
cnn,
epsilon=epsilon,
alpha=alpha)
cnn = lasagne.layers.NonlinearityLayer(
cnn,
nonlinearity=activation)
# 256C3-256C3-P2
cnn = binary_net.Conv2DLayer(
cnn,
binary=binary,
stochastic=stochastic,
H=H,
W_LR_scale=W_LR_scale,
num_filters=256,
filter_size=(3, 3),
pad=1,
nonlinearity=lasagne.nonlinearities.identity)
cnn = lasagne.layers.BatchNormLayer(
cnn,
epsilon=epsilon,
alpha=alpha)
cnn = lasagne.layers.NonlinearityLayer(
cnn,
nonlinearity=activation)
cnn = binary_net.Conv2DLayer(
cnn,
binary=binary,
stochastic=stochastic,
H=H,
W_LR_scale=W_LR_scale,
num_filters=256,
filter_size=(3, 3),
pad=1,
nonlinearity=lasagne.nonlinearities.identity)
cnn = lasagne.layers.MaxPool2DLayer(cnn, pool_size=(2, 2))
cnn = lasagne.layers.BatchNormLayer(
cnn,
epsilon=epsilon,
alpha=alpha)
cnn = lasagne.layers.NonlinearityLayer(
cnn,
nonlinearity=activation)
# 512C3-512C3-P2
cnn = binary_net.Conv2DLayer(
cnn,
binary=binary,
stochastic=stochastic,
H=H,
W_LR_scale=W_LR_scale,
num_filters=512,
filter_size=(3, 3),
pad=1,
nonlinearity=lasagne.nonlinearities.identity)
cnn = lasagne.layers.BatchNormLayer(
cnn,
epsilon=epsilon,
alpha=alpha)
cnn = lasagne.layers.NonlinearityLayer(
cnn,
nonlinearity=activation)
cnn = binary_net.Conv2DLayer(
cnn,
binary=binary,
stochastic=stochastic,
H=H,
W_LR_scale=W_LR_scale,
num_filters=512,
filter_size=(3, 3),
pad=1,
nonlinearity=lasagne.nonlinearities.identity)
cnn = lasagne.layers.MaxPool2DLayer(cnn, pool_size=(2, 2))
cnn = lasagne.layers.BatchNormLayer(
cnn,
epsilon=epsilon,
alpha=alpha)
cnn = lasagne.layers.NonlinearityLayer(
cnn,
nonlinearity=activation)
# print(cnn.output_shape)
# 1024FP-1024FP-10FP
cnn = binary_net.DenseLayer(
cnn,
binary=binary,
stochastic=stochastic,
H=H,
W_LR_scale=W_LR_scale,
nonlinearity=lasagne.nonlinearities.identity,
num_units=1024)
cnn = lasagne.layers.BatchNormLayer(
cnn,
epsilon=epsilon,
alpha=alpha)
cnn = lasagne.layers.NonlinearityLayer(
cnn,
nonlinearity=activation)
cnn = binary_net.DenseLayer(
cnn,
binary=binary,
stochastic=stochastic,
H=H,
W_LR_scale=W_LR_scale,
nonlinearity=lasagne.nonlinearities.identity,
num_units=1024)
cnn = lasagne.layers.BatchNormLayer(
cnn,
epsilon=epsilon,
alpha=alpha)
cnn = lasagne.layers.NonlinearityLayer(
cnn,
nonlinearity=activation)
cnn = binary_net.DenseLayer(
cnn,
binary=binary,
stochastic=stochastic,
H=H,
W_LR_scale=W_LR_scale,
nonlinearity=denseOut,
num_units=nbClasses)
return cnn
if __name__ == "__main__":
    main()
| {
"content_hash": "90243516ac95862d71e3cedc56b785bd",
"timestamp": "",
"source": "github",
"line_count": 462,
"max_line_length": 208,
"avg_line_length": 32.722943722943725,
"alnum_prop": 0.5140230189178463,
"repo_name": "matthijsvk/multimodalSR",
"id": "fff68b2690738ecd5fbc4f1c4918eb669b743481",
"size": "15118",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/lipreading/binary/cifar10.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3372053"
},
{
"name": "C++",
"bytes": "159084"
},
{
"name": "CSS",
"bytes": "1594827"
},
{
"name": "Cuda",
"bytes": "24531"
},
{
"name": "JavaScript",
"bytes": "4924"
},
{
"name": "Jupyter Notebook",
"bytes": "15270096"
},
{
"name": "M4",
"bytes": "7412"
},
{
"name": "Makefile",
"bytes": "62415"
},
{
"name": "Matlab",
"bytes": "54469"
},
{
"name": "Perl",
"bytes": "130963"
},
{
"name": "Python",
"bytes": "3811156"
},
{
"name": "Shell",
"bytes": "42206"
},
{
"name": "TeX",
"bytes": "1433023"
}
],
"symlink_target": ""
} |
"""253. Meeting Rooms II
https://leetcode.com/problems/meeting-rooms-ii/
Given an array of meeting time intervals intervals where
intervals[i] = [start_i, end_i], return the minimum number
of conference rooms required.
Example 1:
Input: intervals = [[0,30],[5,10],[15,20]]
Output: 2
Example 2:
Input: intervals = [[7,10],[2,4]]
Output: 1
Constraints:
1 <= intervals.length <= 10^4
0 <= start_i < end_i <= 10^6
"""
from typing import List
class Solution:
def min_meeting_rooms(self, intervals: List[List[int]]) -> int:
intervals.sort(key=lambda x: (x[0]))
cnt = 0
used = [0] * len(intervals)
for i in range(len(intervals)):
if not used[i]:
cur_end = intervals[i][1]
for j in range(i + 1, len(intervals)):
if not used[j] and cur_end <= intervals[j][0]:
cnt += 1
cur_end = intervals[j][1]
used[j] = 1
return len(intervals) - cnt
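
# A couple of sanity checks for the greedy solution above, using the two examples
# from the docstring (expected answers 2 and 1). Below them is a sketch of a common
# alternative: the O(n log n) min-heap of end times. It is an editorial illustration
# under that assumption, not part of the original solution.
import heapq


def min_meeting_rooms_heap(intervals: List[List[int]]) -> int:
    """Sort by start time and keep a min-heap of room end times.

    The heap's final size is the number of rooms that had to be open at once.
    """
    ends: List[int] = []
    for start, end in sorted(intervals):
        if ends and ends[0] <= start:
            heapq.heapreplace(ends, end)  # reuse the room that frees up earliest
        else:
            heapq.heappush(ends, end)     # no free room, open a new one
    return len(ends)


if __name__ == "__main__":
    solver = Solution()
    assert solver.min_meeting_rooms([[0, 30], [5, 10], [15, 20]]) == 2
    assert solver.min_meeting_rooms([[7, 10], [2, 4]]) == 1
    assert min_meeting_rooms_heap([[0, 30], [5, 10], [15, 20]]) == 2
    assert min_meeting_rooms_heap([[7, 10], [2, 4]]) == 1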
| {
"content_hash": "222af29c6084d1a75c2da253d9190480",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 67,
"avg_line_length": 25.974358974358974,
"alnum_prop": 0.5567620927936822,
"repo_name": "isudox/leetcode-solution",
"id": "e2a07b7519bdae7f2212dfc695e09c79c30969f1",
"size": "1014",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python-algorithm/leetcode/problem_253.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Groovy",
"bytes": "16121"
},
{
"name": "Java",
"bytes": "118043"
},
{
"name": "Python",
"bytes": "151015"
}
],
"symlink_target": ""
} |