'''
REST Server For DataBundle Libraries.
'''
from bottle import get, put, post, request, response #@UnresolvedImport
from bottle import HTTPResponse, static_file, install #@UnresolvedImport
from bottle import ServerAdapter, server_names #@UnresolvedImport
from decorator import decorator #@UnresolvedImport
import databundles.library
import databundles.run
import databundles.util
from databundles.bundle import DbBundle
import logging
import os
import databundles.client.exceptions as exc
# This might get changed, as in test_run
run_config = databundles.run.RunConfig()
library_name = 'default'
logger = databundles.util.get_logger(__name__)
logger.setLevel(logging.INFO)
def get_library_config(name=None):
global library_name
if name is not None:
library_name = name
cfg = run_config.library.get(library_name)
if not cfg:
raise Exception("Failed to get exception for name {} ".format(library_name))
return cfg
def get_library(name=None):
'''Return the library. In a function to defer execution, so the
run_config variable can be altered before it is called. '''
global library_name
# Originally we cached the library here, but the library holds open a
# sqlite database connection, which isn't thread-safe. Creating the library
# per request lets us use a multi-threaded server.
# Of course, then you have concurrency problems with sqlite ....
if name is not None:
library_name = name
return databundles.library._get_library(run_config, library_name)
def make_exception_response(e):
import sys
import traceback
(exc_type, exc_value, exc_traceback) = sys.exc_info() #@UnusedVariable
tb_list = traceback.format_list(traceback.extract_tb(sys.exc_info()[2]))
return {'exception':
{'class':e.__class__.__name__,
'args':e.args,
'trace': "\n".join(tb_list)
}
}
def _CaptureException(f, *args, **kwargs):
'''Decorator implementation for capturing exceptions '''
try:
r = f(*args, **kwargs)
except Exception as e:
r = make_exception_response(e)
return r
def CaptureException(f, *args, **kwargs):
'''Decorator to capture exceptions and convert them
to a dict that can be returned as JSON '''
return decorator(_CaptureException, f) # Preserves signature
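# Illustrative sketch (added for clarity; not part of the original module):
# applying the decorator to a route handler turns uncaught exceptions into the
# JSON-serializable dict built by make_exception_response() above. Roughly:
#
#   @get('/example')            # hypothetical route, for illustration only
#   @CaptureException
#   def get_example():
#       raise ValueError("boom")
#
#   # A client calling /example would then receive something like:
#   #   {"exception": {"class": "ValueError",
#   #                  "args": ["boom"],
#   #                  "trace": "...formatted traceback..."}}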
class AllJSONPlugin(object):
'''A copy of the bottle JSONPlugin, but this one tries to convert
all objects to json '''
from json import dumps as json_dumps
name = 'json'
api = 2
def __init__(self, json_dumps=json_dumps):
self.json_dumps = json_dumps
def apply(self, callback, context):
dumps = self.json_dumps
if not dumps: return callback
def wrapper(*a, **ka):
rv = callback(*a, **ka)
if isinstance(rv, HTTPResponse ):
return rv
#Attempt to serialize, raises exception on failure
try:
json_response = dumps(rv)
except Exception as e:
r = make_exception_response(e)
json_response = dumps(r)
#Set content type only if serialization successful
response.content_type = 'application/json'
return json_response
return wrapper
install(AllJSONPlugin())
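# Note (added comment): bottle's install() registers the plugin globally, so
# every route defined below has its return value run through AllJSONPlugin --
# plain dicts/lists are serialized to JSON, HTTPResponse objects (e.g. from
# static_file) pass through untouched, and serialization failures are
# converted into the exception dict described above.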
@get('/datasets')
def get_datasets():
'''Return all of the dataset identities, as a dict,
indexed by id'''
return { i.id_ : i.to_dict() for i in get_library().datasets}
@get('/datasets/find/<term>')
def get_datasets_find(term):
'''Find a partition or data bundle with an id or name term '''
dataset, partition = get_library().get_ref(term)
if dataset is False:
return False
return {
'dataset' : dataset.identity.to_dict(),
'dataset_local': os.path.exists(dataset.identity.cache_key),
'partition' : partition.identity.to_dict() if partition else None,
'partition_local' :os.path.exists(partition.identity.cache_key) if partition else None,
}
@post('/datasets/find')
def post_datasets_find():
'''Post a QueryCommand to search the library. '''
from databundles.library import QueryCommand
q = request.json
bq = QueryCommand(q)
db_query = get_library().find(bq)
results = db_query.all() #@UnusedVariable
out = []
for r in results:
if isinstance(r, tuple):
e = { 'dataset': r.Dataset.identity.to_dict(),
'partition': r.Partition.identity.to_dict() if hasattr(r,'Partition') else None
}
else:
e = { 'dataset': {'id_': r.Dataset.id_, 'name': r.Dataset.name},
'partition': None
}
out.append(e)
return out
def _get_dataset_partition_record(did, pid):
from databundles.identity import ObjectNumber, DatasetNumber, PartitionNumber
don = ObjectNumber.parse(did)
if not don or not isinstance(don, DatasetNumber):
raise exc.BadRequest('Dataset number {} is not valid'.format(did))
pon = ObjectNumber.parse(pid)
if not pon or not isinstance(pon, PartitionNumber):
raise exc.BadRequest('Partition number {} is not valid'.format(pid))
if str(pon.dataset) != str(don):
raise exc.BadRequest('Partition number {} does not belong to dataset {}'.format(pid, did))
gr = get_library().get(did)
# Need to read the file early, otherwise exceptions here
# will result in the client's socket disconnecting.
if not gr:
raise exc.NotFound('No dataset for id: {}'.format(did))
bundle = gr.bundle
partition = bundle.partitions.get(pid)
return bundle,partition
def _read_body(request):
# Really important to only call request.body once! The property method isn't
# idempotent!
import zlib
import uuid # For a random filename.
import tempfile
tmp_dir = tempfile.gettempdir()
#tmp_dir = '/tmp'
file_ = os.path.join(tmp_dir,'rest-downloads',str(uuid.uuid4())+".db")
if not os.path.exists(os.path.dirname(file_)):
os.makedirs(os.path.dirname(file_))
body = request.body # Property accessor
# This method can receive data either compressed or not, and determines
# which from the magic number at the head of the data.
data_type = databundles.util.bundle_file_type(body)
decomp = zlib.decompressobj(16+zlib.MAX_WBITS) # http://stackoverflow.com/a/2424549/1144479
if not data_type:
raise Exception("Bad data type: not compressed nor sqlite")
# Read the file directly from the network, writing it to the temp file,
# and uncompressing it if it is compressed.
with open(file_,'wb') as f:
chunksize = 8192
chunk = body.read(chunksize) #@UndefinedVariable
while chunk:
if data_type == 'gzip':
f.write(decomp.decompress(chunk))
else:
f.write(chunk)
chunk = body.read(chunksize) #@UndefinedVariable
return file_
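# Background note (added for clarity): the "magic number" check above relies
# on well-known file signatures -- gzip streams start with the bytes
# 0x1f 0x8b, while SQLite database files start with the ASCII header
# "SQLite format 3\0". bundle_file_type() presumably inspects the first few
# bytes of the stream to tell the two apart; the exact contract isn't shown
# here, but from its use above it returns 'gzip' for compressed payloads and
# a different truthy value for plain sqlite files.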
@put('/datasets/<did>')
#@CaptureException
def put_dataset(did):
'''Store a bundle, calling put() on the bundle file in the Library.
:param did: A dataset id string. Must be parsable as a `DatasetNumber` value.
:type did: string
:param payload: The bundle database file (the request body), which may be compressed.
:type payload: binary
'''
from databundles.identity import ObjectNumber, DatasetNumber
import stat
try:
cf = _read_body(request)
size = os.stat(cf).st_size
if size == 0:
raise exc.BadRequest("Got a zero size dataset file")
if not os.path.exists(cf):
raise exc.BadRequest("Non existent file")
# Now we have the bundle in cf. Stick it in the library.
# We're doing these exceptions here because if we don't read the body,
# the client will get an error when the socket closes.
try:
on = ObjectNumber.parse(did)
except ValueError:
raise exc.BadRequest("Unparse dataset id: {}".format(did))
if not isinstance(on, DatasetNumber):
raise exc.BadRequest("Bad dataset id, not for a dataset: {}".format(did))
# Is this a partition or a bundle?
try:
tb = DbBundle(cf)
type = tb.db_config.info.type
except Exception as e:
logger.error("Failed to access database: {}".format(cf))
raise
if type == 'partition':
raise exc.BadRequest("Bad data type: Got a partition")
if tb.identity.id_ != did:
raise exc.BadRequest("Bad request. Dataset id of URL doesn't match payload. {} != {}".format(did, tb.identity.id_))
library_path, rel_path, url = get_library().put(tb) #@UnusedVariable
identity = tb.identity
# if that worked, OK to remove the temporary file.
finally :
pass
#os.remove(cf)
r = identity.to_dict()
r['url'] = url
return r
@get('/datasets/<did>')
@CaptureException
def get_dataset_bundle(did):
'''Get a bundle database file, given an id or name
Args:
id The Name or id of the dataset bundle.
May be for a bundle or partition
'''
bp = get_library().get(did)
if bp is False:
raise Exception("Didn't find dataset for id: {} ".format(did))
return static_file(bp.bundle.database.path, root='/', mimetype="application/octet-stream")
@get('/datasets/:did/info')
def get_dataset_info(did):
'''Return the complete record for a dataset, including
the schema and all partitions. '''
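# NOTE (added comment): this handler currently has only a docstring and no
# body, so it returns None (serialized as JSON null by AllJSONPlugin).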
@get('/datasets/<did>/partitions')
@CaptureException
def get_dataset_partitions_info(did):
''' GET /dataset/:did/partitions'''
gr = get_library().get(did)
if not gr:
raise exc.NotFound("Failed to find dataset for {}".format(did))
out = {}
for partition in gr.bundle.partitions:
out[partition.id_] = partition.to_dict()
return out
@get('/datasets/<did>/partitions/<pid>')
@CaptureException
def get_dataset_partitions(did, pid):
'''Return a partition for a dataset'''
dataset, partition = _get_dataset_partition_record(did, pid)
return static_file(partition.database.path, root='/', mimetype="application/octet-stream")
@put('/datasets/<did>/partitions/<pid>')
@CaptureException
def put_datasets_partitions(did, pid):
'''Store a partition database file for a dataset.
:param did: A dataset id string.
:param pid: A partition id string.
'''
payload_file = None # Guard so the finally block works even if _read_body() fails
try:
payload_file = _read_body(request)
dataset, partition = _get_dataset_partition_record(did, pid) #@UnusedVariable
library_path, rel_path, url = get_library().put_file(partition.identity, payload_file) #@UnusedVariable
finally:
if payload_file and os.path.exists(payload_file):
os.remove(payload_file)
r = partition.identity.to_dict()
r['url'] = url
return r
#### Test Code
@get('/test/echo/<arg>')
def get_test_echo(arg):
'''just echo the argument'''
return (arg, dict(request.query.items()))
@put('/test/echo')
def put_test_echo():
'''just echo the argument'''
return (request.json, dict(request.query.items()))
@get('/test/exception')
@CaptureException
def get_test_exception():
'''Throw an exception'''
raise Exception("throws exception")
@put('/test/exception')
@CaptureException
def put_test_exception():
'''Throw an exception'''
raise Exception("throws exception")
@get('/test/isdebug')
def get_test_isdebug():
'''Return True if the server is running and in debug mode'''
try:
global stoppable_wsgi_server_run
if stoppable_wsgi_server_run is True:
return True
else:
return False
except NameError:
return False
@post('/test/close')
@CaptureException
def get_test_close():
'''Close the server'''
global stoppable_wsgi_server_run
if stoppable_wsgi_server_run is not None:
print "SERVER CLOSING"
stoppable_wsgi_server_run = False
return True
else:
raise exc.NotAuthorized("Not in debug mode, won't close")
class StoppableWSGIRefServer(ServerAdapter):
'''A server that can be stopped by setting the module variable
stoppable_wsgi_server_run to false. It is primarily used for testing. '''
def run(self, handler): # pragma: no cover
global stoppable_wsgi_server_run
stoppable_wsgi_server_run = True
from wsgiref.simple_server import make_server, WSGIRequestHandler
if self.quiet:
class QuietHandler(WSGIRequestHandler):
def log_request(*args, **kw): pass #@NoSelf
self.options['handler_class'] = QuietHandler
srv = make_server(self.host, self.port, handler, **self.options)
while stoppable_wsgi_server_run:
srv.handle_request()
server_names['stoppable'] = StoppableWSGIRefServer
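# Illustration (added; not part of the original module): with the test server
# started via test_run() below, a test client could shut it down through the
# /test/close endpoint, assuming the `requests` package is available:
#
#   import requests
#   requests.post('http://localhost:7979/test/close')
#
# The handler flips stoppable_wsgi_server_run to False, and the
# StoppableWSGIRefServer loop above exits after serving that request.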
def test_run(config=None, library_name=None):
'''Run method to be called from unit tests'''
from bottle import run, debug
# Reset the library with a different configuration. This is the module
# level library, defined at the top of the module.
if config:
global run_config
run_config = config # If this is called before get_library, will change the lib config
debug()
l = get_library(library_name) # fixate library
config = get_library_config(library_name)
port = config.get('port', 7979)
host = config.get('host', 'localhost')
logger.info("starting server on http://{}:{}".format(host, port))
return run(host=host, port=port, reloader=False, server='stoppable')
def local_run(config=None, name='default', reloader=True):
from bottle import run, debug
global stoppable_wsgi_server_run
stoppable_wsgi_server_run = None
if config:
global run_config
run_config = config # If this is called before get_library, will change the lib config
debug()
l = get_library(name) #@UnusedVariable
config = get_library_config(name)
port = config.get('port', 8080)
host = config.get('host', '0.0.0.0')
logger.info("starting server for library '{}' on http://{}:{}".format(name, host, port))
return run(host=host, port=port, reloader=reloader)
def local_debug_run(name='default'):
from bottle import run, debug
debug()
l = get_library() #@UnusedVariable
config = get_library_config(name)
port = config.get('port', 8080)
host = config.get('host', '0.0.0.0')
return run(host=host, port=port, reloader=True)
def production_run(config=None, name='default', reloader=True):
from bottle import run
if config:
global run_config
run_config = config # If this is called before get_library, will change the lib config
l = get_library(name) #@UnusedVariable
config = get_library_config(name)
port = config.get('port', 80)
host = config.get('host', '0.0.0.0')
logger.info("starting server for library '{}' on http://{}:{}".format(name, host, port))
return run(host=host, port=port, reloader=False, server='paste')
if __name__ == '__main__':
local_debug_run()
| {
"content_hash": "1f982fc48864da24eeb99b3637b1613d",
"timestamp": "",
"source": "github",
"line_count": 538,
"max_line_length": 111,
"avg_line_length": 29.449814126394052,
"alnum_prop": 0.619098712446352,
"repo_name": "treyhunner/databundles",
"id": "866bfc081cecfe3b4441e5a4479be17820c2decf",
"size": "15844",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "databundles/server/main.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "631408"
},
{
"name": "Racket",
"bytes": "295"
},
{
"name": "Shell",
"bytes": "8872"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
# Import main view
from todo.views import Home
urlpatterns = patterns('',
# Main page
url( r'^$', Home.as_view() ),
# Include API URLs
url( r'^api/', include( 'api.urls' ) ),
)
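# For reference only (not part of the original file): patterns() was removed
# in later Django releases; on Django 2.0+ an equivalent configuration would
# look roughly like this:
#
#   from django.urls import include, path
#   from todo.views import Home
#
#   urlpatterns = [
#       path('', Home.as_view()),
#       path('api/', include('api.urls')),
#   ]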
| {
"content_hash": "21073e006ff7db4e5c3c2d4133d0fb53",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 51,
"avg_line_length": 18.923076923076923,
"alnum_prop": 0.6300813008130082,
"repo_name": "AxiaCore/todomvc-django",
"id": "44d2e60207c02fd4718bf1fffa63fd0830e34f7f",
"size": "246",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "todo/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "6418"
},
{
"name": "JavaScript",
"bytes": "20139"
},
{
"name": "Python",
"bytes": "7186"
}
],
"symlink_target": ""
} |
from django.db import models
from django.utils.encoding import force_bytes
from django.utils import timezone
# dependent on icalendar package - pip install icalendar
from icalendar import Calendar, Event, vDatetime, LocalTimezone
from datetime import datetime, timedelta
import urllib.request, urllib.error, urllib.parse
import os
from csc_new import settings
import dateutil.rrule as rrule
# Create your models here.
class ExamReview(models.Model):
title = models.CharField(max_length=100)
questions = models.FileField(upload_to="exam_reviews")
answers = models.FileField(upload_to="exam_reviews")
last_modified = models.DateTimeField(auto_now=True)
def __str__(self):
return '%s' % (self.title)
def delete(self, *args, **kwargs):
os.remove(os.path.join(settings.MEDIA_ROOT, str(self.questions)))
os.remove(os.path.join(settings.MEDIA_ROOT, str(self.answers)))
super(ExamReview, self).delete(*args, **kwargs)
class GeneralMeetingSlides(models.Model):
date = models.DateField()
pdf = models.FileField(upload_to="general_meeting_slides", verbose_name="PDF")
class Meta:
verbose_name = "General Meeting Slides"
verbose_name_plural = verbose_name
def __str__(self):
return self.date.__str__()
def delete(self, *args, **kwargs):
# this is broken (the delete doesn't work; the file lingers in MEDIA_ROOT)
os.remove(os.path.join(settings.MEDIA_ROOT, str(self.pdf)))
super(GeneralMeetingSlides, self).delete(*args, **kwargs)
class Photo(models.Model):
title = models.CharField(max_length=100)
desc = models.CharField(max_length=255)
src = models.FileField(upload_to="photos")
def __str__(self):
return self.title
def delete(self, *args, **kwargs):
os.remove(os.path.join(settings.MEDIA_ROOT, str(self.src)))
super(Photo, self).delete(*args, **kwargs)
# RenderableEvent - holds an event
class RenderableEvent(models.Model):
__slots__ = ('summary', 'start_date', 'start_time', 'end_time', 'desc', 'pureTime', 'location')
def __init__(self, summ, sdate, stime, etime, d, stimePure, loc):
self.summary = summ
self.start_date = sdate
self.start_time = stime
self.end_time = etime
self.desc = d
self.pureTime = stimePure
self.location = loc
def __str__(self):
return self.summary + " " + self.start_date + " " + self.start_time + " " + self.end_time + " " + self.location
# RenderableEvents - holds all events
class RenderableEvents(models.Model):
__slots__ = ('events',)
def __init__(self):
self.events = []
def getEvents(self):
icalFile = urllib.request.urlopen(
'http://www.google.com/calendar/ical/calendar%40csc.cs.rit.edu/public/basic.ics')
ical = Calendar.from_ical(icalFile.read())
lt = LocalTimezone()
for thing in ical.walk():
eventtime = thing.get('dtstart')
if eventtime != None:
offset = lt.utcoffset(eventtime.dt)
loc = thing.get('location')
if (loc == None) or (loc == "") or (loc == "TBD"):
loc = "TBD"
if thing.name == "VEVENT" and eventtime.dt.replace(tzinfo=None) + offset > datetime.today() - timedelta(
days=1):
event = RenderableEvent(
thing.get('summary'),
(eventtime.dt.replace(tzinfo=None) + offset).strftime("%m/%d/%Y"),
(eventtime.dt.replace(tzinfo=None) + offset).strftime("%I:%M %p"),
(thing.get('dtend').dt.replace(tzinfo=None) + offset).strftime("%I:%M %p"),
thing.get('description'),
(eventtime.dt.replace(tzinfo=None) + offset),
loc)
self.events.append(event)
elif thing.name == "VEVENT" and thing.get('RRULE') is not None:
repeats = list(rrule.rrulestr(thing.get('RRULE').to_ical().decode('unicode_escape'), ignoretz=True,
dtstart=datetime.now()))
if (len(repeats) <= 0):
continue
if(thing.get('summary')=='General Meeting!'):
continue
self.events.append(
RenderableEvent(thing.get('summary'), (repeats[0].replace(tzinfo=None)).strftime("%m/%d/%Y"),
(thing.get('dtstart').dt.replace(tzinfo=None)).strftime("%I:%M %p"),
(thing.get('dtend').dt.replace(tzinfo=None)).strftime("%I:%M %p"),
thing.get('description'),
(repeats[0].replace(tzinfo=None)), loc))
# Sort events by date and time!
self.events = sorted(self.events, key=lambda renderable_event: renderable_event.pureTime)
icalFile.close()
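# Rough usage sketch (added; not part of the original file): a view would
# typically build the event list like this and hand `cal.events` to a
# template for rendering:
#
#   cal = RenderableEvents()
#   cal.getEvents()          # fetches and parses the public iCal feed
#   for ev in cal.events:    # already sorted by start time
#       print(ev)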
| {
"content_hash": "6e10ddb130ad6df2509362f43a0d9917",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 119,
"avg_line_length": 36.97037037037037,
"alnum_prop": 0.5834502103786816,
"repo_name": "zg/CSCWebsite",
"id": "15da3b807ec19c8a5445c081448e7d09396460cc",
"size": "4991",
"binary": false,
"copies": "1",
"ref": "refs/heads/staging",
"path": "csc_new/pages/models.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5866"
},
{
"name": "HTML",
"bytes": "15853"
},
{
"name": "JavaScript",
"bytes": "4204"
},
{
"name": "Python",
"bytes": "24573"
}
],
"symlink_target": ""
} |
from pythonforandroid.toolchain import Recipe, current_directory, shprint
import sh
import os.path
class LibpqRecipe(Recipe):
version = '9.5.3'
url = 'http://ftp.postgresql.org/pub/source/v{version}/postgresql-{version}.tar.bz2'
depends = []
def should_build(self, arch):
return not os.path.isfile('{}/libpq.a'.format(self.ctx.get_libs_dir(arch.arch)))
def build_arch(self, arch):
env = self.get_recipe_env(arch)
with current_directory(self.get_build_dir(arch.arch)):
configure = sh.Command('./configure')
shprint(configure, '--without-readline', '--host=arm-linux',
_env=env)
shprint(sh.make, 'submake-libpq', _env=env)
shprint(sh.cp, '-a', 'src/interfaces/libpq/libpq.a',
self.ctx.get_libs_dir(arch.arch))
recipe = LibpqRecipe()
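# Usage note (added; not part of the original recipe): python-for-android
# picks this recipe up by its directory name, so a build that needs libpq
# would typically request it alongside the other requirements, e.g. something
# like `p4a apk --requirements=libpq,<other requirements> ...` (flags
# abbreviated; see the p4a docs for a full invocation).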
| {
"content_hash": "d5d2616bccb38606cc04537c93b8c332",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 88,
"avg_line_length": 33.46153846153846,
"alnum_prop": 0.6172413793103448,
"repo_name": "rnixx/python-for-android",
"id": "45c296a2a67aa7a23f9544509b22dbdffa9e00d7",
"size": "870",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pythonforandroid/recipes/libpq/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "70942"
},
{
"name": "C++",
"bytes": "491"
},
{
"name": "CMake",
"bytes": "250"
},
{
"name": "CSS",
"bytes": "3487"
},
{
"name": "Dockerfile",
"bytes": "4440"
},
{
"name": "HTML",
"bytes": "11631"
},
{
"name": "Java",
"bytes": "517112"
},
{
"name": "Makefile",
"bytes": "27307"
},
{
"name": "Python",
"bytes": "1359684"
},
{
"name": "Shell",
"bytes": "5340"
}
],
"symlink_target": ""
} |
import runStatus
import threading
import traceback
import sys
import os
import signal
def install_pystuck():
import pystuck
stuck_port = 6666
while 1:
try:
pystuck.run_server(port=stuck_port)
print("PyStuck installed to process, running on port %s" % stuck_port)
return
except OSError:
stuck_port += 1
if stuck_port > 7000:
raise RuntimeError("wat?")
# os.kill(os.getpid(), signal.SIGINT)
def halt_exc(x, y):
if runStatus.run_state.value == 0:
print("Raising Keyboard Interrupt")
raise KeyboardInterrupt
def handler(signum, frame):
for th in threading.enumerate():
print("Dumping stack for thread: ", th)
traceback.print_stack(sys._current_frames()[th.ident])
print()
| {
"content_hash": "8aa8867db3ddf326dd37f5484dbf6d6f",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 73,
"avg_line_length": 21.545454545454547,
"alnum_prop": 0.7130801687763713,
"repo_name": "fake-name/ReadableWebProxy",
"id": "05e3e51f89625abee9acd19d13aa75134e72ccb5",
"size": "712",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "common/stuck.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from builtins import range
from past.utils import old_div
from math import *
import proteus.MeshTools
from proteus import Domain
from proteus.default_n import *
from proteus import Context
'''
flow around a 2D cylinder benchmark problem.
'''
opts = Context.Options([
("T", 4.0, "Time interval [0, T]"),
("he",0.2, "maximum size of edges"),
("backwardEuler",False,"use backward Euler or not"),
("onlySaveFinalSolution",False,"Only save the final solution")
], mutable=True)
nd = 2
spaceOrder=1
useHex=False
DX = opts.he
usePETSc = False#True
parallelPartitioningType = MeshParallelPartitioningTypes.node
nLayersOfOverlapForParallel = 1
L = 2.2
H = 0.41
Um = 1.5
radius = 0.05
fl_H = H
# Input checks
if spaceOrder not in [1,2]:
print("INVALID: spaceOrder" + spaceOrder)
sys.exit()
if spaceOrder == 1:
hFactor=1.0
if useHex:
basis=C0_AffineLinearOnCubeWithNodalBasis
elementQuadrature = CubeGaussQuadrature(nd,3)
elementBoundaryQuadrature = CubeGaussQuadrature(nd-1,3)
else:
basis=C0_AffineLinearOnSimplexWithNodalBasis
elementQuadrature = SimplexGaussQuadrature(nd,3)
elementBoundaryQuadrature = SimplexGaussQuadrature(nd-1,3)
elif spaceOrder == 2:
hFactor=0.5
if useHex:
basis=C0_AffineLagrangeOnCubeWithNodalBasis
elementQuadrature = CubeGaussQuadrature(nd,4)
elementBoundaryQuadrature = CubeGaussQuadrature(nd-1,4)
else:
basis=C0_AffineQuadraticOnSimplexWithNodalBasis
elementQuadrature = SimplexGaussQuadrature(nd,4)
elementBoundaryQuadrature = SimplexGaussQuadrature(nd-1,4)
nLevels = 1
#from cylinder2dDomain import *
try:
from .symmetricDomain_john import *
except:
from symmetricDomain_john import *
domain = symmetric2D(box=(2.2,0.41),
L= 0.2,
H = 0.2,
r = 0.1,
C = (0.2,0.2),
DX = DX,
refinement_length=1.0,
DX_coarse = DX)
boundaryTags=domain.boundaryFlags
# Time stepping
T= opts.T
runCFL = 0.9
dt_fixed = 0.005
dt_init = 0.005
nDTout = int(old_div(T,dt_fixed))
dt_init = min(dt_init,0.5*dt_fixed)
tnList = [0.0,dt_init]+[i*dt_fixed for i in range(1,nDTout+1)]
if opts.onlySaveFinalSolution == True:
tnList = [0.0,dt_init,opts.T]
useBackwardEuler = opts.backwardEuler
# Numerical parameters
ns_shockCapturingFactor = 0.0
ns_lag_shockCapturing = True#False
ns_lag_subgridError = True
epsFact_density = 1.5
epsFact_viscosity = 1.5
epsFact_redistance = 0.33
epsFact_consrv_heaviside = 1.5
epsFact_consrv_dirac = 1.5
epsFact_consrv_diffusion = 10.0
# Fluid
rho = 1.0
#mu =rho*0.2
nu = 1.0e-3
# Gravity
g = [0.0,0.0]
domain.MeshOptions.triangleOptions="pAq30ena"#D=Delaunay gives bad results for this composite meshing approach
domain.MeshOptions.genMesh=False#True
#domain.writePLY('cylinder2D')
#domain.writePoly('mesh_cylinder2D')
domain.polyfile=os.path.dirname(os.path.abspath(__file__))+"/"+"mesh_cylinder2D"
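# Note (added for clarity; not in the original script): with Um = 1.5, a
# cylinder diameter D = 2*radius = 0.1 and nu = 1.0e-3, the mean velocity of
# the usual parabolic inflow profile is Ubar = 2*Um/3 = 1.0, so the Reynolds
# number is Re = Ubar*D/nu = 100 -- the standard unsteady 2D
# flow-around-a-cylinder benchmark regime (assuming the conventional DFG 2D-2
# setup this script appears to follow).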
| {
"content_hash": "6a90e70d4364396f7f3338d60f355365",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 110,
"avg_line_length": 26.270491803278688,
"alnum_prop": 0.6764430577223088,
"repo_name": "erdc/proteus",
"id": "fc3796217ca960c8a34fcc32db011891148f589c",
"size": "3205",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "proteus/tests/cylinder2D/conforming_rans2p/cylinder2d.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "2790"
},
{
"name": "Asymptote",
"bytes": "1569"
},
{
"name": "C",
"bytes": "2827957"
},
{
"name": "C++",
"bytes": "7262408"
},
{
"name": "Cython",
"bytes": "154607"
},
{
"name": "Dockerfile",
"bytes": "2738"
},
{
"name": "Fortran",
"bytes": "51671"
},
{
"name": "Jupyter Notebook",
"bytes": "33357"
},
{
"name": "Makefile",
"bytes": "19043"
},
{
"name": "Python",
"bytes": "12534530"
},
{
"name": "Roff",
"bytes": "322"
},
{
"name": "Shell",
"bytes": "14084"
}
],
"symlink_target": ""
} |
import argparse
import collections
import os
import sys
from spinnaker.run import check_run_and_monitor
from spinnaker.run import check_run_quick
from spinnaker.run import run_and_monitor
from spinnaker.run import run_quick
def get_repository_dir(name):
"""Determine the local directory that a given repository is in.
We assume that refresh_source is being run in the build directory
that contains all the repositories. Except spinnaker/ itself is not
in the build directory so special case it.
Args:
name [string]: The repository name.
"""
if name == 'spinnaker':
return os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
else:
return name
class SourceRepository(
collections.namedtuple('SourceRepository', ['name', 'owner'])):
"""Denotes a github repository.
Attributes:
name: The [short] name of the repository.
owner: The github user name owning the repository
"""
pass
class Refresher(object):
"""Provides branch management capabilities across Spinnaker repositories.
The Spinnaker repositories are federated across several independent
repositories. This class provides convenient support to update local
repositories from remote and vice-versa.
The origin repository is specified using --github_user option. This specifies
the github repository owner for the origin repositories. It is only relevant
when a repository needs to be cloned to establish a local repository. The
value 'upstream' can be used to indicate that the repository should be cloned
from its authoritative source as opposed to another user's fork.
When the refresher clones new repositories, it establishes an "upstream"
remote to the authoritative repository (based on hard-coded mappings)
unless the origin is the upstream. Upstream pulls are disabled (including
when the origin is the upstream) and only the master branch can be pulled
from upstream.
If --pull_branch is used then the local repositories will pull their current
branch from the origin repository. If a local repository does not yet exist,
then it will be cloned from the --github_user using the branch specified
by --pull_branch. The --pull_origin option is similar but implies that the
branch is 'master'. This is intended to perform complete updates of the
local repositories.
--push_branch (or --push_master, implying 'master' branch) will push the
local repository branch back to the origin, but only if the local repository
is in the specified branch. This is for safety to prevent accidental pushes.
It is assumed that multi-repository changes will have a common feature-branch
name, and not all repositories will be affected.
Of course, individual repositories can still be managed using explicit git
commands. This class is intended for cross-cutting management.
"""
__OPTIONAL_REPOSITORIES = [SourceRepository('citest', 'google')]
__REQUIRED_REPOSITORIES = [
SourceRepository('spinnaker', 'spinnaker'),
SourceRepository('clouddriver', 'spinnaker'),
SourceRepository('orca', 'spinnaker'),
SourceRepository('front50', 'spinnaker'),
SourceRepository('echo', 'spinnaker'),
SourceRepository('rosco', 'spinnaker'),
SourceRepository('gate', 'spinnaker'),
SourceRepository('fiat', 'spinnaker'),
SourceRepository('igor', 'spinnaker'),
SourceRepository('deck', 'spinnaker')]
@property
def pull_branch(self):
"""Gets the branch that we want to pull.
This may raise a ValueError if the specification is inconsistent.
This is determined lazily rather than at construction to be consistent
with the push_branch property.
"""
if self.__options.pull_origin:
if (self.__options.pull_branch
and self.__options.pull_branch != 'master'):
raise ValueError(
'--pull_origin is incompatible with --pull_branch={branch}'
.format(branch=self.__options.pull_branch))
return 'master'
return self.__options.pull_branch
@property
def push_branch(self):
"""Gets the branch that we want to push.
This may raise a ValueError if the specification is inconsistent.
This is determined lazily rather than at construction because the
option to push is not necessarily present depending on the use case.
"""
if self.__options.push_master:
if (self.__options.push_branch
and self.__options.push_branch != 'master'):
raise ValueError(
'--push_origin is incompatible with --push_branch={branch}'
.format(branch=self.__options.push_branch))
return 'master'
return self.__options.push_branch
def __init__(self, options):
self.__options = options
self.__extra_repositories = self.__OPTIONAL_REPOSITORIES
if options.extra_repos:
for extra in options.extra_repos.split(','):
pair = extra.split('=')
if len(pair) != 2:
raise ValueError(
'Invalid --extra_repos value "{extra}"'.format(extra=extra))
self.__extra_repositories.append(SourceRepository(pair[0], pair[1]))
def get_remote_repository_url(self, path, which='origin'):
"""Determine the repository that a given path is from.
Args:
path [string]: The path to the repository
which [string]: The remote repository name (origin or upstream).
Returns:
The origin url for path, or None if not a git repository.
"""
result = run_quick('git -C {path} config --get remote.{which}.url'
.format(path=path, which=which),
echo=False)
if result.returncode:
return None
return result.stdout.strip()
def get_local_branch_name(self, name):
"""Determine which git branch a local repository is in.
Args:
name [string]: The repository name.
Returns:
The name of the branch.
"""
result = run_quick('git -C "{dir}" rev-parse --abbrev-ref HEAD'
.format(dir=get_repository_dir(name)),
echo=False)
if result.returncode:
error = 'Could not determine branch: ' + result.stdout.strip()
raise RuntimeError(error)
return result.stdout.strip()
def get_github_repository_url(self, repository, owner=None):
"""Determine the URL for a given github repository.
Args:
repository [string]: The upstream SourceRepository.
owner [string]: The explicit owner for the repository we want.
If not provided then use the github_user in the bound options.
"""
user = owner or self.__options.github_user
if not user:
raise ValueError('No --github_user specified.')
if user == 'default' or user == 'upstream':
user = repository.owner
url_pattern = ('https://github.com/{user}/{name}.git'
if self.__options.use_https
else 'git@github.com:{user}/{name}.git')
return url_pattern.format(user=user, name=repository.name)
def git_clone(self, repository, owner=None):
"""Clone the specified repository
Args:
repository [string]: The name of the github repository (without owner).
owner [string]: An explicit repository owner.
If not provided use the configured options.
"""
name = repository.name
repository_dir = get_repository_dir(name)
upstream_user = repository.owner
branch = self.pull_branch or 'master'
origin_url = self.get_github_repository_url(repository, owner=owner)
upstream_url = 'https://github.com/{upstream_user}/{name}.git'.format(
upstream_user=upstream_user, name=name)
# Don't echo because we're going to hide some failure.
print 'Cloning {name} from {origin_url} -b {branch}.'.format(
name=name, origin_url=origin_url, branch=branch)
shell_result = run_and_monitor(
'git clone {url} -b {branch}'.format(url=origin_url, branch=branch),
echo=False)
if not shell_result.returncode:
if shell_result.stdout:
print shell_result.stdout
else:
if repository in self.__extra_repositories:
sys.stderr.write('WARNING: Missing optional repository {name}.\n'
.format(name=name))
sys.stderr.write(' Continue on without it.\n')
return
sys.stderr.write(shell_result.stderr or shell_result.stdout)
sys.stderr.write(
'FATAL: Cannot continue without required repository {name}.\n'
' Consider using github to fork one from {upstream}.\n'.
format(name=name, upstream=upstream_url))
raise SystemExit('Repository {url} not found.'.format(url=origin_url))
if self.__options.add_upstream and origin_url != upstream_url:
print ' Adding upstream repository {upstream}.'.format(
upstream=upstream_url)
check_run_quick('git -C "{dir}" remote add upstream {url}'
.format(dir=repository_dir, url=upstream_url),
echo=False)
if self.__options.disable_upstream_push:
which = 'upstream' if origin_url != upstream_url else 'origin'
print ' Disabling git pushes to {which} {upstream}'.format(
which=which, upstream=upstream_url)
check_run_quick(
'git -C "{dir}" remote set-url --push {which} disabled'
.format(dir=repository_dir, which=which),
echo=False)
def pull_from_origin(self, repository):
"""Pulls the current branch from the git origin.
Args:
repository [string]: The local repository to update.
"""
name = repository.name
repository_dir = get_repository_dir(name)
if not os.path.exists(repository_dir):
self.git_clone(repository)
return
print 'Updating {name} from origin'.format(name=name)
branch = self.get_local_branch_name(name)
if branch != self.pull_branch:
if self.__options.force_pull:
sys.stderr.write(
'WARNING: Updating {name} branch={branch}, *NOT* "{want}"\n'
.format(name=name, branch=branch, want=self.pull_branch))
else:
sys.stderr.write(
'WARNING: Skipping {name} because branch={branch},'
' *NOT* "{want}"\n'
.format(name=name, branch=branch, want=self.pull_branch))
return
try:
check_run_and_monitor('git -C "{dir}" pull origin {branch} --tags'
.format(dir=repository_dir, branch=branch),
echo=True)
except RuntimeError:
result = check_run_and_monitor('git -C "{dir}" branch -r'
.format(dir=repository_dir),
echo=False)
if result.stdout.find('origin/{branch}\n') >= 0:
raise
sys.stderr.write(
'WARNING {name} branch={branch} is not known to the origin.\n'
.format(name=name, branch=branch))
def pull_from_upstream_if_master(self, repository):
"""Pulls the master branch from the upstream repository.
This will only have effect if the local repository exists
and is currently in the master branch.
Args:
repository [string]: The name of the local repository to update.
"""
name = repository.name
repository_dir = get_repository_dir(name)
if not os.path.exists(repository_dir):
self.pull_from_origin(repository)
branch = self.get_local_branch_name(name)
if branch != 'master':
sys.stderr.write('Skipping {name} because it is in branch={branch}.\n'
.format(name=name, branch=branch))
return
print 'Pulling master {name} from upstream'.format(name=name)
check_run_and_monitor('git -C "{dir}" pull upstream master --tags'
.format(dir=repository_dir),
echo=True)
def push_to_origin_if_target_branch(self, repository):
"""Pushes the current target branch of the local repository to the origin.
This will only have effect if the local repository exists
and is currently in the target branch.
Args:
repository [string]: The name of the local repository to push from.
"""
name = repository.name
repository_dir = get_repository_dir(name)
if not os.path.exists(repository_dir):
sys.stderr.write('Skipping {name} because it does not yet exist.\n'
.format(name=name))
return
branch = self.get_local_branch_name(name)
if branch != self.push_branch:
sys.stderr.write(
'Skipping {name} because it is in branch={branch}, not {want}.\n'
.format(name=name, branch=branch, want=self.push_branch))
return
print 'Pushing {name} to origin.'.format(name=name)
check_run_and_monitor('git -C "{dir}" push origin {branch} --tags'.format(
dir=repository_dir, branch=self.push_branch),
echo=True)
def push_all_to_origin_if_target_branch(self):
"""Push all the local repositories current target branch to origin.
This will skip any local repositories that are not currently in the
target branch.
"""
all_repos = self.__REQUIRED_REPOSITORIES + self.__extra_repositories
for repository in all_repos:
self.push_to_origin_if_target_branch(repository)
def pull_all_from_upstream_if_master(self):
"""Pull all the upstream master branches into their local repository.
This will skip any local repositories that are not currently in the master
branch.
"""
all_repos = self.__REQUIRED_REPOSITORIES + self.__extra_repositories
for repository in all_repos:
self.pull_from_upstream_if_master(repository)
def pull_all_from_origin(self):
"""Pull all the origin target branches into their local repository.
This will skip any local repositories that are not currently in the
target branch.
"""
all_repos = self.__REQUIRED_REPOSITORIES + self.__extra_repositories
for repository in all_repos:
try:
self.pull_from_origin(repository)
except RuntimeError as ex:
if repository in self.__extra_repositories and not os.path.exists(
get_repository_dir(repository)):
sys.stderr.write(
'IGNORING error "{msg}" in optional repository {name}'
' because the local repository does not yet exist.\n'
.format(msg=ex.message, name=repository.name))
else:
raise
def __determine_spring_config_location(self):
root = '{dir}/config'.format(
dir=os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
home = os.path.join(os.environ['HOME'] + '/.spinnaker')
return '{root}/,{home}/'.format(home=home, root=root)
def write_gradle_run_script(self, repository):
"""Generate a dev_run.sh script for the local repository.
Args:
repository [string]: The name of the local repository to generate in.
"""
name = repository.name
path = '{name}/start_dev.sh'.format(name=name)
with open(path, 'w') as f:
f.write("""#!/bin/bash
d=$(dirname "$0")
cd "$d"
LOG_DIR=${{LOG_DIR:-../logs}}
DEF_SYS_PROPERTIES="-Dspring.config.location='{spring_location}'"
bash -c "(./gradlew $DEF_SYS_PROPERTIES $@ > '$LOG_DIR/{name}.log') 2>&1\
| tee -a '$LOG_DIR/{name}.log' >& '$LOG_DIR/{name}.err' &"
""".format(name=name,
spring_location=self.__determine_spring_config_location()))
os.chmod(path, 0777)
def write_deck_run_script(self, repository):
"""Generate a dev_run.sh script for running deck locally.
Args:
repository [string]: The name of the local repository to generate in.
"""
name = repository.name
path = '{name}/start_dev.sh'.format(name=name)
with open(path, 'w') as f:
f.write("""#!/bin/bash
d=$(dirname "$0")
cd "$d"
LOG_DIR=${{LOG_DIR:-../logs}}
if [[ node_modules -ot .git ]]; then
# Update npm, otherwise assume nothing changed and we're good.
npm install >& "$LOG_DIR/deck.log"
else
echo "deck npm node_modules looks up to date already."
fi
# Append to the log file we just started.
bash -c "(npm start >> '$LOG_DIR/{name}.log') 2>&1\
| tee -a '$LOG_DIR/{name}.log' >& '$LOG_DIR/{name}.err' &"
""".format(name=name))
os.chmod(path, 0777)
def update_spinnaker_run_scripts(self):
"""Regenerate the local dev_run.sh script for each local repository."""
for repository in self.__REQUIRED_REPOSITORIES:
name = repository.name
if not os.path.exists(name):
continue
if name == 'deck':
self.write_deck_run_script(repository)
else:
self.write_gradle_run_script(repository)
@classmethod
def init_extra_argument_parser(cls, parser):
"""Initialize additional arguments for managing remote repositories.
This is to sync the origin and upstream repositories. The intent
is to ultimately sync the origin from the upstream repository, but
this might be in two steps so the upstream can be verified [again]
before pushing the changes to the origin.
"""
# Note that we only pull the master branch from upstream.
# Pulling other branches don't normally make sense.
parser.add_argument('--pull_upstream', default=False,
action='store_true',
help='If the local branch is master, then refresh it'
' from the upstream repository.'
' Otherwise leave as is.')
parser.add_argument('--nopull_upstream',
dest='pull_upstream',
action='store_false')
# Note we only push target branches to origin specified by --push_branch
# To push another branch, you must explicitly push it with git
# (or another invocation).
parser.add_argument('--push_master', action='store_true',
help='Push the current branch to origin if it is'
' master. This is the same as --push_branch=master.')
parser.add_argument('--nopush_master', dest='push_master',
action='store_false')
parser.add_argument('--push_branch', default='',
help='If specified and the local repository is in'
' this branch then push it to the origin'
' repository. Otherwise do not push it.')
@classmethod
def init_argument_parser(cls, parser):
"""Initialize command-line arguments."""
parser.add_argument('--use_https', default=True, action='store_true',
help='Use https when cloning github repositories.')
parser.add_argument('--use_ssh', dest='use_https', action='store_false',
help='Use SSH when cloning github repositories.')
parser.add_argument('--add_upstream', default=True,
action='store_true',
help='Add upstream repository when cloning.')
parser.add_argument('--noadd_upstream', dest='add_upstream',
action='store_false')
parser.add_argument('--disable_upstream_push', default=True,
action='store_true',
help='Disable future pushes to the upstream'
' repository when cloning a repository.')
parser.add_argument('--nodisable_upstream_push',
dest='disable_upstream_push',
action='store_false')
parser.add_argument('--pull_origin', default=False,
action='store_true',
help='Refresh the local branch from the origin.'
' If cloning, then clone the master branch.'
' See --pull_branch for a more general option.')
parser.add_argument('--nopull_origin', dest='pull_origin',
action='store_false')
parser.add_argument('--pull_branch', default='',
help='Refresh the local branch from the origin if'
' it is in the specified branch,'
' otherwise skip it.'
' If cloning, then clone this branch.')
parser.add_argument('--force_pull', default=False,
help='Force pulls, even if the current branch'
' differs from the pulled branch.')
parser.add_argument(
'--extra_repos', default=None,
help='A comma-delimited list of name=owner optional repositories.'
'name is the repository name,'
' owner is the authoritative github user name owning it.'
' The --github_user will still be used to determine the origin.')
parser.add_argument('--github_user', default=None,
help='Pull from this github user\'s repositories.'
' If the user is "default" then use the'
' authoritative (upstream) repository.')
@classmethod
def main(cls):
parser = argparse.ArgumentParser()
cls.init_argument_parser(parser)
cls.init_extra_argument_parser(parser)
options = parser.parse_args()
refresher = cls(options)
in_repository_url = refresher.get_remote_repository_url('.')
if in_repository_url:
sys.stderr.write(
'ERROR: You cannot run this script from within a local repository.\n'
' This directory is from "{url}".\n'
' Did you intend to be in the parent directory?\n'
.format(url=in_repository_url))
return -1
try:
# This is ok. Really we want to look for an exception while validating these
# properties so we can fail with a friendly error rather than a stack trace.
if (refresher.pull_branch != refresher.push_branch
and refresher.pull_branch and refresher.push_branch):
sys.stderr.write(
'WARNING: pulling branch {pull} and pushing branch {push}'
.format(pull=refresher.pull_branch,
push=refresher.push_branch))
except Exception as ex:
sys.stderr.write('FAILURE: {0}\n'.format(ex.message))
return -1
nothing = True
if options.pull_upstream:
nothing = False
refresher.pull_all_from_upstream_if_master()
if refresher.push_branch:
nothing = False
refresher.push_all_to_origin_if_target_branch()
if refresher.pull_branch:
nothing = False
refresher.pull_all_from_origin()
refresher.update_spinnaker_run_scripts()
if nothing:
sys.stderr.write('No pull/push options were specified.\n')
else:
print 'DONE'
return 0
if __name__ == '__main__':
sys.exit(Refresher.main())
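# Example invocations (illustrative only, based on the flags defined above;
# run from the build directory that contains the repositories):
#
#   # Clone/refresh every repository's master branch from your own fork:
#   python spinnaker/dev/refresh_source.py --github_user=<you> --pull_origin
#
#   # Work on a shared feature branch: pull it, then push it back later.
#   python spinnaker/dev/refresh_source.py --pull_branch=my-feature
#   python spinnaker/dev/refresh_source.py --push_branch=my-feature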
| {
"content_hash": "4b7a01d1cb5c5cb1546d5b3d1a4c460f",
"timestamp": "",
"source": "github",
"line_count": 576,
"max_line_length": 80,
"avg_line_length": 40.802083333333336,
"alnum_prop": 0.6126287124500043,
"repo_name": "stitchfix/spinnaker",
"id": "8803371e19ae9606068eced1464c07e606671025",
"size": "24119",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "dev/refresh_source.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7648"
},
{
"name": "Python",
"bytes": "735198"
},
{
"name": "Shell",
"bytes": "116671"
}
],
"symlink_target": ""
} |
from builtins import object
import json
import sys
from datetime import datetime
from nose.plugins.skip import SkipTest
from nose.tools import assert_equal, assert_false, assert_true, assert_not_equal, assert_raises
from django.core import management
from django.db.utils import OperationalError
from beeswax.models import SavedQuery
from beeswax.design import hql_query
from notebook.models import import_saved_beeswax_query
from useradmin.models import get_default_user_group, User
from filebrowser.conf import REMOTE_STORAGE_HOME
from desktop.conf import has_connectors, RAZ
from desktop.converters import DocumentConverter
from desktop.lib.connectors.models import Connector
from desktop.lib.django_test_util import make_logged_in_client
from desktop.lib.fs import ProxyFS
from desktop.lib.test_utils import grant_access
from desktop.models import Directory, Document2, Document, Document2Permission, ClusterConfig, get_remote_home_storage
try:
from oozie.models2 import Workflow
has_oozie = True
except RuntimeError:
has_oozie = False
if sys.version_info[0] > 2:
from unittest.mock import patch, Mock
else:
from mock import patch, Mock
class MockFs(object):
def __init__(self):
pass
class TestClusterConfig(object):
def setUp(self):
self.client = make_logged_in_client(username="test", groupname="test", recreate=True, is_superuser=False)
self.user = User.objects.get(username="test")
self.client_not_me = make_logged_in_client(username="test_not_me", groupname="test_not_me", recreate=True, is_superuser=False)
self.user_not_me = User.objects.get(username="test_not_me")
def test_get_fs(self):
if not has_connectors():
raise SkipTest
with patch('desktop.models.appmanager.get_apps_dict') as get_apps_dict:
with patch('desktop.models.fsmanager.is_enabled_and_has_access') as is_enabled_and_has_access:
# filebrowser
ClusterConfig(user=self.user)
def test_get_main_quick_action(self):
with patch('desktop.models.get_user_preferences') as get_user_preferences:
get_user_preferences.return_value = json.dumps({'app': 'editor', 'interpreter': 1})
apps = {'editor': {'interpreters': [{'type': 1, 'name': 'SQL'}, {'type': 2, 'name': 'Stream SQL'}]}}
main_app = ClusterConfig(user=self.user, apps=apps).get_main_quick_action(apps=apps)
assert_equal({'type': 1, 'name': 'SQL'}, main_app)
def test_get_remote_storage_home(self):
# When default home ends with /user in RAZ ADLS env.
resets = [
RAZ.IS_ENABLED.set_for_testing(True),
REMOTE_STORAGE_HOME.set_for_testing('abfs://gethue-container/user')
]
try:
remote_home_storage = get_remote_home_storage(self.user)
assert_equal(remote_home_storage, 'abfs://gethue-container/user/test')
remote_home_storage = get_remote_home_storage(self.user_not_me)
assert_equal(remote_home_storage, 'abfs://gethue-container/user/test_not_me')
finally:
for reset in resets:
reset()
# When default home ends with /user in RAZ S3 env.
resets = [
RAZ.IS_ENABLED.set_for_testing(True),
REMOTE_STORAGE_HOME.set_for_testing('s3a://gethue-bucket/user')
]
try:
remote_home_storage = get_remote_home_storage(self.user)
assert_equal(remote_home_storage, 's3a://gethue-bucket/user/test')
remote_home_storage = get_remote_home_storage(self.user_not_me)
assert_equal(remote_home_storage, 's3a://gethue-bucket/user/test_not_me')
finally:
for reset in resets:
reset()
# When default home does not ends with /user in RAZ env
resets = [
RAZ.IS_ENABLED.set_for_testing(True),
REMOTE_STORAGE_HOME.set_for_testing('abfs://gethue-container')
]
try:
remote_home_storage = get_remote_home_storage(self.user)
assert_equal(remote_home_storage, 'abfs://gethue-container')
remote_home_storage = get_remote_home_storage(self.user_not_me)
assert_equal(remote_home_storage, 'abfs://gethue-container')
finally:
for reset in resets:
reset()
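# Pattern note (added comment): conf options like RAZ.IS_ENABLED and
# REMOTE_STORAGE_HOME expose set_for_testing(), which overrides the value
# and returns a callable that restores the previous one; the tests above
# collect those callables in `resets` and invoke them in a finally block so
# overrides never leak between test cases.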
class TestDocument2(object):
def setUp(self):
self.client = make_logged_in_client(username="doc2", groupname="doc2", recreate=True, is_superuser=False)
self.user = User.objects.get(username="doc2")
# This creates the user directories for the new user
response = self.client.get('/desktop/api2/doc/')
data = json.loads(response.content)
assert_equal('/', data['document']['path'], data)
self.home_dir = Document2.objects.get_home_directory(user=self.user)
def test_trash_directory(self):
assert_true(Directory.objects.filter(owner=self.user, name=Document2.TRASH_DIR, type='directory').exists())
def test_document_create(self):
sql = 'SELECT * FROM sample_07'
design = hql_query(sql)
# is_auto
# is_trashed
# is_redacted
old_query = SavedQuery.objects.create(
type=SavedQuery.TYPES_MAPPING['hql'],
owner=self.user,
data=design.dumps(),
name='See examples',
desc='Example of old format'
)
try:
new_query = import_saved_beeswax_query(old_query)
new_query_data = new_query.get_data()
assert_equal('query-hive', new_query_data['type'])
assert_equal('See examples', new_query_data['name'])
assert_equal('Example of old format', new_query_data['description'])
assert_equal('ready', new_query_data['snippets'][0]['status'])
assert_equal('See examples', new_query_data['snippets'][0]['name'])
assert_equal('SELECT * FROM sample_07', new_query_data['snippets'][0]['statement_raw'])
assert_equal([], new_query_data['snippets'][0]['properties']['settings'])
assert_equal([], new_query_data['snippets'][0]['properties']['files'])
assert_equal([], new_query_data['snippets'][0]['properties']['functions'])
finally:
old_query.delete()
def test_get_document(self):
doc = Document2.objects.create(name='test_doc', type='query-hive', owner=self.user, data={})
self.home_dir.children.add(doc)
response = self.client.get('/desktop/api2/doc/', {'uuid': doc.uuid})
data = json.loads(response.content)
assert_true('document' in data)
assert_equal(doc.uuid, data['document']['uuid'])
# Invalid uuid returns error
response = self.client.get('/desktop/api2/doc/', {'uuid': '1234-5678-9'})
data = json.loads(response.content)
assert_equal(-1, data['status'])
assert_true('not found' in data['message'])
# Document UUID and XML UUID missmatch
response = self.client.get('/desktop/api2/doc/', {'uuid': doc.uuid})
data = json.loads(response.content)
doc.uuid = '1234-5678-9'
doc.save()
assert_not_equal(doc.uuid, data['document']['uuid'])
response = self.client.get('/desktop/api2/doc/', {'uuid': doc.uuid})
data = json.loads(response.content)
assert_equal(doc.uuid, data['document']['uuid'])
def test_directory_create_and_rename(self):
response = self.client.post(
'/desktop/api2/doc/mkdir',
{'parent_uuid': json.dumps(self.home_dir.uuid), 'name': json.dumps('test_mkdir')}
)
data = json.loads(response.content)
assert_equal(0, data['status'], data)
assert_true('directory' in data)
assert_equal(data['directory']['name'], 'test_mkdir', data)
assert_equal(data['directory']['type'], 'directory', data)
response = self.client.post('/desktop/api2/doc/update', {'uuid': json.dumps(data['directory']['uuid']),
'name': 'updated'})
data = json.loads(response.content)
assert_equal(0, data['status'])
assert_equal('updated', data['document']['name'], data)
def test_file_move(self):
source_dir = Directory.objects.create(name='test_mv_file_src', owner=self.user, parent_directory=self.home_dir)
target_dir = Directory.objects.create(name='test_mv_file_dst', owner=self.user, parent_directory=self.home_dir)
doc = Document2.objects.create(name='query1.sql', type='query-hive', owner=self.user, data={}, parent_directory=source_dir)
orig_last_modified = doc.last_modified
# Verify original paths before move operation
response = self.client.get('/desktop/api2/doc/', {'uuid': source_dir.uuid})
data = json.loads(response.content)
assert_equal('/test_mv_file_src', data['document']['path'])
response = self.client.get('/desktop/api2/doc/', {'uuid': doc.uuid})
data = json.loads(response.content)
assert_equal('/test_mv_file_src/query1.sql', data['document']['path'])
response = self.client.post('/desktop/api2/doc/move', {
'source_doc_uuid': json.dumps(doc.uuid),
'destination_doc_uuid': json.dumps(target_dir.uuid)
})
data = json.loads(response.content)
assert_equal(0, data['status'], data)
# Verify that the paths are updated
response = self.client.get('/desktop/api2/doc/', {'uuid': source_dir.uuid})
data = json.loads(response.content)
assert_false(any(doc['uuid'] == doc.uuid for doc in data['children']), data['children'])
response = self.client.get('/desktop/api2/doc/', {'uuid': doc.uuid})
data = json.loads(response.content)
assert_equal('/test_mv_file_dst/query1.sql', data['document']['path'])
# Verify that last_modified is intact
doc = Document2.objects.get(id=doc.id)
assert_equal(orig_last_modified.strftime('%Y-%m-%dT%H:%M:%S'), doc.last_modified.strftime('%Y-%m-%dT%H:%M:%S'))
def test_file_copy(self):
if not has_oozie:
raise SkipTest
workflow_doc = Document2.objects.create(
name='Copy Test',
type='oozie-workflow2',
owner=self.user,
data={},
parent_directory=self.home_dir
)
Document.objects.link(
workflow_doc,
owner=workflow_doc.owner,
name=workflow_doc.name,
description=workflow_doc.description,
extra='workflow2'
)
workflow = Workflow(user=self.user)
workflow.update_name('Copy Test')
workflow.set_workspace(self.user)
# Monkey patch check_workspace for both new wor
if not hasattr(Workflow, 'real_check_workspace'):
Workflow.real_check_workspace = Workflow.check_workspace
try:
Workflow.check_workspace = lambda a, b, c: None
workflow.check_workspace(MockFs(), self.user)
workflow_doc.update_data({'workflow': workflow.get_data()['workflow']})
workflow_doc.save()
def copy_remote_dir(self, src, dst, *args, **kwargs):
pass
# Monkey patch as we don't want to do real copy
if not hasattr(ProxyFS, 'real_copy_remote_dir'):
ProxyFS.real_copy_remote_dir = ProxyFS.copy_remote_dir
ProxyFS.copy_remote_dir = copy_remote_dir
response = self.client.post('/desktop/api2/doc/copy', {
'uuid': json.dumps(workflow_doc.uuid)
})
finally:
Workflow.check_workspace = Workflow.real_check_workspace
ProxyFS.copy_remote_dir = ProxyFS.real_copy_remote_dir
copy_doc_json = json.loads(response.content)
copy_doc = Document2.objects.get(type='oozie-workflow2', uuid=copy_doc_json['document']['uuid'])
copy_workflow = Workflow(document=copy_doc)
# Check if document2 and data are in sync
assert_equal(copy_doc.name, copy_workflow.get_data()['workflow']['name'])
assert_equal(copy_doc.uuid, copy_workflow.get_data()['workflow']['uuid'])
assert_equal(copy_workflow.name, workflow.name + "-copy")
assert_not_equal(copy_workflow.deployment_dir, workflow.deployment_dir)
assert_not_equal(copy_doc.uuid, workflow_doc.uuid)
assert_not_equal(copy_workflow.get_data()['workflow']['uuid'], workflow.get_data()['workflow']['uuid'])
def test_directory_move(self):
source_dir = Directory.objects.create(name='test_mv', owner=self.user, parent_directory=self.home_dir)
target_dir = Directory.objects.create(name='test_mv_dst', owner=self.user, parent_directory=self.home_dir)
doc = Document2.objects.create(name='query1.sql', type='query-hive', owner=self.user, data={}, parent_directory=source_dir)
# Verify original paths before move operation
response = self.client.get('/desktop/api2/doc/', {'uuid': source_dir.uuid})
data = json.loads(response.content)
assert_equal('/test_mv', data['document']['path'])
response = self.client.get('/desktop/api2/doc/', {'uuid': doc.uuid})
data = json.loads(response.content)
assert_equal('/test_mv/query1.sql', data['document']['path'])
response = self.client.post('/desktop/api2/doc/move', {
'source_doc_uuid': json.dumps(Directory.objects.get(owner=self.user, name='test_mv').uuid),
'destination_doc_uuid': json.dumps(Directory.objects.get(owner=self.user, name='test_mv_dst').uuid)
})
data = json.loads(response.content)
assert_equal(0, data['status'], data)
# Verify that the paths are updated
response = self.client.get('/desktop/api2/doc/', {'uuid': source_dir.uuid})
data = json.loads(response.content)
assert_equal('/test_mv_dst/test_mv', data['document']['path'])
response = self.client.get('/desktop/api2/doc/', {'uuid': doc.uuid})
data = json.loads(response.content)
assert_equal('/test_mv_dst/test_mv/query1.sql', data['document']['path'])
def test_directory_children(self):
# Creates 2 directories and 2 queries and saves to home directory
dir1 = Directory.objects.create(name='test_dir1', owner=self.user)
dir2 = Directory.objects.create(name='test_dir2', owner=self.user)
query1 = Document2.objects.create(name='query1.sql', type='query-hive', owner=self.user, data={}, search='foobar')
query2 = Document2.objects.create(name='query2.sql', type='query-hive', owner=self.user, data={}, search='barfoo')
children = [dir1, dir2, query1, query2]
self.home_dir.children.add(*children)
# Test that all children directories and documents are returned
response = self.client.get('/desktop/api2/doc', {'path': '/'})
data = json.loads(response.content)
assert_true('children' in data)
assert_equal(5, data['count']) # This includes the 4 docs plus the .Trash directory
# Test filter type
response = self.client.get('/desktop/api2/doc', {'path': '/', 'type': ['directory']})
data = json.loads(response.content)
assert_equal(['directory'], data['types'])
assert_equal(3, data['count'])
assert_true(all(doc['type'] == 'directory' for doc in data['children']))
# Test search text
response = self.client.get('/desktop/api2/doc', {'path': '/', 'text': 'foo'})
data = json.loads(response.content)
assert_equal('foo', data['text'])
assert_equal(2, data['count'])
response = self.client.get('/desktop/api2/doc', {'path': '/', 'text': 'foobar'})
data = json.loads(response.content)
assert_equal(1, data['count'])
# Test pagination with limit
response = self.client.get('/desktop/api2/doc', {'path': '/', 'page': 2, 'limit': 2})
data = json.loads(response.content)
assert_equal(5, data['count'])
assert_equal(2, len(data['children']))
def test_update_document(self):
doc = Document2.objects.create(
name='initial',
description='initial desc',
type='query-hive',
owner=self.user,
data={},
parent_directory=self.home_dir
)
response = self.client.get('/desktop/api2/doc/', {'uuid': doc.uuid})
data = json.loads(response.content)
assert_equal('initial', data['document']['name'])
assert_equal('initial desc', data['document']['description'])
assert_equal('query-hive', data['document']['type'])
# Update document's name and description
response = self.client.post('/desktop/api2/doc/update', {'uuid': json.dumps(doc.uuid),
'name': 'updated',
'description': 'updated desc',
'type': 'bogus-type'})
data = json.loads(response.content)
assert_equal(0, data['status'])
assert_true('document' in data, data)
assert_equal('updated', data['document']['name'], data)
assert_equal('updated desc', data['document']['description'], data)
# Non-whitelisted attributes should remain unchanged
assert_equal('query-hive', data['document']['type'], data)
def test_document_trash(self):
# Create document under home and directory under home with child document
# /
#   test_dir/
#     query1.sql
#   query2.sql
dir = Directory.objects.create(name='test_dir', owner=self.user, parent_directory=self.home_dir)
nested_query = Document2.objects.create(name='query1.sql', type='query-hive', owner=self.user, data={}, parent_directory=dir)
query = Document2.objects.create(name='query2.sql', type='query-hive', owner=self.user, data={}, parent_directory=self.home_dir)
# Test that .Trash is currently empty
response = self.client.get('/desktop/api2/doc', {'path': '/.Trash'})
data = json.loads(response.content)
assert_equal(0, data['count'])
# Delete query2.sql
assert_false(Document2.objects.get(uuid=query.uuid).is_trashed)
response = self.client.post('/desktop/api2/doc/delete', {'uuid': json.dumps(query.uuid)})
data = json.loads(response.content)
assert_equal(0, data['status'])
assert_true(Document2.objects.get(uuid=query.uuid).is_trashed)
response = self.client.get('/desktop/api2/doc', {'path': '/.Trash'})
data = json.loads(response.content)
assert_equal(1, data['count'])
assert_equal(data['children'][0]['uuid'], query.uuid)
# Delete test_dir directory w/ contents
assert_false(Document2.objects.get(uuid=dir.uuid).is_trashed)
response = self.client.post('/desktop/api2/doc/delete', {'uuid': json.dumps(dir.uuid)})
data = json.loads(response.content)
assert_equal(0, data['status'], data)
assert_true(Document2.objects.get(uuid=dir.uuid).is_trashed)
response = self.client.get('/desktop/api2/doc', {'path': '/.Trash'})
data = json.loads(response.content)
assert_equal(2, data['count'])
# Child document should be in trash too
response = self.client.get('/desktop/api2/doc', {'path': '/.Trash/test_dir'})
data = json.loads(response.content)
assert_equal(nested_query.uuid, data['children'][0]['uuid'])
# Erasing a directory with contents (skip_trash) should erase all of its children recursively
response = self.client.post('/desktop/api2/doc/delete', {'uuid': json.dumps(dir.uuid), 'skip_trash': json.dumps(True)})
data = json.loads(response.content)
assert_equal(0, data['status'])
assert_false(Document2.objects.filter(uuid=dir.uuid).exists())
assert_false(Document2.objects.filter(uuid=nested_query.uuid).exists())
# Verify that only doc in home is .Trash
response = self.client.get('/desktop/api2/doc', {'path': '/'})
data = json.loads(response.content)
assert_true('children' in data)
assert_equal(1, data['count'])
assert_true(Document2.TRASH_DIR in [f['name'] for f in data['children']])
def test_get_history(self):
history = Document2.objects.get_history(user=self.user, doc_type='query-hive')
assert_false(history.filter(name='test_get_history').exists())
query = Document2.objects.create(
name='test_get_history',
type='query-hive',
owner=self.user,
is_history=True
)
try:
history = Document2.objects.get_history(user=self.user, doc_type='query-hive')
assert_true(history.filter(name='test_get_history').exists())
finally:
query.delete()
def test_get_history_with_connector(self):
connector = Connector.objects.create(
name='MySql',
dialect='mysql'
)
query = Document2.objects.create(
name='test_get_history',
type='query-hive',
owner=self.user,
is_history=False,
connector=connector
)
try:
history = Document2.objects.get_history(user=self.user, doc_type='query-hive', connector_id=connector.id)
assert_false(history.filter(name='test_get_history').exists())
query.is_history = True
query.save()
history = Document2.objects.get_history(user=self.user, doc_type='query-hive', connector_id=connector.id)
assert_true(history.filter(name='test_get_history').exists())
finally:
query.delete()
connector.delete()
def test_validate_immutable_user_directories(self):
# Test that home and Trash directories cannot be recreated or modified
test_dir = Directory.objects.create(name='test_dir', owner=self.user, parent_directory=self.home_dir)
response = self.client.post(
'/desktop/api2/doc/mkdir',
{'parent_uuid': json.dumps(test_dir.uuid), 'name': json.dumps(Document2.TRASH_DIR)}
)
data = json.loads(response.content)
assert_equal(-1, data['status'], data)
assert_equal('Cannot create or modify directory with name: .Trash', data['message'])
response = self.client.post('/desktop/api2/doc/move', {
'source_doc_uuid': json.dumps(self.home_dir.uuid),
'destination_doc_uuid': json.dumps(test_dir.uuid)
})
data = json.loads(response.content)
assert_equal(-1, data['status'], data)
assert_equal('Cannot create or modify directory with name: ', data['message'])
trash_dir = Directory.objects.get(name=Document2.TRASH_DIR, owner=self.user)
response = self.client.post('/desktop/api2/doc/move', {
'source_doc_uuid': json.dumps(trash_dir.uuid),
'destination_doc_uuid': json.dumps(test_dir.uuid)
})
data = json.loads(response.content)
assert_equal(-1, data['status'], data)
assert_equal('Cannot create or modify directory with name: .Trash', data['message'])
def test_validate_circular_directory(self):
# Test that saving a document with a cycle raises an error, i.e. this should fail:
# a.parent_directory = b
# b.parent_directory = c
# c.parent_directory = a
c_dir = Directory.objects.create(name='c', owner=self.user)
b_dir = Directory.objects.create(name='b', owner=self.user, parent_directory=c_dir)
a_dir = Directory.objects.create(name='a', owner=self.user, parent_directory=b_dir)
response = self.client.post('/desktop/api2/doc/move', {
'source_doc_uuid': json.dumps(c_dir.uuid),
'destination_doc_uuid': json.dumps(a_dir.uuid)
})
data = json.loads(response.content)
assert_equal(-1, data['status'], data)
assert_true('circular dependency' in data['message'], data)
# Test simple case where directory is saved to self as parent
dir = Directory.objects.create(name='dir', owner=self.user)
response = self.client.post('/desktop/api2/doc/move', {
'source_doc_uuid': json.dumps(dir.uuid),
'destination_doc_uuid': json.dumps(dir.uuid)
})
data = json.loads(response.content)
assert_equal(-1, data['status'], data)
assert_true('circular dependency' in data['message'], data)
def test_api_get_data(self):
doc_data = {'info': 'hello', 'is_history': False}
doc = Document2.objects.create(name='query1.sql', type='query-hive', owner=self.user, data=json.dumps(doc_data))
doc_data.update({'id': doc.id, 'uuid': doc.uuid})
response = self.client.get('/desktop/api2/doc/', {
'uuid': doc.uuid,
})
data = json.loads(response.content)
assert_true('document' in data, data)
assert_false(data['data'], data)
response = self.client.get('/desktop/api2/doc/', {
'uuid': doc.uuid,
'data': 'true'
})
data = json.loads(response.content)
assert_true('data' in data, data)
assert_equal(data['data'], doc_data)
def test_is_trashed_migration(self):
# Skipping to prevent failing tests in TestOozieSubmissions
raise SkipTest
start_migration = '0024_auto__add_field_document2_is_managed'
mid_migration = '0025_auto__add_field_document2_is_trashed'
end_migration = '0026_change_is_trashed_default_to_false'
APP = 'desktop'
# Make sure the migration state is up-to-date using fake migrations
management.call_command('migrate', 'desktop', fake=True, verbosity=0)
dir = Directory.objects.create(name='test_dir', owner=self.user, parent_directory=self.home_dir)
query = Document2.objects.create(name='query1.sql', type='query-hive', owner=self.user, data={}, parent_directory=dir)
trashed_query = Document2.objects.create(name='query2.sql', type='query-hive', owner=self.user, data={}, parent_directory=dir)
trashed_query.trash()
try:
assert_false(dir.is_trashed)
assert_false(query.is_trashed)
assert_true(trashed_query.is_trashed)
# Reverse migrate to 0025
management.call_command('migrate', APP, mid_migration, verbosity=0)
dir = Document2.objects.get(uuid=dir.uuid)
query = Document2.objects.get(uuid=query.uuid)
trashed_query = Document2.objects.get(uuid=trashed_query.uuid)
assert_false(dir.is_trashed)
assert_false(query.is_trashed)
assert_true(trashed_query.is_trashed)
# Reverse migrate to 0024. Deletes 'is_trashed' field from desktop_documents2
management.call_command('migrate', APP, start_migration, verbosity=0)
assert_raises(OperationalError, Document2.objects.get, uuid=dir.uuid)
assert_raises(OperationalError, Document2.objects.get, uuid=query.uuid)
assert_raises(OperationalError, Document2.objects.get, uuid=trashed_query.uuid)
# Forward migrate to 0025
management.call_command('migrate', APP, mid_migration, verbosity=0)
dir = Document2.objects.get(uuid=dir.uuid)
query = Document2.objects.get(uuid=query.uuid)
trashed_query = Document2.objects.get(uuid=trashed_query.uuid)
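# After the is_trashed column is re-added by migration 0025, rows that already existed
# keep a NULL value; it is only back-filled by the DocumentConverter run further below.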
assert_true(dir.is_trashed is None)
assert_true(query.is_trashed is None)
assert_true(trashed_query.is_trashed is None)
# Forward migrate to 0026
management.call_command('migrate', APP, end_migration, verbosity=0)
dir = Document2.objects.get(uuid=dir.uuid)
query = Document2.objects.get(uuid=query.uuid)
trashed_query = Document2.objects.get(uuid=trashed_query.uuid)
assert_true(dir.is_trashed is None)
assert_true(query.is_trashed is None)
assert_true(trashed_query.is_trashed is None)
# New Documents should have is_trashed=False
query1 = Document2.objects.create(name='new_query.sql', type='query-hive', owner=self.user, data={}, parent_directory=dir)
assert_true(query1.is_trashed is False)
# Create history doc
query1.is_history = True
query1.save()
query1 = Document2.objects.get(uuid=query1.uuid)
query1_last_modified = query1.last_modified
dir_last_modified = dir.last_modified
query_last_modified = query.last_modified
trashed_query_last_modified = trashed_query.last_modified
# Converter sets is_trashed=True for currently trashed docs
converter = DocumentConverter(self.user)
converter.convert()
trashed_query = Document2.objects.get(uuid=trashed_query.uuid)
dir = Document2.objects.get(uuid=dir.uuid)
query = Document2.objects.get(uuid=query.uuid)
assert_true(trashed_query.is_trashed)
assert_true(dir.is_trashed is False)
assert_true(query.is_trashed is False)
# last_modified should be retained post conversion
assert_equal(dir_last_modified, dir.last_modified)
assert_equal(query_last_modified, query.last_modified)
assert_equal(trashed_query_last_modified, trashed_query.last_modified)
query1 = Document2.objects.get(uuid=query1.uuid)
assert_equal(query1_last_modified, query1.last_modified)
finally:
# Delete docs
dir.delete()
query.delete()
query1.delete()
trashed_query.delete()
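# The tests above repeatedly issue a request against /desktop/api2/doc/ and JSON-decode
# the response body. A minimal helper sketch of that pattern (hypothetical, not part of
# the Hue test suite), assuming the Django test client and the json module already
# imported at the top of this file:
def _get_doc_json(client, **params):
  """GET /desktop/api2/doc/ with the given query params and return the decoded JSON."""
  response = client.get('/desktop/api2/doc/', params)
  return json.loads(response.content)
# Example: data = _get_doc_json(self.client, uuid=doc.uuid); assert_equal(0, data['status'])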
class TestDocument2Permissions(object):
def setUp(self):
self.default_group = get_default_user_group()
self.client = make_logged_in_client(username="perm_user", groupname=self.default_group.name, recreate=True, is_superuser=False)
self.client_not_me = make_logged_in_client(
username="not_perm_user", groupname=self.default_group.name, recreate=True, is_superuser=False
)
self.user = User.objects.get(username="perm_user")
self.user_not_me = User.objects.get(username="not_perm_user")
grant_access(self.user.username, self.user.username, "desktop")
grant_access(self.user_not_me.username, self.user_not_me.username, "desktop")
# This creates the user directories for the new user
response = self.client.get('/desktop/api2/doc/')
data = json.loads(response.content)
assert_equal('/', data['document']['path'], data)
self.home_dir = Document2.objects.get_home_directory(user=self.user)
def test_default_permissions(self):
# Tests that for a new doc by default, read/write perms are set to no users and no groups
new_doc = Document2.objects.create(name='new_doc', type='query-hive', owner=self.user, data={}, parent_directory=self.home_dir)
response = self.client.get('/desktop/api2/doc/', {'uuid': new_doc.uuid})
data = json.loads(response.content)
assert_equal(new_doc.uuid, data['document']['uuid'], data)
assert_true('perms' in data['document'])
assert_equal(
{
'read': {'users': [], 'groups': []},
'write': {'users': [], 'groups': []},
'link_read': False,
'link_sharing_on': False,
'link_write': False,
},
data['document']['perms']
)
def test_share_document_read_by_user(self):
doc = Document2.objects.create(name='new_doc', type='query-hive', owner=self.user, data={}, parent_directory=self.home_dir)
# owner can view document
response = self.client.get('/desktop/api2/doc/', {'uuid': doc.uuid})
data = json.loads(response.content)
assert_equal(doc.uuid, data['document']['uuid'], data)
# other user cannot view document
response = self.client_not_me.get('/desktop/api2/doc/', {'uuid': doc.uuid})
data = json.loads(response.content)
assert_equal(-1, data['status'])
# Share read perm by users
response = self.client.post("/desktop/api2/doc/share", {
'uuid': json.dumps(doc.uuid),
'data': json.dumps({
'read': {
'user_ids': [
self.user.id,
self.user_not_me.id
],
'group_ids': [],
},
'write': {
'user_ids': [],
'group_ids': [],
}
})
})
assert_equal(0, json.loads(response.content)['status'], response.content)
assert_true(doc.can_read(self.user))
assert_true(doc.can_write(self.user))
assert_true(doc.can_read(self.user_not_me))
assert_false(doc.can_write(self.user_not_me))
# other user can view document
response = self.client_not_me.get('/desktop/api2/doc/', {'uuid': doc.uuid})
data = json.loads(response.content)
assert_equal(doc.uuid, data['document']['uuid'], data)
# other user can share document with read permissions
response = self.client_not_me.post("/desktop/api2/doc/share", {
'uuid': json.dumps(doc.uuid),
'data': json.dumps({
'read': {
'user_ids': [],
'group_ids': [
self.default_group.id
],
},
'write': {
'user_ids': [],
'group_ids': [],
}
})
})
assert_equal(0, json.loads(response.content)['status'], response.content)
# other user cannot share document with write permissions
response = self.client_not_me.post("/desktop/api2/doc/share", {
'uuid': json.dumps(doc.uuid),
'data': json.dumps({
'read': {
'user_ids': [],
'group_ids': [],
},
'write': {
'user_ids': [],
'group_ids': [
self.default_group.id
],
}
})
})
assert_equal(-1, json.loads(response.content)['status'], response.content)
def test_share_document_read_by_group(self):
doc = Document2.objects.create(name='new_doc', type='query-hive', owner=self.user, data={}, parent_directory=self.home_dir)
# owner can view document
response = self.client.get('/desktop/api2/doc/', {'uuid': doc.uuid})
data = json.loads(response.content)
assert_equal(doc.uuid, data['document']['uuid'], data)
# other user cannot view document
response = self.client_not_me.get('/desktop/api2/doc/', {'uuid': doc.uuid})
data = json.loads(response.content)
assert_equal(-1, data['status'])
response = self.client.post("/desktop/api2/doc/share", {
'uuid': json.dumps(doc.uuid),
'data': json.dumps({
'read': {
'user_ids': [
self.user.id
],
'group_ids': [
self.default_group.id
]
},
'write': {
'user_ids': [],
'group_ids': []
}
})
})
assert_equal(0, json.loads(response.content)['status'], response.content)
assert_true(doc.can_read(self.user))
assert_true(doc.can_write(self.user))
assert_true(doc.can_read(self.user_not_me))
assert_false(doc.can_write(self.user_not_me))
# other user can view document
response = self.client_not_me.get('/desktop/api2/doc/', {'uuid': doc.uuid})
data = json.loads(response.content)
assert_equal(doc.uuid, data['document']['uuid'], data)
def test_share_document_write_by_user(self):
doc = Document2.objects.create(name='new_doc', type='query-hive', owner=self.user, data={}, parent_directory=self.home_dir)
# other user cannot modify document
response = self.client_not_me.post('/desktop/api2/doc/delete', {'uuid': json.dumps(doc.uuid)})
data = json.loads(response.content)
assert_equal(-1, data['status'])
# Share write perm by user
response = self.client.post("/desktop/api2/doc/share", {
'uuid': json.dumps(doc.uuid),
'data': json.dumps({
'read': {
'user_ids': [
self.user.id
],
'group_ids': []
},
'write': {
'user_ids': [
self.user_not_me.id
],
'group_ids': []
}
})
})
assert_equal(0, json.loads(response.content)['status'], response.content)
assert_true(doc.can_read(self.user))
assert_true(doc.can_write(self.user))
assert_true(doc.can_read(self.user_not_me))
assert_true(doc.can_write(self.user_not_me))
# other user can modify document
response = self.client_not_me.post('/desktop/api2/doc/delete', {'uuid': json.dumps(doc.uuid)})
data = json.loads(response.content)
assert_equal(0, data['status'])
def test_share_document_write_by_group(self):
doc = Document2.objects.create(name='new_doc', type='query-hive', owner=self.user, data={}, parent_directory=self.home_dir)
# other user cannot modify document
response = self.client_not_me.post('/desktop/api2/doc/delete', {'uuid': json.dumps(doc.uuid)})
data = json.loads(response.content)
assert_equal(-1, data['status'])
# Share write perm by group
response = self.client.post("/desktop/api2/doc/share", {
'uuid': json.dumps(doc.uuid),
'data': json.dumps({
'read': {
'user_ids': [
self.user.id
],
'group_ids': []
},
'write': {
'user_ids': [],
'group_ids': [
self.default_group.id
]
}
})
})
assert_equal(0, json.loads(response.content)['status'], response.content)
assert_true(doc.can_read(self.user))
assert_true(doc.can_write(self.user))
assert_true(doc.can_read(self.user_not_me))
assert_true(doc.can_write(self.user_not_me))
# other user can modify document
response = self.client_not_me.post('/desktop/api2/doc/delete', {'uuid': json.dumps(doc.uuid)})
data = json.loads(response.content)
assert_equal(0, data['status'])
def test_share_directory(self):
# Test that updating the permissions for a directory updates all nested documents accordingly, with file structure:
# /
#   test_dir/
#     query1.sql
#     nested_dir/
#       query2.sql
# All initially owned by self.user
parent_dir = Directory.objects.create(name='test_dir', owner=self.user, parent_directory=self.home_dir)
child_doc = Document2.objects.create(name='query1.sql', type='query-hive', owner=self.user, data={}, parent_directory=parent_dir)
nested_dir = Directory.objects.create(name='nested_dir', owner=self.user, parent_directory=parent_dir)
nested_doc = Document2.objects.create(name='query2.sql', type='query-hive', owner=self.user, data={}, parent_directory=nested_dir)
for doc in [parent_dir, child_doc, nested_dir, nested_doc]:
assert_true(doc.can_read(self.user))
assert_true(doc.can_write(self.user))
assert_false(doc.can_read(self.user_not_me))
assert_false(doc.can_write(self.user_not_me))
# Update parent_dir permissions to grant write permissions to default group
response = self.client.post("/desktop/api2/doc/share", {
'uuid': json.dumps(parent_dir.uuid),
'data': json.dumps({
'read': {
'user_ids': [],
'group_ids': []
},
'write': {
'user_ids': [],
'group_ids': [
self.default_group.id
]
}
})
})
assert_equal(0, json.loads(response.content)['status'], response.content)
for doc in [parent_dir, child_doc, nested_dir, nested_doc]:
assert_true(doc.can_read(self.user))
assert_true(doc.can_write(self.user))
assert_true(doc.can_read(self.user_not_me))
assert_true(doc.can_write(self.user_not_me))
def test_get_shared_documents(self):
not_shared = Document2.objects.create(name='query1.sql', type='query-hive', owner=self.user, data={}, parent_directory=self.home_dir)
shared_1 = Document2.objects.create(name='query2.sql', type='query-hive', owner=self.user, data={}, parent_directory=self.home_dir)
shared_2 = Document2.objects.create(name='query3.sql', type='query-hive', owner=self.user, data={}, parent_directory=self.home_dir)
shared_1.share(user=self.user, name='read', users=[self.user_not_me], groups=[])
shared_2.share(user=self.user, name='read', users=[self.user_not_me], groups=[])
# 2 shared docs should appear in the other user's shared documents response
response = self.client_not_me.get('/desktop/api2/docs/', {'perms': 'shared'})
data = json.loads(response.content)
assert_true('documents' in data)
assert_equal(2, data['count'])
doc_names = [doc['name'] for doc in data['documents']]
assert_true('query2.sql' in doc_names)
assert_true('query3.sql' in doc_names)
assert_false('query1.sql' in doc_names)
# they should also appear in user's home directory get_documents response
response = self.client_not_me.get('/desktop/api2/doc/')
data = json.loads(response.content)
doc_names = [doc['name'] for doc in data['children']]
assert_true('query2.sql' in doc_names)
assert_true('query3.sql' in doc_names)
def test_get_shared_directories(self):
# Tests that when fetching the shared documents for a user, they are grouped by top-level directory when possible
# /
#   dir1/
#     query1.sql
#   dir2/
#     dir3/
#       query2.sql
#   query3.sql
dir1 = Directory.objects.create(name='dir1', owner=self.user, parent_directory=self.home_dir)
doc1 = Document2.objects.create(name='query1.sql', type='query-hive', owner=self.user, data={}, parent_directory=dir1)
dir2 = Directory.objects.create(name='dir2', owner=self.user, parent_directory=self.home_dir)
dir3 = Directory.objects.create(name='dir3', owner=self.user, parent_directory=dir2)
doc2 = Document2.objects.create(name='query2.sql', type='query-hive', owner=self.user, data={}, parent_directory=dir3)
doc3 = Document2.objects.create(name='query3.sql', type='query-hive', owner=self.user, data={}, parent_directory=self.home_dir)
dir1.share(user=self.user, name='read', users=[], groups=[self.default_group])
dir3.share(user=self.user, name='read', users=[], groups=[self.default_group])
doc3.share(user=self.user, name='read', users=[], groups=[self.default_group])
# 3 shared docs should appear, due to directory rollup
response = self.client_not_me.get('/desktop/api2/docs/', {'perms': 'shared', 'flatten': 'false'})
data = json.loads(response.content)
assert_true('documents' in data)
assert_equal(3, data['count'], data)
doc_names = [doc['name'] for doc in data['documents']]
assert_true('dir1' in doc_names)
assert_true('dir3' in doc_names)
assert_true('query3.sql' in doc_names)
assert_false('dir2' in doc_names)
# nested documents should not appear
assert_false('query1.sql' in doc_names)
assert_false('query2.sql' in doc_names)
# but nested documents should still be shared/viewable by group
response = self.client_not_me.get('/desktop/api2/doc/', {'uuid': doc1.uuid})
data = json.loads(response.content)
assert_equal(doc1.uuid, data['document']['uuid'], data)
response = self.client_not_me.get('/desktop/api2/doc/', {'uuid': doc2.uuid})
data = json.loads(response.content)
assert_equal(doc2.uuid, data['document']['uuid'], data)
def test_inherit_parent_permissions(self):
# Tests that when saving a document to a shared directory, the doc/dir inherits the same permissions
dir1 = Directory.objects.create(name='dir1', owner=self.user, parent_directory=self.home_dir)
dir1.share(user=self.user, name='read', users=[], groups=[self.default_group])
dir1.share(user=self.user, name='write', users=[self.user_not_me], groups=[])
doc1 = Document2.objects.create(name='doc1', owner=self.user, parent_directory=dir1)
response = self.client.get('/desktop/api2/doc/', {'uuid': doc1.uuid})
data = json.loads(response.content)
assert_equal(
[{'id': self.default_group.id, 'name': self.default_group.name}],
data['document']['perms']['read']['groups'],
data
)
assert_equal(
[{'id': self.user_not_me.id, 'username': self.user_not_me.username}],
data['document']['perms']['write']['users'],
data
)
def test_search_documents(self):
owned_dir = Directory.objects.create(name='test_dir', owner=self.user, parent_directory=self.home_dir)
owned_query = Document2.objects.create(
name='query1.sql', type='query-hive', owner=self.user, data={}, parent_directory=owned_dir
)
owned_history = Document2.objects.create(
name='history.sql', type='query-hive', owner=self.user, data={}, is_history=True, parent_directory=owned_dir
)
owned_workflow = Document2.objects.create(
name='test.wf', type='oozie-workflow2', owner=self.user, data={}, parent_directory=owned_dir
)
other_home_dir = Document2.objects.get_home_directory(user=self.user_not_me)
not_shared = Document2.objects.create(
name='other_query1.sql', type='query-hive', owner=self.user_not_me, data={}, parent_directory=other_home_dir
)
shared_1 = Document2.objects.create(
name='other_query2.sql', type='query-hive', owner=self.user_not_me, data={}, parent_directory=other_home_dir
)
shared_2 = Document2.objects.create(
name='other_query3.sql', type='query-hive', owner=self.user_not_me, data={}, parent_directory=other_home_dir
)
shared_1.share(user=self.user_not_me, name='read', users=[self.user], groups=[])
shared_2.share(user=self.user_not_me, name='read', users=[], groups=[self.default_group])
# 3 total docs (1 owned, 2 shared)
response = self.client.get('/desktop/api2/docs/', {'type': 'query-hive'})
data = json.loads(response.content)
assert_true('documents' in data)
assert_equal(3, data['count'])
doc_names = [doc['name'] for doc in data['documents']]
assert_true('query1.sql' in doc_names)
assert_true('other_query2.sql' in doc_names)
assert_true('other_query3.sql' in doc_names)
# Return history docs
response = self.client.get('/desktop/api2/docs/', {'type': 'query-hive', 'include_history': 'true'})
data = json.loads(response.content)
assert_true('documents' in data)
assert_equal(4, data['count'])
doc_names = [doc['name'] for doc in data['documents']]
assert_true('history.sql' in doc_names)
def test_x_share_directory_y_add_file_x_share(self):
# Test that when another user, Y, adds a doc to a dir shared by user X, X can still share the dir again afterwards:
# /
#   test_dir/
#     query1.sql
# Dir owned by self.user
parent_dir = Directory.objects.create(name='test_dir', owner=self.user, parent_directory=self.home_dir)
child_doc = Document2.objects.create(name='query1.sql', type='query-hive', owner=self.user, data={}, parent_directory=parent_dir)
user_y = User.objects.create(username='user_y', password="user_y")
# Share the dir with user_y (write access)
response = self.client.post("/desktop/api2/doc/share", {
'uuid': json.dumps(parent_dir.uuid),
'data': json.dumps({
'read': {
'user_ids': [],
'group_ids': []
},
'write': {
'user_ids': [user_y.id],
'group_ids': []
}
})
})
user_y_child_doc = Document2.objects.create(
name='other_query1.sql',
type='query-hive',
owner=user_y,
data={},
parent_directory=parent_dir
)
share_test_user = User.objects.create(username='share_test_user', password="share_test_user")
# Share the dir with another user - share_test_user
response = self.client.post("/desktop/api2/doc/share", {
'uuid': json.dumps(parent_dir.uuid),
'data': json.dumps({
'read': {
'user_ids': [],
'group_ids': []
},
'write': {
'user_ids': [share_test_user.id],
'group_ids': []
}
})
})
assert_equal(0, json.loads(response.content)['status'], response.content)
for doc in [parent_dir, child_doc, user_y_child_doc]:
assert_true(doc.can_read(self.user))
assert_true(doc.can_write(self.user))
assert_true(doc.can_read(share_test_user))
assert_true(doc.can_write(share_test_user))
def test_unicode_name(self):
doc = Document2.objects.create(
name='My Bundle a voté « non » à l’accord',
type='oozie-workflow2',
owner=self.user,
data={},
parent_directory=self.home_dir
)
# Verify that home directory contents return correctly
response = self.client.get('/desktop/api2/doc/', {'uuid': self.home_dir.uuid})
data = json.loads(response.content)
assert_equal(0, data['status'])
# Verify that the doc's path is escaped
response = self.client.get('/desktop/api2/doc/', {'uuid': doc.uuid})
data = json.loads(response.content)
assert_equal(0, data['status'])
path = data['document']['path']
assert_equal('/My%20Bundle%20a%20vot%C3%A9%20%C2%AB%20non%20%C2%BB%20%C3%A0%20l%E2%80%99accord', path)
def test_link_permissions(self):
doc = Document2.objects.create(
name='test_link_permissions.sql',
type='query-hive',
owner=self.user,
data={},
parent_directory=self.home_dir
)
try:
assert_true(doc.can_read(self.user))
assert_true(doc.can_write(self.user))
assert_false(doc.can_read(self.user_not_me))
assert_false(doc.can_write(self.user_not_me))
doc.share(self.user, name=Document2Permission.LINK_READ_PERM, is_link_on=True)
assert_true(doc.can_read(self.user))
assert_true(doc.can_write(self.user))
assert_true(doc.can_read(self.user_not_me))
assert_false(doc.can_write(self.user_not_me))
assert_true(doc.get_permissions('read'))
assert_false(doc.get_permissions('write'))
assert_false(doc.get_permission('link_read').users.all())
assert_false(doc.get_permission('link_read').groups.all())
assert_false(doc.get_permission('read')) # There is no doc listing via links, only direct access
assert_false(doc.get_permission('write'))
doc.share(self.user, name=Document2Permission.LINK_READ_PERM, is_link_on=False)
assert_true(doc.can_read(self.user))
assert_true(doc.can_write(self.user))
assert_false(doc.can_read(self.user_not_me))
assert_false(doc.can_write(self.user_not_me))
doc.share(self.user, name=Document2Permission.LINK_WRITE_PERM, is_link_on=True)
assert_true(doc.can_read(self.user))
assert_true(doc.can_write(self.user))
assert_true(doc.can_read(self.user_not_me))
assert_true(doc.can_write(self.user_not_me))
doc.share(self.user, name=Document2Permission.LINK_WRITE_PERM, is_link_on=False)
assert_true(doc.can_read(self.user))
assert_true(doc.can_write(self.user))
assert_false(doc.can_read(self.user_not_me))
assert_false(doc.can_write(self.user_not_me))
finally:
doc.delete()
def test_combined_permissions(self):
doc = Document2.objects.create(
name='test_combined_permissions.sql',
type='query-hive',
owner=self.user,
data={},
parent_directory=self.home_dir
)
try:
assert_true(doc.can_read(self.user))
assert_true(doc.can_write(self.user))
assert_false(doc.can_read(self.user_not_me))
assert_false(doc.can_write(self.user_not_me))
assert_equal(0, doc.get_permissions('read').count())
assert_equal(0, doc.get_permissions('write').count())
# READ and LINK_READ
doc.share(self.user, name=Document2Permission.LINK_READ_PERM, is_link_on=True)
doc.share(self.user, name=Document2Permission.READ_PERM, users=[self.user_not_me])
assert_true(doc.can_read(self.user))
assert_true(doc.can_write(self.user))
assert_true(doc.can_read(self.user_not_me))
assert_false(doc.can_write(self.user_not_me))
assert_equal(2, doc.get_permissions('read').count())
assert_equal(0, doc.get_permissions('write').count())
# READ, WRITE and LINK_READ
doc.share(self.user, name=Document2Permission.WRITE_PERM, users=[self.user_not_me])
assert_true(doc.can_read(self.user))
assert_true(doc.can_write(self.user))
assert_true(doc.can_read(self.user_not_me))
assert_true(doc.can_write(self.user_not_me))
assert_equal(2, doc.get_permissions('read').count())
assert_equal(1, doc.get_permissions('write').count())
# READ, WRITE, LINK_READ and LINK_WRITE
doc.share(self.user, name=Document2Permission.LINK_WRITE_PERM, is_link_on=True)
assert_true(doc.can_read(self.user))
assert_true(doc.can_write(self.user))
assert_true(doc.can_read(self.user_not_me))
assert_true(doc.can_write(self.user_not_me))
assert_equal(2, doc.get_permissions('read').count())
assert_equal(2, doc.get_permissions('write').count())
# WRITE and LINK_WRITE
doc.share(self.user, name=Document2Permission.LINK_READ_PERM, is_link_on=False)
doc.share(self.user, name=Document2Permission.READ_PERM, users=[])
assert_true(doc.can_read(self.user))
assert_true(doc.can_write(self.user))
assert_true(doc.can_read(self.user_not_me))
assert_true(doc.can_write(self.user_not_me))
assert_equal(1, doc.get_permissions('read').count())
assert_equal(2, doc.get_permissions('write').count())
# Not shared
doc.share(self.user, name=Document2Permission.LINK_WRITE_PERM, is_link_on=False)
doc.share(self.user, name=Document2Permission.WRITE_PERM, users=[])
assert_true(doc.can_read(self.user))
assert_true(doc.can_write(self.user))
assert_false(doc.can_read(self.user_not_me))
assert_false(doc.can_write(self.user_not_me))
assert_equal(1, doc.get_permissions('read').count()) # 1 READ but empty people
assert_false(doc.get_permissions('read')[0].users.all())
assert_false(doc.get_permissions('read')[0].groups.all())
assert_equal(1, doc.get_permissions('write').count()) # 1 WRITE but empty people
assert_false(doc.get_permissions('write')[0].users.all())
assert_false(doc.get_permissions('write')[0].groups.all())
finally:
doc.delete()
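# The share tests above all post the same nested payload to /desktop/api2/doc/share.
# A compact sketch of that payload shape (illustrative only; the field names are taken
# from the requests in these tests, the helper itself is hypothetical):
def _build_share_data(read_users=(), read_groups=(), write_users=(), write_groups=()):
  """Return the JSON-encoded 'data' field expected by the share endpoint."""
  return json.dumps({
    'read': {'user_ids': list(read_users), 'group_ids': list(read_groups)},
    'write': {'user_ids': list(write_users), 'group_ids': list(write_groups)},
  })
# Example: self.client.post('/desktop/api2/doc/share',
#                           {'uuid': json.dumps(doc.uuid), 'data': _build_share_data(read_users=[self.user_not_me.id])})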
class TestDocument2ImportExport(object):
def setUp(self):
self.client = make_logged_in_client(username="perm_user", groupname="default", recreate=True, is_superuser=False)
self.client_not_me = make_logged_in_client(username="not_perm_user", groupname="default", recreate=True, is_superuser=False)
self.user = User.objects.get(username="perm_user")
self.user_not_me = User.objects.get(username="not_perm_user")
grant_access(self.user.username, self.user.username, "desktop")
grant_access(self.user_not_me.username, self.user_not_me.username, "desktop")
self.default_group = get_default_user_group()
# This creates the user directories for the new user
response = self.client.get('/desktop/api2/doc/')
data = json.loads(response.content)
assert_equal('/', data['document']['path'], data)
self.home_dir = Document2.objects.get_home_directory(user=self.user)
self.not_me_home_dir = Document2.objects.get_home_directory(user=self.user_not_me)
def test_export_documents_with_dependencies(self):
query1 = Document2.objects.create(name='query1.sql', type='query-hive', owner=self.user, data={}, parent_directory=self.home_dir)
query2 = Document2.objects.create(name='query2.sql', type='query-hive', owner=self.user, data={}, parent_directory=self.home_dir)
query3 = Document2.objects.create(
name='query3.sql', type='query-hive', owner=self.user, data={}, parent_directory=self.home_dir, is_history=True
)
workflow = Document2.objects.create(name='test.wf', type='oozie-workflow2', owner=self.user, data={}, parent_directory=self.home_dir)
workflow.dependencies.add(query1)
workflow.dependencies.add(query2)
workflow.dependencies.add(query3)
# Test that exporting a workflow exports all of its dependencies except history docs
response = self.client.get('/desktop/api2/doc/export/', {'documents': json.dumps([workflow.id]), 'format': 'json'})
documents = json.loads(response.content)
documents = json.loads(documents)
assert_equal(3, len(documents))
assert_true('test.wf' in [doc['fields']['name'] for doc in documents])
assert_true('query1.sql' in [doc['fields']['name'] for doc in documents])
assert_true('query2.sql' in [doc['fields']['name'] for doc in documents])
assert_false('query3.sql' in [doc['fields']['name'] for doc in documents])
# Test that exporting multiple workflows with overlapping dependencies works
workflow2 = Document2.objects.create(name='test2.wf', type='oozie-workflow2', owner=self.user, data={}, parent_directory=self.home_dir)
workflow2.dependencies.add(query1)
response = self.client.get('/desktop/api2/doc/export/', {'documents': json.dumps([workflow.id, workflow2.id]), 'format': 'json'})
documents = json.loads(response.content)
documents = json.loads(documents)
assert_equal(4, len(documents))
assert_true('test.wf' in [doc['fields']['name'] for doc in documents])
assert_true('test2.wf' in [doc['fields']['name'] for doc in documents])
assert_true('query1.sql' in [doc['fields']['name'] for doc in documents])
assert_true('query2.sql' in [doc['fields']['name'] for doc in documents])
def test_export_documents_file_name(self):
query1 = Document2.objects.create(name='query1.sql', type='query-hive', owner=self.user, data={},
parent_directory=self.home_dir)
query2 = Document2.objects.create(name='query2.sql', type='query-hive', owner=self.user, data={},
parent_directory=self.home_dir)
query3 = Document2.objects.create(name='query3.sql', type='query-hive', owner=self.user, data={},
parent_directory=self.home_dir, is_history=True)
workflow = Document2.objects.create(name='test.wf', type='oozie-workflow2', owner=self.user, data={},
parent_directory=self.home_dir)
workflow.dependencies.add(query1)
workflow.dependencies.add(query2)
workflow.dependencies.add(query3)
# Test that exporting multiple workflows with overlapping dependencies works
workflow2 = Document2.objects.create(name='test2.wf', type='oozie-workflow2', owner=self.user, data={},
parent_directory=self.home_dir)
workflow2.dependencies.add(query1)
# Test that exporting to a file includes the date and number of documents in the filename
response = self.client.get('/desktop/api2/doc/export/', {'documents': json.dumps([workflow.id, workflow2.id])})
assert_equal(
response['Content-Disposition'], 'attachment; filename="hue-documents-%s-(4).json"' % datetime.today().strftime('%Y-%m-%d')
)
# Test that exporting single file gets the name of the document in the filename
response = self.client.get('/desktop/api2/doc/export/', {'documents': json.dumps([workflow.id])})
assert_equal(response['Content-Disposition'], 'attachment; filename="' + workflow.name + '.json"')
def test_export_directories_with_children(self):
# Test that exporting a directory exports children docs
# /
#   dir1/
#     query1.sql
#   dir2/
#     dir3/
#       query2.sql
#   query3.sql
dir1 = Directory.objects.create(name='dir1', owner=self.user, parent_directory=self.home_dir)
doc1 = Document2.objects.create(name='query1.sql', type='query-hive', owner=self.user, data={}, parent_directory=dir1)
dir2 = Directory.objects.create(name='dir2', owner=self.user, parent_directory=self.home_dir)
dir3 = Directory.objects.create(name='dir3', owner=self.user, parent_directory=dir2)
doc2 = Document2.objects.create(name='query2.sql', type='query-hive', owner=self.user, data={}, parent_directory=dir3)
doc3 = Document2.objects.create(name='query3.sql', type='query-hive', owner=self.user, data={}, parent_directory=self.home_dir)
response = self.client.get('/desktop/api2/doc/export/', {'documents': json.dumps([dir1.id, dir2.id, doc3.id]), 'format': 'json'})
documents = json.loads(response.content)
documents = json.loads(documents)
assert_equal(6, len(documents))
assert_true('dir1' in [doc['fields']['name'] for doc in documents])
assert_true('query1.sql' in [doc['fields']['name'] for doc in documents])
assert_true('dir2' in [doc['fields']['name'] for doc in documents])
assert_true('dir3' in [doc['fields']['name'] for doc in documents])
assert_true('query2.sql' in [doc['fields']['name'] for doc in documents])
assert_true('query3.sql' in [doc['fields']['name'] for doc in documents])
def test_import_owned_document(self):
owned_query = Document2.objects.create(
name='query.sql',
type='query-hive',
owner=self.user,
data=json.dumps({'description': 'original_query'}),
parent_directory=self.home_dir
)
# Test that importing an existing doc updates it and retains its owner and UUID
response = self.client.get('/desktop/api2/doc/export/', {'documents': json.dumps([owned_query.id]), 'format': 'json'})
documents = response.content
if isinstance(documents, bytes):
documents = documents.decode('utf-8')
response = self.client.post('/desktop/api2/doc/import/', {'documents': documents})
data = json.loads(response.content)
assert_true('message' in data, data)
assert_true('Installed 1 object' in data['message'], data)
assert_true('count' in data)
assert_equal(1, data['count'])
assert_true('created_count' in data)
assert_equal(0, data['created_count'])
assert_true('updated_count' in data)
assert_equal(1, data['updated_count'])
assert_true('documents' in data)
assert_true('name' in data['documents'][0])
assert_equal('query.sql', data['documents'][0]['name'])
assert_true('type' in data['documents'][0])
assert_equal('query-hive', data['documents'][0]['type'])
assert_true('owner' in data['documents'][0])
assert_equal('perm_user', data['documents'][0]['owner'])
assert_equal(1, Document2.objects.filter(name='query.sql').count())
imported_doc = Document2.objects.get(name='query.sql')
assert_equal(owned_query.uuid, imported_doc.uuid)
assert_equal(owned_query.owner, imported_doc.owner)
# Test that importing a non-existing doc re-creates it and sets its parent to the home directory
Document2.objects.get(name='query.sql').delete()
assert_equal(0, Document2.objects.filter(name='query.sql').count())
response = self.client.post('/desktop/api2/doc/import/', {'documents': documents})
assert_equal(1, Document2.objects.filter(name='query.sql').count())
imported_doc = Document2.objects.get(name='query.sql')
assert_equal(owned_query.uuid, imported_doc.uuid)
assert_equal(owned_query.owner, imported_doc.owner)
assert_equal(owned_query.parent_directory, imported_doc.parent_directory)
def test_import_nonowned_document(self):
owned_query = Document2.objects.create(
name='query.sql',
type='query-hive',
owner=self.user,
data=json.dumps({'description': 'original_query'}),
parent_directory=self.home_dir
)
response = self.client.get('/desktop/api2/doc/export/', {'documents': json.dumps([owned_query.id]), 'format': 'json'})
documents = response.content
if isinstance(documents, bytes):
documents = documents.decode('utf-8')
# Test that importing a non-owned doc copies it and sets its parent to the importer's home directory
response = self.client_not_me.post('/desktop/api2/doc/import/', {'documents': documents})
assert_equal(2, Document2.objects.filter(name='query.sql').count())
imported_doc = Document2.objects.get(name='query.sql', owner=self.user_not_me)
assert_true(owned_query.uuid != imported_doc.uuid)
assert_equal(self.user_not_me, imported_doc.owner)
assert_equal(self.not_me_home_dir.uuid, imported_doc.parent_directory.uuid)
data = json.loads(response.content)
assert_true('count' in data)
assert_equal(1, data['count'])
assert_true('created_count' in data)
assert_equal(1, data['created_count'])
assert_true('updated_count' in data)
assert_equal(0, data['updated_count'])
def test_import_with_history_dependencies(self):
query1 = Document2.objects.create(name='query1.sql', type='query-hive', owner=self.user, data={},
parent_directory=self.home_dir)
query2 = Document2.objects.create(name='query2.sql', type='query-hive', owner=self.user, data={},
parent_directory=self.home_dir, is_history=True)
workflow = Document2.objects.create(name='test.wf', type='oozie-workflow2', owner=self.user, data={},
parent_directory=self.home_dir)
workflow.dependencies.add(query1)
workflow.dependencies.add(query2)
response = self.client.get('/desktop/api2/doc/export/', {'documents': json.dumps([workflow.id]), 'format': 'json'})
documents = response.content
# Delete the original documents from the DB so that the import re-creates them
query1.delete()
query2.delete()
workflow.delete()
if not isinstance(documents, str):
documents = documents.decode('utf-8')
response = self.client_not_me.post('/desktop/api2/doc/import/', {'documents': documents})
assert_true(Document2.objects.filter(name='query1.sql').exists())
assert_false(Document2.objects.filter(name='query2.sql').exists())
data = json.loads(response.content)
assert_true('count' in data)
assert_equal(2, data['count'])
assert_true('created_count' in data)
assert_equal(2, data['created_count'])
assert_true('updated_count' in data)
assert_equal(0, data['updated_count'])
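# A minimal sketch of the export/import round trip exercised by the tests above
# (hypothetical helper, not part of the test suite); the endpoints and parameters
# mirror the requests made in this class:
def _export_then_import(export_client, import_client, doc_ids):
  """Export the given document ids as JSON and re-import them with another client."""
  response = export_client.get('/desktop/api2/doc/export/',
                               {'documents': json.dumps(doc_ids), 'format': 'json'})
  documents = response.content
  if isinstance(documents, bytes):
    documents = documents.decode('utf-8')
  return import_client.post('/desktop/api2/doc/import/', {'documents': documents})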
| {
"content_hash": "22ff1b501e80a580a2403e5159ce7870",
"timestamp": "",
"source": "github",
"line_count": 1579,
"max_line_length": 139,
"avg_line_length": 40.671944268524385,
"alnum_prop": 0.6545211068030706,
"repo_name": "cloudera/hue",
"id": "00c797e17a8d4b0c218142729a163b3bfa94d77c",
"size": "65043",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "desktop/core/src/desktop/models_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "2347"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "BASIC",
"bytes": "2884"
},
{
"name": "Batchfile",
"bytes": "143575"
},
{
"name": "C",
"bytes": "5129166"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "718011"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "680715"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "Closure Templates",
"bytes": "1072"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "Cython",
"bytes": "1016963"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "13576"
},
{
"name": "EJS",
"bytes": "752"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GAP",
"bytes": "29873"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "641"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "28328425"
},
{
"name": "Haml",
"bytes": "920"
},
{
"name": "Handlebars",
"bytes": "173"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "Java",
"bytes": "457398"
},
{
"name": "JavaScript",
"bytes": "39181239"
},
{
"name": "Jinja",
"bytes": "356"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Less",
"bytes": "396102"
},
{
"name": "Lex",
"bytes": "218764"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1751"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "1025937"
},
{
"name": "Mako",
"bytes": "3644004"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "29403"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "84273"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "6235"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Pug",
"bytes": "584"
},
{
"name": "Python",
"bytes": "92881549"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "484108"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "SCSS",
"bytes": "78508"
},
{
"name": "Sass",
"bytes": "770"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "249165"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "SourcePawn",
"bytes": "948"
},
{
"name": "Stylus",
"bytes": "682"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "341963"
},
{
"name": "Twig",
"bytes": "761"
},
{
"name": "TypeScript",
"bytes": "1241396"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "Vue",
"bytes": "350385"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "522199"
},
{
"name": "Yacc",
"bytes": "1070437"
},
{
"name": "jq",
"bytes": "4"
}
],
"symlink_target": ""
} |
class IPStreetError(Exception):
pass
class ParamsInvalidError(IPStreetError):
pass
class APIConnectionError(IPStreetError):
pass
class SendError(IPStreetError):
pass
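# A minimal usage sketch for the exception hierarchy above (illustrative only;
# `call` stands in for any SDK function that may raise these errors and is an
# assumption, not part of the SDK):
def safe_call(call, *args, **kwargs):
    try:
        return call(*args, **kwargs)
    except ParamsInvalidError:
        raise  # bad input parameters; surface to the caller immediately
    except (APIConnectionError, SendError):
        return None  # transport-level failure; the caller may retry
    except IPStreetError:
        return None  # any other SDK error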
| {
"content_hash": "75b2dd41d3582b23fd31773ce41ae4bb",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 40,
"avg_line_length": 13.428571428571429,
"alnum_prop": 0.7553191489361702,
"repo_name": "IPStreet/PythonSDK",
"id": "e31e9515bb0bc029cb502984ef96936796648a0f",
"size": "188",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "IPStreet/error.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35384"
}
],
"symlink_target": ""
} |
"""Xception tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
from tensor2tensor.data_generators import problem_hparams
from tensor2tensor.models import xception
import tensorflow as tf
class XceptionTest(tf.test.TestCase):
def testXception(self):
vocab_size = 9
x = np.random.random_integers(1, high=vocab_size - 1, size=(3, 5, 1, 1))
y = np.random.random_integers(1, high=vocab_size - 1, size=(3, 1, 1, 1))
hparams = xception.xception_tiny()
p_hparams = problem_hparams.test_problem_hparams(vocab_size, vocab_size)
with self.test_session() as session:
features = {
"inputs": tf.constant(x, dtype=tf.int32),
"targets": tf.constant(y, dtype=tf.int32),
}
model = xception.Xception(
hparams, tf.estimator.ModeKeys.TRAIN, p_hparams)
sharded_logits, _ = model.model_fn(features)
logits = tf.concat(sharded_logits, 0)
session.run(tf.global_variables_initializer())
res = session.run(logits)
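# Logits are expected to have shape (batch=3, length=5, 1, 1, vocab_size):
# one distribution over the vocabulary per input position.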
self.assertEqual(res.shape, (3, 5, 1, 1, vocab_size))
if __name__ == "__main__":
tf.test.main()
| {
"content_hash": "e3785646bda68822db8d90bfff2f08db",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 76,
"avg_line_length": 30.125,
"alnum_prop": 0.6655601659751037,
"repo_name": "waterblue13/tensor2tensor",
"id": "9114fb78157f45dcbca7d346d10e12d886d31a38",
"size": "1811",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensor2tensor/models/xception_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "10699"
},
{
"name": "Jupyter Notebook",
"bytes": "14442"
},
{
"name": "Python",
"bytes": "1070492"
},
{
"name": "Shell",
"bytes": "744"
}
],
"symlink_target": ""
} |
import builtins
import unittest
import xml
import pytest
import astroid
from astroid import MANAGER, builder, exceptions, objects, test_utils, util
BUILTINS = MANAGER.astroid_cache[builtins.__name__]
class InstanceModelTest(unittest.TestCase):
def test_instance_special_model(self):
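# builder.extract_node returns the nodes marked with the trailing '#@' comments
# in the snippet, in the order they appear.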
ast_nodes = builder.extract_node(
"""
class A:
"test"
def __init__(self):
self.a = 42
a = A()
a.__class__ #@
a.__module__ #@
a.__doc__ #@
a.__dict__ #@
""",
module_name="fake_module",
)
cls = next(ast_nodes[0].infer())
self.assertIsInstance(cls, astroid.ClassDef)
self.assertEqual(cls.name, "A")
module = next(ast_nodes[1].infer())
self.assertIsInstance(module, astroid.Const)
self.assertEqual(module.value, "fake_module")
doc = next(ast_nodes[2].infer())
self.assertIsInstance(doc, astroid.Const)
self.assertEqual(doc.value, "test")
dunder_dict = next(ast_nodes[3].infer())
self.assertIsInstance(dunder_dict, astroid.Dict)
attr = next(dunder_dict.getitem(astroid.Const("a")).infer())
self.assertIsInstance(attr, astroid.Const)
self.assertEqual(attr.value, 42)
@pytest.mark.xfail(reason="Instance lookup cannot override object model")
def test_instance_local_attributes_overrides_object_model(self):
# The instance lookup needs to be changed in order for this to work.
ast_node = builder.extract_node(
"""
class A:
@property
def __dict__(self):
return []
A().__dict__
"""
)
inferred = next(ast_node.infer())
self.assertIsInstance(inferred, astroid.List)
self.assertEqual(inferred.elts, [])
class BoundMethodModelTest(unittest.TestCase):
def test_bound_method_model(self):
ast_nodes = builder.extract_node(
"""
class A:
def test(self): pass
a = A()
a.test.__func__ #@
a.test.__self__ #@
"""
)
func = next(ast_nodes[0].infer())
self.assertIsInstance(func, astroid.FunctionDef)
self.assertEqual(func.name, "test")
self_ = next(ast_nodes[1].infer())
self.assertIsInstance(self_, astroid.Instance)
self.assertEqual(self_.name, "A")
class UnboundMethodModelTest(unittest.TestCase):
def test_unbound_method_model(self):
ast_nodes = builder.extract_node(
"""
class A:
def test(self): pass
t = A.test
t.__class__ #@
t.__func__ #@
t.__self__ #@
t.im_class #@
t.im_func #@
t.im_self #@
"""
)
cls = next(ast_nodes[0].infer())
self.assertIsInstance(cls, astroid.ClassDef)
unbound_name = "function"
self.assertEqual(cls.name, unbound_name)
func = next(ast_nodes[1].infer())
self.assertIsInstance(func, astroid.FunctionDef)
self.assertEqual(func.name, "test")
self_ = next(ast_nodes[2].infer())
self.assertIsInstance(self_, astroid.Const)
self.assertIsNone(self_.value)
self.assertEqual(cls.name, next(ast_nodes[3].infer()).name)
self.assertEqual(func, next(ast_nodes[4].infer()))
self.assertIsNone(next(ast_nodes[5].infer()).value)
class ClassModelTest(unittest.TestCase):
def test_priority_to_local_defined_values(self):
ast_node = builder.extract_node(
"""
class A:
__doc__ = "first"
A.__doc__ #@
"""
)
inferred = next(ast_node.infer())
self.assertIsInstance(inferred, astroid.Const)
self.assertEqual(inferred.value, "first")
def test_class_model_correct_mro_subclasses_proxied(self):
ast_nodes = builder.extract_node(
"""
class A(object):
pass
A.mro #@
A.__subclasses__ #@
"""
)
for node in ast_nodes:
inferred = next(node.infer())
self.assertIsInstance(inferred, astroid.BoundMethod)
self.assertIsInstance(inferred._proxied, astroid.FunctionDef)
self.assertIsInstance(inferred.bound, astroid.ClassDef)
self.assertEqual(inferred.bound.name, "type")
def test_class_model(self):
ast_nodes = builder.extract_node(
"""
class A(object):
"test"
class B(A): pass
class C(A): pass
A.__module__ #@
A.__name__ #@
A.__qualname__ #@
A.__doc__ #@
A.__mro__ #@
A.mro() #@
A.__bases__ #@
A.__class__ #@
A.__dict__ #@
A.__subclasses__() #@
""",
module_name="fake_module",
)
module = next(ast_nodes[0].infer())
self.assertIsInstance(module, astroid.Const)
self.assertEqual(module.value, "fake_module")
name = next(ast_nodes[1].infer())
self.assertIsInstance(name, astroid.Const)
self.assertEqual(name.value, "A")
qualname = next(ast_nodes[2].infer())
self.assertIsInstance(qualname, astroid.Const)
self.assertEqual(qualname.value, "fake_module.A")
doc = next(ast_nodes[3].infer())
self.assertIsInstance(doc, astroid.Const)
self.assertEqual(doc.value, "test")
mro = next(ast_nodes[4].infer())
self.assertIsInstance(mro, astroid.Tuple)
self.assertEqual([cls.name for cls in mro.elts], ["A", "object"])
called_mro = next(ast_nodes[5].infer())
self.assertEqual(called_mro.elts, mro.elts)
bases = next(ast_nodes[6].infer())
self.assertIsInstance(bases, astroid.Tuple)
self.assertEqual([cls.name for cls in bases.elts], ["object"])
cls = next(ast_nodes[7].infer())
self.assertIsInstance(cls, astroid.ClassDef)
self.assertEqual(cls.name, "type")
cls_dict = next(ast_nodes[8].infer())
self.assertIsInstance(cls_dict, astroid.Dict)
subclasses = next(ast_nodes[9].infer())
self.assertIsInstance(subclasses, astroid.List)
self.assertEqual([cls.name for cls in subclasses.elts], ["B", "C"])
class ModuleModelTest(unittest.TestCase):
def test_priority_to_local_defined_values(self):
ast_node = astroid.parse(
"""
__file__ = "mine"
"""
)
file_value = next(ast_node.igetattr("__file__"))
self.assertIsInstance(file_value, astroid.Const)
self.assertEqual(file_value.value, "mine")
def test__path__not_a_package(self):
ast_node = builder.extract_node(
"""
import sys
sys.__path__ #@
"""
)
with self.assertRaises(exceptions.InferenceError):
next(ast_node.infer())
def test_module_model(self):
ast_nodes = builder.extract_node(
"""
import xml
xml.__path__ #@
xml.__name__ #@
xml.__doc__ #@
xml.__file__ #@
xml.__spec__ #@
xml.__loader__ #@
xml.__cached__ #@
xml.__package__ #@
xml.__dict__ #@
"""
)
path = next(ast_nodes[0].infer())
self.assertIsInstance(path, astroid.List)
self.assertIsInstance(path.elts[0], astroid.Const)
self.assertEqual(path.elts[0].value, xml.__path__[0])
name = next(ast_nodes[1].infer())
self.assertIsInstance(name, astroid.Const)
self.assertEqual(name.value, "xml")
doc = next(ast_nodes[2].infer())
self.assertIsInstance(doc, astroid.Const)
self.assertEqual(doc.value, xml.__doc__)
file_ = next(ast_nodes[3].infer())
self.assertIsInstance(file_, astroid.Const)
self.assertEqual(file_.value, xml.__file__.replace(".pyc", ".py"))
for ast_node in ast_nodes[4:7]:
inferred = next(ast_node.infer())
self.assertIs(inferred, astroid.Uninferable)
package = next(ast_nodes[7].infer())
self.assertIsInstance(package, astroid.Const)
self.assertEqual(package.value, "xml")
dict_ = next(ast_nodes[8].infer())
self.assertIsInstance(dict_, astroid.Dict)
class FunctionModelTest(unittest.TestCase):
def test_partial_descriptor_support(self):
bound, result = builder.extract_node(
"""
class A(object): pass
def test(self): return 42
f = test.__get__(A(), A)
f #@
f() #@
"""
)
bound = next(bound.infer())
self.assertIsInstance(bound, astroid.BoundMethod)
self.assertEqual(bound._proxied._proxied.name, "test")
result = next(result.infer())
self.assertIsInstance(result, astroid.Const)
self.assertEqual(result.value, 42)
def test___get__has_extra_params_defined(self):
node = builder.extract_node(
"""
def test(self): return 42
test.__get__
"""
)
inferred = next(node.infer())
self.assertIsInstance(inferred, astroid.BoundMethod)
args = inferred.args.args
self.assertEqual(len(args), 2)
self.assertEqual([arg.name for arg in args], ["self", "type"])
@test_utils.require_version(minver="3.8")
def test__get__and_positional_only_args(self):
node = builder.extract_node(
"""
def test(self, a, b, /, c): return a + b + c
test.__get__(test)(1, 2, 3)
"""
)
inferred = next(node.infer())
assert inferred is util.Uninferable
@pytest.mark.xfail(reason="Descriptors cannot infer what self is")
def test_descriptor_not_inferrring_self(self):
# We can't infer __get__(X, Y)() when the bound function
# uses self, because the tree's parent is not propagated well enough.
result = builder.extract_node(
"""
class A(object):
x = 42
def test(self): return self.x
f = test.__get__(A(), A)
f() #@
"""
)
result = next(result.infer())
self.assertIsInstance(result, astroid.Const)
self.assertEqual(result.value, 42)
def test_descriptors_binding_invalid(self):
ast_nodes = builder.extract_node(
"""
class A: pass
def test(self): return 42
test.__get__()() #@
test.__get__(2, 3, 4) #@
"""
)
for node in ast_nodes:
with self.assertRaises(exceptions.InferenceError):
next(node.infer())
@pytest.mark.xfail(reason="Relying on path copy")
def test_descriptor_error_regression(self):
"""Make sure the following code does
node cause an exception"""
node = builder.extract_node(
"""
class MyClass:
text = "MyText"
def mymethod1(self):
return self.text
def mymethod2(self):
return self.mymethod1.__get__(self, MyClass)
cl = MyClass().mymethod2()()
cl #@
"""
)
[const] = node.inferred()
assert const.value == "MyText"
def test_function_model(self):
ast_nodes = builder.extract_node(
'''
def func(a=1, b=2):
"""test"""
func.__name__ #@
func.__doc__ #@
func.__qualname__ #@
func.__module__ #@
func.__defaults__ #@
func.__dict__ #@
func.__globals__ #@
func.__code__ #@
func.__closure__ #@
''',
module_name="fake_module",
)
name = next(ast_nodes[0].infer())
self.assertIsInstance(name, astroid.Const)
self.assertEqual(name.value, "func")
doc = next(ast_nodes[1].infer())
self.assertIsInstance(doc, astroid.Const)
self.assertEqual(doc.value, "test")
qualname = next(ast_nodes[2].infer())
self.assertIsInstance(qualname, astroid.Const)
self.assertEqual(qualname.value, "fake_module.func")
module = next(ast_nodes[3].infer())
self.assertIsInstance(module, astroid.Const)
self.assertEqual(module.value, "fake_module")
defaults = next(ast_nodes[4].infer())
self.assertIsInstance(defaults, astroid.Tuple)
self.assertEqual([default.value for default in defaults.elts], [1, 2])
dict_ = next(ast_nodes[5].infer())
self.assertIsInstance(dict_, astroid.Dict)
globals_ = next(ast_nodes[6].infer())
self.assertIsInstance(globals_, astroid.Dict)
for ast_node in ast_nodes[7:9]:
self.assertIs(next(ast_node.infer()), astroid.Uninferable)
def test_empty_return_annotation(self):
ast_node = builder.extract_node(
"""
def test(): pass
test.__annotations__
"""
)
annotations = next(ast_node.infer())
self.assertIsInstance(annotations, astroid.Dict)
self.assertEqual(len(annotations.items), 0)
def test_builtin_dunder_init_does_not_crash_when_accessing_annotations(self):
ast_node = builder.extract_node(
"""
class Class:
@classmethod
def class_method(cls):
cls.__init__.__annotations__ #@
"""
)
inferred = next(ast_node.infer())
self.assertIsInstance(inferred, astroid.Dict)
self.assertEqual(len(inferred.items), 0)
def test_annotations_kwdefaults(self):
ast_node = builder.extract_node(
"""
def test(a: 1, *args: 2, f:4='lala', **kwarg:3)->2: pass
test.__annotations__ #@
test.__kwdefaults__ #@
"""
)
annotations = next(ast_node[0].infer())
self.assertIsInstance(annotations, astroid.Dict)
self.assertIsInstance(
annotations.getitem(astroid.Const("return")), astroid.Const
)
self.assertEqual(annotations.getitem(astroid.Const("return")).value, 2)
self.assertIsInstance(annotations.getitem(astroid.Const("a")), astroid.Const)
self.assertEqual(annotations.getitem(astroid.Const("a")).value, 1)
self.assertEqual(annotations.getitem(astroid.Const("args")).value, 2)
self.assertEqual(annotations.getitem(astroid.Const("kwarg")).value, 3)
self.assertEqual(annotations.getitem(astroid.Const("f")).value, 4)
kwdefaults = next(ast_node[1].infer())
self.assertIsInstance(kwdefaults, astroid.Dict)
# self.assertEqual(kwdefaults.getitem('f').value, 'lala')
@test_utils.require_version(minver="3.8")
def test_annotation_positional_only(self):
ast_node = builder.extract_node(
"""
def test(a: 1, b: 2, /, c: 3): pass
test.__annotations__ #@
"""
)
annotations = next(ast_node.infer())
self.assertIsInstance(annotations, astroid.Dict)
self.assertIsInstance(annotations.getitem(astroid.Const("a")), astroid.Const)
self.assertEqual(annotations.getitem(astroid.Const("a")).value, 1)
self.assertEqual(annotations.getitem(astroid.Const("b")).value, 2)
self.assertEqual(annotations.getitem(astroid.Const("c")).value, 3)
class GeneratorModelTest(unittest.TestCase):
def test_model(self):
ast_nodes = builder.extract_node(
"""
def test():
"a"
yield
gen = test()
gen.__name__ #@
gen.__doc__ #@
gen.gi_code #@
gen.gi_frame #@
gen.send #@
"""
)
name = next(ast_nodes[0].infer())
self.assertEqual(name.value, "test")
doc = next(ast_nodes[1].infer())
self.assertEqual(doc.value, "a")
gi_code = next(ast_nodes[2].infer())
self.assertIsInstance(gi_code, astroid.ClassDef)
self.assertEqual(gi_code.name, "gi_code")
gi_frame = next(ast_nodes[3].infer())
self.assertIsInstance(gi_frame, astroid.ClassDef)
self.assertEqual(gi_frame.name, "gi_frame")
send = next(ast_nodes[4].infer())
self.assertIsInstance(send, astroid.BoundMethod)
class ExceptionModelTest(unittest.TestCase):
def test_valueerror_py3(self):
ast_nodes = builder.extract_node(
"""
try:
x[42]
except ValueError as err:
err.args #@
err.__traceback__ #@
err.message #@
"""
)
args = next(ast_nodes[0].infer())
self.assertIsInstance(args, astroid.Tuple)
tb = next(ast_nodes[1].infer())
self.assertIsInstance(tb, astroid.Instance)
self.assertEqual(tb.name, "traceback")
with self.assertRaises(exceptions.InferenceError):
next(ast_nodes[2].infer())
def test_syntax_error(self):
ast_node = builder.extract_node(
"""
try:
x[42]
except SyntaxError as err:
err.text #@
"""
)
inferred = next(ast_node.infer())
assert isinstance(inferred, astroid.Const)
def test_oserror(self):
ast_nodes = builder.extract_node(
"""
try:
raise OSError("a")
except OSError as err:
err.filename #@
err.filename2 #@
err.errno #@
"""
)
expected_values = ["", "", 0]
for node, value in zip(ast_nodes, expected_values):
inferred = next(node.infer())
assert isinstance(inferred, astroid.Const)
assert inferred.value == value
def test_unicodedecodeerror(self):
code = """
try:
raise UnicodeDecodeError("utf-8", "blob", 0, 1, "reason")
except UnicodeDecodeError as error:
error.object[:1] #@
"""
node = builder.extract_node(code)
inferred = next(node.infer())
assert isinstance(inferred, astroid.Const)
def test_import_error(self):
ast_nodes = builder.extract_node(
"""
try:
raise ImportError("a")
except ImportError as err:
err.name #@
err.path #@
"""
)
for node in ast_nodes:
inferred = next(node.infer())
assert isinstance(inferred, astroid.Const)
assert inferred.value == ""
def test_exception_instance_correctly_instantiated(self):
ast_node = builder.extract_node(
"""
try:
raise ImportError("a")
except ImportError as err:
err #@
"""
)
inferred = next(ast_node.infer())
assert isinstance(inferred, astroid.Instance)
cls = next(inferred.igetattr("__class__"))
assert isinstance(cls, astroid.ClassDef)
class DictObjectModelTest(unittest.TestCase):
def test__class__(self):
ast_node = builder.extract_node("{}.__class__")
inferred = next(ast_node.infer())
self.assertIsInstance(inferred, astroid.ClassDef)
self.assertEqual(inferred.name, "dict")
def test_attributes_inferred_as_methods(self):
ast_nodes = builder.extract_node(
"""
{}.values #@
{}.items #@
{}.keys #@
"""
)
for node in ast_nodes:
inferred = next(node.infer())
self.assertIsInstance(inferred, astroid.BoundMethod)
def test_wrapper_objects_for_dict_methods_python3(self):
ast_nodes = builder.extract_node(
"""
{1:1, 2:3}.values() #@
{1:1, 2:3}.keys() #@
{1:1, 2:3}.items() #@
"""
)
values = next(ast_nodes[0].infer())
self.assertIsInstance(values, objects.DictValues)
self.assertEqual([elt.value for elt in values.elts], [1, 3])
keys = next(ast_nodes[1].infer())
self.assertIsInstance(keys, objects.DictKeys)
self.assertEqual([elt.value for elt in keys.elts], [1, 2])
items = next(ast_nodes[2].infer())
self.assertIsInstance(items, objects.DictItems)
class LruCacheModelTest(unittest.TestCase):
def test_lru_cache(self):
ast_nodes = builder.extract_node(
"""
import functools
class Foo(object):
@functools.lru_cache()
def foo():
pass
f = Foo()
f.foo.cache_clear #@
f.foo.__wrapped__ #@
f.foo.cache_info() #@
"""
)
cache_clear = next(ast_nodes[0].infer())
self.assertIsInstance(cache_clear, astroid.BoundMethod)
wrapped = next(ast_nodes[1].infer())
self.assertIsInstance(wrapped, astroid.FunctionDef)
self.assertEqual(wrapped.name, "foo")
cache_info = next(ast_nodes[2].infer())
self.assertIsInstance(cache_info, astroid.Instance)
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "b084224daedc9c88957fa87899123db0",
"timestamp": "",
"source": "github",
"line_count": 675,
"max_line_length": 85,
"avg_line_length": 31.17185185185185,
"alnum_prop": 0.5557720640653961,
"repo_name": "ruchee/vimrc",
"id": "5d438a65fbe3c404d15c602da086e6fcf3ab3045",
"size": "21688",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vimfiles/bundle/vim-python/submodules/astroid/tests/unittest_object_model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "22028"
},
{
"name": "Blade",
"bytes": "3314"
},
{
"name": "C#",
"bytes": "1734"
},
{
"name": "CSS",
"bytes": "31547"
},
{
"name": "Clojure",
"bytes": "47036"
},
{
"name": "CoffeeScript",
"bytes": "9274"
},
{
"name": "Common Lisp",
"bytes": "54314"
},
{
"name": "D",
"bytes": "11562"
},
{
"name": "Dockerfile",
"bytes": "7620"
},
{
"name": "Elixir",
"bytes": "41696"
},
{
"name": "Emacs Lisp",
"bytes": "10489"
},
{
"name": "Erlang",
"bytes": "137788"
},
{
"name": "F#",
"bytes": "2230"
},
{
"name": "Go",
"bytes": "54655"
},
{
"name": "HTML",
"bytes": "178954"
},
{
"name": "Haml",
"bytes": "39"
},
{
"name": "Haskell",
"bytes": "2031"
},
{
"name": "JavaScript",
"bytes": "9086"
},
{
"name": "Julia",
"bytes": "9540"
},
{
"name": "Kotlin",
"bytes": "8669"
},
{
"name": "Less",
"bytes": "327"
},
{
"name": "Makefile",
"bytes": "87500"
},
{
"name": "Mustache",
"bytes": "3375"
},
{
"name": "Nix",
"bytes": "1860"
},
{
"name": "PHP",
"bytes": "9238"
},
{
"name": "PLpgSQL",
"bytes": "33747"
},
{
"name": "Perl",
"bytes": "84200"
},
{
"name": "PostScript",
"bytes": "3891"
},
{
"name": "Python",
"bytes": "7366233"
},
{
"name": "Racket",
"bytes": "1150"
},
{
"name": "Raku",
"bytes": "21146"
},
{
"name": "Ruby",
"bytes": "133344"
},
{
"name": "SCSS",
"bytes": "327"
},
{
"name": "Sass",
"bytes": "308"
},
{
"name": "Scala",
"bytes": "13125"
},
{
"name": "Shell",
"bytes": "52916"
},
{
"name": "Smarty",
"bytes": "300"
},
{
"name": "Swift",
"bytes": "11436"
},
{
"name": "TypeScript",
"bytes": "4663"
},
{
"name": "Vim Script",
"bytes": "10545492"
},
{
"name": "Vim Snippet",
"bytes": "559139"
}
],
"symlink_target": ""
} |
"""
SuperSocket.
"""
from __future__ import absolute_import
from select import select, error as select_error
import ctypes
import errno
import socket
import struct
import time
from scapy.config import conf
from scapy.consts import DARWIN, WINDOWS
from scapy.data import MTU, ETH_P_IP, SOL_PACKET, SO_TIMESTAMPNS
from scapy.compat import raw
from scapy.error import warning, log_runtime
from scapy.interfaces import network_name
import scapy.modules.six as six
from scapy.packet import Packet
import scapy.packet
from scapy.plist import (
PacketList,
SndRcvList,
_PacketIterable,
)
from scapy.utils import PcapReader, tcpdump
# Typing imports
from scapy.interfaces import _GlobInterfaceType
from scapy.compat import (
Any,
Iterator,
List,
Optional,
Tuple,
Type,
cast,
)
# Utils
class _SuperSocket_metaclass(type):
desc = None # type: Optional[str]
def __repr__(self):
# type: () -> str
if self.desc is not None:
return "<%s: %s>" % (self.__name__, self.desc)
else:
return "<%s>" % self.__name__
# Used to get ancillary data
PACKET_AUXDATA = 8 # type: int
ETH_P_8021Q = 0x8100 # type: int
TP_STATUS_VLAN_VALID = 1 << 4 # type: int
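# ctypes mirror of the Linux kernel struct tpacket_auxdata; PACKET_AUXDATA
# ancillary data from AF_PACKET sockets is unpacked into this layout below.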
class tpacket_auxdata(ctypes.Structure):
_fields_ = [
("tp_status", ctypes.c_uint),
("tp_len", ctypes.c_uint),
("tp_snaplen", ctypes.c_uint),
("tp_mac", ctypes.c_ushort),
("tp_net", ctypes.c_ushort),
("tp_vlan_tci", ctypes.c_ushort),
("tp_padding", ctypes.c_ushort),
] # type: List[Tuple[str, Any]]
# SuperSocket
@six.add_metaclass(_SuperSocket_metaclass)
class SuperSocket:
closed = 0 # type: int
nonblocking_socket = False # type: bool
auxdata_available = False # type: bool
def __init__(self,
family=socket.AF_INET, # type: int
type=socket.SOCK_STREAM, # type: int
proto=0, # type: int
iface=None, # type: Optional[_GlobInterfaceType]
**kwargs # type: Any
):
# type: (...) -> None
self.ins = socket.socket(family, type, proto) # type: socket.socket
self.outs = self.ins # type: Optional[socket.socket]
self.promisc = None
self.iface = iface
def send(self, x):
# type: (Packet) -> int
sx = raw(x)
try:
x.sent_time = time.time()
except AttributeError:
pass
if self.outs:
return self.outs.send(sx)
else:
return 0
if six.PY2:
def _recv_raw(self, sock, x):
# type: (socket.socket, int) -> Tuple[bytes, Any, Optional[float]]
"""Internal function to receive a Packet"""
pkt, sa_ll = sock.recvfrom(x)
return pkt, sa_ll, None
else:
def _recv_raw(self, sock, x):
# type: (socket.socket, int) -> Tuple[bytes, Any, Optional[float]]
"""Internal function to receive a Packet,
and process ancillary data.
"""
timestamp = None
if not self.auxdata_available:
pkt, _, _, sa_ll = sock.recvmsg(x)
return pkt, sa_ll, timestamp
flags_len = socket.CMSG_LEN(4096)
pkt, ancdata, flags, sa_ll = sock.recvmsg(x, flags_len)
if not pkt:
return pkt, sa_ll, timestamp
for cmsg_lvl, cmsg_type, cmsg_data in ancdata:
# Check available ancillary data
if (cmsg_lvl == SOL_PACKET and cmsg_type == PACKET_AUXDATA):
# Parse AUXDATA
try:
auxdata = tpacket_auxdata.from_buffer_copy(cmsg_data)
except ValueError:
# Note: according to Python documentation, recvmsg()
# can return a truncated message. A ValueError
# exception likely indicates that Auxiliary
# Data is not supported by the Linux kernel.
return pkt, sa_ll, timestamp
if auxdata.tp_vlan_tci != 0 or \
auxdata.tp_status & TP_STATUS_VLAN_VALID:
# Insert VLAN tag
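# The 802.1Q tag (TPID 0x8100 + TCI) is spliced back in right after the
# 12-byte destination/source MAC addresses, so dissectors see a normal
# tagged Ethernet frame.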
tag = struct.pack(
"!HH",
ETH_P_8021Q,
auxdata.tp_vlan_tci
)
pkt = pkt[:12] + tag + pkt[12:]
elif cmsg_lvl == socket.SOL_SOCKET and \
cmsg_type == SO_TIMESTAMPNS:
length = len(cmsg_data)
if length == 16: # __kernel_timespec
tmp = struct.unpack("ll", cmsg_data)
elif length == 8: # timespec
tmp = struct.unpack("ii", cmsg_data)
else:
log_runtime.warning("Unknown timespec format.. ?!")
continue
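# tmp holds (seconds, nanoseconds) from the SO_TIMESTAMPNS timespec;
# combine them into a single float timestamp.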
timestamp = tmp[0] + tmp[1] * 1e-9
return pkt, sa_ll, timestamp
def recv_raw(self, x=MTU):
# type: (int) -> Tuple[Optional[Type[Packet]], Optional[bytes], Optional[float]] # noqa: E501
"""Returns a tuple containing (cls, pkt_data, time)"""
return conf.raw_layer, self.ins.recv(x), None
def recv(self, x=MTU):
# type: (int) -> Optional[Packet]
cls, val, ts = self.recv_raw(x)
if not val or not cls:
return None
try:
pkt = cls(val) # type: Packet
except KeyboardInterrupt:
raise
except Exception:
if conf.debug_dissector:
from scapy.sendrecv import debug
debug.crashed_on = (cls, val)
raise
pkt = conf.raw_layer(val)
if ts:
pkt.time = ts
return pkt
def fileno(self):
# type: () -> int
return self.ins.fileno()
def close(self):
# type: () -> None
if self.closed:
return
self.closed = True
if getattr(self, "outs", None):
if getattr(self, "ins", None) != self.outs:
if self.outs and (WINDOWS or self.outs.fileno() != -1):
self.outs.close()
if getattr(self, "ins", None):
if WINDOWS or self.ins.fileno() != -1:
self.ins.close()
def sr(self, *args, **kargs):
# type: (Any, Any) -> Tuple[SndRcvList, PacketList]
from scapy import sendrecv
ans, unans = sendrecv.sndrcv(self, *args, **kargs) # type: SndRcvList, PacketList # noqa: E501
return ans, unans
def sr1(self, *args, **kargs):
# type: (Any, Any) -> Optional[Packet]
from scapy import sendrecv
ans = sendrecv.sndrcv(self, *args, **kargs)[0] # type: SndRcvList
if len(ans) > 0:
pkt = ans[0][1] # type: Packet
return pkt
else:
return None
def sniff(self, *args, **kargs):
# type: (Any, Any) -> PacketList
from scapy import sendrecv
pkts = sendrecv.sniff(opened_socket=self, *args, **kargs) # type: PacketList # noqa: E501
return pkts
def tshark(self, *args, **kargs):
# type: (Any, Any) -> None
from scapy import sendrecv
sendrecv.tshark(opened_socket=self, *args, **kargs)
# TODO: use 'scapy.ansmachine.AnsweringMachine' when typed
def am(self,
cls, # type: Type[Any]
*args, # type: Any
**kwargs # type: Any
):
# type: (...) -> Any
"""
Creates an AnsweringMachine associated with this socket.
:param cls: A subclass of AnsweringMachine to instantiate
"""
return cls(*args, opened_socket=self, socket=self, **kwargs)
@staticmethod
def select(sockets, remain=conf.recv_poll_rate):
# type: (List[SuperSocket], Optional[float]) -> List[SuperSocket]
"""This function is called during sendrecv() routine to select
the available sockets.
:param sockets: an array of sockets that need to be selected
:returns: an array of sockets that were selected and
the function to be called next to get the packets (e.g. recv)
"""
try:
inp, _, _ = select(sockets, [], [], remain)
except (IOError, select_error) as exc:
# select.error has no .errno attribute
if not exc.args or exc.args[0] != errno.EINTR:
raise
return inp
def __del__(self):
# type: () -> None
"""Close the socket"""
self.close()
def __enter__(self):
# type: () -> SuperSocket
return self
def __exit__(self, exc_type, exc_value, traceback):
# type: (Optional[Type[BaseException]], Optional[BaseException], Optional[Any]) -> None # noqa: E501
"""Close the socket"""
self.close()
class L3RawSocket(SuperSocket):
desc = "Layer 3 using Raw sockets (PF_INET/SOCK_RAW)"
def __init__(self,
type=ETH_P_IP, # type: int
filter=None, # type: Optional[str]
iface=None, # type: Optional[_GlobInterfaceType]
promisc=None, # type: Optional[bool]
nofilter=0 # type: int
):
# type: (...) -> None
self.outs = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW) # noqa: E501
self.outs.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
self.ins = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(type)) # noqa: E501
self.iface = iface
if iface is not None:
iface = network_name(iface)
self.ins.bind((iface, type))
if not six.PY2:
try:
# Receive Auxiliary Data (VLAN tags)
self.ins.setsockopt(SOL_PACKET, PACKET_AUXDATA, 1)
self.ins.setsockopt(
socket.SOL_SOCKET,
SO_TIMESTAMPNS,
1
)
self.auxdata_available = True
except OSError:
# Note: Auxiliary Data is only supported since
# Linux 2.6.21
msg = "Your Linux Kernel does not support Auxiliary Data!"
log_runtime.info(msg)
def recv(self, x=MTU):
# type: (int) -> Optional[Packet]
data, sa_ll, ts = self._recv_raw(self.ins, x)
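# For AF_PACKET sockets sa_ll is (ifname, proto, pkttype, hatype, addr):
# pkttype filters out packets we sent ourselves, while proto/hatype pick a
# layer 2 or layer 3 dissector below.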
if sa_ll[2] == socket.PACKET_OUTGOING:
return None
if sa_ll[3] in conf.l2types:
cls = conf.l2types.num2layer[sa_ll[3]] # type: Type[Packet]
lvl = 2
elif sa_ll[1] in conf.l3types:
cls = conf.l3types.num2layer[sa_ll[1]]
lvl = 3
else:
cls = conf.default_l2
warning("Unable to guess type (interface=%s protocol=%#x family=%i). Using %s", sa_ll[0], sa_ll[1], sa_ll[3], cls.name) # noqa: E501
lvl = 3
try:
pkt = cls(data)
except KeyboardInterrupt:
raise
except Exception:
if conf.debug_dissector:
raise
pkt = conf.raw_layer(data)
if lvl == 2:
pkt = pkt.payload
if pkt is not None:
if ts is None:
from scapy.arch.linux import get_last_packet_timestamp
ts = get_last_packet_timestamp(self.ins)
pkt.time = ts
return pkt
def send(self, x):
# type: (Packet) -> int
try:
sx = raw(x)
if self.outs:
x.sent_time = time.time()
return self.outs.sendto(
sx,
(x.dst, 0)
)
except AttributeError:
raise ValueError(
"Missing 'dst' attribute in the first layer to be "
"sent using a native L3 socket ! (make sure you passed the "
"IP layer)"
)
except socket.error as msg:
log_runtime.error(msg)
return 0
class SimpleSocket(SuperSocket):
desc = "wrapper around a classic socket"
nonblocking_socket = True
def __init__(self, sock):
# type: (socket.socket) -> None
self.ins = sock
self.outs = sock
class StreamSocket(SimpleSocket):
desc = "transforms a stream socket into a layer 2"
def __init__(self, sock, basecls=None):
# type: (socket.socket, Optional[Type[Packet]]) -> None
if basecls is None:
basecls = conf.raw_layer
SimpleSocket.__init__(self, sock)
self.basecls = basecls
def recv(self, x=MTU):
# type: (int) -> Optional[Packet]
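# Peek at the buffered bytes without consuming them, dissect, then consume
# only the bytes the dissector actually used (total length minus any
# trailing padding), leaving the rest for the next recv().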
data = self.ins.recv(x, socket.MSG_PEEK)
x = len(data)
if x == 0:
return None
pkt = self.basecls(data) # type: Packet
pad = pkt.getlayer(conf.padding_layer)
if pad is not None and pad.underlayer is not None:
del(pad.underlayer.payload)
from scapy.packet import NoPayload
while pad is not None and not isinstance(pad, NoPayload):
x -= len(pad.load)
pad = pad.payload
self.ins.recv(x)
return pkt
class SSLStreamSocket(StreamSocket):
desc = "similar usage than StreamSocket but specialized for handling SSL-wrapped sockets" # noqa: E501
def __init__(self, sock, basecls=None):
# type: (socket.socket, Optional[Type[Packet]]) -> None
self._buf = b""
super(SSLStreamSocket, self).__init__(sock, basecls)
# The default value of x, 65535, is the maximum length of a TLS record
def recv(self, x=65535):
# type: (int) -> Optional[Packet]
pkt = None # type: Optional[Packet]
if self._buf != b"":
try:
pkt = self.basecls(self._buf)
except Exception:
# We assume that the exception is generated by a buffer underflow # noqa: E501
pass
if not pkt:
buf = self.ins.recv(x)
if len(buf) == 0:
raise socket.error((100, "Underlying stream socket tore down"))
self._buf += buf
x = len(self._buf)
pkt = self.basecls(self._buf)
if pkt is not None:
pad = pkt.getlayer(conf.padding_layer)
if pad is not None and pad.underlayer is not None:
del(pad.underlayer.payload)
while pad is not None and not isinstance(pad, scapy.packet.NoPayload): # noqa: E501
x -= len(pad.load)
pad = pad.payload
self._buf = self._buf[x:]
return pkt
class L2ListenTcpdump(SuperSocket):
desc = "read packets at layer 2 using tcpdump"
def __init__(self,
iface=None, # type: Optional[_GlobInterfaceType]
promisc=False, # type: bool
filter=None, # type: Optional[str]
nofilter=False, # type: bool
prog=None, # type: Optional[str]
*arg, # type: Any
**karg # type: Any
):
# type: (...) -> None
self.outs = None
args = ['-w', '-', '-s', '65535']
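# tcpdump flags: -w - streams a pcap capture to stdout (read back through
# PcapReader below), -s 65535 keeps full-length packets, and -p (appended
# when promisc is False) disables promiscuous mode.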
if iface is None and (WINDOWS or DARWIN):
iface = conf.iface
self.iface = iface
if iface is not None:
args.extend(['-i', network_name(iface)])
if not promisc:
args.append('-p')
if not nofilter:
if conf.except_filter:
if filter:
filter = "(%s) and not (%s)" % (filter, conf.except_filter)
else:
filter = "not (%s)" % conf.except_filter
if filter is not None:
args.append(filter)
self.tcpdump_proc = tcpdump(None, prog=prog, args=args, getproc=True)
self.reader = PcapReader(self.tcpdump_proc.stdout)
self.ins = self.reader # type: ignore
def recv(self, x=MTU):
# type: (int) -> Optional[Packet]
return self.reader.recv(x)
def close(self):
# type: () -> None
SuperSocket.close(self)
self.tcpdump_proc.kill()
@staticmethod
def select(sockets, remain=None):
# type: (List[SuperSocket], Optional[float]) -> List[SuperSocket]
if (WINDOWS or DARWIN):
return sockets
return SuperSocket.select(sockets, remain=remain)
# More abstract objects
class IterSocket(SuperSocket):
desc = "wrapper around an iterable"
nonblocking_socket = True
def __init__(self, obj):
# type: (_PacketIterable) -> None
if not obj:
self.iter = iter([]) # type: Iterator[Packet]
elif isinstance(obj, IterSocket):
self.iter = obj.iter
elif isinstance(obj, SndRcvList):
def _iter(obj=cast(SndRcvList, obj)):
# type: (SndRcvList) -> Iterator[Packet]
for s, r in obj:
if s.sent_time:
s.time = s.sent_time
yield s
yield r
self.iter = _iter()
elif isinstance(obj, (list, PacketList)):
if isinstance(obj[0], bytes): # type: ignore
self.iter = iter(obj)
else:
self.iter = (y for x in obj for y in x)
else:
self.iter = obj.__iter__()
@staticmethod
def select(sockets, remain=None):
# type: (List[SuperSocket], Any) -> List[SuperSocket]
return sockets
def recv(self, *args):
# type: (*Any) -> Optional[Packet]
try:
pkt = next(self.iter)
return pkt.__class__(bytes(pkt))
except StopIteration:
raise EOFError
def close(self):
# type: () -> None
pass
| {
"content_hash": "c0bd36f6d25ccda7df1876defa446677",
"timestamp": "",
"source": "github",
"line_count": 538,
"max_line_length": 145,
"avg_line_length": 33.600371747211895,
"alnum_prop": 0.5221552248713835,
"repo_name": "4shadoww/hakkuframework",
"id": "4dff0ecf413ce25af3784e9d7ea0b03d391b02c3",
"size": "18271",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/scapy/supersocket.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7992059"
}
],
"symlink_target": ""
} |
from PIL import Image
import os
import math
import easygui
import webbrowser
import cloudinary
import cloudinary.uploader
import cloudinary.api
"""
QuickLogo v1.1 (Beta)
by William Wang
Takes in an image, strips it of most common-colored pixels,
then uploads it to a Cloudinary cloud of your choice.
To resolve dependencies:
$ pip install pillow
$ pip install easygui
$ pip install cloudinary
"""
def most_frequent_color(image):
"""
Input:
image -- Image file
Output:
Most frequent color found in the image as a 4-tuple (R,G,B,A)
"""
x, y = image.size
colors = image.getcolors(x * y)
most_frequent = colors[0] # first (count, rgba value) in colors
for count, pixel in colors:
if count > most_frequent[0]:
most_frequent = (count, pixel)
print "Most frequent color: RGBA" + str(most_frequent[1]) + " [" + str(most_frequent[0]) + " pixels]"
return most_frequent[1]
def color_to_transparency(image, target_color, threshold):
"""
Transforms pixels within a certain color range to transparent
Inputs:
image -- Image file to transform
target_color -- RGBA tuple representing the estimated color to remove
threshold -- maximum 4D distance between target_color and removed pixels
Outputs:
Image with near-target-color pixels made transparent
"""
# Convert to sequence object containing pixel values
pixelData = image.load()
target_red, target_green, target_blue, target_opacity = target_color[0], target_color[1], target_color[2], target_color[3]
# Find all near-target-color pixels and set their opacity to 0
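# Worked example (illustrative, not from the original): with threshold=100, an
# off-white pixel RGBA(250, 250, 250, 255) lies only sqrt(3 * 5**2) ~= 8.7 away
# from pure white (255, 255, 255, 255), so it is cleared to transparent.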
for y in xrange(image.size[1]):
for x in xrange(image.size[0]):
pixel = pixelData[x, y]
red, green, blue, opacity = pixel[0], pixel[1], pixel[2], pixel[3]
if math.sqrt((red - target_red)**2 + (green - target_green)**2 + (blue - target_blue)**2 + (opacity - target_opacity)**2) <= threshold:
pixelData[x, y] = (255,255,255,0)
print "Color to transparency applied"
return image
def preview(file_in, savepath, file_out):
"""
Opens before and after comparison in web browser
Input:
file_in -- string filename of image that was transformed
savepath -- string directory the transformed image was saved to
file_out -- string filename the transformed image was saved to
"""
html = open("testpage.html", "w")
html.write("<html><head><title>Test Page</title><style>body{background:rgba(0,0,0,.7);color:#fff;font-family:'Arial';}img{max-height:30%;max-width:40%;display:block;margin-left:auto;margin-right:auto;margin-top:10px;}</style>")
html.write("</head><body>Before<img src='" + file_in + "'><br/>After<img src='"+ savepath + file_out + "'></body></html>")
html.close()
webbrowser.open("testpage.html", new=2, autoraise=False)
def upload(filename):
"""
Performs upload to specified cloud
Input:
filename -- string filename of image to upload
"""
cloud_name = None # YOUR_CLOUD_NAME_HERE
api_key = None # YOUR_API_KEY_HERE
api_secret = None # YOUR_API_SECRET_HERE
cloudinary.config(
cloud_name = cloud_name,
api_key = api_key,
api_secret = api_secret
)
return cloudinary.uploader.upload(filename)
def run(file_in, savepath, threshold=100):
"""
Runs QuickLogo
Inputs:
file_in -- string filename of image to transform
savepath -- string directory the transformed image will be saved to
threshold -- maximum 4D distance between target_color and removed pixels
Output:
file_out -- string filename the transformed image was saved to
"""
alias = file_in.split("\\")[-1]
print "Transforming", alias
file_out = "".join(alias.split(".")[:-1]) + "-out.png"
img = Image.open(file_in)
# Convert to RGBA colorspace
img = img.convert("RGBA")
target_color = most_frequent_color(img)
transformed = color_to_transparency(img, target_color, threshold)
local_savepath = savepath
if not os.path.exists(local_savepath):
os.makedirs(local_savepath)
transformed.save(local_savepath + file_out, "PNG") # output file name and extension
print "Saved to", savepath + file_out
return file_out
# Main
file_in = str(easygui.fileopenbox("Select image to process..."))
savepath = "./TestOut/" # Manually specify image save directory for now
if not file_in == ".":
file_out = run(file_in, savepath , 120)
preview(file_in, savepath, file_out)
""" Uncomment this block to enable Cloudinary upload
proceed = raw_input("Continue with upload? (Y/N) >")
if proceed.lower() == 'y':
print upload(savepath + file_out)
print "Upload Complete"
elif proceed.lower() == 'n':
print "Upload aborted"
else:
print "Invalid input, upload aborted"
"""
print "Done"
else:
print "No file was specified" | {
"content_hash": "1aadbcbfe0aab7adc2ddefbc8f3111d8",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 231,
"avg_line_length": 31.96875,
"alnum_prop": 0.6330400782013685,
"repo_name": "williamvwang/QuickLogo",
"id": "854e1dcdf474e902523893537631268104adbb6d",
"size": "5115",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quicklogo_beta.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8272"
}
],
"symlink_target": ""
} |
import logging
import multiprocessing
import os
import time
import arrow
import OnePy as op
from OnePy.sys_module.metabase_env import OnePyEnvBase
from OnePy.utils.awesome_func import run_multiprocessing
from OnePy.utils.easy_func import get_day_ratio
class ForwardAnalysis(OnePyEnvBase):
def __init__(self):
self.workers = os.cpu_count()
self.total_iter_times = None
self.show_summary = False
def run(self, fromdate: str, length_month: int=3, rolling_month: int=3,
times: int=2, show_summary=True, warning: bool=True):
"""
fromdate: start date
length_month: length of each backtest window, in months
rolling_month: how far the window rolls forward each time, in months
times: number of rolling backtests
"""
self.show_summary = show_summary
if not warning:
logging.basicConfig(level=logging.CRITICAL)
first_todate = arrow.get(fromdate).shift(
months=length_month).format("YYYY-MM-DD")
self.total_iter_times = times
last_todate = arrow.get(first_todate).shift(
months=(times-1)*rolling_month).format("YYYY-MM-DD")
print(f'Begin Forward Analysis!\n+{"-"*40}+',
f'\nFromdate: {fromdate}, Todate: {last_todate}'
f'\nTimescale: {length_month} Months.'
f'\nRollingscale: {rolling_month} Months.'
f'\nTotal roll {times} times.\n+{"-"*40}+')
cache_list: list = multiprocessing.Manager().list()
params = [(fromdate, first_todate, cache_list, index*rolling_month)
for index in range(times)]
run_multiprocessing(self._analysis_func, params, self.workers)
print('Done!')
def _analysis_func(self, fromdate, todate, cache, index):
t1 = time.time()
go = op.OnePiece()
fromdate = arrow.get(fromdate).shift(
months=index).format("YYYY-MM-DD")
todate = arrow.get(todate).shift(
months=index).format("YYYY-MM-DD")
go.env.fromdate = fromdate
ratio = get_day_ratio(go.env.sys_frequency)
go.env.sys_date = arrow.get(fromdate).shift(
days=-ratio).format('YYYY-MM-DD HH:mm:ss')
go.env.todate = todate
go.sunny(self.show_summary)
summary = 1
cache.append(summary)
t2 = time.time()
self._compute_running_time(t1, t2, len(cache))
def _compute_running_time(self, start: float, end: float, finished_times: int):
diff = end - start
left = diff*(self.total_iter_times-finished_times)/60/self.workers
print(f'Current run: {finished_times}, {left:.2f} mins remaining')
| {
"content_hash": "4a83099bcdba054d6f56ec6c509b7739",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 83,
"avg_line_length": 34.44,
"alnum_prop": 0.6085946573751452,
"repo_name": "Chandlercjy/OnePy",
"id": "c7706842e261a345a896ed07832c6e1d5b77b22d",
"size": "2641",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "OnePy/custom_module/forward_analysis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "251278"
}
],
"symlink_target": ""
} |
"""Tests for clause_search.inputs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import numpy as np
import tensorflow as tf
from deepmath.eprover import prover_clause_examples_pb2
from deepmath.guidance import inputs
FLAGS = tf.flags.FLAGS
Info = collections.namedtuple('Info', ('conjecture', 'positives', 'negatives'))
class InputsTest(tf.test.TestCase):
def testParseBuckets(self):
self.assertAllEqual([2, 3, 4], inputs.parse_buckets('2,3,4'))
def testExamplesShards(self):
shards = 6
FLAGS.examples_train = '/blah/examples-train@%d' % shards
FLAGS.examples_eval = '/blah/examples-eval@%d' % shards
for mode in 'train', 'eval':
self.assertAllEqual(inputs.examples_shards(mode=mode),
['/blah/examples-%s-%05d-of-%05d' % (mode, i, shards)
for i in range(shards)])
def testReadVocab(self):
# Mirrors tests in vocabulary_test.cc
path = os.path.join(self.get_temp_dir(), 'small_vocab')
with open(path, 'w') as vocab_file:
for s in '7', 'X', 'Yx', 'f', 'g':
print(s, file=vocab_file)
def check(vocab_to_id, expect):
expect.update({' ': 0, '*': 1, '~': 2, '|': 3, '&': 4, '(': 5, ')': 6,
',': 7, '=': 8, '$false': 9, '$true': 10})
for word, i in expect.items():
self.assertEqual(vocab_to_id[word], i)
# No flags
size, vocab_to_id = inputs.read_vocab(path)
self.assertEqual(size, 32 + 5)
check(vocab_to_id,
{'7': 32 + 0, 'X': 32 + 1, 'Yx': 32 + 2, 'f': 32 + 3, 'g': 32 + 4})
# One variable
size, vocab_to_id = inputs.read_vocab(path + ':one_variable')
self.assertEqual(size, 32 + 4)
check(vocab_to_id,
{'7': 32 + 0, 'X': 32 + 1, 'Yx': 32 + 1, 'f': 32 + 2, 'g': 32 + 3})
def testProtoBatch(self):
shards = 10
batch_size = 4
examples_per_shard = 6
FLAGS.examples_train = os.path.join(self.get_temp_dir(),
'examples-train@%d' % shards)
FLAGS.examples_eval = os.path.join(self.get_temp_dir(),
'examples-eval@%d' % shards)
FLAGS.approx_proofs_per_shard = examples_per_shard
FLAGS.input_queue_factor = 2
# Write sharded tfrecords
mode_values = {'train': set(), 'eval': set()}
for mode in 'train', 'eval':
for shard in range(shards):
shard_path = os.path.join(
self.get_temp_dir(),
'examples-%s-%05d-of-%05d' % (mode, shard, shards))
with tf.python_io.TFRecordWriter(shard_path) as writer:
for i in range(examples_per_shard):
value = tf.compat.as_bytes('value-%s-%d.%d' % (mode, shard, i))
writer.write(value)
mode_values[mode].add(value)
def traverse(mode, epochs, shuffle):
"""Record the keys seen throughout some number of epochs."""
with tf.Graph().as_default() as graph:
tf.set_random_seed(7)
values = inputs.proto_batch(mode=mode, batch_size=batch_size,
shuffle=shuffle)
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
with self.test_session(graph=graph):
init_op.run()
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
counts = collections.defaultdict(int)
for _ in range(epochs * shards * examples_per_shard // batch_size):
values_np = values.eval()
self.assertEqual(values_np.shape, (batch_size,))
for value in values_np:
self.assertIn(value, mode_values[mode])
counts[value] += 1
coord.request_stop()
for thread in threads:
thread.join()
return counts
for mode in 'train', 'eval':
# With shuffling off, we should be able to hit each example once
counts = traverse(mode, epochs=1, shuffle=False)
for value in mode_values[mode]:
self.assertEqual(counts[value], 1)
# With shuffling on, the counts will be off but should have the right sum
epochs = 10
counts = traverse(mode, epochs=epochs, shuffle=True)
self.assertEqual(np.sum(list(counts.values())),
epochs * len(mode_values[mode]))
def testSequence(self):
# Random generation of ProverClauseExamples
vocab = set()
def random_list(limit, empty, separator, f):
count = np.random.randint(limit)
if not count:
return empty
return separator.join(f() for _ in range(count))
def new_name(prefix):
s = '%s%d' % (prefix, len(vocab))
vocab.add(s)
return s
def random_term(term, depth):
if depth == 0 or np.random.randint(3) == 0:
if np.random.randint(2):
name = term.variable.name = new_name('X')
return name
else:
name = term.number.value = new_name('')
return name
else:
name = term.function.name = new_name('f')
def random_arg():
return random_term(term.function.args.add(), depth=depth - 1)
args = random_list(2, '', ',', random_arg)
return '%s(%s)' % (name, args) if args else name
def random_equation(equation):
equation.negated = np.random.randint(2)
s = '~' * equation.negated
s += random_term(equation.left, depth=2)
if np.random.randint(2):
s += '=' + random_term(equation.right, depth=1)
return s
def random_clause(clause):
return random_list(4, '$false', '|',
lambda: random_equation(clause.clause.equations.add()))
def random_clauses(clauses):
return random_list(4, '$true', '&',
lambda: '(%s)' % random_clause(clauses.add()))
np.random.seed(7)
tf.set_random_seed(7)
shards = 10
batch_size = 2
examples_per_shard = 6
FLAGS.examples_train = os.path.join(self.get_temp_dir(),
'examples-train@%d' % shards)
FLAGS.examples_eval = os.path.join(self.get_temp_dir(),
'examples-eval@%d' % shards)
FLAGS.approx_proofs_per_shard = examples_per_shard
FLAGS.input_queue_factor = 2
# Build tfrecords of ProverClauseExamples
key_info = {}
mode_keys = {'train': set(), 'eval': set()}
valid_keys = set() # Keys with at least one positive and negative
for mode in 'train', 'eval':
for shard in range(shards):
shard_path = os.path.join(
self.get_temp_dir(),
'examples-%s-%05d-of-%05d' % (mode, shard, shards))
with tf.python_io.TFRecordWriter(shard_path) as writer:
valid_count = 0
while valid_count < examples_per_shard:
key = 'key%d' % len(key_info)
full_key = tf.compat.as_bytes('%s:%s' % (shard_path, key))
examples = prover_clause_examples_pb2.ProverClauseExamples()
examples.key = full_key
conjecture = random_clauses(examples.cnf.negated_conjecture)
positives = [random_clause(examples.positives.add())
for _ in range(np.random.randint(3))]
negatives = [random_clause(examples.negatives.add())
for _ in range(np.random.randint(3))]
writer.write(examples.SerializeToString())
key_info[full_key] = Info(conjecture, positives, negatives)
if positives and negatives:
mode_keys[mode].add(full_key)
valid_keys.add(full_key)
valid_count += 1
# Write vocab file
vocab_path = os.path.join(self.get_temp_dir(), 'vocab')
with open(vocab_path, 'w') as vocab_file:
for s in vocab:
print(s, file=vocab_file)
FLAGS.vocab = vocab_path
# Read vocabulary, and construct map from int sequence back to string
vocab_size, vocab_to_id = inputs.read_vocab(vocab_path)
self.assertEqual(vocab_size, len(vocab_to_id) + 32 - 11)
id_to_vocab = {i: s for s, i in vocab_to_id.items()}
def show_ids(ids):
"""Converts a coded clause to string, truncating and stripping."""
return ''.join(id_to_vocab[i] for i in ids).strip()
# Test both train and eval
for shuffle in False, True:
if shuffle:
buckets = '16,32,64,128,256,512'
else:
# Disable bucketing so that we can verify everything is processed
buckets = '100000'
FLAGS.negated_conjecture_buckets = FLAGS.clause_buckets = buckets
for mode in 'train', 'eval':
with tf.Graph().as_default() as graph:
keys, conjectures, clauses, labels = (inputs.sequence_example_batch(
mode=mode, batch_size=batch_size, shuffle=shuffle))
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
self.assertEqual(keys.dtype, tf.string)
self.assertEqual(conjectures.dtype, tf.int32)
self.assertEqual(clauses.dtype, tf.int32)
self.assertEqual(labels.dtype, tf.bool)
# Evaluate enough times to see every key exactly once
with self.test_session(graph=graph) as sess:
init_op.run()
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
visited = collections.defaultdict(int)
for _ in range(len(mode_keys[mode]) // batch_size):
batch = sess.run([keys, conjectures, clauses, labels])
for data in batch:
self.assertEqual(len(data), batch_size)
for pair in batch[2:]:
self.assertEqual(pair.shape[1], 2)
for key, conjecture, clause_pair, label_pair in zip(*batch):
self.assertIn(key, mode_keys[mode],
'mode %s, key %r, keys %r' %
(mode, key, mode_keys[mode]))
visited[key] += 1
info = key_info[key]
self.assertEqual(info.conjecture, show_ids(conjecture))
for clause, label in zip(clause_pair, label_pair):
self.assertIn(show_ids(clause),
info.positives if label else info.negatives)
coord.request_stop()
for thread in threads:
thread.join()
if not shuffle:
# Verify that we visited everything exactly once
for key in mode_keys[mode]:
count = visited[key]
if count != 1:
raise ValueError('key %s visited %d != 1 times' % (key, count))
def testDepth(self):
# Build very simple vocabulary
FLAGS.vocab = os.path.join(self.get_temp_dir(), 'depth_vocab')
with open(FLAGS.vocab, 'w') as vocab_file:
print('X\nf', file=vocab_file)
_, vocab_to_id = inputs.read_vocab(FLAGS.vocab)
# Build two very deep clauses
def deep_clause(n, clause):
term = clause.clause.equations.add().left
for _ in range(n):
term.function.name = 'f'
term = term.function.args.add()
term.variable.name = 'X'
examples = prover_clause_examples_pb2.ProverClauseExamples()
deep_clause(100, examples.positives.add())
deep_clause(200, examples.negatives.add())
# The clause are f(f(...(X)...))
def correct(n):
correct = ['f', '('] * n + ['X'] + [')'] * n
return [vocab_to_id[s] for s in correct]
# Check that parsing works
with self.test_session() as sess:
_, negated_conjecture, clauses, labels = sess.run(
inputs.random_clauses_as_sequence(examples.SerializeToString(),
vocab=FLAGS.vocab))
def decode(s):
return np.fromstring(s, dtype=np.int32)
self.assertAllEqual(decode(negated_conjecture), [vocab_to_id['$true']])
self.assertAllEqual(decode(clauses[0]), correct(100))
self.assertAllEqual(decode(clauses[1]), correct(200))
self.assertAllEqual(labels, [True, False])
if __name__ == '__main__':
tf.test.main()
| {
"content_hash": "179e884e04f4edcfad33703d7fa61ca7",
"timestamp": "",
"source": "github",
"line_count": 312,
"max_line_length": 80,
"avg_line_length": 39.21153846153846,
"alnum_prop": 0.574464606833415,
"repo_name": "tensorflow/deepmath",
"id": "a780cca46325e0a32c0f213bc332442231814c34",
"size": "12911",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deepmath/guidance/inputs_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1034511"
},
{
"name": "Dockerfile",
"bytes": "1605"
},
{
"name": "Makefile",
"bytes": "7154"
},
{
"name": "Python",
"bytes": "860334"
},
{
"name": "Shell",
"bytes": "460"
},
{
"name": "Starlark",
"bytes": "38957"
}
],
"symlink_target": ""
} |
"""
=====================
Classifier comparison
=====================
A comparison of several classifiers in scikit-learn on synthetic datasets.
The point of this example is to illustrate the nature of decision boundaries
of different classifiers.
This should be taken with a grain of salt, as the intuition conveyed by
these examples does not necessarily carry over to real datasets.
Particularly in high-dimensional spaces, data can more easily be separated
linearly and the simplicity of classifiers such as naive Bayes and linear SVMs
might lead to better generalization than is achieved by other classifiers.
The plots show training points in solid colors and testing points
semi-transparent. The lower right shows the classification accuracy on the test
set.
"""
# Code source: Gaël Varoquaux
# Andreas Müller
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.inspection import DecisionBoundaryDisplay
names = [
"Nearest Neighbors",
"Linear SVM",
"RBF SVM",
"Gaussian Process",
"Decision Tree",
"Random Forest",
"Neural Net",
"AdaBoost",
"Naive Bayes",
"QDA",
]
classifiers = [
KNeighborsClassifier(3),
SVC(kernel="linear", C=0.025),
SVC(gamma=2, C=1),
GaussianProcessClassifier(1.0 * RBF(1.0)),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
MLPClassifier(alpha=1, max_iter=1000),
AdaBoostClassifier(),
GaussianNB(),
QuadraticDiscriminantAnalysis(),
]
X, y = make_classification(
n_features=2, n_redundant=0, n_informative=2, random_state=1, n_clusters_per_class=1
)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [
make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable,
]
figure = plt.figure(figsize=(27, 9))
i = 1
# iterate over datasets
for ds_cnt, ds in enumerate(datasets):
# preprocess dataset, split into training and test part
X, y = ds
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.4, random_state=42
)
x_min, x_max = X[:, 0].min() - 0.5, X[:, 0].max() + 0.5
y_min, y_max = X[:, 1].min() - 0.5, X[:, 1].max() + 0.5
# just plot the dataset first
cm = plt.cm.RdBu
cm_bright = ListedColormap(["#FF0000", "#0000FF"])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
if ds_cnt == 0:
ax.set_title("Input data")
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright, edgecolors="k")
# Plot the testing points
ax.scatter(
X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6, edgecolors="k"
)
ax.set_xlim(x_min, x_max)
ax.set_ylim(y_min, y_max)
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
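# Wrap each classifier in a pipeline so StandardScaler is fit only on the
# training split, avoiding leakage from the test data into the scaling.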
clf = make_pipeline(StandardScaler(), clf)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
DecisionBoundaryDisplay.from_estimator(
clf, X, cmap=cm, alpha=0.8, ax=ax, eps=0.5
)
# Plot the training points
ax.scatter(
X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright, edgecolors="k"
)
# Plot the testing points
ax.scatter(
X_test[:, 0],
X_test[:, 1],
c=y_test,
cmap=cm_bright,
edgecolors="k",
alpha=0.6,
)
ax.set_xlim(x_min, x_max)
ax.set_ylim(y_min, y_max)
ax.set_xticks(())
ax.set_yticks(())
if ds_cnt == 0:
ax.set_title(name)
ax.text(
x_max - 0.3,
y_min + 0.3,
("%.2f" % score).lstrip("0"),
size=15,
horizontalalignment="right",
)
i += 1
plt.tight_layout()
plt.show()
| {
"content_hash": "b92917f9766767a0a7bf6be1b157b03c",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 88,
"avg_line_length": 31.192307692307693,
"alnum_prop": 0.6491985203452528,
"repo_name": "TomDLT/scikit-learn",
"id": "b6fb666a4cd7176fd6cd5c0b514a4adc6d071f8b",
"size": "4892",
"binary": false,
"copies": "8",
"ref": "refs/heads/main",
"path": "examples/classification/plot_classifier_comparison.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "42335"
},
{
"name": "C++",
"bytes": "147316"
},
{
"name": "Cython",
"bytes": "667491"
},
{
"name": "Makefile",
"bytes": "1644"
},
{
"name": "Python",
"bytes": "10429796"
},
{
"name": "Shell",
"bytes": "43325"
}
],
"symlink_target": ""
} |
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName(_fromUtf8("Dialog"))
Dialog.resize(433, 427)
self.gridLayout_3 = QtGui.QGridLayout(Dialog)
self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
self.gridLayout_2 = QtGui.QGridLayout()
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.gridLayout = QtGui.QGridLayout()
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.label = QtGui.QLabel(Dialog)
self.label.setObjectName(_fromUtf8("label"))
self.gridLayout.addWidget(self.label, 0, 0, 1, 1)
self.lineEdit = QtGui.QLineEdit(Dialog)
self.lineEdit.setObjectName(_fromUtf8("lineEdit"))
self.gridLayout.addWidget(self.lineEdit, 0, 1, 1, 1)
self.label_2 = QtGui.QLabel(Dialog)
self.label_2.setMinimumSize(QtCore.QSize(255, 14))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.gridLayout.addWidget(self.label_2, 1, 0, 1, 1)
self.lineEdit_2 = QtGui.QLineEdit(Dialog)
self.lineEdit_2.setObjectName(_fromUtf8("lineEdit_2"))
self.gridLayout.addWidget(self.lineEdit_2, 1, 1, 1, 1)
self.label_3 = QtGui.QLabel(Dialog)
self.label_3.setMinimumSize(QtCore.QSize(255, 14))
self.label_3.setObjectName(_fromUtf8("label_3"))
self.gridLayout.addWidget(self.label_3, 2, 0, 1, 1)
self.lineEdit_3 = QtGui.QLineEdit(Dialog)
self.lineEdit_3.setObjectName(_fromUtf8("lineEdit_3"))
self.gridLayout.addWidget(self.lineEdit_3, 2, 1, 1, 1)
self.label_4 = QtGui.QLabel(Dialog)
self.label_4.setMinimumSize(QtCore.QSize(255, 14))
self.label_4.setObjectName(_fromUtf8("label_4"))
self.gridLayout.addWidget(self.label_4, 3, 0, 1, 1)
self.lineEdit_4 = QtGui.QLineEdit(Dialog)
self.lineEdit_4.setObjectName(_fromUtf8("lineEdit_4"))
self.gridLayout.addWidget(self.lineEdit_4, 3, 1, 1, 1)
self.label_5 = QtGui.QLabel(Dialog)
self.label_5.setMinimumSize(QtCore.QSize(255, 14))
self.label_5.setObjectName(_fromUtf8("label_5"))
self.gridLayout.addWidget(self.label_5, 4, 0, 1, 1)
self.comboBox = QtGui.QComboBox(Dialog)
self.comboBox.setEditable(True)
self.comboBox.setObjectName(_fromUtf8("comboBox"))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.addItem(_fromUtf8(""))
self.comboBox.addItem(_fromUtf8(""))
self.gridLayout.addWidget(self.comboBox, 4, 1, 1, 1)
self.label_6 = QtGui.QLabel(Dialog)
self.label_6.setMinimumSize(QtCore.QSize(91, 14))
self.label_6.setObjectName(_fromUtf8("label_6"))
self.gridLayout.addWidget(self.label_6, 5, 0, 1, 1)
self.comboBox_2 = QtGui.QComboBox(Dialog)
self.comboBox_2.setEditable(True)
self.comboBox_2.setObjectName(_fromUtf8("comboBox_2"))
self.comboBox_2.addItem(_fromUtf8(""))
self.comboBox_2.addItem(_fromUtf8(""))
self.comboBox_2.addItem(_fromUtf8(""))
self.comboBox_2.addItem(_fromUtf8(""))
self.comboBox_2.addItem(_fromUtf8(""))
self.comboBox_2.addItem(_fromUtf8(""))
self.comboBox_2.addItem(_fromUtf8(""))
self.comboBox_2.addItem(_fromUtf8(""))
self.gridLayout.addWidget(self.comboBox_2, 5, 1, 1, 1)
self.label_7 = QtGui.QLabel(Dialog)
self.label_7.setMinimumSize(QtCore.QSize(255, 14))
self.label_7.setObjectName(_fromUtf8("label_7"))
self.gridLayout.addWidget(self.label_7, 6, 0, 1, 1)
self.comboBox_3 = QtGui.QComboBox(Dialog)
self.comboBox_3.setEditable(True)
self.comboBox_3.setObjectName(_fromUtf8("comboBox_3"))
self.comboBox_3.addItem(_fromUtf8(""))
self.comboBox_3.addItem(_fromUtf8(""))
self.comboBox_3.addItem(_fromUtf8(""))
self.comboBox_3.addItem(_fromUtf8(""))
self.comboBox_3.addItem(_fromUtf8(""))
self.comboBox_3.addItem(_fromUtf8(""))
self.comboBox_3.addItem(_fromUtf8(""))
self.comboBox_3.addItem(_fromUtf8(""))
self.gridLayout.addWidget(self.comboBox_3, 6, 1, 1, 1)
self.label_8 = QtGui.QLabel(Dialog)
self.label_8.setMinimumSize(QtCore.QSize(255, 14))
self.label_8.setObjectName(_fromUtf8("label_8"))
self.gridLayout.addWidget(self.label_8, 7, 0, 1, 1)
self.comboBox_4 = QtGui.QComboBox(Dialog)
self.comboBox_4.setEditable(True)
self.comboBox_4.setObjectName(_fromUtf8("comboBox_4"))
self.comboBox_4.addItem(_fromUtf8(""))
self.comboBox_4.addItem(_fromUtf8(""))
self.comboBox_4.addItem(_fromUtf8(""))
self.comboBox_4.addItem(_fromUtf8(""))
self.comboBox_4.addItem(_fromUtf8(""))
self.comboBox_4.addItem(_fromUtf8(""))
self.comboBox_4.addItem(_fromUtf8(""))
self.gridLayout.addWidget(self.comboBox_4, 7, 1, 1, 1)
self.label_9 = QtGui.QLabel(Dialog)
self.label_9.setMinimumSize(QtCore.QSize(255, 14))
self.label_9.setObjectName(_fromUtf8("label_9"))
self.gridLayout.addWidget(self.label_9, 8, 0, 1, 1)
self.lineEdit_5 = QtGui.QLineEdit(Dialog)
self.lineEdit_5.setObjectName(_fromUtf8("lineEdit_5"))
self.gridLayout.addWidget(self.lineEdit_5, 8, 1, 1, 1)
self.label_10 = QtGui.QLabel(Dialog)
self.label_10.setMinimumSize(QtCore.QSize(255, 14))
self.label_10.setObjectName(_fromUtf8("label_10"))
self.gridLayout.addWidget(self.label_10, 9, 0, 1, 1)
self.lineEdit_6 = QtGui.QLineEdit(Dialog)
self.lineEdit_6.setObjectName(_fromUtf8("lineEdit_6"))
self.gridLayout.addWidget(self.lineEdit_6, 9, 1, 1, 1)
self.label_11 = QtGui.QLabel(Dialog)
self.label_11.setMinimumSize(QtCore.QSize(255, 14))
self.label_11.setObjectName(_fromUtf8("label_11"))
self.gridLayout.addWidget(self.label_11, 10, 0, 1, 1)
self.checkBox = QtGui.QCheckBox(Dialog)
self.checkBox.setChecked(True)
self.checkBox.setObjectName(_fromUtf8("checkBox"))
self.gridLayout.addWidget(self.checkBox, 10, 1, 1, 1)
self.label_12 = QtGui.QLabel(Dialog)
self.label_12.setMinimumSize(QtCore.QSize(255, 14))
self.label_12.setObjectName(_fromUtf8("label_12"))
self.gridLayout.addWidget(self.label_12, 11, 0, 1, 1)
self.checkBox_2 = QtGui.QCheckBox(Dialog)
self.checkBox_2.setChecked(True)
self.checkBox_2.setObjectName(_fromUtf8("checkBox_2"))
self.gridLayout.addWidget(self.checkBox_2, 11, 1, 1, 1)
self.label_13 = QtGui.QLabel(Dialog)
self.label_13.setMinimumSize(QtCore.QSize(255, 14))
self.label_13.setObjectName(_fromUtf8("label_13"))
self.gridLayout.addWidget(self.label_13, 12, 0, 1, 1)
self.checkBox_3 = QtGui.QCheckBox(Dialog)
self.checkBox_3.setChecked(True)
self.checkBox_3.setObjectName(_fromUtf8("checkBox_3"))
self.gridLayout.addWidget(self.checkBox_3, 12, 1, 1, 1)
self.label_14 = QtGui.QLabel(Dialog)
self.label_14.setMinimumSize(QtCore.QSize(255, 14))
self.label_14.setObjectName(_fromUtf8("label_14"))
self.gridLayout.addWidget(self.label_14, 13, 0, 1, 1)
self.checkBox_4 = QtGui.QCheckBox(Dialog)
self.checkBox_4.setChecked(True)
self.checkBox_4.setObjectName(_fromUtf8("checkBox_4"))
self.gridLayout.addWidget(self.checkBox_4, 13, 1, 1, 1)
self.gridLayout_2.addLayout(self.gridLayout, 0, 0, 1, 1)
self.buttonBox = QtGui.QDialogButtonBox(Dialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.gridLayout_2.addWidget(self.buttonBox, 1, 0, 1, 1)
self.gridLayout_3.addLayout(self.gridLayout_2, 0, 0, 1, 1)
self.buttonBox.raise_()
self.lineEdit.raise_()
self.lineEdit_2.raise_()
self.lineEdit_3.raise_()
self.lineEdit_4.raise_()
self.comboBox.raise_()
self.comboBox_2.raise_()
self.comboBox_3.raise_()
self.comboBox_4.raise_()
self.lineEdit_5.raise_()
self.lineEdit_6.raise_()
self.checkBox.raise_()
self.checkBox_4.raise_()
self.checkBox_3.raise_()
self.checkBox_2.raise_()
        self.label_14.raise_()
self.retranslateUi(Dialog)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), Dialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), Dialog.reject)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(_translate("Dialog", "Dialog", None))
self.label.setText(_translate("Dialog", "Nome utente", None))
self.lineEdit.setText(_translate("Dialog", "live", None))
self.label_2.setText(_translate("Dialog", "Nome computer", None))
self.lineEdit_2.setText(_translate("Dialog", "bluethrush_live", None))
self.label_3.setText(_translate("Dialog", "Password utente", None))
self.lineEdit_3.setText(_translate("Dialog", "live", None))
self.label_4.setText(_translate("Dialog", "Password amministratore", None))
self.lineEdit_4.setText(_translate("Dialog", "live", None))
self.label_5.setText(_translate("Dialog", "Percorso bootloader", None))
self.comboBox.setItemText(0, _translate("Dialog", "/dev/sda", None))
self.comboBox.setItemText(1, _translate("Dialog", "/dev/sda1", None))
self.comboBox.setItemText(2, _translate("Dialog", "/dev/sda2", None))
self.comboBox.setItemText(3, _translate("Dialog", "/dev/sda3", None))
self.comboBox.setItemText(4, _translate("Dialog", "/dev/sda4", None))
self.comboBox.setItemText(5, _translate("Dialog", "/dev/sda5", None))
self.comboBox.setItemText(6, _translate("Dialog", "/dev/sda6", None))
self.comboBox.setItemText(7, _translate("Dialog", "/dev/sda7", None))
self.comboBox.setItemText(8, _translate("Dialog", "/dev/sda8", None))
self.label_6.setText(_translate("Dialog", "Percorso rootfs", None))
self.comboBox_2.setItemText(0, _translate("Dialog", "/dev/sda1", None))
self.comboBox_2.setItemText(1, _translate("Dialog", "/dev/sda2", None))
self.comboBox_2.setItemText(2, _translate("Dialog", "/dev/sda3", None))
self.comboBox_2.setItemText(3, _translate("Dialog", "/dev/sda4", None))
self.comboBox_2.setItemText(4, _translate("Dialog", "/dev/sda5", None))
self.comboBox_2.setItemText(5, _translate("Dialog", "/dev/sda6", None))
self.comboBox_2.setItemText(6, _translate("Dialog", "/dev/sda7", None))
self.comboBox_2.setItemText(7, _translate("Dialog", "/dev/sda8", None))
self.label_7.setText(_translate("Dialog", "Percorso home", None))
self.comboBox_3.setItemText(0, _translate("Dialog", "/dev/sda1", None))
self.comboBox_3.setItemText(1, _translate("Dialog", "/dev/sda2", None))
self.comboBox_3.setItemText(2, _translate("Dialog", "/dev/sda3", None))
self.comboBox_3.setItemText(3, _translate("Dialog", "/dev/sda4", None))
self.comboBox_3.setItemText(4, _translate("Dialog", "/dev/sda5", None))
self.comboBox_3.setItemText(5, _translate("Dialog", "/dev/sda6", None))
self.comboBox_3.setItemText(6, _translate("Dialog", "/dev/sda7", None))
self.comboBox_3.setItemText(7, _translate("Dialog", "/dev/sda8", None))
self.label_8.setText(_translate("Dialog", "Percorso swap", None))
self.comboBox_4.setItemText(0, _translate("Dialog", "/dev/sda5", None))
self.comboBox_4.setItemText(1, _translate("Dialog", "/dev/sda1", None))
self.comboBox_4.setItemText(2, _translate("Dialog", "/dev/sda2", None))
self.comboBox_4.setItemText(3, _translate("Dialog", "/dev/sda3", None))
self.comboBox_4.setItemText(4, _translate("Dialog", "/dev/sda4", None))
self.comboBox_4.setItemText(5, _translate("Dialog", "/dev/sda6", None))
self.comboBox_4.setItemText(6, _translate("Dialog", "/dev/sda7", None))
self.label_9.setText(_translate("Dialog", "Filesystem rootfs", None))
self.lineEdit_5.setText(_translate("Dialog", "ext4", None))
self.label_10.setText(_translate("Dialog", "Filesystem home", None))
self.lineEdit_6.setText(_translate("Dialog", "ext4", None))
self.label_11.setText(_translate("Dialog", "Swap Y o N", None))
self.checkBox.setText(_translate("Dialog", "Y", None))
self.label_12.setText(_translate("Dialog", "Autologin Y o N", None))
self.checkBox_2.setText(_translate("Dialog", "Y", None))
self.label_13.setText(_translate("Dialog", "Metodo di installazione veloce Y o N", None))
self.checkBox_3.setText(_translate("Dialog", "Y", None))
self.label_14.setText(_translate("Dialog", "Cambiare password amministratore Y o N ", None))
self.checkBox_4.setText(_translate("Dialog", "Y", None))
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
Dialog = QtGui.QDialog()
ui = Ui_Dialog()
ui.setupUi(Dialog)
Dialog.show()
sys.exit(app.exec_())
| {
"content_hash": "283bd22ae9c73ad7df6a98cde8912a04",
"timestamp": "",
"source": "github",
"line_count": 266,
"max_line_length": 101,
"avg_line_length": 53.82706766917293,
"alnum_prop": 0.6435954742282441,
"repo_name": "MirkoPerrone/Scrips",
"id": "2ebb2c7481b677c46d95e2f785f099b680487589",
"size": "14553",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bluethrush-installer-gui.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14553"
},
{
"name": "Shell",
"bytes": "35260"
}
],
"symlink_target": ""
} |
import plotly.offline as offline
offline.init_notebook_mode(connected=True)
import plotly.graph_objects as go
fig = go.Figure(
data=[go.Bar(y=[2, 3, 1])],
layout=go.Layout(title="bar plot"))
fig.show()
fig.data[0].marker = dict(color='purple')
fig
| {
"content_hash": "ef433eda6dd3764a4ef1b6533cc4aff0",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 42,
"avg_line_length": 25.7,
"alnum_prop": 0.7042801556420234,
"repo_name": "mwouts/jupytext",
"id": "d3517da103887bf751335211e68c4375377f232a",
"size": "657",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/notebooks/mirror/ipynb_to_script_vim_folding_markers/plotly_graphs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "752"
},
{
"name": "C#",
"bytes": "2752"
},
{
"name": "C++",
"bytes": "26237"
},
{
"name": "Clojure",
"bytes": "7920"
},
{
"name": "F#",
"bytes": "1449"
},
{
"name": "Gnuplot",
"bytes": "2067"
},
{
"name": "Groovy",
"bytes": "6195"
},
{
"name": "Haskell",
"bytes": "930"
},
{
"name": "Java",
"bytes": "1670"
},
{
"name": "JavaScript",
"bytes": "21654"
},
{
"name": "Julia",
"bytes": "25322"
},
{
"name": "Jupyter Notebook",
"bytes": "630468"
},
{
"name": "MATLAB",
"bytes": "1316"
},
{
"name": "Makefile",
"bytes": "581"
},
{
"name": "OCaml",
"bytes": "1049"
},
{
"name": "PowerShell",
"bytes": "8962"
},
{
"name": "Prolog",
"bytes": "12028"
},
{
"name": "Python",
"bytes": "832380"
},
{
"name": "R",
"bytes": "6011"
},
{
"name": "RobotFramework",
"bytes": "1275"
},
{
"name": "Rust",
"bytes": "15459"
},
{
"name": "Sage",
"bytes": "418"
},
{
"name": "Scala",
"bytes": "1000"
},
{
"name": "Scheme",
"bytes": "54543"
},
{
"name": "Shell",
"bytes": "1014"
},
{
"name": "Tcl",
"bytes": "791"
},
{
"name": "TypeScript",
"bytes": "17816"
},
{
"name": "q",
"bytes": "1866"
}
],
"symlink_target": ""
} |
from django.contrib.auth.decorators import login_required
from django.http import Http404
from django.http import HttpResponseRedirect
from django.shortcuts import render
from orchestra.bots.errors import StaffingResponseException
from orchestra.communication.staffing import get_available_requests
from orchestra.communication.staffing import handle_staffing_response
from orchestra.models import Worker
@login_required
def accept_staffing_request_inquiry(request,
staffing_request_inquiry_id):
worker = Worker.objects.get(user=request.user)
response = handle_staffing_response(
worker, staffing_request_inquiry_id, is_available=True)
if response is None:
raise Http404
return render(request, 'communication/staffing_request_accepted.html',
{
'response': response,
})
@login_required
def reject_staffing_request_inquiry(request,
staffing_request_inquiry_id):
worker = Worker.objects.get(user=request.user)
try:
response = handle_staffing_response(
worker, staffing_request_inquiry_id, is_available=False)
except StaffingResponseException:
return render(request,
'communication/staffing_response_not_permitted.html',
{})
if response is None:
raise Http404
next_path = request.GET.get('next')
if next_path:
return HttpResponseRedirect(next_path)
else:
return render(request, 'communication/staffing_request_rejected.html',
{})
@login_required
def available_staffing_requests(request):
worker = Worker.objects.get(user=request.user)
return render(request, 'communication/available_staffing_requests.html',
{
'requests': get_available_requests(worker),
})
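# Illustrative only, not part of the original module: a minimal URLconf sketch
# for the three views above, assuming classic url() routing (the pattern
# strings are hypothetical; the real orchestra URLconf may differ).
#
#     from django.conf.urls import url
#     from orchestra.communication import views
#
#     urlpatterns = [
#         url(r'^accept/(?P<staffing_request_inquiry_id>\d+)/$',
#             views.accept_staffing_request_inquiry),
#         url(r'^reject/(?P<staffing_request_inquiry_id>\d+)/$',
#             views.reject_staffing_request_inquiry),
#         url(r'^available/$',
#             views.available_staffing_requests),
#     ]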
| {
"content_hash": "239b5848e9d07dd7ca31268656048bb1",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 78,
"avg_line_length": 35.2,
"alnum_prop": 0.65650826446281,
"repo_name": "unlimitedlabs/orchestra",
"id": "f1f31ffd68156b961cf0deaac3e791c74657fed2",
"size": "1936",
"binary": false,
"copies": "2",
"ref": "refs/heads/dependabot/npm_and_yarn/minimist-1.2.6",
"path": "orchestra/communication/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "115443"
},
{
"name": "HTML",
"bytes": "79220"
},
{
"name": "JavaScript",
"bytes": "317637"
},
{
"name": "Makefile",
"bytes": "1085"
},
{
"name": "Python",
"bytes": "683120"
}
],
"symlink_target": ""
} |
import bokeh.server
from bokeh.plotting import line, circle, curdoc
from bokeh.widgetobjects import (VBoxModelForm, HBox,
BokehApplet, TextInput, PreText,
Select, Slider)
from bokeh.objects import Plot, ColumnDataSource
from bokeh.plot_object import PlotObject
from bokeh.properties import (Dict, Float, String, Instance)
import numpy as np
import logging
logging.basicConfig(level=logging.DEBUG)
class MyModel(VBoxModelForm):
"""Input Widgets, define the fields you want to
read from the input here as bokeh properties
input_specs is a list of dictionary, specifying
how the kind of input widget you want for each
property. the name field must match
one of the properties, for example here,
we use names of offset and scale. You can
also specify title, if you want a different
label in the generated form
"""
offset = Float(1.0)
scale = Float(1.0)
title = String(default="my sin wave")
input_specs = [
{"widget" : TextInput,
"name" : "title",
"value" : "my sin wave"},
{"widget" : Slider,
"name" : "offset",
"value" : 1.0,
"start" : 0.0,
"end" : 5.0},
{"widget" : Slider,
"name" : "scale",
"value" : 1.0,
"start" : -5.0,
"end" : 5.0},
]
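    # Illustrative only, not part of the original example: each bokeh property
    # on this class becomes a form field once a matching input_specs entry is
    # added. A hypothetical extra "phase" slider would follow the same pattern:
    #
    #     phase = Float(0.0)
    #
    #     {"widget" : Slider,
    #      "name" : "phase",
    #      "value" : 0.0,
    #      "start" : 0.0,
    #      "end" : 6.28},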
class MyApp(BokehApplet):
plot = Instance(Plot)
source = Instance(ColumnDataSource)
def create(self, doc):
"""
This function is called once, and is responsible for
creating all objects (plots, datasources, etc)
"""
self.modelform = MyModel()
self.modelform.create_inputs(doc)
self.source = ColumnDataSource(data={'x':[], 'y':[]})
self.update_data()
self.plot = line('x', 'y', source=self.source,
plot_width=400, plot_height=400,
title=self.modelform.title
)
self.children.append(self.modelform)
self.children.append(self.plot)
def input_change(self, obj, attrname, old, new):
"""
        This function is called whenever the input form changes.
        It is responsible for updating the plot, or whatever
you want. The signature is
obj : the object that changed
attrname : the attr that changed
old : old value of attr
new : new value of attr
"""
self.update_data()
self.plot.title = self.modelform.title
def update_data(self):
N = 80
x = np.linspace(0, 4*np.pi, N)
y = np.sin(x)
logging.debug ("PARAMS %s %s", self.modelform.offset, self.modelform.scale)
y = self.modelform.offset + y * self.modelform.scale
self.source.data = {'x' : x, 'y' : y}
# the following adds "/exampleapp" as a url which renders MyApp
bokeh_url = "http://localhost:5006"
MyApp.add_route("/exampleapp", bokeh_url)
"""
Example 2
you need to run download.py to get the data from quantquote
"""
import os
from os.path import join, dirname, splitext
import pandas as pd
data_dir = join(dirname(__file__), "daily")
tickers = os.listdir(data_dir)
tickers = [splitext(x)[0].split("table_")[-1] for x in tickers]
class StockInputModel(VBoxModelForm):
"""Input Widgets, define the fields you want to
read from the input here as bokeh properties
input_specs is a list of dictionary, specifying
how the kind of input widget you want for each
property. the name field must match
one of the properties, for example here,
we use names of offset and scale. You can
also specify title, if you want a different
label in the generated form
"""
ticker1 = String(default="AAPL")
ticker2 = String(default="GOOG")
input_specs = [
{"widget" : Select,
"name" : "ticker1",
"value" : "AAPL",
"options" : ["AAPL","GOOG","INTC","BRCM","YHOO"]
},
{"widget" : Select,
"name" : "ticker2",
"value" : "GOOG",
"options" : ["AAPL","GOOG","INTC","BRCM","YHOO"]
}
]
class StockApp(BokehApplet):
plot = Instance(Plot)
source = Instance(ColumnDataSource)
pretext = Instance(PreText)
def get_data(self, ticker1, ticker2):
fname = join(data_dir, "table_%s.csv" % ticker1.lower())
data1 = pd.read_csv(fname,
names=['date', 'foo', 'o', 'h', 'l', 'c', 'v'],
header=False,
parse_dates=['date'])
data1 = data1.set_index('date')
fname = join(data_dir, "table_%s.csv" % ticker2.lower())
data2 = pd.read_csv(fname,
names=['date', 'foo', 'o', 'h', 'l', 'c', 'v'],
header=False,
parse_dates=['date'])
data2 = data2.set_index('date')
data = pd.DataFrame({ticker1 : data1.c, ticker2 : data2.c})
data[ticker1 + "_returns"] = data[ticker1].diff()
data[ticker2 + "_returns"] = data[ticker2].diff()
data = data.dropna()
return data
def create(self, doc):
"""
This function is called once, and is responsible for
creating all objects (plots, datasources, etc)
"""
self.modelform = StockInputModel()
self.modelform.create_inputs(doc)
ticker1 = self.modelform.ticker1
ticker2 = self.modelform.ticker2
self.pretext = PreText(text="")
self.make_source(ticker1, ticker2)
self.make_plots(ticker1, ticker2)
self.make_stats()
self.set_children()
def make_source(self, ticker1, ticker2):
df = self.get_data(ticker1, ticker2)
self.source = ColumnDataSource(data=df)
def make_plots(self, ticker1, ticker2):
self.plot = circle(ticker1 + "_returns", ticker2 + "_returns",
title="%s vs %s" %(ticker1, ticker2),
source=self.source,
plot_width=400, plot_height=400,
tools="pan,wheel_zoom,select"
)
def set_children(self):
self.children = [self.modelform, self.plot, self.pretext]
curdoc()._plotcontext.children = [self]
curdoc().add_all()
def input_change(self, obj, attrname, old, new):
"""
        This function is called whenever the input form changes.
        It is responsible for updating the plot, or whatever
you want. The signature is
obj : the object that changed
attrname : the attr that changed
old : old value of attr
new : new value of attr
"""
if attrname in ("ticker1", "ticker2"):
ticker1 = self.modelform.ticker1
ticker2 = self.modelform.ticker2
self.make_source(ticker1, ticker2)
self.make_plots(ticker1, ticker2)
self.set_children()
def setup_events(self):
super(StockApp, self).setup_events()
if self.source:
self.source.on_change('selected', self, 'selection_change')
def make_stats(self):
pandas_df = pd.DataFrame(self.source.data)
selected = self.source.selected
if selected:
pandas_df = pandas_df.iloc[selected, :]
stats = pandas_df.describe()
self.pretext.text = str(stats)
def selection_change(self, obj, attrname, old, new):
self.make_stats()
# the following adds "/stocks" as a url which renders StockApp
bokeh_url = "http://localhost:5006"
StockApp.add_route("/stocks", bokeh_url)
if __name__ == "__main__":
bokeh.server.run()
| {
"content_hash": "c6e66afe88567e15003c291e2de0b8c9",
"timestamp": "",
"source": "github",
"line_count": 224,
"max_line_length": 83,
"avg_line_length": 34.316964285714285,
"alnum_prop": 0.5803304279953168,
"repo_name": "sahat/bokeh",
"id": "e185aa59bfc0add5380e07642855064de7e85814",
"size": "7687",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/app/applet/example.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "329134"
},
{
"name": "CoffeeScript",
"bytes": "2099237"
},
{
"name": "JavaScript",
"bytes": "2683660"
},
{
"name": "Python",
"bytes": "973217"
},
{
"name": "Scala",
"bytes": "27312"
},
{
"name": "Shell",
"bytes": "12135"
}
],
"symlink_target": ""
} |
import pytest
import tempfile
from vic import lib as vic_lib
@pytest.fixture()
def param_file(scope='function'):
p = 'GAUGE_HEIGHT 12.33\n'
temp = tempfile.NamedTemporaryFile(prefix='test_param', suffix='txt')
with open(temp.name, 'w') as f:
f.write(p)
return vic_lib.open_file(temp.name.encode(), b'r')
def test_get_parameters(param_file):
assert vic_lib.get_parameters(param_file) is None
assert vic_lib.param.GAUGE_HEIGHT == 12.33
def test_validate_parameters():
assert vic_lib.validate_parameters() is None
| {
"content_hash": "654012e0a1d5670102e7721e6e35d8bc",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 73,
"avg_line_length": 26.333333333333332,
"alnum_prop": 0.6943942133815552,
"repo_name": "UW-Hydro/VIC",
"id": "4767f9529e6bc04f295fee6898693becc5931e27",
"size": "553",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "tests/unit/shared/test_get_parameters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2344409"
},
{
"name": "Dockerfile",
"bytes": "792"
},
{
"name": "Fortran",
"bytes": "55922"
},
{
"name": "Makefile",
"bytes": "11578"
},
{
"name": "Python",
"bytes": "175656"
},
{
"name": "Shell",
"bytes": "10906"
}
],
"symlink_target": ""
} |
from pyscf.prop.rotational_gtensor import rhf
from pyscf.prop.rotational_gtensor import uhf
#from pyscf.prop.magnetizability import dhf
RHF = rhf.RotationalGTensor
UHF = uhf.RotationalGTensor
#DHF = dhf.RotationalGTensor
try:
from pyscf.prop.rotational_gtensor import rks
from pyscf.prop.rotational_gtensor import uks
RKS = rks.RotationalGTensor
UKS = uks.RotationalGTensor
except ImportError:
pass
| {
"content_hash": "b930e3628444ae9d7f9c5d986adeb3ca",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 49,
"avg_line_length": 28.066666666666666,
"alnum_prop": 0.7885985748218527,
"repo_name": "gkc1000/pyscf",
"id": "f45dca45ec13babdb8c0bc7d53079cd47c5018ec",
"size": "1103",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyscf/prop/rotational_gtensor/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2749942"
},
{
"name": "C++",
"bytes": "20522"
},
{
"name": "CMake",
"bytes": "29300"
},
{
"name": "Common Lisp",
"bytes": "40269"
},
{
"name": "Cuda",
"bytes": "12405"
},
{
"name": "Fortran",
"bytes": "1104054"
},
{
"name": "Jupyter Notebook",
"bytes": "42844"
},
{
"name": "Makefile",
"bytes": "6797"
},
{
"name": "Python",
"bytes": "10739278"
},
{
"name": "Shell",
"bytes": "5480"
},
{
"name": "VBA",
"bytes": "577"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Backup',
fields=[
('backup_id', models.AutoField(serialize=False, primary_key=True)),
('protocol', models.CharField(max_length=255, choices=[(b'local', b'file system'), (b's3', b'Amazon S3')])),
('filename', models.CharField(max_length=4096)),
('jsonfile', models.CharField(max_length=4096)),
('description', models.CharField(default=b'', max_length=4096)),
('datetimestamp', models.DateTimeField(auto_now_add=True)),
('status', models.IntegerField(default=0, choices=[(0, b'Done'), (1, b'Processing'), (2, b'Failed')])),
],
options={
'db_table': 'backups',
'managed': True,
},
),
migrations.CreateModel(
name='Channel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('channel_name', models.CharField(max_length=255)),
('channel_description', models.CharField(max_length=4096, blank=True)),
('channel_type', models.CharField(max_length=255, choices=[(b'image', b'IMAGES'), (b'annotation', b'ANNOTATIONS'), (b'timeseries', b'TIMESERIES')])),
('resolution', models.IntegerField(default=0)),
('propagate', models.IntegerField(default=0, choices=[(0, b'NOT PROPAGATED'), (2, b'PROPAGATED')])),
('channel_datatype', models.CharField(max_length=255, choices=[(b'uint8', b'uint8'), (b'uint16', b'uint16'), (b'uint32', b'uint32'), (b'uint64', b'uint64'), (b'float32', b'float32')])),
('readonly', models.IntegerField(default=0, choices=[(1, b'Yes'), (0, b'No')])),
('exceptions', models.IntegerField(default=0, choices=[(1, b'Yes'), (0, b'No')])),
('startwindow', models.IntegerField(default=0)),
('endwindow', models.IntegerField(default=0)),
('default', models.BooleanField(default=False)),
('header', models.CharField(default=b'', max_length=8192, blank=True)),
],
options={
'db_table': 'channels',
'managed': True,
},
),
migrations.CreateModel(
name='Dataset',
fields=[
('dataset_name', models.CharField(max_length=255, serialize=False, verbose_name=b'Name of the Image dataset', primary_key=True)),
('dataset_description', models.CharField(max_length=4096, blank=True)),
('public', models.IntegerField(default=0, choices=[(0, b'Private'), (1, b'Public')])),
('ximagesize', models.IntegerField()),
('yimagesize', models.IntegerField()),
('zimagesize', models.IntegerField()),
('xoffset', models.IntegerField(default=0)),
('yoffset', models.IntegerField(default=0)),
('zoffset', models.IntegerField(default=0)),
('xvoxelres', models.FloatField(default=1.0)),
('yvoxelres', models.FloatField(default=1.0)),
('zvoxelres', models.FloatField(default=1.0)),
('scalingoption', models.IntegerField(default=0, choices=[(0, b'Z Slices'), (1, b'Isotropic')])),
('scalinglevels', models.IntegerField(default=0)),
('starttime', models.IntegerField(default=0)),
('endtime', models.IntegerField(default=0)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'datasets',
'managed': True,
},
),
migrations.CreateModel(
name='Project',
fields=[
('project_name', models.CharField(max_length=255, serialize=False, primary_key=True)),
('project_description', models.CharField(max_length=4096, blank=True)),
('public', models.IntegerField(default=0, choices=[(0, b'Private'), (1, b'Public')])),
('host', models.CharField(default=b'localhost', max_length=255, choices=[(b'dsp061.pha.jhu.edu', b'default'), (b'dsp061.pha.jhu.edu', b'dsp061'), (b'dsp062.pha.jhu.edu', b'dsp062'), (b'dsp063.pha.jhu.edu', b'dsp063'), (b'localhost', b'Debug')])),
('kvengine', models.CharField(default=b'MySQL', max_length=255, choices=[(b'MySQL', b'MySQL'), (b'Cassandra', b'Cassandra'), (b'Riak', b'Riak')])),
('kvserver', models.CharField(default=b'localhost', max_length=255, choices=[(b'dsp061.pha.jhu.edu', b'default'), (b'dsp061.pha.jhu.edu', b'dsp061'), (b'dsp062.pha.jhu.edu', b'dsp062'), (b'dsp063.pha.jhu.edu', b'dsp063'), (b'localhost', b'Debug')])),
('nd_version', models.CharField(default=b'0.6', max_length=255)),
('schema_version', models.CharField(default=b'0.6', max_length=255)),
('dataset', models.ForeignKey(to='nduser.Dataset')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'projects',
'managed': True,
},
),
migrations.CreateModel(
name='Token',
fields=[
('token_name', models.CharField(max_length=255, serialize=False, primary_key=True)),
('token_description', models.CharField(max_length=4096, blank=True)),
('public', models.IntegerField(default=0, choices=[(0, b'Private'), (1, b'Public')])),
('project', models.ForeignKey(to='nduser.Project')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'tokens',
'managed': True,
},
),
migrations.CreateModel(
name='NIFTIHeader',
fields=[
('channel', models.OneToOneField(primary_key=True, serialize=False, to='nduser.Channel')),
('header', models.BinaryField(max_length=1024)),
('affine', models.BinaryField(max_length=1024)),
],
options={
'db_table': 'nifti_header',
'managed': True,
},
),
migrations.AddField(
model_name='channel',
name='project',
field=models.ForeignKey(to='nduser.Project'),
),
migrations.AddField(
model_name='backup',
name='channel',
field=models.ForeignKey(blank=True, to='nduser.Channel', null=True),
),
migrations.AddField(
model_name='backup',
name='project',
field=models.ForeignKey(to='nduser.Project'),
),
migrations.AlterUniqueTogether(
name='channel',
unique_together=set([('project', 'channel_name')]),
),
migrations.AlterUniqueTogether(
name='backup',
unique_together=set([('project', 'datetimestamp')]),
),
]
| {
"content_hash": "716d494734a0caa1cc9a5a785a6466ba",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 266,
"avg_line_length": 51.39041095890411,
"alnum_prop": 0.5371184859389577,
"repo_name": "openconnectome/open-connectome",
"id": "24cbde84e39ab6c5b85c5e3e3f4a62c7c81128cc",
"size": "7527",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django/nduser/migrations/0001_initial.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "43100"
},
{
"name": "C++",
"bytes": "23724"
},
{
"name": "CSS",
"bytes": "53255"
},
{
"name": "HTML",
"bytes": "142332"
},
{
"name": "JavaScript",
"bytes": "303249"
},
{
"name": "Makefile",
"bytes": "2273"
},
{
"name": "Python",
"bytes": "1409968"
},
{
"name": "Shell",
"bytes": "5637"
}
],
"symlink_target": ""
} |
import calendar
import itertools
import logging
from datetime import datetime, timedelta, time
from django import http
from django.db import models
from django.template.context import RequestContext
from django.shortcuts import get_object_or_404, render
from swingtime.models import Event, Occurrence
from swingtime import utils, forms
from swingtime.conf import settings as swingtime_settings
from dateutil import parser
if swingtime_settings.CALENDAR_FIRST_WEEKDAY is not None:
calendar.setfirstweekday(swingtime_settings.CALENDAR_FIRST_WEEKDAY)
#-------------------------------------------------------------------------------
def event_listing(
request,
template='swingtime/event_list.html',
events=None,
**extra_context
):
'''
View all ``events``.
    If ``events`` is a queryset, clone it. If ``None``, default to all ``Event``s.
Context parameters:
``events``
an iterable of ``Event`` objects
... plus all values passed in via **extra_context
'''
if events is None:
events = Event.objects.all()
extra_context['events'] = events
return render(request, template, extra_context)
#-------------------------------------------------------------------------------
def event_view(
request,
pk,
template='swingtime/event_detail.html',
event_form_class=forms.EventForm,
recurrence_form_class=forms.MultipleOccurrenceForm
):
'''
View an ``Event`` instance and optionally update either the event or its
occurrences.
Context parameters:
``event``
the event keyed by ``pk``
``event_form``
a form object for updating the event
``recurrence_form``
a form object for adding occurrences
'''
event = get_object_or_404(Event, pk=pk)
event_form = recurrence_form = None
if request.method == 'POST':
if '_update' in request.POST:
event_form = event_form_class(request.POST, instance=event)
if event_form.is_valid():
event_form.save(event)
return http.HttpResponseRedirect(request.path)
elif '_add' in request.POST:
recurrence_form = recurrence_form_class(request.POST)
if recurrence_form.is_valid():
recurrence_form.save(event)
return http.HttpResponseRedirect(request.path)
else:
return http.HttpResponseBadRequest('Bad Request')
data = {
'event': event,
'event_form': event_form or event_form_class(instance=event),
'recurrence_form': recurrence_form or recurrence_form_class(initial={'dtstart': datetime.now()})
}
return render(request, template, data)
#-------------------------------------------------------------------------------
def occurrence_view(
request,
event_pk,
pk,
template='swingtime/occurrence_detail.html',
form_class=forms.SingleOccurrenceForm
):
'''
View a specific occurrence and optionally handle any updates.
Context parameters:
``occurrence``
the occurrence object keyed by ``pk``
``form``
a form object for updating the occurrence
'''
occurrence = get_object_or_404(Occurrence, pk=pk, event__pk=event_pk)
if request.method == 'POST':
form = form_class(request.POST, instance=occurrence)
if form.is_valid():
form.save()
return http.HttpResponseRedirect(request.path)
else:
form = form_class(instance=occurrence)
return render(request, template, {'occurrence': occurrence, 'form': form})
#-------------------------------------------------------------------------------
def add_event(
request,
template='swingtime/add_event.html',
event_form_class=forms.EventForm,
recurrence_form_class=forms.MultipleOccurrenceForm
):
'''
Add a new ``Event`` instance and 1 or more associated ``Occurrence``s.
Context parameters:
``dtstart``
a datetime.datetime object representing the GET request value if present,
otherwise None
``event_form``
a form object for updating the event
``recurrence_form``
a form object for adding occurrences
'''
dtstart = None
if request.method == 'POST':
event_form = event_form_class(request.POST)
recurrence_form = recurrence_form_class(request.POST)
if event_form.is_valid() and recurrence_form.is_valid():
event = event_form.save()
recurrence_form.save(event)
return http.HttpResponseRedirect(event.get_absolute_url())
else:
if 'dtstart' in request.GET:
try:
dtstart = parser.parse(request.GET['dtstart'])
except(TypeError, ValueError) as exc:
# TODO: A badly formatted date is passed to add_event
logging.warning(exc)
dtstart = dtstart or datetime.now()
event_form = event_form_class()
recurrence_form = recurrence_form_class(initial={'dtstart': dtstart})
return render(
request,
template,
{'dtstart': dtstart, 'event_form': event_form, 'recurrence_form': recurrence_form}
)
#-------------------------------------------------------------------------------
def _datetime_view(
request,
template,
dt,
timeslot_factory=None,
items=None,
params=None
):
'''
Build a time slot grid representation for the given datetime ``dt``. See
utils.create_timeslot_table documentation for items and params.
Context parameters:
``day``
the specified datetime value (dt)
``next_day``
day + 1 day
``prev_day``
day - 1 day
``timeslots``
time slot grid of (time, cells) rows
'''
timeslot_factory = timeslot_factory or utils.create_timeslot_table
params = params or {}
return render(request, template, {
'day': dt,
'next_day': dt + timedelta(days=+1),
'prev_day': dt + timedelta(days=-1),
'timeslots': timeslot_factory(dt, items, **params)
})
#-------------------------------------------------------------------------------
def day_view(request, year, month, day, template='swingtime/daily_view.html', **params):
'''
    See documentation for function ``_datetime_view``.
'''
dt = datetime(int(year), int(month), int(day))
return _datetime_view(request, template, dt, **params)
#-------------------------------------------------------------------------------
def today_view(request, template='swingtime/daily_view.html', **params):
'''
    See documentation for function ``_datetime_view``.
'''
return _datetime_view(request, template, datetime.now(), **params)
#-------------------------------------------------------------------------------
def year_view(request, year, template='swingtime/yearly_view.html', queryset=None):
'''
Context parameters:
``year``
        an integer value for the year in question
``next_year``
year + 1
``last_year``
year - 1
``by_month``
a sorted list of (month, occurrences) tuples where month is a
datetime.datetime object for the first day of a month and occurrences
is a (potentially empty) list of values for that month. Only months
        which have at least 1 occurrence are represented in the list
'''
year = int(year)
queryset = queryset._clone() if queryset is not None else Occurrence.objects.select_related()
occurrences = queryset.filter(
models.Q(start_time__year=year) |
models.Q(end_time__year=year)
)
def group_key(o):
return datetime(
year,
o.start_time.month if o.start_time.year == year else o.end_time.month,
1
)
return render(request, template, {
'year': year,
'by_month': [(dt, list(o)) for dt,o in itertools.groupby(occurrences, group_key)],
'next_year': year + 1,
'last_year': year - 1
})
#-------------------------------------------------------------------------------
def month_view(
request,
year,
month,
template='swingtime/monthly_view.html',
queryset=None
):
'''
    Render a traditional calendar grid view with temporal navigation variables.
Context parameters:
``today``
the current datetime.datetime value
``calendar``
        a list of rows containing (day, items) cells, where day is the
        day-of-the-month integer and items is a (potentially empty) list of
        occurrences for the day
``this_month``
a datetime.datetime representing the first day of the month
``next_month``
this_month + 1 month
``last_month``
this_month - 1 month
'''
year, month = int(year), int(month)
cal = calendar.monthcalendar(year, month)
dtstart = datetime(year, month, 1)
last_day = max(cal[-1])
dtend = datetime(year, month, last_day)
    # TODO: Should occurrences that started in the previous month but end in
    # this month be included?
queryset = queryset._clone() if queryset is not None else Occurrence.objects.select_related()
occurrences = queryset.filter(start_time__year=year, start_time__month=month)
def start_day(o):
return o.start_time.day
by_day = dict([(dt, list(o)) for dt,o in itertools.groupby(occurrences, start_day)])
data = {
'today': datetime.now(),
'calendar': [[(d, by_day.get(d, [])) for d in row] for row in cal],
'this_month': dtstart,
'next_month': dtstart + timedelta(days=+last_day),
'last_month': dtstart + timedelta(days=-1),
}
return render(request, template, data)
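# Illustrative only, not part of the original module: a minimal URLconf sketch
# wiring up the views above, assuming classic url() routing (the patterns are
# hypothetical; swingtime ships its own urls module, which may differ).
#
#     from django.conf.urls import url
#     from swingtime import views
#
#     urlpatterns = [
#         url(r'^$', views.today_view),
#         url(r'^(?P<year>\d{4})/$', views.year_view),
#         url(r'^(?P<year>\d{4})/(?P<month>\d{1,2})/$', views.month_view),
#         url(r'^(?P<year>\d{4})/(?P<month>\d{1,2})/(?P<day>\d{1,2})/$',
#             views.day_view),
#         url(r'^events/$', views.event_listing),
#         url(r'^events/add/$', views.add_event),
#         url(r'^events/(?P<pk>\d+)/$', views.event_view),
#         url(r'^events/(?P<event_pk>\d+)/(?P<pk>\d+)/$', views.occurrence_view),
#     ]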
| {
"content_hash": "5a4d6f7cc3f10d59b687462cd5be8223",
"timestamp": "",
"source": "github",
"line_count": 334,
"max_line_length": 104,
"avg_line_length": 29.952095808383234,
"alnum_prop": 0.5671731307477009,
"repo_name": "FortschrittApps/django-swingtime",
"id": "36419f19fbe6d51f2c3f0f87bc9053b04f39d209",
"size": "10004",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "swingtime/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "28317"
},
{
"name": "Python",
"bytes": "70279"
},
{
"name": "Shell",
"bytes": "420"
}
],
"symlink_target": ""
} |
from direct.actor.Actor import Actor
from panda3d.bullet import BulletBoxShape
from panda3d.bullet import BulletCharacterControllerNode
from panda3d.core import *
# My Game Classes
from Settings import *
# Members:
# controllerNode
# nodePath
# animator
# pose
class Character():
def __init__(self, world, render, name, animator, position, pose):
# Create a box shape
shape = BulletBoxShape(Vec3(0.3, 0.2, 0.7))
# Create a Controller Node with the shape
self.controllerNode = BulletCharacterControllerNode(shape, 0.4, name)
self.controllerNode.setIntoCollideMask(BitMask32.allOn())
# Attach the Controller Node to the world
world.attachCharacter(self.controllerNode)
# Attach Controller Node to the render and get a NodePath
self.nodePath = render.attachNewNode(self.controllerNode)
# Setup the nodePath
self.nodePath.setCollideMask(BitMask32.allOn())
self.nodePath.setH(DEFAULT_NODEPATH_HEIGHT)
self.nodePath.setPos(position)
# Set the actor of the Character
self.animator = animator
# Add animator to NodePath so it can be rendered
self.animator.reparentTo(self.nodePath)
# Configure the animator
self.animator.setScale(DEFAULT_ANIMATOR_SCALE)
self.animator.setH(DEFAULT_ANIMATOR_HEIGHT)
self.animator.setPos(DEFAULT_ANIMATOR_OFFSET)
# Set Current Character Pose
self.pose = pose
# Save home position
self.home = position
# ========== Character Controls
def lookAt(self, position):
position.setZ(self.getPosition().getZ())
self.nodePath.lookAt(position)
def movement(self, vector):
self.controllerNode.setLinearMovement(vector, True)
# ========== Getters
def getNodePath(self):
return self.nodePath
def getHeight(self):
return self.nodePath.getZ()
def getPosition(self):
return self.nodePath.getPos()
def getPose(self):
return self.pose
def getControllerNode(self):
return self.controllerNode
def getCurrentPoseName(self):
return self.getCurrentAnim()
def getHomePosition(self):
return self.home
# ========== Setters
def setPosition(self, position):
self.nodePath.setPos(position)
def setPose(self, pose):
if self.animator.getCurrentAnim() != 'jump':
if pose == JUMPING:
self.animator.stop()
self.animator.play('jump')
self.pose = JUMPING
else:
if self.pose != pose:
self.pose = pose
self.animator.stop()
if (self.pose == RUNNING):
self.animator.loop('run')
elif (self.pose == WALKING):
self.animator.loop('run')
elif (self.pose == STANDING):
self.animator.pose('walk', 0)
elif (self.pose == ATTACKING):
self.animator.loop('attack')
                        self.pose = STANDING
# ========== Boolean Functions
def isWalking(self):
animate = self.animator.getCurrentAnim()
return animate == 'walk' or animate == 'run'
def isAttacking(self):
animate = self.animator.getCurrentAnim()
return animate == 'attack'
def goBackHome(self):
return
# print("TODO: Go back to self.position") | {
"content_hash": "ee9ccc87f07de8ff16e3ef22a0b258fa",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 77,
"avg_line_length": 31.92792792792793,
"alnum_prop": 0.5973476297968398,
"repo_name": "LIHAOLIN/Lego-Adventure",
"id": "d1fc43ce20a26fcd4420234a34f1486a7fa9a0c2",
"size": "3562",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Character.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28890"
}
],
"symlink_target": ""
} |
"""Support for Ubee router."""
import logging
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN, PLATFORM_SCHEMA, DeviceScanner)
from homeassistant.const import (
CONF_HOST, CONF_PASSWORD, CONF_USERNAME)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_MODEL = 'model'
DEFAULT_MODEL = 'detect'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Optional(CONF_MODEL, default=DEFAULT_MODEL): cv.string
})
def get_scanner(hass, config):
"""Validate the configuration and return a Ubee scanner."""
info = config[DOMAIN]
host = info[CONF_HOST]
username = info[CONF_USERNAME]
password = info[CONF_PASSWORD]
model = info[CONF_MODEL]
from pyubee import Ubee
ubee = Ubee(host, username, password, model)
if not ubee.login():
_LOGGER.error("Login failed")
return None
scanner = UbeeDeviceScanner(ubee)
return scanner
class UbeeDeviceScanner(DeviceScanner):
"""This class queries a wireless Ubee router."""
def __init__(self, ubee):
"""Initialize the Ubee scanner."""
self._ubee = ubee
self._mac2name = {}
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
devices = self._get_connected_devices()
self._mac2name = devices
return list(devices)
def get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
return self._mac2name.get(device)
def _get_connected_devices(self):
"""List connected devices with pyubee."""
if not self._ubee.session_active():
self._ubee.login()
return self._ubee.get_connected_devices()
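# Illustrative only, not part of the component: a standalone sketch of the
# pyubee calls used above (host, credentials and model are placeholder values;
# get_connected_devices() is treated as a MAC -> name mapping, as the scanner
# does).
#
#     from pyubee import Ubee
#
#     ubee = Ubee('192.168.100.1', 'admin', 'secret', 'detect')
#     if ubee.login():
#         for mac, name in ubee.get_connected_devices().items():
#             print(mac, name)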
| {
"content_hash": "57e88fe461c928304b238db66398a57b",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 75,
"avg_line_length": 28.87878787878788,
"alnum_prop": 0.6647429171038824,
"repo_name": "molobrakos/home-assistant",
"id": "8e610a4f51c8083e9393922416cfcf8ea46bf1b1",
"size": "1906",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "homeassistant/components/ubee/device_tracker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "HCL",
"bytes": "407"
},
{
"name": "Python",
"bytes": "15057917"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17609"
}
],
"symlink_target": ""
} |
from swgpy.object import *
from swgpy.command import BaseSwgCommand
from swgpy import ACTION
class PeaceCommand(BaseSwgCommand):
def getCommandName(self):
return 'peace'
def run(self):
actor = self.getActor()
target = self.getTarget()
if actor.hasState(ACTION.COMBAT):
actor.toggleStateOff(ACTION.COMBAT)
actor.toggleStateOn(ACTION.PEACE)
if target:
actor.removeDefender(target.id)
if not target.hasState(ACTION.COMBAT):
target.removeDefender(actor.id)
actor.targetId = 0
SystemMessage.sendFlyText(actor, "@combat_effects:go_peace", FlyTextColor.WHITE)
else:
actor.stateBitmask = ACTION.NONE
def postRun(self, success):
if success:
combat_svc = self.getKernel().serviceManager().combatService()
actor = self.getActor()
target = self.getTargetCreature()
if (actor and target):
combat_svc.endCombat(actor, target)
| {
"content_hash": "1d124af80979a68f39956bce34717111",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 92,
"avg_line_length": 33.28125,
"alnum_prop": 0.6065727699530516,
"repo_name": "anhstudios/swganh",
"id": "7b8630629f12911ace7221bc472160189426eed5",
"size": "1065",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/commands/peace.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
"""
The main QuerySet implementation. This provides the public API for the ORM.
"""
import copy
import sys
import warnings
from collections import OrderedDict, deque
from django.conf import settings
from django.core import exceptions
from django.db import (
DJANGO_VERSION_PICKLE_KEY, IntegrityError, connections, router,
transaction,
)
from django.db.models import sql
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import Collector
from django.db.models.expressions import F, Date, DateTime
from django.db.models.fields import AutoField
from django.db.models.query_utils import (
Q, InvalidQuery, check_rel_lookup_compatibility, deferred_class_factory,
)
from django.db.models.sql.constants import CURSOR
from django.utils import six, timezone
from django.utils.functional import partition
from django.utils.version import get_version
# The maximum number of items to display in a QuerySet.__repr__
REPR_OUTPUT_SIZE = 20
# Pull into this namespace for backwards compatibility.
EmptyResultSet = sql.EmptyResultSet
class BaseIterable(object):
def __init__(self, queryset):
self.queryset = queryset
class ModelIterable(BaseIterable):
"""
Iterable that yields a model instance for each row.
"""
def __iter__(self):
queryset = self.queryset
db = queryset.db
compiler = queryset.query.get_compiler(using=db)
# Execute the query. This will also fill compiler.select, klass_info,
# and annotations.
results = compiler.execute_sql()
select, klass_info, annotation_col_map = (compiler.select, compiler.klass_info,
compiler.annotation_col_map)
if klass_info is None:
return
model_cls = klass_info['model']
select_fields = klass_info['select_fields']
model_fields_start, model_fields_end = select_fields[0], select_fields[-1] + 1
init_list = [f[0].target.attname
for f in select[model_fields_start:model_fields_end]]
if len(init_list) != len(model_cls._meta.concrete_fields):
init_set = set(init_list)
skip = [f.attname for f in model_cls._meta.concrete_fields
if f.attname not in init_set]
model_cls = deferred_class_factory(model_cls, skip)
related_populators = get_related_populators(klass_info, select, db)
for row in compiler.results_iter(results):
obj = model_cls.from_db(db, init_list, row[model_fields_start:model_fields_end])
if related_populators:
for rel_populator in related_populators:
rel_populator.populate(row, obj)
if annotation_col_map:
for attr_name, col_pos in annotation_col_map.items():
setattr(obj, attr_name, row[col_pos])
# Add the known related objects to the model, if there are any
if queryset._known_related_objects:
for field, rel_objs in queryset._known_related_objects.items():
# Avoid overwriting objects loaded e.g. by select_related
if hasattr(obj, field.get_cache_name()):
continue
pk = getattr(obj, field.get_attname())
try:
rel_obj = rel_objs[pk]
except KeyError:
pass # may happen in qs1 | qs2 scenarios
else:
setattr(obj, field.name, rel_obj)
yield obj
class ValuesIterable(BaseIterable):
"""
Iterable returned by QuerySet.values() that yields a dict
for each row.
"""
def __iter__(self):
queryset = self.queryset
query = queryset.query
compiler = query.get_compiler(queryset.db)
field_names = list(query.values_select)
extra_names = list(query.extra_select)
annotation_names = list(query.annotation_select)
# extra(select=...) cols are always at the start of the row.
names = extra_names + field_names + annotation_names
for row in compiler.results_iter():
yield dict(zip(names, row))
class ValuesListIterable(BaseIterable):
"""
Iterable returned by QuerySet.values_list(flat=False)
that yields a tuple for each row.
"""
def __iter__(self):
queryset = self.queryset
query = queryset.query
compiler = query.get_compiler(queryset.db)
if not query.extra_select and not query.annotation_select:
for row in compiler.results_iter():
yield tuple(row)
else:
field_names = list(query.values_select)
extra_names = list(query.extra_select)
annotation_names = list(query.annotation_select)
# extra(select=...) cols are always at the start of the row.
names = extra_names + field_names + annotation_names
if queryset._fields:
# Reorder according to fields.
fields = list(queryset._fields) + [f for f in annotation_names if f not in queryset._fields]
else:
fields = names
for row in compiler.results_iter():
data = dict(zip(names, row))
yield tuple(data[f] for f in fields)
class FlatValuesListIterable(BaseIterable):
"""
Iterable returned by QuerySet.values_list(flat=True) that
yields single values.
"""
def __iter__(self):
queryset = self.queryset
compiler = queryset.query.get_compiler(queryset.db)
for row in compiler.results_iter():
yield row[0]
class QuerySet(object):
"""
Represents a lazy database lookup for a set of objects.
"""
def __init__(self, model=None, query=None, using=None, hints=None):
self.model = model
self._db = using
self._hints = hints or {}
self.query = query or sql.Query(self.model)
self._result_cache = None
self._sticky_filter = False
self._for_write = False
self._prefetch_related_lookups = []
self._prefetch_done = False
self._known_related_objects = {} # {rel_field, {pk: rel_obj}}
self._iterable_class = ModelIterable
self._fields = None
def as_manager(cls):
# Address the circular dependency between `Queryset` and `Manager`.
from django.db.models.manager import Manager
manager = Manager.from_queryset(cls)()
manager._built_with_as_manager = True
return manager
as_manager.queryset_only = True
as_manager = classmethod(as_manager)
########################
# PYTHON MAGIC METHODS #
########################
def __deepcopy__(self, memo):
"""
Deep copy of a QuerySet doesn't populate the cache
"""
obj = self.__class__()
for k, v in self.__dict__.items():
if k == '_result_cache':
obj.__dict__[k] = None
else:
obj.__dict__[k] = copy.deepcopy(v, memo)
return obj
def __getstate__(self):
"""
Allows the QuerySet to be pickled.
"""
# Force the cache to be fully populated.
self._fetch_all()
obj_dict = self.__dict__.copy()
obj_dict[DJANGO_VERSION_PICKLE_KEY] = get_version()
return obj_dict
def __setstate__(self, state):
msg = None
pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY)
if pickled_version:
current_version = get_version()
if current_version != pickled_version:
msg = ("Pickled queryset instance's Django version %s does"
" not match the current version %s."
% (pickled_version, current_version))
else:
msg = "Pickled queryset instance's Django version is not specified."
if msg:
warnings.warn(msg, RuntimeWarning, stacklevel=2)
self.__dict__.update(state)
def __repr__(self):
data = list(self[:REPR_OUTPUT_SIZE + 1])
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = "...(remaining elements truncated)..."
return '<QuerySet %r>' % data
def __len__(self):
self._fetch_all()
return len(self._result_cache)
def __iter__(self):
"""
The queryset iterator protocol uses three nested iterators in the
default case:
1. sql.compiler:execute_sql()
               - Returns 100 rows at a time (constants.GET_ITERATOR_CHUNK_SIZE)
using cursor.fetchmany(). This part is responsible for
doing some column masking, and returning the rows in chunks.
2. sql/compiler.results_iter()
               - Returns one row at a time. At this point the rows are still just
tuples. In some cases the return values are converted to
Python values at this location.
3. self.iterator()
- Responsible for turning the rows into model objects.
"""
self._fetch_all()
return iter(self._result_cache)
def __bool__(self):
self._fetch_all()
return bool(self._result_cache)
def __nonzero__(self): # Python 2 compatibility
return type(self).__bool__(self)
def __getitem__(self, k):
"""
Retrieves an item or slice from the set of results.
"""
if not isinstance(k, (slice,) + six.integer_types):
raise TypeError
assert ((not isinstance(k, slice) and (k >= 0)) or
(isinstance(k, slice) and (k.start is None or k.start >= 0) and
(k.stop is None or k.stop >= 0))), \
"Negative indexing is not supported."
if self._result_cache is not None:
return self._result_cache[k]
if isinstance(k, slice):
qs = self._clone()
if k.start is not None:
start = int(k.start)
else:
start = None
if k.stop is not None:
stop = int(k.stop)
else:
stop = None
qs.query.set_limits(start, stop)
return list(qs)[::k.step] if k.step else qs
qs = self._clone()
qs.query.set_limits(k, k + 1)
return list(qs)[0]
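    # Usage sketch (illustrative, with a hypothetical Entry model): slicing
    # maps onto LIMIT/OFFSET and stays lazy unless a step or an index is used.
    #
    #     Entry.objects.all()[:5]       # LIMIT 5, still an unevaluated QuerySet
    #     Entry.objects.all()[5:10]     # OFFSET 5 LIMIT 5
    #     Entry.objects.all()[:10:2]    # a step forces evaluation to a list
    #     Entry.objects.all()[0]        # a single index evaluates immediately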
def __and__(self, other):
self._merge_sanity_check(other)
if isinstance(other, EmptyQuerySet):
return other
if isinstance(self, EmptyQuerySet):
return self
combined = self._clone()
combined._merge_known_related_objects(other)
combined.query.combine(other.query, sql.AND)
return combined
def __or__(self, other):
self._merge_sanity_check(other)
if isinstance(self, EmptyQuerySet):
return other
if isinstance(other, EmptyQuerySet):
return self
combined = self._clone()
combined._merge_known_related_objects(other)
combined.query.combine(other.query, sql.OR)
return combined
####################################
# METHODS THAT DO DATABASE QUERIES #
####################################
def iterator(self):
"""
An iterator over the results from applying this QuerySet to the
database.
"""
return iter(self._iterable_class(self))
def aggregate(self, *args, **kwargs):
"""
Returns a dictionary containing the calculations (aggregation)
over the current queryset
If args is present the expression is passed as a kwarg using
the Aggregate object's default alias.
"""
if self.query.distinct_fields:
raise NotImplementedError("aggregate() + distinct(fields) not implemented.")
for arg in args:
# The default_alias property may raise a TypeError, so we use
# a try/except construct rather than hasattr in order to remain
# consistent between PY2 and PY3 (hasattr would swallow
# the TypeError on PY2).
try:
arg.default_alias
except (AttributeError, TypeError):
raise TypeError("Complex aggregates require an alias")
kwargs[arg.default_alias] = arg
query = self.query.clone()
for (alias, aggregate_expr) in kwargs.items():
query.add_annotation(aggregate_expr, alias, is_summary=True)
if not query.annotations[alias].contains_aggregate:
raise TypeError("%s is not an aggregate expression" % alias)
return query.get_aggregation(self.db, kwargs.keys())
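    # Usage sketch (illustrative, with a hypothetical Book model):
    #
    #     from django.db.models import Avg, Max
    #     Book.objects.aggregate(Avg('price'))          # {'price__avg': ...}
    #     Book.objects.aggregate(highest=Max('price'))  # {'highest': ...}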
def count(self):
"""
Performs a SELECT COUNT() and returns the number of records as an
integer.
If the QuerySet is already fully cached this simply returns the length
of the cached results set to avoid multiple SELECT COUNT(*) calls.
"""
if self._result_cache is not None:
return len(self._result_cache)
return self.query.get_count(using=self.db)
def get(self, *args, **kwargs):
"""
Performs the query and returns a single object matching the given
keyword arguments.
"""
clone = self.filter(*args, **kwargs)
if self.query.can_filter() and not self.query.distinct_fields:
clone = clone.order_by()
num = len(clone)
if num == 1:
return clone._result_cache[0]
if not num:
raise self.model.DoesNotExist(
"%s matching query does not exist." %
self.model._meta.object_name
)
raise self.model.MultipleObjectsReturned(
"get() returned more than one %s -- it returned %s!" %
(self.model._meta.object_name, num)
)
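    # Usage sketch (illustrative, with a hypothetical Entry model):
    #
    #     try:
    #         entry = Entry.objects.get(pk=1)
    #     except Entry.DoesNotExist:
    #         entry = None
    #     # Entry.MultipleObjectsReturned is raised if several rows match.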
def create(self, **kwargs):
"""
Creates a new object with the given kwargs, saving it to the database
and returning the created object.
"""
obj = self.model(**kwargs)
self._for_write = True
obj.save(force_insert=True, using=self.db)
return obj
def _populate_pk_values(self, objs):
for obj in objs:
if obj.pk is None:
obj.pk = obj._meta.pk.get_pk_value_on_save(obj)
def bulk_create(self, objs, batch_size=None):
"""
Inserts each of the instances into the database. This does *not* call
save() on each of the instances, does not send any pre/post save
signals, and does not set the primary key attribute if it is an
autoincrement field. Multi-table models are not supported.
"""
# So this case is fun. When you bulk insert you don't get the primary
# keys back (if it's an autoincrement), so you can't insert into the
# child tables which references this. There are two workarounds, 1)
# this could be implemented if you didn't have an autoincrement pk,
# and 2) you could do it by doing O(n) normal inserts into the parent
# tables to get the primary keys back, and then doing a single bulk
# insert into the childmost table. Some databases might allow doing
# this by using RETURNING clause for the insert query. We're punting
# on these for now because they are relatively rare cases.
assert batch_size is None or batch_size > 0
        # Check that the parents share the same concrete model with our
# model to detect the inheritance pattern ConcreteGrandParent ->
# MultiTableParent -> ProxyChild. Simply checking self.model._meta.proxy
# would not identify that case as involving multiple tables.
for parent in self.model._meta.get_parent_list():
if parent._meta.concrete_model is not self.model._meta.concrete_model:
raise ValueError("Can't bulk create a multi-table inherited model")
if not objs:
return objs
self._for_write = True
connection = connections[self.db]
fields = self.model._meta.concrete_fields
objs = list(objs)
self._populate_pk_values(objs)
with transaction.atomic(using=self.db, savepoint=False):
if (connection.features.can_combine_inserts_with_and_without_auto_increment_pk
and self.model._meta.has_auto_field):
self._batched_insert(objs, fields, batch_size)
else:
objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs)
if objs_with_pk:
self._batched_insert(objs_with_pk, fields, batch_size)
if objs_without_pk:
fields = [f for f in fields if not isinstance(f, AutoField)]
self._batched_insert(objs_without_pk, fields, batch_size)
return objs
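    # Editor's note: illustrative sketch, not Django source. bulk_create()
    # issues batched INSERTs without calling save() or sending signals;
    # `Entry` is a hypothetical model.
    #
    #   Entry.objects.bulk_create(
    #       [Entry(headline='First'), Entry(headline='Second')],
    #       batch_size=500,
    #   )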
def get_or_create(self, defaults=None, **kwargs):
"""
Looks up an object with the given kwargs, creating one if necessary.
Returns a tuple of (object, created), where created is a boolean
specifying whether an object was created.
"""
lookup, params = self._extract_model_params(defaults, **kwargs)
# The get() needs to be targeted at the write database in order
# to avoid potential transaction consistency problems.
self._for_write = True
try:
return self.get(**lookup), False
except self.model.DoesNotExist:
return self._create_object_from_params(lookup, params)
def update_or_create(self, defaults=None, **kwargs):
"""
Looks up an object with the given kwargs, updating one with defaults
if it exists, otherwise creates a new one.
Returns a tuple (object, created), where created is a boolean
specifying whether an object was created.
"""
defaults = defaults or {}
lookup, params = self._extract_model_params(defaults, **kwargs)
self._for_write = True
try:
obj = self.get(**lookup)
except self.model.DoesNotExist:
obj, created = self._create_object_from_params(lookup, params)
if created:
return obj, created
for k, v in six.iteritems(defaults):
setattr(obj, k, v)
obj.save(using=self.db)
return obj, False
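    # Editor's note: illustrative sketch, not Django source, covering the two
    # methods above. Both return an (object, created) tuple; `Person` and its
    # fields are hypothetical.
    #
    #   person, created = Person.objects.get_or_create(
    #       first_name='John', last_name='Lennon',
    #       defaults={'nickname': 'Johnny'},
    #   )
    #   person, created = Person.objects.update_or_create(
    #       first_name='John', last_name='Lennon',
    #       defaults={'nickname': 'J.'},
    #   )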
def _create_object_from_params(self, lookup, params):
"""
Tries to create an object using passed params.
Used by get_or_create and update_or_create
"""
try:
with transaction.atomic(using=self.db):
obj = self.create(**params)
return obj, True
except IntegrityError:
exc_info = sys.exc_info()
try:
return self.get(**lookup), False
except self.model.DoesNotExist:
pass
six.reraise(*exc_info)
def _extract_model_params(self, defaults, **kwargs):
"""
Prepares `lookup` (kwargs that are valid model attributes), `params`
(for creating a model instance) based on given kwargs; for use by
get_or_create and update_or_create.
"""
defaults = defaults or {}
lookup = kwargs.copy()
for f in self.model._meta.fields:
if f.attname in lookup:
lookup[f.name] = lookup.pop(f.attname)
params = {k: v for k, v in kwargs.items() if LOOKUP_SEP not in k}
params.update(defaults)
return lookup, params
def _earliest_or_latest(self, field_name=None, direction="-"):
"""
Returns the latest object, according to the model's
'get_latest_by' option or optional given field_name.
"""
order_by = field_name or getattr(self.model._meta, 'get_latest_by')
assert bool(order_by), "earliest() and latest() require either a "\
"field_name parameter or 'get_latest_by' in the model"
assert self.query.can_filter(), \
"Cannot change a query once a slice has been taken."
obj = self._clone()
obj.query.set_limits(high=1)
obj.query.clear_ordering(force_empty=True)
obj.query.add_ordering('%s%s' % (direction, order_by))
return obj.get()
def earliest(self, field_name=None):
return self._earliest_or_latest(field_name=field_name, direction="")
def latest(self, field_name=None):
return self._earliest_or_latest(field_name=field_name, direction="-")
def first(self):
"""
        Returns the first object of a query, or None if no match is found.
"""
objects = list((self if self.ordered else self.order_by('pk'))[:1])
if objects:
return objects[0]
return None
def last(self):
"""
        Returns the last object of a query, or None if no match is found.
"""
objects = list((self.reverse() if self.ordered else self.order_by('-pk'))[:1])
if objects:
return objects[0]
return None
def in_bulk(self, id_list=None):
"""
Returns a dictionary mapping each of the given IDs to the object with
that ID. If `id_list` isn't provided, the entire QuerySet is evaluated.
"""
assert self.query.can_filter(), \
"Cannot use 'limit' or 'offset' with in_bulk"
if id_list is not None:
if not id_list:
return {}
qs = self.filter(pk__in=id_list).order_by()
else:
qs = self._clone()
return {obj._get_pk_val(): obj for obj in qs}
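    # Editor's note: illustrative sketch, not Django source. in_bulk() maps
    # each requested primary key to its object; `Blog` is a hypothetical model.
    #
    #   Blog.objects.in_bulk([1, 2])   # -> {1: <Blog ...>, 2: <Blog ...>}
    #   Blog.objects.in_bulk([])       # -> {}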
def delete(self):
"""
Deletes the records in the current QuerySet.
"""
assert self.query.can_filter(), \
"Cannot use 'limit' or 'offset' with delete."
if self._fields is not None:
raise TypeError("Cannot call delete() after .values() or .values_list()")
del_query = self._clone()
# The delete is actually 2 queries - one to find related objects,
# and one to delete. Make sure that the discovery of related
# objects is performed on the same database as the deletion.
del_query._for_write = True
# Disable non-supported fields.
del_query.query.select_for_update = False
del_query.query.select_related = False
del_query.query.clear_ordering(force_empty=True)
collector = Collector(using=del_query.db)
collector.collect(del_query)
deleted, _rows_count = collector.delete()
# Clear the result cache, in case this QuerySet gets reused.
self._result_cache = None
return deleted, _rows_count
delete.alters_data = True
delete.queryset_only = True
def _raw_delete(self, using):
"""
Deletes objects found from the given queryset in single direct SQL
query. No signals are sent, and there is no protection for cascades.
"""
return sql.DeleteQuery(self.model).delete_qs(self, using)
_raw_delete.alters_data = True
def update(self, **kwargs):
"""
Updates all elements in the current QuerySet, setting all the given
fields to the appropriate values.
"""
assert self.query.can_filter(), \
"Cannot update a query once a slice has been taken."
self._for_write = True
query = self.query.clone(sql.UpdateQuery)
query.add_update_values(kwargs)
with transaction.atomic(using=self.db, savepoint=False):
rows = query.get_compiler(self.db).execute_sql(CURSOR)
self._result_cache = None
return rows
update.alters_data = True
def _update(self, values):
"""
A version of update that accepts field objects instead of field names.
Used primarily for model saving and not intended for use by general
code (it requires too much poking around at model internals to be
useful at that level).
"""
assert self.query.can_filter(), \
"Cannot update a query once a slice has been taken."
query = self.query.clone(sql.UpdateQuery)
query.add_update_fields(values)
self._result_cache = None
return query.get_compiler(self.db).execute_sql(CURSOR)
_update.alters_data = True
_update.queryset_only = False
def exists(self):
if self._result_cache is None:
return self.query.has_results(using=self.db)
return bool(self._result_cache)
def _prefetch_related_objects(self):
# This method can only be called once the result cache has been filled.
prefetch_related_objects(self._result_cache, self._prefetch_related_lookups)
self._prefetch_done = True
##################################################
# PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS #
##################################################
def raw(self, raw_query, params=None, translations=None, using=None):
if using is None:
using = self.db
return RawQuerySet(raw_query, model=self.model,
params=params, translations=translations,
using=using)
def _values(self, *fields):
clone = self._clone()
clone._fields = fields
query = clone.query
query.select_related = False
query.clear_deferred_loading()
query.clear_select_fields()
if query.group_by is True:
query.add_fields((f.attname for f in self.model._meta.concrete_fields), False)
query.set_group_by()
query.clear_select_fields()
if fields:
field_names = []
extra_names = []
annotation_names = []
if not query._extra and not query._annotations:
# Shortcut - if there are no extra or annotations, then
# the values() clause must be just field names.
field_names = list(fields)
else:
query.default_cols = False
for f in fields:
if f in query.extra_select:
extra_names.append(f)
elif f in query.annotation_select:
annotation_names.append(f)
else:
field_names.append(f)
query.set_extra_mask(extra_names)
query.set_annotation_mask(annotation_names)
else:
field_names = [f.attname for f in self.model._meta.concrete_fields]
query.values_select = field_names
query.add_fields(field_names, True)
return clone
def values(self, *fields):
clone = self._values(*fields)
clone._iterable_class = ValuesIterable
return clone
def values_list(self, *fields, **kwargs):
flat = kwargs.pop('flat', False)
if kwargs:
raise TypeError('Unexpected keyword arguments to values_list: %s'
% (list(kwargs),))
if flat and len(fields) > 1:
raise TypeError("'flat' is not valid when values_list is called with more than one field.")
clone = self._values(*fields)
clone._iterable_class = FlatValuesListIterable if flat else ValuesListIterable
return clone
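    # Editor's note: illustrative sketch, not Django source, for values() and
    # values_list(); `Entry` is a hypothetical model.
    #
    #   Entry.objects.values('id', 'headline')        # one dict per row
    #   Entry.objects.values_list('id', 'headline')   # one tuple per row
    #   Entry.objects.values_list('id', flat=True)    # bare values, single field only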
def dates(self, field_name, kind, order='ASC'):
"""
Returns a list of date objects representing all available dates for
the given field_name, scoped to 'kind'.
"""
assert kind in ("year", "month", "day"), \
"'kind' must be one of 'year', 'month' or 'day'."
assert order in ('ASC', 'DESC'), \
"'order' must be either 'ASC' or 'DESC'."
return self.annotate(
datefield=Date(field_name, kind),
plain_field=F(field_name)
).values_list(
'datefield', flat=True
).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datefield')
def datetimes(self, field_name, kind, order='ASC', tzinfo=None):
"""
Returns a list of datetime objects representing all available
datetimes for the given field_name, scoped to 'kind'.
"""
assert kind in ("year", "month", "day", "hour", "minute", "second"), \
"'kind' must be one of 'year', 'month', 'day', 'hour', 'minute' or 'second'."
assert order in ('ASC', 'DESC'), \
"'order' must be either 'ASC' or 'DESC'."
if settings.USE_TZ:
if tzinfo is None:
tzinfo = timezone.get_current_timezone()
else:
tzinfo = None
return self.annotate(
datetimefield=DateTime(field_name, kind, tzinfo),
plain_field=F(field_name)
).values_list(
'datetimefield', flat=True
).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datetimefield')
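    # Editor's note: illustrative sketch, not Django source, for dates() and
    # datetimes(); `Entry` with a `pub_date` field is hypothetical.
    #
    #   Entry.objects.dates('pub_date', 'month')             # date objects
    #   Entry.objects.datetimes('pub_date', 'hour', 'DESC')  # datetime objects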
def none(self):
"""
Returns an empty QuerySet.
"""
clone = self._clone()
clone.query.set_empty()
return clone
##################################################################
# PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
##################################################################
def all(self):
"""
Returns a new QuerySet that is a copy of the current one. This allows a
QuerySet to proxy for a model manager in some cases.
"""
return self._clone()
def filter(self, *args, **kwargs):
"""
Returns a new QuerySet instance with the args ANDed to the existing
set.
"""
return self._filter_or_exclude(False, *args, **kwargs)
def exclude(self, *args, **kwargs):
"""
Returns a new QuerySet instance with NOT (args) ANDed to the existing
set.
"""
return self._filter_or_exclude(True, *args, **kwargs)
def _filter_or_exclude(self, negate, *args, **kwargs):
if args or kwargs:
assert self.query.can_filter(), \
"Cannot filter a query once a slice has been taken."
clone = self._clone()
if negate:
clone.query.add_q(~Q(*args, **kwargs))
else:
clone.query.add_q(Q(*args, **kwargs))
return clone
def complex_filter(self, filter_obj):
"""
Returns a new QuerySet instance with filter_obj added to the filters.
filter_obj can be a Q object (or anything with an add_to_query()
method) or a dictionary of keyword lookup arguments.
This exists to support framework features such as 'limit_choices_to',
and usually it will be more natural to use other methods.
"""
if isinstance(filter_obj, Q) or hasattr(filter_obj, 'add_to_query'):
clone = self._clone()
clone.query.add_q(filter_obj)
return clone
else:
return self._filter_or_exclude(None, **filter_obj)
def select_for_update(self, nowait=False):
"""
Returns a new QuerySet instance that will select objects with a
FOR UPDATE lock.
"""
obj = self._clone()
obj._for_write = True
obj.query.select_for_update = True
obj.query.select_for_update_nowait = nowait
return obj
def select_related(self, *fields):
"""
Returns a new QuerySet instance that will select related objects.
If fields are specified, they must be ForeignKey fields and only those
related objects are included in the selection.
If select_related(None) is called, the list is cleared.
"""
if self._fields is not None:
raise TypeError("Cannot call select_related() after .values() or .values_list()")
obj = self._clone()
if fields == (None,):
obj.query.select_related = False
elif fields:
obj.query.add_select_related(fields)
else:
obj.query.select_related = True
return obj
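    # Editor's note: illustrative sketch, not Django source. select_related()
    # follows foreign keys in the same SQL query; `Entry` with a `blog`
    # ForeignKey is hypothetical.
    #
    #   entry = Entry.objects.select_related('blog').get(pk=5)
    #   entry.blog   # already loaded, no extra query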
def prefetch_related(self, *lookups):
"""
Returns a new QuerySet instance that will prefetch the specified
Many-To-One and Many-To-Many related objects when the QuerySet is
evaluated.
When prefetch_related() is called more than once, the list of lookups to
prefetch is appended to. If prefetch_related(None) is called, the list
is cleared.
"""
clone = self._clone()
if lookups == (None,):
clone._prefetch_related_lookups = []
else:
clone._prefetch_related_lookups.extend(lookups)
return clone
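    # Editor's note: illustrative sketch, not Django source. prefetch_related()
    # runs one extra query per lookup and joins the results in Python; `Pizza`
    # with a `toppings` many-to-many field is hypothetical.
    #
    #   for pizza in Pizza.objects.prefetch_related('toppings'):
    #       list(pizza.toppings.all())   # served from the prefetch cache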
def annotate(self, *args, **kwargs):
"""
Return a query set in which the returned objects have been annotated
with extra data or aggregations.
"""
annotations = OrderedDict() # To preserve ordering of args
for arg in args:
# The default_alias property may raise a TypeError, so we use
# a try/except construct rather than hasattr in order to remain
# consistent between PY2 and PY3 (hasattr would swallow
# the TypeError on PY2).
try:
if arg.default_alias in kwargs:
raise ValueError("The named annotation '%s' conflicts with the "
"default name for another annotation."
% arg.default_alias)
except (AttributeError, TypeError):
raise TypeError("Complex annotations require an alias")
annotations[arg.default_alias] = arg
annotations.update(kwargs)
clone = self._clone()
names = self._fields
if names is None:
names = {f.name for f in self.model._meta.get_fields()}
for alias, annotation in annotations.items():
if alias in names:
raise ValueError("The annotation '%s' conflicts with a field on "
"the model." % alias)
clone.query.add_annotation(annotation, alias, is_summary=False)
for alias, annotation in clone.query.annotations.items():
if alias in annotations and annotation.contains_aggregate:
if clone._fields is None:
clone.query.group_by = True
else:
clone.query.set_group_by()
break
return clone
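    # Editor's note: illustrative sketch, not Django source. annotate()
    # attaches a per-object value computed in the database; `Blog` with a
    # reverse `entry` relation is hypothetical.
    #
    #   from django.db.models import Count
    #   blogs = Blog.objects.annotate(n_entries=Count('entry'))
    #   blogs[0].n_entries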
def order_by(self, *field_names):
"""
Returns a new QuerySet instance with the ordering changed.
"""
assert self.query.can_filter(), \
"Cannot reorder a query once a slice has been taken."
obj = self._clone()
obj.query.clear_ordering(force_empty=False)
obj.query.add_ordering(*field_names)
return obj
def distinct(self, *field_names):
"""
Returns a new QuerySet instance that will select only distinct results.
"""
assert self.query.can_filter(), \
"Cannot create distinct fields once a slice has been taken."
obj = self._clone()
obj.query.add_distinct_fields(*field_names)
return obj
def extra(self, select=None, where=None, params=None, tables=None,
order_by=None, select_params=None):
"""
Adds extra SQL fragments to the query.
"""
assert self.query.can_filter(), \
"Cannot change a query once a slice has been taken"
clone = self._clone()
clone.query.add_extra(select, select_params, where, params, tables, order_by)
return clone
def reverse(self):
"""
Reverses the ordering of the QuerySet.
"""
clone = self._clone()
clone.query.standard_ordering = not clone.query.standard_ordering
return clone
def defer(self, *fields):
"""
Defers the loading of data for certain fields until they are accessed.
The set of fields to defer is added to any existing set of deferred
fields. The only exception to this is if None is passed in as the only
parameter, in which case all deferrals are removed (None acts as a
reset option).
"""
if self._fields is not None:
raise TypeError("Cannot call defer() after .values() or .values_list()")
clone = self._clone()
if fields == (None,):
clone.query.clear_deferred_loading()
else:
clone.query.add_deferred_loading(fields)
return clone
def only(self, *fields):
"""
Essentially, the opposite of defer. Only the fields passed into this
method and that are not already specified as deferred are loaded
immediately when the queryset is evaluated.
"""
if self._fields is not None:
raise TypeError("Cannot call only() after .values() or .values_list()")
if fields == (None,):
# Can only pass None to defer(), not only(), as the rest option.
# That won't stop people trying to do this, so let's be explicit.
raise TypeError("Cannot pass None as an argument to only().")
clone = self._clone()
clone.query.add_immediate_loading(fields)
return clone
def using(self, alias):
"""
Selects which database this QuerySet should execute its query against.
"""
clone = self._clone()
clone._db = alias
return clone
###################################
# PUBLIC INTROSPECTION ATTRIBUTES #
###################################
def ordered(self):
"""
Returns True if the QuerySet is ordered -- i.e. has an order_by()
clause or a default ordering on the model.
"""
if self.query.extra_order_by or self.query.order_by:
return True
elif self.query.default_ordering and self.query.get_meta().ordering:
return True
else:
return False
ordered = property(ordered)
@property
def db(self):
"Return the database that will be used if this query is executed now"
if self._for_write:
return self._db or router.db_for_write(self.model, **self._hints)
return self._db or router.db_for_read(self.model, **self._hints)
###################
# PRIVATE METHODS #
###################
def _insert(self, objs, fields, return_id=False, raw=False, using=None):
"""
Inserts a new record for the given model. This provides an interface to
the InsertQuery class and is how Model.save() is implemented.
"""
self._for_write = True
if using is None:
using = self.db
query = sql.InsertQuery(self.model)
query.insert_values(fields, objs, raw=raw)
return query.get_compiler(using=using).execute_sql(return_id)
_insert.alters_data = True
_insert.queryset_only = False
def _batched_insert(self, objs, fields, batch_size):
"""
        Helper for bulk_create() that inserts the objects one batch at a
        time, slicing successive batches from the front of the list until
        everything has been inserted.
"""
if not objs:
return
ops = connections[self.db].ops
batch_size = (batch_size or max(ops.bulk_batch_size(fields, objs), 1))
for batch in [objs[i:i + batch_size]
for i in range(0, len(objs), batch_size)]:
self.model._base_manager._insert(batch, fields=fields,
using=self.db)
def _clone(self, **kwargs):
query = self.query.clone()
if self._sticky_filter:
query.filter_is_sticky = True
clone = self.__class__(model=self.model, query=query, using=self._db, hints=self._hints)
clone._for_write = self._for_write
clone._prefetch_related_lookups = self._prefetch_related_lookups[:]
clone._known_related_objects = self._known_related_objects
clone._iterable_class = self._iterable_class
clone._fields = self._fields
clone.__dict__.update(kwargs)
return clone
def _fetch_all(self):
if self._result_cache is None:
self._result_cache = list(self.iterator())
if self._prefetch_related_lookups and not self._prefetch_done:
self._prefetch_related_objects()
def _next_is_sticky(self):
"""
Indicates that the next filter call and the one following that should
be treated as a single filter. This is only important when it comes to
determining when to reuse tables for many-to-many filters. Required so
that we can filter naturally on the results of related managers.
This doesn't return a clone of the current QuerySet (it returns
"self"). The method is only used internally and should be immediately
followed by a filter() that does create a clone.
"""
self._sticky_filter = True
return self
def _merge_sanity_check(self, other):
"""
Checks that we are merging two comparable QuerySet classes.
"""
if self._fields is not None and (
set(self.query.values_select) != set(other.query.values_select) or
set(self.query.extra_select) != set(other.query.extra_select) or
set(self.query.annotation_select) != set(other.query.annotation_select)):
raise TypeError("Merging '%s' classes must involve the same values in each case."
% self.__class__.__name__)
def _merge_known_related_objects(self, other):
"""
Keep track of all known related objects from either QuerySet instance.
"""
for field, objects in other._known_related_objects.items():
self._known_related_objects.setdefault(field, {}).update(objects)
def _prepare(self):
if self._fields is not None:
            # A values() queryset can only be used as a nested query
            # if it is set up to select only a single field.
if len(self._fields or self.model._meta.concrete_fields) > 1:
raise TypeError('Cannot use multi-field values as a filter value.')
return self
def _as_sql(self, connection):
"""
Returns the internal query's SQL and parameters (as a tuple).
"""
if self._fields is not None:
            # A values() queryset can only be used as a nested query
            # if it is set up to select only a single field.
if len(self._fields or self.model._meta.concrete_fields) > 1:
raise TypeError('Cannot use multi-field values as a filter value.')
clone = self._clone()
else:
clone = self.values('pk')
if clone._db is None or connection == connections[clone._db]:
return clone.query.get_compiler(connection=connection).as_nested_sql()
raise ValueError("Can't do subqueries with queries on different DBs.")
# When used as part of a nested query, a queryset will never be an "always
# empty" result.
value_annotation = True
def _add_hints(self, **hints):
"""
Update hinting information for later use by Routers
"""
# If there is any hinting information, add it to what we already know.
# If we have a new hint for an existing key, overwrite with the new value.
self._hints.update(hints)
def _has_filters(self):
"""
Checks if this QuerySet has any filtering going on. Note that this
        isn't equivalent to checking whether all objects are present in the results,
for example qs[1:]._has_filters() -> False.
"""
return self.query.has_filters()
def is_compatible_query_object_type(self, opts, field):
"""
Check that using this queryset as the rhs value for a lookup is
allowed. The opts are the options of the relation's target we are
querying against. For example in .filter(author__in=Author.objects.all())
the opts would be Author's (from the author field) and self.model would
be Author.objects.all() queryset's .model (Author also). The field is
the related field on the lhs side.
"""
# We trust that users of values() know what they are doing.
if self._fields is not None:
return True
return check_rel_lookup_compatibility(self.model, opts, field)
is_compatible_query_object_type.queryset_only = True
class InstanceCheckMeta(type):
def __instancecheck__(self, instance):
return isinstance(instance, QuerySet) and instance.query.is_empty()
class EmptyQuerySet(six.with_metaclass(InstanceCheckMeta)):
"""
Marker class usable for checking if a queryset is empty by .none():
isinstance(qs.none(), EmptyQuerySet) -> True
"""
def __init__(self, *args, **kwargs):
raise TypeError("EmptyQuerySet can't be instantiated")
class RawQuerySet(object):
"""
Provides an iterator which converts the results of raw SQL queries into
annotated model instances.
"""
def __init__(self, raw_query, model=None, query=None, params=None,
translations=None, using=None, hints=None):
self.raw_query = raw_query
self.model = model
self._db = using
self._hints = hints or {}
self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params)
self.params = params or ()
self.translations = translations or {}
def resolve_model_init_order(self):
"""
Resolve the init field names and value positions
"""
model_init_fields = [f for f in self.model._meta.fields if f.column in self.columns]
annotation_fields = [(column, pos) for pos, column in enumerate(self.columns)
if column not in self.model_fields]
model_init_order = [self.columns.index(f.column) for f in model_init_fields]
model_init_names = [f.attname for f in model_init_fields]
return model_init_names, model_init_order, annotation_fields
def __iter__(self):
# Cache some things for performance reasons outside the loop.
db = self.db
compiler = connections[db].ops.compiler('SQLCompiler')(
self.query, connections[db], db
)
query = iter(self.query)
try:
model_init_names, model_init_pos, annotation_fields = self.resolve_model_init_order()
# Find out which model's fields are not present in the query.
skip = set()
for field in self.model._meta.fields:
if field.attname not in model_init_names:
skip.add(field.attname)
if skip:
if self.model._meta.pk.attname in skip:
raise InvalidQuery('Raw query must include the primary key')
model_cls = deferred_class_factory(self.model, skip)
else:
model_cls = self.model
fields = [self.model_fields.get(c) for c in self.columns]
converters = compiler.get_converters([
f.get_col(f.model._meta.db_table) if f else None for f in fields
])
for values in query:
if converters:
values = compiler.apply_converters(values, converters)
# Associate fields to values
model_init_values = [values[pos] for pos in model_init_pos]
instance = model_cls.from_db(db, model_init_names, model_init_values)
if annotation_fields:
for column, pos in annotation_fields:
setattr(instance, column, values[pos])
yield instance
finally:
# Done iterating the Query. If it has its own cursor, close it.
if hasattr(self.query, 'cursor') and self.query.cursor:
self.query.cursor.close()
def __repr__(self):
return "<RawQuerySet: %s>" % self.query
def __getitem__(self, k):
return list(self)[k]
@property
def db(self):
"Return the database that will be used if this query is executed now"
return self._db or router.db_for_read(self.model, **self._hints)
def using(self, alias):
"""
Selects which database this Raw QuerySet should execute its query against.
"""
return RawQuerySet(self.raw_query, model=self.model,
query=self.query.clone(using=alias),
params=self.params, translations=self.translations,
using=alias)
@property
def columns(self):
"""
A list of model field names in the order they'll appear in the
query results.
"""
if not hasattr(self, '_columns'):
self._columns = self.query.get_columns()
# Adjust any column names which don't match field names
for (query_name, model_name) in self.translations.items():
try:
index = self._columns.index(query_name)
self._columns[index] = model_name
except ValueError:
# Ignore translations for non-existent column names
pass
return self._columns
@property
def model_fields(self):
"""
A dict mapping column names to model field names.
"""
if not hasattr(self, '_model_fields'):
converter = connections[self.db].introspection.table_name_converter
self._model_fields = {}
for field in self.model._meta.fields:
name, column = field.get_attname_column()
self._model_fields[converter(column)] = field
return self._model_fields
class Prefetch(object):
def __init__(self, lookup, queryset=None, to_attr=None):
# `prefetch_through` is the path we traverse to perform the prefetch.
self.prefetch_through = lookup
# `prefetch_to` is the path to the attribute that stores the result.
self.prefetch_to = lookup
if to_attr:
self.prefetch_to = LOOKUP_SEP.join(lookup.split(LOOKUP_SEP)[:-1] + [to_attr])
self.queryset = queryset
self.to_attr = to_attr
def add_prefix(self, prefix):
self.prefetch_through = LOOKUP_SEP.join([prefix, self.prefetch_through])
self.prefetch_to = LOOKUP_SEP.join([prefix, self.prefetch_to])
def get_current_prefetch_through(self, level):
return LOOKUP_SEP.join(self.prefetch_through.split(LOOKUP_SEP)[:level + 1])
def get_current_prefetch_to(self, level):
return LOOKUP_SEP.join(self.prefetch_to.split(LOOKUP_SEP)[:level + 1])
def get_current_to_attr(self, level):
parts = self.prefetch_to.split(LOOKUP_SEP)
to_attr = parts[level]
as_attr = self.to_attr and level == len(parts) - 1
return to_attr, as_attr
def get_current_queryset(self, level):
if self.get_current_prefetch_to(level) == self.prefetch_to:
return self.queryset
return None
def __eq__(self, other):
if isinstance(other, Prefetch):
return self.prefetch_to == other.prefetch_to
return False
def __hash__(self):
return hash(self.__class__) ^ hash(self.prefetch_to)
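# Editor's note: illustrative sketch, not Django source. A Prefetch object
# customizes the queryset used for a prefetch_related() lookup and can store
# the result under a separate attribute; `Question` and `Choice` are
# hypothetical models.
#
#   Question.objects.prefetch_related(
#       Prefetch('choice_set',
#                queryset=Choice.objects.filter(votes__gt=0),
#                to_attr='popular_choices'))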
def normalize_prefetch_lookups(lookups, prefix=None):
"""
    Helper function that normalizes lookups into Prefetch objects.
"""
ret = []
for lookup in lookups:
if not isinstance(lookup, Prefetch):
lookup = Prefetch(lookup)
if prefix:
lookup.add_prefix(prefix)
ret.append(lookup)
return ret
def prefetch_related_objects(result_cache, related_lookups):
"""
    Helper function for the prefetch_related() functionality.
    Populates the prefetched-object caches for a list of results
    from a QuerySet.
"""
if len(result_cache) == 0:
return # nothing to do
related_lookups = normalize_prefetch_lookups(related_lookups)
# We need to be able to dynamically add to the list of prefetch_related
# lookups that we look up (see below). So we need some book keeping to
# ensure we don't do duplicate work.
done_queries = {} # dictionary of things like 'foo__bar': [results]
auto_lookups = set() # we add to this as we go through.
followed_descriptors = set() # recursion protection
all_lookups = deque(related_lookups)
while all_lookups:
lookup = all_lookups.popleft()
if lookup.prefetch_to in done_queries:
if lookup.queryset:
raise ValueError("'%s' lookup was already seen with a different queryset. "
"You may need to adjust the ordering of your lookups." % lookup.prefetch_to)
continue
# Top level, the list of objects to decorate is the result cache
# from the primary QuerySet. It won't be for deeper levels.
obj_list = result_cache
through_attrs = lookup.prefetch_through.split(LOOKUP_SEP)
for level, through_attr in enumerate(through_attrs):
# Prepare main instances
if len(obj_list) == 0:
break
prefetch_to = lookup.get_current_prefetch_to(level)
if prefetch_to in done_queries:
# Skip any prefetching, and any object preparation
obj_list = done_queries[prefetch_to]
continue
# Prepare objects:
good_objects = True
for obj in obj_list:
# Since prefetching can re-use instances, it is possible to have
# the same instance multiple times in obj_list, so obj might
# already be prepared.
if not hasattr(obj, '_prefetched_objects_cache'):
try:
obj._prefetched_objects_cache = {}
except AttributeError:
# Must be in a QuerySet subclass that is not returning
# Model instances, either in Django or 3rd
# party. prefetch_related() doesn't make sense, so quit
# now.
good_objects = False
break
if not good_objects:
break
# Descend down tree
# We assume that objects retrieved are homogeneous (which is the premise
            # of prefetch_related), so what applies to the first object applies to all.
first_obj = obj_list[0]
prefetcher, descriptor, attr_found, is_fetched = get_prefetcher(first_obj, through_attr)
if not attr_found:
raise AttributeError("Cannot find '%s' on %s object, '%s' is an invalid "
"parameter to prefetch_related()" %
(through_attr, first_obj.__class__.__name__, lookup.prefetch_through))
if level == len(through_attrs) - 1 and prefetcher is None:
# Last one, this *must* resolve to something that supports
# prefetching, otherwise there is no point adding it and the
# developer asking for it has made a mistake.
raise ValueError("'%s' does not resolve to an item that supports "
"prefetching - this is an invalid parameter to "
"prefetch_related()." % lookup.prefetch_through)
if prefetcher is not None and not is_fetched:
obj_list, additional_lookups = prefetch_one_level(obj_list, prefetcher, lookup, level)
# We need to ensure we don't keep adding lookups from the
# same relationships to stop infinite recursion. So, if we
# are already on an automatically added lookup, don't add
# the new lookups from relationships we've seen already.
if not (lookup in auto_lookups and descriptor in followed_descriptors):
done_queries[prefetch_to] = obj_list
new_lookups = normalize_prefetch_lookups(additional_lookups, prefetch_to)
auto_lookups.update(new_lookups)
all_lookups.extendleft(new_lookups)
followed_descriptors.add(descriptor)
else:
# Either a singly related object that has already been fetched
# (e.g. via select_related), or hopefully some other property
# that doesn't support prefetching but needs to be traversed.
# We replace the current list of parent objects with the list
# of related objects, filtering out empty or missing values so
# that we can continue with nullable or reverse relations.
new_obj_list = []
for obj in obj_list:
try:
new_obj = getattr(obj, through_attr)
except exceptions.ObjectDoesNotExist:
continue
if new_obj is None:
continue
# We special-case `list` rather than something more generic
# like `Iterable` because we don't want to accidentally match
# user models that define __iter__.
if isinstance(new_obj, list):
new_obj_list.extend(new_obj)
else:
new_obj_list.append(new_obj)
obj_list = new_obj_list
def get_prefetcher(instance, attr):
"""
For the attribute 'attr' on the given instance, finds
an object that has a get_prefetch_queryset().
    Returns a 4-tuple containing:
(the object with get_prefetch_queryset (or None),
the descriptor object representing this relationship (or None),
a boolean that is False if the attribute was not found at all,
a boolean that is True if the attribute has already been fetched)
"""
prefetcher = None
is_fetched = False
# For singly related objects, we have to avoid getting the attribute
# from the object, as this will trigger the query. So we first try
# on the class, in order to get the descriptor object.
rel_obj_descriptor = getattr(instance.__class__, attr, None)
if rel_obj_descriptor is None:
attr_found = hasattr(instance, attr)
else:
attr_found = True
if rel_obj_descriptor:
# singly related object, descriptor object has the
# get_prefetch_queryset() method.
if hasattr(rel_obj_descriptor, 'get_prefetch_queryset'):
prefetcher = rel_obj_descriptor
if rel_obj_descriptor.is_cached(instance):
is_fetched = True
else:
# descriptor doesn't support prefetching, so we go ahead and get
# the attribute on the instance rather than the class to
# support many related managers
rel_obj = getattr(instance, attr)
if hasattr(rel_obj, 'get_prefetch_queryset'):
prefetcher = rel_obj
return prefetcher, rel_obj_descriptor, attr_found, is_fetched
def prefetch_one_level(instances, prefetcher, lookup, level):
"""
    Helper function for prefetch_related_objects().
    Runs prefetches on all instances using the prefetcher object,
    assigning results to the relevant caches on each instance.
The prefetched objects are returned, along with any additional
prefetches that must be done due to prefetch_related lookups
found from default managers.
"""
# prefetcher must have a method get_prefetch_queryset() which takes a list
# of instances, and returns a tuple:
# (queryset of instances of self.model that are related to passed in instances,
# callable that gets value to be matched for returned instances,
# callable that gets value to be matched for passed in instances,
# boolean that is True for singly related objects,
# cache name to assign to).
# The 'values to be matched' must be hashable as they will be used
# in a dictionary.
rel_qs, rel_obj_attr, instance_attr, single, cache_name = (
prefetcher.get_prefetch_queryset(instances, lookup.get_current_queryset(level)))
# We have to handle the possibility that the QuerySet we just got back
# contains some prefetch_related lookups. We don't want to trigger the
# prefetch_related functionality by evaluating the query. Rather, we need
# to merge in the prefetch_related lookups.
# Copy the lookups in case it is a Prefetch object which could be reused
# later (happens in nested prefetch_related).
additional_lookups = [
copy.copy(additional_lookup) for additional_lookup
in getattr(rel_qs, '_prefetch_related_lookups', [])
]
if additional_lookups:
# Don't need to clone because the manager should have given us a fresh
# instance, so we access an internal instead of using public interface
# for performance reasons.
rel_qs._prefetch_related_lookups = []
all_related_objects = list(rel_qs)
rel_obj_cache = {}
for rel_obj in all_related_objects:
rel_attr_val = rel_obj_attr(rel_obj)
rel_obj_cache.setdefault(rel_attr_val, []).append(rel_obj)
to_attr, as_attr = lookup.get_current_to_attr(level)
# Make sure `to_attr` does not conflict with a field.
if as_attr and instances:
# We assume that objects retrieved are homogeneous (which is the premise
        # of prefetch_related), so what applies to the first object applies to all.
model = instances[0].__class__
try:
model._meta.get_field(to_attr)
except exceptions.FieldDoesNotExist:
pass
else:
msg = 'to_attr={} conflicts with a field on the {} model.'
raise ValueError(msg.format(to_attr, model.__name__))
for obj in instances:
instance_attr_val = instance_attr(obj)
vals = rel_obj_cache.get(instance_attr_val, [])
if single:
val = vals[0] if vals else None
to_attr = to_attr if as_attr else cache_name
setattr(obj, to_attr, val)
else:
if as_attr:
setattr(obj, to_attr, vals)
else:
# Cache in the QuerySet.all().
qs = getattr(obj, to_attr).all()
qs._result_cache = vals
# We don't want the individual qs doing prefetch_related now,
# since we have merged this into the current work.
qs._prefetch_done = True
obj._prefetched_objects_cache[cache_name] = qs
return all_related_objects, additional_lookups
class RelatedPopulator(object):
"""
RelatedPopulator is used for select_related() object instantiation.
The idea is that each select_related() model will be populated by a
different RelatedPopulator instance. The RelatedPopulator instances get
klass_info and select (computed in SQLCompiler) plus the used db as
input for initialization. That data is used to compute which columns
to use, how to instantiate the model, and how to populate the links
between the objects.
The actual creation of the objects is done in populate() method. This
method gets row and from_obj as input and populates the select_related()
model instance.
"""
def __init__(self, klass_info, select, db):
self.db = db
# Pre-compute needed attributes. The attributes are:
# - model_cls: the possibly deferred model class to instantiate
# - either:
# - cols_start, cols_end: usually the columns in the row are
# in the same order model_cls.__init__ expects them, so we
# can instantiate by model_cls(*row[cols_start:cols_end])
# - reorder_for_init: When select_related descends to a child
# class, then we want to reuse the already selected parent
# data. However, in this case the parent data isn't necessarily
# in the same order that Model.__init__ expects it to be, so
# we have to reorder the parent data. The reorder_for_init
# attribute contains a function used to reorder the field data
# in the order __init__ expects it.
# - pk_idx: the index of the primary key field in the reordered
# model data. Used to check if a related object exists at all.
# - init_list: the field attnames fetched from the database. For
# deferred models this isn't the same as all attnames of the
# model's fields.
# - related_populators: a list of RelatedPopulator instances if
# select_related() descends to related models from this model.
# - cache_name, reverse_cache_name: the names to use for setattr
# when assigning the fetched object to the from_obj. If the
# reverse_cache_name is set, then we also set the reverse link.
select_fields = klass_info['select_fields']
from_parent = klass_info['from_parent']
if not from_parent:
self.cols_start = select_fields[0]
self.cols_end = select_fields[-1] + 1
self.init_list = [
f[0].target.attname for f in select[self.cols_start:self.cols_end]
]
self.reorder_for_init = None
else:
model_init_attnames = [
f.attname for f in klass_info['model']._meta.concrete_fields
]
reorder_map = []
for idx in select_fields:
field = select[idx][0].target
init_pos = model_init_attnames.index(field.attname)
reorder_map.append((init_pos, field.attname, idx))
reorder_map.sort()
self.init_list = [v[1] for v in reorder_map]
pos_list = [row_pos for _, _, row_pos in reorder_map]
def reorder_for_init(row):
return [row[row_pos] for row_pos in pos_list]
self.reorder_for_init = reorder_for_init
self.model_cls = self.get_deferred_cls(klass_info, self.init_list)
self.pk_idx = self.init_list.index(self.model_cls._meta.pk.attname)
self.related_populators = get_related_populators(klass_info, select, self.db)
field = klass_info['field']
reverse = klass_info['reverse']
self.reverse_cache_name = None
if reverse:
self.cache_name = field.remote_field.get_cache_name()
self.reverse_cache_name = field.get_cache_name()
else:
self.cache_name = field.get_cache_name()
if field.unique:
self.reverse_cache_name = field.remote_field.get_cache_name()
def get_deferred_cls(self, klass_info, init_list):
model_cls = klass_info['model']
if len(init_list) != len(model_cls._meta.concrete_fields):
init_set = set(init_list)
skip = [
f.attname for f in model_cls._meta.concrete_fields
if f.attname not in init_set
]
model_cls = deferred_class_factory(model_cls, skip)
return model_cls
def populate(self, row, from_obj):
if self.reorder_for_init:
obj_data = self.reorder_for_init(row)
else:
obj_data = row[self.cols_start:self.cols_end]
if obj_data[self.pk_idx] is None:
obj = None
else:
obj = self.model_cls.from_db(self.db, self.init_list, obj_data)
if obj and self.related_populators:
for rel_iter in self.related_populators:
rel_iter.populate(row, obj)
setattr(from_obj, self.cache_name, obj)
if obj and self.reverse_cache_name:
setattr(obj, self.reverse_cache_name, from_obj)
def get_related_populators(klass_info, select, db):
iterators = []
related_klass_infos = klass_info.get('related_klass_infos', [])
for rel_klass_info in related_klass_infos:
rel_cls = RelatedPopulator(rel_klass_info, select, db)
iterators.append(rel_cls)
return iterators
| {
"content_hash": "cde04ab2314f45ecf7772fec458c8aec",
"timestamp": "",
"source": "github",
"line_count": 1725,
"max_line_length": 115,
"avg_line_length": 39.893913043478264,
"alnum_prop": 0.589272999404217,
"repo_name": "benjaminjkraft/django",
"id": "b1bebcaca09d8654e6b641d0f25c4cfa6c0b9cd7",
"size": "68817",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "django/db/models/query.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52334"
},
{
"name": "HTML",
"bytes": "170510"
},
{
"name": "JavaScript",
"bytes": "256027"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11459633"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
"""Utilities for handling different array flavors in PyTables.
Variables
=========
`__docformat`__
The format of documentation strings in this module.
`internal_flavor`
The flavor used internally by PyTables.
`all_flavors`
List of all flavors available to PyTables.
`alias_map`
Maps old flavor names to the most similar current flavor.
`description_map`
Maps flavors to short descriptions of their supported objects.
`identifier_map`
Maps flavors to functions that can identify their objects.
The function associated with a given flavor will return a true
value if the object passed to it can be identified as being of
that flavor.
See the `flavor_of()` function for a friendlier interface to
flavor identification.
`converter_map`
Maps (source, destination) flavor pairs to converter functions.
Converter functions get an array of the source flavor and return
an array of the destination flavor.
See the `array_of_flavor()` and `flavor_to_flavor()` functions for
friendlier interfaces to flavor conversion.
"""
import warnings
import numpy as np
from .exceptions import FlavorError, FlavorWarning
__docformat__ = 'reStructuredText'
"""The format of documentation strings in this module."""
internal_flavor = 'numpy'
"""The flavor used internally by PyTables."""
# This is very slightly slower than a set for a small number of values
# in terms of (infrequent) lookup time, but allows `flavor_of()`
# (which may be called much more frequently) to check for flavors in
# order, beginning with the most common one.
all_flavors = [] # filled as flavors are registered
"""List of all flavors available to PyTables."""
alias_map = {} # filled as flavors are registered
"""Maps old flavor names to the most similar current flavor."""
description_map = {} # filled as flavors are registered
"""Maps flavors to short descriptions of their supported objects."""
identifier_map = {} # filled as flavors are registered
"""Maps flavors to functions that can identify their objects.
The function associated with a given flavor will return a true value
if the object passed to it can be identified as being of that flavor.
See the `flavor_of()` function for a friendlier interface to flavor
identification.
"""
converter_map = {} # filled as flavors are registered
"""Maps (source, destination) flavor pairs to converter functions.
Converter functions get an array of the source flavor and return an
array of the destination flavor.
See the `array_of_flavor()` and `flavor_to_flavor()` functions for
friendlier interfaces to flavor conversion.
"""
def check_flavor(flavor):
"""Raise a ``FlavorError`` if the `flavor` is not valid."""
if flavor not in all_flavors:
available_flavs = ", ".join(flav for flav in all_flavors)
raise FlavorError(
"flavor ``%s`` is unsupported or unavailable; "
"available flavors in this system are: %s"
% (flavor, available_flavs))
def array_of_flavor2(array, src_flavor, dst_flavor):
"""Get a version of the given `array` in a different flavor.
The input `array` must be of the given `src_flavor`, and the
returned array will be of the indicated `dst_flavor`. Both
flavors may be the same, but it is not guaranteed that the
returned array will be the same object as the input one in this
case.
If the conversion is not supported, a ``FlavorError`` is raised.
"""
convkey = (src_flavor, dst_flavor)
if convkey not in converter_map:
raise FlavorError("conversion from flavor ``%s`` to flavor ``%s`` "
"is unsupported or unavailable in this system"
% (src_flavor, dst_flavor))
convfunc = converter_map[convkey]
return convfunc(array)
def flavor_to_flavor(array, src_flavor, dst_flavor):
"""Get a version of the given `array` in a different flavor.
The input `array` must be of the given `src_flavor`, and the
returned array will be of the indicated `dst_flavor` (see below
for an exception to this). Both flavors may be the same, but it
is not guaranteed that the returned array will be the same object
as the input one in this case.
If the conversion is not supported, a `FlavorWarning` is issued
and the input `array` is returned as is.
"""
try:
return array_of_flavor2(array, src_flavor, dst_flavor)
except FlavorError as fe:
warnings.warn("%s; returning an object of the ``%s`` flavor instead"
% (fe.args[0], src_flavor), FlavorWarning)
return array
def internal_to_flavor(array, dst_flavor):
"""Get a version of the given `array` in a different `dst_flavor`.
The input `array` must be of the internal flavor, and the returned
array will be of the given `dst_flavor`. See `flavor_to_flavor()`
for more information.
"""
return flavor_to_flavor(array, internal_flavor, dst_flavor)
def array_as_internal(array, src_flavor):
"""Get a version of the given `array` in the internal flavor.
The input `array` must be of the given `src_flavor`, and the
returned array will be of the internal flavor.
If the conversion is not supported, a ``FlavorError`` is raised.
"""
return array_of_flavor2(array, src_flavor, internal_flavor)
def flavor_of(array):
"""Identify the flavor of a given `array`.
If the `array` can not be matched with any flavor, a ``TypeError``
is raised.
"""
for flavor in all_flavors:
if identifier_map[flavor](array):
return flavor
type_name = type(array).__name__
supported_descs = "; ".join(description_map[fl] for fl in all_flavors)
raise TypeError(
"objects of type ``%s`` are not supported in this context, sorry; "
"supported objects are: %s" % (type_name, supported_descs))
def array_of_flavor(array, dst_flavor):
"""Get a version of the given `array` in a different `dst_flavor`.
The flavor of the input `array` is guessed, and the returned array
will be of the given `dst_flavor`.
If the conversion is not supported, a ``FlavorError`` is raised.
"""
return array_of_flavor2(array, flavor_of(array), dst_flavor)
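def _flavor_round_trip_example():
    """Editor's illustrative sketch, not part of PyTables: a typical round
    trip between flavors using the public helpers defined above. The function
    is only defined here and never called at import time."""
    nparr = array_of_flavor([1, 2, 3], 'numpy')          # Python list -> ndarray
    assert flavor_of(nparr) == 'numpy'
    pylist = flavor_to_flavor(nparr, 'numpy', 'python')  # ndarray -> Python list
    return pylist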
def restrict_flavors(keep=('python',)):
"""Disable all flavors except those in keep.
Providing an empty keep sequence implies disabling all flavors (but the
internal one). If the sequence is not specified, only optional flavors are
disabled.
.. important:: Once you disable a flavor, it can not be enabled again.
"""
remove = set(all_flavors) - set(keep) - {internal_flavor}
for flavor in remove:
_disable_flavor(flavor)
# Flavor registration
#
# The order in which flavors appear in `all_flavors` determines the
# order in which they will be tested for by `flavor_of()`, so place
# most frequent flavors first.
all_flavors.append('numpy') # this is the internal flavor
all_flavors.append('python') # this is always supported
def _register_aliases():
"""Register aliases of *available* flavors."""
for flavor in all_flavors:
aliases = eval('_%s_aliases' % flavor)
for alias in aliases:
alias_map[alias] = flavor
def _register_descriptions():
"""Register descriptions of *available* flavors."""
for flavor in all_flavors:
description_map[flavor] = eval('_%s_desc' % flavor)
def _register_identifiers():
"""Register identifier functions of *available* flavors."""
for flavor in all_flavors:
identifier_map[flavor] = eval('_is_%s' % flavor)
def _register_converters():
"""Register converter functions between *available* flavors."""
def identity(array):
return array
for src_flavor in all_flavors:
for dst_flavor in all_flavors:
# Converters with the same source and destination flavor
# are used when available, since they may perform some
# optimizations on the resulting array (e.g. making it
# contiguous). Otherwise, an identity function is used.
convfunc = None
try:
convfunc = eval(f'_conv_{src_flavor}_to_{dst_flavor}')
except NameError:
if src_flavor == dst_flavor:
convfunc = identity
if convfunc:
converter_map[(src_flavor, dst_flavor)] = convfunc
def _register_all():
"""Register all *available* flavors."""
_register_aliases()
_register_descriptions()
_register_identifiers()
_register_converters()
def _deregister_aliases(flavor):
"""Deregister aliases of a given `flavor` (no checks)."""
rm_aliases = []
for (an_alias, a_flavor) in alias_map.items():
if a_flavor == flavor:
rm_aliases.append(an_alias)
for an_alias in rm_aliases:
del alias_map[an_alias]
def _deregister_description(flavor):
"""Deregister description of a given `flavor` (no checks)."""
del description_map[flavor]
def _deregister_identifier(flavor):
"""Deregister identifier function of a given `flavor` (no checks)."""
del identifier_map[flavor]
def _deregister_converters(flavor):
"""Deregister converter functions of a given `flavor` (no checks)."""
rm_flavor_pairs = []
for flavor_pair in converter_map:
if flavor in flavor_pair:
rm_flavor_pairs.append(flavor_pair)
for flavor_pair in rm_flavor_pairs:
del converter_map[flavor_pair]
def _disable_flavor(flavor):
"""Completely disable the given `flavor` (no checks)."""
_deregister_aliases(flavor)
_deregister_description(flavor)
_deregister_identifier(flavor)
_deregister_converters(flavor)
all_flavors.remove(flavor)
# Implementation of flavors
_python_aliases = [
'List', 'Tuple',
'Int', 'Float', 'String',
'VLString', 'Object',
]
_python_desc = ("homogeneous list or tuple, "
"integer, float, complex or bytes")
def _is_python(array):
return isinstance(array, (tuple, list, int, float, complex, bytes))
_numpy_aliases = []
_numpy_desc = "NumPy array, record or scalar"
if np.lib.NumpyVersion(np.__version__) >= np.lib.NumpyVersion('1.19.0'):
def toarray(array, *args, **kwargs):
with warnings.catch_warnings():
warnings.simplefilter('error')
try:
array = np.array(array, *args, **kwargs)
except np.VisibleDeprecationWarning:
raise ValueError(
'cannot guess the desired dtype from the input')
return array
else:
toarray = np.array
def _is_numpy(array):
return isinstance(array, (np.ndarray, np.generic))
def _numpy_contiguous(convfunc):
"""Decorate `convfunc` to return a *contiguous* NumPy array.
    Note: when arrays are 0-strided, the copy is avoided. This allows the
    original `array` to still carry information about the dtype and shape.
"""
def conv_to_numpy(array):
nparr = convfunc(array)
if (hasattr(nparr, 'flags') and
not nparr.flags.contiguous and
sum(nparr.strides) != 0):
nparr = nparr.copy() # copying the array makes it contiguous
return nparr
conv_to_numpy.__name__ = convfunc.__name__
conv_to_numpy.__doc__ = convfunc.__doc__
return conv_to_numpy
@_numpy_contiguous
def _conv_numpy_to_numpy(array):
# Passes contiguous arrays through and converts scalars into
# scalar arrays.
nparr = np.asarray(array)
if nparr.dtype.kind == 'U':
        # On Python 3, lots of common strings arrive disguised as Unicode
try:
# try to convert to basic 'S' type
return nparr.astype('S')
except UnicodeEncodeError:
pass
# pass on true Unicode arrays downstream in case it can be
# handled in the future
return nparr
@_numpy_contiguous
def _conv_python_to_numpy(array):
nparr = toarray(array)
if nparr.dtype.kind == 'U':
        # On Python 3, lots of common strings arrive disguised as Unicode
try:
# try to convert to basic 'S' type
return nparr.astype('S')
except UnicodeEncodeError:
pass
# pass on true Unicode arrays downstream in case it can be
# handled in the future
return nparr
def _conv_numpy_to_python(array):
if array.shape != ():
# Lists are the default for returning multidimensional objects
array = array.tolist()
else:
# 0-dim or scalar case
array = array.item()
return array
# Now register everything related with *available* flavors.
_register_all()
def _test():
"""Run ``doctest`` on this module."""
import doctest
doctest.testmod()
if __name__ == '__main__':
_test()
| {
"content_hash": "352e25048980a944a22d83f42afe78f6",
"timestamp": "",
"source": "github",
"line_count": 428,
"max_line_length": 79,
"avg_line_length": 30.123831775700936,
"alnum_prop": 0.6573334367486233,
"repo_name": "PyTables/PyTables",
"id": "41bbbc8a3f6a65f5495a2d85405cc4cc5573c4c2",
"size": "12893",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tables/flavor.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "165578"
},
{
"name": "CMake",
"bytes": "2417"
},
{
"name": "Cython",
"bytes": "283042"
},
{
"name": "Gnuplot",
"bytes": "2104"
},
{
"name": "Makefile",
"bytes": "2489"
},
{
"name": "Python",
"bytes": "3119836"
},
{
"name": "Shell",
"bytes": "19408"
}
],
"symlink_target": ""
} |
from django import test
from ievv_opensource.ievv_sms import sms_registry
class TestAbstractSmsBackend(test.TestCase):
def test_get_sms_text_length(self):
self.assertEqual(sms_registry.AbstractSmsBackend.get_sms_text_length('Åge{^'), 7)
def test_get_max_length(self):
self.assertEqual(sms_registry.AbstractSmsBackend.get_max_length(), 918)
def test_get_part_count_simple(self):
self.assertEqual(sms_registry.AbstractSmsBackend.get_part_count('Test'), 1)
def test_get_part_count_exactly_fill_1(self):
self.assertEqual(sms_registry.AbstractSmsBackend.get_part_count('x' * 160), 1)
def test_get_part_count_6(self):
self.assertEqual(sms_registry.AbstractSmsBackend.get_part_count('x' * 900), 6)
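    # Editor's note: illustrative arithmetic, assuming the standard GSM-7
    # segmentation these expectations imply. Characters from the GSM extended
    # table such as '{' and '^' count as two, which is why 'Åge{^' measures 7.
    # A single SMS holds 160 characters, each part of a concatenated SMS holds
    # 153, so 900 characters need ceil(900 / 153) = 6 parts and the 6-part
    # maximum is 6 * 153 = 918.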
| {
"content_hash": "f56594ddabddd1cbff2131efb66bca74",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 89,
"avg_line_length": 37.9,
"alnum_prop": 0.7189973614775725,
"repo_name": "appressoas/ievv_opensource",
"id": "06bc8ed4820724d9e36b993321a4851a45082a8d",
"size": "759",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ievv_opensource/ievv_sms/tests/test_backends/test_abstract_sms_backend.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CoffeeScript",
"bytes": "199"
},
{
"name": "Dockerfile",
"bytes": "162"
},
{
"name": "HTML",
"bytes": "7544"
},
{
"name": "JavaScript",
"bytes": "719"
},
{
"name": "Less",
"bytes": "27"
},
{
"name": "Python",
"bytes": "614046"
},
{
"name": "SCSS",
"bytes": "199"
},
{
"name": "Shell",
"bytes": "141"
},
{
"name": "TypeScript",
"bytes": "254"
}
],
"symlink_target": ""
} |
import unittest2 as unittest
import tinctest
from tinctest import TINCTestCase
from test_tinc_sample import test_data_provider
@unittest.skip('mock test case for discovery tests')
class TINCTestCaseWithDataProvider(TINCTestCase):
"""
@maintainer balasr3
"""
def test_1_sample(self):
"""
@data_provider test1
"""
print "Running this test with test data %s" %self.test_data[0]
print "Test data is %s" %self.test_data[1]
| {
"content_hash": "0c1575c50caeb3bbcbe0cb49c5ed152b",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 70,
"avg_line_length": 24.15,
"alnum_prop": 0.6666666666666666,
"repo_name": "Quikling/gpdb",
"id": "245b1b3a81aeeece4ed9451a64ee37567777400e",
"size": "483",
"binary": false,
"copies": "20",
"ref": "refs/heads/master",
"path": "src/test/tinc/tinctest/test/data_provider/test_sample_data_provider.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5665"
},
{
"name": "Batchfile",
"bytes": "11492"
},
{
"name": "C",
"bytes": "35104900"
},
{
"name": "C++",
"bytes": "3826418"
},
{
"name": "CMake",
"bytes": "17118"
},
{
"name": "CSS",
"bytes": "7407"
},
{
"name": "Csound Score",
"bytes": "179"
},
{
"name": "DTrace",
"bytes": "1160"
},
{
"name": "Fortran",
"bytes": "14777"
},
{
"name": "GDB",
"bytes": "576"
},
{
"name": "Gherkin",
"bytes": "731336"
},
{
"name": "HTML",
"bytes": "191406"
},
{
"name": "Java",
"bytes": "268348"
},
{
"name": "JavaScript",
"bytes": "23969"
},
{
"name": "Lex",
"bytes": "196275"
},
{
"name": "M4",
"bytes": "105042"
},
{
"name": "Makefile",
"bytes": "428681"
},
{
"name": "PLSQL",
"bytes": "261269"
},
{
"name": "PLpgSQL",
"bytes": "5487194"
},
{
"name": "Perl",
"bytes": "3894496"
},
{
"name": "Perl 6",
"bytes": "14219"
},
{
"name": "Python",
"bytes": "8656525"
},
{
"name": "Roff",
"bytes": "51338"
},
{
"name": "Ruby",
"bytes": "26724"
},
{
"name": "SQLPL",
"bytes": "3824391"
},
{
"name": "Shell",
"bytes": "541518"
},
{
"name": "XS",
"bytes": "8405"
},
{
"name": "XSLT",
"bytes": "5779"
},
{
"name": "Yacc",
"bytes": "488297"
}
],
"symlink_target": ""
} |
""" Command line interface.
"""
# Copyright © 2015 1&1 Group <btw-users@googlegroups.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, unicode_literals, print_function
import re
import click
from bunch import Bunch
from . import config
from .plugins import loader
# Default name of the app, and its app directory
__app_name__ = 'nanny'
config.APP_NAME = __app_name__
# The `click` custom context settings
CONTEXT_SETTINGS = dict(
obj=Bunch(cfg=None, quiet=False, verbose=False), # namespace for custom stuff
help_option_names=['-h', '--help'],
auto_envvar_prefix=__app_name__.upper().replace('-', '_'),
)
# `--license` option decorator
def license_option(*param_decls, **attrs):
"""``--license`` option that prints license information and then exits."""
def decorator(func):
"decorator inner wrapper"
def callback(ctx, _dummy, value):
"click option callback"
if not value or ctx.resilient_parsing:
return
from . import __doc__ as license_text
license_text = re.sub(r"``([^`]+?)``", lambda m: click.style(m.group(1), bold=True), license_text)
click.echo(license_text)
ctx.exit()
attrs.setdefault('is_flag', True)
attrs.setdefault('expose_value', False)
attrs.setdefault('is_eager', True)
attrs.setdefault('help', 'Show the license and exit.')
attrs['callback'] = callback
return click.option(*(param_decls or ('--license',)), **attrs)(func)
return decorator
# Main command (root)
@click.group(context_settings=CONTEXT_SETTINGS)
@click.version_option(message=config.VERSION_INFO)
@license_option()
@click.option('-q', '--quiet', is_flag=True, default=False, help='Be quiet (show only errors).')
@click.option('-v', '--verbose', is_flag=True, default=False, help='Create extra verbose output.')
@click.option('-c', '--config', "config_paths", metavar='FILE',
multiple=True, type=click.Path(), help='Load given configuration file(s).')
@click.pass_context
def cli(ctx, quiet=False, verbose=False, config_paths=None): # pylint: disable=unused-argument
"""Nanny process launcher and watchdog tool."""
config.Configuration.from_context(ctx, config_paths, project='bootils')
ctx.obj.quiet = quiet
ctx.obj.verbose = verbose
loader.PluginLoader.load_into_context(ctx, project='bootils')
# Import sub-commands to define them AFTER `cli` is defined
config.cli = cli
from . import commands as _ # noqa pylint: disable=unused-import
if __name__ == "__main__": # imported via "python -m"?
__package__ = 'bootils' # pylint: disable=redefined-builtin
cli() # pylint: disable=no-value-for-parameter
| {
"content_hash": "f48269b96ea6c30bd77b54aa9f350398",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 110,
"avg_line_length": 37.50574712643678,
"alnum_prop": 0.6760649708856881,
"repo_name": "spodkowinski/bootils",
"id": "59b9ac192f00b63d3decaa8d0812468008030ffa",
"size": "3323",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/bootils/__main__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "66420"
},
{
"name": "Shell",
"bytes": "232"
}
],
"symlink_target": ""
} |
import datetime, re
from app import db
from app import login_manager
from app import bcrypt
def slugify(s):
    return re.sub(r'[^\w]+', '-', s).lower()
entry_tags = db.Table('entry_tags',
db.Column('tag_id', db.Integer, db.ForeignKey('tag.id')),
db.Column('entry_id', db.Integer, db.ForeignKey('entry.id'))
)
class Entry(db.Model):
STATUS_PUBLIC = 0
STATUS_DRAFT = 1
STATUS_DELETED = 2
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(100))
slug = db.Column(db.String(100), unique=True)
body = db.Column(db.Text)
status = db.Column(db.SmallInteger, default=STATUS_PUBLIC)
created_timestamp = db.Column(
db.DateTime,
default=datetime.datetime.now)
modified_timestamp = db.Column(
db.DateTime,
default= datetime.datetime.now,
onupdate=datetime.datetime.now)
author_id = db.Column(db.Integer, db.ForeignKey("user.id"))
tags = db.relationship('Tag', secondary=entry_tags,
backref=db.backref('entries', lazy='dynamic'))
comments = db.relationship('Comment', backref='entry',
lazy='dynamic')
def __init__(self, *args, **kwargs):
super(Entry, self).__init__(*args, **kwargs)
self.generate_slug()
def generate_slug(self):
self.slug = ''
if self.title:
self.slug = slugify(self.title)
def __repr__(self):
return '<Entry: %s>' % self.title
@property
def tag_list(self):
return ', '.join(tag.name for tag in self.tags)
@property
def tease(self):
return self.body[:100]
class Tag(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64))
slug = db.Column(db.String(64), unique=True)
def __init__(self, *args, **kwargs):
super(Tag, self).__init__(*args, **kwargs)
self.slug = slugify(self.name)
def __repr__(self):
return '<Tag %s>' % self.name
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(64), unique=True)
password_hash = db.Column(db.String(255))
name = db.Column(db.String(64))
slug = db.Column(db.String(64), unique=True)
active = db.Column(db.Boolean, default=True)
admin = db.Column(db.Boolean, default=False)
created_timestamp = db.Column(db.DateTime, default=datetime.datetime.now)
entries = db.relationship('Entry', backref='author', lazy='dynamic')
def __init__(self, *args, **kwargs):
super(User, self).__init__(*args, **kwargs)
self.generate_slug()
def generate_slug(self):
if self.name:
self.slug = slugify(self.name)
def __repr__(self):
return '<User %s>' % self.name
def get_id(self):
return self.id
def is_admin(self):
return self.admin
def is_authenticated(self):
return True
def is_active(self):
return self.active
def is_anonymous(self):
return False
@staticmethod
def make_password(plaintext):
return bcrypt.generate_password_hash(plaintext)
def check_password(self, raw_password):
return bcrypt.check_password_hash(self.password_hash, raw_password)
@classmethod
def create(cls, email, password, **kwargs):
return User(
email = email,
password_hash = User.make_password(password),
**kwargs)
@staticmethod
def authenticate(email, password):
user = User.query.filter(User.email == email).first()
if user and user.check_password(password):
return user
return False
@login_manager.user_loader
def _user_loader(user_id):
return User.query.get(int(user_id))
class Comment(db.Model):
STATUS_PENDING_MODERATION = 0
STATUS_PUBLIC = 1
STATUS_SPAM = 8
STATUS_DELETED = 9
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64))
email = db.Column(db.String(64))
url = db.Column(db.String(100))
ip_address = db.Column(db.String(64))
body = db.Column(db.Text)
status = db.Column(db.SmallInteger, default=STATUS_PUBLIC)
created_timestamp = db.Column(db.DateTime, default=datetime.datetime.now)
entry_id = db.Column(db.Integer, db.ForeignKey('entry.id'))
def __repr__(self):
return '<Comment from %r>' % (self.name,)
| {
"content_hash": "463d6de8715fd3603656557e4a3f83a1",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 77,
"avg_line_length": 29.113333333333333,
"alnum_prop": 0.6201053354705748,
"repo_name": "garwon/blog",
"id": "63ec2139706a292eaafd87ecbff8e003e099c53d",
"size": "4367",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "9966"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "19069"
}
],
"symlink_target": ""
} |
import json
import os
import re
import time
from datetime import datetime
import mwparserfromhell
os.environ['PYWIKIBOT_DIR'] = os.path.dirname(os.path.realpath(__file__))
import pywikibot
from config import config_page_name # pylint: disable=E0611,W0614
os.environ['TZ'] = 'UTC'
site = pywikibot.Site()
site.login()
config_page = pywikibot.Page(site, config_page_name)
cfg = config_page.text
cfg = json.loads(cfg)
print(json.dumps(cfg, indent=4, ensure_ascii=False))
if not cfg["enable"]:
exit("disabled\n")
signpage = pywikibot.Page(site, cfg["main_page_name"])
text = signpage.text
wikicode = mwparserfromhell.parse(text)
archivelist = []
count = 0
for section in wikicode.get_sections()[1:]:
title = str(section.get(0).title)
print(title, end="\t")
lasttime = datetime(1, 1, 1)
for m in re.findall(r"(\d{4})年(\d{1,2})月(\d{1,2})日 \(.\) (\d{2}):(\d{2}) \(UTC\)", str(section)):
d = datetime(int(m[0]), int(m[1]), int(m[2]), int(m[3]), int(m[4]))
lasttime = max(lasttime, d)
print(lasttime, end="\t")
if re.search(cfg["not_processed_regex"], str(section)):
print("not processed", end="\n")
continue
if (time.time() - lasttime.timestamp() > cfg["time_to_live"]
and lasttime != datetime(1, 1, 1)):
archivestr = str(section).strip()
archivestr = re.sub(
r"{{bot-directive-archiver\|no-archive-begin}}[\s\S]+?{{bot-directive-archiver\|no-archive-end}}\n?", "", archivestr)
archivelist.append(archivestr)
count += 1
section.remove(section)
print("archive", end="\t")
print()
text = str(wikicode)
if signpage.text == text:
exit("nothing changed")
pywikibot.showDiff(signpage.text, text)
signpage.text = text
summary = cfg["main_page_summary"].format(count)
print(summary)
signpage.save(summary=summary, minor=True)
archivepage = pywikibot.Page(site, cfg["archive_page_name"])
text = archivepage.text
text += "\n\n" + "\n\n".join(archivelist)
pywikibot.showDiff(archivepage.text, text)
archivepage.text = text
summary = cfg["archive_page_summary"].format(count)
print(summary)
archivepage.save(summary=summary, minor=True)
| {
"content_hash": "34aafe030ff5a6191b531829f0e8b1d9",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 129,
"avg_line_length": 29.026666666666667,
"alnum_prop": 0.6587046394120349,
"repo_name": "Xi-Plus/Xiplus-Wikipedia-Bot",
"id": "ca6bd517f699e151fce206b63a20d88daca975a8",
"size": "2207",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "newusermessage-signatures-archive/edit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2624"
},
{
"name": "PHP",
"bytes": "178222"
}
],
"symlink_target": ""
} |
"""
@name: Modules/House/Family/__init__.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2013-2020 by D. Brian Kimmel
@note: Created on May 17, 2013
@license: MIT License
@summary:
To add a family named 'NewFamily', do the following:
* Add a package named 'New_Family'.
* Add the family name (Capitalized) to the list MODULES below.
    * Add a module named <NewFamily>_device.py (a minimal sketch follows below).
* Add any other modules needed by the Device module.
            <NewFamily>_xml
<NewFamily>_data
...
* A module to interface with the controller is recommended.
<NewFamily>_pim
"""
__updated__ = '2020-02-21'
__version_info__ = (20, 2, 21)
__version__ = '.'.join(map(str, __version_info__))
CONFIG_NAME = 'families'
MODULES = [
'Acurite',
'Hue',
'Insteon',
'Lutron',
'Sonoff',
'Upb',
'X10',
'Zwave',
'Null'
]
class FamilyInformation:
""" Info about a family
This points to the
==> PyHouse_obj.House.Family[<familyname>]
indexed by lowercased family name "insteon"
"""
def __init__(self):
self.Name = None # Family Name
self.Module = None # FamilyModuleInformation()
self._Api = None # of the family_device.py file
class DeviceFamilyInformation:
""" This is used for things like Lights
"""
def __init__(self):
self.Name = None
self.Type = None
self.Address = None
# ## END DBK
| {
"content_hash": "bdea11be4db458d89f8d01078f50018b",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 66,
"avg_line_length": 23.09375,
"alnum_prop": 0.6001353179972937,
"repo_name": "DBrianKimmel/PyHouse",
"id": "b2927605b32e820515c5f4ac99db82bc80dbebdf",
"size": "1478",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "Project/src/Modules/House/Family/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "114778"
},
{
"name": "HTML",
"bytes": "15398"
},
{
"name": "JavaScript",
"bytes": "220171"
},
{
"name": "Python",
"bytes": "1491784"
},
{
"name": "Shell",
"bytes": "2131"
}
],
"symlink_target": ""
} |
"""This example adds an ad extension override to a given campaign. To get
campaigns, run get_campaigns.py.
Tags: GeoLocationService.get, AdExtensionOverrideService.mutate
Api: AdWordsOnly
"""
__author__ = 'api.kwinter@gmail.com (Kevin Winter)'
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import AdWordsClient
ad_id = 'INSERT_AD_GROUP_AD_ID_HERE'
ad_extension_id = 'INSERT_AD_EXTENSION_ID_HERE'
def main(client, ad_id, ad_extension_id):
# Initialize appropriate service.
geo_location_service = client.GetGeoLocationService(
'https://adwords-sandbox.google.com', 'v201109')
ad_extension_override_service = client.GetAdExtensionOverrideService(
'https://adwords-sandbox.google.com', 'v201109')
# Construct selector and get geo location info for a given address.
selector = {
'addresses': [
{
'streetAddress': '1600 Amphitheatre Parkway',
'cityName': 'Mountain View',
'provinceCode': 'US-CA',
'provinceName': 'California',
'postalCode': '94043',
'countryCode': 'US'
}
]
}
geo_location = geo_location_service.Get(selector)[0]
# Construct operations and add ad extension override.
operations = [
{
'operator': 'ADD',
'operand': {
'adId': ad_id,
'adExtension': {
'xsi_type': 'LocationExtension',
'id': ad_extension_id,
'address': geo_location['address'],
'geoPoint': geo_location['geoPoint'],
'encodedLocation': geo_location['encodedLocation'],
'source': 'ADWORDS_FRONTEND',
# Optional fields.
'companyName': 'ACME Inc.',
'phoneNumber': '(650) 253-0000'
# 'iconMediaId': '...',
# 'imageMediaId': '...'
},
# Optional fields.
'overrideInfo': {
'LocationOverrideInfo': {
'radius': '5',
'radiusUnits': 'MILES'
}
}
}
}
]
ad_extensions = ad_extension_override_service.Mutate(operations)[0]
# Display results.
for ad_extension in ad_extensions['value']:
print ('Ad extension override with id \'%s\' for ad with id \'%s\' was '
'added.' % (ad_extension['adExtension']['id'], ad_extension['adId']))
print
print ('Usage: %s units, %s operations' % (client.GetUnits(),
client.GetOperations()))
if __name__ == '__main__':
# Initialize client object.
client = AdWordsClient(path=os.path.join('..', '..', '..', '..', '..'))
main(client, ad_id, ad_extension_id)
| {
"content_hash": "1e0bf57e7fe7b918ff58fbe3f04ef8a6",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 80,
"avg_line_length": 32.51685393258427,
"alnum_prop": 0.5442294402211472,
"repo_name": "nearlyfreeapps/python-googleadwords",
"id": "1d7b48ab0fbe697f25b916c2ce4b974b2771bd5c",
"size": "3536",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/adspygoogle/adwords/v201109/campaign_management/add_location_extension_override.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "10581"
},
{
"name": "Python",
"bytes": "1394721"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dnollkse', '0003_delete_document'),
]
operations = [
migrations.AlterField(
model_name='config',
name='favicon_url',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='config',
name='gcal_api_key',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='config',
name='gcal_asp_calendar_id',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='config',
name='gcal_dnollk_calendar_id',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='config',
name='gcal_dnollk_calendar_url',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='config',
name='gcal_timeedit_calendar_id',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='config',
name='gform_embed_link',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='config',
name='gform_link',
field=models.TextField(blank=True, null=True),
),
]
| {
"content_hash": "38f034305a6b404ced2ef210bd72101b",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 58,
"avg_line_length": 30.264150943396228,
"alnum_prop": 0.5467581047381546,
"repo_name": "Jassob/DNollK.se",
"id": "009018fef05ad0e7d6d06defff2e116f2bcf4dfe",
"size": "1677",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "dnollkse/migrations/0004_auto_20170718_2312.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "146675"
},
{
"name": "HTML",
"bytes": "219200"
},
{
"name": "JavaScript",
"bytes": "950971"
},
{
"name": "PHP",
"bytes": "2274"
},
{
"name": "Python",
"bytes": "47780"
}
],
"symlink_target": ""
} |
"""Tests for distutils.command.sdist."""
import os
import tarfile
import unittest
import warnings
import zipfile
from os.path import join
from textwrap import dedent
from test.support import captured_stdout, check_warnings, run_unittest
try:
import zlib
ZLIB_SUPPORT = True
except ImportError:
ZLIB_SUPPORT = False
try:
import grp
import pwd
UID_GID_SUPPORT = True
except ImportError:
UID_GID_SUPPORT = False
from distutils.command.sdist import sdist, show_formats
from distutils.core import Distribution
from distutils.tests.test_config import BasePyPIRCCommandTestCase
from distutils.errors import DistutilsOptionError
from distutils.spawn import find_executable
from distutils.log import WARN
from distutils.filelist import FileList
from distutils.archive_util import ARCHIVE_FORMATS
SETUP_PY = """
from distutils.core import setup
import somecode
setup(name='fake')
"""
MANIFEST = """\
# file GENERATED by distutils, do NOT edit
README
buildout.cfg
inroot.txt
setup.py
data%(sep)sdata.dt
scripts%(sep)sscript.py
some%(sep)sfile.txt
some%(sep)sother_file.txt
somecode%(sep)s__init__.py
somecode%(sep)sdoc.dat
somecode%(sep)sdoc.txt
"""
class SDistTestCase(BasePyPIRCCommandTestCase):
def setUp(self):
# PyPIRCCommandTestCase creates a temp dir already
# and put it in self.tmp_dir
super(SDistTestCase, self).setUp()
# setting up an environment
self.old_path = os.getcwd()
os.mkdir(join(self.tmp_dir, 'somecode'))
os.mkdir(join(self.tmp_dir, 'dist'))
# a package, and a README
self.write_file((self.tmp_dir, 'README'), 'xxx')
self.write_file((self.tmp_dir, 'somecode', '__init__.py'), '#')
self.write_file((self.tmp_dir, 'setup.py'), SETUP_PY)
os.chdir(self.tmp_dir)
def tearDown(self):
# back to normal
os.chdir(self.old_path)
super(SDistTestCase, self).tearDown()
def get_cmd(self, metadata=None):
"""Returns a cmd"""
if metadata is None:
metadata = {'name': 'fake', 'version': '1.0',
'url': 'xxx', 'author': 'xxx',
'author_email': 'xxx'}
dist = Distribution(metadata)
dist.script_name = 'setup.py'
dist.packages = ['somecode']
dist.include_package_data = True
cmd = sdist(dist)
cmd.dist_dir = 'dist'
return dist, cmd
@unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run')
def test_prune_file_list(self):
# this test creates a project with some VCS dirs and an NFS rename
# file, then launches sdist to check they get pruned on all systems
# creating VCS directories with some files in them
os.mkdir(join(self.tmp_dir, 'somecode', '.svn'))
self.write_file((self.tmp_dir, 'somecode', '.svn', 'ok.py'), 'xxx')
os.mkdir(join(self.tmp_dir, 'somecode', '.hg'))
self.write_file((self.tmp_dir, 'somecode', '.hg',
'ok'), 'xxx')
os.mkdir(join(self.tmp_dir, 'somecode', '.git'))
self.write_file((self.tmp_dir, 'somecode', '.git',
'ok'), 'xxx')
self.write_file((self.tmp_dir, 'somecode', '.nfs0001'), 'xxx')
# now building a sdist
dist, cmd = self.get_cmd()
# zip is available universally
# (tar might not be installed under win32)
cmd.formats = ['zip']
cmd.ensure_finalized()
cmd.run()
# now let's check what we have
dist_folder = join(self.tmp_dir, 'dist')
files = os.listdir(dist_folder)
self.assertEqual(files, ['fake-1.0.zip'])
zip_file = zipfile.ZipFile(join(dist_folder, 'fake-1.0.zip'))
try:
content = zip_file.namelist()
finally:
zip_file.close()
# making sure everything has been pruned correctly
self.assertEqual(len(content), 4)
@unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run')
@unittest.skipIf(find_executable('tar') is None,
"The tar command is not found")
@unittest.skipIf(find_executable('gzip') is None,
"The gzip command is not found")
def test_make_distribution(self):
# now building a sdist
dist, cmd = self.get_cmd()
# creating a gztar then a tar
cmd.formats = ['gztar', 'tar']
cmd.ensure_finalized()
cmd.run()
# making sure we have two files
dist_folder = join(self.tmp_dir, 'dist')
result = os.listdir(dist_folder)
result.sort()
self.assertEqual(result, ['fake-1.0.tar', 'fake-1.0.tar.gz'])
os.remove(join(dist_folder, 'fake-1.0.tar'))
os.remove(join(dist_folder, 'fake-1.0.tar.gz'))
# now trying a tar then a gztar
cmd.formats = ['tar', 'gztar']
cmd.ensure_finalized()
cmd.run()
result = os.listdir(dist_folder)
result.sort()
self.assertEqual(result, ['fake-1.0.tar', 'fake-1.0.tar.gz'])
@unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run')
def test_add_defaults(self):
# http://bugs.python.org/issue2279
# add_default should also include
# data_files and package_data
dist, cmd = self.get_cmd()
# filling data_files by pointing files
# in package_data
dist.package_data = {'': ['*.cfg', '*.dat'],
'somecode': ['*.txt']}
self.write_file((self.tmp_dir, 'somecode', 'doc.txt'), '#')
self.write_file((self.tmp_dir, 'somecode', 'doc.dat'), '#')
# adding some data in data_files
data_dir = join(self.tmp_dir, 'data')
os.mkdir(data_dir)
self.write_file((data_dir, 'data.dt'), '#')
some_dir = join(self.tmp_dir, 'some')
os.mkdir(some_dir)
# make sure VCS directories are pruned (#14004)
hg_dir = join(self.tmp_dir, '.hg')
os.mkdir(hg_dir)
self.write_file((hg_dir, 'last-message.txt'), '#')
# a buggy regex used to prevent this from working on windows (#6884)
self.write_file((self.tmp_dir, 'buildout.cfg'), '#')
self.write_file((self.tmp_dir, 'inroot.txt'), '#')
self.write_file((some_dir, 'file.txt'), '#')
self.write_file((some_dir, 'other_file.txt'), '#')
dist.data_files = [('data', ['data/data.dt',
'buildout.cfg',
'inroot.txt',
'notexisting']),
'some/file.txt',
'some/other_file.txt']
# adding a script
script_dir = join(self.tmp_dir, 'scripts')
os.mkdir(script_dir)
self.write_file((script_dir, 'script.py'), '#')
dist.scripts = [join('scripts', 'script.py')]
cmd.formats = ['zip']
cmd.use_defaults = True
cmd.ensure_finalized()
cmd.run()
# now let's check what we have
dist_folder = join(self.tmp_dir, 'dist')
files = os.listdir(dist_folder)
self.assertEqual(files, ['fake-1.0.zip'])
zip_file = zipfile.ZipFile(join(dist_folder, 'fake-1.0.zip'))
try:
content = zip_file.namelist()
finally:
zip_file.close()
# making sure everything was added
self.assertEqual(len(content), 12)
# checking the MANIFEST
f = open(join(self.tmp_dir, 'MANIFEST'))
try:
manifest = f.read()
finally:
f.close()
self.assertEqual(manifest, MANIFEST % {'sep': os.sep})
@unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run')
def test_metadata_check_option(self):
        # testing the `metadata-check` option
dist, cmd = self.get_cmd(metadata={})
# this should raise some warnings !
# with the `check` subcommand
cmd.ensure_finalized()
cmd.run()
warnings = [msg for msg in self.get_logs(WARN) if
msg.startswith('warning: check:')]
self.assertEqual(len(warnings), 2)
# trying with a complete set of metadata
self.clear_logs()
dist, cmd = self.get_cmd()
cmd.ensure_finalized()
cmd.metadata_check = 0
cmd.run()
warnings = [msg for msg in self.get_logs(WARN) if
msg.startswith('warning: check:')]
self.assertEqual(len(warnings), 0)
def test_check_metadata_deprecated(self):
        # makes sure check_metadata is deprecated
dist, cmd = self.get_cmd()
with check_warnings() as w:
warnings.simplefilter("always")
cmd.check_metadata()
self.assertEqual(len(w.warnings), 1)
def test_show_formats(self):
with captured_stdout() as stdout:
show_formats()
# the output should be a header line + one line per format
num_formats = len(ARCHIVE_FORMATS.keys())
output = [line for line in stdout.getvalue().split('\n')
if line.strip().startswith('--formats=')]
self.assertEqual(len(output), num_formats)
def test_finalize_options(self):
dist, cmd = self.get_cmd()
cmd.finalize_options()
# default options set by finalize
self.assertEqual(cmd.manifest, 'MANIFEST')
self.assertEqual(cmd.template, 'MANIFEST.in')
self.assertEqual(cmd.dist_dir, 'dist')
        # formats has to be a string splittable on (' ', ',') or
        # a list of strings
cmd.formats = 1
self.assertRaises(DistutilsOptionError, cmd.finalize_options)
cmd.formats = ['zip']
cmd.finalize_options()
# formats has to be known
cmd.formats = 'supazipa'
self.assertRaises(DistutilsOptionError, cmd.finalize_options)
# the following tests make sure there is a nice error message instead
# of a traceback when parsing an invalid manifest template
def _check_template(self, content):
dist, cmd = self.get_cmd()
os.chdir(self.tmp_dir)
self.write_file('MANIFEST.in', content)
cmd.ensure_finalized()
cmd.filelist = FileList()
cmd.read_template()
warnings = self.get_logs(WARN)
self.assertEqual(len(warnings), 1)
def test_invalid_template_unknown_command(self):
self._check_template('taunt knights *')
def test_invalid_template_wrong_arguments(self):
# this manifest command takes one argument
self._check_template('prune')
@unittest.skipIf(os.name != 'nt', 'test relevant for Windows only')
def test_invalid_template_wrong_path(self):
# on Windows, trailing slashes are not allowed
# this used to crash instead of raising a warning: #8286
self._check_template('include examples/')
@unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run')
def test_get_file_list(self):
# make sure MANIFEST is recalculated
dist, cmd = self.get_cmd()
# filling data_files by pointing files in package_data
dist.package_data = {'somecode': ['*.txt']}
self.write_file((self.tmp_dir, 'somecode', 'doc.txt'), '#')
cmd.formats = ['gztar']
cmd.ensure_finalized()
cmd.run()
f = open(cmd.manifest)
try:
manifest = [line.strip() for line in f.read().split('\n')
if line.strip() != '']
finally:
f.close()
self.assertEqual(len(manifest), 5)
# adding a file
self.write_file((self.tmp_dir, 'somecode', 'doc2.txt'), '#')
# make sure build_py is reinitialized, like a fresh run
build_py = dist.get_command_obj('build_py')
build_py.finalized = False
build_py.ensure_finalized()
cmd.run()
f = open(cmd.manifest)
try:
manifest2 = [line.strip() for line in f.read().split('\n')
if line.strip() != '']
finally:
f.close()
# do we have the new file in MANIFEST ?
self.assertEqual(len(manifest2), 6)
self.assertIn('doc2.txt', manifest2[-1])
@unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run')
def test_manifest_marker(self):
# check that autogenerated MANIFESTs have a marker
dist, cmd = self.get_cmd()
cmd.ensure_finalized()
cmd.run()
f = open(cmd.manifest)
try:
manifest = [line.strip() for line in f.read().split('\n')
if line.strip() != '']
finally:
f.close()
self.assertEqual(manifest[0],
'# file GENERATED by distutils, do NOT edit')
@unittest.skipUnless(ZLIB_SUPPORT, "Need zlib support to run")
def test_manifest_comments(self):
# make sure comments don't cause exceptions or wrong includes
contents = dedent("""\
# bad.py
#bad.py
good.py
""")
dist, cmd = self.get_cmd()
cmd.ensure_finalized()
self.write_file((self.tmp_dir, cmd.manifest), contents)
self.write_file((self.tmp_dir, 'good.py'), '# pick me!')
self.write_file((self.tmp_dir, 'bad.py'), "# don't pick me!")
self.write_file((self.tmp_dir, '#bad.py'), "# don't pick me!")
cmd.run()
self.assertEqual(cmd.filelist.files, ['good.py'])
@unittest.skipUnless(ZLIB_SUPPORT, 'Need zlib support to run')
def test_manual_manifest(self):
# check that a MANIFEST without a marker is left alone
dist, cmd = self.get_cmd()
cmd.formats = ['gztar']
cmd.ensure_finalized()
self.write_file((self.tmp_dir, cmd.manifest), 'README.manual')
self.write_file((self.tmp_dir, 'README.manual'),
'This project maintains its MANIFEST file itself.')
cmd.run()
self.assertEqual(cmd.filelist.files, ['README.manual'])
f = open(cmd.manifest)
try:
manifest = [line.strip() for line in f.read().split('\n')
if line.strip() != '']
finally:
f.close()
self.assertEqual(manifest, ['README.manual'])
archive_name = join(self.tmp_dir, 'dist', 'fake-1.0.tar.gz')
archive = tarfile.open(archive_name)
try:
filenames = [tarinfo.name for tarinfo in archive]
finally:
archive.close()
self.assertEqual(sorted(filenames), ['fake-1.0', 'fake-1.0/PKG-INFO',
'fake-1.0/README.manual'])
@unittest.skipUnless(ZLIB_SUPPORT, "requires zlib")
@unittest.skipUnless(UID_GID_SUPPORT, "Requires grp and pwd support")
@unittest.skipIf(find_executable('tar') is None,
"The tar command is not found")
@unittest.skipIf(find_executable('gzip') is None,
"The gzip command is not found")
def test_make_distribution_owner_group(self):
# now building a sdist
dist, cmd = self.get_cmd()
# creating a gztar and specifying the owner+group
cmd.formats = ['gztar']
cmd.owner = pwd.getpwuid(0)[0]
cmd.group = grp.getgrgid(0)[0]
cmd.ensure_finalized()
cmd.run()
# making sure we have the good rights
archive_name = join(self.tmp_dir, 'dist', 'fake-1.0.tar.gz')
archive = tarfile.open(archive_name)
try:
for member in archive.getmembers():
self.assertEqual(member.uid, 0)
self.assertEqual(member.gid, 0)
finally:
archive.close()
# building a sdist again
dist, cmd = self.get_cmd()
# creating a gztar
cmd.formats = ['gztar']
cmd.ensure_finalized()
cmd.run()
# making sure we have the good rights
archive_name = join(self.tmp_dir, 'dist', 'fake-1.0.tar.gz')
archive = tarfile.open(archive_name)
        # note that we are not testing the group ownership here,
        # because it depends on the platform and the container
        # rights (see #7408)
try:
for member in archive.getmembers():
self.assertEqual(member.uid, os.getuid())
finally:
archive.close()
def test_suite():
return unittest.makeSuite(SDistTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
| {
"content_hash": "3f2bfefe7c9829961156ec7148fff5d2",
"timestamp": "",
"source": "github",
"line_count": 484,
"max_line_length": 77,
"avg_line_length": 34.06404958677686,
"alnum_prop": 0.5763328683204949,
"repo_name": "mottosso/mindbender-setup",
"id": "5444b815a8b2349883ec5ade2583b63f2067fa5a",
"size": "16487",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "bin/windows/python36/Lib/distutils/tests/test_sdist.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3519"
},
{
"name": "CSS",
"bytes": "96"
},
{
"name": "JavaScript",
"bytes": "13629"
},
{
"name": "PowerShell",
"bytes": "1447"
},
{
"name": "Python",
"bytes": "14346555"
},
{
"name": "QML",
"bytes": "2133450"
},
{
"name": "Shell",
"bytes": "4495"
}
],
"symlink_target": ""
} |
import riprova
# Store number of function calls for error simulation
calls = 0
# Max number of retries attempts
retries = 5
# Register retriable operation with a custom Fibonacci backoff
@riprova.retry(backoff=riprova.FibonacciBackoff(retries=retries))
def mul2(x):
global calls
if calls < 4:
calls += 1
raise RuntimeError('simulated call error')
return x * 2
# Run task
result = mul2(2)
print('Result: {}'.format(result))
| {
"content_hash": "3a82f6c3cd691888c719bddeefcb6ad0",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 65,
"avg_line_length": 18.708333333333332,
"alnum_prop": 0.7015590200445434,
"repo_name": "h2non/riprova",
"id": "85a9bcae0ef702f7ca27a073904fe8d534196a58",
"size": "473",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/fibonacci_backoff.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1435"
},
{
"name": "Python",
"bytes": "77087"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import os
import tempfile
# Find the best implementation available
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import mock
from nose.tools import assert_raises
import numpy as np
import PIL.Image
from . import errors
from . import image as image_utils
import digits
from digits import test_utils
test_utils.skipIfNotFramework('none')
class TestLoadImage():
def test_bad_path(self):
for path in [
'some string',
'/tmp/not-a-file',
'http://not-a-url',
]:
yield self.check_none, path
def check_none(self, path):
assert_raises(
errors.LoadImageError,
image_utils.load_image,
path,
)
def test_good_file(self):
for args in [
# created mode, file extension, pixel value, loaded mode (expected)
# Grayscale
('1', 'png', 1, 'L'),
('1', 'ppm', 1, 'L'),
('L', 'png', 127, 'L'),
('L', 'jpg', 127, 'L'),
('L', 'ppm', 127, 'L'),
('LA', 'png', (127, 255), 'L'),
# Color
('RGB', 'png', (127, 127, 127), 'RGB'),
('RGB', 'jpg', (127, 127, 127), 'RGB'),
('RGB', 'ppm', (127, 127, 127), 'RGB'),
('RGBA', 'png', (127, 127, 127, 255), 'RGB'),
('P', 'png', 127, 'RGB'),
('CMYK', 'jpg', (127, 127, 127, 127), 'RGB'),
('YCbCr', 'jpg', (127, 127, 127), 'RGB'),
]:
yield self.check_good_file, args
def check_good_file(self, args):
orig_mode, suffix, pixel, new_mode = args
orig = PIL.Image.new(orig_mode, (10, 10), pixel)
# temp files cause permission errors so just generate the name
tmp = tempfile.mkstemp(suffix='.' + suffix)
orig.save(tmp[1])
new = image_utils.load_image(tmp[1])
try:
# sometimes on windows the file is not closed yet
# which can cause an exception
os.close(tmp[0])
os.remove(tmp[1])
except:
pass
assert new is not None, 'load_image should never return None'
        assert new.mode == new_mode, 'Image mode should be "%s", not "%s"\nargs - %s' % (new_mode, new.mode, args)
@mock.patch('digits.utils.image.requests')
def test_good_url(self, mock_requests):
# requests
response = mock.Mock()
response.status_code = mock_requests.codes.ok
img_file = os.path.join(
os.path.dirname(digits.__file__),
'static',
'images',
'mona_lisa.jpg',
)
with open(img_file, 'rb') as infile:
response.content = infile.read()
mock_requests.get.return_value = response
img = image_utils.load_image('http://some-url')
assert img is not None
def test_corrupted_file(self):
image = PIL.Image.fromarray(np.zeros((10, 10, 3), dtype=np.uint8))
# Save image to a JPEG buffer.
buffer_io = StringIO()
image.save(buffer_io, format='jpeg')
encoded = buffer_io.getvalue()
buffer_io.close()
# Corrupt the second half of the image buffer.
size = len(encoded)
corrupted = encoded[:size / 2] + encoded[size / 2:][::-1]
# Save the corrupted image to a temporary file.
fname = tempfile.mkstemp(suffix='.bin')
f = os.fdopen(fname[0], 'wb')
fname = fname[1]
f.write(corrupted)
f.close()
assert_raises(
errors.LoadImageError,
image_utils.load_image,
fname,
)
os.remove(fname)
class TestResizeImage():
@classmethod
def setup_class(cls):
cls.np_gray = np.random.randint(0, 255, (10, 10)).astype('uint8')
cls.pil_gray = PIL.Image.fromarray(cls.np_gray)
cls.np_color = np.random.randint(0, 255, (10, 10, 3)).astype('uint8')
cls.pil_color = PIL.Image.fromarray(cls.np_color)
def test_configs(self):
# lots of configs tested here
for h in [10, 15]:
for w in [10, 16]:
for t in ['gray', 'color']:
# test channels=None (should autodetect channels)
if t == 'color':
s = (h, w, 3)
else:
s = (h, w)
yield self.verify_pil, (h, w, None, None, t, s)
yield self.verify_np, (h, w, None, None, t, s)
# test channels={3,1}
for c in [3, 1]:
for m in ['squash', 'crop', 'fill', 'half_crop']:
if c == 3:
s = (h, w, 3)
else:
s = (h, w)
yield self.verify_pil, (h, w, c, m, t, s)
yield self.verify_np, (h, w, c, m, t, s)
def verify_pil(self, args):
# pass a PIL.Image to resize_image and check the returned dimensions
h, w, c, m, t, s = args
if t == 'gray':
i = self.pil_gray
else:
i = self.pil_color
r = image_utils.resize_image(i, h, w, c, m)
assert r.shape == s, 'Resized PIL.Image (orig=%s) should have been %s, but was %s %s' % (
i.size, s, r.shape, self.args_to_str(args))
assert r.dtype == np.uint8, 'image.dtype should be uint8, not %s' % r.dtype
def verify_np(self, args):
# pass a numpy.ndarray to resize_image and check the returned dimensions
h, w, c, m, t, s = args
if t == 'gray':
i = self.np_gray
else:
i = self.np_color
r = image_utils.resize_image(i, h, w, c, m)
assert r.shape == s, 'Resized np.ndarray (orig=%s) should have been %s, but was %s %s' % (
i.shape, s, r.shape, self.args_to_str(args))
assert r.dtype == np.uint8, 'image.dtype should be uint8, not %s' % r.dtype
def args_to_str(self, args):
return """
height=%s
width=%s
channels=%s
resize_mode=%s
image_type=%s
shape=%s""" % args
| {
"content_hash": "6ef576febb8ddecb53bc2ef27f27b044",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 113,
"avg_line_length": 33.37948717948718,
"alnum_prop": 0.48486710708250114,
"repo_name": "gheinrich/DIGITS-GAN",
"id": "89a76510e846de5658bb3e4cf0067e2d3fd1770c",
"size": "6578",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "digits/utils/test_image.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "4338"
},
{
"name": "HTML",
"bytes": "2634846"
},
{
"name": "JavaScript",
"bytes": "53896"
},
{
"name": "Lua",
"bytes": "110599"
},
{
"name": "Makefile",
"bytes": "113"
},
{
"name": "Protocol Buffer",
"bytes": "1749"
},
{
"name": "Python",
"bytes": "1237457"
},
{
"name": "Shell",
"bytes": "12480"
}
],
"symlink_target": ""
} |
"""Test cases for the API stream sensor."""
from homeassistant.bootstrap import async_setup_component
from .test_auth import test_auth_active_with_token
from tests.common import assert_setup_component
async def test_websocket_api(
hass, no_auth_websocket_client, hass_access_token, legacy_auth
):
"""Test API streams."""
with assert_setup_component(1):
await async_setup_component(
hass, "sensor", {"sensor": {"platform": "websocket_api"}}
)
state = hass.states.get("sensor.connected_clients")
assert state.state == "0"
await test_auth_active_with_token(hass, no_auth_websocket_client, hass_access_token)
state = hass.states.get("sensor.connected_clients")
assert state.state == "1"
await no_auth_websocket_client.close()
await hass.async_block_till_done()
state = hass.states.get("sensor.connected_clients")
assert state.state == "0"
| {
"content_hash": "b65dcc77737cf5c289dc19ac9ee43628",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 88,
"avg_line_length": 29.70967741935484,
"alnum_prop": 0.6872964169381107,
"repo_name": "Teagan42/home-assistant",
"id": "2c7117378516c25a229166f96d9f734b5ef31316",
"size": "921",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/components/websocket_api/test_sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19774313"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
} |
import unittest
from unittest.mock import patch
from airflow.providers.celery.sensors.celery_queue import CeleryQueueSensor
class TestCeleryQueueSensor(unittest.TestCase):
def setUp(self):
class TestCeleryqueueSensor(CeleryQueueSensor):
def _check_task_id(self, context):
return True
self.sensor = TestCeleryqueueSensor
@patch('celery.app.control.Inspect')
def test_poke_success(self, mock_inspect):
mock_inspect_result = mock_inspect.return_value
# test success
mock_inspect_result.reserved.return_value = {'test_queue': []}
mock_inspect_result.scheduled.return_value = {'test_queue': []}
mock_inspect_result.active.return_value = {'test_queue': []}
test_sensor = self.sensor(celery_queue='test_queue', task_id='test-task')
assert test_sensor.poke(None)
@patch('celery.app.control.Inspect')
def test_poke_fail(self, mock_inspect):
mock_inspect_result = mock_inspect.return_value
        # test failure: an active task remains in the queue
mock_inspect_result.reserved.return_value = {'test_queue': []}
mock_inspect_result.scheduled.return_value = {'test_queue': []}
mock_inspect_result.active.return_value = {'test_queue': ['task']}
test_sensor = self.sensor(celery_queue='test_queue', task_id='test-task')
assert not test_sensor.poke(None)
@patch('celery.app.control.Inspect')
def test_poke_success_with_taskid(self, mock_inspect):
test_sensor = self.sensor(
celery_queue='test_queue', task_id='test-task', target_task_id='target-task'
)
assert test_sensor.poke(None)
| {
"content_hash": "b3b84de4d6dec38f8ec30e955528aa6c",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 88,
"avg_line_length": 37.63636363636363,
"alnum_prop": 0.6594202898550725,
"repo_name": "danielvdende/incubator-airflow",
"id": "4b3820802fa180adde1938986b04906ec0ef6064",
"size": "2444",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "tests/providers/celery/sensors/test_celery_queue.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25785"
},
{
"name": "Dockerfile",
"bytes": "76693"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "164512"
},
{
"name": "JavaScript",
"bytes": "236992"
},
{
"name": "Jinja",
"bytes": "37155"
},
{
"name": "Jupyter Notebook",
"bytes": "2929"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "21824455"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "495567"
},
{
"name": "TypeScript",
"bytes": "326556"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from .models import Table, Chair
from simple_history.admin import SimpleHistoryAdmin
class TableAdmin(admin.ModelAdmin):
list_display=(
"id",
"created",
"owner",
"name",
"weight"
)
#admin.site.register(Table,TableAdmin)
admin.site.register(Table, SimpleHistoryAdmin)
class ChairAdmin(admin.ModelAdmin):
list_display=(
"name",
)
admin.site.register(Chair,ChairAdmin)
| {
"content_hash": "b00e9096394e38086761fbbe1094ae13",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 51,
"avg_line_length": 19.40909090909091,
"alnum_prop": 0.7353629976580797,
"repo_name": "wasit7/tutorials",
"id": "ebf6e0cf5ecc7cdc93d3ce4c6355c08cabe174f5",
"size": "427",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/django_generic_view/rest/myapp/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "161779"
},
{
"name": "Batchfile",
"bytes": "1953"
},
{
"name": "C",
"bytes": "580699"
},
{
"name": "C++",
"bytes": "500977"
},
{
"name": "CMake",
"bytes": "14548"
},
{
"name": "CSS",
"bytes": "12348"
},
{
"name": "Cuda",
"bytes": "16475"
},
{
"name": "Elixir",
"bytes": "391"
},
{
"name": "HTML",
"bytes": "81272"
},
{
"name": "JavaScript",
"bytes": "389"
},
{
"name": "Jupyter Notebook",
"bytes": "1175781"
},
{
"name": "Makefile",
"bytes": "8294"
},
{
"name": "PowerShell",
"bytes": "8175"
},
{
"name": "Processing",
"bytes": "10267"
},
{
"name": "Python",
"bytes": "193149"
},
{
"name": "Shell",
"bytes": "559"
},
{
"name": "XSLT",
"bytes": "2042"
}
],
"symlink_target": ""
} |
"""resource.py contains the base resource classes that user-created
resources depend on in steno3d
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from json import dumps
from pprint import pformat
from six import string_types
import properties
from .client import Comms, needs_login, pause, plot
from .props import HasSteno3DProps
class classproperty(property):
"""class decorator to enable property behavior in classmethods"""
def __get__(self, cls, owner):
return self.fget.__get__(None, owner)()
class UserContent(HasSteno3DProps):
"""Base class for everything user creates and uploads in steno3d"""
title = properties.String(
doc='Title of the model.',
default='',
required=False
)
description = properties.String(
doc='Description of the model.',
default='',
required=False
)
_sync = False
_upload_data = None
_upload_size = 0
_upload_count = 0
_upload_total_size = 0
_upload_total_count = 0
@classproperty
@classmethod
def _resource_class(cls):
"""name of the class of resource"""
if getattr(cls, '__resource_class', None) is None:
cls.__resource_class = cls.__name__.lower()
return cls.__resource_class
@classproperty
@classmethod
def _model_api_location(cls):
"""api destination for resource"""
if getattr(cls, '__model_api_location', None) is None:
cls.__model_api_location = 'resource/{className}'.format(
className=cls._resource_class
)
return cls.__model_api_location
def _upload(self, **kwargs):
if getattr(self, '_uploading', False):
return
try:
verbose = kwargs.get('verbose', True)
sync = kwargs.get('sync', False)
self._uploading = True
pause()
assert self.validate()
self._upload_dirty(**kwargs)
if getattr(self, '_upload_data', None) is None:
self._post(
self._get_dirty_data(force=True),
self._get_dirty_files(force=True)
)
else:
dirty_data = self._get_dirty_data()
dirty_files = self._get_dirty_files()
if len(dirty_data) > 0 or len(dirty_files) > 0:
self._put(dirty_data, dirty_files)
self._mark_clean(recurse=False)
self._sync = sync
progress_callback = kwargs.get('progress_callback', None)
if verbose and progress_callback is None:
progress_callback = self._progress_report
if progress_callback is not None:
if (
isinstance(self, BaseResource) and
not isinstance(self, CompositeResource)
):
UserContent._upload_size += self._nbytes()
else:
UserContent._upload_count += 1
progress = 0.9 * (
UserContent._upload_size /
UserContent._upload_total_size
) + 0.1 * (
UserContent._upload_count /
UserContent._upload_total_count
)
message = 'Uploading: {cls} {title}'.format(
cls=self._resource_class,
title=self.title
)
progress_callback({'progress': progress, 'message': message})
except Exception as err:
if self._sync and verbose:
print('Upload failed, turning off syncing. To restart '
'syncing, upload() again.')
self._sync = False
else:
raise err
finally:
self._uploading = False
@staticmethod
def _progress_report(status):
print('\rTotal progress: {:>3}% - {}'.format(
int(round(status['progress']*100)), status['message']
), end='')
def _get_dirty_data(self, force=False):
dirty = self._dirty_props
datadict = dict()
if 'title' in dirty or force:
datadict['title'] = self.title
if 'description' in dirty or force:
datadict['description'] = self.description
return datadict
def _get_dirty_files(self, force=False):
return {}
def _upload_dirty(self, **kwargs):
pass
@properties.observer(properties.everything)
def _on_property_change(self, change):
if getattr(self, '_sync', False):
self._upload(sync=self._sync)
def _post(self, datadict=None, files=None):
self._client_upload(Comms.post, 'api/' + self._model_api_location,
datadict, files)
def _put(self, datadict=None, files=None):
pause()
api_uid = 'api/{mapi}/{uid}'.format(mapi=self._model_api_location,
uid=self._upload_data['uid'])
self._client_upload(Comms.put, api_uid, datadict, files)
def _client_upload(self, request_fcn, url,
datadict=None, files=None):
req = request_fcn(
url,
data=datadict if datadict else tuple(),
files=files if files else tuple(),
)
if isinstance(req, list):
for rq in req:
if rq['status_code'] != 200:
try:
resp = pformat(rq['json'])
except ValueError:
resp = rq
raise UploadError(
'Upload failed: {location}'.format(
location=url,
) +
'\ndata: {datadict}\nfiles: {filedict}'.format(
datadict=pformat(datadict),
filedict=pformat(files),
) +
'\nresponse: {response}'.format(
response=resp,
)
)
self._upload_data = [rq['json'] for rq in req]
else:
if req['status_code'] != 200:
raise UploadError(
'Upload failed: {location}'.format(
location=url,
) +
'\ndata: {datadict}\nfiles: {filedict}'.format(
datadict=pformat(datadict),
filedict=pformat(files),
) +
'\nresponse: {response}'.format(
response=req['json'],
)
)
self._upload_data = req['json']
@property
def _json(self):
"""Return a JSON representation of the object"""
json = getattr(self, '_upload_data', None)
if json is None:
raise ValueError('JSON not available: Data not uploaded')
return json
@classmethod
def _json_from_uid(cls, uid, using=None):
if not isinstance(uid, string_types) or len(uid) != 20:
raise ValueError('{}: invalid uid'.format(uid))
resp = Comms.get('api/{mapi}/{uid}{using}'.format(
mapi=cls._model_api_location,
uid=uid,
using='?using={}'.format(using) if using else '',
))
if resp['status_code'] != 200:
raise ValueError('{uid}: {cls} query failed'.format(
uid=uid,
cls=cls._resource_class
))
return resp['json']
@classmethod
def _build(cls, src, copy=True, tab_level='', **kwargs):
verbose = kwargs.get('verbose', True)
if isinstance(src, properties.HasProperties):
raise NotImplementedError('Copying instances not supported')
if verbose:
print('{tl}Downloading {cls}'.format(
tl=tab_level,
cls=cls._resource_class
), end=': ')
if isinstance(src, string_types):
json = cls._json_from_uid(src, using=kwargs.get('using', None))
else:
json = src
title = '' if json['title'] is None else json['title']
desc = '' if json['description'] is None else json['description']
if verbose:
print(title)
res = cls._build_from_json(json, copy=copy, tab_level=tab_level,
title=title, description=desc, **kwargs)
if not copy:
res._upload_data = json
if verbose:
print('{}...Complete!'.format(tab_level))
return res
@classmethod
def _build_from_json(cls, json, copy=True, tab_level='', **kwargs):
raise NotImplementedError('Cannot build raw UserContent from json')
class BaseResource(UserContent):
"""Base class for all resources that are added to projects and
uploaded to steno3d
"""
def _get_dirty_data(self, force=False):
datadict = super(BaseResource, self)._get_dirty_data(force)
dirty = self._dirty
if 'opts' in dirty or (force and hasattr(self, 'opts')):
datadict['meta'] = self.opts._json
return datadict
def _validate_file_size(self, name, arr):
if Comms.user.logged_in:
file_limit = Comms.user.file_size_limit
if self._nbytes(arr) > file_limit:
raise FileSizeLimitExceeded(
'{name} file size ({file} bytes) exceeds limit: '
'{lim} bytes'.format(name=name,
file=self._nbytes(arr),
lim=file_limit)
)
return True
class CompositeResource(BaseResource):
"""A composite resource that stores references to lower-level objects."""
project = properties.List(
doc='Project containing the resource',
prop=UserContent,
coerce=True,
default=list,
)
def __init__(self, project=None, **kwargs):
if project is None:
raise TypeError('Resource must be constructed with its '
'containing project(s)')
super(CompositeResource, self).__init__(**kwargs)
self.project = project
@classmethod
def _url_view_from_uid(cls, uid):
"""Get full url from a uid"""
url = '{base}{mapi}/{uid}'.format(
base=Comms.base_url,
mapi=cls._model_api_location,
uid=uid)
return url
@properties.validator
def _validate_proj(self):
for proj in self.project:
if self not in proj.resources:
raise ValueError('Project/resource pointers misaligned: '
'Ensure that projects contain all the '
'resources that point to them.')
return True
@needs_login
def upload(self, sync=False, verbose=True, print_url=True):
"""Upload the resource through its containing project(s)"""
for proj in self.project:
proj.upload(sync=sync, verbose=verbose, print_url=False)
if verbose and print_url:
print(self._url)
return self._url
def _get_dirty_data(self, force=False):
datadict = super(CompositeResource, self)._get_dirty_data(force)
dirty = self._dirty_props
if 'mesh' in dirty or force:
datadict['mesh'] = dumps({
'uid': self.mesh._json['longUid']
})
if 'data' in dirty or force:
datadict['data'] = dumps([
{
'location': d.location,
'uid': d.data._json['longUid']
} for d in self.data
])
if 'textures' in dirty or (force and hasattr(self, 'textures')):
datadict['textures'] = dumps([
{
'uid': t._json['longUid']
} for t in self.textures
])
return datadict
def _upload_dirty(self, **kwargs):
dirty = self._dirty
if 'mesh' in dirty:
self.mesh._upload(**kwargs)
if 'data' in dirty:
[d.data._upload(**kwargs) for d in self.data]
if 'textures' in dirty:
[t._upload(**kwargs) for t in self.textures]
@properties.observer('project')
def _fix_proj_res(self, change):
before = change['previous']
after = change['value']
if before in (None, properties.undefined):
before = []
if after in (None, properties.undefined):
after = []
for proj in after:
if proj not in before and self not in proj.resources:
proj.resources += [self]
for proj in before:
if proj not in after and self in proj.resources:
proj.resources = [r for r in proj.resources
if r is not self]
if len(set(after)) != len(after):
post_post = []
for p in after:
if p not in post_post:
post_post += [p]
self.project = post_post
@property
def _url(self):
if getattr(self, '_upload_data', None) is not None:
return self._url_view_from_uid(self._upload_data['uid'])
@property
@needs_login
def url(self):
"""steno3d.com url of project if uploaded"""
if getattr(self, '_upload_data', None) is None:
print('Resource not uploaded: Please upload() '
'before accessing the URL.')
return self._url
@needs_login
def plot(self):
"""Display the 3D representation of the content"""
if getattr(self.project, '_upload_data', None) is None:
print('Resource not uploaded: Please upload() '
'before plotting.')
return
return self.project.plot()
@classmethod
def _build_from_json(cls, json, copy=True, tab_level='', **kwargs):
if 'project' not in kwargs:
raise KeyError('Building CompositeResource from json requires '
'project input.')
res = cls(
project=kwargs['project'],
title=kwargs['title'],
description=kwargs['description'],
opts=json['meta']
)
(mesh_string, mesh_uid) = (
json['mesh']['uid'].split('Resource')[-1].split(':')
)
mesh_class = UserContent._REGISTRY[mesh_string]
res.mesh = mesh_class._build(mesh_uid, copy, tab_level + ' ',
using=kwargs.get('using', None))
if 'textures' in json:
res.textures = []
for t in json['textures']:
(tex_string, tex_uid) = (
t['uid'].split('Resource')[-1].split(':')
)
tex_class = UserContent._REGISTRY[tex_string]
res.textures += [tex_class._build(
tex_uid, copy, tab_level + ' ',
using=kwargs.get('using', None),
)]
if 'data' in json:
res.data = []
for d in json['data']:
(data_string, data_uid) = (
d['uid'].split('Resource')[-1].split(':')
)
data_class = UserContent._REGISTRY[data_string]
res.data += [dict(
location=d['location'],
data=data_class._build(
data_uid, copy, tab_level + ' ',
using=kwargs.get('using', None),
)
)]
return res
@classmethod
def _build_from_omf(cls, omf_element, omf_project, project, verbose=False):
mesh_map = {
'PointSetGeometry': 'Mesh0D',
'LineSetGeometry': 'Mesh1D',
'SurfaceGeometry': 'Mesh2D',
'SurfaceGridGeometry': 'Mesh2DGrid',
'VolumeGridGeometry': 'Mesh3DGrid'
}
mesh_class = UserContent._REGISTRY[mesh_map[
omf_element.geometry.__class__.__name__
]]
res = cls(
project=project,
title=omf_element.name,
description=omf_element.description,
mesh=mesh_class._build_from_omf(omf_element.geometry, omf_project),
opts={'color': omf_element.color}
)
if hasattr(omf_element, 'textures'):
res.textures = []
for tex in omf_element.textures:
res.textures += [
UserContent._REGISTRY['Texture2DImage']._build_from_omf(
tex, omf_project
)
]
if hasattr(omf_element, 'data'):
data_map = {
'ScalarData': 'DataArray',
'MappedData': 'DataCategory',
}
res.data = []
for dat in omf_element.data:
if dat.__class__.__name__ not in data_map:
if verbose:
print('Data of class {} ignored'.format(
dat.__class__.__name__
))
continue
data_class = UserContent._REGISTRY[data_map[
dat.__class__.__name__
]]
res.data += [data_class._build_from_omf(dat)]
return res
class BaseMesh(BaseResource):
"""Base class for all mesh resources. These are contained within
each composite resources and define its structure
"""
@properties.validator
def _validate_mesh(self):
if Comms.user.logged_in:
file_limit = Comms.user.file_size_limit
if self._nbytes() > file_limit:
raise FileSizeLimitExceeded(
'{name} size ({file} bytes) exceeds limit: '
'{lim} bytes'.format(name=self.__class__.__name__,
file=self._nbytes(),
lim=file_limit)
)
return True
class BaseData(BaseResource):
"""Base class for all data resources. These can be contained within
each composite resource and define data corresponding to the mesh
"""
@classproperty
@classmethod
def _model_api_location(cls):
"""api destination for texture resource"""
if getattr(cls, '__model_api_location', None) is None:
cls.__model_api_location = 'resource/data/{class_name}'.format(
class_name=cls._resource_class)
return cls.__model_api_location
class BaseTexture2D(BaseResource):
"""Base class for all texture resources. These can be contained
within some composite resources and define data in space that gets
mapped to the mesh.
"""
@classproperty
@classmethod
def _model_api_location(cls):
"""api destination for texture resource"""
if getattr(cls, '__model_api_location', None) is None:
cls.__model_api_location = 'resource/texture2d/{cls_name}'.format(
cls_name=cls._resource_class)
return cls.__model_api_location
class ResourceSizeError(Exception):
"""Exception for exceeding size limits"""
class FileSizeLimitExceeded(ResourceSizeError):
"""Exception when a file to upload exceeds limits"""
class ProjectResourceLimitExceeded(ResourceSizeError):
"""Exception when number of resources in a project exceeds limits"""
class ProjectSizeLimitExceeded(ResourceSizeError):
"""Exception when total size of project exceeds limits"""
class ProjectQuotaExceeded(Exception):
"""Exception when an upload past the project quota is attempted"""
class UploadError(Exception):
"""Exception when upload fails"""
| {
"content_hash": "39e44333e7bd20a1afa88797f918d648",
"timestamp": "",
"source": "github",
"line_count": 566,
"max_line_length": 79,
"avg_line_length": 35.3922261484099,
"alnum_prop": 0.520367412140575,
"repo_name": "3ptscience/steno3dpy",
"id": "1f7cf29dde2a96a5537ce05fdf272e79303e0bd6",
"size": "20032",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "steno3d/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "515"
},
{
"name": "Python",
"bytes": "166277"
},
{
"name": "Shell",
"bytes": "2439"
}
],
"symlink_target": ""
} |
import datetime
import json
import os
import time
from uuid import UUID, uuid4
from django import forms as django_forms, http
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.core.files.storage import default_storage as storage
from django.db import transaction
from django.db.models import Count
from django.shortcuts import get_object_or_404, redirect
from django.template import loader
from django.utils.translation import ugettext, ugettext_lazy as _
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt
import waffle
from django_statsd.clients import statsd
from PIL import Image
import olympia.core.logger
from olympia import amo
from olympia.access import acl
from olympia.accounts.utils import redirect_for_login
from olympia.accounts.views import API_TOKEN_COOKIE
from olympia.activity.models import ActivityLog, VersionLog
from olympia.activity.utils import log_and_notify
from olympia.addons import forms as addon_forms
from olympia.addons.models import Addon, AddonReviewerFlags, AddonUser
from olympia.addons.views import BaseFilter
from olympia.amo import messages, utils as amo_utils
from olympia.amo.decorators import json_view, login_required, post_required
from olympia.amo.templatetags.jinja_helpers import absolutify, urlparams
from olympia.amo.urlresolvers import reverse
from olympia.amo.utils import MenuItem, escape_all, render, send_mail
from olympia.api.models import APIKey
from olympia.applications.models import AppVersion
from olympia.devhub.decorators import dev_required, no_admin_disabled
from olympia.devhub.forms import AgreementForm, CheckCompatibilityForm
from olympia.devhub.models import BlogPost, RssKey
from olympia.devhub.utils import add_dynamic_theme_tag, process_validation
from olympia.files.models import File, FileUpload, FileValidation
from olympia.files.utils import parse_addon
from olympia.lib.crypto.packaged import sign_file
from olympia.reviewers.forms import PublicWhiteboardForm
from olympia.reviewers.models import Whiteboard
from olympia.reviewers.templatetags.jinja_helpers import get_position
from olympia.reviewers.utils import ReviewHelper
from olympia.search.views import BaseAjaxSearch
from olympia.users.models import UserProfile
from olympia.versions import compare
from olympia.versions.models import Version
from olympia.zadmin.models import ValidationResult, get_config
from . import feeds, forms, signals, tasks
log = olympia.core.logger.getLogger('z.devhub')
# We use a session cookie to make sure people see the dev agreement.
MDN_BASE = 'https://developer.mozilla.org/en-US/Add-ons'
def get_fileupload_by_uuid_or_404(value):
try:
UUID(value)
except ValueError:
raise http.Http404()
return get_object_or_404(FileUpload, uuid=value)
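# Usage note (illustrative): a malformed value such as 'not-a-uuid' raises
# Http404 before any database lookup; a well-formed UUID is then matched
# against FileUpload.uuid, with get_object_or_404 raising 404 if absent.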
class AddonFilter(BaseFilter):
opts = (('updated', _(u'Updated')),
('name', _(u'Name')),
('created', _(u'Created')),
('popular', _(u'Downloads')),
('rating', _(u'Rating')))
class ThemeFilter(BaseFilter):
opts = (('created', _(u'Created')),
('name', _(u'Name')),
('popular', _(u'Downloads')),
('rating', _(u'Rating')))
def addon_listing(request, theme=False):
"""Set up the queryset and filtering for addon listing for Dashboard."""
if theme:
qs = request.user.addons.filter(
type__in=[amo.ADDON_PERSONA, amo.ADDON_STATICTHEME])
filter_cls = ThemeFilter
default = 'created'
else:
qs = Addon.objects.filter(authors=request.user).exclude(
type__in=[amo.ADDON_PERSONA, amo.ADDON_STATICTHEME])
filter_cls = AddonFilter
default = 'updated'
filter_ = filter_cls(request, qs, 'sort', default)
return filter_.qs, filter_
def index(request):
ctx = {'blog_posts': _get_posts()}
if request.user.is_authenticated():
user_addons = Addon.objects.filter(authors=request.user)
recent_addons = user_addons.order_by('-modified')[:3]
ctx['recent_addons'] = []
for addon in recent_addons:
ctx['recent_addons'].append({'addon': addon,
'position': get_position(addon)})
return render(request, 'devhub/index.html', ctx)
@login_required
def dashboard(request, theme=False):
addon_items = _get_items(
None, Addon.objects.filter(authors=request.user))[:4]
data = dict(rss=_get_rss_feed(request), blog_posts=_get_posts(),
timestamp=int(time.time()), addon_tab=not theme,
theme=theme, addon_items=addon_items)
if data['addon_tab']:
addons, data['filter'] = addon_listing(request)
# We know the dashboard is going to want to display feature
# compatibility. Unfortunately, cache-machine doesn't obey
# select_related properly, so to avoid the extra queries we do the next
# best thing, prefetch_related, which works fine with cache-machine.
addons = addons.prefetch_related('addonfeaturecompatibility')
data['addons'] = amo_utils.paginate(request, addons, per_page=10)
if theme:
themes, data['filter'] = addon_listing(request, theme=True)
data['themes'] = amo_utils.paginate(request, themes, per_page=10)
if 'filter' in data:
data['sorting'] = data['filter'].field
data['sort_opts'] = data['filter'].opts
return render(request, 'devhub/addons/dashboard.html', data)
@dev_required
def ajax_compat_status(request, addon_id, addon):
if not (addon.accepts_compatible_apps() and addon.current_version):
raise http.Http404()
return render(request, 'devhub/addons/ajax_compat_status.html',
dict(addon=addon))
@dev_required
def ajax_compat_error(request, addon_id, addon):
if not (addon.accepts_compatible_apps() and addon.current_version):
raise http.Http404()
return render(request, 'devhub/addons/ajax_compat_error.html',
dict(addon=addon))
@dev_required
def ajax_compat_update(request, addon_id, addon, version_id):
if not addon.accepts_compatible_apps():
raise http.Http404()
version = get_object_or_404(Version.objects, pk=version_id, addon=addon)
compat_form = forms.CompatFormSet(request.POST or None,
queryset=version.apps.all(),
form_kwargs={'version': version})
if request.method == 'POST' and compat_form.is_valid():
for compat in compat_form.save(commit=False):
compat.version = version
compat.save()
for compat in compat_form.deleted_objects:
compat.delete()
for form in compat_form.forms:
if (isinstance(form, forms.CompatForm) and
'max' in form.changed_data):
_log_max_version_change(addon, version, form.instance)
return render(request, 'devhub/addons/ajax_compat_update.html',
dict(addon=addon, version=version, compat_form=compat_form))
def _get_addons(request, addons, addon_id, action):
"""Create a list of ``MenuItem``s for the activity feed."""
items = []
a = MenuItem()
a.selected = (not addon_id)
(a.text, a.url) = (ugettext('All My Add-ons'), reverse('devhub.feed_all'))
if action:
a.url += '?action=' + action
items.append(a)
for addon in addons:
item = MenuItem()
try:
item.selected = (addon_id and addon.id == int(addon_id))
except ValueError:
pass # We won't get here... EVER
url = reverse('devhub.feed', args=[addon.slug])
if action:
url += '?action=' + action
item.text, item.url = addon.name, url
items.append(item)
return items
def _get_posts(limit=5):
return BlogPost.objects.order_by('-date_posted')[0:limit]
def _get_activities(request, action):
url = request.get_full_path()
choices = (None, 'updates', 'status', 'collections', 'reviews')
text = {None: ugettext('All Activity'),
'updates': ugettext('Add-on Updates'),
'status': ugettext('Add-on Status'),
'collections': ugettext('User Collections'),
'reviews': ugettext('User Reviews'),
}
items = []
for c in choices:
i = MenuItem()
i.text = text[c]
i.url, i.selected = urlparams(url, page=None, action=c), (action == c)
items.append(i)
return items
def _get_items(action, addons):
filters = {
'updates': (amo.LOG.ADD_VERSION, amo.LOG.ADD_FILE_TO_VERSION),
'status': (amo.LOG.USER_DISABLE, amo.LOG.USER_ENABLE,
amo.LOG.CHANGE_STATUS, amo.LOG.APPROVE_VERSION,),
'collections': (amo.LOG.ADD_TO_COLLECTION,
amo.LOG.REMOVE_FROM_COLLECTION,),
'reviews': (amo.LOG.ADD_RATING,)
}
filter_ = filters.get(action)
items = (ActivityLog.objects.for_addons(addons)
.exclude(action__in=amo.LOG_HIDE_DEVELOPER))
if filter_:
items = items.filter(action__in=[i.id for i in filter_])
return items
def _get_rss_feed(request):
key, __ = RssKey.objects.get_or_create(user=request.user)
return urlparams(reverse('devhub.feed_all'), privaterss=key.key)
def feed(request, addon_id=None):
if request.GET.get('privaterss'):
return feeds.ActivityFeedRSS()(request)
addon_selected = None
if not request.user.is_authenticated():
return redirect_for_login(request)
else:
addons_all = Addon.objects.filter(authors=request.user)
if addon_id:
addon = get_object_or_404(Addon.objects.id_or_slug(addon_id))
addons = addon # common query set
try:
key = RssKey.objects.get(addon=addons)
except RssKey.DoesNotExist:
key = RssKey.objects.create(addon=addons)
addon_selected = addon.id
rssurl = urlparams(reverse('devhub.feed', args=[addon_id]),
privaterss=key.key)
if not acl.check_addon_ownership(request, addons, dev=True,
ignore_disabled=True):
raise PermissionDenied
else:
rssurl = _get_rss_feed(request)
addon = None
addons = addons_all
action = request.GET.get('action')
items = _get_items(action, addons)
activities = _get_activities(request, action)
addon_items = _get_addons(request, addons_all, addon_selected, action)
pager = amo_utils.paginate(request, items, 20)
data = dict(addons=addon_items, pager=pager, activities=activities,
rss=rssurl, addon=addon)
return render(request, 'devhub/addons/activity.html', data)
@dev_required
def edit(request, addon_id, addon):
try:
whiteboard = Whiteboard.objects.get(pk=addon.pk)
except Whiteboard.DoesNotExist:
whiteboard = Whiteboard(pk=addon.pk)
previews = (
addon.current_version.previews.all()
if addon.current_version and addon.has_per_version_previews
else addon.previews.all())
header_preview = (
previews.first() if addon.type == amo.ADDON_STATICTHEME else None)
data = {
'page': 'edit',
'addon': addon,
'whiteboard': whiteboard,
'editable': False,
'show_listed_fields': addon.has_listed_versions(),
'valid_slug': addon.slug,
'tags': addon.tags.not_denied().values_list('tag_text', flat=True),
'previews': previews,
'header_preview': header_preview,
'supported_image_types': amo.SUPPORTED_IMAGE_TYPES,
}
return render(request, 'devhub/addons/edit.html', data)
@dev_required(theme=True)
def edit_theme(request, addon_id, addon, theme=False):
form = addon_forms.EditThemeForm(data=request.POST or None,
request=request, instance=addon)
owner_form = addon_forms.EditThemeOwnerForm(data=request.POST or None,
instance=addon)
if request.method == 'POST':
if 'owner_submit' in request.POST:
if owner_form.is_valid():
owner_form.save()
messages.success(
request, ugettext('Changes successfully saved.'))
return redirect('devhub.themes.edit', addon.slug)
elif form.is_valid():
form.save()
messages.success(request, ugettext('Changes successfully saved.'))
return redirect('devhub.themes.edit', addon.reload().slug)
else:
messages.error(
request, ugettext('Please check the form for errors.'))
return render(request, 'devhub/personas/edit.html', {
'addon': addon, 'persona': addon.persona, 'form': form,
'owner_form': owner_form})
@dev_required(owner_for_post=True, theme=True)
@post_required
def delete(request, addon_id, addon, theme=False):
# Database deletes only allowed for free or incomplete addons.
if not addon.can_be_deleted():
msg = ugettext(
'Add-on cannot be deleted. Disable this add-on instead.')
messages.error(request, msg)
return redirect(addon.get_dev_url('versions'))
form = forms.DeleteForm(request.POST, addon=addon)
if form.is_valid():
reason = form.cleaned_data.get('reason', '')
addon.delete(msg='Removed via devhub', reason=reason)
messages.success(
request,
ugettext('Theme deleted.')
if theme else ugettext('Add-on deleted.'))
return redirect('devhub.%s' % ('themes' if theme else 'addons'))
else:
if theme:
messages.error(
request,
ugettext('URL name was incorrect. Theme was not deleted.'))
return redirect(addon.get_dev_url())
else:
messages.error(
request,
ugettext('URL name was incorrect. Add-on was not deleted.'))
return redirect(addon.get_dev_url('versions'))
@dev_required
@post_required
def enable(request, addon_id, addon):
addon.update(disabled_by_user=False)
ActivityLog.create(amo.LOG.USER_ENABLE, addon)
return redirect(addon.get_dev_url('versions'))
@dev_required(owner_for_post=True)
@post_required
def cancel(request, addon_id, addon):
if addon.status == amo.STATUS_NOMINATED:
addon.update(status=amo.STATUS_NULL)
ActivityLog.create(amo.LOG.CHANGE_STATUS, addon, addon.status)
latest_version = addon.find_latest_version(
channel=amo.RELEASE_CHANNEL_LISTED)
if latest_version:
for file_ in latest_version.files.filter(
status=amo.STATUS_AWAITING_REVIEW):
file_.update(status=amo.STATUS_DISABLED)
return redirect(addon.get_dev_url('versions'))
@dev_required
@post_required
def disable(request, addon_id, addon):
# Also set the latest listed version to STATUS_DISABLED if it was
# AWAITING_REVIEW, to not waste reviewers' time.
latest_version = addon.find_latest_version(
channel=amo.RELEASE_CHANNEL_LISTED)
if latest_version:
latest_version.files.filter(
status=amo.STATUS_AWAITING_REVIEW).update(
status=amo.STATUS_DISABLED)
addon.update_version()
addon.update_status()
addon.update(disabled_by_user=True)
ActivityLog.create(amo.LOG.USER_DISABLE, addon)
return redirect(addon.get_dev_url('versions'))
@dev_required(owner_for_post=True)
def ownership(request, addon_id, addon):
fs, ctx = [], {}
post_data = request.POST if request.method == 'POST' else None
# Authors.
qs = AddonUser.objects.filter(addon=addon).order_by('position')
user_form = forms.AuthorFormSet(post_data, queryset=qs)
fs.append(user_form)
# Versions.
license_form = forms.LicenseForm(post_data, version=addon.current_version)
ctx.update(license_form.get_context())
if ctx['license_form']: # if addon has a version
fs.append(ctx['license_form'])
# Policy.
if addon.type != amo.ADDON_STATICTHEME:
policy_form = forms.PolicyForm(post_data, addon=addon)
ctx.update(policy_form=policy_form)
fs.append(policy_form)
else:
policy_form = None
def mail_user_changes(author, title, template_part, recipients):
from olympia.amo.utils import send_mail
t = loader.get_template(
'users/email/{part}.ltxt'.format(part=template_part))
send_mail(title,
t.render({'author': author, 'addon': addon,
'site_url': settings.SITE_URL}),
None, recipients, use_deny_list=False)
if request.method == 'POST' and all([form.is_valid() for form in fs]):
# Authors.
authors = user_form.save(commit=False)
addon_authors_emails = list(
addon.authors.values_list('email', flat=True))
authors_emails = set(addon_authors_emails +
[author.user.email for author in authors])
for author in authors:
action = None
if not author.id or author.user_id != author._original_user_id:
action = amo.LOG.ADD_USER_WITH_ROLE
author.addon = addon
mail_user_changes(
author=author,
title=ugettext('An author has been added to your add-on'),
template_part='author_added',
recipients=authors_emails)
elif author.role != author._original_role:
action = amo.LOG.CHANGE_USER_WITH_ROLE
title = ugettext('An author has a role changed on your add-on')
mail_user_changes(
author=author,
title=title,
template_part='author_changed',
recipients=authors_emails)
author.save()
if action:
ActivityLog.create(action, author.user,
author.get_role_display(), addon)
if (author._original_user_id and
author.user_id != author._original_user_id):
ActivityLog.create(amo.LOG.REMOVE_USER_WITH_ROLE,
(UserProfile, author._original_user_id),
author.get_role_display(), addon)
for author in user_form.deleted_objects:
author.delete()
ActivityLog.create(amo.LOG.REMOVE_USER_WITH_ROLE, author.user,
author.get_role_display(), addon)
authors_emails.add(author.user.email)
mail_user_changes(
author=author,
title=ugettext('An author has been removed from your add-on'),
template_part='author_removed',
recipients=authors_emails)
if license_form in fs:
license_form.save()
if policy_form and policy_form in fs:
policy_form.save()
messages.success(request, ugettext('Changes successfully saved.'))
return redirect(addon.get_dev_url('owner'))
ctx.update(addon=addon, user_form=user_form)
return render(request, 'devhub/addons/owner.html', ctx)
@login_required
@post_required
@json_view
def compat_application_versions(request):
app_id = request.POST['application']
f = CheckCompatibilityForm()
return {'choices': f.version_choices_for_app_id(app_id)}
@login_required
def validate_addon(request):
return render(request, 'devhub/validate_addon.html',
{'title': ugettext('Validate Add-on'),
'new_addon_form': forms.DistributionChoiceForm()})
@login_required
def check_addon_compatibility(request):
form = CheckCompatibilityForm()
return render(request, 'devhub/validate_addon.html',
{'appversion_form': form,
'title': ugettext('Check Add-on Compatibility'),
'new_addon_form': forms.DistributionChoiceForm()})
def handle_upload(filedata, request, channel, app_id=None, version_id=None,
addon=None, is_standalone=False, submit=False):
automated_signing = channel == amo.RELEASE_CHANNEL_UNLISTED
user = request.user if request.user.is_authenticated() else None
upload = FileUpload.from_post(
filedata, filedata.name, filedata.size,
automated_signing=automated_signing, addon=addon, user=user)
log.info('FileUpload created: %s' % upload.uuid.hex)
if app_id and version_id:
# If app_id and version_id are present, we are dealing with a
# compatibility check (i.e. this is not an upload meant for submission,
# we were called from check_addon_compatibility()), which essentially
# consists of running the uploaded add-on against the legacy validator
# with a specific min/max appversion override.
app = amo.APPS_ALL.get(int(app_id))
if not app:
raise http.Http404()
ver = get_object_or_404(AppVersion, pk=version_id)
tasks.compatibility_check.delay(upload.pk, app.guid, ver.version)
elif submit:
tasks.validate_and_submit(addon, upload, channel=channel)
else:
tasks.validate(upload, listed=(channel == amo.RELEASE_CHANNEL_LISTED))
return upload
@login_required
@post_required
def upload(request, channel='listed', addon=None, is_standalone=False):
channel = amo.CHANNEL_CHOICES_LOOKUP[channel]
filedata = request.FILES['upload']
app_id = request.POST.get('app_id')
version_id = request.POST.get('version_id')
upload = handle_upload(
filedata=filedata, request=request, app_id=app_id,
version_id=version_id, addon=addon, is_standalone=is_standalone,
channel=channel)
if addon:
return redirect('devhub.upload_detail_for_version',
addon.slug, upload.uuid.hex)
elif is_standalone:
return redirect('devhub.standalone_upload_detail', upload.uuid.hex)
else:
return redirect('devhub.upload_detail', upload.uuid.hex, 'json')
@post_required
@dev_required
def upload_for_version(request, addon_id, addon, channel):
return upload(request, channel=channel, addon=addon)
@login_required
@json_view
def standalone_upload_detail(request, uuid):
upload = get_fileupload_by_uuid_or_404(uuid)
url = reverse('devhub.standalone_upload_detail', args=[uuid])
return upload_validation_context(request, upload, url=url)
@dev_required(submitting=True)
@json_view
def upload_detail_for_version(request, addon_id, addon, uuid):
try:
upload = get_fileupload_by_uuid_or_404(uuid)
response = json_upload_detail(request, upload, addon_slug=addon.slug)
statsd.incr('devhub.upload_detail_for_addon.success')
return response
except Exception as exc:
statsd.incr('devhub.upload_detail_for_addon.error')
log.error('Error checking upload status: {} {}'.format(type(exc), exc))
raise
@dev_required(allow_reviewers=True)
def file_validation(request, addon_id, addon, file_id):
file_ = get_object_or_404(File, id=file_id)
validate_url = reverse('devhub.json_file_validation',
args=[addon.slug, file_.id])
file_url = reverse('files.list', args=[file_.id, 'file', ''])
context = {'validate_url': validate_url, 'file_url': file_url,
'file': file_, 'filename': file_.filename,
'timestamp': file_.created, 'addon': addon,
'automated_signing': file_.automated_signing}
if file_.has_been_validated:
context['validation_data'] = file_.validation.processed_validation
return render(request, 'devhub/validation.html', context)
@dev_required(allow_reviewers=True)
def bulk_compat_result(request, addon_id, addon, result_id):
qs = ValidationResult.objects.exclude(completed=None)
result = get_object_or_404(qs, pk=result_id)
job = result.validation_job
revalidate_url = reverse('devhub.json_bulk_compat_result',
args=[addon.slug, result.id])
return _compat_result(request, revalidate_url,
job.application, job.target_version,
for_addon=result.file.version.addon,
validated_filename=result.file.filename,
validated_ts=result.completed)
def _compat_result(request, revalidate_url, target_app, target_version,
validated_filename=None, validated_ts=None,
for_addon=None):
app_trans = dict((g, unicode(a.pretty)) for g, a in amo.APP_GUIDS.items())
ff_versions = (AppVersion.objects.filter(application=amo.FIREFOX.id,
version_int__gte=4000000000000)
.values_list('application', 'version')
.order_by('version_int'))
tpl = 'https://developer.mozilla.org/en/Firefox_%s_for_developers'
change_links = dict()
for app, ver in ff_versions:
major = ver.split('.')[0] # 4.0b3 -> 4
change_links['%s %s' % (amo.APP_IDS[app].guid, ver)] = tpl % major
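# Illustration (hypothetical values): an AppVersion row for Firefox '4.0b3'
# yields the key '<firefox guid> 4.0b3' mapped to
# 'https://developer.mozilla.org/en/Firefox_4_for_developers'.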
return render(request, 'devhub/validation.html',
dict(validate_url=revalidate_url,
filename=validated_filename, timestamp=validated_ts,
target_app=target_app, target_version=target_version,
addon=for_addon, result_type='compat',
app_trans=app_trans, version_change_links=change_links))
@json_view
@csrf_exempt
@dev_required(allow_reviewers=True)
def json_file_validation(request, addon_id, addon, file_id):
file = get_object_or_404(File, id=file_id)
try:
result = file.validation
except FileValidation.DoesNotExist:
if request.method != 'POST':
return http.HttpResponseNotAllowed(['POST'])
# This API is, unfortunately, synchronous, so wait for the
# task to complete and return the result directly.
result = tasks.validate(file, synchronous=True).get()
return {'validation': result.processed_validation, 'error': None}
@json_view
@csrf_exempt
@post_required
@dev_required(allow_reviewers=True)
def json_bulk_compat_result(request, addon_id, addon, result_id):
result = get_object_or_404(ValidationResult, pk=result_id,
completed__isnull=False)
validation = json.loads(result.validation)
return {'validation': process_validation(validation), 'error': None}
@json_view
def json_upload_detail(request, upload, addon_slug=None):
addon = None
if addon_slug:
addon = get_object_or_404(Addon.objects, slug=addon_slug)
result = upload_validation_context(request, upload, addon=addon)
plat_exclude = []
if result['validation']:
try:
pkg = parse_addon(upload, addon=addon, user=request.user)
except django_forms.ValidationError as exc:
errors_before = result['validation'].get('errors', 0)
# This doesn't guard against client-side tinkering, and is purely
# to display those non-linter errors nicely in the frontend. What
# does prevent clients from bypassing those is the fact that we
# always call parse_addon() before calling from_upload(), so
# ValidationError would be raised before proceeding.
for i, msg in enumerate(exc.messages):
# Simulate a validation error so the UI displays
# it as such
result['validation']['messages'].insert(
i, {'type': 'error',
'message': escape_all(msg), 'tier': 1,
'fatal': True})
if result['validation']['ending_tier'] < 1:
result['validation']['ending_tier'] = 1
result['validation']['errors'] += 1
if not errors_before:
return json_view.error(result)
else:
app_ids = set([a.id for a in pkg.get('apps', [])])
supported_platforms = []
if amo.ANDROID.id in app_ids:
supported_platforms.extend((amo.PLATFORM_ANDROID.id,))
app_ids.remove(amo.ANDROID.id)
if len(app_ids):
# Targets any other non-mobile app:
supported_platforms.extend(amo.DESKTOP_PLATFORMS.keys())
plat_exclude = (
set(amo.SUPPORTED_PLATFORMS.keys()) - set(supported_platforms))
plat_exclude = [str(p) for p in plat_exclude]
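# Example of the exclusion logic above (illustrative): an upload whose
# manifest only targets Android keeps PLATFORM_ANDROID and ends up with every
# desktop platform id in plat_exclude; targeting any desktop app keeps the
# desktop platforms instead.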
result['addon_type'] = pkg.get('type', '')
result['platforms_to_exclude'] = plat_exclude
return result
def upload_validation_context(request, upload, addon=None, url=None):
if not url:
if addon:
url = reverse('devhub.upload_detail_for_version',
args=[addon.slug, upload.uuid.hex])
else:
url = reverse(
'devhub.upload_detail',
args=[upload.uuid.hex, 'json'])
full_report_url = reverse('devhub.upload_detail', args=[upload.uuid.hex])
validation = upload.processed_validation or ''
processed_by_linter = (
validation and
validation.get('metadata', {}).get(
'processed_by_addons_linter', False))
return {'upload': upload.uuid.hex,
'validation': validation,
'error': None,
'url': url,
'full_report_url': full_report_url,
'processed_by_addons_linter': processed_by_linter}
def upload_detail(request, uuid, format='html'):
upload = get_fileupload_by_uuid_or_404(uuid)
if upload.user_id and not request.user.is_authenticated():
return redirect_for_login(request)
if format == 'json' or request.is_ajax():
try:
response = json_upload_detail(request, upload)
statsd.incr('devhub.upload_detail.success')
return response
except Exception as exc:
statsd.incr('devhub.upload_detail.error')
log.error('Error checking upload status: {} {}'.format(
type(exc), exc))
raise
validate_url = reverse('devhub.standalone_upload_detail',
args=[upload.uuid.hex])
if upload.compat_with_app:
return _compat_result(request, validate_url,
upload.compat_with_app,
upload.compat_with_appver)
context = {'validate_url': validate_url, 'filename': upload.pretty_name,
'automated_signing': upload.automated_signing,
'timestamp': upload.created}
if upload.validation:
context['validation_data'] = upload.processed_validation
return render(request, 'devhub/validation.html', context)
class AddonDependencySearch(BaseAjaxSearch):
# No personas.
types = [amo.ADDON_EXTENSION, amo.ADDON_THEME, amo.ADDON_DICT,
amo.ADDON_SEARCH, amo.ADDON_LPAPP]
@dev_required
@json_view
def ajax_dependencies(request, addon_id, addon):
return AddonDependencySearch(request, excluded_ids=[addon_id]).items
@dev_required
def addons_section(request, addon_id, addon, section, editable=False):
show_listed = addon.has_listed_versions()
static_theme = addon.type == amo.ADDON_STATICTHEME
models = {}
if show_listed:
models.update({
'basic': addon_forms.AddonFormBasic,
'details': addon_forms.AddonFormDetails,
'support': addon_forms.AddonFormSupport,
'technical': addon_forms.AddonFormTechnical
})
if not static_theme:
models.update({'media': addon_forms.AddonFormMedia})
else:
models.update({
'basic': addon_forms.AddonFormBasicUnlisted,
'details': addon_forms.AddonFormDetailsUnlisted,
'technical': addon_forms.AddonFormTechnicalUnlisted
})
if section not in models:
raise http.Http404()
tags, previews, restricted_tags = [], [], []
cat_form = dependency_form = whiteboard_form = None
whiteboard = None
if section == 'basic' and show_listed:
tags = addon.tags.not_denied().values_list('tag_text', flat=True)
category_form_class = (forms.SingleCategoryForm if static_theme else
addon_forms.CategoryFormSet)
cat_form = category_form_class(
request.POST or None, addon=addon, request=request)
restricted_tags = addon.tags.filter(restricted=True)
elif section == 'media':
previews = forms.PreviewFormSet(
request.POST or None,
prefix='files', queryset=addon.previews.all())
elif section == 'technical' and show_listed and not static_theme:
dependency_form = forms.DependencyFormSet(
request.POST or None,
queryset=addon.addons_dependencies.all(), addon=addon,
prefix='dependencies')
if section == 'technical':
try:
whiteboard = Whiteboard.objects.get(pk=addon.pk)
except Whiteboard.DoesNotExist:
whiteboard = Whiteboard(pk=addon.pk)
whiteboard_form = PublicWhiteboardForm(request.POST or None,
instance=whiteboard,
prefix='whiteboard')
# Get the slug before the form replaces it with the submitted form data.
valid_slug = addon.slug
if editable:
if request.method == 'POST':
form = models[section](request.POST, request.FILES,
instance=addon, request=request)
if form.is_valid() and (not previews or previews.is_valid()):
addon = form.save(addon)
if previews:
for preview in previews.forms:
preview.save(addon)
editable = False
if section == 'media':
ActivityLog.create(amo.LOG.CHANGE_ICON, addon)
else:
ActivityLog.create(amo.LOG.EDIT_PROPERTIES, addon)
valid_slug = addon.slug
if cat_form:
if cat_form.is_valid():
cat_form.save()
addon.save()
else:
editable = True
if dependency_form:
if dependency_form.is_valid():
dependency_form.save()
else:
editable = True
if whiteboard_form:
if whiteboard_form.is_valid():
whiteboard_form.save()
else:
editable = True
else:
form = models[section](instance=addon, request=request)
else:
form = False
data = {
'addon': addon,
'whiteboard': whiteboard,
'show_listed_fields': show_listed,
'form': form,
'editable': editable,
'tags': tags,
'restricted_tags': restricted_tags,
'cat_form': cat_form,
'preview_form': previews,
'dependency_form': dependency_form,
'whiteboard_form': whiteboard_form,
'valid_slug': valid_slug,
'supported_image_types': amo.SUPPORTED_IMAGE_TYPES,
}
return render(request, 'devhub/addons/edit/%s.html' % section, data)
@never_cache
@dev_required(theme=True)
@json_view
def image_status(request, addon_id, addon, theme=False):
# Default icon needs no checking.
if not addon.icon_type or addon.icon_type.split('/')[0] == 'icon':
icons = True
# Persona icon is handled differently.
elif addon.type == amo.ADDON_PERSONA:
icons = True
else:
icons = storage.exists(os.path.join(addon.get_icon_dir(),
'%s-32.png' % addon.id))
previews = all(storage.exists(p.thumbnail_path)
for p in addon.previews.all())
return {'overall': icons and previews,
'icons': icons,
'previews': previews}
@json_view
def ajax_upload_image(request, upload_type, addon_id=None):
errors = []
upload_hash = ''
if 'upload_image' in request.FILES:
upload_preview = request.FILES['upload_image']
upload_preview.seek(0)
upload_hash = uuid4().hex
loc = os.path.join(settings.TMP_PATH, upload_type, upload_hash)
with storage.open(loc, 'wb') as fd:
for chunk in upload_preview:
fd.write(chunk)
is_icon = upload_type == 'icon'
is_persona = upload_type.startswith('persona_')
check = amo_utils.ImageCheck(upload_preview)
if (upload_preview.content_type not in amo.IMG_TYPES or
not check.is_image()):
if is_icon:
errors.append(ugettext('Icons must be either PNG or JPG.'))
else:
errors.append(ugettext('Images must be either PNG or JPG.'))
if check.is_animated():
if is_icon:
errors.append(ugettext('Icons cannot be animated.'))
else:
errors.append(ugettext('Images cannot be animated.'))
max_size = None
if is_icon:
max_size = settings.MAX_ICON_UPLOAD_SIZE
if is_persona:
max_size = settings.MAX_PERSONA_UPLOAD_SIZE
if max_size and upload_preview.size > max_size:
if is_icon:
errors.append(
ugettext('Please use images smaller than %dMB.')
% (max_size / 1024 / 1024 - 1))
if is_persona:
errors.append(
ugettext('Images cannot be larger than %dKB.')
% (max_size / 1024))
if check.is_image() and is_persona:
persona, img_type = upload_type.split('_') # 'header' or 'footer'
expected_size = amo.PERSONA_IMAGE_SIZES.get(img_type)[1]
with storage.open(loc, 'rb') as fp:
actual_size = Image.open(fp).size
if actual_size != expected_size:
# L10n: {0} is an image width (in pixels), {1} is a height.
errors.append(ugettext('Image must be exactly {0} pixels '
'wide and {1} pixels tall.')
.format(expected_size[0], expected_size[1]))
if errors and upload_type == 'preview' and os.path.exists(loc):
# Delete the temporary preview file in case of error.
os.unlink(loc)
else:
errors.append(ugettext('There was an error uploading your preview.'))
if errors:
upload_hash = ''
return {'upload_hash': upload_hash, 'errors': errors}
@dev_required
def upload_image(request, addon_id, addon, upload_type):
return ajax_upload_image(request, upload_type)
@dev_required
def version_edit(request, addon_id, addon, version_id):
version = get_object_or_404(Version.objects, pk=version_id, addon=addon)
static_theme = addon.type == amo.ADDON_STATICTHEME
version_form = forms.VersionForm(
request.POST or None,
request.FILES or None,
instance=version,
request=request,
) if not static_theme else None
file_form = forms.FileFormSet(request.POST or None, prefix='files',
queryset=version.files.all())
data = {'file_form': file_form}
if version_form:
data['version_form'] = version_form
is_admin = acl.action_allowed(request,
amo.permissions.REVIEWS_ADMIN)
if addon.accepts_compatible_apps():
qs = version.apps.all()
compat_form = forms.CompatFormSet(
request.POST or None, queryset=qs,
form_kwargs={'version': version})
data['compat_form'] = compat_form
if (request.method == 'POST' and
all([form.is_valid() for form in data.values()])):
data['file_form'].save()
if 'compat_form' in data:
for compat in data['compat_form'].save(commit=False):
compat.version = version
compat.save()
for compat in data['compat_form'].deleted_objects:
compat.delete()
for form in data['compat_form'].forms:
if (isinstance(form, forms.CompatForm) and
'max' in form.changed_data):
_log_max_version_change(addon, version, form.instance)
if 'version_form' in data:
# VersionForm.save() clears the pending info request if the
# developer specifically asked for it, but we've got additional
# things to do here that depend on it.
had_pending_info_request = bool(addon.pending_info_request)
data['version_form'].save()
if 'approvalnotes' in version_form.changed_data:
if had_pending_info_request:
log_and_notify(amo.LOG.APPROVAL_NOTES_CHANGED, None,
request.user, version)
else:
ActivityLog.create(amo.LOG.APPROVAL_NOTES_CHANGED,
addon, version, request.user)
if ('source' in version_form.changed_data and
version_form.cleaned_data['source']):
AddonReviewerFlags.objects.update_or_create(
addon=addon, defaults={'needs_admin_code_review': True})
if had_pending_info_request:
log_and_notify(amo.LOG.SOURCE_CODE_UPLOADED, None,
request.user, version)
else:
ActivityLog.create(amo.LOG.SOURCE_CODE_UPLOADED,
addon, version, request.user)
messages.success(request, ugettext('Changes successfully saved.'))
return redirect('devhub.versions.edit', addon.slug, version_id)
data.update(addon=addon, version=version,
is_admin=is_admin, choices=File.STATUS_CHOICES)
return render(request, 'devhub/versions/edit.html', data)
def _log_max_version_change(addon, version, appversion):
details = {'version': version.version,
'target': appversion.version.version,
'application': appversion.application}
ActivityLog.create(amo.LOG.MAX_APPVERSION_UPDATED,
addon, version, details=details)
@dev_required
@post_required
@transaction.atomic
def version_delete(request, addon_id, addon):
version_id = request.POST.get('version_id')
version = get_object_or_404(Version.objects, pk=version_id, addon=addon)
if 'disable_version' in request.POST:
messages.success(
request,
ugettext('Version %s disabled.') % version.version)
version.is_user_disabled = True # Will update the files/activity log.
version.addon.update_status()
else:
messages.success(
request,
ugettext('Version %s deleted.') % version.version)
version.delete()  # Will also log the activity.
return redirect(addon.get_dev_url('versions'))
@dev_required
@post_required
@transaction.atomic
def version_reenable(request, addon_id, addon):
version_id = request.POST.get('version_id')
version = get_object_or_404(Version.objects, pk=version_id, addon=addon)
messages.success(
request,
ugettext('Version %s re-enabled.') % version.version)
version.is_user_disabled = False # Will update the files/activity log.
version.addon.update_status()
return redirect(addon.get_dev_url('versions'))
def check_validation_override(request, form, addon, version):
if version and form.cleaned_data.get('admin_override_validation'):
helper = ReviewHelper(request=request, addon=addon, version=version)
helper.set_data({
'operating_systems': '',
'applications': '',
'comments': ugettext(
u'This upload has failed validation, and may '
u'lack complete validation results. Please '
u'take due care when reviewing it.')})
helper.actions['super']['method']()
def auto_sign_file(file_):
"""If the file should be automatically reviewed and signed, do it."""
addon = file_.version.addon
if file_.is_experiment: # See bug 1220097.
ActivityLog.create(amo.LOG.EXPERIMENT_SIGNED, file_)
sign_file(file_)
elif file_.version.channel == amo.RELEASE_CHANNEL_UNLISTED:
# Sign automatically without manual review.
helper = ReviewHelper(request=None, addon=addon,
version=file_.version)
# Provide the file to review/sign to the helper.
helper.set_data({'addon_files': [file_],
'comments': 'automatic validation'})
helper.handler.process_public()
ActivityLog.create(amo.LOG.UNLISTED_SIGNED, file_)
def auto_sign_version(version, **kwargs):
# Sign all the unapproved files submitted, one for each platform.
for file_ in version.files.exclude(status=amo.STATUS_PUBLIC):
auto_sign_file(file_, **kwargs)
@dev_required
def version_list(request, addon_id, addon):
qs = addon.versions.order_by('-created').transform(Version.transformer)
versions = amo_utils.paginate(request, qs)
is_admin = acl.action_allowed(request,
amo.permissions.REVIEWS_ADMIN)
token = request.COOKIES.get(API_TOKEN_COOKIE, None)
data = {'addon': addon,
'versions': versions,
'token': token,
'is_admin': is_admin}
return render(request, 'devhub/versions/list.html', data)
@dev_required
def version_bounce(request, addon_id, addon, version):
# Use filter since there could be dupes.
vs = (Version.objects.filter(version=version, addon=addon)
.order_by('-created'))
if vs:
return redirect('devhub.versions.edit', addon.slug, vs[0].id)
else:
raise http.Http404()
@json_view
@dev_required
def version_stats(request, addon_id, addon):
qs = Version.objects.filter(addon=addon)
reviews = (qs.annotate(review_count=Count('ratings'))
.values('id', 'version', 'review_count'))
data = {v['id']: v for v in reviews}
files = (
qs.annotate(file_count=Count('files')).values_list('id', 'file_count'))
for id_, file_count in files:
# For backwards compatibility
data[id_]['files'] = file_count
data[id_]['reviews'] = data[id_].pop('review_count')
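# Resulting shape (illustrative): {version_pk: {'id': ..., 'version': '1.0',
# 'reviews': <rating count>, 'files': <file count>}, ...}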
return data
def get_next_version_number(addon):
if not addon:
return '1.0'
last_version = Version.unfiltered.filter(addon=addon).last()
version_int_parts = compare.dict_from_int(last_version.version_int)
version_counter = 1
while True:
next_version = '%s.0' % (version_int_parts['major'] + version_counter)
if not Version.unfiltered.filter(addon=addon,
version=next_version).exists():
return next_version
else:
version_counter += 1
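# Worked example (assuming a last version of '2.1.3'): the major part is 2, so
# the function proposes '3.0'; if '3.0' already exists for this add-on it keeps
# counting ('4.0', '5.0', ...) until an unused version string is found.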
@login_required
def submit_addon(request):
return render_agreement(request, 'devhub/addons/submit/start.html',
'devhub.submit.distribution')
@dev_required
def submit_version_agreement(request, addon_id, addon):
return render_agreement(
request, 'devhub/addons/submit/start.html',
reverse('devhub.submit.version', args=(addon.slug,)),
submit_page='version')
@transaction.atomic
def _submit_distribution(request, addon, next_view):
# Accept GET for the first load so we can preselect the channel.
form = forms.DistributionChoiceForm(
request.POST if request.method == 'POST' else
request.GET if request.GET.get('channel') else None)
if request.method == 'POST' and form.is_valid():
data = form.cleaned_data
args = [addon.slug] if addon else []
args.append(data['channel'])
return redirect(next_view, *args)
return render(request, 'devhub/addons/submit/distribute.html',
{'distribution_form': form,
'submit_notification_warning':
get_config('submit_notification_warning'),
'submit_page': 'version' if addon else 'addon'})
@login_required
def submit_addon_distribution(request):
if not request.user.has_read_developer_agreement():
return redirect('devhub.submit.agreement')
return _submit_distribution(request, None, 'devhub.submit.upload')
@dev_required(submitting=True)
def submit_version_distribution(request, addon_id, addon):
if not request.user.has_read_developer_agreement():
return redirect('devhub.submit.version.agreement', addon.slug)
return _submit_distribution(request, addon, 'devhub.submit.version.upload')
@transaction.atomic
def _submit_upload(request, addon, channel, next_details, next_finish,
version=None, wizard=False):
""" If this is a new addon upload `addon` will be None (and `version`);
if this is a new version upload `version` will be None; a new file for a
version will need both an addon and a version supplied.
next_details is the view that will be redirected to when details are needed
(for listed, addons/versions); next_finish is the finishing view
when no details step is needed (for unlisted addons/versions).
"""
form = forms.NewUploadForm(
request.POST or None,
request.FILES or None,
addon=addon,
version=version,
request=request
)
if request.method == 'POST' and form.is_valid():
data = form.cleaned_data
if version:
for platform in data.get('supported_platforms', []):
File.from_upload(
upload=data['upload'],
version=version,
platform=platform,
parsed_data=data['parsed_data'])
url_args = [addon.slug, version.id]
elif addon:
version = Version.from_upload(
upload=data['upload'],
addon=addon,
platforms=data.get('supported_platforms', []),
channel=channel,
source=data['source'],
parsed_data=data['parsed_data'])
url_args = [addon.slug, version.id]
else:
addon = Addon.from_upload(
upload=data['upload'],
platforms=data.get('supported_platforms', []),
source=data['source'],
channel=channel,
parsed_data=data['parsed_data'],
user=request.user)
version = addon.find_latest_version(channel=channel)
url_args = [addon.slug]
check_validation_override(request, form, addon, version)
if data.get('source'):
AddonReviewerFlags.objects.update_or_create(
addon=addon, defaults={'needs_admin_code_review': True})
activity_log = ActivityLog.objects.create(
action=amo.LOG.SOURCE_CODE_UPLOADED.id,
user=request.user,
details={
'comments': (u'This version has been automatically '
u'flagged for admin review, as it had source '
u'files attached when submitted.')})
VersionLog.objects.create(version_id=version.id,
activity_log=activity_log)
if (addon.status == amo.STATUS_NULL and
addon.has_complete_metadata() and
channel == amo.RELEASE_CHANNEL_LISTED):
addon.update(status=amo.STATUS_NOMINATED)
# auto-sign versions (the method checks eligibility)
auto_sign_version(version)
add_dynamic_theme_tag(version)
next_url = (next_details
if channel == amo.RELEASE_CHANNEL_LISTED
else next_finish)
return redirect(next_url, *url_args)
is_admin = acl.action_allowed(request,
amo.permissions.REVIEWS_ADMIN)
if addon:
channel_choice_text = (forms.DistributionChoiceForm().LISTED_LABEL
if channel == amo.RELEASE_CHANNEL_LISTED else
forms.DistributionChoiceForm().UNLISTED_LABEL)
else:
channel_choice_text = '' # We only need this for Version upload.
submit_page = 'file' if version else 'version' if addon else 'addon'
template = ('devhub/addons/submit/upload.html' if not wizard else
'devhub/addons/submit/wizard.html')
return render(request, template,
{'new_addon_form': form,
'is_admin': is_admin,
'addon': addon,
'submit_notification_warning':
get_config('submit_notification_warning'),
'submit_page': submit_page,
'channel': channel,
'channel_choice_text': channel_choice_text,
'version_number':
get_next_version_number(addon) if wizard else None})
@login_required
def submit_addon_upload(request, channel):
channel_id = amo.CHANNEL_CHOICES_LOOKUP[channel]
return _submit_upload(request, None, channel_id,
'devhub.submit.details', 'devhub.submit.finish')
@dev_required(submitting=True)
@no_admin_disabled
def submit_version_upload(request, addon_id, addon, channel):
channel_id = amo.CHANNEL_CHOICES_LOOKUP[channel]
return _submit_upload(request, addon, channel_id,
'devhub.submit.version.details',
'devhub.submit.version.finish')
@dev_required
@no_admin_disabled
def submit_version_auto(request, addon_id, addon):
if not request.user.has_read_developer_agreement():
return redirect('devhub.submit.version.agreement', addon.slug)
# choose the channel we need from the last upload
last_version = addon.find_latest_version(None, exclude=())
if not last_version:
return redirect('devhub.submit.version.distribution', addon.slug)
channel = last_version.channel
return _submit_upload(request, addon, channel,
'devhub.submit.version.details',
'devhub.submit.version.finish')
@login_required
@waffle.decorators.waffle_switch('allow-static-theme-uploads')
def submit_addon_theme_wizard(request, channel):
channel_id = amo.CHANNEL_CHOICES_LOOKUP[channel]
return _submit_upload(request, None, channel_id,
'devhub.submit.details', 'devhub.submit.finish',
wizard=True)
@dev_required
@no_admin_disabled
@waffle.decorators.waffle_switch('allow-static-theme-uploads')
def submit_version_theme_wizard(request, addon_id, addon, channel):
channel_id = amo.CHANNEL_CHOICES_LOOKUP[channel]
return _submit_upload(request, addon, channel_id,
'devhub.submit.version.details',
'devhub.submit.version.finish', wizard=True)
@dev_required
def submit_file(request, addon_id, addon, version_id):
version = get_object_or_404(Version, id=version_id)
return _submit_upload(request, addon, version.channel,
'devhub.submit.file.finish',
'devhub.submit.file.finish',
version=version)
def _submit_details(request, addon, version):
static_theme = addon.type == amo.ADDON_STATICTHEME
if version:
skip_details_step = (version.channel == amo.RELEASE_CHANNEL_UNLISTED or
(static_theme and addon.has_complete_metadata()))
if skip_details_step:
# Nothing to do here.
return redirect(
'devhub.submit.version.finish', addon.slug, version.pk)
latest_version = version
else:
# Figure out the latest version early in order to pass the same
# instance to each form that needs it (otherwise they might overwrite
# each other).
latest_version = addon.find_latest_version(
channel=amo.RELEASE_CHANNEL_LISTED)
if not latest_version:
# No listed version? Then nothing to do in the listed submission
# flow.
return redirect('devhub.submit.finish', addon.slug)
forms_list = []
context = {
'addon': addon,
'version': version,
'submit_page': 'version' if version else 'addon',
}
post_data = request.POST if request.method == 'POST' else None
show_all_fields = not version or not addon.has_complete_metadata()
if show_all_fields:
describe_form = forms.DescribeForm(
post_data, instance=addon, request=request)
cat_form_class = (addon_forms.CategoryFormSet if not static_theme
else forms.SingleCategoryForm)
cat_form = cat_form_class(post_data, addon=addon, request=request)
license_form = forms.LicenseForm(
post_data, version=latest_version, prefix='license')
context.update(license_form.get_context())
context.update(form=describe_form, cat_form=cat_form)
forms_list.extend([describe_form, cat_form, context['license_form']])
if not static_theme:
# Static themes don't need this form
reviewer_form = forms.VersionForm(
post_data, instance=latest_version, request=request)
context.update(reviewer_form=reviewer_form)
forms_list.append(reviewer_form)
if request.method == 'POST' and all(
form.is_valid() for form in forms_list):
if show_all_fields:
addon = describe_form.save()
cat_form.save()
license_form.save(log=False)
if not static_theme:
reviewer_form.save()
if addon.status == amo.STATUS_NULL:
addon.update(status=amo.STATUS_NOMINATED)
signals.submission_done.send(sender=addon)
elif not static_theme:
reviewer_form.save()
if not version:
return redirect('devhub.submit.finish', addon.slug)
else:
return redirect('devhub.submit.version.finish',
addon.slug, version.id)
template = 'devhub/addons/submit/%s' % (
'describe.html' if show_all_fields else 'describe_minimal.html')
return render(request, template, context)
@dev_required(submitting=True)
def submit_addon_details(request, addon_id, addon):
return _submit_details(request, addon, None)
@dev_required(submitting=True)
def submit_version_details(request, addon_id, addon, version_id):
version = get_object_or_404(Version, id=version_id)
return _submit_details(request, addon, version)
def _submit_finish(request, addon, version, is_file=False):
uploaded_version = version or addon.versions.latest()
try:
author = addon.authors.all()[0]
except IndexError:
# This should never happen.
author = None
if (not version and author and
uploaded_version.channel == amo.RELEASE_CHANNEL_LISTED and
not Version.objects.exclude(pk=uploaded_version.pk)
.filter(addon__authors=author,
channel=amo.RELEASE_CHANNEL_LISTED)
.exclude(addon__status=amo.STATUS_NULL)
.exists()):
# If that's the first time this developer has submitted a listed add-on
# (no other listed Version by this author exists) send them a welcome
# email.
# We can use locale-prefixed URLs because the submitter probably
# speaks the same language by the time he/she reads the email.
context = {
'addon_name': unicode(addon.name),
'app': unicode(request.APP.pretty),
'detail_url': absolutify(addon.get_url_path()),
'version_url': absolutify(addon.get_dev_url('versions')),
'edit_url': absolutify(addon.get_dev_url('edit')),
}
tasks.send_welcome_email.delay(addon.id, [author.email], context)
submit_page = 'file' if is_file else 'version' if version else 'addon'
return render(request, 'devhub/addons/submit/done.html',
{'addon': addon,
'uploaded_version': uploaded_version,
'submit_page': submit_page,
'preview': uploaded_version.previews.first()})
@dev_required(submitting=True)
def submit_addon_finish(request, addon_id, addon):
# Bounce to the details step if incomplete
if (not addon.has_complete_metadata() and
addon.find_latest_version(channel=amo.RELEASE_CHANNEL_LISTED)):
return redirect('devhub.submit.details', addon.slug)
# Bounce to the versions page if they don't have any versions.
if not addon.versions.exists():
return redirect('devhub.submit.version', addon.slug)
return _submit_finish(request, addon, None)
@dev_required
def submit_version_finish(request, addon_id, addon, version_id):
version = get_object_or_404(Version, id=version_id)
return _submit_finish(request, addon, version)
@dev_required
def submit_file_finish(request, addon_id, addon, version_id):
version = get_object_or_404(Version, id=version_id)
return _submit_finish(request, addon, version, is_file=True)
@login_required
def submit_theme(request):
if waffle.switch_is_active('disable-lwt-uploads'):
return redirect('devhub.submit.agreement')
else:
return submit_lwt_theme(request)
def submit_lwt_theme(request):
data = {}
if request.method == 'POST':
data = request.POST.dict()
if 'unsaved_data' in request.session and data['unsaved_data'] == '{}':
# Restore unsaved data on second invalid POST.
data['unsaved_data'] = request.session['unsaved_data']
form = addon_forms.ThemeForm(data=data or None,
files=request.FILES or None,
request=request)
if request.method == 'POST':
if form.is_valid():
addon = form.save()
return redirect('devhub.themes.submit.done', addon.slug)
else:
# Store unsaved data in request.session since it gets lost on
# second invalid POST.
messages.error(
request,
ugettext('Please check the form for errors.'))
request.session['unsaved_data'] = data['unsaved_data']
return render(request, 'devhub/personas/submit.html', dict(form=form))
@dev_required(theme=True)
def submit_theme_done(request, addon_id, addon, theme):
if addon.is_public():
return redirect(addon.get_url_path())
return render(request, 'devhub/personas/submit_done.html',
dict(addon=addon))
@dev_required(theme=True)
@post_required
def remove_locale(request, addon_id, addon, theme):
POST = request.POST
if 'locale' in POST and POST['locale'] != addon.default_locale:
addon.remove_locale(POST['locale'])
return http.HttpResponse()
return http.HttpResponseBadRequest()
@dev_required
@post_required
def request_review(request, addon_id, addon):
if not addon.can_request_review():
return http.HttpResponseBadRequest()
latest_version = addon.find_latest_version(amo.RELEASE_CHANNEL_LISTED,
exclude=())
if latest_version:
for f in latest_version.files.filter(status=amo.STATUS_DISABLED):
f.update(status=amo.STATUS_AWAITING_REVIEW)
# Clear the nomination date so it gets set again in Addon.watch_status.
latest_version.update(nomination=None)
if addon.has_complete_metadata():
addon.update(status=amo.STATUS_NOMINATED)
messages.success(request, ugettext('Review requested.'))
else:
messages.success(request, _(
'You must provide further details to proceed.'))
ActivityLog.create(amo.LOG.CHANGE_STATUS, addon, addon.status)
return redirect(addon.get_dev_url('versions'))
def docs(request, doc_name=None):
mdn_docs = {
None: '',
'getting-started': '',
'reference': '',
'how-to': '',
'how-to/getting-started': '',
'how-to/extension-development': '#Extensions',
'how-to/other-addons': '#Other_types_of_add-ons',
'how-to/thunderbird-mobile': '#Application-specific',
'how-to/theme-development': '#Themes',
'themes': '/Themes/Background',
'themes/faq': '/Themes/Background/FAQ',
'policies': '/AMO/Policy',
'policies/reviews': '/AMO/Policy/Reviews',
'policies/contact': '/AMO/Policy/Contact',
'policies/agreement': '/AMO/Policy/Agreement',
}
if doc_name in mdn_docs:
return redirect(MDN_BASE + mdn_docs[doc_name],
permanent=True)
raise http.Http404()
@login_required
def api_key_agreement(request):
next_step = reverse('devhub.api_key')
return render_agreement(request, 'devhub/api/agreement.html', next_step)
def render_agreement(request, template, next_step, **extra_context):
form = AgreementForm(request.POST if request.method == 'POST' else None)
if request.method == 'POST' and form.is_valid():
# The developer has submitted a valid form: update their profile and
# redirect to the next step.
request.user.update(read_dev_agreement=datetime.datetime.now())
return redirect(next_step)
elif not request.user.has_read_developer_agreement():
# Developer has either posted an invalid form or just landed on the
# page but hasn't read the agreement yet: show the form (with
# potential errors highlighted)
context = {
'agreement_form': form,
}
context.update(extra_context)
return render(request, template, context)
else:
# The developer has already read the agreement; we should just redirect
# to the next step.
response = redirect(next_step)
return response
@login_required
@transaction.atomic
def api_key(request):
if not request.user.has_read_developer_agreement():
return redirect(reverse('devhub.api_key_agreement'))
try:
credentials = APIKey.get_jwt_key(user=request.user)
except APIKey.DoesNotExist:
credentials = None
if request.method == 'POST' and request.POST.get('action') == 'generate':
if credentials:
log.info('JWT key was made inactive: {}'.format(credentials))
credentials.update(is_active=None)
msg = _(
'Your old credentials were revoked and are no longer valid. '
'Be sure to update all API clients with the new credentials.')
messages.success(request, msg)
new_credentials = APIKey.new_jwt_credentials(request.user)
log.info('new JWT key created: {}'.format(new_credentials))
send_key_change_email(request.user.email, new_credentials.key)
return redirect(reverse('devhub.api_key'))
if request.method == 'POST' and request.POST.get('action') == 'revoke':
credentials.update(is_active=None)
log.info('revoking JWT key for user: {}, {}'
.format(request.user.id, credentials))
send_key_revoked_email(request.user.email, credentials.key)
msg = ugettext(
'Your old credentials were revoked and are no longer valid.')
messages.success(request, msg)
return redirect(reverse('devhub.api_key'))
return render(request, 'devhub/api/key.html',
{'title': ugettext('Manage API Keys'),
'credentials': credentials})
def send_key_change_email(to_email, key):
template = loader.get_template('devhub/email/new-key-email.ltxt')
url = absolutify(reverse('devhub.api_key'))
send_mail(
ugettext('New API key created'),
template.render({'key': key, 'url': url}),
from_email=settings.DEFAULT_FROM_EMAIL,
recipient_list=[to_email],
)
def send_key_revoked_email(to_email, key):
template = loader.get_template('devhub/email/revoked-key-email.ltxt')
url = absolutify(reverse('devhub.api_key'))
send_mail(
ugettext('API key revoked'),
template.render({'key': key, 'url': url}),
from_email=settings.DEFAULT_FROM_EMAIL,
recipient_list=[to_email],
)
| {
"content_hash": "7e7d8d0f0158f4cbf11063a0772912d6",
"timestamp": "",
"source": "github",
"line_count": 1806,
"max_line_length": 79,
"avg_line_length": 38.27408637873754,
"alnum_prop": 0.6108965178016,
"repo_name": "lavish205/olympia",
"id": "b614cb4d572f0771caa7e717e9394afdbbc2d6c3",
"size": "69123",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/olympia/devhub/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "808053"
},
{
"name": "HTML",
"bytes": "614229"
},
{
"name": "JavaScript",
"bytes": "1075018"
},
{
"name": "Makefile",
"bytes": "820"
},
{
"name": "PLSQL",
"bytes": "1074"
},
{
"name": "PLpgSQL",
"bytes": "2381"
},
{
"name": "Python",
"bytes": "5064850"
},
{
"name": "SQLPL",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "11467"
},
{
"name": "Smarty",
"bytes": "1758"
}
],
"symlink_target": ""
} |
"""heatmap visualization."""
from django.conf import settings
from apps.managers.team_mgr.models import Group
def supply(request, page_name):
""" Handle the request for viz_heatmap widget."""
_ = page_name
_ = request
all_groups = Group.objects.order_by('name').all()
for group in all_groups:
group.teams = group.team_set.order_by('-name').all()
for team in group.teams:
wattdepot_source_name = team.energygoalsetting_set.all()[0].wattdepot_source_name
if not wattdepot_source_name:
wattdepot_source_name = team.name
if settings.MAKAHIKI_USE_WATTDEPOT3:
wattdepot_source_name = wattdepot_source_name.lower()
team.wattdepot_source_name = wattdepot_source_name
if settings.MAKAHIKI_USE_WATTDEPOT3:
wattdepot_version = "WATTDEPOT3"
else:
wattdepot_version = "WATTDEPOT2"
return {
"all_groups": all_groups,
"wattdepot_version": wattdepot_version,
}
| {
"content_hash": "31273fb4fc908fe925145ad15d847a8d",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 93,
"avg_line_length": 29.142857142857142,
"alnum_prop": 0.634313725490196,
"repo_name": "csdl/makahiki",
"id": "94b20d57dbb316bddee16e5aa3972b9a434422a7",
"size": "1020",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "makahiki/apps/widgets/viz_heatmap/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "107603"
},
{
"name": "HTML",
"bytes": "568630"
},
{
"name": "JavaScript",
"bytes": "244377"
},
{
"name": "Python",
"bytes": "1492680"
},
{
"name": "Shell",
"bytes": "20118"
}
],
"symlink_target": ""
} |
import psycopg2
def connect():
"""Connect to the PostgreSQL database. Returns a database connection."""
return psycopg2.connect("dbname=tournament")
def deleteMatches():
"""Remove all the match records from the database."""
db = connect()
db_cursor = db.cursor()
query = "DELETE FROM matches"
db_cursor.execute(query)
db.commit()
db.close()
def deletePlayers():
"""Remove all the player records from the database."""
db = connect()
db_cursor = db.cursor()
query = "DELETE FROM players"
db_cursor.execute(query)
db.commit()
db.close()
def countPlayers():
"""Returns the number of players currently registered."""
db = connect()
db_cursor = db.cursor()
query = "SELECT count(*) FROM players"
db_cursor.execute(query)
ret = db_cursor.fetchone()
db.close()
return ret[0]
def registerPlayer(name):
"""Adds a player to the tournament database.
The database assigns a unique serial id number for the player. (This
should be handled by your SQL database schema, not in your Python code.)
Args:
name: the player's full name (need not be unique).
"""
db = connect()
db_cursor = db.cursor()
query = "INSERT INTO players (name) VALUES (%s)"
db_cursor.execute(query, (name,))
db.commit()
db.close()
def playerStandings():
"""Returns a list of the players and their win records, sorted by wins.
The first entry in the list should be the player in first place, or a player
tied for first place if there is currently a tie.
Returns:
A list of tuples, each of which contains (id, name, wins, matches):
id: the player's unique id (assigned by the database)
name: the player's full name (as registered)
wins: the number of matches the player has won
matches: the number of matches the player has played
"""
db = connect()
db_cursor = db.cursor()
query = "SELECT * FROM player_standings"
db_cursor.execute(query)
ret = db_cursor.fetchall()
db.close()
return ret
def reportMatch(winner, loser):
"""Records the outcome of a single match between two players.
Args:
winner: the id number of the player who won
loser: the id number of the player who lost
"""
db = connect()
db_cursor = db.cursor()
query = "INSERT INTO matches (winner, loser) VALUES (%s, %s)"
db_cursor.execute(query, (winner, loser))
db.commit()
db.close()
def swissPairings():
"""Returns a list of pairs of players for the next round of a match.
Assuming that there are an even number of players registered, each player
appears exactly once in the pairings. Each player is paired with another
player with an equal or nearly-equal win record, that is, a player adjacent
to him or her in the standings.
Returns:
A list of tuples, each of which contains (id1, name1, id2, name2)
id1: the first player's unique id
name1: the first player's name
id2: the second player's unique id
name2: the second player's name
"""
db = connect()
db_cursor = db.cursor()
    # Retrieve the odd and even rows (numbered by wins) from the player
    # standings views and join consecutive rows to pair players for the
    # next match.
query = """SELECT odd.id, odd.name, even.id, even.name
FROM player_standings_odd odd, player_standings_even even
WHERE odd.position+1=even.position"""
db_cursor.execute(query)
players = db_cursor.fetchall()
db.close()
return players
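# Illustrative sketch, not part of the original module: one complete cycle
# through the functions above, assuming the tournament database and its
# player_standings / player_standings_odd / player_standings_even views
# already exist (e.g. as created by the accompanying SQL schema).
def _example_round():
    deleteMatches()
    deletePlayers()
    for name in ('Ada', 'Grace', 'Alan', 'Edsger'):
        registerPlayer(name)
    for id1, _name1, id2, _name2 in swissPairings():
        reportMatch(id1, id2)  # arbitrarily record the first player as winner
    return playerStandings()  # the two winners now sort to the top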
| {
"content_hash": "cdccebbd316ba9a1dd23e80ffda2a180",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 140,
"avg_line_length": 29.4390243902439,
"alnum_prop": 0.6498204915769125,
"repo_name": "guillermogfer/tournament-results",
"id": "7adfb4c127bda3e01986809a2229e1821606e4e7",
"size": "3711",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tournament.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10031"
}
],
"symlink_target": ""
} |
"""Package for provider specific object_storage_service implementations."""
__path__ = __import__('pkgutil').extend_path(__path__, __name__)
| {
"content_hash": "a2d784158d6a341b5c6ddc3c5ad408c5",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 75,
"avg_line_length": 70.5,
"alnum_prop": 0.6808510638297872,
"repo_name": "GoogleCloudPlatform/PerfKitBenchmarker",
"id": "f509ee3882bff959d2793b555d8c11a9ef48f93f",
"size": "141",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "perfkitbenchmarker/scripts/object_storage_api_test_scripts/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3420"
},
{
"name": "HTML",
"bytes": "113073"
},
{
"name": "Jinja",
"bytes": "62005"
},
{
"name": "Lua",
"bytes": "1547"
},
{
"name": "Python",
"bytes": "6076512"
},
{
"name": "R",
"bytes": "1017"
},
{
"name": "Shell",
"bytes": "76164"
},
{
"name": "Tcl",
"bytes": "14601"
}
],
"symlink_target": ""
} |
import collections
import errno
import logging
import math
import os
import re
import threading
import urllib
import urlparse
import json
import time
import Queue
import datetime
import dateutil.parser
try:
import ordereddict
except ImportError:
pass
import requests
import requests.utils
import gertty.version
from gertty import gitrepo
HIGH_PRIORITY=0
NORMAL_PRIORITY=1
LOW_PRIORITY=2
TIMEOUT=30
CLOSED_STATUSES = ['MERGED', 'ABANDONED']
class MultiQueue(object):
def __init__(self, priorities):
try:
self.queues = collections.OrderedDict()
except AttributeError:
self.queues = ordereddict.OrderedDict()
for key in priorities:
self.queues[key] = collections.deque()
self.condition = threading.Condition()
def qsize(self):
count = 0
for queue in self.queues.values():
count += len(queue)
return count
def put(self, item, priority):
added = False
self.condition.acquire()
try:
if item not in self.queues[priority]:
self.queues[priority].append(item)
added = True
self.condition.notify()
finally:
self.condition.release()
return added
def get(self):
self.condition.acquire()
try:
while True:
for queue in self.queues.values():
try:
ret = queue.popleft()
return ret
except IndexError:
pass
self.condition.wait()
finally:
self.condition.release()
def find(self, klass, priority):
results = []
self.condition.acquire()
try:
for item in self.queues[priority]:
if isinstance(item, klass):
results.append(item)
finally:
self.condition.release()
return results
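# Illustrative sketch, not part of gertty: MultiQueue drains higher-priority
# deques before lower ones, and put() refuses to add an item that is already
# queued at the same priority.  This helper is never called by the sync
# machinery.
def _multiqueue_example():
    q = MultiQueue([HIGH_PRIORITY, NORMAL_PRIORITY, LOW_PRIORITY])
    assert q.put('low task', LOW_PRIORITY)
    assert q.put('high task', HIGH_PRIORITY)
    assert not q.put('high task', HIGH_PRIORITY)  # duplicate rejected
    assert q.qsize() == 2
    assert q.get() == 'high task'  # served before the low-priority item
    assert q.get() == 'low task'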
class UpdateEvent(object):
def updateRelatedChanges(self, session, change):
related_change_keys = set()
related_change_keys.add(change.key)
for revision in change.revisions:
parent = session.getRevisionByCommit(revision.parent)
if parent:
related_change_keys.add(parent.change.key)
for child in session.getRevisionsByParent(revision.commit):
related_change_keys.add(child.change.key)
self.related_change_keys = related_change_keys
class ProjectAddedEvent(UpdateEvent):
def __repr__(self):
return '<ProjectAddedEvent project_key:%s>' % (
self.project_key,)
def __init__(self, project):
self.project_key = project.key
class ChangeAddedEvent(UpdateEvent):
def __repr__(self):
return '<ChangeAddedEvent project_key:%s change_key:%s>' % (
self.project_key, self.change_key)
def __init__(self, change):
self.project_key = change.project.key
self.change_key = change.key
self.related_change_keys = set()
self.review_flag_changed = True
self.status_changed = True
self.held_changed = False
class ChangeUpdatedEvent(UpdateEvent):
def __repr__(self):
return '<ChangeUpdatedEvent project_key:%s change_key:%s review_flag_changed:%s status_changed:%s>' % (
self.project_key, self.change_key, self.review_flag_changed, self.status_changed)
def __init__(self, change):
self.project_key = change.project.key
self.change_key = change.key
self.related_change_keys = set()
self.review_flag_changed = False
self.status_changed = False
self.held_changed = False
class Task(object):
def __init__(self, priority=NORMAL_PRIORITY):
self.log = logging.getLogger('gertty.sync')
self.priority = priority
self.succeeded = None
self.event = threading.Event()
self.tasks = []
self.results = []
def complete(self, success):
self.succeeded = success
self.event.set()
def wait(self, timeout=None):
self.event.wait(timeout)
return self.succeeded
def __eq__(self, other):
raise NotImplementedError()
class SyncOwnAccountTask(Task):
def __repr__(self):
return '<SyncOwnAccountTask>'
def __eq__(self, other):
if other.__class__ == self.__class__:
return True
return False
def run(self, sync):
app = sync.app
remote = sync.get('accounts/self')
sync.account_id = remote['_account_id']
with app.db.getSession() as session:
session.getAccountByID(remote['_account_id'],
remote.get('name'),
remote.get('username'),
remote.get('email'))
class SyncProjectListTask(Task):
def __repr__(self):
return '<SyncProjectListTask>'
def __eq__(self, other):
if other.__class__ == self.__class__:
return True
return False
def run(self, sync):
app = sync.app
remote = sync.get('projects/?d')
remote_keys = set(remote.keys())
with app.db.getSession() as session:
local = {}
for p in session.getProjects():
local[p.name] = p
local_keys = set(local.keys())
for name in local_keys-remote_keys:
session.delete(local[name])
for name in remote_keys-local_keys:
p = remote[name]
project = session.createProject(name,
description=p.get('description', ''))
self.log.info("Created project %s", project.name)
self.results.append(ProjectAddedEvent(project))
class SyncSubscribedProjectBranchesTask(Task):
def __repr__(self):
return '<SyncSubscribedProjectBranchesTask>'
def __eq__(self, other):
if other.__class__ == self.__class__:
return True
return False
def run(self, sync):
app = sync.app
with app.db.getSession() as session:
projects = session.getProjects(subscribed=True)
for p in projects:
sync.submitTask(SyncProjectBranchesTask(p.name, self.priority))
class SyncProjectBranchesTask(Task):
branch_re = re.compile(r'refs/heads/(.*)')
def __init__(self, project_name, priority=NORMAL_PRIORITY):
super(SyncProjectBranchesTask, self).__init__(priority)
self.project_name = project_name
def __repr__(self):
return '<SyncProjectBranchesTask %s>' % (self.project_name,)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.project_name == self.project_name):
return True
return False
def run(self, sync):
app = sync.app
remote = sync.get('projects/%s/branches/' % urllib.quote_plus(self.project_name))
remote_branches = set()
for x in remote:
m = self.branch_re.match(x['ref'])
if m:
remote_branches.add(m.group(1))
with app.db.getSession() as session:
local = {}
project = session.getProjectByName(self.project_name)
for branch in project.branches:
local[branch.name] = branch
local_branches = set(local.keys())
for name in local_branches-remote_branches:
session.delete(local[name])
self.log.info("Deleted branch %s from project %s in local DB.", name, project.name)
for name in remote_branches-local_branches:
project.createBranch(name)
self.log.info("Added branch %s to project %s in local DB.", name, project.name)
class SyncSubscribedProjectsTask(Task):
def __repr__(self):
return '<SyncSubscribedProjectsTask>'
def __eq__(self, other):
if (other.__class__ == self.__class__):
return True
return False
def run(self, sync):
app = sync.app
with app.db.getSession() as session:
keys = [p.key for p in session.getProjects(subscribed=True)]
for i in range(0, len(keys), 10):
t = SyncProjectTask(keys[i:i+10], self.priority)
self.tasks.append(t)
sync.submitTask(t)
t = SyncQueriedChangesTask('owner', 'is:owner', self.priority)
self.tasks.append(t)
sync.submitTask(t)
t = SyncQueriedChangesTask('starred', 'is:starred', self.priority)
self.tasks.append(t)
sync.submitTask(t)
class SyncProjectTask(Task):
def __init__(self, project_keys, priority=NORMAL_PRIORITY):
super(SyncProjectTask, self).__init__(priority)
if type(project_keys) == int:
project_keys = [project_keys]
self.project_keys = project_keys
def __repr__(self):
return '<SyncProjectTask %s>' % (self.project_keys,)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.project_keys == self.project_keys):
return True
return False
def run(self, sync):
app = sync.app
now = datetime.datetime.utcnow()
queries = []
with app.db.getSession() as session:
for project_key in self.project_keys:
project = session.getProject(project_key)
query = 'q=project:%s' % project.name
if project.updated:
# Allow 4 seconds for request time, etc.
query += ' -age:%ss' % (int(math.ceil((now-project.updated).total_seconds())) + 4,)
else:
query += ' status:open'
queries.append(query)
changes = []
sortkey = ''
done = False
while not done:
query = '&'.join(queries)
# We don't actually want to limit to 500, but that's the server-side default, and
# if we don't specify this, we won't get a _more_changes flag.
q = 'changes/?n=500%s&%s' % (sortkey, query)
self.log.debug('Query: %s ' % (q,))
responses = sync.get(q)
if len(queries) == 1:
responses = [responses]
done = True
for batch in responses:
changes += batch
if batch and '_more_changes' in batch[-1]:
sortkey = '&N=%s' % (batch[-1]['_sortkey'],)
done = False
change_ids = [c['id'] for c in changes]
with app.db.getSession() as session:
# Winnow the list of IDs to only the ones in the local DB.
change_ids = session.getChangeIDs(change_ids)
for c in changes:
                # For now, just sync open changes or changes already in the
                # db; optionally we could sync all changes ever.
if c['id'] in change_ids or (c['status'] not in CLOSED_STATUSES):
sync.submitTask(SyncChangeTask(c['id'], priority=self.priority))
for key in self.project_keys:
sync.submitTask(SetProjectUpdatedTask(key, now, priority=self.priority))
class SetProjectUpdatedTask(Task):
def __init__(self, project_key, updated, priority=NORMAL_PRIORITY):
super(SetProjectUpdatedTask, self).__init__(priority)
self.project_key = project_key
self.updated = updated
def __repr__(self):
return '<SetProjectUpdatedTask %s %s>' % (self.project_key, self.updated)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.project_key == self.project_key and
other.updated == self.updated):
return True
return False
def run(self, sync):
app = sync.app
with app.db.getSession() as session:
project = session.getProject(self.project_key)
project.updated = self.updated
class SyncQueriedChangesTask(Task):
def __init__(self, query_name, query, priority=NORMAL_PRIORITY):
super(SyncQueriedChangesTask, self).__init__(priority)
self.query_name = query_name
self.query = query
def __repr__(self):
return '<SyncQueriedChangesTask %s>' % self.query_name
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.query_name == self.query_name and
other.query == self.query):
return True
return False
def run(self, sync):
app = sync.app
now = datetime.datetime.utcnow()
with app.db.getSession() as session:
sync_query = session.getSyncQueryByName(self.query_name)
query = 'q=%s' % self.query
if sync_query.updated:
# Allow 4 seconds for request time, etc.
query += ' -age:%ss' % (int(math.ceil((now-sync_query.updated).total_seconds())) + 4,)
else:
query += ' status:open'
for project in session.getProjects(subscribed=True):
query += ' -project:%s' % project.name
changes = []
sortkey = ''
done = False
while not done:
# We don't actually want to limit to 500, but that's the server-side default, and
# if we don't specify this, we won't get a _more_changes flag.
q = 'changes/?n=500%s&%s' % (sortkey, query)
self.log.debug('Query: %s ' % (q,))
batch = sync.get(q)
done = True
if batch:
changes += batch
if '_more_changes' in batch[-1]:
sortkey = '&N=%s' % (batch[-1]['_sortkey'],)
done = False
change_ids = [c['id'] for c in changes]
with app.db.getSession() as session:
# Winnow the list of IDs to only the ones in the local DB.
change_ids = session.getChangeIDs(change_ids)
for c in changes:
                # For now, just sync open changes or changes already in the
                # db; optionally we could sync all changes ever.
if c['id'] in change_ids or (c['status'] not in CLOSED_STATUSES):
sync.submitTask(SyncChangeTask(c['id'], priority=self.priority))
sync.submitTask(SetSyncQueryUpdatedTask(self.query_name, now, priority=self.priority))
class SetSyncQueryUpdatedTask(Task):
def __init__(self, query_name, updated, priority=NORMAL_PRIORITY):
super(SetSyncQueryUpdatedTask, self).__init__(priority)
self.query_name = query_name
self.updated = updated
def __repr__(self):
return '<SetSyncQueryUpdatedTask %s %s>' % (self.query_name, self.updated)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.query_name == self.query_name and
other.updated == self.updated):
return True
return False
def run(self, sync):
app = sync.app
with app.db.getSession() as session:
sync_query = session.getSyncQueryByName(self.query_name)
sync_query.updated = self.updated
class SyncChangesByCommitsTask(Task):
def __init__(self, commits, priority=NORMAL_PRIORITY):
super(SyncChangesByCommitsTask, self).__init__(priority)
self.commits = commits
def __repr__(self):
return '<SyncChangesByCommitsTask %s>' % (self.commits,)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.commits == self.commits):
return True
return False
def run(self, sync):
query = ' OR '.join(['commit:%s' % x for x in self.commits])
changes = sync.get('changes/?q=%s' % query)
self.log.debug('Query: %s ' % (query,))
for c in changes:
sync.submitTask(SyncChangeTask(c['id'], priority=self.priority))
self.log.debug("Sync change %s for its commit" % (c['id'],))
def addCommit(self, commit):
if commit in self.commits:
return True
# 100 should be under the URL length limit
if len(self.commits) >= 100:
return False
self.commits.append(commit)
return True
class SyncChangeByNumberTask(Task):
def __init__(self, number, priority=NORMAL_PRIORITY):
super(SyncChangeByNumberTask, self).__init__(priority)
self.number = number
def __repr__(self):
return '<SyncChangeByNumberTask %s>' % (self.number,)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.number == self.number):
return True
return False
def run(self, sync):
query = '%s' % self.number
changes = sync.get('changes/?q=%s' % query)
self.log.debug('Query: %s ' % (query,))
for c in changes:
task = SyncChangeTask(c['id'], priority=self.priority)
self.tasks.append(task)
sync.submitTask(task)
self.log.debug("Sync change %s because it is number %s" % (c['id'], self.number))
class SyncChangeTask(Task):
def __init__(self, change_id, force_fetch=False, priority=NORMAL_PRIORITY):
super(SyncChangeTask, self).__init__(priority)
self.change_id = change_id
self.force_fetch = force_fetch
def __repr__(self):
return '<SyncChangeTask %s>' % (self.change_id,)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.change_id == self.change_id and
other.force_fetch == self.force_fetch):
return True
return False
def run(self, sync):
start_time = time.time()
app = sync.app
remote_change = sync.get('changes/%s?o=DETAILED_LABELS&o=ALL_REVISIONS&o=ALL_COMMITS&o=MESSAGES&o=DETAILED_ACCOUNTS&o=CURRENT_ACTIONS&o=ALL_FILES' % self.change_id)
# Perform subqueries this task will need outside of the db session
for remote_commit, remote_revision in remote_change.get('revisions', {}).items():
remote_comments_data = sync.get('changes/%s/revisions/%s/comments' % (self.change_id, remote_commit))
remote_revision['_gertty_remote_comments_data'] = remote_comments_data
fetches = collections.defaultdict(list)
parent_commits = set()
with app.db.getSession() as session:
change = session.getChangeByID(self.change_id)
account = session.getAccountByID(remote_change['owner']['_account_id'],
name=remote_change['owner'].get('name'),
username=remote_change['owner'].get('username'),
email=remote_change['owner'].get('email'))
if not change:
project = session.getProjectByName(remote_change['project'])
created = dateutil.parser.parse(remote_change['created'])
updated = dateutil.parser.parse(remote_change['updated'])
change = project.createChange(remote_change['id'], account, remote_change['_number'],
remote_change['branch'], remote_change['change_id'],
remote_change['subject'], created,
updated, remote_change['status'],
topic=remote_change.get('topic'))
self.log.info("Created new change %s in local DB.", change.id)
result = ChangeAddedEvent(change)
else:
result = ChangeUpdatedEvent(change)
self.results.append(result)
change.owner = account
if change.status != remote_change['status']:
change.status = remote_change['status']
result.status_changed = True
if remote_change.get('starred'):
change.starred = True
else:
change.starred = False
change.subject = remote_change['subject']
change.updated = dateutil.parser.parse(remote_change['updated'])
change.topic = remote_change.get('topic')
repo = gitrepo.get_repo(change.project.name, app.config)
new_revision = False
for remote_commit, remote_revision in remote_change.get('revisions', {}).items():
revision = session.getRevisionByCommit(remote_commit)
# TODO: handle multiple parents
if 'anonymous http' in remote_revision['fetch']:
ref = remote_revision['fetch']['anonymous http']['ref']
url = remote_revision['fetch']['anonymous http']['url']
auth = False
elif 'http' in remote_revision['fetch']:
auth = True
ref = remote_revision['fetch']['http']['ref']
url = list(urlparse.urlsplit(sync.app.config.url + change.project.name))
url[1] = '%s:%s@%s' % (
urllib.quote_plus(sync.app.config.username),
urllib.quote_plus(sync.app.config.password), url[1])
url = urlparse.urlunsplit(url)
elif 'ssh' in remote_revision['fetch']:
ref = remote_revision['fetch']['ssh']['ref']
url = remote_revision['fetch']['ssh']['url']
auth = False
elif 'git' in remote_revision['fetch']:
ref = remote_revision['fetch']['git']['ref']
url = remote_revision['fetch']['git']['url']
auth = False
else:
if len(remote_revision['fetch']):
errMessage = "No supported fetch method found. Server offers: %s" % (
', '.join(remote_revision['fetch'].keys()))
else:
errMessage = "The server is missing the download-commands plugin."
raise Exception(errMessage)
if (not revision) or self.force_fetch:
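                    # The leading '+' in the refspec forces the local ref to
                    # be updated even when it is not a fast-forward.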
fetches[url].append('+%(ref)s:%(ref)s' % dict(ref=ref))
if not revision:
revision = change.createRevision(remote_revision['_number'],
remote_revision['commit']['message'], remote_commit,
remote_revision['commit']['parents'][0]['commit'],
auth, ref)
self.log.info("Created new revision %s for change %s revision %s in local DB.",
revision.key, self.change_id, remote_revision['_number'])
new_revision = True
revision.message = remote_revision['commit']['message']
actions = remote_revision.get('actions', {})
revision.can_submit = 'submit' in actions
# TODO: handle multiple parents
if revision.parent not in parent_commits:
parent_revision = session.getRevisionByCommit(revision.parent)
if not parent_revision and change.status not in CLOSED_STATUSES:
sync._syncChangeByCommit(revision.parent, self.priority)
self.log.debug("Change %s revision %s needs parent commit %s synced" %
(change.id, remote_revision['_number'], revision.parent))
parent_commits.add(revision.parent)
result.updateRelatedChanges(session, change)
f = revision.getFile('/COMMIT_MSG')
if f is None:
f = revision.createFile('/COMMIT_MSG', None,
None, None, None)
for remote_path, remote_file in remote_revision['files'].items():
f = revision.getFile(remote_path)
if f is None:
if remote_file.get('binary'):
inserted = deleted = None
else:
inserted = remote_file.get('lines_inserted', 0)
deleted = remote_file.get('lines_deleted', 0)
f = revision.createFile(remote_path, remote_file.get('status', 'M'),
remote_file.get('old_path'),
inserted, deleted)
remote_comments_data = remote_revision['_gertty_remote_comments_data']
for remote_file, remote_comments in remote_comments_data.items():
for remote_comment in remote_comments:
account = session.getAccountByID(remote_comment['author']['_account_id'],
name=remote_comment['author'].get('name'),
username=remote_comment['author'].get('username'),
email=remote_comment['author'].get('email'))
comment = session.getCommentByID(remote_comment['id'])
if not comment:
# Normalize updated -> created
created = dateutil.parser.parse(remote_comment['updated'])
parent = False
if remote_comment.get('side', '') == 'PARENT':
parent = True
fileobj = revision.getFile(remote_file)
comment = fileobj.createComment(remote_comment['id'], account,
remote_comment.get('in_reply_to'),
created,
parent, remote_comment.get('line'),
remote_comment['message'])
self.log.info("Created new comment %s for revision %s in local DB.",
comment.key, revision.key)
else:
if comment.author != account:
comment.author = account
new_message = False
for remote_message in remote_change.get('messages', []):
if 'author' in remote_message:
account = session.getAccountByID(remote_message['author']['_account_id'],
name=remote_message['author'].get('name'),
username=remote_message['author'].get('username'),
email=remote_message['author'].get('email'))
if account.username != app.config.username:
new_message = True
else:
account = session.getSystemAccount()
message = session.getMessageByID(remote_message['id'])
if not message:
revision = session.getRevisionByNumber(change, remote_message.get('_revision_number', 1))
if revision:
# Normalize date -> created
created = dateutil.parser.parse(remote_message['date'])
message = revision.createMessage(remote_message['id'], account, created,
remote_message['message'])
self.log.info("Created new review message %s for revision %s in local DB.", message.key, revision.key)
else:
self.log.info("Unable to create new review message for revision %s because it is not in local DB (draft?).", remote_message.get('_revision_number'))
else:
if message.author != account:
message.author = account
remote_approval_entries = {}
remote_label_entries = {}
user_voted = False
for remote_label_name, remote_label_dict in remote_change.get('labels', {}).items():
for remote_approval in remote_label_dict.get('all', []):
if remote_approval.get('value') is None:
continue
remote_approval['category'] = remote_label_name
key = '%s~%s' % (remote_approval['category'], remote_approval['_account_id'])
remote_approval_entries[key] = remote_approval
if remote_approval['_account_id'] == sync.account_id and int(remote_approval['value']) != 0:
user_voted = True
for key, value in remote_label_dict.get('values', {}).items():
# +1: "LGTM"
label = dict(value=key,
description=value,
category=remote_label_name)
key = '%s~%s~%s' % (label['category'], label['value'], label['description'])
remote_label_entries[key] = label
remote_approval_keys = set(remote_approval_entries.keys())
remote_label_keys = set(remote_label_entries.keys())
local_approvals = {}
local_labels = {}
user_votes = {}
for approval in change.approvals:
if approval.draft and not new_revision:
# If we have a new revision, we need to delete
# draft local approvals because they can no longer
# be uploaded. Otherwise, keep them because we
# may be about to upload a review. Ignoring an
# approval here means it will not be deleted.
# Also keep track of these approvals so we can
# determine whether we should hold the change
# later.
user_votes[approval.category] = approval.value
# Count draft votes as having voted for the
# purposes of deciding whether to clear the
# reviewed flag later.
user_voted = True
continue
key = '%s~%s' % (approval.category, approval.reviewer.id)
if key in local_approvals:
# Delete duplicate approvals.
session.delete(approval)
else:
local_approvals[key] = approval
local_approval_keys = set(local_approvals.keys())
for label in change.labels:
key = '%s~%s~%s' % (label.category, label.value, label.description)
local_labels[key] = label
local_label_keys = set(local_labels.keys())
for key in local_approval_keys-remote_approval_keys:
session.delete(local_approvals[key])
for key in local_label_keys-remote_label_keys:
session.delete(local_labels[key])
for key in remote_approval_keys-local_approval_keys:
remote_approval = remote_approval_entries[key]
account = session.getAccountByID(remote_approval['_account_id'],
name=remote_approval.get('name'),
username=remote_approval.get('username'),
email=remote_approval.get('email'))
change.createApproval(account,
remote_approval['category'],
remote_approval['value'])
self.log.info("Created approval for change %s in local DB.", change.id)
user_value = user_votes.get(remote_approval['category'], 0)
if user_value > 0 and remote_approval['value'] < 0:
# Someone left a negative vote after the local
# user created a draft positive vote. Hold the
# change so that it doesn't look like the local
# user is ignoring negative feedback.
if not change.held:
change.held = True
result.held_changed = True
self.log.info("Setting change %s to held due to negative review after positive", change.id)
for key in remote_label_keys-local_label_keys:
remote_label = remote_label_entries[key]
change.createLabel(remote_label['category'],
remote_label['value'],
remote_label['description'])
for key in remote_approval_keys.intersection(local_approval_keys):
local_approval = local_approvals[key]
remote_approval = remote_approval_entries[key]
local_approval.value = remote_approval['value']
# For the side effect of updating account info:
account = session.getAccountByID(remote_approval['_account_id'],
name=remote_approval.get('name'),
username=remote_approval.get('username'),
email=remote_approval.get('email'))
remote_permitted_entries = {}
for remote_label_name, remote_label_values in remote_change.get('permitted_labels', {}).items():
for remote_label_value in remote_label_values:
remote_label = dict(category=remote_label_name,
value=remote_label_value)
key = '%s~%s' % (remote_label['category'], remote_label['value'])
remote_permitted_entries[key] = remote_label
remote_permitted_keys = set(remote_permitted_entries.keys())
local_permitted = {}
for permitted in change.permitted_labels:
key = '%s~%s' % (permitted.category, permitted.value)
local_permitted[key] = permitted
local_permitted_keys = set(local_permitted.keys())
for key in local_permitted_keys-remote_permitted_keys:
session.delete(local_permitted[key])
for key in remote_permitted_keys-local_permitted_keys:
remote_permitted = remote_permitted_entries[key]
change.createPermittedLabel(remote_permitted['category'],
remote_permitted['value'])
if not user_voted:
# Only consider changing the reviewed state if we don't have a vote
if new_revision or new_message:
if change.reviewed:
change.reviewed = False
result.review_flag_changed = True
for url, refs in fetches.items():
self.log.debug("Fetching from %s with refs %s", url, refs)
try:
repo.fetch(url, refs)
except Exception:
# Backwards compat with GitPython before the multi-ref fetch
# patch.
# (https://github.com/gitpython-developers/GitPython/pull/170)
for ref in refs:
self.log.debug("git fetch %s %s" % (url, ref))
repo.fetch(url, ref)
end_time = time.time()
total_time = end_time - start_time
self.log.info("Synced change %s in %0.5f seconds.", self.change_id, total_time)
class CheckReposTask(Task):
    # On startup, check all subscribed projects: for any project without a
    # local repo, or for every project if --fetch-missing-refs is supplied,
    # check the local changes for missing refs and sync the associated
    # changes.
def __repr__(self):
return '<CheckReposTask>'
def __eq__(self, other):
if (other.__class__ == self.__class__):
return True
return False
def run(self, sync):
app = sync.app
with app.db.getSession() as session:
projects = session.getProjects(subscribed=True)
for project in projects:
try:
missing = False
try:
repo = gitrepo.get_repo(project.name, app.config)
except gitrepo.GitCloneError:
missing = True
if missing or app.fetch_missing_refs:
sync.submitTask(CheckRevisionsTask(project.key,
priority=LOW_PRIORITY))
except Exception:
self.log.exception("Exception checking repo %s" %
(project.name,))
class CheckRevisionsTask(Task):
def __init__(self, project_key, priority=NORMAL_PRIORITY):
super(CheckRevisionsTask, self).__init__(priority)
self.project_key = project_key
def __repr__(self):
return '<CheckRevisionsTask %s>' % (self.project_key,)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.project_key == self.project_key):
return True
return False
def run(self, sync):
app = sync.app
to_sync = set()
with app.db.getSession() as session:
project = session.getProject(self.project_key)
repo = None
try:
repo = gitrepo.get_repo(project.name, app.config)
except gitrepo.GitCloneError:
pass
for change in project.open_changes:
if repo:
for revision in change.revisions:
if not (repo.hasCommit(revision.parent) and
repo.hasCommit(revision.commit)):
to_sync.add(change.id)
else:
to_sync.add(change.id)
for change_id in to_sync:
sync.submitTask(SyncChangeTask(change_id, priority=self.priority))
class UploadReviewsTask(Task):
def __repr__(self):
return '<UploadReviewsTask>'
def __eq__(self, other):
if (other.__class__ == self.__class__):
return True
return False
def run(self, sync):
app = sync.app
with app.db.getSession() as session:
for c in session.getPendingTopics():
sync.submitTask(SetTopicTask(c.key, self.priority))
for c in session.getPendingRebases():
sync.submitTask(RebaseChangeTask(c.key, self.priority))
for c in session.getPendingStatusChanges():
sync.submitTask(ChangeStatusTask(c.key, self.priority))
for c in session.getPendingStarred():
sync.submitTask(ChangeStarredTask(c.key, self.priority))
for c in session.getPendingCherryPicks():
sync.submitTask(SendCherryPickTask(c.key, self.priority))
for r in session.getPendingCommitMessages():
sync.submitTask(ChangeCommitMessageTask(r.key, self.priority))
for m in session.getPendingMessages():
sync.submitTask(UploadReviewTask(m.key, self.priority))
class SetTopicTask(Task):
def __init__(self, change_key, priority=NORMAL_PRIORITY):
super(SetTopicTask, self).__init__(priority)
self.change_key = change_key
def __repr__(self):
return '<SetTopicTask %s>' % (self.change_key,)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.change_key == self.change_key):
return True
return False
def run(self, sync):
app = sync.app
with app.db.getSession() as session:
change = session.getChange(self.change_key)
data = dict(topic=change.topic)
change.pending_topic = False
# Inside db session for rollback
sync.put('changes/%s/topic' % (change.id,),
data)
sync.submitTask(SyncChangeTask(change.id, priority=self.priority))
class RebaseChangeTask(Task):
def __init__(self, change_key, priority=NORMAL_PRIORITY):
super(RebaseChangeTask, self).__init__(priority)
self.change_key = change_key
def __repr__(self):
return '<RebaseChangeTask %s>' % (self.change_key,)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.change_key == self.change_key):
return True
return False
def run(self, sync):
app = sync.app
with app.db.getSession() as session:
change = session.getChange(self.change_key)
change.pending_rebase = False
# Inside db session for rollback
sync.post('changes/%s/rebase' % (change.id,), {})
sync.submitTask(SyncChangeTask(change.id, priority=self.priority))
class ChangeStarredTask(Task):
def __init__(self, change_key, priority=NORMAL_PRIORITY):
super(ChangeStarredTask, self).__init__(priority)
self.change_key = change_key
def __repr__(self):
return '<ChangeStarredTask %s>' % (self.change_key,)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.change_key == self.change_key):
return True
return False
def run(self, sync):
app = sync.app
with app.db.getSession() as session:
change = session.getChange(self.change_key)
if change.starred:
sync.put('accounts/self/starred.changes/%s' % (change.id,),
data={})
else:
sync.delete('accounts/self/starred.changes/%s' % (change.id,),
data={})
change.pending_starred = False
sync.submitTask(SyncChangeTask(change.id, priority=self.priority))
class ChangeStatusTask(Task):
def __init__(self, change_key, priority=NORMAL_PRIORITY):
super(ChangeStatusTask, self).__init__(priority)
self.change_key = change_key
def __repr__(self):
return '<ChangeStatusTask %s>' % (self.change_key,)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.change_key == self.change_key):
return True
return False
def run(self, sync):
app = sync.app
with app.db.getSession() as session:
change = session.getChange(self.change_key)
if change.pending_status_message:
data = dict(message=change.pending_status_message)
else:
data = {}
change.pending_status = False
change.pending_status_message = None
# Inside db session for rollback
if change.status == 'ABANDONED':
sync.post('changes/%s/abandon' % (change.id,),
data)
elif change.status == 'NEW':
sync.post('changes/%s/restore' % (change.id,),
data)
elif change.status == 'SUBMITTED':
sync.post('changes/%s/submit' % (change.id,), {})
sync.submitTask(SyncChangeTask(change.id, priority=self.priority))
class SendCherryPickTask(Task):
def __init__(self, cp_key, priority=NORMAL_PRIORITY):
super(SendCherryPickTask, self).__init__(priority)
self.cp_key = cp_key
def __repr__(self):
return '<SendCherryPickTask %s>' % (self.cp_key,)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.cp_key == self.cp_key):
return True
return False
def run(self, sync):
app = sync.app
with app.db.getSession() as session:
cp = session.getPendingCherryPick(self.cp_key)
data = dict(message=cp.message,
destination=cp.branch)
session.delete(cp)
# Inside db session for rollback
ret = sync.post('changes/%s/revisions/%s/cherrypick' %
(cp.revision.change.id, cp.revision.commit),
data)
if ret and 'id' in ret:
sync.submitTask(SyncChangeTask(ret['id'], priority=self.priority))
class ChangeCommitMessageTask(Task):
def __init__(self, revision_key, priority=NORMAL_PRIORITY):
super(ChangeCommitMessageTask, self).__init__(priority)
self.revision_key = revision_key
def __repr__(self):
return '<ChangeCommitMessageTask %s>' % (self.revision_key,)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.revision_key == self.revision_key):
return True
return False
def run(self, sync):
app = sync.app
with app.db.getSession() as session:
revision = session.getRevision(self.revision_key)
revision.pending_message = False
data = dict(message=revision.message)
# Inside db session for rollback
sync.post('changes/%s/revisions/%s/message' %
(revision.change.id, revision.commit),
data)
change_id = revision.change.id
sync.submitTask(SyncChangeTask(change_id, priority=self.priority))
class UploadReviewTask(Task):
def __init__(self, message_key, priority=NORMAL_PRIORITY):
super(UploadReviewTask, self).__init__(priority)
self.message_key = message_key
def __repr__(self):
return '<UploadReviewTask %s>' % (self.message_key,)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.message_key == self.message_key):
return True
return False
def run(self, sync):
app = sync.app
with app.db.getSession() as session:
message = session.getMessage(self.message_key)
if message is None:
self.log.debug("Message %s has already been uploaded" % (
self.message_key))
return
change = message.revision.change
if not change.held:
self.log.debug("Syncing %s to find out if it should be held" % (change.id,))
t = SyncChangeTask(change.id)
t.run(sync)
self.results += t.results
submit = False
change_id = None
with app.db.getSession() as session:
message = session.getMessage(self.message_key)
revision = message.revision
change = message.revision.change
if change.held:
self.log.debug("Not uploading review to %s because it is held" %
(change.id,))
return
change_id = change.id
current_revision = change.revisions[-1]
if change.pending_status and change.status == 'SUBMITTED':
submit = True
data = dict(message=message.message,
strict_labels=False)
if revision == current_revision:
data['labels'] = {}
for approval in change.draft_approvals:
data['labels'][approval.category] = approval.value
session.delete(approval)
comments = {}
for file in revision.files:
if file.draft_comments:
comment_list = []
for comment in file.draft_comments:
d = dict(line=comment.line,
message=comment.message)
if comment.parent:
d['side'] = 'PARENT'
comment_list.append(d)
session.delete(comment)
comments[file.path] = comment_list
if comments:
data['comments'] = comments
session.delete(message)
# Inside db session for rollback
sync.post('changes/%s/revisions/%s/review' % (change.id, revision.commit),
data)
if submit:
            # In another db session in case the submit fails after posting
            # the message has already succeeded.
with app.db.getSession() as session:
change = session.getChangeByID(change_id)
change.pending_status = False
change.pending_status_message = None
sync.post('changes/%s/submit' % (change_id,), {})
sync.submitTask(SyncChangeTask(change_id, priority=self.priority))
class PruneDatabaseTask(Task):
def __init__(self, age, priority=NORMAL_PRIORITY):
super(PruneDatabaseTask, self).__init__(priority)
self.age = age
def __repr__(self):
return '<PruneDatabaseTask %s>' % (self.age,)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.age == self.age):
return True
return False
def run(self, sync):
if not self.age:
return
app = sync.app
with app.db.getSession() as session:
for change in session.getChanges('status:closed age:%s' % self.age):
t = PruneChangeTask(change.key, priority=self.priority)
self.tasks.append(t)
sync.submitTask(t)
t = VacuumDatabaseTask(priority=self.priority)
self.tasks.append(t)
sync.submitTask(t)
class PruneChangeTask(Task):
def __init__(self, key, priority=NORMAL_PRIORITY):
super(PruneChangeTask, self).__init__(priority)
self.key = key
def __repr__(self):
return '<PruneChangeTask %s>' % (self.key,)
def __eq__(self, other):
if (other.__class__ == self.__class__ and
other.key == self.key):
return True
return False
def run(self, sync):
app = sync.app
with app.db.getSession() as session:
change = session.getChange(self.key)
if not change:
return
repo = gitrepo.get_repo(change.project.name, app.config)
self.log.info("Pruning %s change %s status:%s updated:%s" % (
change.project.name, change.number, change.status, change.updated))
change_ref = None
for revision in change.revisions:
if change_ref is None:
change_ref = '/'.join(revision.fetch_ref.split('/')[:-1])
self.log.info("Deleting %s ref %s" % (
change.project.name, revision.fetch_ref))
repo.deleteRef(revision.fetch_ref)
self.log.info("Deleting %s ref %s" % (
change.project.name, change_ref))
try:
repo.deleteRef(change_ref)
except OSError, e:
if e.errno not in [errno.EISDIR, errno.EPERM]:
raise
session.delete(change)
class VacuumDatabaseTask(Task):
def __init__(self, priority=NORMAL_PRIORITY):
super(VacuumDatabaseTask, self).__init__(priority)
def __repr__(self):
return '<VacuumDatabaseTask>'
def __eq__(self, other):
if other.__class__ == self.__class__:
return True
return False
def run(self, sync):
app = sync.app
with app.db.getSession() as session:
session.vacuum()
class Sync(object):
def __init__(self, app):
self.user_agent = 'Gertty/%s %s' % (gertty.version.version_info.release_string(),
requests.utils.default_user_agent())
self.offline = False
self.account_id = None
self.app = app
self.log = logging.getLogger('gertty.sync')
self.queue = MultiQueue([HIGH_PRIORITY, NORMAL_PRIORITY, LOW_PRIORITY])
self.result_queue = Queue.Queue()
self.session = requests.Session()
if self.app.config.auth_type == 'basic':
authclass = requests.auth.HTTPBasicAuth
else:
authclass = requests.auth.HTTPDigestAuth
self.auth = authclass(
self.app.config.username, self.app.config.password)
self.submitTask(SyncOwnAccountTask(HIGH_PRIORITY))
self.submitTask(CheckReposTask(HIGH_PRIORITY))
self.submitTask(UploadReviewsTask(HIGH_PRIORITY))
self.submitTask(SyncProjectListTask(HIGH_PRIORITY))
self.submitTask(SyncSubscribedProjectsTask(NORMAL_PRIORITY))
self.submitTask(SyncSubscribedProjectBranchesTask(LOW_PRIORITY))
self.submitTask(PruneDatabaseTask(self.app.config.expire_age, LOW_PRIORITY))
self.periodic_thread = threading.Thread(target=self.periodicSync)
self.periodic_thread.daemon = True
self.periodic_thread.start()
def periodicSync(self):
hourly = time.time()
while True:
try:
time.sleep(60)
self.syncSubscribedProjects()
now = time.time()
if now-hourly > 3600:
hourly = now
self.pruneDatabase()
except Exception:
self.log.exception('Exception in periodicSync')
def submitTask(self, task):
if not self.offline:
if not self.queue.put(task, task.priority):
task.complete(False)
else:
task.complete(False)
def run(self, pipe):
task = None
while True:
task = self._run(pipe, task)
def _run(self, pipe, task=None):
if not task:
task = self.queue.get()
self.log.debug('Run: %s' % (task,))
try:
task.run(self)
task.complete(True)
except requests.ConnectionError, e:
self.log.warning("Offline due to: %s" % (e,))
if not self.offline:
self.submitTask(UploadReviewsTask(HIGH_PRIORITY))
self.offline = True
self.app.status.update(offline=True, refresh=False)
os.write(pipe, 'refresh\n')
time.sleep(30)
return task
except Exception:
task.complete(False)
self.log.exception('Exception running task %s' % (task,))
self.app.status.update(error=True, refresh=False)
self.offline = False
self.app.status.update(offline=False, refresh=False)
for r in task.results:
self.result_queue.put(r)
os.write(pipe, 'refresh\n')
return None
def url(self, path):
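        # The 'a/' prefix selects Gerrit's authenticated REST API endpoints.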
return self.app.config.url + 'a/' + path
def get(self, path):
url = self.url(path)
self.log.debug('GET: %s' % (url,))
r = self.session.get(url,
verify=self.app.config.verify_ssl,
auth=self.auth, timeout=TIMEOUT,
headers = {'Accept': 'application/json',
'Accept-Encoding': 'gzip',
'User-Agent': self.user_agent})
if r.status_code == 200:
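            # Gerrit prepends the anti-XSSI prefix ")]}'" to its JSON
            # responses; slicing off the first four characters strips it
            # before parsing.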
ret = json.loads(r.text[4:])
if len(ret):
self.log.debug('200 OK, Received: %s' % (ret,))
else:
self.log.debug('200 OK, No body.')
return ret
else:
self.log.warn('HTTP response: %d', r.status_code)
def post(self, path, data):
url = self.url(path)
self.log.debug('POST: %s' % (url,))
self.log.debug('data: %s' % (data,))
r = self.session.post(url, data=json.dumps(data).encode('utf8'),
verify=self.app.config.verify_ssl,
auth=self.auth, timeout=TIMEOUT,
headers = {'Content-Type': 'application/json;charset=UTF-8',
'User-Agent': self.user_agent})
self.log.debug('Received: %s' % (r.text,))
ret = None
if r.text and len(r.text)>4:
try:
ret = json.loads(r.text[4:])
except Exception:
self.log.exception("Unable to parse result %s from post to %s" %
(r.text, url))
return ret
def put(self, path, data):
url = self.url(path)
self.log.debug('PUT: %s' % (url,))
self.log.debug('data: %s' % (data,))
r = self.session.put(url, data=json.dumps(data).encode('utf8'),
verify=self.app.config.verify_ssl,
auth=self.auth, timeout=TIMEOUT,
headers = {'Content-Type': 'application/json;charset=UTF-8',
'User-Agent': self.user_agent})
self.log.debug('Received: %s' % (r.text,))
def delete(self, path, data):
url = self.url(path)
self.log.debug('DELETE: %s' % (url,))
self.log.debug('data: %s' % (data,))
r = self.session.delete(url, data=json.dumps(data).encode('utf8'),
verify=self.app.config.verify_ssl,
auth=self.auth, timeout=TIMEOUT,
headers = {'Content-Type': 'application/json;charset=UTF-8',
'User-Agent': self.user_agent})
self.log.debug('Received: %s' % (r.text,))
def syncSubscribedProjects(self):
task = SyncSubscribedProjectsTask(LOW_PRIORITY)
self.submitTask(task)
if task.wait():
for subtask in task.tasks:
subtask.wait()
def pruneDatabase(self):
task = PruneDatabaseTask(self.app.config.expire_age, LOW_PRIORITY)
self.submitTask(task)
if task.wait():
for subtask in task.tasks:
subtask.wait()
def _syncChangeByCommit(self, commit, priority):
# Accumulate sync change by commit tasks because they often
# come in batches. This method assumes it is being called
# from within the run queue already and therefore does not
# need to worry about locking the queue.
task = None
for task in self.queue.find(SyncChangesByCommitsTask, priority):
if task.addCommit(commit):
return
task = SyncChangesByCommitsTask([commit], priority)
self.submitTask(task)
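# Illustrative sketch, not part of gertty: how a caller outside the sync
# thread typically uses the task machinery -- submit a task, then block on
# wait() for its outcome.  'sync' is assumed to be a running Sync instance.
def _submit_and_wait_example(sync):
    task = SyncProjectListTask(HIGH_PRIORITY)
    sync.submitTask(task)
    if task.wait(timeout=60):
        # task.results holds ProjectAddedEvent objects for any projects that
        # were created locally while the task ran.
        return task.results
    return None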
| {
"content_hash": "c1acfbd8a51b326763891d67d542c530",
"timestamp": "",
"source": "github",
"line_count": 1400,
"max_line_length": 172,
"avg_line_length": 42.11785714285714,
"alnum_prop": 0.5336385991689986,
"repo_name": "aspiers/gertty",
"id": "96a0fba31c340f2cf049bfbc57e2e3f59589f560",
"size": "59610",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gertty/sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "358313"
}
],
"symlink_target": ""
} |
from entities import *
class Account(Entity):
id = IntegerField(group=PRIMARY) # this field is in primary key group
iban = IntegerField(group=SECONDARY) # this is in secondary key group
balance = FloatField(default=0.0)
class Name(Entity):
first_name = StringField(group=SECONDARY)
last_name = StringField(group=SECONDARY)
class Customer(Entity):
id = IntegerField(group=PRIMARY)
name = EntityField(Name, group=SECONDARY)
accounts = ListField(ReferenceField(Account), default=list)
# Create Account objects.
a_1 = Account(1, 111, 10.0) # __init__() recognize positional arguments
a_2 = Account(id=2, iban=222, balance=20.0) # as well as keyword arguments
# Generate hashable key using primary key.
print a_1.keyify() # prints '(1,)'
# Generate hashable key using secondary key.
print a_2.keyify(SECONDARY) # prints '(222,)'
# Create Customer object.
c = Customer(1, Name('eser', 'aygun'))
# Generate hashable key using primary key.
print c.keyify() # prints '(1,)'
# Generate hashable key using secondary key.
print c.keyify(SECONDARY) # prints '(('eser', 'aygun'),)'
# Try validating an invalid object.
c.accounts.append(123)
try:
c.validate() # fails
except ValidationError:
print 'accounts list is only for Account objects'
# Try validating a valid object.
c.accounts = [a_1, a_2]
c.validate() # succeeds
| {
"content_hash": "6d1f2607683b0e25004b26d2e8c32766",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 75,
"avg_line_length": 28.020408163265305,
"alnum_prop": 0.705753823743627,
"repo_name": "eseraygun/python-entities",
"id": "8b9be07a4dc983c66a3b063730c2301d2dc377a6",
"size": "1373",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/basicusage.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "20752"
}
],
"symlink_target": ""
} |
from typing import Type
from perfetto.trace_uri_resolver import util
from perfetto.trace_uri_resolver.resolver import TraceUriResolver
class PathUriResolver(TraceUriResolver):
PREFIX: str = None
def __init__(self, path: str):
self.path = path
def resolve(self) -> TraceUriResolver.Result:
return [
TraceUriResolver.Result(
trace=util.file_generator(self.path), metadata=dict())
]
@classmethod
def from_trace_uri(cls: Type['PathUriResolver'],
args_str: str) -> 'PathUriResolver':
return PathUriResolver(args_str)
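# Illustrative sketch, not part of the Perfetto sources: resolving a plain
# file path.  The path below is hypothetical, and result.trace is assumed to
# be the generator returned by util.file_generator, yielding the trace bytes
# in chunks.
def _example_resolve():
    resolver = PathUriResolver.from_trace_uri('/tmp/example.perfetto-trace')
    for result in resolver.resolve():
        for chunk in result.trace:
            pass  # e.g. feed chunks to a trace processor or write to disk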
| {
"content_hash": "479c8b46ea5e15710c1867514c95cb1f",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 66,
"avg_line_length": 26.59090909090909,
"alnum_prop": 0.6871794871794872,
"repo_name": "google/perfetto",
"id": "ca5893af0ec7a88b99feebc29263ea59df90687f",
"size": "1186",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/perfetto/trace_uri_resolver/path.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "58347"
},
{
"name": "C++",
"bytes": "10532953"
},
{
"name": "CSS",
"bytes": "6080"
},
{
"name": "Dockerfile",
"bytes": "6650"
},
{
"name": "HTML",
"bytes": "15653"
},
{
"name": "Java",
"bytes": "12441"
},
{
"name": "JavaScript",
"bytes": "115174"
},
{
"name": "Makefile",
"bytes": "10869"
},
{
"name": "Meson",
"bytes": "1635"
},
{
"name": "Python",
"bytes": "969677"
},
{
"name": "SCSS",
"bytes": "116843"
},
{
"name": "Shell",
"bytes": "79903"
},
{
"name": "Starlark",
"bytes": "222184"
},
{
"name": "TypeScript",
"bytes": "1740641"
}
],
"symlink_target": ""
} |
from supriya.tools.ugentools.PureUGen import PureUGen
class Impulse(PureUGen):
r'''A non-band-limited single-sample impulse generator unit generator.
::
>>> ugentools.Impulse.ar()
Impulse.ar()
'''
### CLASS VARIABLES ###
__documentation_section__ = 'Oscillator UGens'
__slots__ = ()
_ordered_input_names = (
'frequency',
'phase',
)
### INITIALIZER ###
def __init__(
self,
calculation_rate=None,
frequency=440.,
phase=0.,
):
PureUGen.__init__(
self,
calculation_rate=calculation_rate,
frequency=frequency,
phase=phase,
)
### PUBLIC METHODS ###
@classmethod
def ar(
cls,
frequency=440,
phase=0,
):
r'''Constructs an audio-rate non-band-limited single-sample impulse
generator.
::
>>> ugentools.Impulse.ar(
... frequency=443,
... phase=0.25,
... )
Impulse.ar()
Returns unit generator graph.
'''
from supriya.tools import synthdeftools
calculation_rate = synthdeftools.CalculationRate.AUDIO
ugen = cls._new_expanded(
calculation_rate=calculation_rate,
frequency=frequency,
phase=phase,
)
return ugen
@classmethod
def kr(
cls,
frequency=440,
phase=0,
):
r'''Constructs a control-rate non-band-limited single-sample impulse
generator.
::
>>> ugentools.Impulse.kr(
... frequency=443,
... phase=0.25,
... )
Impulse.kr()
Returns unit generator graph.
'''
from supriya.tools import synthdeftools
calculation_rate = synthdeftools.CalculationRate.CONTROL
ugen = cls._new_expanded(
calculation_rate=calculation_rate,
frequency=frequency,
phase=phase,
)
return ugen
### PUBLIC PROPERTIES ###
@property
def frequency(self):
r'''Gets `frequency` input of Impulse.
::
>>> frequency = 0.5
>>> impulse = ugentools.Impulse.ar(
... frequency=frequency,
... )
>>> impulse.frequency
0.5
Returns input.
'''
index = self._ordered_input_names.index('frequency')
return self._inputs[index]
@property
def phase(self):
r'''Gets `phase` input of Impulse.
::
>>> phase = 0.25
>>> impulse = ugentools.Impulse.ar(
... phase=phase,
... )
>>> impulse.phase
0.25
Returns input.
'''
index = self._ordered_input_names.index('phase')
return self._inputs[index] | {
"content_hash": "ea54625c2e83c00d41a0b260fe6cbd87",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 76,
"avg_line_length": 22.223880597014926,
"alnum_prop": 0.48791134989926127,
"repo_name": "andrewyoung1991/supriya",
"id": "58db943bf2c0de1d80b34ba2ac374b9f4119c9a4",
"size": "3004",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "supriya/tools/ugentools/Impulse.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "6712"
},
{
"name": "CSS",
"bytes": "446"
},
{
"name": "HTML",
"bytes": "1083"
},
{
"name": "JavaScript",
"bytes": "6163"
},
{
"name": "Makefile",
"bytes": "6775"
},
{
"name": "Python",
"bytes": "2693776"
}
],
"symlink_target": ""
} |
"""Support for monitoring an SABnzbd NZB client."""
from __future__ import annotations
from datetime import timedelta
import logging
from pysabnzbd import SabnzbdApi, SabnzbdApiException
import voluptuous as vol
from homeassistant.components.discovery import SERVICE_SABNZBD
from homeassistant.const import (
CONF_API_KEY,
CONF_HOST,
CONF_NAME,
CONF_PATH,
CONF_PORT,
CONF_SENSORS,
CONF_SSL,
DATA_GIGABYTES,
DATA_MEGABYTES,
DATA_RATE_MEGABYTES_PER_SECOND,
)
from homeassistant.core import callback
from homeassistant.helpers import discovery
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.util.json import load_json, save_json
_LOGGER = logging.getLogger(__name__)
DOMAIN = "sabnzbd"
DATA_SABNZBD = "sabznbd"
_CONFIGURING: dict[str, str] = {}
ATTR_SPEED = "speed"
BASE_URL_FORMAT = "{}://{}:{}/"
CONFIG_FILE = "sabnzbd.conf"
DEFAULT_HOST = "localhost"
DEFAULT_NAME = "SABnzbd"
DEFAULT_PORT = 8080
DEFAULT_SPEED_LIMIT = "100"
DEFAULT_SSL = False
UPDATE_INTERVAL = timedelta(seconds=30)
SERVICE_PAUSE = "pause"
SERVICE_RESUME = "resume"
SERVICE_SET_SPEED = "set_speed"
SIGNAL_SABNZBD_UPDATED = "sabnzbd_updated"
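# Each entry maps a sensor key to [friendly name, unit of measurement, SABnzbd API field].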
SENSOR_TYPES = {
"current_status": ["Status", None, "status"],
"speed": ["Speed", DATA_RATE_MEGABYTES_PER_SECOND, "kbpersec"],
"queue_size": ["Queue", DATA_MEGABYTES, "mb"],
"queue_remaining": ["Left", DATA_MEGABYTES, "mbleft"],
"disk_size": ["Disk", DATA_GIGABYTES, "diskspacetotal1"],
"disk_free": ["Disk Free", DATA_GIGABYTES, "diskspace1"],
"queue_count": ["Queue Count", None, "noofslots_total"],
"day_size": ["Daily Total", DATA_GIGABYTES, "day_size"],
"week_size": ["Weekly Total", DATA_GIGABYTES, "week_size"],
"month_size": ["Monthly Total", DATA_GIGABYTES, "month_size"],
"total_size": ["Total", DATA_GIGABYTES, "total_size"],
}
SPEED_LIMIT_SCHEMA = vol.Schema(
{vol.Optional(ATTR_SPEED, default=DEFAULT_SPEED_LIMIT): cv.string}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_PATH): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_SENSORS): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
),
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
}
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_check_sabnzbd(sab_api):
"""Check if we can reach SABnzbd."""
try:
await sab_api.check_available()
return True
except SabnzbdApiException:
_LOGGER.error("Connection to SABnzbd API failed")
return False
async def async_configure_sabnzbd(
hass, config, use_ssl, name=DEFAULT_NAME, api_key=None
):
"""Try to configure Sabnzbd and request api key if configuration fails."""
host = config[CONF_HOST]
port = config[CONF_PORT]
web_root = config.get(CONF_PATH)
uri_scheme = "https" if use_ssl else "http"
base_url = BASE_URL_FORMAT.format(uri_scheme, host, port)
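    # e.g. BASE_URL_FORMAT.format('http', 'localhost', 8080) -> 'http://localhost:8080/'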
if api_key is None:
conf = await hass.async_add_executor_job(
load_json, hass.config.path(CONFIG_FILE)
)
api_key = conf.get(base_url, {}).get(CONF_API_KEY, "")
sab_api = SabnzbdApi(
base_url, api_key, web_root=web_root, session=async_get_clientsession(hass)
)
if await async_check_sabnzbd(sab_api):
async_setup_sabnzbd(hass, sab_api, config, name)
else:
async_request_configuration(hass, config, base_url, web_root)
async def async_setup(hass, config):
"""Set up the SABnzbd component."""
async def sabnzbd_discovered(service, info):
"""Handle service discovery."""
ssl = info.get("properties", {}).get("https", "0") == "1"
await async_configure_sabnzbd(hass, info, ssl)
discovery.async_listen(hass, SERVICE_SABNZBD, sabnzbd_discovered)
conf = config.get(DOMAIN)
if conf is not None:
use_ssl = conf[CONF_SSL]
name = conf.get(CONF_NAME)
api_key = conf.get(CONF_API_KEY)
await async_configure_sabnzbd(hass, conf, use_ssl, name, api_key)
return True
@callback
def async_setup_sabnzbd(hass, sab_api, config, name):
"""Set up SABnzbd sensors and services."""
sab_api_data = SabnzbdApiData(sab_api, name, config.get(CONF_SENSORS, {}))
if config.get(CONF_SENSORS):
hass.data[DATA_SABNZBD] = sab_api_data
hass.async_create_task(
discovery.async_load_platform(hass, "sensor", DOMAIN, {}, config)
)
async def async_service_handler(service):
"""Handle service calls."""
if service.service == SERVICE_PAUSE:
await sab_api_data.async_pause_queue()
elif service.service == SERVICE_RESUME:
await sab_api_data.async_resume_queue()
elif service.service == SERVICE_SET_SPEED:
speed = service.data.get(ATTR_SPEED)
await sab_api_data.async_set_queue_speed(speed)
hass.services.async_register(
DOMAIN, SERVICE_PAUSE, async_service_handler, schema=vol.Schema({})
)
hass.services.async_register(
DOMAIN, SERVICE_RESUME, async_service_handler, schema=vol.Schema({})
)
hass.services.async_register(
DOMAIN, SERVICE_SET_SPEED, async_service_handler, schema=SPEED_LIMIT_SCHEMA
)
async def async_update_sabnzbd(now):
"""Refresh SABnzbd queue data."""
try:
await sab_api.refresh_data()
async_dispatcher_send(hass, SIGNAL_SABNZBD_UPDATED, None)
except SabnzbdApiException as err:
_LOGGER.error(err)
async_track_time_interval(hass, async_update_sabnzbd, UPDATE_INTERVAL)
@callback
def async_request_configuration(hass, config, host, web_root):
"""Request configuration steps from the user."""
configurator = hass.components.configurator
    # If this host is already being configured, surface the error on the
    # existing configurator request instead of starting a new one
if host in _CONFIGURING:
configurator.async_notify_errors(
_CONFIGURING[host], "Failed to register, please try again."
)
return
async def async_configuration_callback(data):
"""Handle configuration changes."""
api_key = data.get(CONF_API_KEY)
sab_api = SabnzbdApi(
host, api_key, web_root=web_root, session=async_get_clientsession(hass)
)
if not await async_check_sabnzbd(sab_api):
return
def success():
"""Signal successful setup."""
conf = load_json(hass.config.path(CONFIG_FILE))
conf[host] = {CONF_API_KEY: api_key}
save_json(hass.config.path(CONFIG_FILE), conf)
req_config = _CONFIGURING.pop(host)
configurator.request_done(req_config)
hass.async_add_job(success)
async_setup_sabnzbd(hass, sab_api, config, config.get(CONF_NAME, DEFAULT_NAME))
_CONFIGURING[host] = configurator.async_request_config(
DEFAULT_NAME,
async_configuration_callback,
description="Enter the API Key",
submit_caption="Confirm",
fields=[{"id": CONF_API_KEY, "name": "API Key", "type": ""}],
)
class SabnzbdApiData:
"""Class for storing/refreshing sabnzbd api queue data."""
def __init__(self, sab_api, name, sensors):
"""Initialize component."""
self.sab_api = sab_api
self.name = name
self.sensors = sensors
async def async_pause_queue(self):
"""Pause Sabnzbd queue."""
try:
return await self.sab_api.pause_queue()
except SabnzbdApiException as err:
_LOGGER.error(err)
return False
async def async_resume_queue(self):
"""Resume Sabnzbd queue."""
try:
return await self.sab_api.resume_queue()
except SabnzbdApiException as err:
_LOGGER.error(err)
return False
async def async_set_queue_speed(self, limit):
"""Set speed limit for the Sabnzbd queue."""
try:
return await self.sab_api.set_speed_limit(limit)
except SabnzbdApiException as err:
_LOGGER.error(err)
return False
def get_queue_field(self, field):
"""Return the value for the given field from the Sabnzbd queue."""
return self.sab_api.queue.get(field)
| {
"content_hash": "d87bc7feee53ecd929e4b706fb0a52e0",
"timestamp": "",
"source": "github",
"line_count": 273,
"max_line_length": 87,
"avg_line_length": 32.3003663003663,
"alnum_prop": 0.6358584713086868,
"repo_name": "sander76/home-assistant",
"id": "a420ca5381462e9e0bfdf9d4be2b1086ac9d8a8a",
"size": "8818",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/sabnzbd/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "36548768"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
import logging
from fusion_attention import AttentionMask, FusionAttention
from fusion_reshape import FusionReshape
from onnx import numpy_helper
from onnx_model import OnnxModel
from onnx_model_bert import BertOnnxModel
logger = logging.getLogger(__name__)
class FusionBartEncoderAttention(FusionAttention):
"""
Fuse Bart Attention subgraph into one Attention node.
"""
def __init__(
self,
model: OnnxModel,
hidden_size: int,
num_heads: int,
attention_mask: AttentionMask,
):
super().__init__(model, hidden_size, num_heads, attention_mask)
def check_runtime_shape_path(
self,
reshape_qkv_2,
reshape_qkv_1,
reshape_q_2,
reshape_k_2,
reshape_v_2,
root_input,
):
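        # Sanity-check the dynamic shape computation feeding each Reshape: the
        # Shape -> Gather -> Unsqueeze -> Concat chains must all derive from the
        # same root input, otherwise fusing the attention subgraph is not safe.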
concat_qkv_2_path = self.model.match_parent_path(reshape_qkv_2, ["Concat"], [1])
if concat_qkv_2_path is None:
return False
concat_qkv_2 = concat_qkv_2_path[0]
reshape_qkv_2_path_1 = self.model.match_parent_path(concat_qkv_2, ["Unsqueeze", "Gather", "Shape"], [0, 0, 0])
reshape_qkv_2_path_2 = self.model.match_parent_path(concat_qkv_2, ["Unsqueeze", "Gather", "Shape"], [1, 0, 0])
reshape_qkv_2_path_3 = self.model.match_parent_path(concat_qkv_2, ["Unsqueeze", "Gather", "Shape"], [2, 0, 0])
if reshape_qkv_2_path_1 is None or reshape_qkv_2_path_2 is None or reshape_qkv_2_path_3 is None:
return False
_, gather_1, shape_1 = reshape_qkv_2_path_1
_, gather_2, shape_2 = reshape_qkv_2_path_2
_, _, shape_3 = reshape_qkv_2_path_3
if shape_1.input[0] != root_input or shape_2.input[0] != root_input or shape_3.input[0] != root_input:
return False
reshape_qkv_1_path_1 = self.model.match_parent_path(reshape_qkv_1, ["Concat", "Unsqueeze", "Gather"], [1, 0, 0])
reshape_qkv_1_path_2 = self.model.match_parent_path(reshape_qkv_1, ["Concat", "Unsqueeze", "Gather"], [1, 2, 0])
if reshape_qkv_1_path_1 is None or reshape_qkv_1_path_2 is None:
return False
if reshape_qkv_1_path_1[-1].name != gather_1.name or reshape_qkv_1_path_2[-1].name != gather_2.name:
return False
reshape_q_2_path = self.model.match_parent_path(reshape_q_2, ["Concat", "Unsqueeze", "Mul"], [1, 0, 0])
reshape_k_2_path = self.model.match_parent_path(reshape_k_2, ["Concat", "Unsqueeze", "Mul"], [1, 0, 0])
reshape_v_2_path = self.model.match_parent_path(reshape_v_2, ["Concat", "Unsqueeze", "Mul"], [1, 0, 0])
if reshape_q_2_path is None or reshape_k_2_path is None or reshape_v_2_path is None:
return False
mul_q = reshape_q_2_path[-1]
mul_k = reshape_k_2_path[-1]
mul_v = reshape_v_2_path[-1]
gather_1_out = gather_1.output[0]
if mul_q.input[0] != gather_1_out or mul_k.input[0] != gather_1_out or mul_v.input[0] != gather_1_out:
return False
return True
def fuse(self, normalize_node, input_name_to_nodes, output_name_to_node):
# SkipLayerNormalization has two inputs, and one of them is the root input for attention.
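        # Walk up from the normalize node through the output projection:
        # Add <- MatMul <- Reshape <- Transpose <- Reshape <- MatMul.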
qkv_nodes = self.model.match_parent_path(
normalize_node,
["Add", "MatMul", "Reshape", "Transpose", "Reshape", "MatMul"],
[None, 1, 0, 0, 0, 0],
)
if qkv_nodes is not None:
(
add_out,
matmul_out,
reshape_qkv_2,
transpose_qkv,
reshape_qkv_1,
matmul_qkv,
) = qkv_nodes
else:
return
other_inputs = []
        for input_name in normalize_node.input:
            if input_name not in output_name_to_node:
                continue
            if input_name == qkv_nodes[0].output[0]:
                continue
            other_inputs.append(input_name)
if len(other_inputs) != 1:
return
root_input = other_inputs[0]
children = input_name_to_nodes[root_input]
children_types = [child.op_type for child in children]
if children_types.count("MatMul") != 3:
return
v_nodes = self.model.match_parent_path(
matmul_qkv,
["Reshape", "Transpose", "Reshape", "Add", "MatMul"],
[1, 0, 0, 0, None],
)
if v_nodes is None:
logger.debug("fuse_attention: failed to match v path")
return
(reshape_v_2, transpose_v, reshape_v_1, add_v, matmul_v) = v_nodes
qk_nodes = self.model.match_parent_path(matmul_qkv, ["Softmax", "MatMul"], [0, 0])
if qk_nodes is not None:
_, matmul_qk = qk_nodes
else:
return
q_nodes = self.model.match_parent_path(
matmul_qk,
["Reshape", "Transpose", "Reshape", "Mul", "Add", "MatMul"],
[0, 0, 0, 0, 0, 1],
)
if q_nodes is not None:
reshape_q_2, _, reshape_q_1, _, add_q, matmul_q = q_nodes
else:
return
k_nodes = self.model.match_parent_path(
matmul_qk,
["Transpose", "Reshape", "Transpose", "Reshape", "Add", "MatMul"],
[1, 0, 0, 0, 0, 1],
)
if k_nodes is not None:
_, reshape_k_2, _, reshape_k_1, add_k, matmul_k = k_nodes
else:
return
if not self.check_runtime_shape_path(
reshape_qkv_2,
reshape_qkv_1,
reshape_q_2,
reshape_k_2,
reshape_v_2,
root_input,
):
return
        if matmul_v.input[0] == root_input and matmul_q.input[0] == root_input and matmul_k.input[0] == root_input:
mask_nodes = []
mask_index = None
attention_last_node = reshape_qkv_2
num_heads, hidden_size = self.get_num_heads_and_hidden_size(reshape_q_1)
if num_heads <= 0 or hidden_size <= 0 or (hidden_size % num_heads) != 0:
logger.debug("fuse_attention: failed to detect num_heads or hidden_size")
return
new_node = self.create_attention_node(
mask_index,
matmul_q,
matmul_k,
matmul_v,
add_q,
add_k,
add_v,
num_heads,
hidden_size,
root_input,
attention_last_node.output[0],
None,
)
if new_node is None:
return
self.nodes_to_add.append(new_node)
self.node_name_to_graph_name[new_node.name] = self.this_graph_name
self.nodes_to_remove.extend([attention_last_node, transpose_qkv, matmul_qkv])
self.nodes_to_remove.extend(qk_nodes)
self.nodes_to_remove.extend(q_nodes)
self.nodes_to_remove.extend(k_nodes)
self.nodes_to_remove.extend(v_nodes)
# Use prune graph to remove mask nodes since they are shared by all attention nodes.
self.nodes_to_remove.extend(mask_nodes)
self.prune_graph = True
class FusionBartReshape(FusionReshape):
def __init__(self, model: OnnxModel):
super().__init__(model)
def fuse(self, reshape_node, input_name_to_nodes, output_name_to_node):
if reshape_node.input[1] not in output_name_to_node:
return
concat_node = output_name_to_node[reshape_node.input[1]]
if concat_node.op_type != "Concat" or len(concat_node.input) != 4:
return
path0 = self.model.match_parent_path(
concat_node,
["Unsqueeze", "Gather", "Shape"],
[0, 0, 0],
output_name_to_node,
)
if path0 is None:
return
(_, gather_0, shape_0) = path0
shape = []
gather_value = self.model.get_constant_value(gather_0.input[1])
if gather_value == 0:
shape.append(0)
path1 = self.model.match_parent_path(
concat_node,
["Unsqueeze", "Gather", "Shape"],
[1, 0, 0],
output_name_to_node,
)
if path1 is None:
input_1_proto = self.model.get_initializer(concat_node.input[1])
input_2_proto = self.model.get_initializer(concat_node.input[2])
input_3_proto = self.model.get_initializer(concat_node.input[3])
if input_1_proto is None or input_2_proto is None or input_3_proto is None:
return
input_1 = numpy_helper.to_array(input_1_proto)
input_2 = numpy_helper.to_array(input_2_proto)
input_3 = numpy_helper.to_array(input_3_proto)
if len(input_1) != 1 or len(input_2) != 1 or len(input_3) != 1:
return
if not (input_1[0] == -1 and input_2[0] > 0 and input_3[0] > 0):
return
shape.extend(input_1)
shape.extend(input_2)
shape.extend(input_3)
gemm_path = self.model.match_parent_path(reshape_node, ["Add", "MatMul"], [0, 1], output_name_to_node)
if gemm_path is None:
return
top_matmul = gemm_path[-1]
root_input = top_matmul.input[0]
if shape_0.input[0] != root_input:
return
self.replace_reshape_node(shape, reshape_node, concat_node)
else:
(_, gather_1, shape_1) = path1
gather_value = self.model.get_constant_value(gather_1.input[1])
if gather_value == 1:
shape.append(0)
input_2_proto = self.model.get_initializer(concat_node.input[2])
input_3_proto = self.model.get_initializer(concat_node.input[3])
if input_2_proto is None or input_3_proto is None:
return
input_2 = numpy_helper.to_array(input_2_proto)
input_3 = numpy_helper.to_array(input_3_proto)
if len(input_2) != 1 or len(input_3) != 1:
return
if not (input_2[0] > 0 and input_3[0] > 0):
return
shape.extend(input_2)
shape.extend(input_3)
gemm_path = self.model.match_parent_path(
reshape_node, ["Mul", "Add", "MatMul"], [0, 0, 1], output_name_to_node
)
if gemm_path is None:
return
top_matmul = gemm_path[-1]
root_input = top_matmul.input[0]
if shape_0.input[0] != root_input or shape_1.input[0] != root_input:
return
self.replace_reshape_node(shape, reshape_node, concat_node)
class BartOnnxModel(BertOnnxModel):
def __init__(self, model, num_heads, hidden_size):
super().__init__(model, num_heads, hidden_size)
self.attention_mask = AttentionMask(self)
self.attention_fusion = FusionBartEncoderAttention(self, self.hidden_size, self.num_heads, self.attention_mask)
self.bart_reshape_fusion_preprocess = FusionBartReshape(self)
def fuse_attention(self):
self.attention_fusion.apply()
def preprocess(self):
self.adjust_reshape_and_expand()
self.bart_reshape_fusion_preprocess.apply()
| {
"content_hash": "d1547d40e71903630cca956544ea9bea",
"timestamp": "",
"source": "github",
"line_count": 311,
"max_line_length": 120,
"avg_line_length": 36.463022508038584,
"alnum_prop": 0.5428571428571428,
"repo_name": "microsoft/onnxruntime",
"id": "33db231c523324387a6afca7e59b5e3ed17ed119",
"size": "11588",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "onnxruntime/python/tools/transformers/onnx_model_bart.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "1763425"
},
{
"name": "Batchfile",
"bytes": "17040"
},
{
"name": "C",
"bytes": "955390"
},
{
"name": "C#",
"bytes": "2304597"
},
{
"name": "C++",
"bytes": "39435305"
},
{
"name": "CMake",
"bytes": "514764"
},
{
"name": "CSS",
"bytes": "138431"
},
{
"name": "Cuda",
"bytes": "1104338"
},
{
"name": "Dockerfile",
"bytes": "8089"
},
{
"name": "HLSL",
"bytes": "11234"
},
{
"name": "HTML",
"bytes": "5933"
},
{
"name": "Java",
"bytes": "418665"
},
{
"name": "JavaScript",
"bytes": "212575"
},
{
"name": "Jupyter Notebook",
"bytes": "218327"
},
{
"name": "Kotlin",
"bytes": "4653"
},
{
"name": "Liquid",
"bytes": "5457"
},
{
"name": "NASL",
"bytes": "2628"
},
{
"name": "Objective-C",
"bytes": "151027"
},
{
"name": "Objective-C++",
"bytes": "107084"
},
{
"name": "Pascal",
"bytes": "9597"
},
{
"name": "PowerShell",
"bytes": "16419"
},
{
"name": "Python",
"bytes": "5041661"
},
{
"name": "Roff",
"bytes": "27539"
},
{
"name": "Ruby",
"bytes": "3545"
},
{
"name": "Shell",
"bytes": "116513"
},
{
"name": "Swift",
"bytes": "115"
},
{
"name": "TypeScript",
"bytes": "973087"
}
],
"symlink_target": ""
} |
"""
_setup.py_
Cloudant / CouchDB Client Library
"""
from io import open
from os import path
from setuptools import setup, find_packages
requirements_file = open('requirements.txt')
requirements = requirements_file.read().strip().split('\n')
requirements_file.close()
version_file = open('VERSION')
version = version_file.read().strip()
version_file.close()
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup_args = {
'description': 'Cloudant / CouchDB Client Library',
'long_description': long_description,
'long_description_content_type': 'text/markdown',
'include_package_data': True,
'install_requires': requirements,
'name': 'cloudant',
'version': version,
'author': 'IBM',
'author_email': 'alfinkel@us.ibm.com',
'url': 'https://github.com/cloudant/python-cloudant',
'packages': find_packages('./src'),
'provides': find_packages('./src'),
'package_dir': {'': 'src'},
'classifiers': [
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: Apache Software License',
'Topic :: Software Development :: Libraries',
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5'
]
}
setup(**setup_args)
| {
"content_hash": "3da76e1839fd5109366fcc0c9b5f8ca2",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 73,
"avg_line_length": 30.6734693877551,
"alnum_prop": 0.6393878908848969,
"repo_name": "cloudant/python-cloudant",
"id": "a8ac4ef866638c24fde4f5762d309eeb52b43a0e",
"size": "2118",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "869584"
}
],
"symlink_target": ""
} |
import functools
import imath
import IECore
import Gaffer
import GafferUI
import GafferScene
import GafferSceneUI
import GafferDispatch
import GafferDispatchUI
##########################################################################
# Colour
##########################################################################
Gaffer.Metadata.registerValue( GafferDispatch.TaskNode, "nodeGadget:color", imath.Color3f( 0.61, 0.1525, 0.1525 ) )
Gaffer.Metadata.registerValue( GafferDispatch.TaskNode.TaskPlug, "nodule:color", imath.Color3f( 0.645, 0.2483, 0.2483 ) )
Gaffer.Metadata.registerValue( GafferDispatch.TaskNode.TaskPlug, "connectionGadget:color", imath.Color3f( 0.315, 0.0787, 0.0787 ) )
Gaffer.Metadata.registerValue( Gaffer.SubGraph, "nodeGadget:color", imath.Color3f( 0.225 ) )
Gaffer.Metadata.registerValue( Gaffer.BoxIO, "nodeGadget:color", imath.Color3f( 0.225 ) )
Gaffer.Metadata.registerValue( Gaffer.Random, "nodeGadget:color", imath.Color3f( 0.45, 0.3, 0.3 ) )
Gaffer.Metadata.registerValue( Gaffer.Expression, "nodeGadget:color", imath.Color3f( 0.3, 0.45, 0.3 ) )
Gaffer.Metadata.registerValue( Gaffer.Animation, "nodeGadget:color", imath.Color3f( 0.3, 0.3, 0.45 ) )
Gaffer.Metadata.registerValue( Gaffer.Spreadsheet, "nodeGadget:color", imath.Color3f( 0.69, 0.5445, 0.2208 ) )
Gaffer.Metadata.registerValue( GafferScene.ScenePlug, "nodule:color", imath.Color3f( 0.2401, 0.3394, 0.485 ) )
Gaffer.Metadata.registerValue( GafferScene.SceneProcessor, "nodeGadget:color", imath.Color3f( 0.495, 0.2376, 0.4229 ) )
Gaffer.Metadata.registerValue( GafferScene.SceneElementProcessor, "nodeGadget:color", imath.Color3f( 0.1886, 0.2772, 0.41 ) )
Gaffer.Metadata.registerValue( GafferScene.FilterPlug, "nodule:color", imath.Color3f( 0.69, 0.5378, 0.2283 ) )
Gaffer.Metadata.registerValue( GafferScene.Transform, "nodeGadget:color", imath.Color3f( 0.485, 0.3112, 0.2255 ) )
Gaffer.Metadata.registerValue( GafferScene.Constraint, "nodeGadget:color", imath.Color3f( 0.485, 0.3112, 0.2255 ) )
Gaffer.Metadata.registerValue( GafferScene.GlobalsProcessor, "nodeGadget:color", imath.Color3f( 0.255, 0.505, 0.28 ) )
Gaffer.Metadata.registerValue( Gaffer.FloatPlug, "nodule:color", imath.Color3f( 0.2467, 0.3762, 0.47 ) )
Gaffer.Metadata.registerValue( Gaffer.Color3fPlug, "nodule:color", imath.Color3f( 0.69, 0.5378, 0.2283 ) )
Gaffer.Metadata.registerValue( Gaffer.V3fPlug, "nodule:color", imath.Color3f( 0.47, 0.181, 0.181 ) )
##########################################################################
# Behaviour
##########################################################################
def __nodeDoubleClick( graphEditor, node ) :
GafferUI.NodeEditor.acquire( node, floating = True )
return True
GafferUI.GraphEditor.nodeDoubleClickSignal().connect( __nodeDoubleClick, scoped = False )
def __nodeContextMenu( graphEditor, node, menuDefinition ) :
menuDefinition.append( "/Edit...", { "command" : functools.partial( GafferUI.NodeEditor.acquire, node, floating = True ) } )
GafferUI.GraphEditor.appendEnabledPlugMenuDefinitions( graphEditor, node, menuDefinition )
GafferUI.GraphEditor.appendConnectionVisibilityMenuDefinitions( graphEditor, node, menuDefinition )
GafferDispatchUI.DispatcherUI.appendNodeContextMenuDefinitions( graphEditor, node, menuDefinition )
GafferUI.GraphEditor.appendContentsMenuDefinitions( graphEditor, node, menuDefinition )
GafferUI.UIEditor.appendNodeContextMenuDefinitions( graphEditor, node, menuDefinition )
GafferSceneUI.FilteredSceneProcessorUI.appendNodeContextMenuDefinitions( graphEditor, node, menuDefinition )
GafferUI.GraphBookmarksUI.appendNodeContextMenuDefinitions( graphEditor, node, menuDefinition )
GafferUI.GraphEditor.nodeContextMenuSignal().connect( __nodeContextMenu, scoped = False )
def __plugContextMenu( graphEditor, plug, menuDefinition ) :
GafferUI.GraphBookmarksUI.appendPlugContextMenuDefinitions( graphEditor, plug, menuDefinition )
GafferUI.NodeUI.appendPlugDeletionMenuDefinitions( plug, menuDefinition )
GafferUI.GraphEditor.plugContextMenuSignal().connect( __plugContextMenu, scoped = False )
def __connectionContextMenu( graphEditor, destinationPlug, menuDefinition ) :
GafferUI.GraphEditor.appendConnectionNavigationMenuDefinitions( graphEditor, destinationPlug, menuDefinition )
GafferUI.GraphEditor.connectionContextMenuSignal().connect( __connectionContextMenu, scoped = False )
| {
"content_hash": "caade512bb140d7d534d453f979e95f6",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 131,
"avg_line_length": 54.098765432098766,
"alnum_prop": 0.7382473756275674,
"repo_name": "appleseedhq/gaffer",
"id": "7f6ac11e306f74710ab10442c51961cb528817e9",
"size": "6247",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "startup/gui/graphEditor.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "39910"
},
{
"name": "C++",
"bytes": "7337901"
},
{
"name": "CMake",
"bytes": "85201"
},
{
"name": "GLSL",
"bytes": "6236"
},
{
"name": "Python",
"bytes": "7531988"
},
{
"name": "Shell",
"bytes": "15031"
}
],
"symlink_target": ""
} |
import collections
import re
import netaddr
from oslo_config import cfg
from oslo_log import log as logging
import six
from neutron.agent import firewall
from neutron.agent.linux import ip_conntrack
from neutron.agent.linux import ipset_manager
from neutron.agent.linux import iptables_comments as ic
from neutron.agent.linux import iptables_manager
from neutron.agent.linux import utils
from neutron.common import constants
from neutron.common import ipv6_utils
from neutron.extensions import portsecurity as psec
from neutron.i18n import _LI
LOG = logging.getLogger(__name__)
SG_CHAIN = 'sg-chain'
SPOOF_FILTER = 'spoof-filter'
CHAIN_NAME_PREFIX = {firewall.INGRESS_DIRECTION: 'i',
firewall.EGRESS_DIRECTION: 'o',
SPOOF_FILTER: 's'}
DIRECTION_IP_PREFIX = {firewall.INGRESS_DIRECTION: 'source_ip_prefix',
firewall.EGRESS_DIRECTION: 'dest_ip_prefix'}
IPSET_DIRECTION = {firewall.INGRESS_DIRECTION: 'src',
firewall.EGRESS_DIRECTION: 'dst'}
# length of all device prefixes (e.g. qvo, tap, qvb)
LINUX_DEV_PREFIX_LEN = 3
LINUX_DEV_LEN = 14
MAX_CONNTRACK_ZONES = 65535
comment_rule = iptables_manager.comment_rule
def port_needs_l3_security(port):
if port['fixed_ips'] or port.get('allowed_address_pairs'):
return True
else:
return False
class IptablesFirewallDriver(firewall.FirewallDriver):
"""Driver which enforces security groups through iptables rules."""
IPTABLES_DIRECTION = {firewall.INGRESS_DIRECTION: 'physdev-out',
firewall.EGRESS_DIRECTION: 'physdev-in'}
def __init__(self, namespace=None):
self.iptables = iptables_manager.IptablesManager(
use_ipv6=ipv6_utils.is_enabled(),
namespace=namespace)
# TODO(majopela, shihanzhang): refactor out ipset to a separate
# driver composed over this one
self.ipset = ipset_manager.IpsetManager(namespace=namespace)
self.ipconntrack = ip_conntrack.IpConntrackManager(
self.get_device_zone, namespace=namespace)
self._populate_initial_zone_map()
# list of port which has security group
self.filtered_ports = {}
self.unfiltered_ports = {}
self._add_fallback_chain_v4v6()
self._defer_apply = False
self._pre_defer_filtered_ports = None
self._pre_defer_unfiltered_ports = None
# List of security group rules for ports residing on this host
self.sg_rules = {}
self.pre_sg_rules = None
# List of security group member ips for ports residing on this host
self.sg_members = collections.defaultdict(
lambda: collections.defaultdict(list))
self.pre_sg_members = None
self.enable_ipset = cfg.CONF.SECURITYGROUP.enable_ipset
self._enabled_netfilter_for_bridges = False
self.updated_rule_sg_ids = set()
self.updated_sg_members = set()
        self.devices_with_updated_sg_members = collections.defaultdict(list)
def _enable_netfilter_for_bridges(self):
# we only need to set these values once, but it has to be when
# we create a bridge; before that the bridge module might not
# be loaded and the proc values aren't there.
if self._enabled_netfilter_for_bridges:
return
else:
self._enabled_netfilter_for_bridges = True
# These proc values ensure that netfilter is enabled on
# bridges; essential for enforcing security groups rules with
# OVS Hybrid. Distributions can differ on whether this is
# enabled by default or not (Ubuntu - yes, Redhat - no, for
# example).
LOG.debug("Enabling netfilter for bridges")
utils.execute(['sysctl', '-w',
'net.bridge.bridge-nf-call-arptables=1'],
run_as_root=True)
utils.execute(['sysctl', '-w',
'net.bridge.bridge-nf-call-ip6tables=1'],
run_as_root=True)
utils.execute(['sysctl', '-w',
'net.bridge.bridge-nf-call-iptables=1'],
run_as_root=True)
@property
def ports(self):
return dict(self.filtered_ports, **self.unfiltered_ports)
def _update_remote_security_group_members(self, sec_group_ids):
for sg_id in sec_group_ids:
for device in self.filtered_ports.values():
if sg_id in device.get('security_group_source_groups', []):
                    self.devices_with_updated_sg_members[sg_id].append(device)
def security_group_updated(self, action_type, sec_group_ids,
device_ids=[]):
if action_type == 'sg_rule':
self.updated_rule_sg_ids.update(sec_group_ids)
elif action_type == 'sg_member':
if device_ids:
self.updated_sg_members.update(device_ids)
else:
self._update_remote_security_group_members(sec_group_ids)
def update_security_group_rules(self, sg_id, sg_rules):
LOG.debug("Update rules of security group (%s)", sg_id)
self.sg_rules[sg_id] = sg_rules
def update_security_group_members(self, sg_id, sg_members):
LOG.debug("Update members of security group (%s)", sg_id)
self.sg_members[sg_id] = collections.defaultdict(list, sg_members)
def _ps_enabled(self, port):
return port.get(psec.PORTSECURITY, True)
def _set_ports(self, port):
if not self._ps_enabled(port):
self.unfiltered_ports[port['device']] = port
self.filtered_ports.pop(port['device'], None)
else:
self.filtered_ports[port['device']] = port
self.unfiltered_ports.pop(port['device'], None)
def _unset_ports(self, port):
self.unfiltered_ports.pop(port['device'], None)
self.filtered_ports.pop(port['device'], None)
def prepare_port_filter(self, port):
LOG.debug("Preparing device (%s) filter", port['device'])
self._remove_chains()
self._set_ports(port)
self._enable_netfilter_for_bridges()
# each security group has it own chains
self._setup_chains()
self.iptables.apply()
def update_port_filter(self, port):
LOG.debug("Updating device (%s) filter", port['device'])
if port['device'] not in self.ports:
LOG.info(_LI('Attempted to update port filter which is not '
'filtered %s'), port['device'])
return
self._remove_chains()
self._set_ports(port)
self._setup_chains()
self.iptables.apply()
def remove_port_filter(self, port):
LOG.debug("Removing device (%s) filter", port['device'])
if port['device'] not in self.ports:
LOG.info(_LI('Attempted to remove port filter which is not '
'filtered %r'), port)
return
self._remove_chains()
self._unset_ports(port)
self._setup_chains()
self.iptables.apply()
def _add_accept_rule_port_sec(self, port, direction):
self._update_port_sec_rules(port, direction, add=True)
def _remove_rule_port_sec(self, port, direction):
self._update_port_sec_rules(port, direction, add=False)
def _remove_rule_from_chain_v4v6(self, chain_name, ipv4_rules, ipv6_rules):
for rule in ipv4_rules:
self.iptables.ipv4['filter'].remove_rule(chain_name, rule)
for rule in ipv6_rules:
self.iptables.ipv6['filter'].remove_rule(chain_name, rule)
def _setup_chains(self):
"""Setup ingress and egress chain for a port."""
if not self._defer_apply:
self._setup_chains_apply(self.filtered_ports,
self.unfiltered_ports)
def _setup_chains_apply(self, ports, unfiltered_ports):
self._add_chain_by_name_v4v6(SG_CHAIN)
for port in ports.values():
self._setup_chain(port, firewall.INGRESS_DIRECTION)
self._setup_chain(port, firewall.EGRESS_DIRECTION)
self.iptables.ipv4['filter'].add_rule(SG_CHAIN, '-j ACCEPT')
self.iptables.ipv6['filter'].add_rule(SG_CHAIN, '-j ACCEPT')
for port in unfiltered_ports.values():
self._add_accept_rule_port_sec(port, firewall.INGRESS_DIRECTION)
self._add_accept_rule_port_sec(port, firewall.EGRESS_DIRECTION)
def _remove_chains(self):
"""Remove ingress and egress chain for a port."""
if not self._defer_apply:
self._remove_chains_apply(self.filtered_ports,
self.unfiltered_ports)
def _remove_chains_apply(self, ports, unfiltered_ports):
for port in ports.values():
self._remove_chain(port, firewall.INGRESS_DIRECTION)
self._remove_chain(port, firewall.EGRESS_DIRECTION)
self._remove_chain(port, SPOOF_FILTER)
for port in unfiltered_ports.values():
self._remove_rule_port_sec(port, firewall.INGRESS_DIRECTION)
self._remove_rule_port_sec(port, firewall.EGRESS_DIRECTION)
self._remove_chain_by_name_v4v6(SG_CHAIN)
def _setup_chain(self, port, DIRECTION):
self._add_chain(port, DIRECTION)
self._add_rules_by_security_group(port, DIRECTION)
def _remove_chain(self, port, DIRECTION):
chain_name = self._port_chain_name(port, DIRECTION)
self._remove_chain_by_name_v4v6(chain_name)
def _add_fallback_chain_v4v6(self):
self.iptables.ipv4['filter'].add_chain('sg-fallback')
self.iptables.ipv4['filter'].add_rule('sg-fallback', '-j DROP',
comment=ic.UNMATCH_DROP)
self.iptables.ipv6['filter'].add_chain('sg-fallback')
self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP',
comment=ic.UNMATCH_DROP)
def _add_raw_chain(self, chain_name):
self.iptables.ipv4['raw'].add_chain(chain_name)
self.iptables.ipv6['raw'].add_chain(chain_name)
def _add_chain_by_name_v4v6(self, chain_name):
self.iptables.ipv4['filter'].add_chain(chain_name)
self.iptables.ipv6['filter'].add_chain(chain_name)
def _remove_raw_chain(self, chain_name):
self.iptables.ipv4['raw'].remove_chain(chain_name)
self.iptables.ipv6['raw'].remove_chain(chain_name)
def _remove_chain_by_name_v4v6(self, chain_name):
self.iptables.ipv4['filter'].remove_chain(chain_name)
self.iptables.ipv6['filter'].remove_chain(chain_name)
def _add_rules_to_chain_v4v6(self, chain_name, ipv4_rules, ipv6_rules,
comment=None):
for rule in ipv4_rules:
self.iptables.ipv4['filter'].add_rule(chain_name, rule,
comment=comment)
for rule in ipv6_rules:
self.iptables.ipv6['filter'].add_rule(chain_name, rule,
comment=comment)
def _get_device_name(self, port):
return port['device']
def _update_port_sec_rules(self, port, direction, add=False):
# add/remove rules in FORWARD and INPUT chain
device = self._get_device_name(port)
jump_rule = ['-m physdev --%s %s --physdev-is-bridged '
'-j ACCEPT' % (self.IPTABLES_DIRECTION[direction],
device)]
if add:
self._add_rules_to_chain_v4v6(
'FORWARD', jump_rule, jump_rule, comment=ic.PORT_SEC_ACCEPT)
else:
self._remove_rule_from_chain_v4v6('FORWARD', jump_rule, jump_rule)
if direction == firewall.EGRESS_DIRECTION:
jump_rule = ['-m physdev --%s %s --physdev-is-bridged '
'-j ACCEPT' % (self.IPTABLES_DIRECTION[direction],
device)]
if add:
self._add_rules_to_chain_v4v6('INPUT', jump_rule, jump_rule,
comment=ic.PORT_SEC_ACCEPT)
else:
self._remove_rule_from_chain_v4v6(
'INPUT', jump_rule, jump_rule)
def _add_chain(self, port, direction):
chain_name = self._port_chain_name(port, direction)
self._add_chain_by_name_v4v6(chain_name)
# Note(nati) jump to the security group chain (SG_CHAIN)
        # This is needed because the packet may match rules in two port chains
        # if both ports are on the same host.
        # We accept the packet at the end of SG_CHAIN.
# jump to the security group chain
device = self._get_device_name(port)
jump_rule = ['-m physdev --%s %s --physdev-is-bridged '
'-j $%s' % (self.IPTABLES_DIRECTION[direction],
device,
SG_CHAIN)]
self._add_rules_to_chain_v4v6('FORWARD', jump_rule, jump_rule,
comment=ic.VM_INT_SG)
# jump to the chain based on the device
jump_rule = ['-m physdev --%s %s --physdev-is-bridged '
'-j $%s' % (self.IPTABLES_DIRECTION[direction],
device,
chain_name)]
self._add_rules_to_chain_v4v6(SG_CHAIN, jump_rule, jump_rule,
comment=ic.SG_TO_VM_SG)
if direction == firewall.EGRESS_DIRECTION:
self._add_rules_to_chain_v4v6('INPUT', jump_rule, jump_rule,
comment=ic.INPUT_TO_SG)
def _split_sgr_by_ethertype(self, security_group_rules):
ipv4_sg_rules = []
ipv6_sg_rules = []
for rule in security_group_rules:
if rule.get('ethertype') == constants.IPv4:
ipv4_sg_rules.append(rule)
elif rule.get('ethertype') == constants.IPv6:
if rule.get('protocol') == 'icmp':
rule['protocol'] = 'icmpv6'
ipv6_sg_rules.append(rule)
return ipv4_sg_rules, ipv6_sg_rules
def _select_sgr_by_direction(self, port, direction):
return [rule
for rule in port.get('security_group_rules', [])
if rule['direction'] == direction]
def _setup_spoof_filter_chain(self, port, table, mac_ip_pairs, rules):
if mac_ip_pairs:
chain_name = self._port_chain_name(port, SPOOF_FILTER)
table.add_chain(chain_name)
for mac, ip in mac_ip_pairs:
if ip is None:
# If fixed_ips is [] this rule will be added to the end
# of the list after the allowed_address_pair rules.
table.add_rule(chain_name,
'-m mac --mac-source %s -j RETURN'
% mac.upper(), comment=ic.PAIR_ALLOW)
else:
table.add_rule(chain_name,
'-s %s -m mac --mac-source %s -j RETURN'
% (ip, mac.upper()), comment=ic.PAIR_ALLOW)
table.add_rule(chain_name, '-j DROP', comment=ic.PAIR_DROP)
rules.append('-j $%s' % chain_name)
def _build_ipv4v6_mac_ip_list(self, mac, ip_address, mac_ipv4_pairs,
mac_ipv6_pairs):
mac = str(netaddr.EUI(mac, dialect=netaddr.mac_unix))
if netaddr.IPNetwork(ip_address).version == 4:
mac_ipv4_pairs.append((mac, ip_address))
else:
mac_ipv6_pairs.append((mac, ip_address))
def _spoofing_rule(self, port, ipv4_rules, ipv6_rules):
if port_needs_l3_security(port):
# Allow dhcp client packets
ipv4_rules += [comment_rule('-p udp -m udp --sport 68 --dport 67 '
'-j RETURN', comment=ic.DHCP_CLIENT)]
# Drop Router Advts from the port.
ipv6_rules += [comment_rule('-p icmpv6 --icmpv6-type %s '
'-j DROP' % constants.ICMPV6_TYPE_RA,
comment=ic.IPV6_RA_DROP)]
ipv6_rules += [comment_rule('-p icmpv6 -j RETURN',
comment=ic.IPV6_ICMP_ALLOW)]
ipv6_rules += [comment_rule('-p udp -m udp --sport 546 --dport '
'547 -j RETURN',
comment=ic.DHCP_CLIENT)]
mac_ipv4_pairs = []
mac_ipv6_pairs = []
if isinstance(port.get('allowed_address_pairs'), list):
for address_pair in port['allowed_address_pairs']:
self._build_ipv4v6_mac_ip_list(address_pair['mac_address'],
address_pair['ip_address'],
mac_ipv4_pairs,
mac_ipv6_pairs)
for ip in port['fixed_ips']:
self._build_ipv4v6_mac_ip_list(port['mac_address'], ip,
mac_ipv4_pairs, mac_ipv6_pairs)
if not port['fixed_ips']:
mac_ipv4_pairs.append((port['mac_address'], None))
mac_ipv6_pairs.append((port['mac_address'], None))
self._setup_spoof_filter_chain(port, self.iptables.ipv4['filter'],
mac_ipv4_pairs, ipv4_rules)
self._setup_spoof_filter_chain(port, self.iptables.ipv6['filter'],
mac_ipv6_pairs, ipv6_rules)
def _drop_dhcp_rule(self, ipv4_rules, ipv6_rules):
#Note(nati) Drop dhcp packet from VM
ipv4_rules += [comment_rule('-p udp -m udp --sport 67 --dport 68 '
'-j DROP', comment=ic.DHCP_SPOOF)]
ipv6_rules += [comment_rule('-p udp -m udp --sport 547 --dport 546 '
'-j DROP', comment=ic.DHCP_SPOOF)]
def _accept_inbound_icmpv6(self):
# Allow multicast listener, neighbor solicitation and
# neighbor advertisement into the instance
icmpv6_rules = []
for icmp6_type in constants.ICMPV6_ALLOWED_TYPES:
icmpv6_rules += ['-p icmpv6 --icmpv6-type %s -j RETURN' %
icmp6_type]
return icmpv6_rules
def _select_sg_rules_for_port(self, port, direction):
"""Select rules from the security groups the port is member of."""
port_sg_ids = port.get('security_groups', [])
port_rules = []
for sg_id in port_sg_ids:
for rule in self.sg_rules.get(sg_id, []):
if rule['direction'] == direction:
if self.enable_ipset:
port_rules.append(rule)
else:
port_rules.extend(
self._expand_sg_rule_with_remote_ips(
rule, port, direction))
return port_rules
def _expand_sg_rule_with_remote_ips(self, rule, port, direction):
"""Expand a remote group rule to rule per remote group IP."""
remote_group_id = rule.get('remote_group_id')
if remote_group_id:
ethertype = rule['ethertype']
port_ips = port.get('fixed_ips', [])
for ip in self.sg_members[remote_group_id][ethertype]:
if ip not in port_ips:
ip_rule = rule.copy()
direction_ip_prefix = DIRECTION_IP_PREFIX[direction]
ip_prefix = str(netaddr.IPNetwork(ip).cidr)
ip_rule[direction_ip_prefix] = ip_prefix
yield ip_rule
else:
yield rule
def _get_remote_sg_ids(self, port, direction=None):
sg_ids = port.get('security_groups', [])
remote_sg_ids = {constants.IPv4: set(), constants.IPv6: set()}
for sg_id in sg_ids:
for rule in self.sg_rules.get(sg_id, []):
if not direction or rule['direction'] == direction:
remote_sg_id = rule.get('remote_group_id')
ether_type = rule.get('ethertype')
if remote_sg_id and ether_type:
remote_sg_ids[ether_type].add(remote_sg_id)
return remote_sg_ids
def _add_rules_by_security_group(self, port, direction):
# select rules for current port and direction
security_group_rules = self._select_sgr_by_direction(port, direction)
security_group_rules += self._select_sg_rules_for_port(port, direction)
# make sure ipset members are updated for remote security groups
if self.enable_ipset:
remote_sg_ids = self._get_remote_sg_ids(port, direction)
self._update_ipset_members(remote_sg_ids)
# split groups by ip version
# for ipv4, iptables command is used
# for ipv6, iptables6 command is used
ipv4_sg_rules, ipv6_sg_rules = self._split_sgr_by_ethertype(
security_group_rules)
ipv4_iptables_rules = []
ipv6_iptables_rules = []
# include fixed egress/ingress rules
if direction == firewall.EGRESS_DIRECTION:
self._add_fixed_egress_rules(port,
ipv4_iptables_rules,
ipv6_iptables_rules)
elif direction == firewall.INGRESS_DIRECTION:
ipv6_iptables_rules += self._accept_inbound_icmpv6()
if port_needs_l3_security(port):
# include IPv4 and IPv6 iptable rules from security group
ipv4_iptables_rules += self._convert_sgr_to_iptables_rules(
ipv4_sg_rules)
ipv6_iptables_rules += self._convert_sgr_to_iptables_rules(
ipv6_sg_rules)
# finally add the rules to the port chain for a given direction
self._add_rules_to_chain_v4v6(self._port_chain_name(port, direction),
ipv4_iptables_rules,
ipv6_iptables_rules)
def _add_fixed_egress_rules(self, port, ipv4_iptables_rules,
ipv6_iptables_rules):
self._spoofing_rule(port,
ipv4_iptables_rules,
ipv6_iptables_rules)
if port_needs_l3_security(port):
self._drop_dhcp_rule(ipv4_iptables_rules, ipv6_iptables_rules)
def _update_ipset_members(self, security_group_ids):
for ip_version, sg_ids in security_group_ids.items():
for sg_id in sg_ids:
current_ips = self.sg_members[sg_id][ip_version]
self.ipset.set_members(sg_id, ip_version, current_ips)
def _generate_ipset_rule_args(self, sg_rule, remote_gid):
ethertype = sg_rule.get('ethertype')
ipset_name = self.ipset.get_name(remote_gid, ethertype)
if not self.ipset.set_exists(remote_gid, ethertype):
#NOTE(mangelajo): ipsets for empty groups are not created
# thus we can't reference them.
return None
ipset_direction = IPSET_DIRECTION[sg_rule.get('direction')]
args = self._generate_protocol_and_port_args(sg_rule)
args += ['-m set', '--match-set', ipset_name, ipset_direction]
args += ['-j RETURN']
return args
def _generate_protocol_and_port_args(self, sg_rule):
args = self._protocol_arg(sg_rule.get('protocol'))
args += self._port_arg('sport',
sg_rule.get('protocol'),
sg_rule.get('source_port_range_min'),
sg_rule.get('source_port_range_max'))
args += self._port_arg('dport',
sg_rule.get('protocol'),
sg_rule.get('port_range_min'),
sg_rule.get('port_range_max'))
return args
def _generate_plain_rule_args(self, sg_rule):
# These arguments MUST be in the format iptables-save will
# display them: source/dest, protocol, sport, dport, target
# Otherwise the iptables_manager code won't be able to find
# them to preserve their [packet:byte] counts.
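        # e.g. an IPv4 ingress rule allowing tcp/22 from 10.0.0.0/24 becomes roughly:
        # ['-s', '10.0.0.0/24', '-p', 'tcp', '-m', 'tcp', '--dport', '22', '-j RETURN']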
args = self._ip_prefix_arg('s', sg_rule.get('source_ip_prefix'))
args += self._ip_prefix_arg('d', sg_rule.get('dest_ip_prefix'))
args += self._generate_protocol_and_port_args(sg_rule)
args += ['-j RETURN']
return args
def _convert_sg_rule_to_iptables_args(self, sg_rule):
remote_gid = sg_rule.get('remote_group_id')
if self.enable_ipset and remote_gid:
return self._generate_ipset_rule_args(sg_rule, remote_gid)
else:
return self._generate_plain_rule_args(sg_rule)
def _convert_sgr_to_iptables_rules(self, security_group_rules):
iptables_rules = []
self._drop_invalid_packets(iptables_rules)
self._allow_established(iptables_rules)
for rule in security_group_rules:
args = self._convert_sg_rule_to_iptables_args(rule)
if args:
iptables_rules += [' '.join(args)]
iptables_rules += [comment_rule('-j $sg-fallback',
comment=ic.UNMATCHED)]
return iptables_rules
def _drop_invalid_packets(self, iptables_rules):
# Always drop invalid packets
        iptables_rules += [comment_rule('-m state --state INVALID -j DROP',
                                        comment=ic.INVALID_DROP)]
return iptables_rules
def _allow_established(self, iptables_rules):
# Allow established connections
iptables_rules += [comment_rule(
'-m state --state RELATED,ESTABLISHED -j RETURN',
comment=ic.ALLOW_ASSOC)]
return iptables_rules
def _protocol_arg(self, protocol):
if not protocol:
return []
iptables_rule = ['-p', protocol]
# iptables always adds '-m protocol' for udp and tcp
if protocol in ['udp', 'tcp']:
iptables_rule += ['-m', protocol]
return iptables_rule
def _port_arg(self, direction, protocol, port_range_min, port_range_max):
if (protocol not in ['udp', 'tcp', 'icmp', 'icmpv6']
or port_range_min is None):
return []
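        # e.g. ('dport', 'tcp', 80, 80) -> ['--dport', '80'], while
        # ('dport', 'tcp', 8000, 8080) -> ['-m', 'multiport', '--dports', '8000:8080'];
        # for icmp/icmpv6 the pair is treated as type/code instead of ports.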
if protocol in ['icmp', 'icmpv6']:
# Note(xuhanp): port_range_min/port_range_max represent
# icmp type/code when protocol is icmp or icmpv6
# icmp code can be 0 so we cannot use "if port_range_max" here
if port_range_max is not None:
return ['--%s-type' % protocol,
'%s/%s' % (port_range_min, port_range_max)]
return ['--%s-type' % protocol, '%s' % port_range_min]
elif port_range_min == port_range_max:
return ['--%s' % direction, '%s' % (port_range_min,)]
else:
return ['-m', 'multiport',
'--%ss' % direction,
'%s:%s' % (port_range_min, port_range_max)]
def _ip_prefix_arg(self, direction, ip_prefix):
        # NOTE (nati): source_group_id is converted to a list of
        # source_ip_prefix values on the server side
if ip_prefix:
return ['-%s' % direction, ip_prefix]
return []
def _port_chain_name(self, port, direction):
return iptables_manager.get_chain_name(
'%s%s' % (CHAIN_NAME_PREFIX[direction], port['device'][3:]))
def filter_defer_apply_on(self):
if not self._defer_apply:
self.iptables.defer_apply_on()
self._pre_defer_filtered_ports = dict(self.filtered_ports)
self._pre_defer_unfiltered_ports = dict(self.unfiltered_ports)
self.pre_sg_members = dict(self.sg_members)
self.pre_sg_rules = dict(self.sg_rules)
self._defer_apply = True
def _remove_unused_security_group_info(self):
"""Remove any unnecessary local security group info or unused ipsets.
This function has to be called after applying the last iptables
rules, so we're in a point where no iptable rule depends
on an ipset we're going to delete.
"""
filtered_ports = self.filtered_ports.values()
remote_sgs_to_remove = self._determine_remote_sgs_to_remove(
filtered_ports)
for ip_version, remote_sg_ids in six.iteritems(remote_sgs_to_remove):
if self.enable_ipset:
self._remove_ipsets_for_remote_sgs(ip_version, remote_sg_ids)
self._remove_sg_members(remote_sgs_to_remove)
# Remove unused security group rules
for remove_group_id in self._determine_sg_rules_to_remove(
filtered_ports):
self.sg_rules.pop(remove_group_id, None)
def _determine_remote_sgs_to_remove(self, filtered_ports):
"""Calculate which remote security groups we don't need anymore.
We do the calculation for each ip_version.
"""
sgs_to_remove_per_ipversion = {constants.IPv4: set(),
constants.IPv6: set()}
remote_group_id_sets = self._get_remote_sg_ids_sets_by_ipversion(
filtered_ports)
for ip_version, remote_group_id_set in (
six.iteritems(remote_group_id_sets)):
sgs_to_remove_per_ipversion[ip_version].update(
set(self.pre_sg_members) - remote_group_id_set)
return sgs_to_remove_per_ipversion
def _get_remote_sg_ids_sets_by_ipversion(self, filtered_ports):
"""Given a port, calculates the remote sg references by ip_version."""
remote_group_id_sets = {constants.IPv4: set(),
constants.IPv6: set()}
for port in filtered_ports:
remote_sg_ids = self._get_remote_sg_ids(port)
for ip_version in (constants.IPv4, constants.IPv6):
remote_group_id_sets[ip_version] |= remote_sg_ids[ip_version]
return remote_group_id_sets
def _determine_sg_rules_to_remove(self, filtered_ports):
"""Calculate which security groups need to be removed.
We find out by subtracting our previous sg group ids,
with the security groups associated to a set of ports.
"""
port_group_ids = self._get_sg_ids_set_for_ports(filtered_ports)
return set(self.pre_sg_rules) - port_group_ids
def _get_sg_ids_set_for_ports(self, filtered_ports):
"""Get the port security group ids as a set."""
port_group_ids = set()
for port in filtered_ports:
port_group_ids.update(port.get('security_groups', []))
return port_group_ids
def _remove_ipsets_for_remote_sgs(self, ip_version, remote_sg_ids):
"""Remove system ipsets matching the provided parameters."""
for remote_sg_id in remote_sg_ids:
self.ipset.destroy(remote_sg_id, ip_version)
def _remove_sg_members(self, remote_sgs_to_remove):
"""Remove sg_member entries."""
ipv4_sec_group_set = remote_sgs_to_remove.get(constants.IPv4)
ipv6_sec_group_set = remote_sgs_to_remove.get(constants.IPv6)
for sg_id in (ipv4_sec_group_set & ipv6_sec_group_set):
if sg_id in self.sg_members:
del self.sg_members[sg_id]
def _find_deleted_sg_rules(self, sg_id):
del_rules = list()
for pre_rule in self.pre_sg_rules.get(sg_id, []):
if pre_rule not in self.sg_rules.get(sg_id, []):
del_rules.append(pre_rule)
return del_rules
def _find_devices_on_security_group(self, sg_id):
device_list = list()
for device in self.filtered_ports.values():
if sg_id in device.get('security_groups', []):
device_list.append(device)
return device_list
def _clean_deleted_sg_rule_conntrack_entries(self):
deleted_sg_ids = set()
for sg_id in self.updated_rule_sg_ids:
del_rules = self._find_deleted_sg_rules(sg_id)
if not del_rules:
continue
device_list = self._find_devices_on_security_group(sg_id)
for rule in del_rules:
self.ipconntrack.delete_conntrack_state_by_rule(
device_list, rule)
deleted_sg_ids.add(sg_id)
for id in deleted_sg_ids:
self.updated_rule_sg_ids.remove(id)
def _clean_updated_sg_member_conntrack_entries(self):
updated_device_ids = set()
for device in self.updated_sg_members:
sec_group_change = False
device_info = self.filtered_ports.get(device)
pre_device_info = self._pre_defer_filtered_ports.get(device)
if not device_info or not pre_device_info:
continue
for sg_id in pre_device_info.get('security_groups', []):
if sg_id not in device_info.get('security_groups', []):
sec_group_change = True
break
if not sec_group_change:
continue
for ethertype in [constants.IPv4, constants.IPv6]:
self.ipconntrack.delete_conntrack_state_by_remote_ips(
[device_info], ethertype, set())
updated_device_ids.add(device)
for id in updated_device_ids:
self.updated_sg_members.remove(id)
def _clean_deleted_remote_sg_members_conntrack_entries(self):
deleted_sg_ids = set()
        for sg_id, devices in self.devices_with_updated_sg_members.items():
for ethertype in [constants.IPv4, constants.IPv6]:
pre_ips = self._get_sg_members(
self.pre_sg_members, sg_id, ethertype)
cur_ips = self._get_sg_members(
self.sg_members, sg_id, ethertype)
ips = (pre_ips - cur_ips)
if devices and ips:
self.ipconntrack.delete_conntrack_state_by_remote_ips(
devices, ethertype, ips)
deleted_sg_ids.add(sg_id)
for id in deleted_sg_ids:
            self.devices_with_updated_sg_members.pop(id, None)
def _remove_conntrack_entries_from_sg_updates(self):
self._clean_deleted_sg_rule_conntrack_entries()
self._clean_updated_sg_member_conntrack_entries()
self._clean_deleted_remote_sg_members_conntrack_entries()
def _get_sg_members(self, sg_info, sg_id, ethertype):
return set(sg_info.get(sg_id, {}).get(ethertype, []))
def filter_defer_apply_off(self):
if self._defer_apply:
self._defer_apply = False
self._remove_chains_apply(self._pre_defer_filtered_ports,
self._pre_defer_unfiltered_ports)
self._setup_chains_apply(self.filtered_ports,
self.unfiltered_ports)
self.iptables.defer_apply_off()
self._remove_conntrack_entries_from_sg_updates()
self._remove_unused_security_group_info()
self._pre_defer_filtered_ports = None
self._pre_defer_unfiltered_ports = None
def _populate_initial_zone_map(self):
"""Setup the map between devices and zones based on current rules."""
self._device_zone_map = {}
rules = self.iptables.get_rules_for_table('raw')
for rule in rules:
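            # Jump rules created by _get_jump_rule() look roughly like
            # '... -m physdev --physdev-in qvbXXXXXXXXXXX -j CT --zone 7'.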
match = re.match(r'.* --physdev-in (?P<dev>[a-zA-Z0-9\-]+)'
r'.* -j CT --zone (?P<zone>\d+).*', rule)
if match:
# strip off any prefix that the interface is using
short_port_id = match.group('dev')[LINUX_DEV_PREFIX_LEN:]
self._device_zone_map[short_port_id] = int(match.group('zone'))
LOG.debug("Populated conntrack zone map: %s", self._device_zone_map)
def get_device_zone(self, port_id):
# we have to key the device_zone_map based on the fragment of the port
# UUID that shows up in the interface name. This is because the initial
# map is populated strictly based on interface names that we don't know
# the full UUID of.
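        # e.g. only the first LINUX_DEV_LEN - LINUX_DEV_PREFIX_LEN (14 - 3 = 11)
        # characters of the port UUID fit in the interface name.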
short_port_id = port_id[:(LINUX_DEV_LEN - LINUX_DEV_PREFIX_LEN)]
try:
return self._device_zone_map[short_port_id]
except KeyError:
self._free_zones_from_removed_ports()
return self._generate_device_zone(short_port_id)
def _free_zones_from_removed_ports(self):
"""Clears any entries from the zone map of removed ports."""
existing_ports = [
port['device'][:(LINUX_DEV_LEN - LINUX_DEV_PREFIX_LEN)]
for port in (list(self.filtered_ports.values()) +
list(self.unfiltered_ports.values()))
]
removed = set(self._device_zone_map) - set(existing_ports)
for dev in removed:
self._device_zone_map.pop(dev, None)
def _generate_device_zone(self, short_port_id):
"""Generates a unique conntrack zone for the passed in ID."""
zone = self._find_open_zone()
self._device_zone_map[short_port_id] = zone
LOG.debug("Assigned CT zone %(z)s to port %(dev)s.",
{'z': zone, 'dev': short_port_id})
return self._device_zone_map[short_port_id]
def _find_open_zone(self):
# call set to dedup because old ports may be mapped to the same zone.
zones_in_use = sorted(set(self._device_zone_map.values()))
if not zones_in_use:
return 1
# attempt to increment onto the highest used zone first. if we hit the
# end, go back and look for any gaps left by removed devices.
last = zones_in_use[-1]
if last < MAX_CONNTRACK_ZONES:
return last + 1
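        # Gap search (only reached once the top zone is in use): e.g. if zones
        # 1, 2 and 4 are in use, index 2 holds 4 and 4 - 2 != 1, so zone 3 is
        # returned.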
for index, used in enumerate(zones_in_use):
if used - index != 1:
# gap found, let's use it!
return index + 1
# conntrack zones exhausted :( :(
raise RuntimeError("iptables conntrack zones exhausted. "
"iptables rules cannot be applied.")
class OVSHybridIptablesFirewallDriver(IptablesFirewallDriver):
OVS_HYBRID_TAP_PREFIX = constants.TAP_DEVICE_PREFIX
def _port_chain_name(self, port, direction):
return iptables_manager.get_chain_name(
'%s%s' % (CHAIN_NAME_PREFIX[direction], port['device']))
def _get_device_name(self, port):
return (self.OVS_HYBRID_TAP_PREFIX + port['device'])[:LINUX_DEV_LEN]
def _get_br_device_name(self, port):
return ('qvb' + port['device'])[:LINUX_DEV_LEN]
def _get_jump_rule(self, port, direction):
if direction == firewall.INGRESS_DIRECTION:
device = self._get_br_device_name(port)
else:
device = self._get_device_name(port)
jump_rule = '-m physdev --physdev-in %s -j CT --zone %s' % (
device, self.get_device_zone(port['device']))
return jump_rule
def _add_raw_chain_rules(self, port, direction):
jump_rule = self._get_jump_rule(port, direction)
self.iptables.ipv4['raw'].add_rule('PREROUTING', jump_rule)
self.iptables.ipv6['raw'].add_rule('PREROUTING', jump_rule)
def _remove_raw_chain_rules(self, port, direction):
jump_rule = self._get_jump_rule(port, direction)
self.iptables.ipv4['raw'].remove_rule('PREROUTING', jump_rule)
self.iptables.ipv6['raw'].remove_rule('PREROUTING', jump_rule)
def _add_chain(self, port, direction):
super(OVSHybridIptablesFirewallDriver, self)._add_chain(port,
direction)
if direction in [firewall.INGRESS_DIRECTION,
firewall.EGRESS_DIRECTION]:
self._add_raw_chain_rules(port, direction)
def _remove_chain(self, port, direction):
super(OVSHybridIptablesFirewallDriver, self)._remove_chain(port,
direction)
if direction in [firewall.INGRESS_DIRECTION,
firewall.EGRESS_DIRECTION]:
self._remove_raw_chain_rules(port, direction)
| {
"content_hash": "06787856835d92a415ed62457ca90449",
"timestamp": "",
"source": "github",
"line_count": 904,
"max_line_length": 79,
"avg_line_length": 44.77654867256637,
"alnum_prop": 0.5695439497998913,
"repo_name": "adelina-t/neutron",
"id": "a695733e89aed77c5b6fa28549244ad7e294411f",
"size": "41120",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neutron/agent/linux/iptables_firewall.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "7866194"
},
{
"name": "Shell",
"bytes": "13865"
}
],
"symlink_target": ""
} |
"""A setup module for the gRPC Python package."""
import os
import os.path
import shutil
import sys
from distutils import core as _core
from distutils import extension as _extension
import setuptools
from setuptools.command import egg_info
import grpc.tools.command
PY3 = sys.version_info.major == 3
# Ensure we're in the proper directory whether or not we're being used by pip.
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# Break import-style to ensure we can actually find our in-repo dependencies.
import commands
import grpc_version
LICENSE = '3-clause BSD'
PACKAGE_DIRECTORIES = {
'': '.',
}
INSTALL_REQUIRES = (
'coverage>=4.0',
'enum34>=1.0.4',
'futures>=2.2.0',
'grpcio>={version}'.format(version=grpc_version.VERSION),
'grpcio-tools>={version}'.format(version=grpc_version.VERSION),
'grpcio-health-checking>={version}'.format(version=grpc_version.VERSION),
'oauth2client>=1.4.7',
'protobuf>=3.0.0',
'six>=1.10',
)
COMMAND_CLASS = {
# Run `preprocess` *before* doing any packaging!
'preprocess': commands.GatherProto,
'build_package_protos': grpc.tools.command.BuildPackageProtos,
'build_py': commands.BuildPy,
'run_interop': commands.RunInterop,
'test_lite': commands.TestLite
}
PACKAGE_DATA = {
'tests.interop': [
'credentials/ca.pem',
'credentials/server1.key',
'credentials/server1.pem',
],
'tests.protoc_plugin': [
'protoc_plugin_test.proto',
],
'tests.unit': [
'credentials/ca.pem',
'credentials/server1.key',
'credentials/server1.pem',
],
'tests': [
'tests.json'
],
}
TEST_SUITE = 'tests'
TEST_LOADER = 'tests:Loader'
TEST_RUNNER = 'tests:Runner'
TESTS_REQUIRE = INSTALL_REQUIRES
PACKAGES = setuptools.find_packages('.')
setuptools.setup(
name='grpcio-tests',
version=grpc_version.VERSION,
license=LICENSE,
packages=list(PACKAGES),
package_dir=PACKAGE_DIRECTORIES,
package_data=PACKAGE_DATA,
install_requires=INSTALL_REQUIRES,
cmdclass=COMMAND_CLASS,
tests_require=TESTS_REQUIRE,
test_suite=TEST_SUITE,
test_loader=TEST_LOADER,
test_runner=TEST_RUNNER,
)
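# Illustrative sketch (not part of this setup.py): the classes referenced in
# COMMAND_CLASS above (commands.GatherProto, commands.BuildPy, ...) live in the
# in-repo `commands` module and are invoked as `python setup.py <name>`.  Any
# such command only needs to follow the setuptools Command protocol shown
# below; the class name, description and body here are made up for
# illustration and reuse the `setuptools` import from the top of the file.
class _SketchCommand(setuptools.Command):
    """Skeleton of a custom command suitable as a COMMAND_CLASS value."""
    description = 'example custom packaging step'
    user_options = []                  # (long-name, short-name, help) triples
    def initialize_options(self):
        pass                           # set option defaults here
    def finalize_options(self):
        pass                           # validate or derive option values here
    def run(self):
        # a real command does its work here, e.g. gathering .proto files into
        # place before packaging or generating code with protoc
        self.announce('running sketch command')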
| {
"content_hash": "b0009cc985b667167460ca474db8f680",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 78,
"avg_line_length": 23.934065934065934,
"alnum_prop": 0.6786042240587695,
"repo_name": "a-veitch/grpc",
"id": "3524355cbfc62a752cab2cde98773bb037c04150",
"size": "3707",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/grpcio_tests/setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "27083"
},
{
"name": "C",
"bytes": "5650454"
},
{
"name": "C#",
"bytes": "1238603"
},
{
"name": "C++",
"bytes": "1881016"
},
{
"name": "CMake",
"bytes": "71719"
},
{
"name": "DTrace",
"bytes": "147"
},
{
"name": "JavaScript",
"bytes": "338059"
},
{
"name": "M4",
"bytes": "36981"
},
{
"name": "Makefile",
"bytes": "667388"
},
{
"name": "Objective-C",
"bytes": "285424"
},
{
"name": "PHP",
"bytes": "148056"
},
{
"name": "Protocol Buffer",
"bytes": "116483"
},
{
"name": "PureBasic",
"bytes": "147"
},
{
"name": "Python",
"bytes": "1158651"
},
{
"name": "Ruby",
"bytes": "586123"
},
{
"name": "Shell",
"bytes": "48311"
},
{
"name": "Swift",
"bytes": "5418"
}
],
"symlink_target": ""
} |
"""
local path implementation.
"""
from __future__ import with_statement
from contextlib import contextmanager
import sys, os, atexit, io, uuid
import py
from py._path import common
from py._path.common import iswin32, fspath
from stat import S_ISLNK, S_ISDIR, S_ISREG
from os.path import abspath, normpath, isabs, exists, isdir, isfile, islink, dirname
if sys.version_info > (3,0):
def map_as_list(func, iter):
return list(map(func, iter))
else:
map_as_list = map
ALLOW_IMPORTLIB_MODE = sys.version_info > (3,5)
if ALLOW_IMPORTLIB_MODE:
import importlib
class Stat(object):
def __getattr__(self, name):
return getattr(self._osstatresult, "st_" + name)
def __init__(self, path, osstatresult):
self.path = path
self._osstatresult = osstatresult
@property
def owner(self):
if iswin32:
raise NotImplementedError("XXX win32")
import pwd
entry = py.error.checked_call(pwd.getpwuid, self.uid)
return entry[0]
@property
def group(self):
""" return group name of file. """
if iswin32:
raise NotImplementedError("XXX win32")
import grp
entry = py.error.checked_call(grp.getgrgid, self.gid)
return entry[0]
def isdir(self):
return S_ISDIR(self._osstatresult.st_mode)
def isfile(self):
return S_ISREG(self._osstatresult.st_mode)
def islink(self):
st = self.path.lstat()
return S_ISLNK(self._osstatresult.st_mode)
class PosixPath(common.PathBase):
def chown(self, user, group, rec=0):
""" change ownership to the given user and group.
user and group may be specified by a number or
by a name. if rec is True change ownership
recursively.
"""
uid = getuserid(user)
gid = getgroupid(group)
if rec:
for x in self.visit(rec=lambda x: x.check(link=0)):
if x.check(link=0):
py.error.checked_call(os.chown, str(x), uid, gid)
py.error.checked_call(os.chown, str(self), uid, gid)
def readlink(self):
""" return value of a symbolic link. """
return py.error.checked_call(os.readlink, self.strpath)
def mklinkto(self, oldname):
""" posix style hard link to another name. """
py.error.checked_call(os.link, str(oldname), str(self))
def mksymlinkto(self, value, absolute=1):
""" create a symbolic link with the given value (pointing to another name). """
if absolute:
py.error.checked_call(os.symlink, str(value), self.strpath)
else:
base = self.common(value)
# with posix local paths '/' is always a common base
relsource = self.__class__(value).relto(base)
reldest = self.relto(base)
n = reldest.count(self.sep)
target = self.sep.join(('..', )*n + (relsource, ))
py.error.checked_call(os.symlink, target, self.strpath)
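# Illustrative sketch (not part of the py library): mksymlinkto(..., absolute=0)
# above builds a *relative* link target by counting how many directories lie
# between the link and the common base and prefixing that many '..' components
# before the source's base-relative path.  The helper below reproduces the same
# computation with plain strings (POSIX separators and Python 3's
# os.path.commonpath assumed); the result matches
# os.path.relpath(source, dirname(link)).
def _sketch_relative_link_target(link_path, source_path):
    base = os.path.commonpath([link_path, source_path])   # common ancestor
    relsource = os.path.relpath(source_path, base)        # base -> source
    reldest = os.path.relpath(link_path, base)            # base -> link
    hops = reldest.count(os.sep)                          # dirs between base and link
    return os.sep.join(('..',) * hops + (relsource,))
# _sketch_relative_link_target('/a/b/c/lnk', '/a/x/y') == '../../x/y'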
def getuserid(user):
import pwd
if not isinstance(user, int):
user = pwd.getpwnam(user)[2]
return user
def getgroupid(group):
import grp
if not isinstance(group, int):
group = grp.getgrnam(group)[2]
return group
FSBase = not iswin32 and PosixPath or common.PathBase
class LocalPath(FSBase):
""" object oriented interface to os.path and other local filesystem
related information.
"""
class ImportMismatchError(ImportError):
""" raised on pyimport() if there is a mismatch of __file__'s"""
sep = os.sep
class Checkers(common.Checkers):
def _stat(self):
try:
return self._statcache
except AttributeError:
try:
self._statcache = self.path.stat()
except py.error.ELOOP:
self._statcache = self.path.lstat()
return self._statcache
def dir(self):
return S_ISDIR(self._stat().mode)
def file(self):
return S_ISREG(self._stat().mode)
def exists(self):
return self._stat()
def link(self):
st = self.path.lstat()
return S_ISLNK(st.mode)
def __init__(self, path=None, expanduser=False):
""" Initialize and return a local Path instance.
Path can be relative to the current directory.
If path is None it defaults to the current working directory.
If expanduser is True, tilde-expansion is performed.
Note that Path instances always carry an absolute path.
Note also that passing in a local path object will simply return
the exact same path object. Use new() to get a new copy.
"""
if path is None:
self.strpath = py.error.checked_call(os.getcwd)
else:
try:
path = fspath(path)
except TypeError:
raise ValueError("can only pass None, Path instances "
"or non-empty strings to LocalPath")
if expanduser:
path = os.path.expanduser(path)
self.strpath = abspath(path)
def __hash__(self):
return hash(self.strpath)
def __eq__(self, other):
s1 = fspath(self)
try:
s2 = fspath(other)
except TypeError:
return False
if iswin32:
s1 = s1.lower()
try:
s2 = s2.lower()
except AttributeError:
return False
return s1 == s2
def __ne__(self, other):
return not (self == other)
def __lt__(self, other):
return fspath(self) < fspath(other)
def __gt__(self, other):
return fspath(self) > fspath(other)
def samefile(self, other):
""" return True if 'other' references the same file as 'self'.
"""
other = fspath(other)
if not isabs(other):
other = abspath(other)
if self == other:
return True
if iswin32:
return False # there is no samefile
return py.error.checked_call(
os.path.samefile, self.strpath, other)
def remove(self, rec=1, ignore_errors=False):
""" remove a file or directory (or a directory tree if rec=1).
if ignore_errors is True, errors while removing directories will
be ignored.
"""
if self.check(dir=1, link=0):
if rec:
# force remove of readonly files on windows
if iswin32:
self.chmod(0o700, rec=1)
import shutil
py.error.checked_call(
shutil.rmtree, self.strpath,
ignore_errors=ignore_errors)
else:
py.error.checked_call(os.rmdir, self.strpath)
else:
if iswin32:
self.chmod(0o700)
py.error.checked_call(os.remove, self.strpath)
def computehash(self, hashtype="md5", chunksize=524288):
""" return hexdigest of hashvalue for this file. """
try:
try:
import hashlib as mod
except ImportError:
if hashtype == "sha1":
hashtype = "sha"
mod = __import__(hashtype)
hash = getattr(mod, hashtype)()
except (AttributeError, ImportError):
raise ValueError("Don't know how to compute %r hash" %(hashtype,))
f = self.open('rb')
try:
while 1:
buf = f.read(chunksize)
if not buf:
return hash.hexdigest()
hash.update(buf)
finally:
f.close()
def new(self, **kw):
""" create a modified version of this path.
the following keyword arguments modify various path parts::
a:/some/path/to/a/file.ext
xx drive
xxxxxxxxxxxxxxxxx dirname
xxxxxxxx basename
xxxx purebasename
xxx ext
"""
obj = object.__new__(self.__class__)
if not kw:
obj.strpath = self.strpath
return obj
drive, dirname, basename, purebasename,ext = self._getbyspec(
"drive,dirname,basename,purebasename,ext")
if 'basename' in kw:
if 'purebasename' in kw or 'ext' in kw:
raise ValueError("invalid specification %r" % kw)
else:
pb = kw.setdefault('purebasename', purebasename)
try:
ext = kw['ext']
except KeyError:
pass
else:
if ext and not ext.startswith('.'):
ext = '.' + ext
kw['basename'] = pb + ext
if ('dirname' in kw and not kw['dirname']):
kw['dirname'] = drive
else:
kw.setdefault('dirname', dirname)
kw.setdefault('sep', self.sep)
obj.strpath = normpath(
"%(dirname)s%(sep)s%(basename)s" % kw)
return obj
def _getbyspec(self, spec):
""" see new for what 'spec' can be. """
res = []
parts = self.strpath.split(self.sep)
args = filter(None, spec.split(',') )
append = res.append
for name in args:
if name == 'drive':
append(parts[0])
elif name == 'dirname':
append(self.sep.join(parts[:-1]))
else:
basename = parts[-1]
if name == 'basename':
append(basename)
else:
i = basename.rfind('.')
if i == -1:
purebasename, ext = basename, ''
else:
purebasename, ext = basename[:i], basename[i:]
if name == 'purebasename':
append(purebasename)
elif name == 'ext':
append(ext)
else:
raise ValueError("invalid part specification %r" % name)
return res
def dirpath(self, *args, **kwargs):
""" return the directory path joined with any given path arguments. """
if not kwargs:
path = object.__new__(self.__class__)
path.strpath = dirname(self.strpath)
if args:
path = path.join(*args)
return path
return super(LocalPath, self).dirpath(*args, **kwargs)
def join(self, *args, **kwargs):
""" return a new path by appending all 'args' as path
components. if abs=1 is used restart from root if any
of the args is an absolute path.
"""
sep = self.sep
strargs = [fspath(arg) for arg in args]
strpath = self.strpath
if kwargs.get('abs'):
newargs = []
for arg in reversed(strargs):
if isabs(arg):
strpath = arg
strargs = newargs
break
newargs.insert(0, arg)
# special case for when we have e.g. strpath == "/"
actual_sep = "" if strpath.endswith(sep) else sep
for arg in strargs:
arg = arg.strip(sep)
if iswin32:
# allow unix style paths even on windows.
arg = arg.strip('/')
arg = arg.replace('/', sep)
strpath = strpath + actual_sep + arg
actual_sep = sep
obj = object.__new__(self.__class__)
obj.strpath = normpath(strpath)
return obj
def open(self, mode='r', ensure=False, encoding=None):
""" return an opened file with the given mode.
If ensure is True, create parent directories if needed.
"""
if ensure:
self.dirpath().ensure(dir=1)
if encoding:
return py.error.checked_call(io.open, self.strpath, mode, encoding=encoding)
return py.error.checked_call(open, self.strpath, mode)
def _fastjoin(self, name):
child = object.__new__(self.__class__)
child.strpath = self.strpath + self.sep + name
return child
def islink(self):
return islink(self.strpath)
def check(self, **kw):
if not kw:
return exists(self.strpath)
if len(kw) == 1:
if "dir" in kw:
return not kw["dir"] ^ isdir(self.strpath)
if "file" in kw:
return not kw["file"] ^ isfile(self.strpath)
return super(LocalPath, self).check(**kw)
_patternchars = set("*?[" + os.path.sep)
def listdir(self, fil=None, sort=None):
""" list directory contents, possibly filter by the given fil func
and possibly sorted.
"""
if fil is None and sort is None:
names = py.error.checked_call(os.listdir, self.strpath)
return map_as_list(self._fastjoin, names)
if isinstance(fil, py.builtin._basestring):
if not self._patternchars.intersection(fil):
child = self._fastjoin(fil)
if exists(child.strpath):
return [child]
return []
fil = common.FNMatcher(fil)
names = py.error.checked_call(os.listdir, self.strpath)
res = []
for name in names:
child = self._fastjoin(name)
if fil is None or fil(child):
res.append(child)
self._sortlist(res, sort)
return res
def size(self):
""" return size of the underlying file object """
return self.stat().size
def mtime(self):
""" return last modification time of the path. """
return self.stat().mtime
def copy(self, target, mode=False, stat=False):
""" copy path to target.
        If mode is True, will copy permission from path to target.
If stat is True, copy permission, last modification
time, last access time, and flags from path to target.
"""
if self.check(file=1):
if target.check(dir=1):
target = target.join(self.basename)
assert self!=target
copychunked(self, target)
if mode:
copymode(self.strpath, target.strpath)
if stat:
copystat(self, target)
else:
def rec(p):
return p.check(link=0)
for x in self.visit(rec=rec):
relpath = x.relto(self)
newx = target.join(relpath)
newx.dirpath().ensure(dir=1)
if x.check(link=1):
newx.mksymlinkto(x.readlink())
continue
elif x.check(file=1):
copychunked(x, newx)
elif x.check(dir=1):
newx.ensure(dir=1)
if mode:
copymode(x.strpath, newx.strpath)
if stat:
copystat(x, newx)
def rename(self, target):
""" rename this path to target. """
target = fspath(target)
return py.error.checked_call(os.rename, self.strpath, target)
def dump(self, obj, bin=1):
""" pickle object into path location"""
f = self.open('wb')
import pickle
try:
py.error.checked_call(pickle.dump, obj, f, bin)
finally:
f.close()
def mkdir(self, *args):
""" create & return the directory joined with args. """
p = self.join(*args)
py.error.checked_call(os.mkdir, fspath(p))
return p
def write_binary(self, data, ensure=False):
""" write binary data into path. If ensure is True create
missing parent directories.
"""
if ensure:
self.dirpath().ensure(dir=1)
with self.open('wb') as f:
f.write(data)
def write_text(self, data, encoding, ensure=False):
""" write text data into path using the specified encoding.
If ensure is True create missing parent directories.
"""
if ensure:
self.dirpath().ensure(dir=1)
with self.open('w', encoding=encoding) as f:
f.write(data)
def write(self, data, mode='w', ensure=False):
""" write data into path. If ensure is True create
missing parent directories.
"""
if ensure:
self.dirpath().ensure(dir=1)
if 'b' in mode:
if not py.builtin._isbytes(data):
raise ValueError("can only process bytes")
else:
if not py.builtin._istext(data):
if not py.builtin._isbytes(data):
data = str(data)
else:
data = py.builtin._totext(data, sys.getdefaultencoding())
f = self.open(mode)
try:
f.write(data)
finally:
f.close()
def _ensuredirs(self):
parent = self.dirpath()
if parent == self:
return self
if parent.check(dir=0):
parent._ensuredirs()
if self.check(dir=0):
try:
self.mkdir()
except py.error.EEXIST:
# race condition: file/dir created by another thread/process.
# complain if it is not a dir
if self.check(dir=0):
raise
return self
def ensure(self, *args, **kwargs):
""" ensure that an args-joined path exists (by default as
a file). if you specify a keyword argument 'dir=True'
then the path is forced to be a directory path.
"""
p = self.join(*args)
if kwargs.get('dir', 0):
return p._ensuredirs()
else:
p.dirpath()._ensuredirs()
if not p.check(file=1):
p.open('w').close()
return p
def stat(self, raising=True):
""" Return an os.stat() tuple. """
if raising == True:
return Stat(self, py.error.checked_call(os.stat, self.strpath))
try:
return Stat(self, os.stat(self.strpath))
except KeyboardInterrupt:
raise
except Exception:
return None
def lstat(self):
""" Return an os.lstat() tuple. """
return Stat(self, py.error.checked_call(os.lstat, self.strpath))
def setmtime(self, mtime=None):
""" set modification time for the given path. if 'mtime' is None
(the default) then the file's mtime is set to current time.
Note that the resolution for 'mtime' is platform dependent.
"""
if mtime is None:
return py.error.checked_call(os.utime, self.strpath, mtime)
try:
return py.error.checked_call(os.utime, self.strpath, (-1, mtime))
except py.error.EINVAL:
return py.error.checked_call(os.utime, self.strpath, (self.atime(), mtime))
def chdir(self):
""" change directory to self and return old current directory """
try:
old = self.__class__()
except py.error.ENOENT:
old = None
py.error.checked_call(os.chdir, self.strpath)
return old
@contextmanager
def as_cwd(self):
"""
Return a context manager, which changes to the path's dir during the
managed "with" context.
On __enter__ it returns the old dir, which might be ``None``.
"""
old = self.chdir()
try:
yield old
finally:
if old is not None:
old.chdir()
def realpath(self):
""" return a new path which contains no symbolic links."""
return self.__class__(os.path.realpath(self.strpath))
def atime(self):
""" return last access time of the path. """
return self.stat().atime
def __repr__(self):
return 'local(%r)' % self.strpath
def __str__(self):
""" return string representation of the Path. """
return self.strpath
def chmod(self, mode, rec=0):
""" change permissions to the given mode. If mode is an
integer it directly encodes the os-specific modes.
if rec is True perform recursively.
"""
if not isinstance(mode, int):
raise TypeError("mode %r must be an integer" % (mode,))
if rec:
for x in self.visit(rec=rec):
py.error.checked_call(os.chmod, str(x), mode)
py.error.checked_call(os.chmod, self.strpath, mode)
def pypkgpath(self):
""" return the Python package path by looking for the last
directory upwards which still contains an __init__.py.
Return None if a pkgpath can not be determined.
"""
pkgpath = None
for parent in self.parts(reverse=True):
if parent.isdir():
if not parent.join('__init__.py').exists():
break
if not isimportable(parent.basename):
break
pkgpath = parent
return pkgpath
def _ensuresyspath(self, ensuremode, path):
if ensuremode:
s = str(path)
if ensuremode == "append":
if s not in sys.path:
sys.path.append(s)
else:
if s != sys.path[0]:
sys.path.insert(0, s)
def pyimport(self, modname=None, ensuresyspath=True):
""" return path as an imported python module.
If modname is None, look for the containing package
and construct an according module name.
The module will be put/looked up in sys.modules.
if ensuresyspath is True then the root dir for importing
the file (taking __init__.py files into account) will
be prepended to sys.path if it isn't there already.
If ensuresyspath=="append" the root dir will be appended
if it isn't already contained in sys.path.
if ensuresyspath is False no modification of syspath happens.
        The special value ensuresyspath=="importlib" is intended
        purely for use in pytest; it is capable only of importing
        separate .py files outside packages, e.g. for a test suite
without any __init__.py file. It effectively allows having
same-named test modules in different places and offers
mild opt-in via this option. Note that it works only in
recent versions of python.
"""
if not self.check():
raise py.error.ENOENT(self)
if ensuresyspath == 'importlib':
if modname is None:
modname = self.purebasename
if not ALLOW_IMPORTLIB_MODE:
raise ImportError(
"Can't use importlib due to old version of Python")
spec = importlib.util.spec_from_file_location(
modname, str(self))
if spec is None:
raise ImportError(
"Can't find module %s at location %s" %
(modname, str(self))
)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
return mod
pkgpath = None
if modname is None:
pkgpath = self.pypkgpath()
if pkgpath is not None:
pkgroot = pkgpath.dirpath()
names = self.new(ext="").relto(pkgroot).split(self.sep)
if names[-1] == "__init__":
names.pop()
modname = ".".join(names)
else:
pkgroot = self.dirpath()
modname = self.purebasename
self._ensuresyspath(ensuresyspath, pkgroot)
__import__(modname)
mod = sys.modules[modname]
if self.basename == "__init__.py":
return mod # we don't check anything as we might
# be in a namespace package ... too icky to check
modfile = mod.__file__
if modfile[-4:] in ('.pyc', '.pyo'):
modfile = modfile[:-1]
elif modfile.endswith('$py.class'):
modfile = modfile[:-9] + '.py'
if modfile.endswith(os.path.sep + "__init__.py"):
if self.basename != "__init__.py":
modfile = modfile[:-12]
try:
issame = self.samefile(modfile)
except py.error.ENOENT:
issame = False
if not issame:
ignore = os.getenv('PY_IGNORE_IMPORTMISMATCH')
if ignore != '1':
raise self.ImportMismatchError(modname, modfile, self)
return mod
else:
try:
return sys.modules[modname]
except KeyError:
# we have a custom modname, do a pseudo-import
import types
mod = types.ModuleType(modname)
mod.__file__ = str(self)
sys.modules[modname] = mod
try:
py.builtin.execfile(str(self), mod.__dict__)
except:
del sys.modules[modname]
raise
return mod
def sysexec(self, *argv, **popen_opts):
""" return stdout text from executing a system child process,
where the 'self' path points to executable.
The process is directly invoked and not through a system shell.
"""
from subprocess import Popen, PIPE
argv = map_as_list(str, argv)
popen_opts['stdout'] = popen_opts['stderr'] = PIPE
proc = Popen([str(self)] + argv, **popen_opts)
stdout, stderr = proc.communicate()
ret = proc.wait()
if py.builtin._isbytes(stdout):
stdout = py.builtin._totext(stdout, sys.getdefaultencoding())
if ret != 0:
if py.builtin._isbytes(stderr):
stderr = py.builtin._totext(stderr, sys.getdefaultencoding())
raise py.process.cmdexec.Error(ret, ret, str(self),
stdout, stderr,)
return stdout
    def sysfind(cls, name, checker=None, paths=None):
        """ return a path object found by looking at the system's
        underlying PATH specification. If the checker is not None
it will be invoked to filter matching paths. If a binary
cannot be found, None is returned
Note: This is probably not working on plain win32 systems
but may work on cygwin.
"""
if isabs(name):
p = py.path.local(name)
if p.check(file=1):
return p
else:
if paths is None:
if iswin32:
paths = os.environ['Path'].split(';')
if '' not in paths and '.' not in paths:
paths.append('.')
try:
systemroot = os.environ['SYSTEMROOT']
except KeyError:
pass
else:
paths = [path.replace('%SystemRoot%', systemroot)
for path in paths]
else:
paths = os.environ['PATH'].split(':')
tryadd = []
if iswin32:
tryadd += os.environ['PATHEXT'].split(os.pathsep)
tryadd.append("")
for x in paths:
for addext in tryadd:
p = py.path.local(x).join(name, abs=True) + addext
try:
if p.check(file=1):
if checker:
if not checker(p):
continue
return p
except py.error.EACCES:
pass
return None
sysfind = classmethod(sysfind)
def _gethomedir(cls):
try:
x = os.environ['HOME']
except KeyError:
try:
x = os.environ["HOMEDRIVE"] + os.environ['HOMEPATH']
except KeyError:
return None
return cls(x)
_gethomedir = classmethod(_gethomedir)
# """
# special class constructors for local filesystem paths
# """
@classmethod
def get_temproot(cls):
""" return the system's temporary directory
(where tempfiles are usually created in)
"""
import tempfile
return py.path.local(tempfile.gettempdir())
@classmethod
def mkdtemp(cls, rootdir=None):
""" return a Path object pointing to a fresh new temporary directory
(which we created ourself).
"""
import tempfile
if rootdir is None:
rootdir = cls.get_temproot()
return cls(py.error.checked_call(tempfile.mkdtemp, dir=str(rootdir)))
def make_numbered_dir(cls, prefix='session-', rootdir=None, keep=3,
lock_timeout=172800): # two days
""" return unique directory with a number greater than the current
maximum one. The number is assumed to start directly after prefix.
if keep is true directories with a number less than (maxnum-keep)
will be removed. If .lock files are used (lock_timeout non-zero),
algorithm is multi-process safe.
"""
if rootdir is None:
rootdir = cls.get_temproot()
nprefix = prefix.lower()
def parse_num(path):
""" parse the number out of a path (if it matches the prefix) """
nbasename = path.basename.lower()
if nbasename.startswith(nprefix):
try:
return int(nbasename[len(nprefix):])
except ValueError:
pass
        def create_lockfile(path):
            """ exclusively create the lockfile; raises if it cannot be created """
mypid = os.getpid()
lockfile = path.join('.lock')
if hasattr(lockfile, 'mksymlinkto'):
lockfile.mksymlinkto(str(mypid))
else:
fd = py.error.checked_call(os.open, str(lockfile), os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o644)
with os.fdopen(fd, 'w') as f:
f.write(str(mypid))
return lockfile
def atexit_remove_lockfile(lockfile):
""" ensure lockfile is removed at process exit """
mypid = os.getpid()
def try_remove_lockfile():
# in a fork() situation, only the last process should
# remove the .lock, otherwise the other processes run the
# risk of seeing their temporary dir disappear. For now
# we remove the .lock in the parent only (i.e. we assume
# that the children finish before the parent).
if os.getpid() != mypid:
return
try:
lockfile.remove()
except py.error.Error:
pass
atexit.register(try_remove_lockfile)
# compute the maximum number currently in use with the prefix
lastmax = None
while True:
maxnum = -1
for path in rootdir.listdir():
num = parse_num(path)
if num is not None:
maxnum = max(maxnum, num)
# make the new directory
try:
udir = rootdir.mkdir(prefix + str(maxnum+1))
if lock_timeout:
lockfile = create_lockfile(udir)
atexit_remove_lockfile(lockfile)
except (py.error.EEXIST, py.error.ENOENT, py.error.EBUSY):
# race condition (1): another thread/process created the dir
# in the meantime - try again
# race condition (2): another thread/process spuriously acquired
# lock treating empty directory as candidate
# for removal - try again
# race condition (3): another thread/process tried to create the lock at
# the same time (happened in Python 3.3 on Windows)
# https://ci.appveyor.com/project/pytestbot/py/build/1.0.21/job/ffi85j4c0lqwsfwa
if lastmax == maxnum:
raise
lastmax = maxnum
continue
break
def get_mtime(path):
""" read file modification time """
try:
return path.lstat().mtime
except py.error.Error:
pass
garbage_prefix = prefix + 'garbage-'
def is_garbage(path):
""" check if path denotes directory scheduled for removal """
bn = path.basename
return bn.startswith(garbage_prefix)
# prune old directories
udir_time = get_mtime(udir)
if keep and udir_time:
for path in rootdir.listdir():
num = parse_num(path)
if num is not None and num <= (maxnum - keep):
try:
# try acquiring lock to remove directory as exclusive user
if lock_timeout:
create_lockfile(path)
except (py.error.EEXIST, py.error.ENOENT, py.error.EBUSY):
path_time = get_mtime(path)
if not path_time:
# assume directory doesn't exist now
continue
if abs(udir_time - path_time) < lock_timeout:
# assume directory with lockfile exists
# and lock timeout hasn't expired yet
continue
# path dir locked for exclusive use
# and scheduled for removal to avoid another thread/process
# treating it as a new directory or removal candidate
garbage_path = rootdir.join(garbage_prefix + str(uuid.uuid4()))
try:
path.rename(garbage_path)
garbage_path.remove(rec=1)
except KeyboardInterrupt:
raise
except: # this might be py.error.Error, WindowsError ...
pass
if is_garbage(path):
try:
path.remove(rec=1)
except KeyboardInterrupt:
raise
except: # this might be py.error.Error, WindowsError ...
pass
# make link...
try:
username = os.environ['USER'] #linux, et al
except KeyError:
try:
username = os.environ['USERNAME'] #windows
except KeyError:
username = 'current'
src = str(udir)
dest = src[:src.rfind('-')] + '-' + username
try:
os.unlink(dest)
except OSError:
pass
try:
os.symlink(src, dest)
except (OSError, AttributeError, NotImplementedError):
pass
return udir
make_numbered_dir = classmethod(make_numbered_dir)
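# Illustrative sketch (not part of the py library): the diagram in
# LocalPath.new() above splits a path into drive / dirname / basename /
# purebasename / ext.  The function below spells that split out for a concrete
# path and shows how new() recombines the parts; it assumes POSIX separators
# and is only meant to be run by hand.
def _sketch_new_usage():
    p = LocalPath('/some/path/to/a/file.ext')
    assert p._getbyspec('dirname,basename,purebasename,ext') == \
        ['/some/path/to/a', 'file.ext', 'file', '.ext']
    assert str(p.new(ext='.txt')) == '/some/path/to/a/file.txt'
    assert str(p.new(basename='b.cfg')) == '/some/path/to/a/b.cfg'
    assert str(p.new(dirname='/tmp')) == '/tmp/file.ext'
    return p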
def copymode(src, dest):
""" copy permission from src to dst. """
import shutil
shutil.copymode(src, dest)
def copystat(src, dest):
""" copy permission, last modification time,
last access time, and flags from src to dst."""
import shutil
shutil.copystat(str(src), str(dest))
def copychunked(src, dest):
chunksize = 524288 # half a meg of bytes
fsrc = src.open('rb')
try:
fdest = dest.open('wb')
try:
while 1:
buf = fsrc.read(chunksize)
if not buf:
break
fdest.write(buf)
finally:
fdest.close()
finally:
fsrc.close()
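# Illustrative sketch (not part of the py library): LocalPath.pyimport() above
# derives the module name from the package layout (walking up __init__.py files
# via pypkgpath) unless modname is given, and ensuresyspath controls whether the
# package root is prepended, appended, or -- with "importlib" -- bypassed for a
# lone .py file.  The file and package names below are made up; the importlib
# branch assumes Python 3.5+ and tmpdir is expected to be a LocalPath.
def _sketch_pyimport_usage(tmpdir):
    pkg = tmpdir.ensure('demo_pkg', dir=1)
    pkg.ensure('__init__.py')
    helpers = pkg.join('helpers.py')
    helpers.write("ANSWER = 42\n")
    mod = helpers.pyimport()                          # imported as 'demo_pkg.helpers'
    assert mod.ANSWER == 42
    lone = tmpdir.join('standalone.py')
    lone.write("VALUE = 1\n")
    solo = lone.pyimport(ensuresyspath='importlib')   # no package required
    assert solo.VALUE == 1
    return mod, solo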
def isimportable(name):
if name and (name[0].isalpha() or name[0] == '_'):
name = name.replace("_", '')
return not name or name.isalnum()
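# Illustrative sketch (not part of the py library): make_numbered_dir() above
# implements the "prefix-N" scheme its docstring describes -- each call creates
# the next higher numbered directory, a .lock file marks it as in use, and
# directories numbered <= (maxnum - keep) are pruned once their lock is gone or
# stale.  The prefix below is made up; concrete numbers depend on what already
# exists under the chosen root.
def _sketch_numbered_dir_usage():
    root = LocalPath.mkdtemp()                 # fresh scratch root
    first = LocalPath.make_numbered_dir(prefix='demo-', rootdir=root, keep=2)
    second = LocalPath.make_numbered_dir(prefix='demo-', rootdir=root, keep=2)
    # first ends in 'demo-0' and second in 'demo-1'; once 'demo-2' exists,
    # 'demo-0' becomes a pruning candidate (keep=2) as soon as its .lock has
    # been removed at process exit or has outlived lock_timeout.
    return first, second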
| {
"content_hash": "ad34032eb056fa51cec171b6ac0b1a80",
"timestamp": "",
"source": "github",
"line_count": 1027,
"max_line_length": 111,
"avg_line_length": 35.73222979552094,
"alnum_prop": 0.5169632395018666,
"repo_name": "lmregus/Portfolio",
"id": "0e856a66da06f0dacc92feb680c435b915e3b2da",
"size": "36697",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python/design_patterns/env/lib/python3.7/site-packages/py/_path/local.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "27682"
},
{
"name": "C++",
"bytes": "25458"
},
{
"name": "CSS",
"bytes": "12842"
},
{
"name": "HTML",
"bytes": "49171"
},
{
"name": "Java",
"bytes": "99711"
},
{
"name": "JavaScript",
"bytes": "827"
},
{
"name": "Python",
"bytes": "42857"
},
{
"name": "Shell",
"bytes": "5710"
}
],
"symlink_target": ""
} |
"""
sdksolver_stress_unit_tests.py
stress-tests the sdk solver with tens of thousands of valid solved and unsolved
puzzles read from file
:created on: 20160616
__author__ = 'Frederic Dupont'
:License: GPL3
"""
# @TODO: Try to use ddt module --> data driven tests to simplify further
import random
import unittest
from sudoku.puzzle import make_grid_from_string
from tests import get_test_data
class TestFromDataPuzzlesRandomized(unittest.TestCase):
"""stress tests of Puzzle with a large number of solved and unsolved puzzles
"""
# TODO: add stress tests for invalid solved and unsolved puzzles
def setUp(self):
self.path = 'resources/'
self.picked_tests = list(range(10000))
random.shuffle(self.picked_tests)
self.picked_tests = self.picked_tests[:100]
def test_valid_solved_puzzle(self):
"""
        uses the solved grids read from a file to test that a valid solved grid is recognized as valid
"""
valid_solved_data = get_test_data.get_data_from(self.path + 'solved_10000_grids_startwith_123456789.txt')
self._run_random_data_against_expected(valid_solved_data, len(valid_solved_data))
def test_valid_unsolved_puzzle_45_numbers(self):
"""
        uses the unsolved grids (45 given numbers) read from a file to test that a valid grid is recognized as valid
"""
valid_unsolved_data = get_test_data.get_data_from(self.path + 'unsolved_10000_grids_45_numbers.txt')
self._run_random_data_against_expected(valid_unsolved_data, len(valid_unsolved_data))
def test_valid_unsolved_puzzle_40_numbers(self):
"""
        uses the unsolved grids (40 given numbers) read from a file to test that a valid grid is recognized as valid
"""
valid_unsolved_data = get_test_data.get_data_from(self.path + 'unsolved_10000_grids_40_numbers.txt')
self._run_random_data_against_expected(valid_unsolved_data, len(valid_unsolved_data))
def test_valid_unsolved_puzzle_35_numbers(self):
"""
        uses the unsolved grids (35 given numbers) read from a file to test that a valid grid is recognized as valid
"""
valid_unsolved_data = get_test_data.get_data_from(self.path + 'unsolved_10000_grids_35_numbers.txt')
self._run_random_data_against_expected(valid_unsolved_data, len(valid_unsolved_data))
def test_valid_unsolved_puzzle_30_numbers(self):
"""
        uses the unsolved grids (30 given numbers) read from a file to test that a valid grid is recognized as valid
"""
valid_unsolved_data = get_test_data.get_data_from(self.path + 'unsolved_10000_grids_30_numbers.txt')
self._run_random_data_against_expected(valid_unsolved_data, len(valid_unsolved_data))
def test_valid_unsolved_puzzle_25_numbers(self):
"""
        uses the unsolved grids (25 given numbers) read from a file to test that a valid grid is recognized as valid
"""
valid_unsolved_data = get_test_data.get_data_from(self.path + 'unsolved_10000_grids_25_numbers.txt')
self._run_random_data_against_expected(valid_unsolved_data, len(valid_unsolved_data))
def _run_random_data_against_expected(self, data, num_tests):
"""de-duplicate the testing code"""
for idx in self.picked_tests:
if idx >= num_tests:
continue
else:
puzzle = make_grid_from_string(data[idx])
result = puzzle.is_valid()
self.assertTrue(result)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "e97796a5f8d5dc884b8858ad0b706c83",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 113,
"avg_line_length": 39.88235294117647,
"alnum_prop": 0.6622418879056047,
"repo_name": "ReblochonMasque/sudoku",
"id": "fa7caa46caf0190ca4478bf2dd84ffbb4ce8e2ec",
"size": "3390",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/puzzle_random_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "136294"
}
],
"symlink_target": ""
} |
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkRectilinearSynchronizedTemplates(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkRectilinearSynchronizedTemplates(), 'Processing.',
('vtkRectilinearGrid',), ('vtkPolyData',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
| {
"content_hash": "9916e191a14951e37c7b874955f69e82",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 69,
"avg_line_length": 42.81818181818182,
"alnum_prop": 0.692144373673036,
"repo_name": "chrisidefix/devide",
"id": "15efc3b9c82e2def766ef773a7fb04e508b5c193",
"size": "532",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "modules/vtk_basic/vtkRectilinearSynchronizedTemplates.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Diff",
"bytes": "1373"
},
{
"name": "NSIS",
"bytes": "2786"
},
{
"name": "Python",
"bytes": "3104368"
},
{
"name": "Shell",
"bytes": "7369"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/mission/quest_item/shared_brass_marshoo_q1_needed.iff"
result.attribute_template_id = -1
result.stfName("loot_nboo_n","brass_marshoo_q1_needed")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "d09bade27eabbd62f4a070d1a2834869",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 90,
"avg_line_length": 25.846153846153847,
"alnum_prop": 0.7053571428571429,
"repo_name": "anhstudios/swganh",
"id": "5660c2c170766392bacc1e61c8bb89551ddf1818",
"size": "481",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/tangible/mission/quest_item/shared_brass_marshoo_q1_needed.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 30, transform = "RelativeDifference", sigma = 0.0, exog_count = 20, ar_order = 0); | {
"content_hash": "dd704a6f207c643a4021803b87b9dd5a",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 174,
"avg_line_length": 39.142857142857146,
"alnum_prop": 0.7153284671532847,
"repo_name": "antoinecarme/pyaf",
"id": "423181044ae6772d9476fb5c3c24aca97376543b",
"size": "274",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_RelativeDifference/trend_PolyTrend/cycle_30/ar_/test_artificial_128_RelativeDifference_PolyTrend_30__20.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
"""
Django settings for scrum project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '+#r$-0#u*c59l$h9xlqe+=^t$sc(8fc+j#=ht725ieqzabom48'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
#'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
#'django.contrib.sessions',
#'django.contrib.messages',
'django.contrib.staticfiles',
# 3rd party apps
'rest_framework',
'rest_framework.authtoken',
# internal app
'board',
]
MIDDLEWARE = [
#'django.middleware.security.SecurityMiddleware',
#'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
#'django.contrib.auth.middleware.AuthenticationMiddleware',
#'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'scrum.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'scrum.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
#'ENGINE': 'django.db.backends.sqlite3',
#'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'scrum',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'fr-fr'
TIME_ZONE = 'Europe/Paris'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| {
"content_hash": "3684e8679d26dfc72bcb7b432ba7ae6c",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 91,
"avg_line_length": 26.007874015748033,
"alnum_prop": 0.6784741144414169,
"repo_name": "mezklador/lightweight-django",
"id": "3a8b4aee66069a128c6618b88209f700ed6a1342",
"size": "3303",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "05/scrum/scrum/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "177618"
},
{
"name": "HTML",
"bytes": "32617"
},
{
"name": "JavaScript",
"bytes": "31535"
},
{
"name": "Python",
"bytes": "82936"
},
{
"name": "Shell",
"bytes": "45"
}
],
"symlink_target": ""
} |
import os
from bunch import Bunch
import yaml
CWD = os.getcwd()
C = Bunch(solar_db="")
C.riak_ensemble = False
C.lock_bucket_type = None
C.counter_bucket_type = None
C.log_file = 'solar.log'
C.system_log_address = 'ipc:///tmp/solar_system_log'
C.tasks_address = 'ipc:///tmp/solar_tasks'
C.scheduler_address = 'ipc:///tmp/solar_scheduler'
C.timewatcher_address = 'ipc:///tmp/solar_timewatcher'
def _lookup_vals(setter, config, prefix=None):
for key, val in config.iteritems():
if prefix is None:
sub = [key]
else:
sub = prefix + [key]
if isinstance(val, Bunch):
_lookup_vals(setter, val, sub)
else:
setter(config, sub)
def from_configs():
paths = [
os.getenv('SOLAR_CONFIG', os.path.join(CWD, '.config')),
os.getenv('SOLAR_CONFIG_OVERRIDE', None),
os.path.join(CWD, '.config.override')
]
data = {}
def _load_from_path(data, path):
with open(path) as f:
loaded = yaml.load(f)
if loaded:
data.update(loaded)
for path in paths:
if not path:
continue
if not os.path.exists(path):
continue
if not os.path.isfile(path):
continue
        _load_from_path(data, path)
def _setter(config, path):
vals = data
for key in path:
if key not in vals:
return
vals = vals[key]
config[path[-1]] = vals
if data:
_lookup_vals(_setter, C)
def from_env():
def _setter(config, path):
env_key = '_'.join(path).upper()
if env_key in os.environ:
config[path[-1]] = os.environ[env_key]
_lookup_vals(_setter, C)
from_configs()
from_env()
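# Illustrative sketch (not part of the solar module): from_env() above walks
# every key of the config Bunch and looks for an environment variable named
# after the key path joined with '_' and upper-cased, so C.log_file is
# overridden by LOG_FILE and a nested key like db.host would map to DB_HOST.
# The throwaway Bunch and variable values below are made up for illustration;
# the helper mutates os.environ when run.
def _sketch_env_override():
    demo = Bunch(log_file='solar.log', db=Bunch(host='localhost'))
    os.environ['LOG_FILE'] = '/tmp/demo.log'
    os.environ['DB_HOST'] = 'db.example'
    def _setter(config, path):
        env_key = '_'.join(path).upper()
        if env_key in os.environ:
            config[path[-1]] = os.environ[env_key]
    _lookup_vals(_setter, demo)
    # demo.log_file == '/tmp/demo.log' and demo.db.host == 'db.example'
    return demo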
| {
"content_hash": "4f59e96a4e34a9c4f5078154b9224982",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 64,
"avg_line_length": 23.582278481012658,
"alnum_prop": 0.5442834138486312,
"repo_name": "pigmej/solar",
"id": "9d8a06bb538c02857a5049bdc3b789e9c493806a",
"size": "2474",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "solar/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Erlang",
"bytes": "10263"
},
{
"name": "Lua",
"bytes": "283"
},
{
"name": "Python",
"bytes": "534087"
},
{
"name": "Ruby",
"bytes": "1045"
},
{
"name": "Shell",
"bytes": "11025"
}
],
"symlink_target": ""
} |
from os import listdir, sep
from os.path import isdir, join
def all_ok(functions, item):
return all(f(item) for f in functions)
def ls(path, d_ok=(), f_ok=(), base=None):
for item in listdir(path):
        if isdir(join(path, item)) and all_ok(d_ok, item):
            # recurse into the subdirectory, passing both filter tuples along
            # and extending the relative base used for the yielded file paths
            sub_base = item if base is None else join(base, item)
            for k in ls(join(path, item), d_ok, f_ok, sub_base):
                yield k
            continue
if all_ok(f_ok, item):
if base is not None:
item = join(base, item)
yield item
def to_module(path):
return path[:path.index('.py')].replace(sep, '.')
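# Illustrative sketch (not part of piggyback): ls() above yields names under
# *path* whose directories pass every predicate in d_ok (and are recursed into)
# and whose files pass every predicate in f_ok, while to_module() turns a
# relative .py path into a dotted module name.  The directory name and
# predicates below are made up for illustration.
def _sketch_list_modules(path='piggyback'):
    is_py = lambda name: name.endswith('.py')
    not_hidden = lambda name: not name.startswith('.')
    return [to_module(name) for name in ls(path, d_ok=(not_hidden,), f_ok=(is_py,))]
# e.g. a directory containing utils.py and sub/mod.py yields ['utils', 'sub.mod']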
| {
"content_hash": "17c2515ec04764995bc6f228e1593a23",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 53,
"avg_line_length": 24.863636363636363,
"alnum_prop": 0.5447897623400365,
"repo_name": "eugene-eeo/piggyback",
"id": "51ea107ea3ed2c2157f7fa964fe707d49a1d315f",
"size": "547",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "piggyback/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6746"
}
],
"symlink_target": ""
} |
import unittest
from unittest import mock
from parameterized import parameterized
from airflow import configuration
from airflow.providers.amazon.aws.hooks.glue import GlueJobHook
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
from airflow.providers.amazon.aws.operators.glue import GlueJobOperator
class TestGlueJobOperator(unittest.TestCase):
@mock.patch('airflow.providers.amazon.aws.hooks.glue.GlueJobHook')
def setUp(self, glue_hook_mock):
configuration.load_test_config()
self.glue_hook_mock = glue_hook_mock
@parameterized.expand(
[
"s3://glue-examples/glue-scripts/sample_aws_glue_job.py",
"/glue-examples/glue-scripts/sample_aws_glue_job.py",
]
)
@mock.patch.object(GlueJobHook, 'get_job_state')
@mock.patch.object(GlueJobHook, 'initialize_job')
@mock.patch.object(GlueJobHook, "get_conn")
@mock.patch.object(S3Hook, "load_file")
def test_execute_without_failure(
self, script_location, mock_load_file, mock_get_conn, mock_initialize_job, mock_get_job_state
):
glue = GlueJobOperator(
task_id='test_glue_operator',
job_name='my_test_job',
script_location=script_location,
aws_conn_id='aws_default',
region_name='us-west-2',
s3_bucket='some_bucket',
iam_role_name='my_test_role',
)
mock_initialize_job.return_value = {'JobRunState': 'RUNNING', 'JobRunId': '11111'}
mock_get_job_state.return_value = 'SUCCEEDED'
glue.execute({})
mock_initialize_job.assert_called_once_with({}, {})
assert glue.job_name == 'my_test_job'
@mock.patch.object(GlueJobHook, 'job_completion')
@mock.patch.object(GlueJobHook, 'initialize_job')
@mock.patch.object(GlueJobHook, "get_conn")
@mock.patch.object(S3Hook, "load_file")
def test_execute_without_waiting_for_completion(
self, mock_load_file, mock_get_conn, mock_initialize_job, mock_job_completion
):
glue = GlueJobOperator(
task_id='test_glue_operator',
job_name='my_test_job',
script_location='s3://glue-examples/glue-scripts/sample_aws_glue_job.py',
aws_conn_id='aws_default',
region_name='us-west-2',
s3_bucket='some_bucket',
iam_role_name='my_test_role',
wait_for_completion=False,
)
mock_initialize_job.return_value = {'JobRunState': 'RUNNING', 'JobRunId': '11111'}
job_run_id = glue.execute({})
mock_initialize_job.assert_called_once_with({}, {})
mock_job_completion.assert_not_called()
assert glue.job_name == 'my_test_job'
assert job_run_id == '11111'
| {
"content_hash": "b9bfe3840e55cef723c5defd1d57185e",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 101,
"avg_line_length": 39.89855072463768,
"alnum_prop": 0.6378496185978932,
"repo_name": "bolkedebruin/airflow",
"id": "be60057392a99cdca34aa7826ac62c19c526871b",
"size": "3539",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/providers/amazon/aws/operators/test_glue.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25286"
},
{
"name": "Dockerfile",
"bytes": "40459"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "157840"
},
{
"name": "JavaScript",
"bytes": "167972"
},
{
"name": "Jinja",
"bytes": "33382"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "19287942"
},
{
"name": "Shell",
"bytes": "645244"
},
{
"name": "TypeScript",
"bytes": "173854"
}
],
"symlink_target": ""
} |
from preprocessing import builder
from preprocessing.builder import Dataset
ds = Dataset("simple", 'data/simple/categories', 'data/simple/simple-20120104-titlecat.twr',
'data/simple/simple-20120104-catlinks.twr',
'data/simple/simple-20120104-pagetitle.twr',
'\t', "Articles")
ds2 = Dataset("polish", 'data/polish/pl-20170101-cattreeid.twr',
'data/polish/pl-20170101-titlecat.twr',
'data/polish/pl-20170101-catlinks.twr',
'data/polish/pl-20170101-pagetitle.twr',
' ', "Kategorie")
builder.build_graph(builder.build_matrix(ds), ds)
# builder.build_graph(builder.build_matrix(ds2), ds2)
| {
"content_hash": "cabeeb3ce094431fcdf0ad5d12546b25",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 92,
"avg_line_length": 49.8125,
"alnum_prop": 0.562107904642409,
"repo_name": "Humblehound/WikiSpatialTree",
"id": "c0432c90d07bc74bb334917d3aad9cad8d19fdf8",
"size": "797",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "50230"
}
],
"symlink_target": ""
} |
"""
<DefineSource>
@Date : Fri Nov 14 13:20:38 2014 \n
@Author : Erwan Ledoux \n\n
</DefineSource>
The Imitater decorates a function so that it is bound onto the corresponding
Doer class as an imitate<Do>With<Module> method and dispatched through that
class's standard do-signature.
"""
#<DefineAugmentation>
import ShareYourSystem as SYS
BaseModuleStr="ShareYourSystem.Functers.Functer"
DecorationModuleStr="ShareYourSystem.Classors.Classer"
SYS.setSubModule(globals())
#</DefineAugmentation>
#<ImportSpecificModules>
import six
import inspect
from ShareYourSystem.Classors import Doer,Representer
from ShareYourSystem.Functers import Functer,Triggerer,Hooker
#</ImportSpecificModules>
#<DefineLocals>
#</DefineLocals>
#<DefineClass>
@DecorationClass()
class ImitaterClass(BaseClass):
#Definition
RepresentingKeyStrsList=[
'ImitatingFunction',
'ImitatedFunction'
]
def __init__(self,
_ImitatingFunction=None,
_ImitatedFunction=None,
**_KwargVariablesDict
):
#Call the parent init method
BaseClass.__init__(self,**_KwargVariablesDict)
def __call__(self,_Function):
#imitate
self.imitate(_Function)
#Link
self.FunctedFunction=self.ImitatedFunction
#Call the call of the parent class
return BaseClass.__call__(self,_Function)
def do_imitate(self):
#Debug
'''
print('l. 63 Imitater')
print('self.ImitatingFunction is ',self.ImitatingFunction)
print('')
'''
#Definitions
ImitatedDoMethodStr=self.ImitatingFunction.__name__
ImitatedDoStr=ImitatedDoMethodStr[0].upper()+ImitatedDoMethodStr[1:]
ImitatedDoerStr=Doer.getDoerStrWithDoStr(ImitatedDoStr)
#Debug
'''
print('ImitatedDoMethodStr is ',ImitatedDoMethodStr)
print('ImitatedDoStr is ',ImitatedDoStr)
print('ImitatedDoerStr is ',ImitatedDoerStr)
print('')
'''
#Definitions
ImitatedModule=getattr(SYS,ImitatedDoerStr)
ImitatedClass=getattr(ImitatedModule,SYS.getClassStrWithNameStr(ImitatedDoerStr))
ImitatedDoneExecStr=getattr(
ImitatedClass,
ImitatedClass.NameStr+'DoneExecStr'
).replace('def DoerFunction','def ImitaterFunction')
#Define
def imitateDo(_InstanceVariable,*_LiargVariablesList,**_KwargVariablesDict):
#Debug
'''
print('Imitater l.93 inside of the function imitateDo')
print('_InstanceVariable is ',_InstanceVariable)
print('_LiargVariablesList is ',_LiargVariablesList)
print('_KwargVariablesDict is ',_KwargVariablesDict)
print('')
'''
if len(_KwargVariablesDict)>0:
#group by
[ImitatedItemTuplesList,ImitatedNotItemTuplesList]=SYS.groupby(
lambda __ItemTuple:hasattr(_InstanceVariable,__ItemTuple[0]),
_KwargVariablesDict.items()
)
#Debug
'''
print('ImitatedItemTuplesList is ',ImitatedItemTuplesList)
print('ImitatedNotItemTuplesList is ',ImitatedNotItemTuplesList)
print('')
'''
#set in the instance the corresponding kwarged arguments
map(
lambda __ItemTuple:
#set direct explicit attributes
_InstanceVariable.__setattr__(*__ItemTuple),
ImitatedItemTuplesList
)
#Define
ImitatedKwargDict=dict(ImitatedNotItemTuplesList)
else:
#Define
ImitatedKwargDict={}
#Init
ImitatedOutputVariable=None
#Debug
'''
print('l.141 Imitater')
print('self.ImitatingFunction is ',self.ImitatingFunction)
print('ImitatedKwargDict is ',ImitatedKwargDict)
print('')
'''
#call the imitated function
if len(ImitatedKwargDict)>0:
ImitatedOutputVariable=self.ImitatingFunction(
_InstanceVariable,
*_LiargVariablesList,
**ImitatedKwargDict
)
else:
ImitatedOutputVariable=self.ImitatingFunction(
_InstanceVariable,
*_LiargVariablesList
)
#Check
if ImitatedClass.DoingGetBool==False:
#Return
return _InstanceVariable
else:
#Return the
return ImitatedOutputVariable
#Link
ImitatedFunctionKeyStr='imitate'+ImitatedDoStr+'With'+inspect.getmodule(
self.ImitatingFunction
).__name__.split('.')[-1]
if hasattr(ImitatedClass,ImitatedFunctionKeyStr)==False:
setattr(ImitatedClass,ImitatedFunctionKeyStr,imitateDo)
else:
ImitatedLastInt=sorted(
map(
lambda __MethodKeyStr:
(int)(__MethodKeyStr.split('_')[-1]),
SYS._filter(
lambda __KeyStr:
__KeyStr.startswith(ImitatedFunctionKeyStr),
ImitatedClass.__dict__.keys()
)
)
)[-1]
setattr(ImitatedClass,ImitatedFunctionKeyStr+'_'+str(ImitatedLastInt),imitateDo)
#Add to the ImitatedDoneExecStr
ImitatedDoneExecStr+='\n\treturn _InstanceVariable.'+ImitatedFunctionKeyStr+'(*_LiargVariablesList,**_KwargVariablesDict)'
#Debug
'''
print('ImitatedDoneExecStr is ')
print(ImitatedDoneExecStr)
print('')
'''
#exec
six.exec_(ImitatedDoneExecStr)
#set the name
locals()['ImitaterFunction'].__name__=self.__class__.NameStr+Doer.DoingDecorationStr+ImitatedDoMethodStr
#Link
self.ImitatedFunction=locals()['ImitaterFunction']
#</DefineClass>
| {
"content_hash": "daf99ae250f75849cf837069b00c685f",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 124,
"avg_line_length": 23.113744075829384,
"alnum_prop": 0.7094525322944433,
"repo_name": "Ledoux/ShareYourSystem",
"id": "3f8b88c19c593c404707ef144e11ba3ed5d51987",
"size": "4901",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Pythonlogy/draft/Imitater/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "86"
},
{
"name": "C++",
"bytes": "4244220"
},
{
"name": "CSS",
"bytes": "142769"
},
{
"name": "CoffeeScript",
"bytes": "37331"
},
{
"name": "HTML",
"bytes": "36211676"
},
{
"name": "JavaScript",
"bytes": "2147968"
},
{
"name": "Jupyter Notebook",
"bytes": "7930602"
},
{
"name": "Makefile",
"bytes": "6362"
},
{
"name": "PHP",
"bytes": "11096341"
},
{
"name": "Python",
"bytes": "5700092"
},
{
"name": "Ruby",
"bytes": "60"
},
{
"name": "Scala",
"bytes": "2412"
},
{
"name": "Shell",
"bytes": "2525"
},
{
"name": "Swift",
"bytes": "154"
},
{
"name": "TeX",
"bytes": "2556"
},
{
"name": "XSLT",
"bytes": "20993"
}
],
"symlink_target": ""
} |
import pytest
from neotiles import (
MatrixSize, PixelColor, Tile, TileManager, TilePosition)
from neotiles.matrixes import NTNeoPixelMatrix, NTRGBMatrix
from .fixtures import manager_neopixel, manager_rgb
class TestTileManager:
@pytest.mark.parametrize('manager', [manager_neopixel(), manager_rgb()])
def test_instantiate(self, manager):
"""
Test instantiations.
"""
cols = manager.matrix_size.cols
rows = manager.matrix_size.rows
# Check default properties.
assert isinstance(manager.matrix_size, MatrixSize) is True
assert manager.matrix_size == (cols, rows)
assert len(manager.tiles_meta) == 0
assert len(manager.tiles) == 0
pixels = manager.pixels
assert len(pixels) == rows
for row in pixels:
assert len(row) == cols
@pytest.mark.parametrize('manager', [manager_neopixel(), manager_rgb()])
def test_register_tile(self, manager):
"""
Test tile registration.
"""
red_tile = Tile(default_color=PixelColor(128, 0, 0))
grn_tile = Tile(default_color=PixelColor(0, 128, 0))
# Register two tiles, making sure the tiles length looks good.
manager.register_tile(
tile=red_tile, size=(4, 4), root=(0, 0))
assert len(manager.tiles_meta) == 1
manager.register_tile(
tile=grn_tile, size=(4, 4), root=(4, 0))
assert len(manager.tiles_meta) == 2
# Check that the tiles dict looks good.
tiles = manager.tiles_meta
assert sorted(tiles[0].keys()) == ['root', 'tile_object']
assert tiles[0]['tile_object'] == red_tile
assert isinstance(tiles[0]['root'], TilePosition) is True
assert tiles[0]['root'] == (0, 0)
# Check that the tile_objects list looks OK.
assert len(manager.tiles) == 2
assert isinstance(manager.tiles[0], Tile) is True
assert isinstance(manager.tiles[1], Tile) is True
@pytest.mark.parametrize('manager', [manager_neopixel(), manager_rgb()])
def test_deregister_tile(self, manager):
"""
Test tile deregistration.
"""
red_tile = Tile(default_color=PixelColor(128, 0, 0))
grn_tile = Tile(default_color=PixelColor(0, 128, 0))
# Register two tiles, making sure the tiles length looks good.
manager.register_tile(
tile=red_tile, size=(4, 4), root=(0, 0))
assert len(manager.tiles_meta) == 1
manager.register_tile(
tile=grn_tile, size=(4, 4), root=(4, 0))
assert len(manager.tiles_meta) == 2
# Deregister each tile.
manager.deregister_tile(red_tile)
assert len(manager.tiles_meta) == 1
manager.deregister_tile(grn_tile)
assert len(manager.tiles_meta) == 0
@pytest.mark.parametrize('manager', [manager_neopixel(), manager_rgb()])
def test_data(self, manager):
"""
Test sending data to the tile objects.
"""
red_tile = Tile(default_color=PixelColor(128, 0, 0))
grn_tile = Tile(default_color=PixelColor(0, 128, 0))
manager.register_tile(
tile=red_tile, size=(4, 4), root=(0, 0))
manager.register_tile(
tile=grn_tile, size=(4, 4), root=(4, 0))
data = 'some data'
manager.send_data_to_tiles(data)
for tile_object in manager.tiles:
assert tile_object._data == data
@pytest.mark.parametrize('manager', [manager_neopixel(), manager_rgb()])
def test_brightness(self, manager):
"""
Test setting the brightness attribute.
"""
manager.brightness = 100
assert manager.brightness == 100
with pytest.raises(ValueError):
manager.brightness = -1
with pytest.raises(ValueError):
manager.brightness = 256
with pytest.raises(ValueError):
manager.brightness = 'string'
with pytest.raises(ValueError):
manager.brightness = [50]
@pytest.mark.parametrize('manager', [manager_neopixel(), manager_rgb()])
def test_pixels(self, manager):
"""
Test retrieving the pixel colors.
"""
cols = manager.matrix_size.cols
rows = manager.matrix_size.rows
red_pixel = PixelColor(128, 0, 0, 0)
red_tile = Tile(default_color=red_pixel)
manager.register_tile(
tile=red_tile, size=(cols, rows), root=(0, 0))
# Ensure we have the right number of cols and rows, and ensure that
# each pixel is correct.
pixels = manager.pixels
assert len(pixels) == rows
for row in pixels:
assert len(row) == cols
for matrix_pixel in row:
assert matrix_pixel == red_pixel
@pytest.mark.parametrize('manager', [manager_neopixel(), manager_rgb()])
def test_tile_visibility(self, manager):
"""
Test that an invisible tile does not have its pixels drawn to the
virtual matrix.
"""
cols = manager.matrix_size.cols
rows = manager.matrix_size.rows
red_pixel = PixelColor(128, 0, 0, 0)
red_tile = Tile(default_color=red_pixel)
red_tile.visible = False
manager.register_tile(
tile=red_tile, size=(cols, rows), root=(0, 0))
manager._set_pixels_from_tiles()
pixels = manager.pixels
for row in pixels:
for matrix_pixel in row:
# Default pixel color is 0, 0, 0, 0
assert (matrix_pixel.red == matrix_pixel.green ==
matrix_pixel.blue == matrix_pixel.white == 0)
# With tile visibility enabled, the red pixels should get drawn to the
# virtual matrix.
red_tile.visible = True
manager._set_pixels_from_tiles()
for row in manager.pixels:
for matrix_pixel in row:
assert matrix_pixel == red_pixel
@pytest.mark.parametrize('manager', [manager_neopixel(), manager_rgb()])
def test_unsettable_attributes(self, manager):
"""
Try setting unsettable attributes.
"""
for unsettable in ['matrix_size', 'tiles', 'tiles_meta', 'pixels']:
with pytest.raises(AttributeError):
setattr(manager, unsettable, 'foo')
@pytest.mark.parametrize('matrix', [
NTNeoPixelMatrix(size=(3, 3), led_pin=18),
NTRGBMatrix()
])
def test_repr(self, matrix):
"""
Test the repr output for the different matrix types.
"""
tm = TileManager(matrix)
assert repr(tm) == (
'TileManager(matrix={}, draw_fps=10)'.format(repr(matrix))
)
@pytest.mark.parametrize('matrix', [
NTNeoPixelMatrix(size=(3, 3), led_pin=18),
])
def test_str(self, matrix):
"""
Test the stringified output.
"""
# Test default (no tile).
tm = TileManager(matrix)
assert str(tm) == (
'[ 0] 0, 0, 0, 0 [ 1] 0, 0, 0, 0 [ 2] 0, 0, 0, 0 \n'
'[ 3] 0, 0, 0, 0 [ 4] 0, 0, 0, 0 [ 5] 0, 0, 0, 0 \n'
'[ 6] 0, 0, 0, 0 [ 7] 0, 0, 0, 0 [ 8] 0, 0, 0, 0'
)
# Test with RGB.
red_tile = Tile(default_color=PixelColor(128, 0, 0))
tm.register_tile(tile=red_tile, size=(3, 3), root=(0, 0))
assert str(tm) == (
'[ 0] 128, 0, 0 [ 1] 128, 0, 0 [ 2] 128, 0, 0 \n'
'[ 3] 128, 0, 0 [ 4] 128, 0, 0 [ 5] 128, 0, 0 \n'
'[ 6] 128, 0, 0 [ 7] 128, 0, 0 [ 8] 128, 0, 0'
)
# Test with RGBW.
red_tile = Tile(default_color=PixelColor(128, 1, 2, 3))
tm.register_tile(tile=red_tile, size=(3, 3), root=(0, 0))
assert str(tm) == (
'[ 0] 128, 1, 2, 3 [ 1] 128, 1, 2, 3 [ 2] 128, 1, 2, 3 \n'
'[ 3] 128, 1, 2, 3 [ 4] 128, 1, 2, 3 [ 5] 128, 1, 2, 3 \n'
'[ 6] 128, 1, 2, 3 [ 7] 128, 1, 2, 3 [ 8] 128, 1, 2, 3'
)
# Test normalized RGB.
red_tile = Tile(default_color=PixelColor(1.0, 0, 0.5))
tm.register_tile(tile=red_tile, size=(3, 3), root=(0, 0))
assert str(tm) == (
'[ 0] 255, 0,127 [ 1] 255, 0,127 [ 2] 255, 0,127 \n'
'[ 3] 255, 0,127 [ 4] 255, 0,127 [ 5] 255, 0,127 \n'
'[ 6] 255, 0,127 [ 7] 255, 0,127 [ 8] 255, 0,127'
)
| {
"content_hash": "ac32b9355b9206a61af9a5f696671473",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 82,
"avg_line_length": 36.47435897435897,
"alnum_prop": 0.5454012888107791,
"repo_name": "mjoblin/neotiles",
"id": "df96b456f444a0d55408f85dff21bed795ce3c2c",
"size": "8535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_tilemanager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "73123"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import filecmp
import os
import numpy as np
import tensorflow as tf
from tensorflow_examples.lite.model_maker.core import test_util
from tensorflow_examples.lite.model_maker.core.data_util import object_detector_dataloader
from tensorflow_examples.lite.model_maker.third_party.efficientdet import hparams_config
from tensorflow_examples.lite.model_maker.third_party.efficientdet import utils
class MockDetectorModelSpec(object):
def __init__(self, model_name):
self.model_name = model_name
config = hparams_config.get_detection_config(model_name)
config.image_size = utils.parse_image_size(config.image_size)
config.update({'debug': False})
self.config = config
class ObjectDectectorDataLoaderTest(tf.test.TestCase):
def test_from_pascal_voc(self):
images_dir, annotations_dir, label_map = test_util.create_pascal_voc(
self.get_temp_dir())
model_spec = MockDetectorModelSpec('efficientdet-lite0')
data = object_detector_dataloader.DataLoader.from_pascal_voc(
images_dir, annotations_dir, label_map)
self.assertIsInstance(data, object_detector_dataloader.DataLoader)
self.assertLen(data, 1)
self.assertEqual(data.label_map, label_map)
self.assertTrue(os.path.isfile(data.annotations_json_file))
self.assertGreater(os.path.getsize(data.annotations_json_file), 0)
expected_json_file = test_util.get_test_data_path('annotations.json')
self.assertTrue(filecmp.cmp(data.annotations_json_file, expected_json_file))
ds = data.gen_dataset(model_spec, batch_size=1, is_training=False)
for i, (images, labels) in enumerate(ds):
self.assertEqual(i, 0)
images_shape = tf.shape(images).numpy()
expected_shape = np.array([1, *model_spec.config.image_size, 3])
self.assertTrue((images_shape == expected_shape).all())
self.assertLen(labels, 15)
ds1 = data.gen_dataset(model_spec, batch_size=1, is_training=True)
# This assert is commented out since it fails externally.
# self.assertEqual(ds1.cardinality(), tf.data.INFINITE_CARDINALITY)
for images, labels in ds1.take(10):
images_shape = tf.shape(images).numpy()
expected_shape = np.array([1, *model_spec.config.image_size, 3])
self.assertTrue((images_shape == expected_shape).all())
self.assertLen(labels, 15)
def test_from_csv(self):
model_spec = MockDetectorModelSpec('efficientdet-lite0')
csv_file = test_util.get_test_data_path('salads_ml_use.csv')
image_dir = test_util.get_test_data_path('salad_images')
train_data, validation_data, test_data = \
object_detector_dataloader.DataLoader.from_csv(csv_file, image_dir)
label_map = {1: 'Baked Goods', 2: 'Cheese', 3: 'Salad'}
# Checks the training data.
self.assertIsInstance(train_data, object_detector_dataloader.DataLoader)
self.assertLen(train_data, 1)
self.assertEqual(train_data.label_map, label_map)
self.assertTrue(os.path.isfile(train_data.annotations_json_file))
self.assertGreater(os.path.getsize(train_data.annotations_json_file), 0)
expected_json_file = test_util.get_test_data_path('train_annotations.json')
self.assertTrue(
filecmp.cmp(train_data.annotations_json_file, expected_json_file))
dataset = train_data.gen_dataset(model_spec, batch_size=1, is_training=True)
for images, labels in dataset.take(10):
images_shape = tf.shape(images).numpy()
expected_shape = np.array([1, *model_spec.config.image_size, 3])
self.assertTrue((images_shape == expected_shape).all())
self.assertLen(labels, 15)
# Checks the validation data.
self.assertIsNone(validation_data)
# Checks the test data.
self.assertIsInstance(test_data, object_detector_dataloader.DataLoader)
self.assertLen(test_data, 2)
self.assertEqual(test_data.label_map, label_map)
self.assertTrue(os.path.isfile(test_data.annotations_json_file))
self.assertGreater(os.path.getsize(test_data.annotations_json_file), 0)
expected_json_file = test_util.get_test_data_path('test_annotations.json')
self.assertTrue(
filecmp.cmp(test_data.annotations_json_file, expected_json_file))
dataset = test_data.gen_dataset(model_spec, batch_size=1, is_training=False)
for i, (images, labels) in enumerate(dataset):
self.assertLess(i, 2)
images_shape = tf.shape(images).numpy()
expected_shape = np.array([1, *model_spec.config.image_size, 3])
self.assertTrue((images_shape == expected_shape).all())
self.assertLen(labels, 15)
if __name__ == '__main__':
tf.test.main()
| {
"content_hash": "5dd60414e92ce5bc7571a9e75819d418",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 90,
"avg_line_length": 40.4051724137931,
"alnum_prop": 0.7136761254533817,
"repo_name": "tensorflow/examples",
"id": "3430feaf988fbc10941f026fc537568ac0cd5661",
"size": "5296",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow_examples/lite/model_maker/core/data_util/object_detector_dataloader_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "106227"
},
{
"name": "CMake",
"bytes": "1553"
},
{
"name": "CSS",
"bytes": "4746"
},
{
"name": "Dockerfile",
"bytes": "467"
},
{
"name": "HTML",
"bytes": "12491"
},
{
"name": "Java",
"bytes": "305092"
},
{
"name": "JavaScript",
"bytes": "24461"
},
{
"name": "Jupyter Notebook",
"bytes": "1733035"
},
{
"name": "Kotlin",
"bytes": "631463"
},
{
"name": "Objective-C",
"bytes": "14639"
},
{
"name": "Objective-C++",
"bytes": "14293"
},
{
"name": "Python",
"bytes": "1232357"
},
{
"name": "Ruby",
"bytes": "3744"
},
{
"name": "Shell",
"bytes": "41573"
},
{
"name": "Starlark",
"bytes": "17498"
},
{
"name": "Swift",
"bytes": "553535"
}
],
"symlink_target": ""
} |
"""SCons.Scanner.Fortran
This module implements the dependency scanner for Fortran code.
"""
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Scanner/Fortran.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
import re
import SCons.Node
import SCons.Node.FS
import SCons.Scanner
import SCons.Util
import SCons.Warnings
class F90Scanner(SCons.Scanner.Classic):
"""
A Classic Scanner subclass for Fortran source files which takes
into account both USE and INCLUDE statements. This scanner will
work for both F77 and F90 (and beyond) compilers.
Currently, this scanner assumes that the include files do not contain
USE statements. To enable the ability to deal with USE statements
in include files, add logic right after the module names are found
to loop over each include file, search for and locate each USE
statement, and append each module name to the list of dependencies.
Caching the search results in a common dictionary somewhere so that
the same include file is not searched multiple times would be a
smart thing to do.
"""
def __init__(self, name, suffixes, path_variable,
use_regex, incl_regex, def_regex, *args, **kw):
self.cre_use = re.compile(use_regex, re.M)
self.cre_incl = re.compile(incl_regex, re.M)
self.cre_def = re.compile(def_regex, re.M)
def _scan(node, env, path, self=self):
node = node.rfile()
if not node.exists():
return []
return self.scan(node, env, path)
kw['function'] = _scan
kw['path_function'] = SCons.Scanner.FindPathDirs(path_variable)
kw['recursive'] = 1
kw['skeys'] = suffixes
kw['name'] = name
SCons.Scanner.Current.__init__(self, *args, **kw)
def scan(self, node, env, path=()):
# cache the includes list in node so we only scan it once:
if node.includes is not None:
mods_and_includes = node.includes
else:
# retrieve all included filenames
includes = self.cre_incl.findall(node.get_text_contents())
# retrieve all USE'd module names
modules = self.cre_use.findall(node.get_text_contents())
# retrieve all defined module names
defmodules = self.cre_def.findall(node.get_text_contents())
# Remove all USE'd module names that are defined in the same file
# (case-insensitively)
d = {}
for m in defmodules:
d[m.lower()] = 1
modules = [m for m in modules if m.lower() not in d]
# Convert module name to a .mod filename
suffix = env.subst('$FORTRANMODSUFFIX')
modules = [x.lower() + suffix for x in modules]
# Remove duplicate items from the list
mods_and_includes = SCons.Util.unique(includes+modules)
node.includes = mods_and_includes
# This is a hand-coded DSU (decorate-sort-undecorate, or
# Schwartzian transform) pattern. The sort key is the raw name
# of the file as specified on the USE or INCLUDE line, which lets
# us keep the sort order constant regardless of whether the file
# is actually found in a Repository or locally.
nodes = []
source_dir = node.get_dir()
if callable(path):
path = path()
for dep in mods_and_includes:
n, i = self.find_include(dep, source_dir, path)
if n is None:
SCons.Warnings.warn(SCons.Warnings.DependencyWarning,
"No dependency generated for file: %s (referenced by: %s) -- file not found" % (i, node))
else:
sortkey = self.sort_key(dep)
nodes.append((sortkey, n))
return [pair[1] for pair in sorted(nodes)]
def FortranScan(path_variable="FORTRANPATH"):
"""Return a prototype Scanner instance for scanning source files
for Fortran USE & INCLUDE statements"""
# The USE statement regex matches the following:
#
# USE module_name
# USE :: module_name
# USE, INTRINSIC :: module_name
# USE, NON_INTRINSIC :: module_name
#
# Limitations
#
# -- While the regex can handle multiple USE statements on one line,
# it cannot properly handle them if they are commented out.
# In either of the following cases:
#
# ! USE mod_a ; USE mod_b [entire line is commented out]
# USE mod_a ! ; USE mod_b [in-line comment of second USE statement]
#
# the second module name (mod_b) will be picked up as a dependency
# even though it should be ignored. The only way I can see
# to rectify this would be to modify the scanner to eliminate
# the call to re.findall, read in the contents of the file,
# treating the comment character as an end-of-line character
# in addition to the normal linefeed, loop over each line,
# weeding out the comments, and looking for the USE statements.
# One advantage to this is that the regex passed to the scanner
# would no longer need to match a semicolon.
#
# -- I question whether or not we need to detect dependencies to
# INTRINSIC modules because these are built-in to the compiler.
# If we consider them a dependency, will SCons look for them, not
#    find them, and kill the build?  Or will there be standard
# compiler-specific directories we will need to point to so the
# compiler and SCons can locate the proper object and mod files?
# Here is a breakdown of the regex:
#
# (?i) : regex is case insensitive
# ^ : start of line
# (?: : group a collection of regex symbols without saving the match as a "group"
#    ^|;     : matches either the start of the line or a semicolon, which lets the regex match multiple USE statements on one line
# ) : end the unsaved grouping
# \s* : any amount of white space
# USE : match the string USE, case insensitive
# (?: : group a collection of regex symbols without saving the match as a "group"
# \s+| : match one or more whitespace OR .... (the next entire grouped set of regex symbols)
# (?: : group a collection of regex symbols without saving the match as a "group"
# (?: : establish another unsaved grouping of regex symbols
# \s* : any amount of white space
# , : match a comma
# \s* : any amount of white space
# (?:NON_)? : optionally match the prefix NON_, case insensitive
# INTRINSIC : match the string INTRINSIC, case insensitive
# )? : optionally match the ", INTRINSIC/NON_INTRINSIC" grouped expression
# \s* : any amount of white space
# :: : match a double colon that must appear after the INTRINSIC/NON_INTRINSIC attribute
# ) : end the unsaved grouping
# ) : end the unsaved grouping
# \s* : match any amount of white space
# (\w+) : match the module name that is being USE'd
#
#
use_regex = "(?i)(?:^|;)\s*USE(?:\s+|(?:(?:\s*,\s*(?:NON_)?INTRINSIC)?\s*::))\s*(\w+)"
# The INCLUDE statement regex matches the following:
#
# INCLUDE 'some_Text'
# INCLUDE "some_Text"
# INCLUDE "some_Text" ; INCLUDE "some_Text"
# INCLUDE kind_"some_Text"
# INCLUDE kind_'some_Text"
#
# where some_Text can include any alphanumeric and/or special character
# as defined by the Fortran 2003 standard.
#
# Limitations:
#
# -- The Fortran standard dictates that a " or ' in the INCLUDE'd
# string must be represented as a "" or '', if the quotes that wrap
# the entire string are either a ' or ", respectively. While the
# regular expression below can detect the ' or " characters just fine,
# the scanning logic, presently is unable to detect them and reduce
# them to a single instance. This probably isn't an issue since,
# in practice, ' or " are not generally used in filenames.
#
# -- This regex will not properly deal with multiple INCLUDE statements
# when the entire line has been commented out, ala
#
# ! INCLUDE 'some_file' ; INCLUDE 'some_file'
#
# In such cases, it will properly ignore the first INCLUDE file,
# but will actually still pick up the second. Interestingly enough,
# the regex will properly deal with these cases:
#
# INCLUDE 'some_file'
# INCLUDE 'some_file' !; INCLUDE 'some_file'
#
# To get around the above limitation, the FORTRAN programmer could
# simply comment each INCLUDE statement separately, like this
#
# ! INCLUDE 'some_file' !; INCLUDE 'some_file'
#
# The way I see it, the only way to get around this limitation would
# be to modify the scanning logic to replace the calls to re.findall
# with a custom loop that processes each line separately, throwing
# away fully commented out lines before attempting to match against
# the INCLUDE syntax.
#
# Here is a breakdown of the regex:
#
# (?i) : regex is case insensitive
# (?: : begin a non-saving group that matches the following:
# ^ : either the start of the line
# | : or
# ['">]\s*; : a semicolon that follows a single quote,
# double quote or greater than symbol (with any
# amount of whitespace in between). This will
# allow the regex to match multiple INCLUDE
# statements per line (although it also requires
# the positive lookahead assertion that is
# used below). It will even properly deal with
# (i.e. ignore) cases in which the additional
# INCLUDES are part of an in-line comment, ala
# " INCLUDE 'someFile' ! ; INCLUDE 'someFile2' "
# ) : end of non-saving group
# \s* : any amount of white space
# INCLUDE : match the string INCLUDE, case insensitive
# \s+ : match one or more white space characters
#    (?:\w+_)? : match the optional "kind-param _" prefix allowed by the standard
# [<"'] : match the include delimiter - an apostrophe, double quote, or less than symbol
# (.+?) : match one or more characters that make up
# the included path and file name and save it
# in a group. The Fortran standard allows for
# any non-control character to be used. The dot
# operator will pick up any character, including
# control codes, but I can't conceive of anyone
# putting control codes in their file names.
# The question mark indicates it is non-greedy so
# that regex will match only up to the next quote,
# double quote, or greater than symbol
# (?=["'>]) : positive lookahead assertion to match the include
# delimiter - an apostrophe, double quote, or
# greater than symbol. This level of complexity
# is required so that the include delimiter is
# not consumed by the match, thus allowing the
# sub-regex discussed above to uniquely match a
# set of semicolon-separated INCLUDE statements
# (as allowed by the F2003 standard)
include_regex = """(?i)(?:^|['">]\s*;)\s*INCLUDE\s+(?:\w+_)?[<"'](.+?)(?=["'>])"""
# The MODULE statement regex finds module definitions by matching
# the following:
#
# MODULE module_name
#
# but *not* the following:
#
# MODULE PROCEDURE procedure_name
#
# Here is a breakdown of the regex:
#
# (?i) : regex is case insensitive
# ^\s* : any amount of white space
# MODULE : match the string MODULE, case insensitive
# \s+ : match one or more white space characters
# (?!PROCEDURE) : but *don't* match if the next word matches
# PROCEDURE (negative lookahead assertion),
# case insensitive
# (\w+) : match one or more alphanumeric characters
# that make up the defined module name and
# save it in a group
def_regex = """(?i)^\s*MODULE\s+(?!PROCEDURE)(\w+)"""
scanner = F90Scanner("FortranScan",
"$FORTRANSUFFIXES",
path_variable,
use_regex,
include_regex,
def_regex)
return scanner
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| {
"content_hash": "af19d6147f4f14cdd35635edf62e63ee",
"timestamp": "",
"source": "github",
"line_count": 316,
"max_line_length": 125,
"avg_line_length": 45.32911392405063,
"alnum_prop": 0.599832449036582,
"repo_name": "EmanueleCannizzaro/scons",
"id": "179c8dbfd04616fb86ca84887acaf291a8222a7d",
"size": "14324",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/engine/SCons/Scanner/Fortran.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2491"
},
{
"name": "C",
"bytes": "659"
},
{
"name": "C++",
"bytes": "598"
},
{
"name": "CSS",
"bytes": "18502"
},
{
"name": "D",
"bytes": "1997"
},
{
"name": "HTML",
"bytes": "817651"
},
{
"name": "Java",
"bytes": "6860"
},
{
"name": "JavaScript",
"bytes": "215495"
},
{
"name": "Makefile",
"bytes": "3795"
},
{
"name": "Perl",
"bytes": "29978"
},
{
"name": "Python",
"bytes": "7510453"
},
{
"name": "Roff",
"bytes": "556545"
},
{
"name": "Ruby",
"bytes": "11074"
},
{
"name": "Shell",
"bytes": "52682"
},
{
"name": "XSLT",
"bytes": "7567242"
}
],
"symlink_target": ""
} |
try:
import pkg_resources
pkg_resources.declare_namespace(__name__)
except ImportError:
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__) # @ReservedAssignment
| {
"content_hash": "3caeea89b14d474bb10ee7d1fe7fa62a",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 69,
"avg_line_length": 25.5,
"alnum_prop": 0.6715686274509803,
"repo_name": "pgbidkar/vsphere-automation-sdk-python",
"id": "59b403a2246a77aa245bcc52d60b8b6a72185c93",
"size": "288",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "samples/vsphere/common/vim/helpers/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1232"
},
{
"name": "Python",
"bytes": "2656"
}
],
"symlink_target": ""
} |
from collections import deque
import base64
import gzip as gzip_module
import hashlib
import os
import re
import time
import uuid
from html import escape
from io import BytesIO
def resolve_content(response):
return b"".join(item for item in response.iter_content(read_file=True))
class Pipeline(object):
pipes = {}
def __init__(self, pipe_string):
self.pipe_functions = self.parse(pipe_string)
def parse(self, pipe_string):
functions = []
for item in PipeTokenizer().tokenize(pipe_string):
if not item:
break
if item[0] == "function":
functions.append((self.pipes[item[1]], []))
elif item[0] == "argument":
functions[-1][1].append(item[1])
return functions
def __call__(self, request, response):
for func, args in self.pipe_functions:
response = func(request, response, *args)
return response
class PipeTokenizer(object):
def __init__(self):
#This whole class can likely be replaced by some regexps
self.state = None
def tokenize(self, string):
self.string = string
self.state = self.func_name_state
self._index = 0
while self.state:
yield self.state()
yield None
def get_char(self):
if self._index >= len(self.string):
return None
rv = self.string[self._index]
self._index += 1
return rv
def func_name_state(self):
rv = ""
while True:
char = self.get_char()
if char is None:
self.state = None
if rv:
return ("function", rv)
else:
return None
elif char == "(":
self.state = self.argument_state
return ("function", rv)
elif char == "|":
if rv:
return ("function", rv)
else:
rv += char
def argument_state(self):
rv = ""
while True:
char = self.get_char()
if char is None:
self.state = None
return ("argument", rv)
elif char == "\\":
rv += self.get_escape()
if rv is None:
#This should perhaps be an error instead
return ("argument", rv)
elif char == ",":
return ("argument", rv)
elif char == ")":
self.state = self.func_name_state
return ("argument", rv)
else:
rv += char
def get_escape(self):
char = self.get_char()
escapes = {"n": "\n",
"r": "\r",
"t": "\t"}
return escapes.get(char, char)
class pipe(object):
def __init__(self, *arg_converters):
self.arg_converters = arg_converters
self.max_args = len(self.arg_converters)
self.min_args = 0
opt_seen = False
for item in self.arg_converters:
if not opt_seen:
if isinstance(item, opt):
opt_seen = True
else:
self.min_args += 1
else:
if not isinstance(item, opt):
raise ValueError("Non-optional argument cannot follow optional argument")
def __call__(self, f):
def inner(request, response, *args):
if not (self.min_args <= len(args) <= self.max_args):
raise ValueError("Expected between %d and %d args, got %d" %
(self.min_args, self.max_args, len(args)))
arg_values = tuple(f(x) for f, x in zip(self.arg_converters, args))
return f(request, response, *arg_values)
Pipeline.pipes[f.__name__] = inner
#We actually want the undecorated function in the main namespace
return f
class opt(object):
def __init__(self, f):
self.f = f
def __call__(self, arg):
return self.f(arg)
def nullable(func):
def inner(arg):
if arg.lower() == "null":
return None
else:
return func(arg)
return inner
def boolean(arg):
if arg.lower() in ("true", "1"):
return True
elif arg.lower() in ("false", "0"):
return False
raise ValueError
@pipe(int)
def status(request, response, code):
"""Alter the status code.
:param code: Status code to use for the response."""
response.status = code
return response
@pipe(str, str, opt(boolean))
def header(request, response, name, value, append=False):
"""Set a HTTP header.
Replaces any existing HTTP header of the same name unless
append is set, in which case the header is appended without
replacement.
:param name: Name of the header to set.
:param value: Value to use for the header.
:param append: True if existing headers should not be replaced
"""
if not append:
response.headers.set(name, value)
else:
response.headers.append(name, value)
return response
@pipe(str)
def trickle(request, response, delays):
"""Send the response in parts, with time delays.
:param delays: A string of delays and amounts, in bytes, of the
response to send. Each component is separated by
a colon. Amounts in bytes are plain integers, whilst
delays are floats prefixed with a single d e.g.
d1:100:d2
Would cause a 1 second delay, would then send 100 bytes
of the file, and then cause a 2 second delay, before sending
the remainder of the file.
If the last token is of the form rN, instead of sending the
remainder of the file, the previous N instructions will be
repeated until the whole file has been sent e.g.
d1:100:d2:r2
Causes a delay of 1s, then 100 bytes to be sent, then a 2s delay
and then a further 100 bytes followed by a two second delay
until the response has been fully sent.
"""
def parse_delays():
parts = delays.split(":")
rv = []
for item in parts:
if item.startswith("d"):
item_type = "delay"
item = item[1:]
value = float(item)
elif item.startswith("r"):
item_type = "repeat"
value = int(item[1:])
if not value % 2 == 0:
raise ValueError
else:
item_type = "bytes"
value = int(item)
if len(rv) and rv[-1][0] == item_type:
rv[-1][1] += value
else:
rv.append((item_type, value))
return rv
delays = parse_delays()
if not delays:
return response
content = resolve_content(response)
offset = [0]
if not ("Cache-Control" in response.headers or
"Pragma" in response.headers or
"Expires" in response.headers):
response.headers.set("Cache-Control", "no-cache, no-store, must-revalidate")
response.headers.set("Pragma", "no-cache")
response.headers.set("Expires", "0")
def add_content(delays, repeat=False):
for i, (item_type, value) in enumerate(delays):
if item_type == "bytes":
yield content[offset[0]:offset[0] + value]
offset[0] += value
elif item_type == "delay":
time.sleep(value)
elif item_type == "repeat":
if i != len(delays) - 1:
continue
while offset[0] < len(content):
for item in add_content(delays[-(value + 1):-1], True):
yield item
if not repeat and offset[0] < len(content):
yield content[offset[0]:]
response.content = add_content(delays)
return response
@pipe(nullable(int), opt(nullable(int)))
def slice(request, response, start, end=None):
"""Send a byte range of the response body
:param start: The starting offset. Follows python semantics including
negative numbers.
:param end: The ending offset, again with python semantics and None
(spelled "null" in a query string) to indicate the end of
the file.
"""
content = resolve_content(response)[start:end]
response.content = content
response.headers.set("Content-Length", len(content))
return response
class ReplacementTokenizer(object):
def arguments(self, token):
unwrapped = token[1:-1].decode('utf8')
return ("arguments", re.split(r",\s*", unwrapped) if unwrapped else [])
def ident(self, token):
return ("ident", token.decode('utf8'))
def index(self, token):
token = token[1:-1].decode('utf8')
try:
index = int(token)
except ValueError:
index = token
return ("index", index)
def var(self, token):
token = token[:-1].decode('utf8')
return ("var", token)
def tokenize(self, string):
assert isinstance(string, bytes)
return self.scanner.scan(string)[0]
scanner = re.Scanner([(br"\$\w+:", var),
(br"\$?\w+", ident),
(br"\[[^\]]*\]", index),
(br"\([^)]*\)", arguments)])
class FirstWrapper(object):
def __init__(self, params):
self.params = params
def __getitem__(self, key):
try:
if isinstance(key, str):
key = key.encode('iso-8859-1')
return self.params.first(key)
except KeyError:
return ""
@pipe(opt(nullable(str)))
def sub(request, response, escape_type="html"):
"""Substitute environment information about the server and request into the script.
:param escape_type: String detailing the type of escaping to use. Known values are
"html" and "none", with "html" the default for historic reasons.
The format is a very limited template language. Substitutions are
enclosed by {{ and }}. There are several available substitutions:
host
A simple string value and represents the primary host from which the
tests are being run.
domains
A dictionary of available domains indexed by subdomain name.
ports
A dictionary of lists of ports indexed by protocol.
location
A dictionary of parts of the request URL. Valid keys are
'server, 'scheme', 'host', 'hostname', 'port', 'path' and 'query'.
'server' is scheme://host:port, 'host' is hostname:port, and query
includes the leading '?', but other delimiters are omitted.
headers
A dictionary of HTTP headers in the request.
header_or_default(header, default)
The value of an HTTP header, or a default value if it is absent.
For example::
{{header_or_default(X-Test, test-header-absent)}}
GET
A dictionary of query parameters supplied with the request.
uuid()
A pseudo-random UUID suitable for usage with stash
file_hash(algorithm, filepath)
The cryptographic hash of a file. Supported algorithms: md5, sha1,
sha224, sha256, sha384, and sha512. For example::
{{file_hash(md5, dom/interfaces.html)}}
fs_path(filepath)
The absolute path to a file inside the wpt document root
So for example in a setup running on localhost with a www
subdomain and a http server on ports 80 and 81::
{{host}} => localhost
{{domains[www]}} => www.localhost
{{ports[http][1]}} => 81
It is also possible to assign a value to a variable name, which must start
with the $ character, using the ":" syntax e.g.::
{{$id:uuid()}}
Later substitutions in the same file may then refer to the variable
by name e.g.::
{{$id}}
"""
content = resolve_content(response)
new_content = template(request, content, escape_type=escape_type)
response.content = new_content
return response
class SubFunctions(object):
@staticmethod
def uuid(request):
return str(uuid.uuid4())
# Maintain a list of supported algorithms, restricted to those that are
# available on all platforms [1]. This ensures that test authors do not
# unknowingly introduce platform-specific tests.
#
# [1] https://docs.python.org/2/library/hashlib.html
supported_algorithms = ("md5", "sha1", "sha224", "sha256", "sha384", "sha512")
@staticmethod
def file_hash(request, algorithm, path):
assert isinstance(algorithm, str)
if algorithm not in SubFunctions.supported_algorithms:
raise ValueError("Unsupported encryption algorithm: '%s'" % algorithm)
hash_obj = getattr(hashlib, algorithm)()
absolute_path = os.path.join(request.doc_root, path)
try:
with open(absolute_path, "rb") as f:
hash_obj.update(f.read())
except IOError:
# In this context, an unhandled IOError will be interpreted by the
# server as an indication that the template file is non-existent.
# Although the generic "Exception" is less precise, it avoids
# triggering a potentially-confusing HTTP 404 error in cases where
# the path to the file to be hashed is invalid.
raise Exception('Cannot open file for hash computation: "%s"' % absolute_path)
return base64.b64encode(hash_obj.digest()).strip()
@staticmethod
def fs_path(request, path):
if not path.startswith("/"):
subdir = request.request_path[len(request.url_base):]
if "/" in subdir:
subdir = subdir.rsplit("/", 1)[0]
root_rel_path = subdir + "/" + path
else:
root_rel_path = path[1:]
root_rel_path = root_rel_path.replace("/", os.path.sep)
absolute_path = os.path.abspath(os.path.join(request.doc_root, root_rel_path))
if ".." in os.path.relpath(absolute_path, request.doc_root):
raise ValueError("Path outside wpt root")
return absolute_path
@staticmethod
def header_or_default(request, name, default):
return request.headers.get(name, default)
def template(request, content, escape_type="html"):
#TODO: There basically isn't any error handling here
tokenizer = ReplacementTokenizer()
variables = {}
def config_replacement(match):
content, = match.groups()
tokens = tokenizer.tokenize(content)
tokens = deque(tokens)
token_type, field = tokens.popleft()
assert isinstance(field, str)
if token_type == "var":
variable = field
token_type, field = tokens.popleft()
assert isinstance(field, str)
else:
variable = None
if token_type != "ident":
raise Exception("unexpected token type %s (token '%r'), expected ident" % (token_type, field))
if field in variables:
value = variables[field]
elif hasattr(SubFunctions, field):
value = getattr(SubFunctions, field)
elif field == "headers":
value = request.headers
elif field == "GET":
value = FirstWrapper(request.GET)
elif field == "hosts":
value = request.server.config.all_domains
elif field == "domains":
value = request.server.config.all_domains[""]
elif field == "host":
value = request.server.config["browser_host"]
elif field in request.server.config:
value = request.server.config[field]
elif field == "location":
value = {"server": "%s://%s:%s" % (request.url_parts.scheme,
request.url_parts.hostname,
request.url_parts.port),
"scheme": request.url_parts.scheme,
"host": "%s:%s" % (request.url_parts.hostname,
request.url_parts.port),
"hostname": request.url_parts.hostname,
"port": request.url_parts.port,
"path": request.url_parts.path,
"pathname": request.url_parts.path,
"query": "?%s" % request.url_parts.query}
elif field == "url_base":
value = request.url_base
else:
raise Exception("Undefined template variable %s" % field)
while tokens:
ttype, field = tokens.popleft()
if ttype == "index":
value = value[field]
elif ttype == "arguments":
value = value(request, *field)
else:
raise Exception(
"unexpected token type %s (token '%r'), expected ident or arguments" % (ttype, field)
)
assert isinstance(value, (int, (bytes, str))), tokens
if variable is not None:
variables[variable] = value
escape_func = {"html": lambda x:escape(x, quote=True),
"none": lambda x:x}[escape_type]
# Should possibly support escaping for other contexts e.g. script
# TODO: read the encoding of the response
# cgi.escape() only takes text strings in Python 3.
if isinstance(value, bytes):
value = value.decode("utf-8")
elif isinstance(value, int):
value = str(value)
return escape_func(value).encode("utf-8")
template_regexp = re.compile(br"{{([^}]*)}}")
new_content = template_regexp.sub(config_replacement, content)
return new_content
@pipe()
def gzip(request, response):
"""This pipe gzip-encodes response data.
It sets (or overwrites) these HTTP headers:
Content-Encoding is set to gzip
Content-Length is set to the length of the compressed content
"""
content = resolve_content(response)
response.headers.set("Content-Encoding", "gzip")
out = BytesIO()
with gzip_module.GzipFile(fileobj=out, mode="w") as f:
f.write(content)
response.content = out.getvalue()
response.headers.set("Content-Length", len(response.content))
return response
| {
"content_hash": "2238fb9b7586b031ac7dd0597784148a",
"timestamp": "",
"source": "github",
"line_count": 555,
"max_line_length": 106,
"avg_line_length": 33.5045045045045,
"alnum_prop": 0.5613874697499328,
"repo_name": "ric2b/Vivaldi-browser",
"id": "740a2268fdd98c0e08846b7fc2a36e36ff79d94d",
"size": "18595",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "chromium/third_party/wpt_tools/wpt/tools/wptserve/wptserve/pipes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""Automatically setup docs for a project
Call from command line:
bench setup-docs app path
"""
from __future__ import unicode_literals
import os, json, frappe, shutil, re
from frappe.website.context import get_context
from frappe.utils import markdown
class setup_docs(object):
def __init__(self, app):
"""Generate source templates for models reference and module API
and templates at `templates/autodoc`
"""
self.app = app
frappe.flags.web_pages_folders = ['docs',]
frappe.flags.web_pages_apps = [self.app,]
self.hooks = frappe.get_hooks(app_name = self.app)
self.app_title = self.hooks.get("app_title")[0]
self.setup_app_context()
def setup_app_context(self):
self.docs_config = frappe.get_module(self.app + ".config.docs")
version = self.hooks.get("app_version")[0]
self.app_context = {
"app": frappe._dict({
"name": self.app,
"title": self.app_title,
"description": self.hooks.get("app_description")[0],
"version": version,
"publisher": self.hooks.get("app_publisher")[0],
"icon": self.hooks.get("app_icon")[0],
"email": self.hooks.get("app_email")[0],
"headline": self.docs_config.headline,
"brand_html": getattr(self.docs_config, 'brand_html', None),
"sub_heading": self.docs_config.sub_heading,
"source_link": self.docs_config.source_link,
"hide_install": getattr(self.docs_config, "hide_install", False),
"docs_base_url": self.docs_config.docs_base_url,
"long_description": markdown(getattr(self.docs_config, "long_description", "")),
"license": self.hooks.get("app_license")[0],
"branch": getattr(self.docs_config, "branch", None) or "develop",
"style": getattr(self.docs_config, "style", ""),
"google_analytics_id": getattr(self.docs_config, "google_analytics_id", "")
}),
"metatags": {
"description": self.hooks.get("app_description")[0],
},
"get_doctype_app": frappe.get_doctype_app
}
def build(self, docs_version):
"""Build templates for docs models and Python API"""
self.docs_path = frappe.get_app_path(self.app, "docs")
self.path = os.path.join(self.docs_path, docs_version)
self.app_context["app"]["docs_version"] = docs_version
self.app_title = self.hooks.get("app_title")[0]
self.app_path = frappe.get_app_path(self.app)
print "Deleting current..."
shutil.rmtree(self.path, ignore_errors = True)
os.makedirs(self.path)
self.make_home_pages()
for basepath, folders, files in os.walk(self.app_path):
# make module home page
if "/doctype/" not in basepath and "doctype" in folders:
module = os.path.basename(basepath)
module_folder = os.path.join(self.models_base_path, module)
self.make_folder(module_folder,
template = "templates/autodoc/module_home.html",
context = {"name": module})
self.update_index_txt(module_folder)
# make for model files
if "/doctype/" in basepath:
parts = basepath.split("/")
#print parts
module, doctype = parts[-3], parts[-1]
if doctype not in ("doctype", "boilerplate"):
self.write_model_file(basepath, module, doctype)
# standard python module
if self.is_py_module(basepath, folders, files):
self.write_modules(basepath, folders, files)
self.build_user_docs()
def make_home_pages(self):
"""Make standard home pages for docs, developer docs, api and models
from templates"""
# make docs home page
with open(os.path.join(self.docs_path, "index.html"), "w") as home:
home.write(frappe.render_template("templates/autodoc/docs_home.html",
self.app_context))
# make dev home page
with open(os.path.join(self.path, "index.html"), "w") as home:
home.write(frappe.render_template("templates/autodoc/dev_home.html",
self.app_context))
# make folders
self.models_base_path = os.path.join(self.path, "models")
self.make_folder(self.models_base_path,
template = "templates/autodoc/models_home.html")
self.api_base_path = os.path.join(self.path, "api")
self.make_folder(self.api_base_path,
template = "templates/autodoc/api_home.html")
# make /user
user_path = os.path.join(self.docs_path, "user")
if not os.path.exists(user_path):
os.makedirs(user_path)
# make /assets/img
img_path = os.path.join(self.docs_path, "assets", "img")
if not os.path.exists(img_path):
os.makedirs(img_path)
def build_user_docs(self):
"""Build templates for user docs pages, if missing."""
#user_docs_path = os.path.join(self.docs_path, "user")
# license
with open(os.path.join(self.app_path, "..", "license.txt"), "r") as license_file:
self.app_context["license_text"] = markdown(license_file.read())
html = frappe.render_template("templates/autodoc/license.html",
context = self.app_context)
with open(os.path.join(self.docs_path, "license.html"), "w") as license_file:
license_file.write(html.encode("utf-8"))
# contents
shutil.copy(os.path.join(frappe.get_app_path("frappe", "templates", "autodoc",
"contents.html")), os.path.join(self.docs_path, "contents.html"))
shutil.copy(os.path.join(frappe.get_app_path("frappe", "templates", "autodoc",
"contents.py")), os.path.join(self.docs_path, "contents.py"))
# install
html = frappe.render_template("templates/autodoc/install.md",
context = self.app_context)
with open(os.path.join(self.docs_path, "install.md"), "w") as f:
f.write(html)
self.update_index_txt(self.docs_path)
def make_docs(self, target, local = False):
self.target = target
self.local = local
frappe.flags.local_docs = local
if self.local:
self.docs_base_url = ""
else:
self.docs_base_url = self.docs_config.docs_base_url
# add for processing static files (full-index)
frappe.local.docs_base_url = self.docs_base_url
# write in target path
self.write_files()
# copy assets/js, assets/css, assets/img
self.copy_assets()
def is_py_module(self, basepath, folders, files):
return "__init__.py" in files \
and (not "/doctype" in basepath) \
and (not "/patches" in basepath) \
and (not "/change_log" in basepath) \
and (not "/report" in basepath) \
and (not "/page" in basepath) \
and (not "/templates" in basepath) \
and (not "/tests" in basepath) \
and (not "/docs" in basepath)
def write_modules(self, basepath, folders, files):
module_folder = os.path.join(self.api_base_path, os.path.relpath(basepath, self.app_path))
self.make_folder(module_folder)
for f in files:
if f.endswith(".py"):
full_module_name = os.path.relpath(os.path.join(basepath, f),
self.app_path)[:-3].replace("/", ".")
module_name = full_module_name.replace(".__init__", "")
module_doc_path = os.path.join(module_folder,
self.app + "." + module_name + ".html")
self.make_folder(basepath)
if not os.path.exists(module_doc_path):
print "Writing " + module_doc_path
with open(module_doc_path, "w") as f:
context = {"name": self.app + "." + module_name}
context.update(self.app_context)
context['full_module_name'] = self.app + '.' + full_module_name
f.write(frappe.render_template("templates/autodoc/pymodule.html",
context).encode('utf-8'))
self.update_index_txt(module_folder)
def make_folder(self, path, template=None, context=None):
if not template:
template = "templates/autodoc/package_index.html"
if not os.path.exists(path):
os.makedirs(path)
index_txt_path = os.path.join(path, "index.txt")
print "Writing " + index_txt_path
with open(index_txt_path, "w") as f:
f.write("")
index_html_path = os.path.join(path, "index.html")
if not context:
name = os.path.basename(path)
if name==".":
name = self.app
context = {
"title": name
}
context.update(self.app_context)
print "Writing " + index_html_path
with open(index_html_path, "w") as f:
f.write(frappe.render_template(template, context))
def update_index_txt(self, path):
index_txt_path = os.path.join(path, "index.txt")
pages = filter(lambda d: ((d.endswith(".html") or d.endswith(".md")) and d not in ("index.html",)) \
or os.path.isdir(os.path.join(path, d)), os.listdir(path))
pages = [d.rsplit(".", 1)[0] for d in pages]
index_parts = []
if os.path.exists(index_txt_path):
with open(index_txt_path, "r") as f:
index_parts = filter(None, f.read().splitlines())
if not set(pages).issubset(set(index_parts)):
print "Updating " + index_txt_path
with open(index_txt_path, "w") as f:
f.write("\n".join(pages))
def write_model_file(self, basepath, module, doctype):
model_path = os.path.join(self.models_base_path, module, doctype + ".html")
if not os.path.exists(model_path):
model_json_path = os.path.join(basepath, doctype + ".json")
if os.path.exists(model_json_path):
with open(model_json_path, "r") as j:
doctype_real_name = json.loads(j.read()).get("name")
print "Writing " + model_path
with open(model_path, "w") as f:
context = {"doctype": doctype_real_name}
context.update(self.app_context)
f.write(frappe.render_template("templates/autodoc/doctype.html",
context).encode("utf-8"))
def write_files(self):
"""render templates and write files to target folder"""
frappe.flags.home_page = "index"
from frappe.website.router import get_pages, make_toc
pages = get_pages(self.app)
# clear the user, current folder in target
shutil.rmtree(os.path.join(self.target, "user"), ignore_errors=True)
shutil.rmtree(os.path.join(self.target, "current"), ignore_errors=True)
def raw_replacer(matchobj):
if '{% raw %}' in matchobj.group(0):
return matchobj.group(0)
else:
return '{% raw %}' + matchobj.group(0) + '{% endraw %}'
cnt = 0
for path, context in pages.iteritems():
print "Writing {0}".format(path)
# set this for get_context / website libs
frappe.local.path = path
context.update({
"page_links_with_extn": True,
"relative_links": True,
"docs_base_url": self.docs_base_url,
"url_prefix": self.docs_base_url,
})
context.update(self.app_context)
context = get_context(path, context)
if context.basename:
target_path_fragment = context.route + '.html'
else:
# index.html
target_path_fragment = context.route + '/index.html'
target_filename = os.path.join(self.target, target_path_fragment.strip('/'))
context.brand_html = context.app.brand_html
context.top_bar_items = context.favicon = None
self.docs_config.get_context(context)
if not context.brand_html:
if context.docs_icon:
context.brand_html = '<i class="{0}"></i> {1}'.format(context.docs_icon, context.app.title)
else:
context.brand_html = context.app.title
if not context.top_bar_items:
context.top_bar_items = [
# {"label": "Contents", "url": self.docs_base_url + "/contents.html", "right": 1},
{"label": "User Guide", "url": self.docs_base_url + "/user", "right": 1},
{"label": "Developer Docs", "url": self.docs_base_url + "/current", "right": 1},
]
context.top_bar_items = [{"label": '<i class="octicon octicon-search"></i>', "url": "#",
"right": 1}] + context.top_bar_items
context.parents = []
parent_route = os.path.dirname(context.route)
if pages[parent_route]:
context.parents = [pages[parent_route]]
context.only_static = True
context.base_template_path = "templates/autodoc/base_template.html"
if '<code>' in context.source:
context.source = re.sub('\<code\>(.*)\</code\>', raw_replacer, context.source)
html = frappe.render_template(context.source, context)
html = make_toc(context, html, self.app)
if not "<!-- autodoc -->" in html:
html = html.replace('<!-- edit-link -->',
edit_link.format(\
source_link = self.docs_config.source_link,
app_name = self.app,
branch = context.app.branch,
target = context.template))
if not os.path.exists(os.path.dirname(target_filename)):
os.makedirs(os.path.dirname(target_filename))
with open(target_filename, "w") as htmlfile:
htmlfile.write(html.encode("utf-8"))
cnt += 1
print "Wrote {0} files".format(cnt)
def copy_assets(self):
"""Copy jquery, bootstrap and other assets to files"""
print "Copying assets..."
assets_path = os.path.join(self.target, "assets")
# copy assets from docs
source_assets = frappe.get_app_path(self.app, "docs", "assets")
if os.path.exists(source_assets):
for basepath, folders, files in os.walk(source_assets):
target_basepath = os.path.join(assets_path, os.path.relpath(basepath, source_assets))
# make the base folder
if not os.path.exists(target_basepath):
os.makedirs(target_basepath)
# copy all files in the current folder
for f in files:
shutil.copy(os.path.join(basepath, f), os.path.join(target_basepath, f))
# make missing folders
for fname in ("js", "css", "img"):
path = os.path.join(assets_path, fname)
if not os.path.exists(path):
os.makedirs(path)
copy_files = {
"js/lib/jquery/jquery.min.js": "js/jquery.min.js",
"js/lib/bootstrap.min.js": "js/bootstrap.min.js",
"js/lib/highlight.pack.js": "js/highlight.pack.js",
"js/docs.js": "js/docs.js",
"css/bootstrap.css": "css/bootstrap.css",
"css/font-awesome.css": "css/font-awesome.css",
"css/docs.css": "css/docs.css",
"css/hljs.css": "css/hljs.css",
"css/fonts": "css/fonts",
"css/octicons": "css/octicons",
# always overwrite octicons.css to fix the path
"css/octicons/octicons.css": "css/octicons/octicons.css",
"images/frappe-bird-grey.svg": "img/frappe-bird-grey.svg",
"images/favicon.png": "img/favicon.png",
"images/background.png": "img/background.png",
"images/smiley.png": "img/smiley.png",
"images/up.png": "img/up.png"
}
for source, target in copy_files.iteritems():
source_path = frappe.get_app_path("frappe", "public", source)
if os.path.isdir(source_path):
if not os.path.exists(os.path.join(assets_path, target)):
shutil.copytree(source_path, os.path.join(assets_path, target))
else:
shutil.copy(source_path, os.path.join(assets_path, target))
# fix path for font-files, background
files = (
os.path.join(assets_path, "css", "octicons", "octicons.css"),
os.path.join(assets_path, "css", "font-awesome.css"),
os.path.join(assets_path, "css", "docs.css"),
)
for path in files:
with open(path, "r") as css_file:
text = unicode(css_file.read(), 'utf-8')
with open(path, "w") as css_file:
if "docs.css" in path:
css_file.write(text.replace("/assets/img/",
self.docs_base_url + '/assets/img/').encode('utf-8'))
else:
css_file.write(text.replace("/assets/frappe/", self.docs_base_url + '/assets/').encode('utf-8'))
edit_link = '''
<div class="page-container">
<div class="page-content">
<div class="edit-container text-center">
<i class="fa fa-smile"></i>
<a class="text-muted edit" href="http://revaluesoft.com">
Improve this page
</a>
</div>
</div>
</div>'''
| {
"content_hash": "474b5e61bd1c07339097f4ba24fbfed4",
"timestamp": "",
"source": "github",
"line_count": 459,
"max_line_length": 102,
"avg_line_length": 32.64052287581699,
"alnum_prop": 0.65812308103057,
"repo_name": "elba7r/frameworking",
"id": "d9de3897c1ccac62790a6ebdefa19b7cce2b81b4",
"size": "14982",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "frappe/utils/setup_docs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "336604"
},
{
"name": "HTML",
"bytes": "198854"
},
{
"name": "JavaScript",
"bytes": "1262443"
},
{
"name": "Python",
"bytes": "1614773"
},
{
"name": "Shell",
"bytes": "517"
}
],
"symlink_target": ""
} |
import threading
import weakref
from django.utils.inspect import func_accepts_kwargs
def _make_id(target):
if hasattr(target, '__func__'):
return (id(target.__self__), id(target.__func__))
return id(target)
NONE_ID = _make_id(None)
# A marker for caching
NO_RECEIVERS = object()
class Signal:
"""
Base class for all signals
Internal attributes:
receivers
{ receiverkey (id) : weakref(receiver) }
"""
def __init__(self, providing_args=None, use_caching=False):
"""
Create a new signal.
providing_args
A list of the arguments this signal can pass along in a send() call.
"""
self.receivers = []
if providing_args is None:
providing_args = []
self.providing_args = set(providing_args)
self.lock = threading.Lock()
self.use_caching = use_caching
# For convenience we create empty caches even if they are not used.
# A note about caching: if use_caching is defined, then for each
# distinct sender we cache the receivers that sender has in
# 'sender_receivers_cache'. The cache is cleaned when .connect() or
# .disconnect() is called and populated on send().
self.sender_receivers_cache = weakref.WeakKeyDictionary() if use_caching else {}
self._dead_receivers = False
def connect(self, receiver, sender=None, weak=True, dispatch_uid=None):
"""
Connect receiver to sender for signal.
Arguments:
receiver
A function or an instance method which is to receive signals.
Receivers must be hashable objects.
If weak is True, then receiver must be weak referenceable.
Receivers must be able to accept keyword arguments.
If a receiver is connected with a dispatch_uid argument, it
will not be added if another receiver was already connected
with that dispatch_uid.
sender
The sender to which the receiver should respond. Must either be
a Python object, or None to receive events from any sender.
weak
Whether to use weak references to the receiver. By default, the
module will attempt to use weak references to the receiver
objects. If this parameter is false, then strong references will
be used.
dispatch_uid
An identifier used to uniquely identify a particular instance of
a receiver. This will usually be a string, though it may be
anything hashable.
"""
from django.conf import settings
# If DEBUG is on, check that we got a good receiver
if settings.configured and settings.DEBUG:
assert callable(receiver), "Signal receivers must be callable."
# Check for **kwargs
if not func_accepts_kwargs(receiver):
raise ValueError("Signal receivers must accept keyword arguments (**kwargs).")
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
if weak:
ref = weakref.ref
receiver_object = receiver
# Check for bound methods
if hasattr(receiver, '__self__') and hasattr(receiver, '__func__'):
ref = weakref.WeakMethod
receiver_object = receiver.__self__
receiver = ref(receiver)
weakref.finalize(receiver_object, self._remove_receiver)
with self.lock:
self._clear_dead_receivers()
for r_key, _ in self.receivers:
if r_key == lookup_key:
break
else:
self.receivers.append((lookup_key, receiver))
self.sender_receivers_cache.clear()
def disconnect(self, receiver=None, sender=None, dispatch_uid=None):
"""
Disconnect receiver from sender for signal.
If weak references are used, disconnect need not be called. The receiver
will be removed from dispatch automatically.
Arguments:
receiver
The registered receiver to disconnect. May be none if
dispatch_uid is specified.
sender
The registered sender to disconnect
dispatch_uid
the unique identifier of the receiver to disconnect
"""
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
disconnected = False
with self.lock:
self._clear_dead_receivers()
for index in range(len(self.receivers)):
(r_key, _) = self.receivers[index]
if r_key == lookup_key:
disconnected = True
del self.receivers[index]
break
self.sender_receivers_cache.clear()
return disconnected
def has_listeners(self, sender=None):
return bool(self._live_receivers(sender))
def send(self, sender, **named):
"""
Send signal from sender to all connected receivers.
If any receiver raises an error, the error propagates back through send,
terminating the dispatch loop. So it's possible that all receivers
won't be called if an error is raised.
Arguments:
sender
The sender of the signal. Either a specific object or None.
named
Named arguments which will be passed to receivers.
Returns a list of tuple pairs [(receiver, response), ... ].
"""
if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
return []
return [
(receiver, receiver(signal=self, sender=sender, **named))
for receiver in self._live_receivers(sender)
]
def send_robust(self, sender, **named):
"""
Send signal from sender to all connected receivers catching errors.
Arguments:
sender
The sender of the signal. Can be any python object (normally one
registered with a connect if you actually want something to
occur).
named
Named arguments which will be passed to receivers. These
arguments must be a subset of the argument names defined in
providing_args.
Return a list of tuple pairs [(receiver, response), ... ].
If any receiver raises an error (specifically any subclass of
Exception), the error instance is returned as the result for that
receiver.
"""
if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
return []
# Call each receiver with whatever arguments it can accept.
# Return a list of tuple pairs [(receiver, response), ... ].
responses = []
for receiver in self._live_receivers(sender):
try:
response = receiver(signal=self, sender=sender, **named)
except Exception as err:
responses.append((receiver, err))
else:
responses.append((receiver, response))
return responses
def _clear_dead_receivers(self):
# Note: caller is assumed to hold self.lock.
if self._dead_receivers:
self._dead_receivers = False
new_receivers = []
for r in self.receivers:
if isinstance(r[1], weakref.ReferenceType) and r[1]() is None:
continue
new_receivers.append(r)
self.receivers = new_receivers
def _live_receivers(self, sender):
"""
Filter sequence of receivers to get resolved, live receivers.
This checks for weak references and resolves them, returning only
live receivers.
"""
receivers = None
if self.use_caching and not self._dead_receivers:
receivers = self.sender_receivers_cache.get(sender)
# We could end up here with NO_RECEIVERS even if we do check this case in
# .send() prior to calling _live_receivers(), due to a concurrent .send() call.
if receivers is NO_RECEIVERS:
return []
if receivers is None:
with self.lock:
self._clear_dead_receivers()
senderkey = _make_id(sender)
receivers = []
for (receiverkey, r_senderkey), receiver in self.receivers:
if r_senderkey == NONE_ID or r_senderkey == senderkey:
receivers.append(receiver)
if self.use_caching:
if not receivers:
self.sender_receivers_cache[sender] = NO_RECEIVERS
else:
# Note, we must cache the weakref versions.
self.sender_receivers_cache[sender] = receivers
non_weak_receivers = []
for receiver in receivers:
if isinstance(receiver, weakref.ReferenceType):
# Dereference the weak reference.
receiver = receiver()
if receiver is not None:
non_weak_receivers.append(receiver)
else:
non_weak_receivers.append(receiver)
return non_weak_receivers
def _remove_receiver(self, receiver=None):
# Mark that the self.receivers list has dead weakrefs. If so, we will
# clean those up in connect, disconnect and _live_receivers while
# holding self.lock. Note that doing the cleanup here isn't a good
# idea, because _remove_receiver() will be called as a side effect of garbage
# collection, and so the call can happen while we are already holding
# self.lock.
self._dead_receivers = True
def receiver(signal, **kwargs):
"""
A decorator for connecting receivers to signals. Used by passing in the
signal (or list of signals) and keyword arguments to connect::
@receiver(post_save, sender=MyModel)
def signal_receiver(sender, **kwargs):
...
@receiver([post_save, post_delete], sender=MyModel)
def signals_receiver(sender, **kwargs):
...
"""
def _decorator(func):
if isinstance(signal, (list, tuple)):
for s in signal:
s.connect(func, **kwargs)
else:
signal.connect(func, **kwargs)
return func
return _decorator
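# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of how this dispatcher is typically used; the signal and
# receiver names below (pizza_done, report_progress) are hypothetical.
#
#   from django.dispatch import Signal, receiver
#
#   pizza_done = Signal()
#
#   @receiver(pizza_done, dispatch_uid="report_progress_once")
#   def report_progress(sender, **kwargs):
#       return "toppings: %s" % kwargs.get("toppings")
#
#   # send() returns [(receiver, response), ...]; send_robust() returns caught
#   # exceptions as the response instead of propagating them.
#   responses = pizza_done.send(sender=object(), toppings=["cheese"])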
| {
"content_hash": "3eaf4e8a726ed96b45f63011ec138957",
"timestamp": "",
"source": "github",
"line_count": 299,
"max_line_length": 94,
"avg_line_length": 36.45150501672241,
"alnum_prop": 0.5784016882282779,
"repo_name": "auready/django",
"id": "5599b9fdfcb7d84ddc88492ddc5cb34519cd4539",
"size": "10899",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "django/dispatch/dispatcher.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "53169"
},
{
"name": "HTML",
"bytes": "173634"
},
{
"name": "JavaScript",
"bytes": "448151"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "12200962"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('symposion_speakers', '0002_speaker_twitter_username'),
]
operations = [
migrations.AlterField(
model_name='speaker',
name='created',
field=models.DateTimeField(default=django.utils.timezone.now, editable=False, verbose_name='Created'),
),
]
| {
"content_hash": "8888151c35c9e6dd00b3a3e34a31e7d5",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 114,
"avg_line_length": 25.789473684210527,
"alnum_prop": 0.6510204081632653,
"repo_name": "pydata/symposion",
"id": "cf6dfcdbd214757c871169446744ac2499ca5f5c",
"size": "563",
"binary": false,
"copies": "1",
"ref": "refs/heads/patched",
"path": "symposion/speakers/migrations/0003_make_speaker_created_timezone_aware.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "79954"
},
{
"name": "Python",
"bytes": "250547"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from windyer_comments.models import Comment
class CommentAdmin(admin.ModelAdmin):
search_fields = ('user__username', 'article__title', 'text')
list_filter = ('create_time',)
list_display = ('user', 'article', 'text', 'create_time')
fields = ('user', 'article', 'parent', 'text')
admin.site.register(Comment, CommentAdmin)
| {
"content_hash": "2564ce291c17f5bdd0942c4724260b3d",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 64,
"avg_line_length": 34,
"alnum_prop": 0.6871657754010695,
"repo_name": "windyer/windyer_blog",
"id": "0ea28d5989a3f95f268fc79ff10d52e8a041cfd9",
"size": "398",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "windyer_comments/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "161853"
},
{
"name": "HTML",
"bytes": "130796"
},
{
"name": "JavaScript",
"bytes": "235396"
},
{
"name": "Nginx",
"bytes": "618"
},
{
"name": "Python",
"bytes": "69431"
},
{
"name": "Ruby",
"bytes": "578"
},
{
"name": "Shell",
"bytes": "26"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Static()
result.template = "object/static/particle/shared_particle_test_31.iff"
result.attribute_template_id = -1
result.stfName("obj_n","unknown_object")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
| {
"content_hash": "181fcc41e88b25b1f01673882eb24b86",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 71,
"avg_line_length": 23.076923076923077,
"alnum_prop": 0.69,
"repo_name": "anhstudios/swganh",
"id": "eb21f06bc77d5603b27498aecd692932815cf1ae",
"size": "445",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/static/particle/shared_particle_test_31.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
"""
WSGI config for blogproject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "blogproject.settings")
application = get_wsgi_application()
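# Illustrative note (not part of the original file): any WSGI-compliant server
# can load this callable; gunicorn is used here only as an assumed example:
#
#   gunicorn blogproject.wsgi:application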
| {
"content_hash": "0a994d2087bbd8e222e53709dae4e9e7",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 25,
"alnum_prop": 0.775,
"repo_name": "RebelBIrd/Blog",
"id": "27ee598dc0f7fea0723314286476a0c276bf1969",
"size": "400",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "blogproject/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "88987"
},
{
"name": "HTML",
"bytes": "13313"
},
{
"name": "JavaScript",
"bytes": "4310"
},
{
"name": "Python",
"bytes": "13835"
}
],
"symlink_target": ""
} |
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_ARB_draw_instanced'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_ARB_draw_instanced',error_checker=_errors._error_checker)
@_f
@_p.types(None,_cs.GLenum,_cs.GLint,_cs.GLsizei,_cs.GLsizei)
def glDrawArraysInstancedARB(mode,first,count,primcount):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLsizei,_cs.GLenum,ctypes.c_void_p,_cs.GLsizei)
def glDrawElementsInstancedARB(mode,count,type,indices,primcount):pass
| {
"content_hash": "d69ec28a2ab0d0a581a82b23b360a662",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 115,
"avg_line_length": 38.9,
"alnum_prop": 0.7583547557840618,
"repo_name": "alexus37/AugmentedRealityChess",
"id": "8729a7243b35efcaadb2a3e41177e1c98d80f9ac",
"size": "778",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/OpenGL/raw/GL/ARB/draw_instanced.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "158062"
},
{
"name": "C++",
"bytes": "267993"
},
{
"name": "CMake",
"bytes": "11319"
},
{
"name": "Fortran",
"bytes": "3707"
},
{
"name": "Makefile",
"bytes": "14618"
},
{
"name": "Python",
"bytes": "12813086"
},
{
"name": "Roff",
"bytes": "3310"
},
{
"name": "Shell",
"bytes": "3855"
}
],
"symlink_target": ""
} |
import sys
from mox3 import mox
from neutronclient.neutron.v2_0.fw import firewallpolicy
from neutronclient import shell
from neutronclient.tests.unit import test_cli20
class CLITestV20FirewallPolicyJSON(test_cli20.CLITestV20Base):
def setUp(self):
super(CLITestV20FirewallPolicyJSON, self).setUp()
def test_create_firewall_policy_with_mandatory_params(self):
"""firewall-policy-create with mandatory (none) params only."""
resource = 'firewall_policy'
cmd = firewallpolicy.CreateFirewallPolicy(test_cli20.MyApp(sys.stdout),
None)
tenant_id = 'my-tenant'
name = 'my-name'
my_id = 'myid'
args = ['--tenant-id', tenant_id,
'--admin-state_up',
name, ]
position_names = ['name', ]
position_values = [name, ]
self._test_create_resource(resource, cmd, name, my_id, args,
position_names, position_values,
admin_state_up=True, tenant_id=tenant_id)
def test_create_firewall_policy_with_all_params(self):
"""firewall-policy-create with rule param of misc format."""
resource = 'firewall_policy'
cmd = firewallpolicy.CreateFirewallPolicy(test_cli20.MyApp(sys.stdout),
None)
name = 'my-name'
description = 'my-desc'
firewall_rules_res = ['rule_id1', 'rule_id2']
tenant_id = 'my-tenant'
my_id = 'myid'
position_names = ['name', ]
position_values = [name, ]
# check for both str and unicode format firewall_rules_arg
for firewall_rules_arg in ['rule_id1 rule_id2', u'rule_id1 rule_id2']:
args = ['--description', description,
'--shared',
'--firewall-rules', firewall_rules_arg,
'--audited',
'--tenant-id', tenant_id,
'--admin-state_up',
name]
self._test_create_resource(resource, cmd, name, my_id, args,
position_names, position_values,
description=description, shared=True,
firewall_rules=firewall_rules_res,
audited=True, admin_state_up=True,
tenant_id=tenant_id)
def test_list_firewall_policies(self):
"""firewall-policy-list."""
resources = "firewall_policies"
cmd = firewallpolicy.ListFirewallPolicy(test_cli20.MyApp(sys.stdout),
None)
self._test_list_resources(resources, cmd, True)
def test_list_firewall_policies_pagination(self):
"""firewall-policy-list."""
resources = "firewall_policies"
cmd = firewallpolicy.ListFirewallPolicy(test_cli20.MyApp(sys.stdout),
None)
self._test_list_resources_with_pagination(resources, cmd)
def test_list_firewall_policies_sort(self):
"""sorted list: firewall-policy-list --sort-key name --sort-key id
--sort-key asc --sort-key desc
"""
resources = "firewall_policies"
cmd = firewallpolicy.ListFirewallPolicy(test_cli20.MyApp(sys.stdout),
None)
self._test_list_resources(resources, cmd,
sort_key=["name", "id"],
sort_dir=["asc", "desc"])
def test_list_firewall_policies_limit(self):
"""size (1000) limited list: firewall-policy-list -P."""
resources = "firewall_policies"
cmd = firewallpolicy.ListFirewallPolicy(test_cli20.MyApp(sys.stdout),
None)
self._test_list_resources(resources, cmd, page_size=1000)
def test_show_firewall_policy_id(self):
"""firewall-policy-show test_id."""
resource = 'firewall_policy'
cmd = firewallpolicy.ShowFirewallPolicy(test_cli20.MyApp(sys.stdout),
None)
args = ['--fields', 'id', self.test_id]
self._test_show_resource(resource, cmd, self.test_id, args, ['id'])
def test_show_firewall_policy_id_name(self):
"""firewall-policy-show."""
resource = 'firewall_policy'
cmd = firewallpolicy.ShowFirewallPolicy(test_cli20.MyApp(sys.stdout),
None)
args = ['--fields', 'id', '--fields', 'name', self.test_id]
self._test_show_resource(resource, cmd, self.test_id,
args, ['id', 'name'])
def test_update_firewall_policy(self):
"""firewall-policy-update myid --name newname."""
resource = 'firewall_policy'
cmd = firewallpolicy.UpdateFirewallPolicy(test_cli20.MyApp(sys.stdout),
None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--name', 'newname'],
{'name': 'newname', })
def test_update_firewall_policy_with_rules(self):
"""firewall-policy-update myid --firewall-rules "rule1 rule2"."""
resource = 'firewall_policy'
cmd = firewallpolicy.UpdateFirewallPolicy(test_cli20.MyApp(sys.stdout),
None)
firewall_rules_arg = u'rule_id3 rule_id4'
firewall_rules_res = ['rule_id3', 'rule_id4']
self._test_update_resource(
resource, cmd, 'myid',
['myid', '--firewall-rules', firewall_rules_arg],
{'firewall_rules': firewall_rules_res, })
def test_delete_firewall_policy(self):
"""firewall-policy-delete my-id."""
resource = 'firewall_policy'
cmd = firewallpolicy.DeleteFirewallPolicy(test_cli20.MyApp(sys.stdout),
None)
my_id = 'myid1'
args = [my_id]
self._test_delete_resource(resource, cmd, my_id, args)
def test_insert_firewall_rule(self):
"""firewall-policy-insert-rule myid newruleid
--insert-before ruleAid
--insert-after ruleBid
"""
resource = 'firewall_policy'
cmd = firewallpolicy.FirewallPolicyInsertRule(
test_cli20.MyApp(sys.stdout),
None)
myid = 'myid'
args = ['myid', 'newrule',
'--insert-before', 'rule2',
'--insert-after', 'rule1']
extrafields = {'firewall_rule_id': 'newrule',
'insert_before': 'rule2',
'insert_after': 'rule1'}
self.mox.StubOutWithMock(cmd, "get_client")
self.mox.StubOutWithMock(self.client.httpclient, "request")
cmd.get_client().MultipleTimes().AndReturn(self.client)
body = extrafields
path = getattr(self.client, resource + "_insert_path")
self.client.httpclient.request(
test_cli20.MyUrlComparator(
test_cli20.end_url(path % myid, format=self.format),
self.client),
'PUT', body=test_cli20.MyComparator(body, self.client),
headers=mox.ContainsKeyValue(
'X-Auth-Token',
test_cli20.TOKEN)).AndReturn((test_cli20.MyResp(204), None))
args.extend(['--request-format', self.format])
self.mox.ReplayAll()
cmd_parser = cmd.get_parser(resource + "_insert_rule")
shell.run_command(cmd, cmd_parser, args)
self.mox.VerifyAll()
self.mox.UnsetStubs()
def test_remove_firewall_rule(self):
"""firewall-policy-remove-rule myid ruleid
"""
resource = 'firewall_policy'
cmd = firewallpolicy.FirewallPolicyRemoveRule(
test_cli20.MyApp(sys.stdout),
None)
myid = 'myid'
args = ['myid', 'removerule']
extrafields = {'firewall_rule_id': 'removerule', }
self.mox.StubOutWithMock(cmd, "get_client")
self.mox.StubOutWithMock(self.client.httpclient, "request")
cmd.get_client().MultipleTimes().AndReturn(self.client)
body = extrafields
path = getattr(self.client, resource + "_remove_path")
self.client.httpclient.request(
test_cli20.MyUrlComparator(
test_cli20.end_url(path % myid, format=self.format),
self.client),
'PUT', body=test_cli20.MyComparator(body, self.client),
headers=mox.ContainsKeyValue(
'X-Auth-Token',
test_cli20.TOKEN)).AndReturn((test_cli20.MyResp(204), None))
args.extend(['--request-format', self.format])
self.mox.ReplayAll()
cmd_parser = cmd.get_parser(resource + "_remove_rule")
shell.run_command(cmd, cmd_parser, args)
self.mox.VerifyAll()
self.mox.UnsetStubs()
class CLITestV20FirewallPolicyXML(CLITestV20FirewallPolicyJSON):
format = 'xml'
| {
"content_hash": "fb1277d06a4f0f813b7e0c2fec89235c",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 79,
"avg_line_length": 43.92344497607655,
"alnum_prop": 0.5465141612200436,
"repo_name": "cernops/python-neutronclient",
"id": "a8786dbbca9e4ad2391a58b77097e08061a48a74",
"size": "9868",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "neutronclient/tests/unit/fw/test_cli20_firewallpolicy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "233"
},
{
"name": "Python",
"bytes": "798156"
},
{
"name": "Shell",
"bytes": "7500"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
try:
from queue import Queue
except ImportError:
from Queue import Queue
import axis_ep
module = 'axis_adapter'
srcs = []
srcs.append("../rtl/%s.v" % module)
srcs.append("test_%s_64_8.v" % module)
src = ' '.join(srcs)
build_cmd = "iverilog -o test_%s.vvp %s" % (module, src)
def dut_axis_adapter_64_8(clk,
rst,
current_test,
input_axis_tdata,
input_axis_tkeep,
input_axis_tvalid,
input_axis_tready,
input_axis_tlast,
input_axis_tuser,
output_axis_tdata,
output_axis_tkeep,
output_axis_tvalid,
output_axis_tready,
output_axis_tlast,
output_axis_tuser):
if os.system(build_cmd):
raise Exception("Error running build command")
return Cosimulation("vvp -m myhdl test_%s.vvp -lxt2" % module,
clk=clk,
rst=rst,
current_test=current_test,
input_axis_tdata=input_axis_tdata,
input_axis_tkeep=input_axis_tkeep,
input_axis_tvalid=input_axis_tvalid,
input_axis_tready=input_axis_tready,
input_axis_tlast=input_axis_tlast,
input_axis_tuser=input_axis_tuser,
output_axis_tdata=output_axis_tdata,
output_axis_tkeep=output_axis_tkeep,
output_axis_tvalid=output_axis_tvalid,
output_axis_tready=output_axis_tready,
output_axis_tlast=output_axis_tlast,
output_axis_tuser=output_axis_tuser)
def bench():
# Inputs
clk = Signal(bool(0))
rst = Signal(bool(0))
current_test = Signal(intbv(0)[8:])
input_axis_tdata = Signal(intbv(0)[64:])
input_axis_tkeep = Signal(intbv(0)[8:])
input_axis_tvalid = Signal(bool(0))
input_axis_tlast = Signal(bool(0))
input_axis_tuser = Signal(bool(0))
output_axis_tready = Signal(bool(0))
# Outputs
input_axis_tready = Signal(bool(0))
output_axis_tdata = Signal(intbv(0)[8:])
output_axis_tkeep = Signal(intbv(0)[1:])
output_axis_tvalid = Signal(bool(0))
output_axis_tlast = Signal(bool(0))
output_axis_tuser = Signal(bool(0))
# sources and sinks
source_queue = Queue()
source_pause = Signal(bool(0))
sink_queue = Queue()
sink_pause = Signal(bool(0))
source = axis_ep.AXIStreamSource(clk,
rst,
tdata=input_axis_tdata,
tkeep=input_axis_tkeep,
tvalid=input_axis_tvalid,
tready=input_axis_tready,
tlast=input_axis_tlast,
tuser=input_axis_tuser,
fifo=source_queue,
pause=source_pause,
name='source')
sink = axis_ep.AXIStreamSink(clk,
rst,
tdata=output_axis_tdata,
tkeep=output_axis_tkeep,
tvalid=output_axis_tvalid,
tready=output_axis_tready,
tlast=output_axis_tlast,
tuser=output_axis_tuser,
fifo=sink_queue,
pause=sink_pause,
name='sink')
# DUT
dut = dut_axis_adapter_64_8(clk,
rst,
current_test,
input_axis_tdata,
input_axis_tkeep,
input_axis_tvalid,
input_axis_tready,
input_axis_tlast,
input_axis_tuser,
output_axis_tdata,
output_axis_tkeep,
output_axis_tvalid,
output_axis_tready,
output_axis_tlast,
output_axis_tuser)
@always(delay(4))
def clkgen():
clk.next = not clk
def wait_normal():
while input_axis_tvalid or output_axis_tvalid:
yield clk.posedge
def wait_pause_source():
while input_axis_tvalid or output_axis_tvalid:
source_pause.next = True
yield clk.posedge
yield clk.posedge
yield clk.posedge
source_pause.next = False
yield clk.posedge
def wait_pause_sink():
while input_axis_tvalid or output_axis_tvalid:
sink_pause.next = True
yield clk.posedge
yield clk.posedge
yield clk.posedge
sink_pause.next = False
yield clk.posedge
@instance
def check():
yield delay(100)
yield clk.posedge
rst.next = 1
yield clk.posedge
rst.next = 0
yield clk.posedge
yield delay(100)
yield clk.posedge
for payload_len in range(1,18):
yield clk.posedge
print("test 1: test packet, length %d" % payload_len)
current_test.next = 1
test_frame = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
bytearray(range(payload_len)))
for wait in wait_normal, wait_pause_source, wait_pause_sink:
source_queue.put(test_frame)
yield clk.posedge
yield clk.posedge
yield wait()
yield clk.posedge
yield clk.posedge
yield clk.posedge
rx_frame = None
if not sink_queue.empty():
rx_frame = sink_queue.get()
assert rx_frame == test_frame
assert sink_queue.empty()
yield delay(100)
yield clk.posedge
print("test 2: back-to-back packets, length %d" % payload_len)
current_test.next = 2
test_frame1 = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
bytearray(range(payload_len)))
test_frame2 = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
bytearray(range(payload_len)))
for wait in wait_normal, wait_pause_source, wait_pause_sink:
source_queue.put(test_frame1)
source_queue.put(test_frame2)
yield clk.posedge
yield clk.posedge
yield wait()
yield clk.posedge
yield clk.posedge
yield clk.posedge
rx_frame = None
if not sink_queue.empty():
rx_frame = sink_queue.get()
assert rx_frame == test_frame1
rx_frame = None
if not sink_queue.empty():
rx_frame = sink_queue.get()
assert rx_frame == test_frame2
assert sink_queue.empty()
yield delay(100)
yield clk.posedge
print("test 3: tuser assert, length %d" % payload_len)
current_test.next = 3
test_frame1 = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
bytearray(range(payload_len)))
test_frame2 = axis_ep.AXIStreamFrame(b'\xDA\xD1\xD2\xD3\xD4\xD5' +
b'\x5A\x51\x52\x53\x54\x55' +
b'\x80\x00' +
bytearray(range(payload_len)))
test_frame1.user = 1
for wait in wait_normal, wait_pause_source, wait_pause_sink:
source_queue.put(test_frame1)
source_queue.put(test_frame2)
yield clk.posedge
yield clk.posedge
yield wait()
yield clk.posedge
yield clk.posedge
yield clk.posedge
rx_frame = None
if not sink_queue.empty():
rx_frame = sink_queue.get()
assert rx_frame == test_frame1
assert rx_frame.user[-1]
rx_frame = None
if not sink_queue.empty():
rx_frame = sink_queue.get()
assert rx_frame == test_frame2
assert sink_queue.empty()
yield delay(100)
raise StopSimulation
return dut, source, sink, clkgen, check
def test_bench():
os.chdir(os.path.dirname(os.path.abspath(__file__)))
sim = Simulation(bench())
sim.run()
if __name__ == '__main__':
print("Running test...")
test_bench()
| {
"content_hash": "93da109864cd9d496e1c6999c7920fce",
"timestamp": "",
"source": "github",
"line_count": 321,
"max_line_length": 78,
"avg_line_length": 33.46105919003115,
"alnum_prop": 0.48924681128386555,
"repo_name": "alexforencich/hdg2000",
"id": "39cc80003c66a7adf3a01542db0144ea700bf3c9",
"size": "10763",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fpga/lib/axis/tb/test_axis_adapter_64_8.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "9054"
},
{
"name": "Python",
"bytes": "934476"
},
{
"name": "Shell",
"bytes": "8661"
},
{
"name": "Verilog",
"bytes": "687285"
}
],
"symlink_target": ""
} |
"""Hold a local cache of the online Zotero library.
Provide an overview of what has changed in the online Zotero library since last
sync.
Expose a simple API to query and refresh the cache.
"""
# Zotero API:
# http://programminghistorian.org/lessons/zotero-api/intro-to-the-zotero-api
# For syncing with finer granularity in the future:
# https://www.zotero.org/support/dev/web_api/v3/syncing
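# Illustrative usage sketch (not part of the original module). The `options`
# object below is hypothetical; it only needs the attributes this class reads
# (zotero_user, zotero_api_access_key, zotero_cache_path), or the matching OS
# environment variables can be set instead.
#
#   with ZoteroClient(options) as client:
#       client.refresh()  # re-syncs only if the online library has changed
#       tree = client.get_filtered_sub_tree(['My Collection'])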
import http.client
import logging
import os
import re
# 3rd party
import pyzotero
import pyzotero.zotero
# App
import d1_onedrive.impl.onedrive_exceptions
try:
import pickle
except ImportError:
import pickle
class ZoteroClient(object):
def __init__(self, options):
self._options = options
self._user_id = self._get_setting("ZOTERO_USER")
self._api_access_key = self._get_setting("ZOTERO_API_ACCESS_KEY")
self._check_api_key()
self._zotero_client = pyzotero.zotero.Zotero(
self._user_id, "user", self._api_access_key
)
def __enter__(self):
self._init_cache()
self._unpickle_from_disk()
return self
def __exit__(self, type, value, traceback):
self._pickle_to_disk()
def refresh(self):
"""Refresh the local cache of the online Zotero library if stale."""
while self.cache_is_stale():
logging.info("Refreshing Zotero Library cache")
self.force_refresh()
def force_refresh(self):
self._init_cache()
self._cache["collection_trees"] = self._create_collection_trees()
self._cache["filtered_tree"] = self.create_filtered_tree()
self._cache["library_version"] = self._get_current_library_version()
def get_filtered_sub_tree(self, path):
"""Get a sub-tree rooted at [path] that contains only DataONE items.
The path is a list of collection names.
"""
return self._get_filtered_sub_tree_recursive(path)
def iterate_collection_trees(self):
for collection_tree in self._cache["collection_trees"]:
yield collection_tree, [collection_tree["name"]]
for f in self.iterate_collection_tree(collection_tree):
yield f
def iterate_collection_tree(self, collection_tree, path=None):
if path is None:
path = []
for collection in collection_tree["collections"]:
yield collection, path + [collection["name"]]
for f in self.iterate_collection_tree(
collection, path + [collection["name"]]
):
yield f
def iterate_filtered_tree(self, filtered_tree=None, path=None):
if filtered_tree is None:
filtered_tree = self._cache["filtered_tree"]
yield filtered_tree, []
if path is None:
path = []
for f in filtered_tree["collections"]:
yield filtered_tree["collections"][f], path + [f]
for f in self.iterate_filtered_tree(
filtered_tree["collections"][f], path + [f]
):
yield f
def cache_is_stale(self):
current_library_version = self._get_current_library_version()
logging.debug(
"Zotero online version: {}. Cached version: {}".format(
self._cache["library_version"], current_library_version
)
)
return self._cache["library_version"] < current_library_version
#
# Private.
#
def _get_setting(self, key):
try:
return self._options.__dict__[key.lower()]
except KeyError:
try:
return os.environ[key]
except KeyError:
raise d1_onedrive.impl.onedrive_exceptions.ONEDriveException(
"Required value must be set in settings.py or OS environment: {}".format(
key
)
)
def _init_cache(self):
self._cache = {"filtered_tree": {}, "collections": None, "library_version": 0}
def _create_collection_trees(self):
collections = self._zotero_client.collections()
return self._arrange_collections_into_collection_trees(collections)
def _arrange_collections_into_collection_trees(self, collections):
# The Zotero API returns the tree of collections as a flat list where each
# collection includes the key to its parent. The root collection returns
# False as its parent. It's more convenient to work with the collection tree
# recursively, so the tree is built here.
#
# Since Python creates references instead of copies when objects are
# appended to a list, the tree can be built with only two passes.
t = dict((e["collectionKey"], e) for e in collections)
for e in collections:
e["collections"] = []
for e in collections:
if e["parent"]:
t[e["parent"]]["collections"].append(e)
# May now have many trees. Return the ones that start at root (they include
# all others)
trees = []
for e in collections:
if not e["parent"]:
trees.append(e)
return trees
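# Toy illustration of the two passes above (collection keys are made up):
#
#   flat = [{'collectionKey': 'A', 'parent': False, 'name': 'root'},
#           {'collectionKey': 'B', 'parent': 'A', 'name': 'child'}]
#
# Pass 1 gives every entry an empty 'collections' list; pass 2 appends the
# entry with key 'B' to t['A']['collections']. Only entries whose 'parent'
# is False are returned as tree roots, and they already contain their
# children by reference.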
def create_filtered_tree(self):
filtered_tree = {}
for t in self._cache["collection_trees"]:
self._create_filtered_trees_from_collections_recursive(filtered_tree, t)
self._add_top_level_items_to_filtered_tree_root(filtered_tree)
return filtered_tree
def _create_filtered_trees_from_collections_recursive(
self, filtered_tree, collection_tree
):
sub_tree = {"collections": {}}
self._add_collection_items_to_filtered_tree(sub_tree, collection_tree)
filtered_tree.setdefault("collections", {})
filtered_tree["collections"][collection_tree["name"]] = sub_tree
for c in collection_tree["collections"]:
self._create_filtered_trees_from_collections_recursive(sub_tree, c)
def _add_collection_items_to_filtered_tree(self, filtered_tree, collection):
filtered_tree.setdefault("identifiers", [])
filtered_tree.setdefault("queries", [])
collection_items = self._zotero_client.collection_items(
collection["collectionKey"]
)
for i in collection_items:
self._add_item_to_filtered_tree_if_dataone_pid(filtered_tree, i)
self._add_item_to_filtered_tree_if_dataone_query(filtered_tree, i)
def _add_top_level_items_to_filtered_tree_root(self, filtered_tree):
# Notes about top
# https://groups.google.com/forum/#!topic/zotero-dev/MsJ3JBvpNrM
# Parents are typically top-level objects with metadata, and children are
# usually things like notes and file attachments.
filtered_tree.setdefault("identifiers", [])
filtered_tree.setdefault("queries", [])
top_level_items = self._zotero_client.everything(self._zotero_client.top())
for i in top_level_items:
self._add_item_to_filtered_tree_if_dataone_pid(filtered_tree, i)
self._add_item_to_filtered_tree_if_dataone_query(filtered_tree, i)
def _add_item_to_filtered_tree_if_dataone_pid(self, filtered_tree, item):
# tree.setdefault('identifiers', [])
m = re.match(r"(https://cn.dataone.org/cn/v1/resolve/)(.*)", item["url"])
if m:
filtered_tree["identifiers"].append(m.group(2))
def _add_item_to_filtered_tree_if_dataone_query(self, filtered_tree, item):
# filtered_tree.setdefault('queries', [])
m = re.match(r"(https://cn.dataone.org/cn/v1/query/solr/\?)(.*)", item["url"])
if m:
filtered_tree["queries"].append(m.group(2))
def _unpickle_from_disk(self):
try:
with open(self._options.zotero_cache_path, "rb") as f:
self._cache = pickle.load(f)
except (IOError, pickle.PickleError):
pass
def _pickle_to_disk(self):
with open(self._options.zotero_cache_path, "wb") as f:
pickle.dump(self._cache, f)
def _get_filtered_sub_tree_recursive(self, path, filtered_tree=None):
if filtered_tree is None:
filtered_tree = self._cache["filtered_tree"]
if not path:
return filtered_tree
try:
return self._get_filtered_sub_tree_recursive(
path[1:], filtered_tree["collections"][path[0]]
)
except KeyError:
raise d1_onedrive.impl.onedrive_exceptions.ONEDriveException("Invalid path")
def _check_api_key(self):
host = "api.zotero.org"
url = "/users/{}/items?limit=1&key={}&v=3".format(
self._user_id, self._api_access_key
)
connection = http.client.HTTPSConnection(host)
connection.request("GET", url)
if connection.getresponse().status == 403:
raise d1_onedrive.impl.onedrive_exceptions.ONEDriveException(
"Invalid Zotero User ID or API key. UserID: {}, API Key: {}.".format(
self._user_id, self._api_access_key
)
)
def _get_current_library_version(self):
# As far as I can tell, this information is not exposed in pyzotero, so
# I use a direct web api call.
host = "api.zotero.org"
url = "/users/{}/items?limit=1&format=versions&key={}&v=3".format(
self._user_id, self._api_access_key
)
connection = http.client.HTTPSConnection(host)
connection.request("GET", url)
response = connection.getresponse()
return int(response.getheader("Last-Modified-Version"))
| {
"content_hash": "f51ffbb950e8f4550f8bea1de57dcc37",
"timestamp": "",
"source": "github",
"line_count": 254,
"max_line_length": 93,
"avg_line_length": 38.09055118110236,
"alnum_prop": 0.6059948320413436,
"repo_name": "DataONEorg/d1_python",
"id": "8aa7a51c395cbb26fa60c659d13145b443cfd820",
"size": "10486",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "client_onedrive/src/d1_onedrive/impl/clients/onedrive_zotero_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4798"
},
{
"name": "HTML",
"bytes": "13358"
},
{
"name": "Inno Setup",
"bytes": "3430"
},
{
"name": "JavaScript",
"bytes": "2068"
},
{
"name": "Python",
"bytes": "3547939"
},
{
"name": "Shell",
"bytes": "5670"
},
{
"name": "XSLT",
"bytes": "89205"
}
],
"symlink_target": ""
} |
import uuid
from contextlib import contextmanager
import shapely.geometry
import shapely.wkb
import psycopg2
PG_TYPES = {
'str': 'VARCHAR',
'float': 'DOUBLE PRECISION',
'int': 'INTEGER',
}
def json_schema_to_pg_columns(schema):
table_statements = []
for key, prop_type in schema['properties'].iteritems():
table_statements.append('%s %s' % (key, PG_TYPES[prop_type]))
return ',\n'.join(table_statements)
class TempPGDB(object):
def __init__(self, connection, tablename, schema=None):
self.connection = connection
self.tablename = tablename
self.schema = schema
@contextmanager
def savepoint(self, cur, raise_errors=False):
savepoint_name = 'savepoint' + uuid.uuid4().get_hex()
try:
cur.execute('SAVEPOINT %s' % savepoint_name)
yield
except psycopg2.ProgrammingError:
cur.execute('ROLLBACK TO SAVEPOINT %s' % savepoint_name)
if raise_errors:
raise
def drop_table_or_view(self, cur, name):
with self.savepoint(cur):
cur.execute('DROP TABLE "' + name + '" CASCADE')
with self.savepoint(cur):
cur.execute('DROP VIEW "' + name + '" CASCADE')
def ensure_metadata_table(self, cur):
with self.savepoint(cur):
create_table_stmt = """
CREATE TABLE table_metadata (
tablename VARCHAR PRIMARY KEY,
feature_ids VARCHAR[]
);
"""
cur.execute(create_table_stmt)
def create_table(self):
cur = self.connection.cursor()
self.ensure_metadata_table(cur)
# remove old metadata entry for tablename
cur.execute("DELETE FROM table_metadata WHERE tablename = %s", [self.tablename])
self.drop_table_or_view(cur, self.tablename)
if not self.schema:
raise ValueError('schema of data not set')
properties = json_schema_to_pg_columns(self.schema)
if properties:
properties = ',\n' + properties
create_table_stmt = """
CREATE TABLE %(tablename)s (
__id SERIAL PRIMARY KEY,
__modified BOOL DEFAULT true
%(properties)s
);
""" % {
'tablename': self.tablename,
'properties': properties,
}
cur.execute(create_table_stmt)
update_trigger = """
CREATE OR REPLACE FUNCTION update_modified_column()
RETURNS TRIGGER AS $$
BEGIN
NEW.__modified = true;
RETURN NEW;
END;
$$ language 'plpgsql';
CREATE TRIGGER update_%(tablename)s_modified BEFORE UPDATE
ON %(tablename)s FOR EACH ROW EXECUTE PROCEDURE
update_modified_column();
""" % {'tablename': self.tablename}
cur.execute(update_trigger)
add_geometry_stmt = """
SELECT AddGeometryColumn ('', '%(tablename)s', 'geometry',
%(srid)s, '%(pg_geometry_type)s', 2);
""" % {
'tablename': self.tablename,
'srid': 3857,
'pg_geometry_type': 'POLYGON'
}
cur.execute(add_geometry_stmt)
def store_feature_ids(self, cur, feature_ids):
if not feature_ids:
return
insert_statement = """
INSERT INTO table_metadata (tablename, feature_ids)
VALUES (%s, %s);
"""
update_statement = """
UPDATE table_metadata
SET feature_ids = feature_ids || %s
WHERE tablename = %s;
"""
cur.execute("SELECT 1 FROM table_metadata WHERE tablename = %s", [self.tablename])
if cur.rowcount == 1:
cur.execute(update_statement, [feature_ids, self.tablename])
else:
cur.execute(insert_statement, [self.tablename, feature_ids])
def imported_feature_ids(self):
cur = self.connection.cursor()
cur.execute("SELECT feature_ids FROM table_metadata WHERE tablename = %s", [self.tablename])
results = cur.fetchone()
return results[0] if results else None
def insert_features(self, features):
cur = self.connection.cursor()
feature_ids = []
schema_properties = set(self.schema.get('properties', {}).keys())
for feature in features:
feature_id = feature['properties'].get('_id')
extra_arg_names = [n for n in feature['properties'].iterkeys() if n in schema_properties]
extra_args = ', %s' * len(extra_arg_names)
extra_arg_names_list = ', ' + ', '.join('"' + name.lower() + '"' for name in extra_arg_names)
insert_statement = """
INSERT INTO %(tablename)s (geometry, __modified %(extra_arg_names)s)
VALUES (ST_GeomFromWKB(%%s, 3857), false %(extra_args)s);
""" % {
'tablename': self.tablename,
'extra_arg_names': extra_arg_names_list,
'extra_args': extra_args,
}
try:
geometry = shapely.geometry.asShape(feature['geometry'])
except ValueError:
# feature is not a geometry
continue
if geometry.type not in ('Polygon', ):
# skip non polygons
continue
cur.execute(insert_statement,
[psycopg2.Binary(geometry.wkb)]
+ [feature['properties'][n] for n in extra_arg_names])
if feature_id:
feature_ids.append(feature_id)
self.store_feature_ids(cur, feature_ids)
def load_features(self):
cur = self.connection.cursor()
property_keys = self.schema['properties'].keys()
extra_arg_names = ', ' + ', '.join('"' + name.lower() + '"' for name in property_keys)
select_stmt = """
SELECT ST_AsBinary(geometry), __modified %(extra_arg_names)s FROM %(tablename)s;
""" % {
'extra_arg_names': extra_arg_names,
'tablename': self.tablename,
}
cur.execute(select_stmt)
for row in cur:
# filter out empty properties
properties = dict((k, v) for k, v in zip(property_keys, row[2:]) if v is not None)
feature = {
'geometry': shapely.geometry.mapping(shapely.wkb.loads(str(row[0]))),
'properties': properties,
'modified': row[1],
}
yield feature
| {
"content_hash": "1a081ddbe51a97ccc2f44553643e6171",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 105,
"avg_line_length": 33.890625,
"alnum_prop": 0.5506377747041648,
"repo_name": "omniscale/gbi-server",
"id": "703274934e4e60655a33b131195530302a2d117e",
"size": "7161",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/gbi_server/lib/postgis.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "19718"
},
{
"name": "HTML",
"bytes": "100537"
},
{
"name": "JavaScript",
"bytes": "46641"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "202205"
}
],
"symlink_target": ""
} |
"""
Module for models stored in a combined setup of a Salesforce and a normal database.
Typical usage purposes, e.g.:
- Back up Salesforce objects including their primary keys.
- Use the backup in queries, with ForeignKeys correctly related within the same database.
- Update the object in Salesforce to the original values from the backup object
if the primary key still exists in Salesforce.
- Create a new non-Salesforce object with an automatic uuid pk.
- Use it as a much faster alternative to a "data sandbox refresh".
The non-Salesforce database uses a default uuid() key or a provided Salesforce Id value.
This module may be less supported, or unsupported, in future Django versions.
Interesting ideas that are not yet implemented:
- Use a normal Salesforce backup zip like a read-only database (a list
of Salesforce objects that can be filtered and quickly saved to a non-Salesforce database
by bulk_create()).
- This could be useful for tests with realistic data on a database with rollback.
"""
from typing import Any, Generic, TYPE_CHECKING
from django.db import models, router
from django.db.backends.base.base import BaseDatabaseWrapper as DatabaseWrapper
from salesforce.backend import DJANGO_30_PLUS
from salesforce.backend.indep import get_sf_alt_pk
from salesforce.models import * # NOQA; pylint:disable=wildcard-import,unused-wildcard-import
from salesforce.models import SalesforceAutoField, SalesforceModelBase, SF_PK, _T
from salesforce.router import is_sf_database
if not TYPE_CHECKING:
from salesforce.backend import manager
__all__ = ('SalesforceModel',)
class SfCharAutoField(SalesforceAutoField):
"""Auto field that allows Salesforce ID or UUID in an alternate database"""
# db_returning = False # this was a simple fix for Django >= 3.0,
# # but a fix by "_do_insert()" is better.
validators = () # type: ignore[assignment] # easier than an immutable list
def get_internal_type(self) -> str:
return 'CharField'
def db_type(self, connection: DatabaseWrapper) -> str:
if connection.vendor != 'salesforce':
# it is 'varchar(32)'
return models.CharField(max_length=32).db_type(connection=connection)
return 'AutoField'
def rel_db_type(self, connection: DatabaseWrapper) -> str:
if connection.vendor != 'salesforce':
return models.CharField(max_length=32).db_type(connection=connection)
return 'AutoField'
if TYPE_CHECKING:
class SalesforceModel(models.Model, Generic[_T], # type: ignore[no-redef] # pylint:disable=function-redefined
metaclass=SalesforceModelBase):
_salesforce_object = ...
def __init__(self, *args: Any, **kwargs: Any) -> None: # pylint:disable=super-init-not-called
tmp = models.manager.Manager() # type: models.manager.Manager[_T]
self.objects = tmp
else:
# pylint:disable=function-redefined
class SalesforceModel(models.Model, metaclass=SalesforceModelBase): # pylint:disable=function-redefined
"""
Abstract model class for Salesforce objects that can be saved to other db.
(It is not a subclass of salesforce.models.SalesforceModel. That is not
a big problem if we don't check inheritance but only the '_salesforce_object'
attribute or if we use only this or only the original implementation.)
"""
_salesforce_object = 'extended'
objects = manager.SalesforceManager() # type: manager.SalesforceManager[_T]
class Meta:
# pylint:disable=duplicate-code
abstract = True
base_manager_name = 'objects'
id = SfCharAutoField(primary_key=True, name=SF_PK, db_column='Id', verbose_name='ID', auto_created=True,
editable=False)
def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
using = using or router.db_for_write(self.__class__, instance=self)
if self.pk is None and not force_update and not is_sf_database(using):
self.pk = get_sf_alt_pk()
super().save(force_insert=force_insert, force_update=force_update,
using=using, update_fields=update_fields)
if not isinstance(self.pk, str):
raise ValueError("The primary key value is not assigned correctly")
if DJANGO_30_PLUS:
def _do_insert(self, manager, using, fields, returning_fields, raw): # pylint:disable=redefined-outer-name
# the check "is_sf_database(using)" is used for something unexpected
if self.pk and not is_sf_database(using):
returning_fields = []
return super()._do_insert(manager, using, fields, returning_fields, raw)
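# Illustrative sketch (not part of the original module): a concrete model based
# on this class. The model, its fields and the database alias below are
# assumptions, not a documented example.
#
#   class Contact(SalesforceModel):
#       last_name = models.CharField(max_length=80, db_column='LastName')
#
#       class Meta:
#           db_table = 'Contact'
#
#   # Saving to a non-Salesforce database assigns an alternate uuid-based pk;
#   # saving to the Salesforce database lets Salesforce assign the Id.
#   contact = Contact(last_name='Smith')
#   contact.save(using='default')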
| {
"content_hash": "e9e4d0681368e49037457a838e24358b",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 119,
"avg_line_length": 45.93333333333333,
"alnum_prop": 0.6782085838689612,
"repo_name": "django-salesforce/django-salesforce",
"id": "42cbfc32301eaabb03c29bbc173a9976845acc2b",
"size": "4823",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "salesforce/models_extend.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1161"
},
{
"name": "Python",
"bytes": "507498"
},
{
"name": "Shell",
"bytes": "8401"
}
],
"symlink_target": ""
} |
import tkinter as tk
from pygubu.i18n import _
from pygubu.api.v1 import register_widget, register_custom_property
from pygubu.plugins.ttk.ttkstdwidgets import TTKFrame
from ttkwidgets.frames import ScrolledFrame, ToggledFrame
from ..ttkwidgets import _designer_tab_label, _plugin_uid
class ScrolledFrameBO(TTKFrame):
class_ = ScrolledFrame
container = True
container_layout = True
OPTIONS_CUSTOM = (
"compound",
"canvaswidth",
"canvasheight",
"canvasborder",
"autohidescrollbar",
)
properties = (
TTKFrame.OPTIONS_STANDARD + TTKFrame.OPTIONS_SPECIFIC + OPTIONS_CUSTOM
)
ro_properties = TTKFrame.ro_properties + OPTIONS_CUSTOM
def get_child_master(self):
return self.widget.interior
def _process_property_value(self, pname, value):
if pname == "autohidescrollbar":
return tk.getboolean(value)
if pname == "compound":
return tk.LEFT if value == "left" else tk.RIGHT
return super(ScrolledFrameBO, self)._process_property_value(
pname, value
)
def code_child_master(self):
return f"{self.code_identifier()}.interior"
def _code_process_property_value(self, targetid, pname, value):
if pname == "autohidescrollbar":
return tk.getboolean(value)
return super()._code_process_property_value(targetid, pname, value)
_builder_uid = f"{_plugin_uid}.ScrolledFrame"
register_widget(
_builder_uid,
ScrolledFrameBO,
"ScrolledFrame",
("ttk", _designer_tab_label),
group=0,
)
register_custom_property(
_builder_uid,
"compound",
"choice",
default_value=tk.RIGHT,
values=("", tk.LEFT, tk.RIGHT),
state="readonly",
help=_("side the scrollbar should be on"),
)
register_custom_property(
_builder_uid, "canvaswidth", "dimensionentry", default_value=400
)
register_custom_property(
_builder_uid, "canvasheight", "dimensionentry", default_value=400
)
register_custom_property(_builder_uid, "canvasborder", "dimensionentry")
register_custom_property(
_builder_uid,
"autohidescrollbar",
"choice",
values=("", "true", "false"),
state="readonly",
)
class ToggledFrameBO(TTKFrame):
class_ = ToggledFrame
container = True
OPTIONS_CUSTOM = (
"compound",
"width",
"text",
)
properties = (
TTKFrame.OPTIONS_STANDARD + TTKFrame.OPTIONS_SPECIFIC + OPTIONS_CUSTOM
)
ro_properties = TTKFrame.ro_properties + OPTIONS_CUSTOM
def get_child_master(self):
return self.widget.interior
def _process_property_value(self, pname, value):
final_value = value
if pname in ("autohidescrollbar",):
final_value = tk.getboolean(value)
else:
final_value = super(ToggledFrameBO, self)._process_property_value(
pname, value
)
return final_value
def code_child_master(self):
return f"{self.code_identifier()}.interior"
_builder_uid = f"{_plugin_uid}.ToggledFrame"
register_widget(
_builder_uid,
ToggledFrameBO,
"ToggledFrame",
("ttk", _designer_tab_label),
group=0,
)
register_custom_property(
_builder_uid,
"compound",
"choice",
default_value=tk.RIGHT,
help=_("position of the toggle arrow compared to the text"),
values=("", tk.TOP, tk.BOTTOM, tk.LEFT, tk.RIGHT, tk.CENTER, tk.NONE),
state="readonly",
)
register_custom_property(
_builder_uid,
"width",
"naturalnumber",
help=_("width of the closed ToggledFrame (in characters)"),
)
register_custom_property(
_builder_uid,
"text",
"entry",
help=_("text to display next to the toggle arrow"),
)
| {
"content_hash": "f7068c308d9fb564ec38b77f889b236a",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 78,
"avg_line_length": 26.913669064748202,
"alnum_prop": 0.6426089280940925,
"repo_name": "alejandroautalan/pygubu",
"id": "f0ad79bb90a5b8452119f8bd2bca2c8ecaa22928",
"size": "3741",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pygubu/plugins/ttkwidgets/frames.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "430354"
},
{
"name": "Shell",
"bytes": "840"
}
],
"symlink_target": ""
} |
""" Defines the Image component class.
"""
from __future__ import absolute_import
# Enthought library imports
from traits.api import Array, Bool, Enum, Instance, Property, cached_property
# Local imports
from enable.component import Component
from kiva.image import GraphicsContext
class Image(Component):
""" Component that displays a static image
This is extremely simple right now. By default it will draw the array into
the entire region occupied by the component, stretching or shrinking as
needed. By default the bounds are set to the width and height of the data
array, and we provide the same information to constraints-based layout
with the layout_size_hint trait.
"""
#: the image data as an array
data = Array(shape=(None, None, (3,4)), dtype='uint8')
#: the format of the image data (eg. RGB vs. RGBA)
format = Property(Enum('rgb24', 'rgba32'), depends_on='data')
#: the size-hint for constraints-based layout
layout_size_hint = Property(data, depends_on='data')
#: the image as an Image GC
_image = Property(Instance(GraphicsContext), depends_on='data')
@classmethod
def from_file(cls, filename, **traits):
from PIL import Image
from numpy import asarray
data = asarray(Image.open(filename))
return cls(data=data, **traits)
def __init__(self, data, **traits):
# the default bounds are the size of the image
traits.setdefault('bounds', data.shape[1::-1])
super(Image, self).__init__(data=data, **traits)
def _draw_mainlayer(self, gc, view_bounds=None, mode="normal"):
""" Draws the image. """
with gc:
gc.draw_image(self._image, (self.x, self.y, self.width, self.height))
@cached_property
def _get_format(self):
if self.data.shape[-1] == 3:
return 'rgb24'
elif self.data.shape[-1] == 4:
return 'rgba32'
else:
raise ValueError('Data array not correct shape')
@cached_property
def _get_layout_size_hint(self):
return self.data.shape[1::-1]
@cached_property
def _get__image(self):
if not self.data.flags['C_CONTIGUOUS']:
data = self.data.copy()
else:
data = self.data
image_gc = GraphicsContext(data, pix_format=self.format)
return image_gc
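# Illustrative usage sketch (not part of the original module); the file name
# and the container below are assumptions.
#
#   from enable.api import Container
#
#   image = Image.from_file('logo.png')   # bounds default to the image size
#   container = Container(bounds=[500, 500])
#   container.add(image)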
| {
"content_hash": "e72621ea32bc7537ec1384093a024fd9",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 81,
"avg_line_length": 32.12162162162162,
"alnum_prop": 0.6386201093815734,
"repo_name": "tommy-u/enable",
"id": "646bb6d02a1b59c5ce38f60c35f400a7d40ab6f8",
"size": "2377",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "enable/primitives/image.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "240"
},
{
"name": "C",
"bytes": "5526949"
},
{
"name": "C++",
"bytes": "3058044"
},
{
"name": "DIGITAL Command Language",
"bytes": "35819"
},
{
"name": "Groff",
"bytes": "236"
},
{
"name": "Makefile",
"bytes": "58238"
},
{
"name": "Objective-C",
"bytes": "16551"
},
{
"name": "Python",
"bytes": "2202660"
},
{
"name": "Shell",
"bytes": "6286"
}
],
"symlink_target": ""
} |
import os
import sys
import json
import fnmatch
TEST_DIR = "/webvtt/"
CATEGORIES_FILE = "../categories.json"
class Test:
def __init__(self, file, name, status, message):
self.file = file
self.name = name
self.status = status
self.message = message
self.passed = status == 'PASS'
self.categories = []
@classmethod
def from_json(cls, json):
file = json["test"]
if not file.startswith(TEST_DIR):
return []
file = file[len(TEST_DIR):]
status = json["status"]
message = json["message"]
tests = []
for test in json["subtests"]:
name = test["name"]
if status == 'OK':
test_status = test["status"]
test_message = test["message"]
else:
test_status, test_message = status, message
tests.append(Test(file, name, test_status, test_message))
return tests
class Category:
def __init__(self, names):
self.names = set(names)
self.tests = {}
@classmethod
def from_json(cls, json):
return Category(json)
def add_test(self, name, test):
self.tests[test] = name
def __contains__(self, name):
return name in self.names
def parse_results(file):
data = json.load(file)
results = data["results"]
tests = []
for result in results:
tests += Test.from_json(result)
return tests
def parse_categories(file, tests, categories = None, categories_map = None):
data = json.load(file)
basepath = os.path.dirname(file.name)
categories = categories or []
if categories_map:
categories_map = dict(categories_map)
else:
categories_map = {}
if ":categories" in data:
for cat_data in data[":categories"]:
category = Category.from_json(cat_data)
categories.append(category)
for name in category.names:
categories_map[name] = category
for pattern, category_name in data.items():
if pattern.startswith(":"):
continue
category = categories_map[category_name]
file_pattern = os.path.normpath(os.path.join(basepath, pattern))
for test in tests:
if fnmatch.fnmatch(test.name, file_pattern) or fnmatch.fnmatch(test.file, file_pattern):
category.add_test(category_name, test)
test.categories.append(category)
if ":subcategories" in data:
for subcat_name in data[":subcategories"]:
path = os.path.join(basepath, subcat_name)
file = open(path, "r")
parse_categories(file, tests, categories, categories_map)
return categories
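# Illustrative shape of the categories file this parser expects, inferred from
# the code above (category names, patterns and paths are made up):
#
#   {
#       ":categories": [["parsing", "parsing-entities"], ["rendering"]],
#       ":subcategories": ["rendering/categories.json"],
#       "parsing/file-parsing/*": "parsing",
#       "rendering/*": "rendering"
#   }
#
# Each key that does not start with ":" is a glob matched against test file
# paths or test names; its value must be one of the names listed in
# ":categories".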
def main(argv):
if len(argv) == 1:
if argv[0] == '-':
results_file = sys.stdin
else:
results_file = open(argv[0], "r")
else:
print("USAGE: python3 categorize_results.py <file>")
print("<file>\tA file containing wpt results. Or `-` for reading results from stdin.")
return
filepath = os.path.dirname(__file__)
categories_path = os.path.join(filepath, CATEGORIES_FILE)
categories_file = open(categories_path, "r")
tests = parse_results(results_file)
categories = parse_categories(categories_file, tests)
for category in categories:
tests_by_name = { name: [] for name in category.names }
for test, name in category.tests.items():
tests_by_name[name].append(test)
for name in category.names:
test_group = tests_by_name[name]
amount = len(test_group)
if amount == 0:
continue
passed = sum(1 for test in test_group if test.passed)
print("{}:\t{}/{} - {}%".format(name, passed, amount, round(passed / amount * 100, 2)))
if __name__ == "__main__":
main(sys.argv[1:])
| {
"content_hash": "69c160903b899efefd907a237e619f38",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 100,
"avg_line_length": 28.759124087591243,
"alnum_prop": 0.5741116751269035,
"repo_name": "nwjs/chromium.src",
"id": "6cb18c3c2de2f5d637e3ca38efdf85bfb34c031e",
"size": "3940",
"binary": false,
"copies": "88",
"ref": "refs/heads/nw70",
"path": "third_party/blink/web_tests/external/wpt/webvtt/tools/categorize_results.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import Cython.Compiler.Errors as Errors
from Cython.CodeWriter import CodeWriter
from Cython.Compiler.TreeFragment import TreeFragment, strip_common_indent
from Cython.Compiler.Visitor import TreeVisitor, VisitorTransform
from Cython.Compiler import TreePath
import unittest
import os, sys
import tempfile
class NodeTypeWriter(TreeVisitor):
def __init__(self):
super(NodeTypeWriter, self).__init__()
self._indents = 0
self.result = []
def visit_Node(self, node):
if not self.access_path:
name = u"(root)"
else:
tip = self.access_path[-1]
if tip[2] is not None:
name = u"%s[%d]" % tip[1:3]
else:
name = tip[1]
self.result.append(u" " * self._indents +
u"%s: %s" % (name, node.__class__.__name__))
self._indents += 1
self.visitchildren(node)
self._indents -= 1
def treetypes(root):
"""Returns a string representing the tree by class names.
There is leading and trailing whitespace so that it can be
compared by simple string comparison while still making test
cases look ok."""
w = NodeTypeWriter()
w.visit(root)
return u"\n".join([u""] + w.result + [u""])
class CythonTest(unittest.TestCase):
def setUp(self):
self.listing_file = Errors.listing_file
self.echo_file = Errors.echo_file
Errors.listing_file = Errors.echo_file = None
def tearDown(self):
Errors.listing_file = self.listing_file
Errors.echo_file = self.echo_file
def assertLines(self, expected, result):
"Checks that the given strings or lists of strings are equal line by line"
if not isinstance(expected, list): expected = expected.split(u"\n")
if not isinstance(result, list): result = result.split(u"\n")
for idx, (expected_line, result_line) in enumerate(zip(expected, result)):
self.assertEqual(expected_line, result_line, "Line %d:\nExp: %s\nGot: %s" % (idx, expected_line, result_line))
self.assertEqual(len(expected), len(result),
"Unmatched lines. Got:\n%s\nExpected:\n%s" % ("\n".join(expected), u"\n".join(result)))
def codeToLines(self, tree):
writer = CodeWriter()
writer.write(tree)
return writer.result.lines
def codeToString(self, tree):
return "\n".join(self.codeToLines(tree))
def assertCode(self, expected, result_tree):
result_lines = self.codeToLines(result_tree)
expected_lines = strip_common_indent(expected.split("\n"))
for idx, (line, expected_line) in enumerate(zip(result_lines, expected_lines)):
self.assertEqual(expected_line, line, "Line %d:\nGot: %s\nExp: %s" % (idx, line, expected_line))
self.assertEqual(len(result_lines), len(expected_lines),
"Unmatched lines. Got:\n%s\nExpected:\n%s" % ("\n".join(result_lines), expected))
def assertNodeExists(self, path, result_tree):
self.assertNotEqual(TreePath.find_first(result_tree, path), None,
"Path '%s' not found in result tree" % path)
    def fragment(self, code, pxds={}, pipeline=[]):
        "Create a tree fragment, using the test case name for parse error messages."
name = self.id()
if name.startswith("__main__."): name = name[len("__main__."):]
name = name.replace(".", "_")
return TreeFragment(code, name, pxds, pipeline=pipeline)
def treetypes(self, root):
return treetypes(root)
def should_fail(self, func, exc_type=Exception):
"""Calls "func" and fails if it doesn't raise the right exception
(any exception by default). Also returns the exception in question.
"""
try:
func()
self.fail("Expected an exception of type %r" % exc_type)
except exc_type as e:
self.assert_(isinstance(e, exc_type))
return e
def should_not_fail(self, func):
"""Calls func and succeeds if and only if no exception is raised
(i.e. converts exception raising into a failed testcase). Returns
the return value of func."""
try:
return func()
except:
self.fail(str(sys.exc_info()[1]))
class TransformTest(CythonTest):
"""
Utility base class for transform unit tests. It is based around constructing
test trees (either explicitly or by parsing a Cython code string); running
    the transform, serializing the result using a customized Cython serializer (with
    special markup for nodes that cannot be represented in Cython),
    and doing a line-by-line string comparison of the result.
To create a test case:
- Call run_pipeline. The pipeline should at least contain the transform you
are testing; pyx should be either a string (passed to the parser to
create a post-parse tree) or a node representing input to pipeline.
The result will be a transformed result.
- Check that the tree is correct. If wanted, assertCode can be used, which
takes a code string as expected, and a ModuleNode in result_tree
(it serializes the ModuleNode to a string and compares line-by-line).
All code strings are first stripped for whitespace lines and then common
indentation.
Plans: One could have a pxd dictionary parameter to run_pipeline.
"""
def run_pipeline(self, pipeline, pyx, pxds={}):
tree = self.fragment(pyx, pxds).root
# Run pipeline
for T in pipeline:
tree = T(tree)
return tree
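# --- Added illustrative sketch (not part of the original Cython sources) ---
# The TransformTest docstring above outlines the workflow; the helper below
# shows it in miniature.  The code string and the tree path
# "//SingleAssignmentNode" are illustrative assumptions only.
def _example_transform_test_usage(test_case, transform_pipeline=()):
    """Run a (possibly empty) pipeline over a tiny fragment and sanity-check
    the resulting tree; `test_case` is expected to be a TransformTest."""
    tree = test_case.run_pipeline(list(transform_pipeline), u"x = 42")
    # With an empty pipeline this is simply the parsed tree, so a plain
    # assignment node should be present somewhere in it.
    test_case.assertNodeExists("//SingleAssignmentNode", tree)
    return tree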
class TreeAssertVisitor(VisitorTransform):
# actually, a TreeVisitor would be enough, but this needs to run
# as part of the compiler pipeline
def visit_CompilerDirectivesNode(self, node):
directives = node.directives
if 'test_assert_path_exists' in directives:
for path in directives['test_assert_path_exists']:
if TreePath.find_first(node, path) is None:
Errors.error(
node.pos,
"Expected path '%s' not found in result tree" % path)
if 'test_fail_if_path_exists' in directives:
for path in directives['test_fail_if_path_exists']:
if TreePath.find_first(node, path) is not None:
Errors.error(
node.pos,
"Unexpected path '%s' found in result tree" % path)
self.visitchildren(node)
return node
visit_Node = VisitorTransform.recurse_to_children
def unpack_source_tree(tree_file, dir=None):
if dir is None:
dir = tempfile.mkdtemp()
header = []
cur_file = None
f = open(tree_file)
try:
lines = f.readlines()
finally:
f.close()
del f
try:
for line in lines:
if line[:5] == '#####':
filename = line.strip().strip('#').strip().replace('/', os.path.sep)
path = os.path.join(dir, filename)
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
if cur_file is not None:
f, cur_file = cur_file, None
f.close()
cur_file = open(path, 'w')
elif cur_file is not None:
cur_file.write(line)
elif line.strip() and not line.lstrip().startswith('#'):
if line.strip() not in ('"""', "'''"):
header.append(line)
finally:
if cur_file is not None:
cur_file.close()
return dir, ''.join(header)
| {
"content_hash": "d7c2eea7d37811d1891702766f4d0f2e",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 122,
"avg_line_length": 37.73529411764706,
"alnum_prop": 0.6023642504546636,
"repo_name": "bdh1011/wau",
"id": "08f870a1c02941997d77fadba3588611140207d6",
"size": "7698",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/site-packages/Cython/TestUtils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1176"
},
{
"name": "C",
"bytes": "5022853"
},
{
"name": "C++",
"bytes": "43676"
},
{
"name": "CSS",
"bytes": "10359"
},
{
"name": "D",
"bytes": "1841"
},
{
"name": "FORTRAN",
"bytes": "3707"
},
{
"name": "GAP",
"bytes": "14120"
},
{
"name": "Groff",
"bytes": "7236"
},
{
"name": "HTML",
"bytes": "1709320"
},
{
"name": "JavaScript",
"bytes": "1200059"
},
{
"name": "Jupyter Notebook",
"bytes": "310219"
},
{
"name": "Lua",
"bytes": "11887"
},
{
"name": "Makefile",
"bytes": "112163"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Objective-C",
"bytes": "1291"
},
{
"name": "Perl",
"bytes": "171375"
},
{
"name": "Python",
"bytes": "49407229"
},
{
"name": "Ruby",
"bytes": "58403"
},
{
"name": "Shell",
"bytes": "47672"
},
{
"name": "Smarty",
"bytes": "22599"
},
{
"name": "Tcl",
"bytes": "426334"
},
{
"name": "XSLT",
"bytes": "153073"
}
],
"symlink_target": ""
} |
from twitter import *
import simplejson
import serial
import datetime
import time
import threading
QUIT = 0
prevtweet = ""
def centerstring(string,width):
    """ Center a string within the given width by left-padding with spaces """
return " "*((width-len(string))/2)+string
def padstring(string,width):
    """Pad or truncate a string to exactly the given width"""
if len(string) > width:
result = string[0:width]
else:
result = string + " "*(width-len(string))
return result
def runtime():
rangthebell = 0
while QUIT == 0:
# dates = centerstring(datetime.datetime.now().strftime("%B %d, %Y"),20)
# times = centerstring(datetime.datetime.now().strftime("%I:%M:%S %p"),20)
#
# p.write("\x80")
# p.write("%s\r%s" % (dates,times))
dates = datetime.datetime.now().isoformat(' ')[0:19]
p.write("\x80") # move to 0,0 on the display
p.write(padstring(dates,20)) # make sure to have a nice clean line by filling it all out
if datetime.datetime.now().strftime("%M")[-1:] == "5":
if rangthebell == 0:
                p.write("\xD2\xE1\xD1\xE4\xD2\xE1") # do an annoying beep at the minute mark
rangthebell = 1
else:
rangthebell = 0
time.sleep(1)
def checktweet():
turl = 'http://api.twitter.com/1.1/search/tweets.json?q='
CONSUMER_KEY = 'xxx'
CONSUMER_SECRET = 'xxx'
OAUTH_TOKEN = 'XXX'
OAUTH_SECRET = 'XXX'
t = Twitter( auth=OAuth(OAUTH_TOKEN,OAUTH_SECRET,CONSUMER_KEY,CONSUMER_SECRET) )
prevtweet = ""
while QUIT == 0:
twitter_results = t.statuses.home_timeline()
tweet = twitter_results[0]['text'].encode('ascii','ignore') # convert to ascii and ignore unicode conv. errors
if prevtweet != tweet:
# p.write("\xA8") # second line 0 position (line 3 on the display)
p.write("\x94") # first line 0 position (line 2 on the display)
p.write(padstring(tweet,60))
p.write("\xD2\xE7\xD1\xE1\xD2\xE5")
print "-"*150
print "From: %s" % twitter_results[0]['user']['screen_name']
print tweet
print "-"*150
prevtweet = tweet
seconds = 0
while seconds < 180:
time.sleep (1)
seconds += 1
p.write("\xCD")
p.write("%03d" % (180-seconds))
if QUIT:
break
p.write("\xD0\xE7\xE2\xE2")
#time.sleep(60)
if __name__ == "__main__":
# open up the serial port
p = serial.Serial("/dev/ttyAMA0", baudrate=19200, timeout=2)
p.write("starting the clock!")
# clear the screen and get ready to display with backlight on
p.write("\x16") # turns it on with no cursor blink
p.write("\x11") # turn on the back light
p.write("\x0C") # clear the screen. Must wait 5ms before we move on
t1 = threading.Thread(target = runtime, args=())
t2 = threading.Thread(target = checktweet, args=())
t1.start()
t2.start()
try:
while 1:
time.sleep(.1)
except KeyboardInterrupt:
        print "Quitting"
QUIT = 1
print "Exiting clock"
t1.join()
print "Exiting tweet"
t2.join()
print "Exits complete"
p.write("\x15") # turns display off, but not backlight
p.write("\x12") # turns backlight off
p.close()
QUIT = 1
pass
print 'exiting'
| {
"content_hash": "58a2c78c71a08a3323580f39b6e67df3",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 115,
"avg_line_length": 24.763779527559056,
"alnum_prop": 0.6146263910969794,
"repo_name": "tachijuan/python",
"id": "6978acb42cd2b9022046d187f3dcd003abefb5dd",
"size": "3145",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "myscripts/timeandtweet.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1088"
},
{
"name": "Python",
"bytes": "65208"
}
],
"symlink_target": ""
} |
"""
Table object, like an HTML table, built up from DXF R12 entities.
Cells can contain Multiline-Text or DXF-BLOCKs, or you can create your own
cell-type by extending the CustomCell object.
Cells can span over columns and rows.
Text cells can contain text with an arbitrary rotation angle, or letters can be
stacked top-to-bottom.
BlockCells contain block references (INSERT-entity) created from a block
definition (BLOCK). If the block definition contains attribute definitions
(ATTDEF-entity), attribs created by Attdef.new_attrib() will be added to the
block reference (ATTRIB-entity).
"""
__author__ = "mozman <mozman@gmx.at>"
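# ---------------------------------------------------------------------------
# Added illustrative sketch (not part of the original module): a minimal use
# of the public API defined below.  Layer set-up and adding the table to a
# drawing (e.g. ``drawing.add(table)``) are assumed to happen elsewhere.
def _example_table_usage():
    table = Table(insert=(0, 0), nrows=3, ncols=2)
    table.set_col_width(0, 4.0)                           # widen first column
    table.new_cell_style('head', textcolor=7, bgcolor=3)  # simple header style
    table.text_cell(0, 0, 'Name', style='head')
    table.text_cell(0, 1, 'Value', style='head')
    table.text_cell(1, 0, 'first\nsecond', span=(2, 1))   # cell spans two rows
    return table
# ---------------------------------------------------------------------------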
import sys
if sys.version_info[0] > 2:
xrange = range
from copy import deepcopy
import dxfwrite.const as const
from dxfwrite.base import DXFList, dxfstr
from dxfwrite.entities import Line, Solid, Insert
from dxfwrite.mtext import MText
__all__ = ['Table', 'CustomCell']
DEFAULT_TABLE_BGLAYER = 'TABLEBACKGROUND'
DEFAULT_TABLE_FGLAYER = 'TABLECONTENT'
DEFAULT_TABLE_GRIDLAYER = 'TABLEGRID'
DEFAULT_TABLE_HEIGHT = 1.0
DEFAULT_TABLE_WIDTH = 2.5
DEFAULT_TEXTSTYLE = 'STANDARD'
DEFAULT_CELL_TEXT_HEIGHT = 0.7
DEFAULT_CELL_LINESPACING = 1.5
DEFAULT_CELL_XSCALE = 1.0
DEFAULT_CELL_YSCALE = 1.0
DEFAULT_CELL_HALIGN = const.LEFT
DEFAULT_CELL_VALIGN = const.TOP
DEFAULT_CELL_TEXTCOLOR = const.BYLAYER
DEFAULT_CELL_BG_COLOR = None
DEFAULT_CELL_HMARGIN = 0.1
DEFAULT_CELL_VMARGIN = 0.1
DEFAULT_BORDER_COLOR = 5
DEFAULT_BORDER_LINETYPE = None
DEFAULT_BORDER_STATUS = True
DEFAULT_BORDER_PRIORITY = 50
VISIBLE = 1
HIDDEN = 0
class Table(object):
"""A HTML-table like object.
The table object contains the table data cells.
"""
name = 'TABLE'
def __init__(self, insert, nrows, ncols, default_grid=True):
"""
:param insert: insert point as 2D or 3D point
:param int nrows: row count
:param int ncols: column count
:param bool default_grid: if **True** always a solid line grid will
be drawn, if **False**, only explicit defined borders will be
drawn, default grid has a priority of 50.
"""
self.insert = insert
self.nrows = nrows
self.ncols = ncols
self.row_heights = [DEFAULT_TABLE_HEIGHT] * nrows
self.col_widths = [DEFAULT_TABLE_WIDTH] * ncols
self.bglayer = DEFAULT_TABLE_BGLAYER
self.fglayer = DEFAULT_TABLE_FGLAYER
self.gridlayer = DEFAULT_TABLE_GRIDLAYER
self.styles = {'default': Style.get_default_cell_style()}
if not default_grid:
default_style = self.get_cell_style('default')
default_style.set_border_status(False, False, False, False)
self._cells = {} # data cells
self.frames = [] # border frame objects
# visibility_map stores the visibility of the cells, created in _setup
self.visibility_map = None
# grid manages the border lines, created in _setup
self.grid = None
# data contains the resulting dxf entities
self.data = None
self.empty_cell = Cell(self) # represents all empty cells
def set_col_width(self, column, value):
""" Set column width to value (in drawing units).
:param int column: zero based column index
:param float value: new column width in drawing units
"""
self.col_widths[column] = float(value)
def set_row_height(self, row, value):
""" Set row height to value (in drawing units).
:param int row: zero based row index
:param float value: new row height in drawing units
"""
self.row_heights[row] = float(value)
def text_cell(self, row, col, text, span=(1, 1), style='default'):
""" Create a new text cell at position (row, col), with 'text' as
content, text can be a multi-line text, use ``'\\n'`` as line
separator.
The cell spans over **span** cells and has the cell style with the
name **style**.
"""
cell = TextCell(self, text, style=style, span=span)
return self.set_cell(row, col, cell)
# pylint: disable-msg=W0102
def block_cell(self, row, col, blockdef, span=(1, 1), attribs={}, style='default'):
""" Create a new block cell at position (row, col).
Content is a block reference inserted by a :ref:`INSERT` entity,
attributes will be added if the block definition contains :ref:`ATTDEF`. Assignments
are defined by attribs-key to attdef-tag association.
Example: attribs = {'num': 1} if an :ref:`ATTDEF` with tag=='num' in
the block definition exists, an attrib with text=str(1) will be
created and added to the insert entity.
The cell spans over 'span' cells and has the cell style with the
name 'style'.
"""
cell = BlockCell(self, blockdef, style=style, attribs=attribs, span=span)
return self.set_cell(row, col, cell)
def set_cell(self, row, col, cell):
""" Insert a cell at position (row, col).
"""
row, col = self.validate_index(row, col)
self._cells[row, col] = cell
return cell
def get_cell(self, row, col):
""" Get cell at position (row, col).
"""
row, col = self.validate_index(row, col)
try:
return self._cells[row, col]
except KeyError:
            return self.empty_cell # empty cell with default style
def validate_index(self, row, col):
row = int(row)
col = int(col)
if row < 0 or row >= self.nrows or \
col < 0 or col >= self.ncols:
raise IndexError('cell index out of range')
return row, col
def frame(self, row, col, width=1, height=1, style='default'):
""" Create a Frame object which frames the cell area starting at
        (row, col) covering 'width' columns and 'height' rows.
"""
frame = Frame(self, pos=(row, col), span=(height, width),
style=style)
self.frames.append(frame)
return frame
def new_cell_style(self, name, **kwargs):
""" Create a new Style object 'name'.
:param kwargs: see Style.get_default_cell_style()
"""
style = deepcopy(self.get_cell_style('default'))
style.update(kwargs)
self.styles[name] = style
return style
def new_border_style(self, color=const.BYLAYER, status=True,
priority=100, linetype=None):
""" Create a new border style.
:param bool status: True for visible, else False
:param int color: dxf color index
:param string linetype: linetype name, BYLAYER if None
:param int priority: drawing priority - higher values covers lower
values
"""
border_style = Style.get_default_border_style()
border_style['color'] = color
border_style['linetype'] = linetype
border_style['status'] = status
border_style['priority'] = priority
return border_style
def get_cell_style(self, name):
""" Get cell style by name.
"""
return self.styles[name]
def iter_visible_cells(self):
""" Iterate over all visible cells.
:return: a generator which yields all visible cells as tuples: (row , col, cell)
"""
if self.visibility_map is None:
raise Exception("Can only be called at dxf creation.")
return ((row, col, self.get_cell(row, col))
for row, col in self.visibility_map)
def __dxf__(self):
return dxfstr(self.__dxftags__())
def __dxftags__(self):
self._build_table()
result = self.data
self.data = None # don't need to keep this data in memory
return result
def _setup(self):
""" Table generation setup.
"""
self.data = DXFList()
self.visibility_map = VisibilityMap(self)
self.grid = Grid(self)
def _build_table(self):
""" Table is generated on calling the __dxftags__() method.
"""
self._setup()
self.grid.draw_lines()
for row, col, cell in self.iter_visible_cells():
self.grid.draw_cell_background(row, col, cell)
self.grid.draw_cell_content(row, col, cell)
self._cleanup()
def _cleanup(self):
""" Table generation cleanup.
"""
self.visibility_map = None
self.grid = None
class VisibilityMap(object):
""" Stores the visibility of the table cells.
"""
def __init__(self, table):
""" Create the visibility map for table.
"""
self.table = table
self._hidden_cells = {}
self._create_visibility_map()
def _create_visibility_map(self):
""" Set visibility for all existing cells.
"""
for row, col in iter(self):
cell = self.table.get_cell(row, col)
self._set_span_visibility(row, col, cell.span)
def _set_span_visibility(self, row, col, span):
""" Set the visibility of the given cell.
        The cell itself is visible; all other cells in the span range
        (tuple: width, height) are invisible, since they are covered by the
        main cell (row, col).
"""
if span != (1, 1):
nrows, ncols = span
for rowx in xrange(nrows):
for colx in xrange(ncols):
# switch all cells in span range to invisible
self.hide(row+rowx, col+colx)
# switch content cell visible
self.show(row, col)
def show(self, row, col):
""" Show cell (row, col).
"""
try:
del self._hidden_cells[(row, col)]
except KeyError:
pass
def hide(self, row, col):
""" Hide cell (row, col).
"""
self._hidden_cells[(row, col)] = HIDDEN
def iter_all_cells(self):
""" Iterate over all cell indices, yields (row, col) tuples.
"""
for row in xrange(self.table.nrows):
for col in xrange(self.table.ncols):
yield row, col
def is_visible_cell(self, row, col):
""" True if cell (row, col) is visible, else False.
"""
return (row, col) not in self._hidden_cells
def __iter__(self):
""" Iterate over all visible cells.
"""
return ( (row, col) for (row, col) in self.iter_all_cells() \
if self.is_visible_cell(row, col) )
class Style(dict):
""" Cell style object.
"""
@staticmethod
def get_default_cell_style():
return Style({
# textstyle is ignored by block cells
'textstyle': 'STANDARD',
# text height in drawing units, ignored by block cells
'textheight': DEFAULT_CELL_TEXT_HEIGHT,
# line spacing in percent = <textheight>*<linespacing>, ignored by block cells
'linespacing': DEFAULT_CELL_LINESPACING,
# text stretch or block reference x-axis scaling factor
'xscale': DEFAULT_CELL_XSCALE,
# block reference y-axis scaling factor, ignored by text cells
'yscale': DEFAULT_CELL_YSCALE,
# dxf color index, ignored by block cells
'textcolor': DEFAULT_CELL_TEXTCOLOR,
# text or block rotation in degrees
'rotation' : 0.,
# Letters are stacked top-to-bottom, but not rotated
'stacked': False,
# horizontal alignment (const.LEFT, const.CENTER, const.RIGHT)
'halign': DEFAULT_CELL_HALIGN,
# vertical alignment (const.TOP, const.MIDDLE, const.BOTTOM)
'valign': DEFAULT_CELL_VALIGN,
# left and right margin in drawing units
'hmargin': DEFAULT_CELL_HMARGIN,
# top and bottom margin
'vmargin': DEFAULT_CELL_VMARGIN,
# background color, dxf color index, ignored by block cells
'bgcolor': DEFAULT_CELL_BG_COLOR,
# left border style
'left': Style.get_default_border_style(),
# top border style
'top': Style.get_default_border_style(),
# right border style
'right': Style.get_default_border_style(),
# bottom border style
'bottom': Style.get_default_border_style(),
})
@staticmethod
def get_default_border_style():
return {
# border status, True for visible, False for hidden
'status': DEFAULT_BORDER_STATUS,
# dxf color index
'color': DEFAULT_BORDER_COLOR,
# linetype name, BYLAYER if None
'linetype': DEFAULT_BORDER_LINETYPE,
# drawing priority, higher values cover lower values
'priority': DEFAULT_BORDER_PRIORITY,
}
def set_border_status(self, left=True, right=True, top=True, bottom=True):
""" Set status of all cell borders at once.
"""
for border, status in (('left', left),
('right', right),
('top', top),
('bottom', bottom)):
self[border]['status'] = status
def set_border_style(self, style,
left=True, right=True, top=True, bottom=True):
""" Set border styles of all cell borders at once.
"""
for border, status in (('left', left),
('right', right),
('top', top),
('bottom', bottom)):
if status:
self[border] = style
class Grid(object):
""" Grid contains the graphical representation of the table.
"""
def __init__(self, table):
""" Constructor
:param table: associated data table
"""
self.table = table
# contains the x-axis coords of the grid lines between the data columns.
self.col_pos = self._calc_col_pos()
# contains the y-axis coords of the grid lines between the data rows.
self.row_pos = self._calc_row_pos()
        # contains the horizontal border elements, list of border styles
# get index with _border_index(row, col), which means the border element
# above row, col, and row-indices are [0 .. nrows+1], nrows+1 for the
# grid line below the last row; list contains only the border style with
# the highest priority.
self._hborders = None # created in _init_borders
# same as _hborders but for the vertical borders,
# col-indices are [0 .. ncols+1], ncols+1 for the last grid line right
# of the last column
self._vborders = None # created in _init_borders
# border style to delete borders inside of merged cells
self.noborder = dict(status=False, priority=999, linetype=None, color=0)
def _init_borders(self, hborder, vborder):
""" Init the _hborders with <hborder> and _vborders with <vborder>.
"""
# <border_count> has more elements than necessary, but it unifies the
# index calculation for _vborders and _hborders.
# exact values are:
# hborder_count = ncols * (nrows+1), hindex = ncols * <row> + <col>
# vborder_count = nrows * (ncols+1), vindex = (ncols+1) * <row> + <col>
border_count = (self.table.nrows+1) * (self.table.ncols+1)
self._hborders = [hborder] * border_count
self._vborders = [vborder] * border_count
def _border_index(self, row, col):
""" Calculate linear index for border arrays _hborders and _vborders.
"""
return row * (self.table.ncols+1) + col
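    # Added worked example: for a table with nrows=2 and ncols=3 the border
    # arrays hold (2+1)*(3+1) = 12 entries each, and the horizontal border
    # above row 1, col 2 is stored at index 1*(3+1) + 2 = 6.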
def set_hborder(self, row, col, border_style):
""" Set <border_style> for the horizontal border element above <row>, <col>.
"""
return self._set_border_style(self._hborders, row, col, border_style)
def set_vborder(self, row, col, border_style):
""" Set <border_style> for the vertical border element left of <row>, <col>.
"""
return self._set_border_style(self._vborders, row, col, border_style)
def _set_border_style(self, borders, row, col, border_style):
""" Set <border_style> for <row>, <col> in <borders>.
"""
border_index = self._border_index(row, col)
actual_borderstyle = borders[border_index]
if border_style['priority'] >= actual_borderstyle['priority']:
borders[border_index] = border_style
def get_hborder(self, row, col):
""" Get the horizontal border element above <row>, <col>.
Last grid line (below <nrows>) is the element above of <nrows+1>.
"""
return self._get_border(self._hborders, row, col)
def get_vborder(self, row, col):
""" Get the vertical border element left of <row>, <col>.
Last grid line (right of <ncols>) is the element left of <ncols+1>.
"""
return self._get_border(self._vborders, row, col)
def _get_border(self, borders, row, col):
""" Get border element at <row>, <col> from <borders>.
"""
return borders[self._border_index(row, col)]
    def _sum_fields(self, start_value, fields, append, sign=1.):
        """ Adds the field values step by step, starting with <start_value>,
        and appends each running total to another object via the given
        append method.
"""
position = start_value
append(position)
for element in fields:
position += element * sign
append(position)
def _calc_col_pos(self):
""" Calculate the x-axis coords of the grid lines between the columns.
"""
col_pos = []
start_x = self.table.insert[0]
self._sum_fields(start_x,
self.table.col_widths,
col_pos.append)
return col_pos
def _calc_row_pos(self):
""" Calculate the y-axis coords of the grid lines between the rows.
"""
row_pos = []
start_y = self.table.insert[1]
self._sum_fields(start_y,
self.table.row_heights,
row_pos.append, -1.)
return row_pos
def cell_coords(self, row, col, span):
""" Get the coordinates of the cell <row>,<col> as absolute drawing units.
:return: a tuple (left, right, top, bottom)
"""
top = self.row_pos[row]
bottom = self.row_pos[row+span[0]]
left = self.col_pos[col]
right = self.col_pos[col+span[1]]
return left, right, top, bottom
def draw_cell_background(self, row, col, cell):
""" Draw the cell background for <row>, <col> as DXF-SOLID entity.
"""
style = cell.style
if style['bgcolor'] is None:
return
# get cell coords in absolute drawing units
left, right, top, bottom = self.cell_coords(row, col, cell.span)
ltop = (left, top)
lbot = (left, bottom)
rtop = (right, top)
rbot = (right, bottom)
self.table.data.append(Solid(
points=[ltop, lbot, rbot, rtop],
color=style['bgcolor'],
layer=self.table.bglayer))
def draw_cell_content(self, row, col, cell):
""" Draw the cell content for <row>,<col>, calls the cell
method <cell>.get_dxf_entity() (has to return an object with a __dxf__()
method) to create the cell content.
"""
# get cell coords in absolute drawing units
coords = self.cell_coords(row, col, cell.span)
dxf_entity = cell.get_dxf_entity(coords, self.table.fglayer)
self.table.data.append(dxf_entity)
def draw_lines(self):
""" Draw all grid lines.
"""
# Init borders with default_style top- and left border.
default_style = self.table.get_cell_style('default')
hborder = default_style['top']
vborder = default_style['left']
self._init_borders(hborder, vborder)
self._set_frames(self.table.frames)
self._set_borders(self.table.iter_visible_cells())
self._draw_borders(self.table)
def _set_borders(self, visible_cells):
""" Set borders of the visible cells.
"""
for row, col, cell in visible_cells:
bottom_row = row + cell.span[0]
right_col = col + cell.span[1]
self._set_rect_borders(row, bottom_row, col, right_col, cell.style)
self._set_inner_borders(row, bottom_row, col, right_col,
self.noborder)
def _set_inner_borders(self, top_row, bottom_row, left_col, right_col, border_style):
""" Set <border_style> to the inner borders of the rectangle <top_row...
"""
if bottom_row - top_row > 1:
for col in xrange(left_col, right_col):
for row in xrange(top_row+1, bottom_row):
self.set_hborder(row, col, border_style)
if right_col - left_col > 1:
for row in xrange(top_row, bottom_row):
for col in xrange(left_col+1, right_col):
self.set_vborder(row, col, border_style)
def _set_rect_borders(self, top_row, bottom_row, left_col, right_col, style):
""" Set border <style> to the rectangle <top_row><bottom_row...
        The values describe the grid lines between the cells; see the doc-strings
        for set_hborder and set_vborder and the comments for self._hborders and
        self._vborders.
"""
for col in xrange(left_col, right_col):
self.set_hborder(top_row, col, style['top'])
self.set_hborder(bottom_row, col, style['bottom'])
for row in xrange(top_row, bottom_row):
self.set_vborder(row, left_col, style['left'])
self.set_vborder(row, right_col, style['right'])
def _set_frames(self, frames):
""" Set borders for all defined frames.
"""
for frame in frames:
top_row = frame.pos[0]
left_col = frame.pos[1]
bottom_row = top_row + frame.span[0]
right_col = left_col + frame.span[1]
self._set_rect_borders(top_row, bottom_row, left_col, right_col,
frame.style)
def _draw_borders(self, table):
""" Draw the grid lines as DXF-LINE entities.
"""
def append_line(start, end, style):
""" Appends the DXF-LINE entity to the table dxf data.
"""
if style['status']:
table.data.append(Line(
start=start,
end=end,
layer=layer,
color=style['color'],
linetype=style['linetype']))
def draw_hborders():
""" Draw the horizontal grid lines.
"""
for row in xrange(table.nrows+1):
yrow = self.row_pos[row]
for col in xrange(table.ncols):
xleft = self.col_pos[col]
xright = self.col_pos[col+1]
style = self.get_hborder(row, col)
append_line((xleft, yrow), (xright, yrow), style)
def draw_vborders():
""" Draw the vertical grid lines.
"""
for col in xrange(table.ncols+1):
xcol = self.col_pos[col]
for row in xrange(table.nrows):
ytop = self.row_pos[row]
ybottom = self.row_pos[row+1]
style = self.get_vborder(row, col)
append_line((xcol, ytop), (xcol, ybottom), style)
layer = table.gridlayer
draw_hborders()
draw_vborders()
class Frame(object):
""" Represent a rectangle cell area enclosed by border lines.
"""
    def __init__(self, table, pos=(0, 0), span=(1, 1), style='default'):
""" Constructor
:param table: the assigned data table
:param pos: tuple (row, col), border goes left and top of pos
:param span: count of cells that Frame covers, border goes right and below
of this cells
:param str style: style name as string
"""
self.table = table
self.pos = pos
self.span = span
self.stylename = style
@property
def style(self):
""" :returns: Style() object of the associated table.
"""
return self.table.get_cell_style(self.stylename)
class Cell(object):
""" Cell represents the table cell data.
"""
@property
def span(self):
return self._span
@span.setter
def span(self, value):
""" Ensures that span values are >= 1 in each direction.
"""
self._span = (max(1, value[0]), max(1, value[1]))
@property
def style(self):
""" :returns: Style() object of the associated table.
"""
return self.table.get_cell_style(self.stylename)
def __init__(self, table, style='default', span=(1, 1)):
""" Constructor
:param table: assigned data table
:param str style: style name as string
:param span: tuple(spanrows, spancols), count of cells that cell covers
Cell does not know its own position in the data table, because a cell
can be used multiple times in the same or in different tables.
Therefore the cell itself can not determine if the cell-range
reaches beyond the table borders.
"""
self.table = table
self.stylename = style
# span values has to be >= 1
self.span = span
# pylint: disable-msg=W0613
def get_dxf_entity(self, coords, layer):
return DXFList()
    def get_workspace_coords(self, coords):
        """ Shrinks the cell coords inward by the hmargin and vmargin values.
"""
hmargin = self.style['hmargin']
vmargin = self.style['vmargin']
return ( coords[0]+hmargin, # left
coords[1]-hmargin, # right
coords[2]-vmargin, # top
coords[3]+vmargin ) # bottom
class TextCell(Cell):
"""Represents a multi line text. Text lines are separated by '\n'."""
def __init__(self, table, text, style='default', span=(1, 1)):
""" Constructor
:param table: assigned data table
:param text: multi line text, lines separated by '\n'
:param style: style-name as string
:param span: tuple(spanrows, spancols), count of cells that cell covers
see Cell.__init__()
"""
super(TextCell, self).__init__(table, style, span)
self.text = text
def get_dxf_entity(self, coords, layer):
""" Create the cell content as MText-object.
:param coords: tuple of border-coordinates : left, right, top, bottom
:param layer: layer, which should be used for dxf entities
"""
if not len(self.text):
return DXFList()
left, right, top, bottom = self.get_workspace_coords(coords)
style = self.style
halign = style['halign']
valign = style['valign']
rotated = self.style['rotation']
text = self.text
if style['stacked']:
rotated = 0.
text = '\n'.join( (char for char in self.text.replace('\n', ' ')) )
xpos = (left, float(left+right)/2., right)[halign]
ypos = (bottom, float(bottom+top)/2., top)[valign-1]
return MText(text, (xpos, ypos),
linespacing=self.style['linespacing'],
style=self.style['textstyle'],
height=self.style['textheight'],
rotation=rotated,
xscale=self.style['xscale'],
halign=halign,
valign=valign,
color=self.style['textcolor'],
layer=layer)
class CustomCell(Cell):
""" Cell with 'user' generated content.
"""
def __init__(self, table, style='default', span=(1, 1)):
"""Constructor
:param table: assigned data table
:param str style: style name as string
:param span: tuple(spanrows, spancols), count of cells that cell covers
see Cell.__init__()
"""
super(CustomCell, self).__init__(table, style, span)
    def get_dxf_entity(self, coords, layer):
        """ Override this method and create an arbitrary dxf element
:param coords: tuple of border-coordinates : left, right, top, bottom
:param layer: layer, which should be used for dxf entities
"""
# get access to all style parameter
style = self.style # pylint: disable-msg=W0612
        # reduce borders by hmargin and vmargin
# pylint: disable-msg=W0612
left, right, top, bottom = self.get_workspace_coords(coords)
# and now do what you want ...
        # return a dxf entity which implements the __dxf__ protocol
# DXFList is a good choice
raise NotImplementedError()
class BlockCell(Cell):
""" Cell that contains a block reference.
"""
# pylint: disable-msg=W0102
def __init__(self, table, blockdef, style='default', attribs={}, span=(1, 1)):
""" Constructor
:param table: assigned data table
:param blockdef: block definition to insert (as INSERT-Entity), but we
need the blockdef to create the ATTRIB-Entities
:param dict attribs: dict, with ATTRIB-Tags as keys
:param str style: style name as string
:param span: tuple(spanrows, spancols), count of cells that cell covers
see also Cell.__init__()
"""
super(BlockCell, self).__init__(table, style, span)
self.blockdef = blockdef # dxf block definition!
self.attribs = attribs
def get_dxf_entity(self, border_coords, layer):
""" Create the cell content as INSERT-entity with trailing
ATTRIB-Entities.
:param border_coords: tuple of border-coordinates : left, right, top, bottom
:param str layer: layer, which should be used for dxf entities
"""
left, right, top, bottom = self.get_workspace_coords(border_coords)
style = self.style
halign = style['halign']
valign = style['valign']
xpos = (left, float(left + right) / 2., right)[halign]
ypos = (bottom, float(bottom + top) / 2., top)[valign-1]
insert = Insert(blockname=self.blockdef['name'],
insert=(xpos, ypos),
xscale=style['xscale'],
yscale=style['yscale'],
rotation=style['rotation'],
layer=layer)
# process attribs
for key, value in self.attribs.items():
try:
attdef = self.blockdef.find_attdef(key)
attrib = attdef.new_attrib(text=str(value))
insert.add(attrib, relative=True)
except KeyError:
                pass # ignore non-existing ATTDEFs
return insert
| {
"content_hash": "533b1520a764ec6d10e8768eec465970",
"timestamp": "",
"source": "github",
"line_count": 829,
"max_line_length": 92,
"avg_line_length": 37.583835946924005,
"alnum_prop": 0.575633084058157,
"repo_name": "sbarton272/AcousticBarcodes-Explorations",
"id": "54c696e2df1c8d1231e7b6e2a84bba1836b18d37",
"size": "31367",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "barcodes/dxfwrite/build/lib/dxfwrite/table.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Matlab",
"bytes": "8313"
},
{
"name": "Python",
"bytes": "725409"
},
{
"name": "Shell",
"bytes": "153"
}
],
"symlink_target": ""
} |
"""
Script that loads random forest models trained on the sider and toxcast datasets, predicts on sweetlead,
and builds a co-occurrence matrix of the predictions
@Author Aneesh Pappu
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import sys
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from deepchem.models.multitask import SingletaskToMultitask
from deepchem import metrics
from deepchem.metrics import Metric
from deepchem.models.sklearn_models import SklearnModel
from deepchem.splits import StratifiedSplitter, RandomSplitter
from sweetlead_datasets import load_sweet
sys.path.append('./../toxcast')
sys.path.append('./../sider')
from tox_datasets import load_tox
from sider_datasets import load_sider
"""
Load toxicity models now
"""
# Set some global variables up top
reload = False
verbosity = "high"
base_tox_data_dir = "/home/apappu/deepchem-models/toxcast_models/toxcast/toxcast_data"
tox_tasks, tox_dataset, tox_transformers = load_tox(
base_tox_data_dir, reload=reload)
#removes directory if present -- warning
base_tox_dir = "/home/apappu/deepchem-models/toxcast_models/toxcast/toxcast_analysis"
tox_train_dir = os.path.join(base_tox_dir, "train_dataset")
tox_valid_dir = os.path.join(base_tox_dir, "valid_dataset")
tox_test_dir = os.path.join(base_tox_dir, "test_dataset")
tox_model_dir = os.path.join(base_tox_dir, "model")
tox_splitter = StratifiedSplitter()
#default split is 80-10-10 train-valid-test split
tox_train_dataset, tox_valid_dataset, tox_test_dataset = tox_splitter.train_valid_test_split(
tox_dataset, tox_train_dir, tox_valid_dir, tox_test_dir)
# Fit Random Forest models
tox_task_types = {task: "classification" for task in tox_tasks}
classification_metric = Metric(metrics.roc_auc_score, np.mean,
verbosity=verbosity,
mode="classification")
params_dict = {
"batch_size": None,
"data_shape": tox_train_dataset.get_data_shape(),
}
def model_builder(tasks, task_types, model_params, model_dir, verbosity=None):
return SklearnModel(tasks, task_types, model_params, model_dir,
model_instance=RandomForestClassifier(
class_weight="balanced",
n_estimators=500,
n_jobs=-1),
verbosity=verbosity)
tox_model = SingletaskToMultitask(tox_tasks, tox_task_types, params_dict, tox_model_dir,
model_builder, verbosity=verbosity)
tox_model.reload()
"""
Load sider models now
"""
base_sider_data_dir = "/home/apappu/deepchem-models/toxcast_models/sider/sider_data"
sider_tasks, sider_dataset, sider_transformers = load_sider(
base_sider_data_dir, reload=reload)
base_sider_dir = "/home/apappu/deepchem-models/toxcast_models/sider/sider_analysis"
sider_train_dir = os.path.join(base_sider_dir, "train_dataset")
sider_valid_dir = os.path.join(base_sider_dir, "valid_dataset")
sider_test_dir = os.path.join(base_sider_dir, "test_dataset")
sider_model_dir = os.path.join(base_sider_dir, "model")
sider_splitter = RandomSplitter()
sider_train_dataset, sider_valid_dataset, sider_test_dataset = sider_splitter.train_valid_test_split(
sider_dataset, sider_train_dir, sider_valid_dir, sider_test_dir)
# Fit Random Forest models
sider_task_types = {task: "classification" for task in sider_tasks}
params_dict = {
"batch_size": None,
"data_shape": sider_train_dataset.get_data_shape(),
}
sider_model = SingletaskToMultitask(sider_tasks, sider_task_types, params_dict, sider_model_dir,
model_builder, verbosity=verbosity)
sider_model.reload()
"""
Load sweetlead dataset now. Pass in dataset object and appropriate transformers to predict functions
"""
base_sweet_data_dir = "/home/apappu/deepchem-models/toxcast_models/sweetlead/sweet_data"
sweet_dataset, sweet_transformers = load_sweet(
base_sweet_data_dir, reload=reload)
sider_predictions = sider_model.predict(sweet_dataset, sweet_transformers)
tox_predictions = tox_model.predict(sweet_dataset, sweet_transformers)
sider_dimensions = sider_predictions.shape[1]
tox_dimensions = tox_predictions.shape[1]
confusion_matrix = np.zeros(shape=(tox_dimensions, sider_dimensions))
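# Added note: the nested loop below counts, for every sweetlead compound, each
# (toxcast label, sider label) pair that both models predict as non-zero, so
# the matrix holds co-occurrence counts of predicted endpoints and side effects.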
for i in range(tox_predictions.shape[0]):
nonzero_tox = np.nonzero(tox_predictions[i, :])
nonzero_sider = np.nonzero(sider_predictions[i, :])
for j in nonzero_tox[0]:
for k in nonzero_sider[0]:
confusion_matrix[j,k] +=1
df = pd.DataFrame(confusion_matrix)
df.to_csv("./tox_sider_matrix.csv")
| {
"content_hash": "6de38758d34ca3d227b8af0dfd2bc211",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 104,
"avg_line_length": 33.99270072992701,
"alnum_prop": 0.7199914107794717,
"repo_name": "joegomes/deepchem",
"id": "68aa503b7757d6a826a56a128dfdb540d5adb979",
"size": "4657",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/sweetlead/sweet.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1424453"
},
{
"name": "Shell",
"bytes": "4837"
}
],
"symlink_target": ""
} |
from argparse import ArgumentParser
from typing import Any
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Q
from zerver.models import Message, Realm, Recipient, Stream, Subscription, get_realm
class Command(BaseCommand):
help = "Generate statistics on the streams for a realm."
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument('realms', metavar='<realm>', nargs='*',
help="realm to generate statistics for")
def handle(self, *args: Any, **options: str) -> None:
if options['realms']:
try:
realms = [get_realm(string_id) for string_id in options['realms']]
except Realm.DoesNotExist as e:
raise CommandError(e)
else:
realms = Realm.objects.all()
for realm in realms:
streams = Stream.objects.filter(realm=realm).exclude(Q(name__istartswith="tutorial-"))
# private stream count
private_count = 0
# public stream count
public_count = 0
for stream in streams:
if stream.invite_only:
private_count += 1
else:
public_count += 1
print("------------")
print(realm.string_id, end=' ')
print("{:>10} {} public streams and".format("(", public_count), end=' ')
print(f"{private_count} private streams )")
print("------------")
print("{:>25} {:>15} {:>10} {:>12}".format("stream", "subscribers", "messages", "type"))
for stream in streams:
if stream.invite_only:
stream_type = 'private'
else:
stream_type = 'public'
print(f"{stream.name:>25}", end=' ')
recipient = Recipient.objects.filter(type=Recipient.STREAM, type_id=stream.id)
print("{:10}".format(len(Subscription.objects.filter(recipient=recipient,
active=True))), end=' ')
num_messages = len(Message.objects.filter(recipient=recipient))
print(f"{num_messages:12}", end=' ')
print(f"{stream_type:>15}")
print("")
| {
"content_hash": "23b530c0457bf66f2ffe227395e47126",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 100,
"avg_line_length": 42.107142857142854,
"alnum_prop": 0.5262934690415606,
"repo_name": "showell/zulip",
"id": "940604c99dbffbccf0a19bfbf92b4389d02b2f6e",
"size": "2358",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "analytics/management/commands/stream_stats.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "433235"
},
{
"name": "Dockerfile",
"bytes": "2941"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "634357"
},
{
"name": "Handlebars",
"bytes": "235334"
},
{
"name": "JavaScript",
"bytes": "3341135"
},
{
"name": "Perl",
"bytes": "8594"
},
{
"name": "Puppet",
"bytes": "79720"
},
{
"name": "Python",
"bytes": "8120030"
},
{
"name": "Ruby",
"bytes": "8480"
},
{
"name": "Shell",
"bytes": "133132"
},
{
"name": "TypeScript",
"bytes": "20603"
}
],
"symlink_target": ""
} |
from __future__ import division
import logging
import numpy as np
import itertools
from cvxopt import matrix, solvers
import matplotlib.pyplot as plt
from descartes.patch import PolygonPatch
from cops_and_robots.fusion.gaussian_mixture import GaussianMixture
from mpl_toolkits.axes_grid1 import make_axes_locatable
import time
# PRODUCT MODEL ###############################################################
def product_model(models):
"""Generate a product model from multiple softmax models.
"""
from softmax import Softmax
n = len(models) # number of measurements
# Figure out how many terms are needed in denominator
M = 1 # total number of terms
for sm in models:
if sm.has_subclasses:
M *= sm.num_subclasses
else:
M *= sm.num_classes
# Generate lists of all parameters
#<>TODO: keep this a numpy-only operation, as per:
# http://stackoverflow.com/questions/1208118/using-numpy-to-build-an-array-of-all-combinations-of-two-arrays
model_weights = []
model_biases = []
model_labels = []
for i, sm in enumerate(models):
model_weights.append(sm.weights.tolist())
model_biases.append(sm.biases.tolist())
if sm.has_subclasses:
# Use class labels of each subclass
class_labels = []
for label in sm.subclass_labels:
i = label.find('__')
if i != -1:
label = label[:i]
class_labels.append(label)
model_labels.append(class_labels)
else:
model_labels.append(sm.class_labels)
# Get all possible combinations of parameters
weight_combs = list(itertools.product(*model_weights))
bias_combs = list(itertools.product(*model_biases))
label_combs = list(itertools.product(*model_labels))
# Evaluate all combinations of model parameters
product_weights = np.empty((M, models[0].weights.shape[1]))
product_biases = np.empty(M)
product_labels = []
for i, _ in enumerate(bias_combs):
product_weights[i] = np.array(weight_combs[i]).sum(axis=0)
product_biases[i] = np.array(bias_combs[i]).sum()
str_ = " + ".join(label_combs[i])
product_labels.append(str_)
sm = Softmax(weights=product_weights,
biases=product_biases,
labels=product_labels,
)
return sm
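# Added note on why summed weights/biases give the product model: for two
# softmax models,
#   P1(A|x) * P2(B|x) = exp(w_A.x + b_A) * exp(w_B.x + b_B)
#                       / (sum_i exp(w_i.x + b_i) * sum_j exp(w_j.x + b_j))
#                     = exp((w_A + w_B).x + (b_A + b_B))
#                       / sum_{i,j} exp((w_i + w_j).x + (b_i + b_j)),
# which is again a softmax over all label combinations -- hence the M combined
# classes built above (e.g. a 3-class and a 5-class model give M = 15).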
# GEOMETRIC MODEL #############################################################
def find_redundant_constraints(G_full, h_full, break_index=-1, verbose=False):
    """Determine which constraints affect the feasible region."""
result = []
redundant_constraints = []
feasible = []
for i, _ in enumerate(G_full):
if i > break_index and break_index > 0:
break
G = np.delete(G_full, i, axis=0)
h = np.delete(h_full, i)
# Objective function: max c.x (or min -c.x)
c = -G_full[i] # use the constraint as the objective basis
beta = h_full[i] # maximum in the constraint basis
# <>TODO: Check to make sure c is a dense column matrix
try:
G = matrix(np.asarray(G, dtype=np.float))
h = matrix(np.asarray(h, dtype=np.float))
c = matrix(np.asarray(c, dtype=np.float))
except:
logging.error('ERROR! Not able to convert arrays into matrices.')
return None, None
solvers.options['show_progress'] = False
sol = solvers.lp(c,G,h)
optimal_pt = sol['x']
# If dual is infeasible, max is unbounded (i.e. infinity)
if sol['status'] == 'dual infeasible' or optimal_pt is None:
optimal_val = np.inf
else:
optimal_val = -np.asarray(sol['primal objective'])
optimal_pt = np.asarray(optimal_pt).reshape(G_full.shape[1])
if sol['status'] == 'primal infeasible':
feasible.append(False)
else:
feasible.append((True))
is_redundant = optimal_val <= beta
if is_redundant:
redundant_constraints.append(i)
if verbose:
logging.info('Without constraint {}, we have the following:'.format(i))
logging.info(np.asarray(sol['x']))
logging.info('\tOptimal value (z_i) {} at point {}.'
.format(optimal_val, optimal_pt))
logging.info('\tRemoved constraint maximum (b_i) of {}.'.format(beta))
logging.info('\tRedundant? {}\n\n'.format(is_redundant))
result.append({'optimal value': optimal_val,
'optimal point': optimal_pt,
'is redundant': is_redundant
})
if not all(feasible):
redundant_constraints = None
return result, redundant_constraints
def remove_redundant_constraints(G, h, **kwargs):
"""Remove redundant inequalities from a set of inequalities Gx <= h.
"""
_, redundant_constraints = find_redundant_constraints(G, h, **kwargs)
if redundant_constraints is None:
return None, None
G = np.delete(G, redundant_constraints, axis=0)
h = np.delete(h, redundant_constraints)
return G, h
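# Added illustrative sketch: a single redundant inequality being removed.  The
# call runs cvxopt's LP solver (as above), so this is a usage outline only.
def _example_remove_redundant_constraint():
    # Unit box constraints plus an extra constraint x <= 2, which is implied
    # by x <= 1 and should therefore be dropped.
    G = np.array([[ 1.,  0.],   # x <= 1
                  [ 1.,  0.],   # x <= 2  (redundant)
                  [-1.,  0.],   # -x <= 1
                  [ 0.,  1.],   # y <= 1
                  [ 0., -1.]])  # -y <= 1
    h = np.array([1., 2., 1., 1., 1.])
    G_min, h_min = remove_redundant_constraints(G, h)
    return G_min, h_min  # expected: four rows, the x <= 2 row removed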
def generate_inequalities(softmax_model, measurement):
"""Produce inequalities in the form Gx <= h
"""
# Identify the measurement and index
for i, label in enumerate(softmax_model.class_labels):
if label == measurement:
break
else:
if softmax_model.has_subclasses:
for i, label in enumerate(softmax_model.subclass_labels):
if label == measurement:
break
# logging.error('Measurement not found!')
# Look at log-odds boundaries
G = np.empty_like(np.delete(softmax_model.weights, 0, axis=0))
h = np.empty_like(np.delete(softmax_model.biases, 0))
k = 0
for j, weights in enumerate(softmax_model.weights):
if j == i:
continue
G[k] = -(softmax_model.weights[i] - softmax_model.weights[j])
h[k] = (softmax_model.biases[i] - softmax_model.biases[j])
k += 1
return G, h
def geometric_model(models, measurements, show_comp_models=False, *args, **kwargs):
"""
    Synthesize a geometric model from several models (MMS or plain softmax), using one measurement per model.
"""
minclass_measurements = [] # Lowest level (class or subclass)
for i, model in enumerate(models):
model_minclass_measurements = []
if model.has_subclasses:
for subclass_label in model.subclass_labels:
test_label = measurements[i] + '__'
if test_label in subclass_label:
model_minclass_measurements.append(subclass_label)
# Has subclasses, but measurement is not a subclass
if len(model_minclass_measurements) == 0:
model_minclass_measurements = [measurements[i]]
else:
model_minclass_measurements = [measurements[i]]
minclass_measurements.append(model_minclass_measurements)
# Find the softmax model from each combination of subclasses
measurement_combs = list(itertools.product(*minclass_measurements))
comp_models = []
for measurement_comb in measurement_combs:
sm = geometric_softmax_model(models, measurement_comb, *args, **kwargs)
if sm is not None:
comp_models.append(sm)
# Visualize the component models
if show_comp_models:
fig = plt.figure()
s = int(np.ceil(np.sqrt(len(comp_models))))
hr_translation ={'Near__0': 'Front',
'Near__1': 'Left',
'Near__2': 'Back',
'Near__3': 'Right',
}
for i, comp_model in enumerate(comp_models):
ax = fig.add_subplot(s,s,i +1)
# fig = plt.figure(figsize=(10,10))
# ax = fig.add_subplot(111)
comp_model.plot(ax=ax, fig=fig, plot_probs=False,
plot_legend=False, show_plot=False)
# Print human readable titles
hr_title = []
for meas in comp_model.class_labels[0].split(' + '):
a = meas.find('__')
if a == -1:
hr_title.append(meas)
else:
hr_title.append(hr_translation[meas])
ax.set_title(" + ".join(hr_title), fontsize=20)
plt.show()
#Change label names
from softmax import Softmax
joint_measurement = " + ".join(measurements)
for i, comp_model in enumerate(comp_models):
weights = comp_model.weights
biases = comp_model.biases
labels = [joint_measurement] + comp_model.class_labels[1:]
comp_models[i] = Softmax(weights, biases, labels=labels)
comp_models[i].parent_labels = comp_model.class_labels[0]
if len(comp_models) == 1:
return comp_models[0]
else:
return comp_models
def geometric_softmax_model(models, measurements, verbose=False, state_spec='x y', bounds=None):
"""Generate one softmax model from others using geometric constraints.
"""
from softmax import Softmax
# Get the full, redundant set of inequalities from all models
G_full = []
h_full = []
for i, sm in enumerate(models):
G, h = generate_inequalities(sm, measurements[i])
G_full.append(G)
h_full.append(h)
G_full = np.asarray(G_full).reshape(-1, G.shape[1])
h_full = np.asarray(h_full).reshape(-1)
# Remove redundant constraints to get weights and biases
G, h = remove_redundant_constraints(G_full, h_full, verbose=verbose)
if G is None:
return None
z = np.zeros((G.shape[1]))
new_weights = np.vstack((z, G))
new_biases = np.hstack((0, -h))
# Generate a label for the important class, and generic ones for the rest
labels = [" + ".join(measurements)]
for i in range(h.size):
labels.append('Class ' + str(i + 1))
sm = Softmax(new_weights, new_biases, labels=labels, state_spec=state_spec,
bounds=bounds)
return sm
# NEIGHBOURHOOD MODEL ###############################################################
def find_neighbours(self, class_=None):
"""Method of a Softmax model to find neighbours for all its classes.
"""
if self.has_subclasses:
classes = self.subclasses
else:
classes = self.classes
for label, class_ in classes.iteritems():
class_.find_class_neighbours()
# print "{} has neighbours: {}".format(class_.label, class_.neighbours)
def neighbourhood_model(models, measurements, iteration=1):
"""Generate one softmax model from each measurement class' neighbours.
Called at two separate times.
"""
from softmax import Softmax
neighbourhood_models = []
for i, model in enumerate(models):
# Find neighbours for (sub)classes and initialize neighbourhood params
if iteration == 1:
model.find_neighbours() #<>TODO: this should happen offline
else:
measurement_class = model.classes[measurements[0]]
if measurement_class.has_subclasses:
for _, subclass in measurement_class.subclasses.iteritems():
subclass.find_class_neighbours()
else:
measurement_class.find_class_neighbours()
class_label = measurements[i]
if model.has_subclasses:
classes = model.subclasses
else:
classes = model.classes
neighbourhood_weights = []
neighbourhood_biases = []
neighbourhood_labels = []
# Find labels associated with (sub)classes
if model.has_subclasses:
labels = []
class_ = model.classes[class_label]
for subclass_label, subclass in class_.subclasses.iteritems():
labels.append(subclass_label)
else:
labels = [class_label]
# Find measurement parameters
for label in labels:
neighbourhood_weights.append(classes[label].weights)
neighbourhood_biases.append(classes[label].bias)
neighbourhood_labels.append(class_label)
# Find parameters of neighbours to measurement
unique_neighbour_labels = []
for label in labels: # for each (sub)class measurement
neighbour_labels = classes[label].neighbours
for neighbour_label in neighbour_labels:
# Find the neighbour (super)class and its label
i = neighbour_label.find('__')
if i != -1:
neighbour_class_label = neighbour_label[:i]
else:
neighbour_class_label = neighbour_label
neighbour_class = model.classes[neighbour_class_label]
# Add that class to the neighbourhood if it's new
if neighbour_class_label not in unique_neighbour_labels \
and neighbour_class_label != class_label:
unique_neighbour_labels.append(neighbour_class_label)
if neighbour_class.has_subclasses:
n_classes = neighbour_class.subclasses
else:
n_classes = {neighbour_class_label:neighbour_class}
for _, nc in n_classes.iteritems():
neighbourhood_weights.append(nc.weights)
neighbourhood_biases.append(nc.bias)
neighbourhood_labels.append(neighbour_class_label)
neighbourhood_weights = np.asarray(neighbourhood_weights)
neighbourhood_biases = np.asarray(neighbourhood_biases)
sm = Softmax(weights=neighbourhood_weights,
biases=neighbourhood_biases,
labels=neighbourhood_labels
)
neighbourhood_models.append(sm)
neighbourhood_sm = product_model(neighbourhood_models)
return neighbourhood_sm
# HELPERS #####################################################################
def prob_difference(models, joint_measurement):
#<>TODO: use arbitrary bounds
probs = []
for model in models:
prob = model.probability(class_=joint_measurement)
sq = int(np.ceil(np.sqrt(prob.size)))
prob = prob.reshape(sq,sq)
del model.probs
probs.append(prob)
prob_diff = -probs[0]
for prob in probs[1:]:
prob_diff += prob
prob_diff = prob_diff.reshape(sq,sq)
return prob_diff
def compare_probs(sm1, sm2, measurements, visualize=True, verbose=False):
bounds = [-5, -5, 5, 5]
res = 0.1
probs1 = sm1.probability(class_=measurements[0])
probs1 = probs1.reshape(101,101)
del sm1.probs
max_i = np.unravel_index(probs1.argmax(), probs1.shape)
min_i = np.unravel_index(probs1.argmin(), probs1.shape)
sm1stats = {'max prob': probs1.max(),
'max prob coord': np.array(max_i) * res + np.array(bounds[0:2]),
'min prob': probs1.min(),
'min prob coord': np.array(min_i) * res + np.array(bounds[0:2]),
'avg prob': probs1.mean(),
}
probs2 = sm2.probability(class_=measurements[1])
probs2 = probs2.reshape(101,101)
del sm2.probs
sm2stats = {'max prob': probs2.max(),
'max prob coord': np.array(max_i) * res + np.array(bounds[0:2]),
'min prob': probs2.min(),
'min prob coord': np.array(min_i) * res + np.array(bounds[0:2]),
'avg prob': probs2.mean(),
}
prob_diff21 = probs2 - probs1
prob_diff21 = prob_diff21.reshape(101,101)
diffstats = {'max diff': prob_diff21.max(),
'min diff': prob_diff21.min(),
'avg diff': prob_diff21.mean()
}
if verbose:
print 'Exact softmax stats:'
for key, value in sm1stats.iteritems():
print('{}: {}'.format(key, value))
print '\nGeometric softmax stats:'
for key, value in sm2stats.iteritems():
print('{}: {}'.format(key, value))
print '\n Difference stats:'
for key, value in diffstats.iteritems():
print('{}: {}'.format(key, value))
# Iterate scaled version of LP-generated softmax
scales = np.linspace(0.7, 0.9, 101)
for scale in scales:
weights = sm2.weights * scale
biases = sm2.biases * scale
labels = sm2.class_labels
        from softmax import Softmax  # local import, matching the pattern used elsewhere in this module
        sm3 = Softmax(weights, biases, labels=labels)
# probs3 = sm3.probability(class_=measurements[1])
probs3 = probs2 * scale
probs3 = probs3.reshape(101,101)
# del sm3.probs
prob_diff31 = np.abs(probs3 - probs1)
prob_diff31 = prob_diff31.reshape(101,101)
print('Avg: {}, max: {}, at scale of {}'
.format(prob_diff31.mean(), prob_diff31.max(), scale))
if visualize:
fig = plt.figure(figsize=(12,10))
ax = fig.add_subplot(2,2,1, projection='3d')
sm1.plot(class_=measurements[0], ax=ax, fig=fig, show_plot=False,
plot_legend=False)
ax.set_title('Model 1: {}'.format(measurements[0]))
ax = fig.add_subplot(2,2,2, projection='3d')
sm2.plot(class_=measurements[0], ax=ax, fig=fig, show_plot=False,
plot_legend=False)
ax.set_title('Model 2: {}'.format(measurements[1]))
ax = fig.add_subplot(2,2,3)
c = ax.pcolormesh(sm1.X, sm1.Y, prob_diff21)
ax.set_title("Difference b/w 1 & 2")
ax.set_xlim(bounds[0],bounds[2])
ax.set_ylim(bounds[1],bounds[3])
ax = fig.add_subplot(2,2,4)
c = ax.pcolormesh(sm1.X, sm1.Y, prob_diff31)
ax.set_title("Difference b/w 1 & 2 (rescaled)")
ax.set_xlim(bounds[0],bounds[2])
ax.set_ylim(bounds[1],bounds[3])
fig.subplots_adjust(right=0.8)
cax = fig.add_axes([0.85, 0.15, 0.025, 0.7])
fig.colorbar(c, cax=cax)
plt.show()
# TESTS #######################################################################
def test_synthesis_techniques(test_set=1, visualize=True, visualize_base=True, use_MMS=False,
show_comp_models=True):
from _models import _make_regular_2D_poly, intrinsic_space_model, range_model
# Create the softmax models to be combined
poly1 = _make_regular_2D_poly(4, max_r=2, theta=np.pi/4)
poly2 = _make_regular_2D_poly(4, origin=[2,1.5], max_r=3, theta=np.pi/4)
poly3 = _make_regular_2D_poly(4, max_r=4, theta=np.pi/4)
poly4 = _make_regular_2D_poly(4, max_r=1, origin=[-1.5,0], theta=np.pi/4)
poly5 = _make_regular_2D_poly(4, max_r=3, origin=[1.5,0],theta=np.pi/4)
if use_MMS:
if test_set == 1:
sm1 = range_model(poly=poly1)
sm2 = range_model(poly=poly2)
sm3 = range_model(poly=poly3)
models = [sm1, sm2, sm3]
measurements = ['Near', 'Near', 'Inside']
polygons = [poly1, poly2, poly3]
elif test_set == 2:
sm4 = range_model(poly=poly4)
sm5 = range_model(poly=poly5)
models = [sm4, sm5]
measurements = ['Near', 'Inside']
polygons = [poly4, poly5]
elif test_set == 3:
poly1 = _make_regular_2D_poly(4, max_r=2, origin=[2,2], theta=np.pi/4)
poly2 = _make_regular_2D_poly(4, max_r=2, origin=[-2,-2], theta=np.pi/4)
poly3 = _make_regular_2D_poly(4, max_r=10, theta=np.pi/4)
poly4 = _make_regular_2D_poly(4, max_r=1, origin=[0,0], theta=np.pi/4)
poly5 = _make_regular_2D_poly(4, max_r=2, origin=[-2,2],theta=np.pi/4)
sm1 = range_model(poly=poly1)
sm2 = range_model(poly=poly2)
sm3 = range_model(poly=poly3)
sm4 = range_model(poly=poly4)
sm5 = range_model(poly=poly5)
            models = [sm1, sm2, sm3, sm4, sm5]
measurements = ['Near', 'Near', 'Inside','Outside','Near']
polygons = [poly1, poly2, poly3, poly4, poly5]
else:
if test_set == 1:
sm1 = intrinsic_space_model(poly=poly1)
sm2 = intrinsic_space_model(poly=poly2)
sm3 = intrinsic_space_model(poly=poly3)
measurements = ['Front', 'Inside', 'Inside']
models = [sm1, sm2, sm3]
polygons = [poly1, poly2, poly3]
else:
sm4 = intrinsic_space_model(poly=poly4)
sm5 = intrinsic_space_model(poly=poly5)
measurements = ['Left', 'Inside',]
models = [sm4, sm5]
polygons = [poly4, poly5]
joint_measurement = " + ".join(measurements)
if visualize_base:
fig = plt.figure(figsize=(10,10))
s = len(models)
for i, model in enumerate(models):
ax = fig.add_subplot(1,s,i + 1)
if model.has_subclasses:
ps = True
else:
ps = False
model.plot(ax=ax, fig=fig, plot_probs=False, plot_legend=True,
show_plot=False, plot_subclasses=ps)
ax.set_title("{}".format(measurements[i]))
plt.show()
# # Synthesize the softmax models
# logging.info('Synthesizing product model...')
# s = time.time()
# product_sm = product_model(models)
# e = time.time()
# product_time = e - s
# logging.info('Took {} seconds\n'.format((product_time)))
# logging.info('Synthesizing neighbourhood model (iter 1)...')
# s = time.time()
# neighbour_sm = neighbourhood_model(models, measurements)
# e = time.time()
# neighbour_time = e - s
# logging.info('Took {} seconds\n'.format((neighbour_time)))
# logging.info('Synthesizing neighbourhood model (iter 2)...')
# s = time.time()
# neighbour_sm2 = neighbourhood_model([neighbour_sm], [joint_measurement], iteration=2)
# e = time.time()
# neighbour2_time = e - s
# logging.info('Took {} seconds\n'.format((neighbour2_time)))
logging.info('Synthesizing geometric model...')
s = time.time()
geometric_sm_models = geometric_model(models, measurements, show_comp_models=show_comp_models)
e = time.time()
geometric_time = e - s
logging.info('Took {} seconds\n'.format((geometric_time)))
# Find their differences
# neighbour_diff = prob_difference([product_sm, neighbour_sm], joint_measurement)
# neighbour_diff2 = prob_difference([product_sm, neighbour_sm2], joint_measurement)
# if not type(geometric_sm_models) == list:
# geometric_sm_models = [geometric_sm_models]
# geometric_diff = prob_difference([product_sm] + geometric_sm_models, joint_measurement)
# Fuse all of them with a normal
# prior = GaussianMixture([0.8,0.2], [-np.ones(2), 4*np.ones(2)], [3*np.eye(2), 2*np.eye(2)])
prior = GaussianMixture([0.25, 0.25, 0.25, 0.25],
[-2*np.ones(2), 2*np.ones(2), np.array([-2,2]), np.array([2,-2])],
[2*np.eye(2), 2*np.eye(2), 2*np.eye(2), 2*np.eye(2),]
)
# n = 20
# prior = GaussianMixture(np.ones(n),
# np.random.random((n,2))*20 - 10,
# np.tile(2*np.eye(2),(n,1,1)),
# max_num_mixands=n,
# )
from cops_and_robots.fusion.variational_bayes import VariationalBayes
vb = VariationalBayes()
# logging.info('Fusing Product model...')
# s = time.time()
# mu, sigma, beta = vb.update(measurement=joint_measurement,
# likelihood=product_sm,
# prior=prior,
# )
# if beta.size == 1:
# logging.info('Got a posterior with mean {} and covariance: \n {}'
# .format(mu, sigma))
# product_post = GaussianMixture(beta, mu, sigma)
# e = time.time()
# product_fusion_time = e - s
# logging.info('Took {} seconds\n'.format((product_fusion_time)))
# logging.info('Fusing Neighbourhood model 1...')
# s = time.time()
# mu, sigma, beta = vb.update(measurement=joint_measurement,
# likelihood=neighbour_sm,
# prior=prior,
# )
# if beta.size == 1:
# logging.info('Got a posterior with mean {} and covariance: \n {}'
# .format(mu, sigma))
# neighbour_post = GaussianMixture(beta, mu, sigma)
# e = time.time()
# neighbour_fusion_time = e - s
# logging.info('Took {} seconds\n'.format((neighbour_fusion_time)))
# logging.info('Fusing Neighbourhood model 2...')
# s = time.time()
# mu, sigma, beta = vb.update(measurement=joint_measurement,
# likelihood=neighbour_sm2,
# prior=prior,
# )
# if beta.size == 1:
# logging.info('Got a posterior with mean {} and covariance: \n {}'
# .format(mu, sigma))
# neighbour2_post = GaussianMixture(beta, mu, sigma)
# e = time.time()
# neighbour2_fusion_time = e - s
# logging.info('Took {} seconds\n'.format((neighbour2_fusion_time)))
logging.info('Fusing Geometric model...')
mixtures = []
raw_weights = []
s = time.time()
# ems = ['Near + Inside__0', 'Near + Inside__2', 'Near + Inside__3',]
for u, mixand_weight in enumerate(prior.weights):
prior_mixand = GaussianMixture(1, prior.means[u], prior.covariances[u])
for i, geometric_sm in enumerate(geometric_sm_models):
# exact_measurements = geometric_sm.parent_labels.split(' + ')
mu, sigma, beta = vb.update(measurement=joint_measurement,
likelihood=geometric_sm,
prior=prior_mixand,
get_raw_beta=True,
# exact_likelihoods=models,
# exact_measurements=exact_measurements,
)
new_mixture = GaussianMixture(beta, mu, sigma)
mixtures.append(new_mixture)
raw_weights.append(beta * mixand_weight)
# Renormalize raw weights
raw_weights = np.array(raw_weights)
raw_weights /= raw_weights.sum()
mixtures[0].combine_gms(mixtures[1:], raw_weights=raw_weights)
geometric_post = mixtures[0]
e = time.time()
geometric_fusion_time = e - s
logging.info('Took {} seconds\n'.format((geometric_fusion_time)))
# Compute KLDs
neighbour_kld = neighbour_post.compute_kld(product_post)
neighbour2_kld = neighbour2_post.compute_kld(product_post)
geometric_kld = geometric_post.compute_kld(product_post)
compression_time = [product_time, neighbour_time, neighbour2_time, geometric_time]
fusion_time = [product_fusion_time, neighbour_fusion_time, neighbour2_fusion_time, geometric_fusion_time]
KLDs = [0, neighbour_kld, neighbour2_kld, geometric_kld]
return compression_time, fusion_time, KLDs
if visualize:
# fig = plt.figure(figsize=(18,10))
bounds = [-5,-5,5,5]
num_levels = 500
# Plot critical regions (and polys on the product model)
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111)
# ax = fig.add_subplot(3,4,1)
product_sm.plot(plot_probs=False, ax=ax, fig=fig, title='', show_plot=False,
plot_legend=False)
# ax.set_title('Product Model ({:.0f} terms, {:.3f}s)'
ax.set_title('{:.0f} terms, {:.3f}s synthesis'
.format(product_sm.biases.size, product_time),
fontsize=20)
for poly in polygons:
from shapely.affinity import translate
poly = translate(poly, -0.05, -0.05)
patch = PolygonPatch(poly, facecolor='none', zorder=2,
linewidth=2.5, edgecolor='black',)
ax.add_patch(patch)
plt.axis('scaled')
ax.set_xlim(bounds[0],bounds[2])
ax.set_ylim(bounds[1],bounds[3])
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111)
# ax = fig.add_subplot(3,4,2)
neighbour_sm.plot(plot_probs=False, ax=ax, fig=fig, title='', show_plot=False,
plot_legend=False)
# ax.set_title('Neighbour Model 1 ({:.0f} terms, {:.3f}s)'
ax.set_title('{:.0f} terms, {:.3f}s synthesis'
.format(neighbour_sm.biases.size, neighbour_time),
fontsize=20)
plt.axis('scaled')
ax.set_xlim(bounds[0],bounds[2])
ax.set_ylim(bounds[1],bounds[3])
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111)
# ax = fig.add_subplot(3,4,3)
neighbour_sm2.plot(plot_probs=False, ax=ax, fig=fig, title='', show_plot=False,
plot_legend=False)
# ax.set_title('Neighbour Model 2 ({:.0f} terms, {:.3f}s)'
ax.set_title('{:.0f} terms, {:.3f}s synthesis'
.format(neighbour_sm2.biases.size, neighbour2_time),
fontsize=20)
plt.axis('scaled')
ax.set_xlim(bounds[0],bounds[2])
ax.set_ylim(bounds[1],bounds[3])
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111)
# ax = fig.add_subplot(3,4,4)
geometric_sm.plot(plot_probs=False, ax=ax, fig=fig, title='', show_plot=False,
plot_legend=False)
# ax.set_title('Geometric Model ({:.0f} terms, {:.3f}s)'
ax.set_title('{:.0f} terms, {:.3f}s synthesis'
.format(geometric_sm.biases.size, geometric_time),
fontsize=20)
plt.axis('scaled')
ax.set_xlim(bounds[0],bounds[2])
ax.set_ylim(bounds[1],bounds[3])
# Plot prior
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111)
# ax = fig.add_subplot(3,4,5)
# title = 'Prior Distribution'
title = ''
prior.plot(ax=ax, fig=fig, num_levels=num_levels, bounds=bounds, title=title, show_colorbar=True)
ax.set_title('')
ax.set_xlabel(r'$X_1$')
ax.set_ylabel(r'$X_2$')
plt.suptitle('')
# Plot probability differences
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111)
# ax = fig.add_subplot(3,4,6)
plt.axis('scaled')
c = ax.pcolormesh(product_sm.X, product_sm.Y, neighbour_diff,)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.1)
cbar = plt.colorbar(c, cax)
cbar.ax.tick_params(labelsize=20)
ax.set_title("")
# ax.set_title("Neighbour1 minus Product")
ax.set_xlim(bounds[0],bounds[2])
ax.set_ylim(bounds[1],bounds[3])
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111)
# ax = fig.add_subplot(3,4,7)
plt.axis('scaled')
c = ax.pcolormesh(product_sm.X, product_sm.Y, neighbour_diff2,)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.1)
plt.colorbar(c, cax)
ax.set_title("Neighbour2 minus Product")
ax.set_xlim(bounds[0],bounds[2])
ax.set_ylim(bounds[1],bounds[3])
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111)
# ax = fig.add_subplot(3,4,8)
plt.axis('scaled')
c = ax.pcolormesh(product_sm.X, product_sm.Y, geometric_diff,)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.1)
cbar = plt.colorbar(c, cax)
cbar.ax.tick_params(labelsize=20)
ax.set_title("")
# ax.set_title("Geometric minus Product")
ax.set_xlim(bounds[0],bounds[2])
ax.set_ylim(bounds[1],bounds[3])
# Plot posteriors
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111)
# ax = fig.add_subplot(3,4,9)
# title = 'Product Posterior ({:.3f}s)'\
title = '{:.3f}s fusion'\
.format(product_fusion_time)
product_post.plot(ax=ax, fig=fig, num_levels=num_levels, bounds=bounds, title=title, show_colorbar=True)
ax.set_xlabel(r'$X_1$')
ax.set_ylabel(r'$X_2$')
plt.suptitle('')
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111)
# ax = fig.add_subplot(3,4,10)
# title = 'Neighbour 1 Posterior (KLD {:.2f}, {:.3f}s)'\
title = 'KLD of {:.2f}, {:.3f}s fusion'\
.format(neighbour_kld, neighbour_fusion_time)
neighbour_post.plot(ax=ax, fig=fig, num_levels=num_levels, bounds=bounds, title=title, show_colorbar=True)
ax.set_xlabel(r'$X_1$')
ax.set_ylabel(r'$X_2$')
plt.suptitle('')
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111)
# ax = fig.add_subplot(3,4,11)
# title = 'Neighbour 2 Posterior (KLD {:.2f}, {:.3f}s)'\
title = 'KLD of {:.2f}, {:.3f}s fusion'\
.format(neighbour2_kld, neighbour2_fusion_time)
neighbour2_post.plot(ax=ax, fig=fig, num_levels=num_levels, bounds=bounds, title=title, show_colorbar=True)
ax.set_xlabel(r'$X_1$')
ax.set_ylabel(r'$X_2$')
plt.suptitle('')
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111)
# ax = fig.add_subplot(3,4,12)
# title = 'Geometric Posterior (KLD {:.2f}, {:.3f}s)'\
title = 'KLD of {:.2f}, {:.3f}s fusion'\
.format(geometric_kld, geometric_fusion_time)
geometric_post.plot(ax=ax, fig=fig, num_levels=num_levels, bounds=bounds, title=title, show_colorbar=True)
ax.set_xlabel(r'$X_1$')
ax.set_ylabel(r'$X_2$')
plt.suptitle('')
if use_MMS:
type_ = 'MMS'
else:
type_ = 'Softmax'
fig.suptitle("{} combination of '{}'"
.format(type_, joint_measurement),
fontsize=15)
plt.show()
def product_test(models, visualize=False, create_combinations=False):
"""
"""
sm3 = product_model(models)
# Manually create 'front + interior'
fi_weights = np.array([sm2.weights[0],
sm2.weights[1],
sm2.weights[2],
-sm1.weights[1],
sm2.weights[4],
sm1.weights[4] - sm1.weights[1],
])
fi_biases = np.array([sm2.biases[0],
sm2.biases[1],
sm2.biases[2],
-sm1.biases[1],
sm2.biases[4],
sm1.biases[4] -sm1.biases[1],
])
sm4 = Softmax(fi_weights, fi_biases)
# Plotting
if visualize:
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(2,2,1)
sm1.plot(plot_poly=True, plot_probs=False, ax=ax, fig=fig, show_plot=False,
plot_legend=False)
ax.set_title('Model 1')
ax = fig.add_subplot(2,2,2)
sm2.plot(plot_poly=True, plot_probs=False, ax=ax, fig=fig, show_plot=False,
plot_legend=False)
ax.set_title('Model 2')
ax = fig.add_subplot(2,2,3)
sm3.plot(plot_poly=True, plot_probs=False, ax=ax, fig=fig, show_plot=False,
plot_legend=False)
ax.set_title('Exact Intersection')
ax = fig.add_subplot(2,2,4, projection='3d')
sm3.plot(class_='Front + Inside', ax=ax, fig=fig, show_plot=False,
plot_legend=False)
ax.set_title("Likelihood of 'Front + Inside'")
ax.set_zlabel('P(D=i|X)')
ax.zaxis._axinfo['label']['space_factor'] = 2.8
plt.show()
return sm3
def geometric_model_test(measurements, verbose=False, visualize=False):
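    """Synthesize two intrinsic space models into one softmax model via LP.

    Generates the inequality constraints implied by ``measurements`` on two
    square polygons, removes redundant constraints, and builds a combined
    Softmax model whose first class represents the joint measurement.
    """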
poly1 = _make_regular_2D_poly(4, max_r=2, theta=np.pi/4)
poly2 = _make_regular_2D_poly(4, origin=[1,1.5], max_r=2, theta=np.pi/4)
bounds = [-5, -5, 5, 5]
sm1 = intrinsic_space_model(poly=poly1, bounds=bounds)
sm2 = intrinsic_space_model(poly=poly2, bounds=bounds)
A1, b1 = generate_inequalities(sm1, measurements[0])
A2, b2 = generate_inequalities(sm2, measurements[1])
G_full = np.vstack((A1, A2))
h_full = np.hstack((b1, b2))
A, b = remove_redundant_constraints(G_full, h_full, verbose=verbose)
new_weights = np.vstack(([0,0], A))
new_biases = np.hstack((0, -b))
labels = [measurements[0] + ' + ' + measurements[1]]
for i in range(b.size):
labels.append('Class ' + str(i + 1))
sm3 = Softmax(new_weights, new_biases, labels=labels)
if visualize:
fig, axes = plt.subplots(1,3, figsize=(18,6))
ax = axes[0]
sm1.plot(plot_poly=True, plot_probs=False, ax=ax, fig=fig, show_plot=False,
plot_legend=False)
ax.set_title('Model 1: {}'.format(measurements[0]))
ax = axes[1]
sm2.plot(plot_poly=True, plot_probs=False, ax=ax, fig=fig, show_plot=False,
plot_legend=False)
ax.set_title('Model 2: {}'.format(measurements[1]))
ax = axes[2]
sm3.plot(plot_poly=True, plot_probs=False, ax=ax, fig=fig, show_plot=False,
plot_legend=False)
ax.set_title("Synthesis of the two")
plt.show()
return sm3
def product_vs_lp():
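    """Compare product-model synthesis against the LP-based geometric
    synthesis for the 'Front' and 'Inside' measurements."""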
measurements = ['Front', 'Inside']
sm1 = product_test(visualize=False)
sm2 = geometric_model_test(measurements, visualize=False)
combined_measurements = [measurements[0] + ' + ' + measurements[1]]* 2
compare_probs(sm1, sm2, measurements=combined_measurements)
def test_1D():
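    """Plot the 1D speed model alongside its geometric softmax synthesis."""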
from _models import speed_model
sm = speed_model()
geometric_sm = geometric_softmax_model([sm], ['Medium'], state_spec='x', bounds=[0, 0, 0.4, 0.4])
# print geometric_sm.bounds
fig = plt.figure(figsize=(14,6))
ax = fig.add_subplot(1, 3, 1)
sm.plot(plot_probs=True, plot_dominant_classes=False, fig=fig, ax=ax, plot_legend=False,
show_plot=False)
ax.set_title('Speed Model')
ax = fig.add_subplot(1, 3, 2)
geometric_sm.plot(plot_probs=True, plot_dominant_classes=False, fig=fig, ax=ax, plot_legend=False,
show_plot=False)
    ax.set_title('Geometric Softmax Model')
plt.show()
def test_find_redundant_constraints(verbose=False, show_timing=True, n_runs=1000):
"""Tested against results of LP method in section 3.2 of [1].
[1] S. Paulraj and P. Sumathi, "A comparative study of redundant
constraints identification methods in linear programming problems,"
Math. Probl. Eng., vol. 2010.
"""
    G_full = np.array([[2, 1, 1],
                       [3, 1, 1],
                       [0, 1, 1],
                       [1, 2, 1],
                       [-1, 0, 0],
                       [0, -1, 0],
                       [0, 0, -1],
                       ], dtype=float)
    h_full = np.array([30,
                       26,
                       13,
                       45,
                       0,
                       0,
                       0,
                       ], dtype=float)
break_index = 3
truth_data = [{'optimal value': 21.67,
'optimal point': np.array([4.33, 13.00, 0.00]),
'is redundant': True,
},
{'optimal value': 45.,
'optimal point': np.array([15., 0.00, 0.00]),
'is redundant': False,
},
{'optimal value': 26,
'optimal point': np.array([0.00, 19.00, 7.00]),
'is redundant': False,
},
{'optimal value': 30.33,
'optimal point': np.array([4.33, 13.00, 0.00]),
'is redundant': True,
}]
if show_timing:
import timeit
def wrapper(func, *args, **kwargs):
def wrapped():
return func(*args, **kwargs)
return wrapped
wrapped = wrapper(find_redundant_constraints, G_full, h_full, break_index, False)
total_time = timeit.timeit(wrapped, number=n_runs)
if verbose:
logging.info('LINEAR PROGRAMMING CONSTRAINT REDUCTION RESULTS \n')
if show_timing:
logging.info('Average execution time over {} runs: {}s\n'
.format(n_runs, total_time / n_runs))
results, _ = find_redundant_constraints(G_full, h_full, break_index, verbose)
# Compare with truth
diffs = []
for i, result in enumerate(results):
ovd = result['optimal value'] - truth_data[i]['optimal value']
opd = result['optimal point'] - truth_data[i]['optimal point']
isr = result['is redundant'] == truth_data[i]['is redundant']
diffs.append({'optimal value diff': ovd,
'optimal point diff': opd,
'redundancies agree': isr})
logging.info("TRUTH MODEL COMPARISON\n")
for i, diff in enumerate(diffs):
logging.info('Constraint {}'.format(i))
        for d, v in diff.items():
logging.info('{}: {}'.format(d,v))
logging.info('\n')
def test_box_constraints(verbose=False):
"""Remove a known redundant constraint for a box polytope.
Constraints:
-x1 \leq 2
x1 \leq 2
x1 \leq 4
-x2 \leq 1
x2 \leq 1
"""
# Define our full set of inequalities of the form Ax \leq b
    G_full = np.array([[-1, 0],
                       [1, 0],
                       [1, 0],
                       [0, -1],
                       [0, 1],
                       ], dtype=float)
    h_full = np.array([2,
                       2,
                       4,
                       1,
                       1,
                       ], dtype=float)
A,b = remove_redundant_constraints(G_full, h_full, verbose=verbose)
return A, b
if __name__ == '__main__':
np.set_printoptions(precision=5, suppress=False)
logging.getLogger().setLevel(logging.INFO)
# test_1D()
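    # Run the synthesis benchmark N times and report the mean and standard
    # deviation of the returned (synthesis time, fusion time, KLD) values.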
N = 30
outs = []
for i in range(N):
out = test_synthesis_techniques(test_set=1, use_MMS=True, visualize=False,
visualize_base=False, show_comp_models=False)
outs.append(out)
outs = np.array(outs)
    print(outs.mean(axis=0))
    print(outs.std(axis=0))
# np.save("softmax_results.nps", np.array(outs))
| {
"content_hash": "aeb2b751ab3596cab479b9fab3f31867",
"timestamp": "",
"source": "github",
"line_count": 1119,
"max_line_length": 115,
"avg_line_length": 38.171581769437,
"alnum_prop": 0.5519735917966007,
"repo_name": "COHRINT/cops_and_robots",
"id": "1fda4f80479da7e09645ad4acd9265cca1729831",
"size": "42714",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "src/cops_and_robots/fusion/softmax/_synthesis.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "3882"
},
{
"name": "CSS",
"bytes": "4701"
},
{
"name": "JavaScript",
"bytes": "217197"
},
{
"name": "Jupyter Notebook",
"bytes": "8190659"
},
{
"name": "Makefile",
"bytes": "6844"
},
{
"name": "Matlab",
"bytes": "12537"
},
{
"name": "PHP",
"bytes": "42478"
},
{
"name": "Python",
"bytes": "711182"
},
{
"name": "Shell",
"bytes": "3376"
}
],
"symlink_target": ""
} |
from ..args import ConversionConfigArg
from ..config import AudioStreamConfig
from ..conversion import Converter
from kao_command.args import Arg, FlagArg
class AddAudio:
""" Represents a command to create a Conversion Config file """
description = "Add a audio stream to a config file"
args = [Arg('stream', action='store', help='Stream # to add to the config (1 for the first stream)'),
ConversionConfigArg(),
FlagArg('-e', '--encoder', action='store', help="The encoder to use for the Audio Stream"),
FlagArg('-m', '--mixdown', action='store', help="The mixdown format to use for the Audio Stream"),
FlagArg('-d', '--drc', action='store', help="The Dynamic Range Compression value to use for the Audio Stream"),
FlagArg('-g', '--gain', action='store', help="The Decibel Gain value")]
def run(self, *, stream, config, encoder, mixdown, drc, gain):
""" Run the command """
        if stream == 'all':
            config.audio.includeAll = True
        else:
            audioStream = AudioStreamConfig(number=stream)
            config.audio.streams.append(audioStream)
            if encoder:
                audioStream.encoder = encoder
            if mixdown:
                audioStream.mixdown = mixdown
            if drc:
                audioStream.drc = drc
            if gain:
                audioStream.gain = gain
config.save() | {
"content_hash": "3f3d7a4023203e5bd1e281be7f09766a",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 123,
"avg_line_length": 45.18181818181818,
"alnum_prop": 0.5808182427900738,
"repo_name": "cloew/Kaovert",
"id": "929e032c35af8dad8be339ad52af0f45ca2e613b",
"size": "1491",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kaovert/commands/add_audio.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27917"
}
],
"symlink_target": ""
} |
import os
import pty
import threading
import argparse
import subprocess
import shutil
from litex.tools.litex_term import LiteXTerm
from rowhammer_tester.scripts.utils import RemoteClient, litex_server
def pty2crossover(m, stop):
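    """Forward bytes typed on the PTY master to the LiteX crossover UART."""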
while not stop.is_set():
r = os.read(m, 1)
wb.regs.uart_xover_rxtx.write(ord(r))
def crossover2pty(m, stop):
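    """Poll the crossover UART and echo any received bytes to the PTY master."""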
while not stop.is_set():
if wb.regs.uart_xover_rxempty.read() == 0:
r = wb.regs.uart_xover_rxtx.read()
os.write(m, bytes(chr(r).encode("utf-8")))
if __name__ == "__main__":
term_priority = ['picocom', 'minicom', 'litex_term']
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--srv', action='store_true', help='Start litex server in background')
parser.add_argument('-b', '--baudrate', default='1e6', help='Serial baud rate')
parser.add_argument(
'-t',
'--term',
choices=['auto', *term_priority],
default='auto',
help='Select serial terminal emulator')
args = parser.parse_args()
if args.srv:
litex_server()
wb = RemoteClient()
wb.open()
m, s = pty.openpty()
tty = os.ttyname(s)
print("LiteX Crossover UART created: {}".format(tty))
stop_event = threading.Event()
threads = [
threading.Thread(target=pty2crossover, args=[m, stop_event], daemon=True),
threading.Thread(target=crossover2pty, args=[m, stop_event], daemon=True),
]
for thread in threads:
thread.start()
baudrate = int(float(args.baudrate))
term = args.term
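    # Resolve 'auto' to the first available terminal emulator in term_priority,
    # falling back to litex_term if none are installed.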
if term == 'auto':
try:
term = next(filter(lambda t: shutil.which(t) is not None, term_priority))
except StopIteration:
term = 'litex_term'
    print('Using serial backend: {}'.format(term))
if term == 'litex_term':
        # installed with LiteX so there are no additional dependencies, but it is slow
term = LiteXTerm(
serial_boot=False, kernel_image=None, kernel_address=None, json_images=None, safe=True)
term.open(tty, baudrate)
term.console.configure()
term.start()
term.join()
elif term == 'picocom':
subprocess.run(['picocom', '-b', str(baudrate), tty])
elif term == 'minicom':
subprocess.run(['minicom', '-b', str(baudrate), '-D', tty])
else:
raise ValueError(term)
stop_event.set()
for thread in threads:
thread.join(timeout=0.05)
wb.close()
| {
"content_hash": "e7c4b91ff52ed107dae61f6bd7eb9e92",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 99,
"avg_line_length": 29.511627906976745,
"alnum_prop": 0.6150512214342002,
"repo_name": "antmicro/litex-rowhammer-tester",
"id": "773a11b7312c3660e1872800cbfb53619cc03ed8",
"size": "2561",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rowhammer_tester/scripts/bios_console.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "17834"
},
{
"name": "Makefile",
"bytes": "5390"
},
{
"name": "Python",
"bytes": "268485"
},
{
"name": "Shell",
"bytes": "1958"
}
],
"symlink_target": ""
} |
from denorm.helpers import find_fks, find_m2ms
from django.db import models
from django.db.models.fields import related
from django.db import connections, connection
from denorm.models import DirtyInstance
from django.contrib.contenttypes.models import ContentType
from denorm.db import triggers
class DenormDependency(object):
"""
Base class for real dependency classes.
"""
def get_triggers(self, using):
"""
Must return a list of ``denorm.triggers.Trigger`` instances
"""
return []
def get_quote_name(self, using):
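        """Return the SQL identifier quoting function for the given DB alias
        (or for the default connection when ``using`` is None)."""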
if using:
cconnection = connections[using]
else:
cconnection = connection
return cconnection.ops.quote_name
def setup(self, this_model):
"""
Remembers the model this dependency was declared in.
"""
self.this_model = this_model
class DependOnRelated(DenormDependency):
def __init__(self, othermodel, foreign_key=None, type=None, skip=None):
self.other_model = othermodel
self.fk_name = foreign_key
self.type = type
self.skip = skip or ()
def setup(self, this_model):
super(DependOnRelated, self).setup(this_model)
# FIXME: this should not be necessary
if self.other_model == related.RECURSIVE_RELATIONSHIP_CONSTANT:
self.other_model = self.this_model
if isinstance(self.other_model, (str, unicode)):
# if ``other_model`` is a string, it certainly is a lazy relation.
related.add_lazy_relation(self.this_model, None, self.other_model, self.resolved_model)
else:
# otherwise it can be resolved directly
self.resolved_model(None, self.other_model, None)
def resolved_model(self, data, model, cls):
"""
Does all the initialization that had to wait until we knew which
model we depend on.
"""
self.other_model = model
# Create a list of all ForeignKeys and ManyToManyFields between both related models, in both directions
candidates = [('forward', fk) for fk in find_fks(self.this_model, self.other_model, self.fk_name)]
if self.other_model != self.this_model or self.type:
candidates += [('backward', fk) for fk in find_fks(self.other_model, self.this_model, self.fk_name)]
candidates += [('forward_m2m', fk) for fk in find_m2ms(self.this_model, self.other_model, self.fk_name)]
if self.other_model != self.this_model or self.type:
candidates += [('backward_m2m', fk) for fk in find_m2ms(self.other_model, self.this_model, self.fk_name)]
# If a relation type was given (forward,backward,forward_m2m or backward_m2m),
# filter out all relations that do not match this type.
candidates = [x for x in candidates if not self.type or self.type == x[0]]
if len(candidates) > 1:
raise ValueError("%s has more than one ForeignKey or ManyToManyField to %s (or reverse); cannot auto-resolve."
% (self.this_model, self.other_model))
if not candidates:
raise ValueError("%s has no ForeignKeys or ManyToManyFields to %s (or reverse); cannot auto-resolve."
% (self.this_model, self.other_model))
        # Now the candidates list contains exactly one item; that's our winner.
self.type, self.field = candidates[0]
class CacheKeyDependOnRelated(DependOnRelated):
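    """
    A DenormDependency that bumps a cache-key field (writing a new random
    value into it) whenever instances of the related model change.
    """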
def get_triggers(self, using):
qn = self.get_quote_name(using)
if not self.type:
# 'resolved_model' model never got called...
raise ValueError("The model '%s' could not be resolved, it probably does not exist" % self.other_model)
content_type = str(ContentType.objects.get_for_model(self.this_model).pk)
if self.type == "forward":
# With forward relations many instances of ``this_model``
# may be related to one instance of ``other_model``
action_new = triggers.TriggerActionUpdate(
model=self.this_model,
columns=(self.fieldname,),
values=(triggers.RandomBigInt(),),
where="%s = NEW.%s" % (
qn(self.field.get_attname_column()[1]),
qn(self.other_model._meta.pk.get_attname_column()[1]),
),
)
action_old = triggers.TriggerActionUpdate(
model=self.this_model,
columns=(self.fieldname,),
values=(triggers.RandomBigInt(),),
where="%s = OLD.%s" % (
qn(self.field.get_attname_column()[1]),
qn(self.other_model._meta.pk.get_attname_column()[1]),
),
)
return [
triggers.Trigger(self.other_model, "after", "update", [action_new], content_type, using, self.skip),
triggers.Trigger(self.other_model, "after", "insert", [action_new], content_type, using, self.skip),
triggers.Trigger(self.other_model, "after", "delete", [action_old], content_type, using, self.skip),
]
if self.type == "backward":
# With backward relations a change in ``other_model`` can affect
# only one or two instances of ``this_model``.
            # If the ``other_model`` instance changes the value of its ForeignKey
            # pointing to ``this_model``, both the old and the new related instance
            # are affected; otherwise only the one it is pointing to is affected.
action_new = triggers.TriggerActionUpdate(
model=self.this_model,
columns=(self.fieldname,),
values=(triggers.RandomBigInt(),),
where="%s = NEW.%s" % (
qn(self.this_model._meta.pk.get_attname_column()[1]),
qn(self.field.get_attname_column()[1]),
),
)
action_old = triggers.TriggerActionUpdate(
model=self.this_model,
columns=(self.fieldname,),
values=(triggers.RandomBigInt(),),
where="%s = OLD.%s" % (
qn(self.this_model._meta.pk.get_attname_column()[1]),
qn(self.field.get_attname_column()[1]),
),
)
return [
triggers.Trigger(self.other_model, "after", "update", [action_new, action_old], content_type, using, self.skip),
triggers.Trigger(self.other_model, "after", "insert", [action_new], content_type, using, self.skip),
triggers.Trigger(self.other_model, "after", "delete", [action_old], content_type, using, self.skip),
]
if "m2m" in self.type:
# The two directions of M2M relations only differ in the column
# names used in the intermediate table.
if isinstance(self.field, models.ManyToManyField):
if "forward" in self.type:
column_name = self.field.m2m_column_name()
reverse_column_name = self.field.m2m_reverse_name()
if "backward" in self.type:
column_name = self.field.m2m_reverse_name()
reverse_column_name = self.field.m2m_column_name()
else:
if "forward" in self.type:
column_name = self.field.object_id_field_name
reverse_column_name = self.field.rel.to._meta.pk.column
if "backward" in self.type:
column_name = self.field.rel.to._meta.pk.column
reverse_column_name = self.field.object_id_field_name
# The first part of a M2M dependency is exactly like a backward
# ForeignKey dependency. ``this_model`` is backward FK related
# to the intermediate table.
action_m2m_new = triggers.TriggerActionUpdate(
model=self.this_model,
columns=(self.fieldname,),
values=(triggers.RandomBigInt(),),
where="%s = NEW.%s" % (
qn(self.this_model._meta.pk.get_attname_column()[1]),
qn(column_name),
),
)
action_m2m_old = triggers.TriggerActionUpdate(
model=self.this_model,
columns=(self.fieldname,),
values=(triggers.RandomBigInt(),),
where="%s = OLD.%s" % (
qn(self.this_model._meta.pk.get_attname_column()[1]),
qn(column_name),
),
)
trigger_list = [
triggers.Trigger(self.field, "after", "update", [action_m2m_new, action_m2m_old], content_type, using, self.skip),
triggers.Trigger(self.field, "after", "insert", [action_m2m_new], content_type, using, self.skip),
triggers.Trigger(self.field, "after", "delete", [action_m2m_old], content_type, using, self.skip),
]
if isinstance(self.field, models.ManyToManyField):
                # In addition to the dependency on the intermediate table,
                # ``this_model`` is dependent on updates to ``other_model``.
                # There is no need to track insert or delete events here,
                # because a relation can only be created or deleted
                # by modifying the intermediate table.
#
# Generic relations are excluded because they have the
# same m2m_table and model table.
sql, params = triggers.TriggerNestedSelect(
self.field.m2m_db_table(),
(column_name,),
**{reverse_column_name: 'NEW.%s' % qn(self.other_model._meta.pk.get_attname_column()[1])}
).sql()
action_new = triggers.TriggerActionUpdate(
model=self.this_model,
columns=(self.fieldname,),
values=(triggers.RandomBigInt(),),
where=(self.this_model._meta.pk.get_attname_column()[1] + ' IN (' + sql + ')', params),
)
trigger_list.append(triggers.Trigger(self.other_model, "after", "update", [action_new], content_type, using, self.skip))
return trigger_list
return []
class CallbackDependOnRelated(DependOnRelated):
"""
A DenormDependency that handles callbacks depending on fields
in other models that are related to the dependent model.
Two models are considered related if there is a ForeignKey or ManyToManyField
on either of them pointing to the other one.
"""
def __init__(self, othermodel, foreign_key=None, type=None, skip=None):
"""
Attaches a dependency to a callable, indicating the return value depends on
fields in an other model that is related to the model the callable belongs to
either through a ForeignKey in either direction or a ManyToManyField.
**Arguments:**
othermodel (required)
Either a model class or a string naming a model class.
foreign_key
The name of the ForeignKey or ManyToManyField that creates the relation
between the two models.
Only necessary if there is more than one relationship between the two models.
type
One of 'forward', 'backward', 'forward_m2m' or 'backward_m2m'.
If there are relations in both directions specify which one to use.
skip
Use this to specify what fields change on every save().
These fields will not be checked and will not make a model dirty when they change, to prevent infinite loops.
"""
super(CallbackDependOnRelated, self).__init__(othermodel, foreign_key, type, skip)
def get_triggers(self, using):
qn = self.get_quote_name(using)
if not self.type:
# 'resolved_model' model never got called...
raise ValueError("The model '%s' could not be resolved, it probably does not exist" % self.other_model)
content_type = str(ContentType.objects.get_for_model(self.this_model).pk)
if self.type == "forward":
# With forward relations many instances of ``this_model``
# may be related to one instance of ``other_model``
# so we need to do a nested select query in the trigger
# to find them all.
action_new = triggers.TriggerActionInsert(
model=DirtyInstance,
columns=("content_type_id", "object_id"),
values=triggers.TriggerNestedSelect(
self.this_model._meta.pk.model._meta.db_table,
(content_type,
self.this_model._meta.pk.get_attname_column()[1]),
**{self.field.get_attname_column()[1]: "NEW.%s" % qn(self.other_model._meta.pk.get_attname_column()[1])}
)
)
action_old = triggers.TriggerActionInsert(
model=DirtyInstance,
columns=("content_type_id", "object_id"),
values=triggers.TriggerNestedSelect(
self.this_model._meta.pk.model._meta.db_table,
(content_type,
self.this_model._meta.pk.get_attname_column()[1]),
**{self.field.get_attname_column()[1]: "OLD.%s" % qn(self.other_model._meta.pk.get_attname_column()[1])}
)
)
return [
triggers.Trigger(self.other_model, "after", "update", [action_new], content_type, using, self.skip),
triggers.Trigger(self.other_model, "after", "insert", [action_new], content_type, using, self.skip),
triggers.Trigger(self.other_model, "after", "delete", [action_old], content_type, using, self.skip),
]
if self.type == "backward":
# With backward relations a change in ``other_model`` can affect
# only one or two instances of ``this_model``.
            # If the ``other_model`` instance changes the value of its ForeignKey
            # pointing to ``this_model``, both the old and the new related instance
            # are affected; otherwise only the one it is pointing to is affected.
action_new = triggers.TriggerActionInsert(
model=DirtyInstance,
columns=("content_type_id", "object_id"),
values=triggers.TriggerNestedSelect(
self.field.model._meta.db_table,
(content_type,
self.field.get_attname_column()[1]),
**{self.field.model._meta.pk.get_attname_column()[1]: "NEW.%s" % qn(self.other_model._meta.pk.get_attname_column()[1])}
)
)
action_old = triggers.TriggerActionInsert(
model=DirtyInstance,
columns=("content_type_id", "object_id"),
values=triggers.TriggerNestedSelect(
self.field.model._meta.db_table,
(content_type,
self.field.get_attname_column()[1]),
**{self.field.model._meta.pk.get_attname_column()[1]: "OLD.%s" % qn(self.other_model._meta.pk.get_attname_column()[1])}
)
)
return [
triggers.Trigger(self.other_model, "after", "update", [action_new, action_old], content_type, using, self.skip),
triggers.Trigger(self.other_model, "after", "insert", [action_new], content_type, using, self.skip),
triggers.Trigger(self.other_model, "after", "delete", [action_old], content_type, using, self.skip),
]
if "m2m" in self.type:
# The two directions of M2M relations only differ in the column
# names used in the intermediate table.
if isinstance(self.field, models.ManyToManyField):
if "forward" in self.type:
column_name = qn(self.field.m2m_column_name())
reverse_column_name = self.field.m2m_reverse_name()
if "backward" in self.type:
column_name = qn(self.field.m2m_reverse_name())
reverse_column_name = self.field.m2m_column_name()
else:
if "forward" in self.type:
column_name = qn(self.field.object_id_field_name)
reverse_column_name = self.field.rel.to._meta.pk.column
if "backward" in self.type:
column_name = qn(self.field.rel.to._meta.pk.column)
reverse_column_name = self.field.object_id_field_name
# The first part of a M2M dependency is exactly like a backward
# ForeignKey dependency. ``this_model`` is backward FK related
# to the intermediate table.
action_m2m_new = triggers.TriggerActionInsert(
model=DirtyInstance,
columns=("content_type_id", "object_id"),
values=(
content_type,
"NEW.%s" % column_name,
)
)
action_m2m_old = triggers.TriggerActionInsert(
model=DirtyInstance,
columns=("content_type_id", "object_id"),
values=(
content_type,
"OLD.%s" % column_name,
)
)
trigger_list = [
triggers.Trigger(self.field, "after", "update", [action_m2m_new, action_m2m_old], content_type, using, self.skip),
triggers.Trigger(self.field, "after", "insert", [action_m2m_new], content_type, using, self.skip),
triggers.Trigger(self.field, "after", "delete", [action_m2m_old], content_type, using, self.skip),
]
if isinstance(self.field, models.ManyToManyField):
                # In addition to the dependency on the intermediate table,
                # ``this_model`` is dependent on updates to ``other_model``.
                # There is no need to track insert or delete events here,
                # because a relation can only be created or deleted
                # by modifying the intermediate table.
#
# Generic relations are excluded because they have the
# same m2m_table and model table.
action_new = triggers.TriggerActionInsert(
model=DirtyInstance,
columns=("content_type_id", "object_id"),
values=triggers.TriggerNestedSelect(
self.field.m2m_db_table(),
(content_type, column_name),
**{reverse_column_name: 'NEW.%s' % qn(self.other_model._meta.pk.get_attname_column()[1])}
)
)
trigger_list.append(triggers.Trigger(self.other_model, "after", "update", [action_new], content_type, using, self.skip))
return trigger_list
return []
def make_depend_decorator(Class):
"""
Create a decorator that attaches an instance of the given class
to the decorated function, passing all remaining arguments to the classes
__init__.
"""
import functools
def decorator(*args, **kwargs):
def deco(func):
if not hasattr(func, 'depend'):
func.depend = []
func.depend.append((Class, args, kwargs))
return func
return deco
functools.update_wrapper(decorator, Class.__init__)
return decorator
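# Decorator form of CallbackDependOnRelated; used as
# @depend_on_related('OtherModel', ...) on a denormalized callable.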
depend_on_related = make_depend_decorator(CallbackDependOnRelated)
| {
"content_hash": "35799a349140ad1d05ac95f312c92a47",
"timestamp": "",
"source": "github",
"line_count": 423,
"max_line_length": 139,
"avg_line_length": 47.096926713947994,
"alnum_prop": 0.5630960746912961,
"repo_name": "incuna/django-denorm",
"id": "c7c321bdee7c4c9fc7c779e97c86914fe91e4720",
"size": "19946",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "denorm/dependencies.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "135414"
}
],
"symlink_target": ""
} |
"""
Helper to find the path to the correct third_party directory
"""
from os import path
import sys
import platform
# Find the root path of the checkout.
# In the Chromium repository, this is the src/chromium directory.
# In the external repository, standalone build, this is the devtools-frontend directory.
# In the external repository, integrated build, this is the src/chromium directory.
def root_path():
SCRIPTS_PATH = path.dirname(path.abspath(__file__))
ABS_DEVTOOLS_PATH = path.dirname(SCRIPTS_PATH)
PARENT_PATH = path.dirname(ABS_DEVTOOLS_PATH)
# TODO(1011259): remove Chromium repository handling
if path.basename(PARENT_PATH) == 'renderer':
# Chromium repository
return path.dirname(path.dirname(path.dirname(PARENT_PATH)))
elif path.basename(PARENT_PATH) == 'devtools-frontend':
# External repository, integrated build
return path.dirname(path.dirname(PARENT_PATH))
else:
# External repository, standalone build
return ABS_DEVTOOLS_PATH
# This is the third_party path relative to the root of the checkout.
def third_party_path():
return path.join(root_path(), 'third_party')
# This points to the node binary downloaded as part of the checkout.
def node_path():
try:
old_sys_path = sys.path[:]
sys.path.append(path.join(third_party_path(), 'node'))
import node
finally:
sys.path = old_sys_path
return node.GetBinaryPath()
def devtools_root_path():
return path.dirname((path.dirname(path.abspath(__file__))))
def node_modules_path():
return path.join(devtools_root_path(), 'node_modules')
def eslint_path():
return path.join(node_modules_path(), 'eslint', 'bin', 'eslint.js')
def check_localizable_resources_path():
return path.join(devtools_root_path(), 'scripts', 'localization', 'check_localizable_resources.js')
def check_localized_strings_path():
return path.join(devtools_root_path(), 'scripts', 'localization', 'check_localizability.js')
def karma_path():
return path.join(node_modules_path(), 'karma', 'bin', 'karma')
def boot_perf_test_path():
return path.join(devtools_root_path(), 'test', 'perf', 'bootperf.js')
def hosted_mode_script_path():
return path.join(devtools_root_path(), 'scripts', 'hosted_mode', 'server.js')
def downloaded_chrome_binary_path():
return path.abspath(path.join(
*{
'Linux': (devtools_root_path(), 'third_party', 'chrome', 'chrome-linux', 'chrome'),
'Darwin': (devtools_root_path(), 'third_party', 'chrome', 'chrome-mac', 'Chromium.app', 'Contents', 'MacOS', 'Chromium'),
'Windows': (devtools_root_path(), 'third_party', 'chrome', 'chrome-win', 'chrome.exe'),
}[platform.system()]))
def license_checker_path():
return path.join(node_modules_path(), 'license-checker', 'bin', 'license-checker')
def rollup_path():
return path.join(
node_modules_path(),
'rollup',
'dist',
'bin',
'rollup',
)
def package_lock_json_path():
return path.join(devtools_root_path(), 'package-lock.json')
def package_json_path():
return path.join(devtools_root_path(), 'package.json')
def browser_protocol_path():
return path.join(third_party_path(), 'blink', 'public', 'devtools_protocol', 'browser_protocol.pdl')
| {
"content_hash": "38243232963dace710c9772e0f5344c4",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 129,
"avg_line_length": 30.254545454545454,
"alnum_prop": 0.6703725961538461,
"repo_name": "youtube/cobalt",
"id": "def3726fd5272bd7f973b8c3d9a2d0699b0c9ec9",
"size": "3490",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "third_party/devtools/scripts/devtools_paths.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import subprocess
import shlex
subprocess.call(shlex.split('sudo PYTHONPATH="../LEDs/python/build/lib.linux-armv7l-2.7" python ../ledRV1_part2.py')) | {
"content_hash": "a7564de97d8abf173633e3e413757011",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 117,
"avg_line_length": 37.25,
"alnum_prop": 0.7718120805369127,
"repo_name": "ssaini4/RoboticLamp",
"id": "db60fb9b46121fd5c6a9218b82ed25a3a7120300",
"size": "149",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main/RVcases/ledRV1.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "243609"
},
{
"name": "Python",
"bytes": "215545"
}
],
"symlink_target": ""
} |