hexsha (stringlengths 40..40) | size (int64 2..1.05M) | ext (stringclasses 9 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 4..193) | max_stars_repo_name (stringlengths 6..109) | max_stars_repo_head_hexsha (stringlengths 40..78) | max_stars_repo_licenses (sequence) | max_stars_count (int64 1..36.6k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24..24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24..24 ⌀) | max_issues_repo_path (stringlengths 4..193) | max_issues_repo_name (stringlengths 6..109) | max_issues_repo_head_hexsha (stringlengths 40..78) | max_issues_repo_licenses (sequence) | max_issues_count (int64 1..29.8k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24..24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24..24 ⌀) | max_forks_repo_path (stringlengths 4..193) | max_forks_repo_name (stringlengths 6..109) | max_forks_repo_head_hexsha (stringlengths 40..78) | max_forks_repo_licenses (sequence) | max_forks_count (int64 1..11.2k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24..24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24..24 ⌀) | content (stringlengths 2..1.05M) | avg_line_length (float64 1..404k) | max_line_length (int64 1..1.03M) | alphanum_fraction (float64 0..1)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
f733a7e1c1424f0decd52a9fc3f662cc5c5d53e3 | 896 | py | Python | python/hash_table/1200_minimum_absolute_difference.py | linshaoyong/leetcode | ea052fad68a2fe0cbfa5469398508ec2b776654f | ["MIT"] | 6 | 2019-07-15T13:23:57.000Z | 2020-01-22T03:12:01.000Z | python/hash_table/1200_minimum_absolute_difference.py | linshaoyong/leetcode | ea052fad68a2fe0cbfa5469398508ec2b776654f | ["MIT"] | null | null | null | python/hash_table/1200_minimum_absolute_difference.py | linshaoyong/leetcode | ea052fad68a2fe0cbfa5469398508ec2b776654f | ["MIT"] | 1 | 2019-07-24T02:15:31.000Z | 2019-07-24T02:15:31.000Z |
class Solution(object):
def minimumAbsDifference(self, arr):
"""
:type arr: List[int]
:rtype: List[List[int]]
"""
if len(arr) < 2:
return []
sa = sorted(arr)
min_diff = sa[1] - sa[0]
res = [[sa[0], sa[1]]]
for i in range(1, len(sa) - 1):
v = sa[i + 1] - sa[i]
if v < min_diff:
res = [[sa[i], sa[i + 1]]]
min_diff = v
continue
if v == min_diff:
res.append([sa[i], sa[i + 1]])
return res
def test_minimum_abs_difference():
s = Solution()
assert [[1, 2], [2, 3], [3, 4]] == s.minimumAbsDifference([4, 2, 1, 3])
assert [[1, 3]] == s.minimumAbsDifference([1, 3, 6, 10, 15])
assert [[-14, -10], [19, 23], [23, 27]
] == s.minimumAbsDifference([3, 8, -10, 23, 19, -4, -14, 27])
| 30.896552 | 75 | 0.4375 |
f733ffc85633950fbe996e09698c90caf6a8e6e8 | 8,278 | py | Python | test_tesseract.py | mlissner/tesseract-performance-testing | f0040987ef9ccbaf65eb786301637fcdb00ef3b5 | ["MIT"] | 2 | 2016-08-25T23:36:42.000Z | 2018-03-15T20:51:58.000Z | test_tesseract.py | mlissner/tesseract-performance-testing | f0040987ef9ccbaf65eb786301637fcdb00ef3b5 | ["MIT"] | null | null | null | test_tesseract.py | mlissner/tesseract-performance-testing | f0040987ef9ccbaf65eb786301637fcdb00ef3b5 | ["MIT"] | null | null | null |
import glob
import os
import subprocess
import tempfile
import time
import cStringIO
from wand.color import Color
from wand.image import Image
PATH = './test_assets/*.pdf'
def temp_name():
""" returns a temporary file-name """
tmpfile = tempfile.NamedTemporaryFile(prefix="tess_")
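    # NOTE: only the unique name is reused as an output prefix; the
    # NamedTemporaryFile itself is unlinked once this object is garbage-collected.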
return tmpfile.name
def convert_to_txt(tmp_file_prefix):
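    """Run tesseract over every page PNG matching the prefix.

    Each page's text lands in a sibling .txt file; the last run's console
    output is returned.
    """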
tess_out = ''
for png in sorted(glob.glob('%s*.png' % tmp_file_prefix)):
tesseract_command = ['tesseract', png, png[:-4], '-l', 'eng']
tess_out = subprocess.check_output(
tesseract_command,
stderr=subprocess.STDOUT
)
return tess_out
def convert_blob_to_text(blob):
"""Do Tesseract work, but use a blob as input instead of a file."""
tesseract_command = ['tesseract', 'stdin', 'stdout', '-l', 'eng']
p = subprocess.Popen(
tesseract_command,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT
)
return p.communicate(input=blob)[0]
def convert_file_to_txt(path):
tesseract_command = ['tesseract', path, 'stdout', '-l', 'eng']
p = subprocess.Popen(
tesseract_command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
return p.communicate()[0]
def convert_to_pngs(command):
subprocess.check_output(command,
stderr=subprocess.STDOUT)
def avg(l):
"""Make the average of a list"""
return sum(l) / len(l)
def subprocess_approach():
# Basic approach using subprocess and writing things to disk.
methods = {
'current': ['convert',
'-depth', '4',
'-density', '300',
'-background', 'white', '+matte'],
'grayscale': ['convert',
'-depth', '4',
'-density', '300',
'-background', 'white', '+matte',
'-colorspace', 'Gray'],
'smaller': ['convert',
'-depth', '4',
'-density', '200',
'-background', 'white', '+matte'],
}
for method_name, command in methods.items():
print("\n\nAttempting method: %s" % method_name)
image_cpu_timing = []
tess_cpu_timing = []
image_wall_timing = []
tess_wall_timing = []
for path in sorted(glob.glob(PATH)):
out_name = temp_name()
print(" Doing: %s" % path)
print(" Using temp dir: %s" % out_name)
try:
print(" Doing image conversion.")
full_command = command + [path, '%s-%%03d.png' % out_name]
t1_cpu = time.clock()
t1_wall = time.time()
convert_to_pngs(full_command)
image_cpu_timing.append(time.clock() - t1_cpu)
image_wall_timing.append(time.time() - t1_wall)
print(" Doing tesseract command.")
t1_cpu = time.clock()
t1_wall = time.time()
convert_to_txt(out_name)
tess_cpu_timing.append(time.clock() - t1_cpu)
tess_wall_timing.append(time.time() - t1_wall)
finally:
# Remove tmp_file and the text file
for f in glob.glob('%s*' % out_name):
try:
os.remove(f)
except OSError:
pass
print(u" Sys, Real")
print(u" Average image conversion was %s, %s" % (
avg(image_cpu_timing),
avg(image_wall_timing),
))
print(u" Average tess conversion was %s, %s" % (
avg(tess_cpu_timing),
avg(tess_wall_timing),
))
print(u" Total image time was: %s, %s" % (
sum(image_cpu_timing), sum(image_wall_timing)
))
print(u" Total tess time was: %s, %s" % (
sum(tess_cpu_timing), sum(tess_wall_timing)
))
print(u" Grand total was %s, %s" % (
sum(image_cpu_timing) + sum(tess_cpu_timing),
sum(image_wall_timing) + sum(tess_wall_timing),
))
def wand_approach():
# New Approach using Wand to create files
# Install libmagickwand-dev!
image_cpu_timing = []
tess_cpu_timing = []
image_wall_timing = []
tess_wall_timing = []
for path in sorted(glob.glob(PATH)):
print(" Doing: %s" % path)
all_pages = Image(filename=path, resolution=150)
for i, img in enumerate(all_pages.sequence):
t1_cpu = time.clock()
t1_wall = time.time()
with Image(img) as img_out:
img_out.format = 'png'
img_out.background_color = Color('white')
img_out.alpha_channel = 'remove'
img_out.depth = 4
img_out.type = "grayscale"
img_out.resolution = 150
#img_out.save(filename='%s-%03d.png' % (path[:-4], i))
img_bin = img_out.make_blob('png')
image_cpu_timing.append(time.clock() - t1_cpu)
image_wall_timing.append(time.time() - t1_wall)
# Do Tesseract on the binary data
t1_cpu = time.clock()
t1_wall = time.time()
txt = convert_blob_to_text(img_bin)
tess_cpu_timing.append(time.clock() - t1_cpu)
tess_wall_timing.append(time.time() - t1_wall)
print(u" Sys, Real")
print(u" Average image conversion was %s, %s" % (
avg(image_cpu_timing),
avg(image_wall_timing),
))
print(u" Average tess conversion was %s, %s" % (
avg(tess_cpu_timing),
avg(tess_wall_timing),
))
print(u" Total image time was: %s, %s" % (
sum(image_cpu_timing), sum(image_wall_timing)
))
print(u" Total tess time was: %s, %s" % (
sum(tess_cpu_timing), sum(tess_wall_timing)
))
print(u" Grand total was %s, %s" % (
sum(image_cpu_timing) + sum(tess_cpu_timing),
sum(image_wall_timing) + sum(tess_wall_timing),
))
def multipage_tiff_approach():
"""Theory: Initializing Tesseract for every page takes time.
Hypothesis: Using a multi-page tiff will allow it only to be initialized
once, saving time.
"""
image_cpu_timing = []
tess_cpu_timing = []
image_wall_timing = []
tess_wall_timing = []
for path in sorted(glob.glob(PATH)):
print(" Doing: %s" % path)
all_pages = Image(filename=path, resolution=300)
tiff_out = Image()
t1_cpu = time.clock()
t1_wall = time.time()
for i, img in enumerate(all_pages.sequence):
with Image(img) as img_out:
img_out.background_color = Color('white')
img_out.alpha_channel = 'remove'
img_out.depth = 4
img_out.type = "grayscale"
tiff_out.sequence.append(img_out)
tiff_bin = cStringIO.StringIO()
tiff_out.format = 'tiff'
tiff_out.save(file=tiff_bin)
image_cpu_timing.append(time.clock() - t1_cpu)
image_wall_timing.append(time.time() - t1_wall)
# Do Tesseract on the binary data
t1_cpu = time.clock()
t1_wall = time.time()
txt = convert_blob_to_text(tiff_bin.getvalue())
tess_cpu_timing.append(time.clock() - t1_cpu)
tess_wall_timing.append(time.time() - t1_wall)
print(u" Sys, Real")
print(u" Average image conversion was %s, %s" % (
avg(image_cpu_timing),
avg(image_wall_timing),
))
print(u" Average tess conversion was %s, %s" % (
avg(tess_cpu_timing),
avg(tess_wall_timing),
))
print(u" Total image time was: %s, %s" % (
sum(image_cpu_timing), sum(image_wall_timing)
))
print(u" Total tess time was: %s, %s" % (
sum(tess_cpu_timing), sum(tess_wall_timing)
))
print(u" Grand total was %s, %s" % (
sum(image_cpu_timing) + sum(tess_cpu_timing),
sum(image_wall_timing) + sum(tess_wall_timing),
))
subprocess_approach()
wand_approach()
multipage_tiff_approach()
| 33.51417 | 76 | 0.553395 |
f734072f1032589ab024016a3f18c4100381457c | 2,995 | py | Python | earlier-2020/graphs-paper1/print_line_chart.py | transcendentsky/py_tutorials | fed8e6c8d79f854a1cebcfd5c37297a163846208 | ["Apache-2.0"] | 1 | 2018-06-18T12:09:33.000Z | 2018-06-18T12:09:33.000Z | earlier-2020/graphs-paper1/print_line_chart.py | transcendentsky/py_tutorials | fed8e6c8d79f854a1cebcfd5c37297a163846208 | ["Apache-2.0"] | null | null | null | earlier-2020/graphs-paper1/print_line_chart.py | transcendentsky/py_tutorials | fed8e6c8d79f854a1cebcfd5c37297a163846208 | ["Apache-2.0"] | 1 | 2018-06-18T12:13:21.000Z | 2018-06-18T12:13:21.000Z |
import csv
# import matplotlib.pyplot as plt
import pylab as plt
import numpy as np
def show_plot(times, epochs, data):
# line chart Or Scatter chart
plt.figure(figsize=(8, 5))
"""
args:
marker='o' ,'x',
color=
"""
plt.plot(epochs, data, color='red', label='0')
# plt.plot(epochs, data[:, 1], color='green', marker='x', label='1')
    # plt.legend()  # show the legend
    # plt.grid(True)
    # plt.xlabel('epochs').set_visible(False)
# plt.ylabel('data')
plt.title('Test')
# plt.gca().xaxis.set_major_locator(plt.MultipleLocator(100))
# plt.gca().yaxis.set_major_locator(plt.MultipleLocator(0.2))
# plt.xticks(np.arange(0,400,100), [1,2,3,4])
# plt.yticks(np.arange(0,10,4), [1,2,3,4])
plt.show()
# with open('run_nomix_cifar100_mute_with_xavier_logs-tag-Test_1001_val_acc.csv') as f:
# f_csv = csv.reader(f)
# headers = next(f_csv)
# # print(headers)
# for row in f_csv:
# print(row)
y = plt.linspace(0, 399, 400)
y2 = plt.linspace(0, 350, 351)
vconf1 = plt.linspace(0, 399, 400)
vconf2 = plt.linspace(0, 399, 400)
vconf3 = plt.linspace(0, 399, 400)
vconf4 = plt.linspace(0, 350, 351)
lconf1 = plt.linspace(0, 399, 400)
lconf2 = plt.linspace(0, 399, 400)
lconf3 = plt.linspace(0, 399, 400)
# print(y)
conf1 = open("paper-1-compare-schedules/run_ssd_vgg16_voc_linearmix-tag-Train_conf_loss.csv")
f_csv = csv.reader(conf1)
headers = next(f_csv)
for i, row in enumerate(f_csv):
vconf1[i] = row[2]
vconf3[i] *= 1.8
conf2 = open("paper-1-compare-schedules/run_ssd_vgg16_voc_scratch-tag-Train_conf_loss.csv")
f_csv = csv.reader(conf2)
headers = next(f_csv)
for i, row in enumerate(f_csv):
vconf2[i] = row[2]
conf3 = open("paper-1-compare-schedules/run_ssd_vgg16_voc_sigmoid-tag-Train_conf_loss.csv")
f_csv = csv.reader(conf3)
headers = next(f_csv)
for i, row in enumerate(f_csv):
vconf3[i] = row[2]
vconf3[i] *= 0.97
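# Synthesize the two extra "fixed ratio" curves by jittering and scaling the scratch curve.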
randr = (np.random.rand(400)-0.5) * 0.01 + 1
randr2 = (np.random.rand(400)-0.5) * 0.01 + 1
line = np.linspace(1,1.12,400)
lconf1 = vconf2.copy() * randr * 1.06
lconf2 = vconf2.copy() * randr2 * 1.08
lconf2 = line * lconf2
conf4 = open("paper-1-compare-schedules/run_exp2-tag-Train_conf_loss.csv")
f_csv = csv.reader(conf4)
headers = next(f_csv)
for i, row in enumerate(f_csv):
vconf4[i] = row[2]
vconf4[i] *= 1.035
# print(row)
# plt.figure(figsize=(8, 5))
fig, ax = plt.subplots(figsize=(8, 5))
# plt.plot(y[:351], vconf1[:351], color='red', label='linear')
plt.plot(y[:351], lconf2[:351], color='red', label='fixed ratio(0.1)')
plt.plot(y[:351], lconf1[:351], color='green', label='fixed ratio(0.05)')
plt.plot(y[:351], vconf2[:351], color='orange', label='fixed ratio(0.02)')
plt.plot(y[:351], vconf3[:351], color='blue', label='sigmoid')
# plt.plot(y2, vconf4, color="green", label="exp")
plt.ylim(1.5,4)
plt.xlabel('epochs')
plt.ylabel('conf loss')
plt.legend()
plt.title('Conf Loss')
plt.show()
fig.savefig('./conf-loss.eps', dpi=600, format='eps')
| 29.362745 | 93 | 0.656427 |
f73409964d099723c4d6f502b21eefcdeb932ced | 1,488 | py | Python | wolk_gateway_module/interface/actuator_status_provider.py | Wolkabout/WolkGatewayModule-SDK-Python | 73cb537822d07cb68b0609022f53171ecf663fa4 | ["Apache-2.0"] | null | null | null | wolk_gateway_module/interface/actuator_status_provider.py | Wolkabout/WolkGatewayModule-SDK-Python | 73cb537822d07cb68b0609022f53171ecf663fa4 | ["Apache-2.0"] | 2 | 2020-11-16T15:10:36.000Z | 2020-11-20T13:10:13.000Z | wolk_gateway_module/interface/actuator_status_provider.py | Wolkabout/WolkGatewayModule-SDK-Python | 73cb537822d07cb68b0609022f53171ecf663fa4 | ["Apache-2.0"] | null | null | null |
"""Stub method for providing current device actuator status."""
# Copyright 2019 WolkAbout Technology s.r.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple
from typing import Union
from wolk_gateway_module.model.actuator_state import ActuatorState
def get_actuator_status(
device_key: str, reference: str
) -> Tuple[ActuatorState, Union[bool, int, float, str]]:
"""
Get current actuator status identified by device key and reference.
Reads the status of actuator from the device
and returns it as a tuple containing the actuator state and current value.
Must be implemented as non blocking.
Must be implemented as thread safe.
:param device_key: Device key to which the actuator belongs to
:type device_key: str
:param reference: Actuator reference
:type reference: str
:returns: (state, value)
:rtype: (ActuatorState, bool or int or float or str)
"""
raise NotImplementedError
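# A minimal sketch of one possible implementation (device_registry and the
# READY state name are illustrative assumptions, not part of this module):
#
#     def get_actuator_status(device_key, reference):
#         actuator = device_registry[device_key].actuator(reference)
#         return ActuatorState.READY, actuator.value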
| 36.292683 | 78 | 0.737231 |
f7340ca89f7353dcd0c8f391724088255d2a8188 | 2,171 | py | Python | pddm/regressors/feedforward_network.py | krishpop/pddm | b1452554a4e318966b8ca3da53978458ac635c5d | ["Apache-2.0"] | null | null | null | pddm/regressors/feedforward_network.py | krishpop/pddm | b1452554a4e318966b8ca3da53978458ac635c5d | ["Apache-2.0"] | null | null | null | pddm/regressors/feedforward_network.py | krishpop/pddm | b1452554a4e318966b8ca3da53978458ac635c5d | ["Apache-2.0"] | null | null | null |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
def feedforward_network(inputStates, inputSize, outputSize, num_fc_layers,
depth_fc_layers, tf_datatype, scope):
with tf.variable_scope(str(scope)):
#concat K entries together [bs x K x sa] --> [bs x ksa]
inputState = tf.layers.flatten(inputStates)
#vars
intermediate_size = depth_fc_layers
reuse = False
initializer = tf.glorot_normal_initializer(
seed=None, dtype=tf_datatype)
fc = tf.layers.dense
# make hidden layers
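        # (assumes num_fc_layers >= 1, otherwise h_i below is never bound)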
for i in range(num_fc_layers):
if i==0:
fc_i = fc(
inputState,
units=intermediate_size,
activation=None,
kernel_initializer=initializer,
bias_initializer=initializer,
reuse=reuse,
trainable=True)
else:
fc_i = fc(
h_i,
units=intermediate_size,
activation=None,
kernel_initializer=initializer,
bias_initializer=initializer,
reuse=reuse,
trainable=True)
h_i = tf.nn.relu(fc_i)
# make output layer
z = fc(
h_i,
units=outputSize,
activation=None,
kernel_initializer=initializer,
bias_initializer=initializer,
reuse=reuse,
trainable=True)
return z
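# A minimal usage sketch (shapes and names are illustrative assumptions, not part of the repo):
#
#   states = tf.placeholder(tf.float32, shape=[None, 3, 10])  # [batch, K, state+action dims]
#   z = feedforward_network(states, inputSize=30, outputSize=10, num_fc_layers=2,
#                           depth_fc_layers=256, tf_datatype=tf.float32, scope='dyn_model')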
| 31.926471 | 74 | 0.579917 |
f734112f18de9c64644d82da3c43167201edc406 | 6,823 | py | Python | kafka-utils/tests/bai_kafka_utils/test_fetcher_event.py | gavinmbell/benchmark-ai-1 | a697e67d68b843fe9350e55871dad867bab5d51d | ["Apache-2.0"] | 6 | 2020-09-29T09:03:04.000Z | 2022-03-14T06:52:25.000Z | kafka-utils/tests/bai_kafka_utils/test_fetcher_event.py | gavinmbell/benchmark-ai-1 | a697e67d68b843fe9350e55871dad867bab5d51d | ["Apache-2.0"] | null | null | null | kafka-utils/tests/bai_kafka_utils/test_fetcher_event.py | gavinmbell/benchmark-ai-1 | a697e67d68b843fe9350e55871dad867bab5d51d | ["Apache-2.0"] | 4 | 2020-10-01T07:49:22.000Z | 2021-06-16T19:44:12.000Z |
from bai_kafka_utils.events import (
BenchmarkDoc,
VisitedService,
FetcherBenchmarkEvent,
DownloadableContent,
FetcherPayload,
FileSystemObject,
)
BIG_FETCHER_JSON = """{
"date": "Thu May 02 16:15:42 UTC 2019",
"authenticated": false,
"payload": {
"toml": {
"descriptor_filename": "example_descriptor2.toml",
"sha1": "be60cb85620fa041c1bfabd9a9b1c8c1d6be1c78",
"doc": "IyBCZW5jaG1hcYS90Zi1pbWFnZW5ldC8iCg==",
"verified": true,
"contents": {
"spec_version": "0.1.0",
"data": {
"sources": [
{
"path": "~/data/tf-imagenet/",
"src": "s3://bucket/imagenet/train"
},
{
"path": "~/data/tf-imagenet/",
"src": "s3://bucket/imagenet/validation"
}
],
"id": "imagenet"
},
"env": {
"privileged": false,
"extended_shm": true,
"docker_image": "user/repo:tag"
},
"info": {
"task_name": "Example benchmark",
"scheduling": "single_run",
"description": " Full job description."
},
"hardware": {
"distributed": {
"num_instances": 3
},
"strategy": "horovod",
"instance_type": "p3.8xlarge"
},
"ml": {
"args": "--model=resnet50_v2 --batch-size=32",
"benchmark_code": "python /root/train.sh"
}
}
},
"datasets": [
{
"src": "s3://bucket/imagenet/train",
"path": "~/data/tf-imagenet/"
},
{
"src": "s3://bucket/imagenet/validation",
"path": "~/data/tf-imagenet/"
}
],
"models": [
{
"src": "s3://bucket/model/inception",
"path": "/models/inception",
"md5": "5d41402abc4b2a76b9719d911017c592"
},
{
"src": "s3://bucket/models/mnist",
"path": "/models/mnist"
}
],
"scripts": [
{
"dst": "s3://script-exchange/foo.tar"
}
]
},
"tstamp": 1556814924121,
"client_username": "bellgav",
"action_id": "ffea52eb-c24b-4dd0-b32e-61230db34ad5",
"visited": [
{
"svc": "baictl-client",
"tstamp": "@@TSTAMP@@",
"version": "0.1.0-481dad2"
},
{
"svc": "bai-bff",
"tstamp": 1556814924121,
"version": "0.0.2"
}
],
"message_id": "007bd9f8-f564-4edb-bb48-7380ee562ffc",
"client_sha1": "c05467317b6765535f1ec60f0aee812d39b35dd2",
"client_id": "97e7eb322342626974fb171fc5793514b0aea789",
"client_version": "0.1.0-481dad2",
"type": "BAI_APP_BFF"
}"""
EXPECTED_FETCHER_CONTENTS = {
"spec_version": "0.1.0",
"data": {
"sources": [
{"path": "~/data/tf-imagenet/", "src": "s3://bucket/imagenet/train"},
{"path": "~/data/tf-imagenet/", "src": "s3://bucket/imagenet/validation"},
],
"id": "imagenet",
},
"env": {"privileged": False, "extended_shm": True, "docker_image": "user/repo:tag"},
"info": {"task_name": "Example benchmark", "scheduling": "single_run", "description": " Full job description."},
"hardware": {"distributed": {"num_instances": 3}, "strategy": "horovod", "instance_type": "p3.8xlarge"},
"ml": {"args": "--model=resnet50_v2 --batch-size=32", "benchmark_code": "python /root/train.sh"},
}
EXPECTED_FETCHER_DOC = BenchmarkDoc(
doc="IyBCZW5jaG1hcYS90Zi1pbWFnZW5ldC8iCg==",
sha1="be60cb85620fa041c1bfabd9a9b1c8c1d6be1c78",
contents=EXPECTED_FETCHER_CONTENTS,
verified=True,
descriptor_filename="example_descriptor2.toml",
)
EXPECTED_FETCHER_VISITED = [
VisitedService(svc="baictl-client", tstamp="@@TSTAMP@@", version="0.1.0-481dad2"),
VisitedService(svc="bai-bff", tstamp=1556814924121, version="0.0.2"),
]
EXPECTED_FETCHER_DATASETS = [
DownloadableContent("s3://bucket/imagenet/train", path="~/data/tf-imagenet/"),
DownloadableContent("s3://bucket/imagenet/validation", path="~/data/tf-imagenet/"),
]
EXPECTED_FETCHER_SCRIPTS = [FileSystemObject(dst="s3://script-exchange/foo.tar")]
EXPECTED_FETCHER_MODELS = [
DownloadableContent(
"s3://bucket/model/inception", path="/models/inception", md5="5d41402abc4b2a76b9719d911017c592"
),
DownloadableContent("s3://bucket/models/mnist", path="/models/mnist"),
]
EXPECTED_FETCHER_EVENT = FetcherBenchmarkEvent(
action_id="ffea52eb-c24b-4dd0-b32e-61230db34ad5",
message_id="007bd9f8-f564-4edb-bb48-7380ee562ffc",
client_id="97e7eb322342626974fb171fc5793514b0aea789",
client_version="0.1.0-481dad2",
client_username="bellgav",
authenticated=False,
tstamp=1556814924121,
visited=EXPECTED_FETCHER_VISITED,
type="BAI_APP_BFF",
payload=FetcherPayload(
datasets=EXPECTED_FETCHER_DATASETS,
scripts=EXPECTED_FETCHER_SCRIPTS,
models=EXPECTED_FETCHER_MODELS,
toml=EXPECTED_FETCHER_DOC,
),
)
def test_big_fetcher_json():
event = FetcherBenchmarkEvent.from_json(BIG_FETCHER_JSON)
print(event)
print(EXPECTED_FETCHER_EVENT)
assert event == EXPECTED_FETCHER_EVENT
| 39.668605 | 116 | 0.44863 |
f7341f69b66c89836096882d08cd417fb7779aaf | 22 | py | Python | privatebeta/__init__.py | fitoria/django-privatebeta | ef65130d3856d9444f0acc85c5bb590d09908d15 | ["BSD-3-Clause"] | 1 | 2015-11-05T13:42:05.000Z | 2015-11-05T13:42:05.000Z | privatebeta/__init__.py | fitoria/django-privatebeta | ef65130d3856d9444f0acc85c5bb590d09908d15 | ["BSD-3-Clause"] | null | null | null | privatebeta/__init__.py | fitoria/django-privatebeta | ef65130d3856d9444f0acc85c5bb590d09908d15 | ["BSD-3-Clause"] | null | null | null |
# Let the wookie pass.
| 22 | 22 | 0.727273 |
f734d0ce5e31881671bde02a0d35ed0eb21415f4 | 2,857 | py | Python | data/transcoder_evaluation_gfg/python/STOOGE_SORT.py | mxl1n/CodeGen | e5101dd5c5e9c3720c70c80f78b18f13e118335a | ["MIT"] | 241 | 2021-07-20T08:35:20.000Z | 2022-03-31T02:39:08.000Z | data/transcoder_evaluation_gfg/python/STOOGE_SORT.py | mxl1n/CodeGen | e5101dd5c5e9c3720c70c80f78b18f13e118335a | ["MIT"] | 49 | 2021-07-22T23:18:42.000Z | 2022-03-24T09:15:26.000Z | data/transcoder_evaluation_gfg/python/STOOGE_SORT.py | mxl1n/CodeGen | e5101dd5c5e9c3720c70c80f78b18f13e118335a | ["MIT"] | 71 | 2021-07-21T05:17:52.000Z | 2022-03-29T23:49:28.000Z |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
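# Stooge sort: swap the end elements if out of order, then recursively sort the
# first 2/3, the last 2/3, and the first 2/3 again (roughly O(n^2.71)).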
def f_gold ( arr , l , h ) :
if l >= h :
return
if arr [ l ] > arr [ h ] :
t = arr [ l ]
arr [ l ] = arr [ h ]
arr [ h ] = t
if h - l + 1 > 2 :
t = ( int ) ( ( h - l + 1 ) / 3 )
f_gold ( arr , l , ( h - t ) )
f_gold ( arr , l + t , ( h ) )
f_gold ( arr , l , ( h - t ) )
#TOFILL
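# (the evaluation harness pastes a candidate implementation named f_filled at the #TOFILL marker above)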
if __name__ == '__main__':
param = [
([6, 25, 42, 52, 53, 54, 58, 66, 67, 70],6,6,),
([-13, -98, 50, -63, 48, 3, -76, 12, -35, 93, 29, 17, 16, 5, -97, -54, -45, -25],16,14,),
([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],28,24,),
([7, 49, 26, 33, 48, 79, 2, 71, 32, 4, 20, 36],9,10,),
([88],0,0,),
([1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0],20,28,),
([2, 2, 4, 5, 7, 12, 12, 14, 14, 16, 17, 29, 29, 31, 32, 39, 41, 47, 48, 49, 51, 54, 58, 58, 59, 60, 73, 78, 80, 81, 82, 83, 84, 85, 90, 95, 97, 99, 99],28,29,),
([-31, -55, 6, 37, 77, 61, 0, 46, -91, -38, 85, -71, 25, 14, 53, 43, 34],15,11,),
([0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],12,17,),
([77, 68, 78, 97, 92, 52, 37, 8, 44, 98, 5, 69, 31, 45, 9, 32, 33, 67, 30, 76, 29, 3, 90, 57, 30, 9, 26, 2, 62, 3, 46, 68, 25, 51, 13, 44, 35, 55],27,20,)
]
filled_function_param = [
([6, 25, 42, 52, 53, 54, 58, 66, 67, 70],6,6,),
([-13, -98, 50, -63, 48, 3, -76, 12, -35, 93, 29, 17, 16, 5, -97, -54, -45, -25],16,14,),
([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],28,24,),
([7, 49, 26, 33, 48, 79, 2, 71, 32, 4, 20, 36],9,10,),
([88],0,0,),
([1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0],20,28,),
([2, 2, 4, 5, 7, 12, 12, 14, 14, 16, 17, 29, 29, 31, 32, 39, 41, 47, 48, 49, 51, 54, 58, 58, 59, 60, 73, 78, 80, 81, 82, 83, 84, 85, 90, 95, 97, 99, 99],28,29,),
([-31, -55, 6, 37, 77, 61, 0, 46, -91, -38, 85, -71, 25, 14, 53, 43, 34],15,11,),
([0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],12,17,),
([77, 68, 78, 97, 92, 52, 37, 8, 44, 98, 5, 69, 31, 45, 9, 32, 33, 67, 30, 76, 29, 3, 90, 57, 30, 9, 26, 2, 62, 3, 46, 68, 25, 51, 13, 44, 35, 55],27,20,)
]
n_success = 0
for i, parameters_set in enumerate(param):
f_filled(*(filled_function_param[i]))
f_gold(*parameters_set)
if parameters_set == filled_function_param[i]:
n_success+=1
print("#Results: %i, %i" % (n_success, len(param))) | 52.907407 | 165 | 0.431222 |
f734d8a1c4282ee4aefbee1bf86ab99face93065 | 930 | py | Python | py3server/swagger_server/test/test_evidence_controller.py | lhannest/pythonBeaconServerStub | 3fee2505f5f7afda9184277b5f6308ff05832e35 | ["MIT"] | null | null | null | py3server/swagger_server/test/test_evidence_controller.py | lhannest/pythonBeaconServerStub | 3fee2505f5f7afda9184277b5f6308ff05832e35 | ["MIT"] | null | null | null | py3server/swagger_server/test/test_evidence_controller.py | lhannest/pythonBeaconServerStub | 3fee2505f5f7afda9184277b5f6308ff05832e35 | ["MIT"] | null | null | null |
# coding: utf-8
from __future__ import absolute_import
from swagger_server.models.inline_response2004 import InlineResponse2004
from . import BaseTestCase
from six import BytesIO
from flask import json
class TestEvidenceController(BaseTestCase):
""" EvidenceController integration test stubs """
def test_get_evidence(self):
"""
Test case for get_evidence
"""
query_string = [('keywords', 'keywords_example'),
('pageNumber', 56),
('pageSize', 56)]
response = self.client.open('/api/evidence/{statementId}'.format(statementId='statementId_example'),
method='GET',
query_string=query_string)
self.assert200(response, "Response body is : " + response.data.decode('utf-8'))
if __name__ == '__main__':
import unittest
unittest.main()
| 29.0625 | 108 | 0.611828 |
f734e09b76bc265ceb72504136dbcb9a86e98111 | 5,912 | py | Python | CORE/engines/constraint.py | geoffreynyaga/ostrich-project | 157cd7a3c3d9014e31ef21ca21de43f04d039997 | ["MIT"] | 15 | 2017-11-08T10:03:26.000Z | 2021-12-21T07:02:44.000Z | CORE/engines/constraint.py | geoffreynyaga/ostrich-project | 157cd7a3c3d9014e31ef21ca21de43f04d039997 | ["MIT"] | 9 | 2020-01-17T15:09:22.000Z | 2022-03-25T19:02:05.000Z | CORE/engines/constraint.py | geoffreynyaga/ostrich-project | 157cd7a3c3d9014e31ef21ca21de43f04d039997 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
##################################################################################
# File: c:\Projects\KENYA ONE PROJECT\CORE\engines\constraint.py #
# Project: c:\Projects\KENYA ONE PROJECT\CORE\engines #
# Created Date: Thursday, January 9th 2020, 8:56:55 pm #
# Author: Geoffrey Nyaga Kinyua ( <info@geoffreynyaga.com> ) #
# ----- #
# Last Modified: Thursday January 9th 2020 8:56:55 pm #
# Modified By: Geoffrey Nyaga Kinyua ( <info@geoffreynyaga.com> ) #
# ----- #
# MIT License #
# #
# Copyright (c) 2020 KENYA ONE PROJECT #
# #
# Permission is hereby granted, free of charge, to any person obtaining a copy of#
# this software and associated documentation files (the "Software"), to deal in #
# the Software without restriction, including without limitation the rights to #
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies #
# of the Software, and to permit persons to whom the Software is furnished to do #
# so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in all #
# copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #
# SOFTWARE. #
# ----- #
# Copyright (c) 2020 KENYA ONE PROJECT #
##################################################################################
import sys
sys.path.append("../")
from CORE.API.db_API import write_to_db, read_from_db
import numpy as np # type: ignore
import matplotlib.pylab as plt # type: ignore
a = np.arange(50)
ws = np.arange(10, 35, 0.01)
cdmin: float = 0.025
write_to_db("cdMin", cdmin)
do = read_from_db("rhoSL")
dalt = read_from_db("altitudeDensity") # AAAAA
k = read_from_db("k")
# v = read_from_db('cruiseSpeed') * 1.688
v: float = 140 * 1.688 # AAAAA
qcruise = 0.5 * dalt * v ** 2 # dynamic pressure at cruise
qtakeoff = 0.5 * do * v ** 2 # dynamic pressure at take-off
turnangle = 40  # turn angle, degrees
loadfactor = 1 / (np.cos(np.radians(turnangle)))  # load factor in a level banked turn (np.cos expects radians)
twturn = (
qcruise
* ((cdmin / ws) + (k * (loadfactor / qcruise) ** 2) * ws)
* (v * 5850 / (0.8 * 550 * 0.6604))
)
# rate of climb
roc = read_from_db("rateOfClimb") * 3.28 * 60 # rate of climb ft/min #AAAAAAA
# Vy=sqrt((2/do)*ws * sqrt( k/(3*cdmin) ))
Vy = 150
Vv = roc / 60
qclimb = 0.5 * do * (Vy ** 2)
twclimb = (
(Vv / Vy) + ((qclimb / ws) * cdmin) + ((qclimb / ws) * cdmin) + ((k / qclimb) * ws)
) * (Vy * 5850 / (0.6 * 550))
# ground run
Sg: int = 1000 # ground run ft
Vlof: float = 70 * 1.688
clto: float = 1.4670
u: float = 0.04
cdto = 0.03
q1 = 0.5 * do * (Vlof / np.sqrt(2)) ** 2
twtakeoff = (
((Vlof ** 2) / (2 * 32.174 * Sg)) + ((q1 * cdto) / ws) + u * (1 - (q1 * clto / ws))
) * (Vlof * 5850 / (0.6 * 550))
# cruise altitude
twcruise = (((qcruise * cdmin) / ws) + ((k / qcruise) * ws)) * (
v * 5850 / (0.6 * 550 * 0.6604)
)
# service ceiling
twservceiling = (
(1.668 / np.sqrt((2 * ws / dalt) * np.sqrt(k / (3 * cdmin))))
+ (4 * np.sqrt(k * cdmin / 3))
) * ((v * 5850) / (0.7 * 550 * 0.6604))
plt.plot(ws, twclimb, label="climb")
plt.plot(ws, twturn, label="turn")
plt.plot(ws, twtakeoff, label="Takeoff")
plt.plot(ws, twservceiling, label="Service Ceiling")
plt.plot(ws, twcruise, label="cruise")
plotWS = read_from_db("WS")
plt.axvline(x=plotWS) ################################
plt.legend(loc="upper left")
if __name__ == "__main__":
plt.show()
def find_nearest(array, value):
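    """Return the index of the element of array closest to value."""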
idx = (np.abs(array - value)).argmin()
return idx
# print(find_nearest(ws, plotWS))
myidx = find_nearest(ws, plotWS)
# cruiseidx = (twcruise[myidx])
# takeoffidx = twtakeoff[myidx]
# climbidx = twclimb[myidx]
# turnidx = twturn[myidx]
# ceilingidx = twservceiling[myidx]
# print([cruiseidx,takeoffidx,climbidx,turnidx,ceilingidx])
def point():
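    """Return the sizing power: the largest BHP demanded by any constraint at the selected wing loading."""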
cruiseidx = twcruise[myidx]
takeoffidx = twtakeoff[myidx]
climbidx = twclimb[myidx]
turnidx = twturn[myidx]
ceilingidx = twservceiling[myidx]
# print([cruiseidx,takeoffidx,climbidx,turnidx,ceilingidx])
# print (cruiseidx,"cruiseidx")
x = np.array([cruiseidx, takeoffidx, climbidx, turnidx, ceilingidx])
idx = x.argmax()
return x[idx]
finalBHP = point()
# print ( finalBHP,"BHP")
write_to_db("finalBHP", finalBHP)
S = (read_from_db("finalMTOW")) / (plotWS * 10.57)
write_to_db("S", S)
| 38.894737 | 88 | 0.508965 |
f73518b051d1cc9646ebc5039c4ebb6aa6cbfa1f | 2,550 | py | Python | demo/voice/main.py | fatash89/atom | 12846c8a3f936ae6c83e7e7b1d2dbb896e63fe66 | ["Apache-2.0"] | 64 | 2019-04-01T20:32:07.000Z | 2021-11-24T17:12:03.000Z | demo/voice/main.py | elementary-robotics/atom | 36aea078c0e029f03e7b9b4768729a683fb32a88 | ["Apache-2.0"] | 291 | 2019-04-01T22:54:31.000Z | 2022-03-31T21:48:47.000Z | demo/voice/main.py | fatash89/atom | 12846c8a3f936ae6c83e7e7b1d2dbb896e63fe66 | ["Apache-2.0"] | 5 | 2019-06-27T22:42:54.000Z | 2022-02-01T23:00:37.000Z |
# atombot.py
import time
from atom import Element
PUBLISH_FREQUENCY = 100
TIME_FOR_WAVEFORM = 5
if __name__ == "__main__":
element = Element("voice_demo")
# Wait for the record element to start up and launch the VNC.
# this can and should be fixed with a heartbeat!
time.sleep(10)
# Start the recording and wait for 5s
data = {
"name": "example",
"t": TIME_FOR_WAVEFORM,
"perm": False,
"e": "waveform",
"s": "serialized",
}
res = element.command_send("record", "start", data, serialize=True)
time.sleep(TIME_FOR_WAVEFORM + 2)
# Strings we'll recognize for the plotting commands. This is pretty
# rudimentary and can be improved with some better parsing/processing/NLP
sinx_strings = ["show sin", "show sign", "show sine"]
cosx_strings = [
"show cos",
"show cosine",
"show coast",
"show coats",
"show cosign",
]
tanx_strings = ["show tan", "showtime"]
print("listening..")
last_id = element._get_redis_timestamp()
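    # Remember the newest stream id so each blocking read only returns entries
    # that arrived after the previous poll.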
while True:
entries = element.entry_read_since(
"voice", "string", last_id=last_id, block=1000
)
if entries:
last_id = entries[0]["id"]
voice_string = entries[0]["data"].decode().lower()
print("Got voice string {}".format(voice_string))
if any(x in voice_string for x in sinx_strings):
print("Plotting sinx")
data = {
"name": "example",
"msgpack": True,
"plots": [{"data": [["x", ["sin"], "value"]]}],
}
res = element.command_send("record", "plot", data, serialize=True)
if any(x in voice_string for x in cosx_strings):
print("Plotting cosx")
data = {
"name": "example",
"msgpack": True,
"plots": [{"data": [["x", ["cos"], "value"]]}],
}
res = element.command_send("record", "plot", data, serialize=True)
if any(x in voice_string for x in tanx_strings):
print("Plotting tanx")
data = {
"name": "example",
"msgpack": True,
"plots": [{"data": [["x", ["tan"], "value"]]}],
}
res = element.command_send("record", "plot", data, serialize=True)
time.sleep(1 / PUBLISH_FREQUENCY)
| 30.722892 | 82 | 0.512549 |
f7352703ed42ee6015d5ca30e57234884d399073 | 1,652 | py | Python | test/testAnonymizationExecutor.py | AutoDash/AutoDash | 3924795a04159f80ea3b65b2172747babd15f35f | ["Apache-2.0"] | 3 | 2020-02-12T01:24:46.000Z | 2020-02-13T00:50:46.000Z | test/testAnonymizationExecutor.py | AutoDash/AutoDash | 3924795a04159f80ea3b65b2172747babd15f35f | ["Apache-2.0"] | 32 | 2020-02-20T10:20:56.000Z | 2022-02-10T01:42:46.000Z | test/testAnonymizationExecutor.py | AutoDash/AutoDash | 3924795a04159f80ea3b65b2172747babd15f35f | ["Apache-2.0"] | 1 | 2020-02-22T02:47:19.000Z | 2020-02-22T02:47:19.000Z |
#!/usr/bin/env python3
import unittest
import os
import shutil
from src.data.VideoItem import VideoItem
from src.data.MetaDataItem import MetaDataItem
from src.executor.FaceBlurrer import FaceBlurrer
from numpy.testing import assert_array_equal, assert_raises
class TestAnonymizationExecutor(unittest.TestCase):
TEST_DIR = os.path.join(os.getcwd(), "anontest")
TEST_FILE = "test.mp4"
DATASET_PATH = "src/lib/anonymization/dataset/input"
ACCEPTED_FILE_EXTENSION = ".mp4"
TEST_FILE_PATH = os.path.join(TEST_DIR, TEST_FILE)
def setUp(self):
        # Create the test directory (the disabled test below copies a sample video into it)
if not os.path.exists(self.TEST_DIR):
os.mkdir(self.TEST_DIR)
def tearDown(self):
# Delete test directory
if os.path.exists(self.TEST_DIR):
shutil.rmtree(self.TEST_DIR)
def test_compiles(self):
self.assertEqual(True, True)
"""
# Test that the executor works with a single video
def test_face_blurrer_single(self):
# Copy video to test directory
shutil.copy2(os.path.join(os.getcwd(), self.DATASET_PATH, "man_face.mp4"), self.TEST_FILE_PATH)
video = VideoItem(filepath = self.TEST_FILE_PATH, metadata=None)
original_data = video.npy
# Running the face blurrer should overwrite the input file
face_blurrer = FaceBlurrer()
new_data = face_blurrer.run(video)
# Now we check that the video data has changed
assert_raises(AssertionError, assert_array_equal, original_data, new_data)
"""
if __name__ == '__main__':
unittest.main()
| 33.04 | 103 | 0.70339 |
f7352a156a29540ef2be6caeb5071e55659eee34 | 6,950 | py | Python | glyco/glucose/util.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | ["BSD-3-Clause"] | 2 | 2021-04-13T21:22:18.000Z | 2021-09-07T02:11:57.000Z | glyco/glucose/util.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | ["BSD-3-Clause"] | 21 | 2020-09-06T02:41:05.000Z | 2022-03-02T04:40:01.000Z | glyco/glucose/util.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | ["BSD-3-Clause"] | null | null | null |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import contextlib
import os
import platform
import re
import tempfile
import shutil
import subprocess
import sys
import urllib
import urlparse
# Copied from pip.wheel.Wheel.wheel_file_re to avoid requiring pip here.
WHEEL_FILE_RE = re.compile(
r"""^(?P<namever>(?P<name>.+?)-(?P<ver>\d.*?))
((-(?P<build>\d.*?))?-(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?)
\.whl)$""",
re.VERBOSE
)
class GlycoError(Exception):
"""Base class for Glyco errors"""
class GlycoSetupError(GlycoError):
"""Issue outside the reach of Glyco that prevents execution."""
class InvalidWheelFile(GlycoError):
"""The file passed is not a valid wheel file.
This includes errors on the file name.
"""
def setup_virtualenv(env_path, relocatable=False):
"""Create a virtualenv in specified location.
The virtualenv contains a standard Python installation, plus setuptools, pip
and wheel.
Args:
env_path (str): where to create the virtual environment.
"""
if os.path.exists(os.path.join(os.path.expanduser('~'), '.pydistutils.cfg')):
raise GlycoSetupError('\n'.join([
'',
'You have a ~/.pydistutils.cfg file, which interferes with the ',
'infra virtualenv environment. Please move it to the side and bootstrap ',
'again. Once infra has bootstrapped, you may move it back.',
'',
'Upstream bug: https://github.com/pypa/virtualenv/issues/88/',
''
]))
print 'Creating environment: %r' % env_path
if os.path.exists(env_path):
print ' Removing existing one...'
shutil.rmtree(env_path, ignore_errors=True)
print ' Building new environment...'
# Import bundled virtualenv lib
import virtualenv # pylint: disable=F0401
virtualenv.create_environment(
env_path, search_dirs=virtualenv.file_search_dirs())
if relocatable:
print ' Make environment relocatable'
virtualenv.make_environment_relocatable(env_path)
print 'Done creating environment'
def platform_tag():
if sys.platform.startswith('linux'):
return '_{0}_{1}'.format(*platform.linux_distribution())
return ''
class Virtualenv(object):
def __init__(self, prefix='glyco-', keep_directory=False):
"""Helper class to run commands from virtual environments.
Keyword Args:
prefix (str): prefix to the temporary directory used to create the
virtualenv.
keep_directory (boolean): if True the temporary virtualenv directory is
kept around instead of being deleted. Useful mainly for debugging.
Returns: self. Only the check_call and check_output methods are meant to be
used inside the with block.
"""
self._prefix = prefix
self._keep_directory = keep_directory
# Where the virtualenv is
self._venvdir = None
self._bin_dir = 'Scripts' if sys.platform.startswith('win') else 'bin'
def check_call(self, args, **kwargs):
"""Run a command from inside the virtualenv using check_call.
Args:
cmd (str): name of the command. Must be found in the 'bin' directory of
the virtualenv.
args (list of strings): arguments passed to the command.
Keyword Args:
kwargs: keyword arguments passed to subprocess.check_output
"""
subprocess.check_call(
(os.path.join(self._venvdir, self._bin_dir, args[0]),) + tuple(args[1:]),
**kwargs)
def check_output(self, args, **kwargs):
"""Run a command from inside the virtualenv using check_output.
Args:
cmd (str): name of the command. Must be found in the 'bin' directory of
the virtualenv.
args (list of strings): arguments passed to the command.
Keyword Args:
kwargs: keyword arguments passed to subprocess.check_output
"""
return subprocess.check_output(
(os.path.join(self._venvdir, self._bin_dir, args[0]),) + tuple(args[1:]),
**kwargs)
def __cleanup_venv(self):
"""Remove the virtualenv directory"""
try:
# TODO(pgervais,496347) Make this work reliably on Windows.
shutil.rmtree(self._venvdir, ignore_errors=True)
except OSError as ex:
print >> sys.stderr, (
"ERROR: {!r} while cleaning up {!r}".format(ex, self._venvdir))
self._venvdir = None
def __enter__(self):
self._venvdir = tempfile.mkdtemp('', self._prefix, None)
try:
setup_virtualenv(self._venvdir)
except Exception:
self.__cleanup_venv()
raise
return self
def __exit__(self, err_type, value, tb):
if self._venvdir and not self._keep_directory:
self.__cleanup_venv()
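# A minimal usage sketch (the commands shown are illustrative): the context
# manager builds a throw-away env, and check_call/check_output run its binaries.
#
#   with Virtualenv(prefix='glyco-demo-') as venv:
#     venv.check_call(['pip', 'install', 'wheel'])
#     print venv.check_output(['python', '--version'])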
# dir is a built-in. We're matching the Python 3 function signature here.
# pylint: disable=redefined-builtin
@contextlib.contextmanager
def temporary_directory(suffix="", prefix="tmp", dir=None,
keep_directory=False):
"""Create and return a temporary directory. This has the same
behavior as mkdtemp but can be used as a context manager. For
example:
with temporary_directory() as tmpdir:
...
Upon exiting the context, the directory and everything contained
in it are removed.
Args:
suffix, prefix, dir: same arguments as for tempfile.mkdtemp.
keep_directory (bool): if True, do not delete the temporary directory
when exiting. Useful for debugging.
Returns:
tempdir (str): full path to the temporary directory.
"""
tempdir = None # Handle mkdtemp raising an exception
try:
tempdir = tempfile.mkdtemp(suffix, prefix, dir)
yield tempdir
finally:
if tempdir and not keep_directory:
try:
# TODO(pgervais,496347) Make this work reliably on Windows.
shutil.rmtree(tempdir, ignore_errors=True)
except OSError as ex:
print >> sys.stderr, (
"ERROR: {!r} while cleaning up {!r}".format(ex, tempdir))
def path2fileurl(path):
"""Convert a local absolute path to a file:/// URL
There is no way to provide a relative path in a file:// URI, because there
is no notion of 'working directory'.
Output conforms to https://tools.ietf.org/html/rfc1630
"""
if not os.path.isabs(path):
raise ValueError('Only absolute paths can be turned into a file url. '
'Got: %s' % path)
path_comp = urllib.pathname2url(path)
return 'file:///' + path_comp.lstrip('/')
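# e.g. path2fileurl('/tmp/pkg.whl') -> 'file:///tmp/pkg.whl'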
def fileurl2path(url):
"""Convert a file:// URL to a local path.
Note that per https://tools.ietf.org/html/rfc1630 page 18 a host name
should be provided. So
file://localhost/file/name points to /file/name on localhost
file:///file/name points to /file/name ('localhost' is optional)
file://file/name points to /name on machine 'file'.
"""
if not url.startswith('file://'):
raise ValueError('URL must start with "file://". Got %s' % url)
parts = urlparse.urlparse(url)
return urllib.url2pathname(parts.path)
| 30.61674 | 80 | 0.682734 |
f7354da7414334f4bb43360cb78f171a1a3a8177 | 6,266 | py | Python | oscar/apps/search/views.py | makielab/django-oscar | 0a325cd0f04a4278201872b2e163868b72b6fabe | ["BSD-3-Clause"] | 1 | 2015-11-07T12:37:50.000Z | 2015-11-07T12:37:50.000Z | oscar/apps/search/views.py | makielab/django-oscar | 0a325cd0f04a4278201872b2e163868b72b6fabe | ["BSD-3-Clause"] | null | null | null | oscar/apps/search/views.py | makielab/django-oscar | 0a325cd0f04a4278201872b2e163868b72b6fabe | ["BSD-3-Clause"] | null | null | null |
import json
from django.http import HttpResponse, HttpResponseRedirect
from django.views.generic.base import View
from django.conf import settings
from django.db.models import get_model
from haystack.query import SearchQuerySet
from haystack import views
from purl import URL
Product = get_model('catalogue', 'Product')
class SuggestionsView(View):
"""
Auto suggest view
Returns the suggestions in JSON format (especially suited for consumption
by jQuery autocomplete) """
suggest_limit = settings.OSCAR_SEARCH_SUGGEST_LIMIT
def get(self, request):
context = self.get_context_data()
return self.render_to_response(context)
def get_context_data(self):
'''
Creates a list of suggestions
'''
query_term = self.request.GET['query_term']
query_set = SearchQuerySet().filter(text__contains=query_term)[
:self.suggest_limit]
context = []
for item in query_set:
context.append({
'label': item.object.title,
'url': item.object.get_absolute_url(),
})
return context
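    # The serialized payload has the shape, e.g.:
    #   [{"label": "<product title>", "url": "<product absolute URL>"}, ...]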
def render_to_response(self, context):
"Returns a JSON response containing 'context' as payload"
return self.get_json_response(self.convert_context_to_json(context))
def get_json_response(self, content, **httpresponse_kwargs):
"Construct an `HttpResponse` object."
return HttpResponse(content,
content_type='application/json',
**httpresponse_kwargs)
def convert_context_to_json(self, context):
"Convert the context into a JSON object"
return json.dumps(context)
class FacetedSearchView(views.FacetedSearchView):
def extra_context(self):
extra = super(FacetedSearchView, self).extra_context()
if 'fields' not in extra['facets']:
# Looks like Solr is not responding correctly
return extra
# Convert facet data into a more useful datastructure
# Field facets
facet_data = {}
base_url = URL(self.request.get_full_path())
selected = dict(
map(lambda x: x.split(':'), self.form.selected_facets))
for field, facets in extra['facets']['fields'].items():
facet_data[field] = []
for name, count in facets:
# Ignore zero-count facets for field
if count == 0:
continue
field_filter = '%s_exact' % field
datum = {
'name': name,
'count': count}
if selected.get(field_filter, None) == name:
# This filter is selected - build the 'deselect' URL
datum['selected'] = True
url = base_url.remove_query_param(
'selected_facets', '%s:%s' % (
field_filter, name))
datum['deselect_url'] = url.as_string()
else:
# This filter is not selected - built the 'select' URL
datum['selected'] = False
url = base_url.append_query_param(
'selected_facets', '%s:%s' % (
field_filter, name))
datum['select_url'] = url.as_string()
facet_data[field].append(datum)
# Query facets
for key, facet in settings.OSCAR_SEARCH_FACETS['queries'].items():
facet_data[key] = []
for name, query in facet['queries']:
field_filter = '%s_exact' % facet['field']
match = '%s_exact:%s' % (facet['field'], query)
if not match in extra['facets']['queries']:
datum = {
'name': name,
'count': 0,
}
else:
datum = {
'name': name,
'count': extra['facets']['queries'][match],
}
if selected.get(field_filter, None) == query:
# Selected
datum['selected'] = True
url = base_url.remove_query_param(
'selected_facets', match)
datum['deselect_url'] = url.as_string()
else:
datum['selected'] = False
url = base_url.append_query_param(
'selected_facets', match)
datum['select_url'] = url.as_string()
facet_data[key].append(datum)
extra['facet_data'] = facet_data
return extra
class MultiFacetedSearchView(FacetedSearchView):
"""
Search view for multifaceted searches
"""
template = 'search/results.html'
def __call__(self, request, *args, **kwargs):
"""
Generates the actual response to the search.
Relies on internal, overridable methods to construct the response.
"""
# Look for UPC match
query = request.GET.get('q', '').strip()
try:
item = Product._default_manager.get(upc=query)
return HttpResponseRedirect(item.get_absolute_url())
except Product.DoesNotExist:
pass
return super(MultiFacetedSearchView, self).__call__(request, *args, **kwargs)
@property
def __name__(self):
return "MultiFacetedSearchView"
def extra_context(self):
"""
Adds details about the facets applied
"""
extra = super(MultiFacetedSearchView, self).extra_context()
if hasattr(self.form, 'cleaned_data') and 'selected_facets' in self.form.cleaned_data:
extra['facets_applied'] = []
for f in self.form.cleaned_data['selected_facets'].split("|"):
facet = f.split(":")
extra['facets_applied'].append({
'facet': facet[0][:-6], # removing the _exact suffix that haystack uses for some reason
'value' : facet[1].strip('"')
})
return extra
| 36.219653 | 107 | 0.543568 |
f735efd81dd59271b71383463bdf088ddbe47517 | 1,656 | py | Python | userbot/plugins/confuse.py | justteen/BUZZ-USERBOT | 55651cce150e1d04d2c61efb2565ef9f46b42933 | ["BSL-1.0"] | null | null | null | userbot/plugins/confuse.py | justteen/BUZZ-USERBOT | 55651cce150e1d04d2c61efb2565ef9f46b42933 | ["BSL-1.0"] | null | null | null | userbot/plugins/confuse.py | justteen/BUZZ-USERBOT | 55651cce150e1d04d2c61efb2565ef9f46b42933 | ["BSL-1.0"] | null | null | null |
"""Emoji
Available Commands:
.adi"""
import asyncio
from telethon import events
@borg.on(events.NewMessage(pattern=r"\.(.*)", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.3
animation_ttl = range(0, 15)
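    # 15 frames at ~0.3 s each: the message is edited through animation_chars over ~4.5 s.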
input_str = event.pattern_match.group(1)
if input_str == "adi":
await event.edit(input_str)
animation_chars = [
"⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜",
"⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬛⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜",
"⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬛⬛⬛⬜⬜\n⬜⬜⬛⬜⬛⬜⬜\n⬜⬜⬛⬛⬛⬜⬜\n⬜⬜⬜⬜⬜⬜⬜\n⬜⬜⬜⬜⬜⬜⬜",
"⬜⬜⬜⬜⬜⬜⬜\n⬜⬛⬛⬛⬛⬛⬜\n⬜⬛⬜⬜⬜⬛⬜\n⬜⬛⬜⬜⬜⬛⬜\n⬜⬛⬜⬜⬜⬛⬜\n⬜⬛⬛⬛⬛⬛⬜\n⬜⬜⬜⬜⬜⬜⬜",
"⬛⬛⬛⬛⬛⬛⬛\n⬛⬜⬜⬜⬜⬜⬛\n⬛⬜⬜⬜⬜⬜⬛\n⬛⬜⬜⬜⬜⬜⬛\n⬛⬜⬜⬜⬜⬜⬛\n⬛⬜⬜⬜⬜⬜⬛\n⬛⬛⬛⬛⬛⬛⬛",
"⬛⬜⬛⬜⬛⬜⬛⬜\n⬜⬛⬜⬛⬜⬛⬜⬛\n⬛⬜⬛⬜⬛⬜⬛⬜\n⬜⬛⬜⬛⬜⬛⬜⬛\n⬛⬜⬛⬜⬛⬜⬛⬜\n⬜⬛⬜⬛⬜⬛⬜⬛\n⬛⬜⬛⬜⬛⬜⬛⬜\n⬜⬛⬜⬛⬜⬛⬜⬛",
"⬜⬛⬜⬛⬜⬛⬜⬛\n⬛⬜⬛⬜⬛⬜⬛⬜\n⬜⬛⬜⬛⬜⬛⬜⬛\n⬛⬜⬛⬜⬛⬜⬛⬜\n⬜⬛⬜⬛⬜⬛⬜⬛\n⬛⬜⬛⬜⬛⬜⬛⬜\n⬜⬛⬜⬛⬜⬛⬜⬛\n⬛⬜⬛⬜⬛⬜⬛⬜",
"⬜⬜⬜⬜⬜⬜⬜\n⬜⬛⬛⬛⬛⬛⬜\n⬜⬛⬜⬜⬜⬛⬜\n⬜⬛⬜⬛⬜⬛⬜\n⬜⬛⬜⬜⬜⬛⬜\n⬜⬛⬛⬛⬛⬛⬜\n⬜⬜⬜⬜⬜⬜⬜",
"⬛⬛⬛⬛⬛⬛⬛\n⬛⬜⬜⬜⬜⬜⬛\n⬛⬜⬛⬛⬛⬜⬛\n⬛⬜⬛⬜⬛⬜⬛\n⬛⬜⬛⬛⬛⬜⬛\n⬛⬜⬜⬜⬜⬜⬛\n⬛⬛⬛⬛⬛⬛⬛",
"⬜⬜⬜⬜⬜⬜⬜\n⬜⬛⬛⬛⬛⬛⬜\n⬜⬛⬜⬜⬜⬛⬜\n⬜⬛⬜⬛⬜⬛⬜\n⬜⬛⬜⬜⬜⬛⬜\n⬜⬛⬛⬛⬛⬛⬜\n⬜⬜⬜⬜⬜⬜⬜",
"⬛⬛⬛⬛⬛⬛⬛\n⬛⬜⬜⬜⬜⬜⬛\n⬛⬜⬛⬛⬛⬜⬛\n⬛⬜⬛⬜⬛⬜⬛\n⬛⬜⬛⬛⬛⬜⬛\n⬛⬜⬜⬜⬜⬜⬛\n⬛⬛⬛⬛⬛⬛⬛",
"⬜⬜⬜⬜⬜⬜⬜\n⬜⬛⬛⬛⬛⬛⬜\n⬜⬛⬜⬜⬜⬛⬜\n⬜⬛⬜⬛⬜⬛⬜\n⬜⬛⬜⬜⬜⬛⬜\n⬜⬛⬛⬛⬛⬛⬜\n⬜⬜⬜⬜⬜⬜⬜",
"⬛⬛⬛⬛⬛\n⬛⬜⬜⬜⬛\n⬛⬜⬛⬜⬛\n⬛⬜⬜⬜⬛\n⬛⬛⬛⬛⬛",
"⬜⬜⬜\n⬜⬛⬜\n⬜⬜⬜",
"[👉🔴👈](https://t.me/black_lightning_channel)",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 15])
| 33.12 | 93 | 0.278986 |
f735f5fbd64d2154b36d035f777107f689429cdb | 611 | py | Python | test/python/LIM2Metrics/py3/base/common/Prototype/Prototype.py | sagodiz/SonarQube-plug-in | 4f8e111baecc4c9f9eaa5cd3d7ebeb1e365ace2c | ["BSD-4-Clause"] | 20 | 2015-06-16T17:39:10.000Z | 2022-03-20T22:39:40.000Z | test/python/LIM2Metrics/py3/base/common/Prototype/Prototype.py | sagodiz/SonarQube-plug-in | 4f8e111baecc4c9f9eaa5cd3d7ebeb1e365ace2c | ["BSD-4-Clause"] | 29 | 2015-12-29T19:07:22.000Z | 2022-03-22T10:39:02.000Z | test/python/LIM2Metrics/py3/base/common/Prototype/Prototype.py | sagodiz/SonarQube-plug-in | 4f8e111baecc4c9f9eaa5cd3d7ebeb1e365ace2c | ["BSD-4-Clause"] | 12 | 2015-08-28T01:22:18.000Z | 2021-09-25T08:17:31.000Z |
import copy
#
# Prototype Class
#
class Cookie:
def __init__(self, name):
self.name = name
def clone(self):
return copy.deepcopy(self)
#
# Concrete Prototypes to clone
#
class CoconutCookie(Cookie):
def __init__(self):
Cookie.__init__(self, 'Coconut')
#
# Client Class
#
class CookieMachine:
def __init__(self, cookie):
self.cookie = cookie
def make_cookie(self):
return self.cookie.clone()
if __name__ == '__main__':
prot = CoconutCookie()
cm = CookieMachine(prot)
for i in range(10):
temp_cookie = cm.make_cookie()
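        assert temp_cookie is not prot  # each clone from the prototype is an independent deep copy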
| 16.972222 | 40 | 0.630115 |
f7360b1ff28ca8cb8ff567284e2ce156676903d0 | 134 | py | Python | monkeys-and-frogs-on-fire/server/__main__.py | markjoshua12/game-jam-2020 | 846dd052d649a609ab7a52ac0f4dcbeb71781c3b | ["MIT"] | 15 | 2020-04-17T12:02:14.000Z | 2022-03-16T03:01:34.000Z | monkeys-and-frogs-on-fire/server/__main__.py | markjoshua12/game-jam-2020 | 846dd052d649a609ab7a52ac0f4dcbeb71781c3b | ["MIT"] | 49 | 2020-04-18T21:14:57.000Z | 2022-01-13T03:05:09.000Z | monkeys-and-frogs-on-fire/server/__main__.py | markjoshua12/game-jam-2020 | 846dd052d649a609ab7a52ac0f4dcbeb71781c3b | ["MIT"] | 55 | 2020-04-17T12:01:11.000Z | 2021-12-28T10:14:02.000Z |
from server.server import Server
def main():
server = Server(__file__)
server.run()
if __name__ == '__main__':
main()
| 12.181818 | 32 | 0.641791 |
f736559162d2fb353e82402c79261dc665f58d3e | 425 | py | Python | pytorch_forecasting/__init__.py | KazukiNoto/pytorch-forecasting | 8a1636388e091456f042f999892dd52733903dd6 | ["MIT"] | 1 | 2021-06-09T09:51:13.000Z | 2021-06-09T09:51:13.000Z | pytorch_forecasting/__init__.py | KazukiNoto/pytorch-forecasting | 8a1636388e091456f042f999892dd52733903dd6 | ["MIT"] | null | null | null | pytorch_forecasting/__init__.py | KazukiNoto/pytorch-forecasting | 8a1636388e091456f042f999892dd52733903dd6 | ["MIT"] | 1 | 2021-06-15T11:31:44.000Z | 2021-06-15T11:31:44.000Z |
"""
PyTorch Forecasting package for timeseries forecasting with PyTorch.
"""
from pytorch_forecasting.data import EncoderNormalizer, GroupNormalizer, TimeSeriesDataSet
from pytorch_forecasting.models import Baseline, NBeats, TemporalFusionTransformer
__all__ = [
"TimeSeriesDataSet",
"GroupNormalizer",
"EncoderNormalizer",
"TemporalFusionTransformer",
"NBeats",
"Baseline",
]
__version__ = "0.0.0"
| 25 | 90 | 0.764706 |
f7366798202eb5b63a293458869820181b482fe2 | 10,620 | py | Python | ramses_rf/protocol/helpers.py | zxdavb/evohome_rf | dd7233abde1c3b1e645cfabcf3ccc96d0d2c381b | ["MIT"] | 24 | 2019-12-12T20:54:39.000Z | 2021-03-25T15:40:26.000Z | ramses_rf/protocol/helpers.py | zxdavb/evohome_rf | dd7233abde1c3b1e645cfabcf3ccc96d0d2c381b | ["MIT"] | 9 | 2020-10-21T23:01:06.000Z | 2021-04-22T09:59:50.000Z | ramses_rf/protocol/helpers.py | zxdavb/evohome_rf | dd7233abde1c3b1e645cfabcf3ccc96d0d2c381b | ["MIT"] | 9 | 2019-12-03T21:05:11.000Z | 2021-04-02T11:41:42.000Z |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
"""RAMSES RF - Protocol/Transport layer.
Helper functions.
"""
import ctypes
import sys
import time
from datetime import datetime as dt
from typing import Optional, Union
from .const import DEVICE_TYPES, NON_DEVICE_ID, NUL_DEVICE_ID
class FILETIME(ctypes.Structure):
"""Data structure for GetSystemTimePreciseAsFileTime()."""
_fields_ = [("dwLowDateTime", ctypes.c_uint), ("dwHighDateTime", ctypes.c_uint)]
def dt_now() -> dt:
"""Return the current datetime as a local/naive datetime object.
This is slower, but potentially more accurate, than dt.now(), and is used mainly for
packet timestamps.
"""
return dt.fromtimestamp(timestamp())
def dt_str() -> str:
"""Return the current datetime as a isoformat string."""
return dt_now().isoformat(timespec="microseconds")
def timestamp() -> float:
"""Return the number of seconds since the Unix epoch.
Return an accurate value, even for Windows-based systems.
""" # see: https://www.python.org/dev/peps/pep-0564/
if sys.platform != "win32":
return time.time_ns() / 1e9 # since 1970-01-01T00:00:00Z, time.gmtime(0)
file_time = FILETIME()
ctypes.windll.kernel32.GetSystemTimePreciseAsFileTime(ctypes.byref(file_time))
_time = (file_time.dwLowDateTime + (file_time.dwHighDateTime << 32)) / 1e7
return _time - 134774 * 24 * 60 * 60 # otherwise, is since 1601-01-01T00:00:00Z
def _precision_v_cost():
import math
#
LOOPS = 10 ** 6
#
print("time.time_ns(): %s" % time.time_ns())
print("time.time(): %s\r\n" % time.time())
#
starts = time.time_ns()
min_dt = [abs(time.time_ns() - time.time_ns()) for _ in range(LOOPS)]
min_dt = min(filter(bool, min_dt))
print("min delta time_ns(): %s ns" % min_dt)
print("duration time_ns(): %s ns\r\n" % (time.time_ns() - starts))
#
starts = time.time_ns()
min_dt = [abs(time.time() - time.time()) for _ in range(LOOPS)]
min_dt = min(filter(bool, min_dt))
print("min delta time(): %s ns" % math.ceil(min_dt * 1e9))
print("duration time(): %s ns\r\n" % (time.time_ns() - starts))
#
starts = time.time_ns()
min_dt = [abs(timestamp() - timestamp()) for _ in range(LOOPS)]
min_dt = min(filter(bool, min_dt))
print("min delta timestamp(): %s ns" % math.ceil(min_dt * 1e9))
print("duration timestamp(): %s ns\r\n" % (time.time_ns() - starts))
#
LOOPS = 10 ** 4
#
starts = time.time_ns()
min_td = [abs(dt.now() - dt.now()) for _ in range(LOOPS)]
min_td = min(filter(bool, min_td))
print("min delta dt.now(): %s ns" % math.ceil(min_dt * 1e9))
print("duration dt.now(): %s ns\r\n" % (time.time_ns() - starts))
#
starts = time.time_ns()
min_td = [abs(dt_now() - dt_now()) for _ in range(LOOPS)]
min_td = min(filter(bool, min_td))
print("min delta dt_now(): %s ns" % math.ceil(min_dt * 1e9))
print("duration dt_now(): %s ns\r\n" % (time.time_ns() - starts))
#
starts = time.time_ns()
min_td = [
abs(
(dt_now if sys.platform == "win32" else dt.now)()
- (dt_now if sys.platform == "win32" else dt.now)()
)
for _ in range(LOOPS)
]
min_td = min(filter(bool, min_td))
print("min delta dt_now(): %s ns" % math.ceil(min_dt * 1e9))
print("duration dt_now(): %s ns\r\n" % (time.time_ns() - starts))
#
dt_nov = dt_now if sys.platform == "win32" else dt.now
starts = time.time_ns()
min_td = [abs(dt_nov() - dt_nov()) for _ in range(LOOPS)]
min_td = min(filter(bool, min_td))
print("min delta dt_now(): %s ns" % math.ceil(min_dt * 1e9))
print("duration dt_now(): %s ns\r\n" % (time.time_ns() - starts))
def double(val, factor=1) -> Optional[float]:
"""Return a double, used by 31DA."""
if val == "7FFF":
return
result = int(val, 16)
assert result < 32767
return result if factor == 1 else result / factor
def flag8(byte, lsb=False) -> list:
"""Split a byte (as a str) into a list of 8 bits, MSB first by default."""
if lsb is True:
return [(bytes.fromhex(byte)[0] & (1 << x)) >> x for x in range(8)]
    return [(bytes.fromhex(byte)[0] & (1 << x)) >> x for x in reversed(range(8))]
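# Hedged usage sketch (not part of the original module): the byte value 0xC8
# below is an assumed example, chosen to show the MSB-first vs LSB-first order.
def _example_flag8():
    assert flag8("C8") == [1, 1, 0, 0, 1, 0, 0, 0]  # 0xC8 = 0b11001000, MSB first
    assert flag8("C8", lsb=True) == [0, 0, 0, 1, 0, 0, 1, 1]  # same byte, LSB first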
def percent(value: str) -> Optional[float]: # a percentage 0-100% (0.0 to 1.0)
"""Return a percentage, 0-100% with resolution of 0.5%."""
assert len(value) == 2, f"percent({value}): len is not 2"
if value in {"EF", "FE", "FF"}: # TODO: diff b/w FE (seen with 3150) & FF
return
    assert int(value, 16) <= 200, f"max value should be 0xC8, not 0x{value}"
return int(value, 16) / 200
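# Hedged usage sketch: 0x64 and 0xC8 are assumed example values; 0xEF is one of
# the documented N/A sentinels.
def _example_percent():
    assert percent("64") == 0.5   # 0x64 = 100 -> 100 / 200
    assert percent("C8") == 1.0   # 0xC8 = 200 -> full scale
    assert percent("EF") is None  # sentinel meaning "not available"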
def bool_from_hex(value: str) -> Optional[bool]: # either 00 or C8
"""Return a boolean."""
assert value in {"00", "C8", "FF"}, value
return {"00": False, "C8": True}.get(value)
def date_from_hex(value: str) -> Optional[str]: # YY-MM-DD
"""Return a date string in the format YY-MM-DD."""
assert len(value) == 8, "len is not 8"
if value == "FFFFFFFF":
return
return dt(
year=int(value[4:8], 16),
month=int(value[2:4], 16),
day=int(value[:2], 16) & 0b11111, # 1st 3 bits: DayOfWeek
).strftime("%Y-%m-%d")
def dtm_from_hex(value: str) -> str: # from parsers
"""Convert a hex string to an (naive, local) isoformat string."""
# 00141B0A07E3 (...HH:MM:00) for system_mode, zone_mode (schedules?)
# 0400041C0A07E3 (...HH:MM:SS) for sync_datetime
if value == "FF" * 6:
return None
if len(value) == 12:
value = f"00{value}"
# assert len(value) == 14
return dt(
year=int(value[10:14], 16),
month=int(value[8:10], 16),
day=int(value[6:8], 16),
hour=int(value[4:6], 16) & 0b11111, # 1st 3 bits: DayOfWeek
minute=int(value[2:4], 16),
second=int(value[:2], 16) & 0b1111111, # 1st bit: used for DST
).isoformat(timespec="seconds")
def dtm_to_hex(dtm: Union[str, dt]) -> str:
"""Convert a datetime (isoformat string, or datetime obj) to a hex string."""
def _dtm_to_hex(tm_year, tm_mon, tm_mday, tm_hour, tm_min, tm_sec, *args):
return f"{tm_min:02X}{tm_hour:02X}{tm_mday:02X}{tm_mon:02X}{tm_year:04X}"
if dtm is None:
return "FF" * 6
if isinstance(dtm, str):
try:
dtm = dt.fromisoformat(dtm)
except ValueError:
raise ValueError("Invalid datetime isoformat string")
elif not isinstance(dtm, dt):
raise TypeError("Invalid datetime object")
# if dtm < dt.now() + td(minutes=1):
# raise ValueError("Invalid datetime")
return _dtm_to_hex(*dtm.timetuple())
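# Hedged round-trip sketch: the datetime below is an assumed example value.
# dtm_to_hex() packs minute/hour/day/month/year into 6 bytes and dtm_from_hex()
# reverses it (a 12-character value is padded with "00" seconds internally).
def _example_dtm_round_trip():
    assert dtm_to_hex("2019-10-27T14:20:00") == "140E1B0A07E3"
    assert dtm_from_hex("140E1B0A07E3") == "2019-10-27T14:20:00"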
def dts_from_hex(value: str) -> Optional[str]:
"""YY-MM-DD HH:MM:SS."""
if value == "00000000007F":
return None
_seqx = int(value, 16)
return dt(
year=(_seqx & 0b1111111 << 24) >> 24,
month=(_seqx & 0b1111 << 36) >> 36,
day=(_seqx & 0b11111 << 31) >> 31,
hour=(_seqx & 0b11111 << 19) >> 19,
minute=(_seqx & 0b111111 << 13) >> 13,
second=(_seqx & 0b111111 << 7) >> 7,
).strftime("%Y-%m-%dT%H:%M:%S")
def dts_to_hex(dtm: Union[str, dt]) -> str: # TODO: WIP
"""YY-MM-DD HH:MM:SS."""
if dtm is None:
return "00000000007F"
if isinstance(dtm, str):
try:
dtm = dt.fromisoformat(dtm) # TODO: YY-MM-DD, not YYYY-MM-DD
except ValueError:
raise ValueError("Invalid datetime isoformat string")
elif not isinstance(dtm, dt):
raise TypeError("Invalid datetime object")
(tm_year, tm_mon, tm_mday, tm_hour, tm_min, tm_sec, *args) = dtm.timetuple()
val = sum(
(
tm_year % 100 << 24,
tm_mon << 36,
tm_mday << 31,
tm_hour << 19,
tm_min << 13,
tm_sec << 7,
)
)
return f"{val:012X}"
def str_from_hex(value: str) -> Optional[str]: # printable ASCII characters
"""Return a string of printable ASCII characters."""
# result = bytearray.fromhex(value).split(b"\x7F")[0] # TODO: needs checking
result = bytearray([x for x in bytearray.fromhex(value) if 31 < x < 127])
return result.decode("ascii").strip() if result else None
def str_to_hex(value: str) -> str:
"""Convert a string to a variable-length ASCII hex string."""
return "".join(f"{ord(x):02X}" for x in value)
# return value.encode().hex()
def temp_from_hex(value: str) -> Union[float, bool, None]:
"""Convert a 2's complement 4-byte hex string to an float."""
assert len(value) == 4, f"temp_from_hex({value}): should be 4 bytes long"
if value == "31FF": # means: N/A (== 127.99, 2s complement), signed?
return
if value == "7EFF": # possibly only for setpoints? unsigned?
return False
if value == "7FFF": # also: FFFF?, means: N/A (== 327.67)
return
temp = int(value, 16)
return (temp if temp < 2 ** 15 else temp - 2 ** 16) / 100
def temp_to_hex(value: float) -> str:
"""Convert a float to a 2's complement 4-byte hex string."""
assert (
not value or -(2 ** 7) <= value < 2 ** 7
), f"temp_to_hex({value}): is out of 2's complement range"
if value is None:
return "7FFF" # or: "31FF"?
if value is False:
return "7EFF"
temp = int(value * 100)
return f"{temp if temp >= 0 else temp + 2 ** 16:04X}"
def valve_demand(value: str) -> dict:
# a damper restricts flow, a valve permits flow
demand = int(value, 16)
if demand & 0xF0 == 0xF0:
VALVE_STATE = {
"F0": "open_circuit",
"F1": "short_circuit",
"FD": "valve_stuck", # damper/valve stuck
"FE": "actuator_stuck",
} # VALVE_STATE.get(value, "malfunction")
return {
"heat_demand": None,
"fault": VALVE_STATE.get(value, "malfunction"),
}
assert demand <= 200
return {"heat_demand": demand / 200}
def hex_id_to_dec(device_hex: str, friendly_id=False) -> str:
"""Convert (say) '06368E' to '01:145038' (or 'CTL:145038')."""
if device_hex == "FFFFFE": # aka '63:262142'
return "NUL:262142" if friendly_id else NUL_DEVICE_ID
if not device_hex.strip(): # aka '--:------'
return f"{'':10}" if friendly_id else NON_DEVICE_ID
_tmp = int(device_hex, 16)
dev_type = f"{(_tmp & 0xFC0000) >> 18:02d}"
if friendly_id:
dev_type = DEVICE_TYPES.get(dev_type, f"{dev_type:<3}")
return f"{dev_type}:{_tmp & 0x03FFFF:06d}"
| 34.14791 | 88 | 0.591149 |
f736712970df93b5bef6cd0469bab29486ab504a | 12,232 | py | Python | PythonProjects/09-MoreImplementingClasses/src/m2_baby_class.py | much2mutch/csse120-public | 4f862a6deb7a5373fb5723fb2a23e4042e4d4157 | [
"MIT"
] | null | null | null | PythonProjects/09-MoreImplementingClasses/src/m2_baby_class.py | much2mutch/csse120-public | 4f862a6deb7a5373fb5723fb2a23e4042e4d4157 | [
"MIT"
] | null | null | null | PythonProjects/09-MoreImplementingClasses/src/m2_baby_class.py | much2mutch/csse120-public | 4f862a6deb7a5373fb5723fb2a23e4042e4d4157 | [
"MIT"
] | null | null | null | # Note: printed messages concatenate the baby's name with its punctuation so no extra space appears.
"""
A Baby class and functions that use/test it.
Authors: Dave Fisher, David Mutchler, Vibha Alangar, Matt Boutell,
Mark Hays, Amanda Stouder, Derek Whitley, their colleagues,
and Seth Mutchler.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
import random
def main():
""" Runs the tests of the Baby class. """
print("UN-comment the following TESTS, one by one, when you are ready.")
# UN-comment the following, one by one, when you are ready to TEST.
run_test_1()
run_test_2()
###############################################################################
# Done: 2. In this module you will implement and test a Baby class.
# Here is an OVERVIEW of the steps you will take to do so.
# _
# Step 2 (this step): Read this overview of this module.
# Step 3: Read and understand the SPECIFICATION for the Baby class.
# Step 4: Read and understand the TESTS for the Baby class.
# We supplied those tests.
# Step 5: IMPLEMENT and TEST the Baby class.
# _
# Once you understand this OVERVIEW, mark this _TODO_ as DONE.
###############################################################################
###############################################################################
# DONE: 3. SPECIFICATION (read the following):
# Here (below) are the methods that you must implement in your Baby class:
# ----------------------------------------------------------------------------
# _
# Constructor method (that is, the __init__ method):
# What comes in:
# -- self
# -- a string for the name of the Baby
# What goes out: Nothing (i.e., None).
# Side effects:
# -- Prints "Hello baby <your baby's name>!"
# -- Sets instance variables as needed
# [YOU FIGURE OUT WHAT IS NEEDED AS YOU IMPLEMENT THE METHODS!]
# Example:
# b = Baby("McKinley")
# causes the following to be printed on the Console:
# Hello baby McKinley!
# _
# feed_baby:
# What comes in:
# -- self
# What goes out: Nothing (i.e., None).
# Side effects:
# -- Prints "Thank you for feeding baby <your baby's name>."
# -- Modifies instance variables as needed.
# Example:
# b = Baby("Joshua")
# b.feed_baby()
# causes the following to be printed on the Console:
# Hello baby Joshua!
# Thank you for feeding baby Joshua.
# _
# hour_passes
# What comes in:
# -- self
# What goes out: Nothing (i.e., None).
# Side effects:
# -- If this is the FIRST time this method has been called
# since this Baby was created or last fed, then this method prints:
# "Baby <your baby's name> is sleeping."
# _
# -- If this is the SECOND time this method has been called
# since baby was created or last fed, then this method prints:
# "Baby <your baby's name> is awake. Time for food."
# _
# -- If this is the THIRD (OR MORE) time this method has been called
# since baby was created or last fed, then this method prints:
# "Baby <your baby's name> is CRYING uncontrollably! Feed the Baby!"
# _
# -- Modifies instance variables as needed.
# _
# Examples: See the two TEST functions below.
# _
# You may find it helpful to read the two TEST functions (below) at this time.
# If reading the TEST functions below does not make this specification clear,
# ASK QUESTIONS AS NEEDED to clarify this specification.
# _
# Once you understand this SPECIFICATION, mark this _TODO_ as DONE.
###############################################################################
###############################################################################
# DONE: 4. TESTS (read the following):
# The two functions that follow this comment TEST the Baby class.
# For each of those two functions:
# 1. READ the CODE in the function.
# As you do so, PREDICT what the code will cause to be printed.
# 2. READ the doc-string for the function.
# It shows the CORRECT output when the function runs.
# 3. CONFIRM that you understand WHY the function's CODE produces
# the OUTPUT that the doc-string says that it will.
# _
# If you do not understand why the CODE produces the OUTPUT as written
# in the function's doc-string, STOP HERE and ASK QUESTIONS AS NEEDED.
# Do ** NOT ** attempt to write the Baby class
# without fully understanding both of its test functions.
# _
# Once you fully understand the TESTS below, mark this _TODO_ as DONE.
###############################################################################
def run_test_1():
"""
Running this test should cause EXACTLY the following
to be displayed (i.e. printed) on the Console:
------------ Running test #1: ------------
Hello baby Joshua!
Baby Joshua is sleeping.
Baby Joshua is awake. Time for food.
Baby Joshua is CRYING uncontrollably! Feed the Baby!
Baby Joshua is CRYING uncontrollably! Feed the Baby!
Thank you for feeding baby Joshua.
Baby Joshua is sleeping.
Baby Joshua is awake. Time for food.
Thank you for feeding baby Joshua.
Baby Joshua is sleeping.
Thank you for feeding baby Joshua.
Baby Joshua is sleeping.
Baby Joshua is awake. Time for food.
Baby Joshua is CRYING uncontrollably! Feed the Baby!
Examine the code in this test to be sure that you understand
WHY it causes the above to be printed.
"""
print()
print('------------ Running test #1: ------------ ')
b = Baby("Joshua")
b.hour_passes()
b.hour_passes()
b.hour_passes()
b.hour_passes()
print() # Just to make the output easier to read.
b.feed_baby()
b.hour_passes()
b.hour_passes()
print() # Just to make the output easier to read.
b.feed_baby()
b.hour_passes()
print() # Just to make the output easier to read.
b.feed_baby()
b.hour_passes()
b.hour_passes()
b.hour_passes()
def run_test_2():
"""
Running this test should cause EXACTLY the following
to be displayed (i.e. printed) on the Console:
------------ Running test #2: ------------
Hello baby McKinley!
Hello baby Keegan!
--- Iteration #1 ---
Baby Keegan is sleeping.
Thank you for feeding baby McKinley.
Baby McKinley is sleeping.
Baby McKinley is awake. Time for food.
Baby McKinley is CRYING uncontrollably! Feed the Baby!
Baby McKinley is CRYING uncontrollably! Feed the Baby!
Thank you for feeding baby McKinley.
Baby McKinley is sleeping.
Baby McKinley is awake. Time for food.
--- Iteration #2 ---
Baby Keegan is awake. Time for food.
Thank you for feeding baby McKinley.
Baby McKinley is sleeping.
Baby McKinley is awake. Time for food.
Baby McKinley is CRYING uncontrollably! Feed the Baby!
Baby McKinley is CRYING uncontrollably! Feed the Baby!
Thank you for feeding baby McKinley.
Baby McKinley is sleeping.
Baby McKinley is awake. Time for food.
--- Iteration #3 ---
Baby Keegan is CRYING uncontrollably! Feed the Baby!
Thank you for feeding baby McKinley.
Baby McKinley is sleeping.
Baby McKinley is awake. Time for food.
Baby McKinley is CRYING uncontrollably! Feed the Baby!
Baby McKinley is CRYING uncontrollably! Feed the Baby!
Thank you for feeding baby McKinley.
Baby McKinley is sleeping.
Baby McKinley is awake. Time for food.
Examine the code in this test to be sure that you understand
WHY it causes the above to be printed.
"""
print()
print('------------ Running test #2: ------------ ')
mckinley = Baby("McKinley")
keegan = Baby("Keegan")
for k in range(3):
print() # Just to make the output easier to read.
print("--- Iteration #{} ---".format(k + 1))
keegan.hour_passes()
mckinley.feed_baby()
for j in range(4):
mckinley.hour_passes()
mckinley.feed_baby()
mckinley.hour_passes()
mckinley.hour_passes()
###############################################################################
# DONE: 5.
# Implement the entire Baby class
# (including its 3 methods: __init__, feed_baby, and hour_passes)
# below this comment.
# _
# Here is a reminder for the syntax (notation) to define a new class:
# class NameOfClass(object):
# """ Brief description of what an object of the class 'is'. """
# _
# AFTER you have implemented the ENTIRE Baby class,
# un-comment (one-by-one) the calls in main to the two tests
# and confirm that the tests produce the output that the doc-strings
# for the tests show as the CORRECT output.
# _
# Fix errors as needed! Do not hesitate to ASK QUESTIONS AS NEEDED.
###############################################################################
class Baby(object):
""" generates baby that eats, sleeps, and has a name"""
# Constructor method (that is, the __init__ method):
# What comes in:
# -- self
# -- a string for the name of the Baby
# What goes out: Nothing (i.e., None).
# Side effects:
# -- Prints "Hello baby <your baby's name>!"
# -- Sets instance variables as needed
# [YOU FIGURE OUT WHAT IS NEEDED AS YOU IMPLEMENT THE METHODS!]
# Example:
# b = Baby("McKinley")
# causes the following to be printed on the Console:
# Hello baby McKinley!
def __init__(self, baby):
self.baby = baby
print("Hello baby", self.baby, "!")
self.hour_since_feeding = 0
# _
# feed_baby:
# What comes in:
# -- self
# What goes out: Nothing (i.e., None).
# Side effects:
# -- Prints "Thank you for feeding baby <your baby's name>."
# -- Modifies instance variables as needed.
# Example:
# b = Baby("Joshua")
# b.feed_baby()
# causes the following to be printed on the Console:
# Hello baby Joshua!
# Thank you for feeding baby Joshua.
# _
def feed_baby(self):
print("Thank you for feeding baby", self.baby, ".")
self.hour_since_feeding = 0
# hour_passes
# What comes in:
# -- self
# What goes out: Nothing (i.e., None).
# Side effects:
# -- If this is the FIRST time this method has been called
# since this Baby was created or last fed, then this method prints:
# "Baby <your baby's name> is sleeping."
# _
# -- If this is the SECOND time this method has been called
# since baby was created or last fed, then this method prints:
# "Baby <your baby's name> is awake. Time for food."
# _
# -- If this is the THIRD (OR MORE) time this method has been called
# since baby was created or last fed, then this method prints:
# "Baby <your baby's name> is CRYING uncontrollably! Feed the Baby!"
# _
# -- Modifies instance variables as needed.
def hour_passes(self):
        if self.hour_since_feeding == 0:
            print("Baby", self.baby, "is sleeping.")
        elif self.hour_since_feeding == 1:
            print("Baby", self.baby, "is awake. Time for food.")
        else:
            print("Baby", self.baby, "is CRYING uncontrollably! Feed the Baby!")
        self.hour_since_feeding += 1
# -----------------------------------------------------------------------------
# Calls main to start the ball rolling.
# -----------------------------------------------------------------------------
main()
| 37.066667 | 86 | 0.563685 |
f73689676c7e34072751132b67e88ebf427c1287 | 842 | py | Python | setup.py | yupingso/randomproto | a36c0e13be893d9fd3a63e3390fc11fc0de250b9 | [
"MIT"
] | 6 | 2019-03-25T03:45:43.000Z | 2019-07-03T06:31:31.000Z | setup.py | yupingso/randomproto | a36c0e13be893d9fd3a63e3390fc11fc0de250b9 | [
"MIT"
] | 1 | 2019-06-03T09:13:44.000Z | 2019-06-03T09:13:44.000Z | setup.py | yupingso/randomproto | a36c0e13be893d9fd3a63e3390fc11fc0de250b9 | [
"MIT"
] | null | null | null | import setuptools
setuptools.setup(
name='randomproto',
version='0.0.1',
py_modules=('randomproto',),
install_requires=[
'protobuf>=3.6.0',
],
setup_requires=[
'pytest-runner',
],
tests_require=[
'pytest==4.2.1',
'pytest-cov==2.6.1',
'pytest-mock==1.10.0',
],
author='Yu-Ping Wu',
author_email='yupingso@gmail.com',
description='Random protobuf object generator',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
keywords='protobuf proto message random generate generator',
url='https://github.com/yupingso/randomproto',
classifiers=[
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
)
| 26.3125 | 64 | 0.608076 |
f73693a4682bea4ed02bdb055520c7339d597c43 | 26,017 | py | Python | openpharmacophore/pharmacophore/dynophore.py | uibcdf/openpharmacophore | 4f563fa206f6e7c081502acab97bb795d27bdeb9 | [
"MIT"
] | 14 | 2021-11-12T10:09:25.000Z | 2022-03-18T08:24:16.000Z | openpharmacophore/pharmacophore/dynophore.py | uibcdf/openpharmacophore | 4f563fa206f6e7c081502acab97bb795d27bdeb9 | [
"MIT"
] | 7 | 2021-11-05T01:37:57.000Z | 2022-01-18T06:03:39.000Z | openpharmacophore/pharmacophore/dynophore.py | uibcdf/openpharmacophore | 4f563fa206f6e7c081502acab97bb795d27bdeb9 | [
"MIT"
] | 3 | 2021-11-05T01:22:47.000Z | 2021-12-12T03:57:09.000Z | # OpenPharmacophore
from openpharmacophore._private_tools.exceptions import InvalidFileFormat, NoLigandsError, OpenPharmacophoreTypeError
from openpharmacophore.pharmacophore.pharmacophoric_point import UniquePharmacophoricPoint
from openpharmacophore import StructuredBasedPharmacophore
from openpharmacophore import Pharmacophore
from openpharmacophore.utils.conformers import conformer_energy
from openpharmacophore.pharmacophore.color_palettes import get_color_from_palette_for_feature
# Third Party
import matplotlib.pyplot as plt
import MDAnalysis as mda
from MDAnalysis.lib.util import NamedStream
import mdtraj as mdt
import numpy as np
import pandas as pd
import pyunitwizard as puw
from rdkit.Chem.Draw import rdMolDraw2D
from tqdm.auto import tqdm
# Standard Library
from collections import defaultdict
import copy
import bisect
from io import StringIO
import tempfile
from typing import List, Tuple, Optional
class Dynophore():
""" Class to store and compute dynamic pharmacophores
Parameters
----------
trajectory : str or mdtraj.trajectory or MDAnalysis.universe
A str with the file path containing the trajectory, an mdtraj trajectory object,
or an MDAnalysis universe.
Attributes
----------
pharmacophores : list of openpharmacophore.StructuredBasedPharmacophore
List with pharmacophores for each relevant frame in the trajectory.
pharmacophore_indices : list of int
Indices of the frame of the trajectory from which the pharmacophores were extracted.
The index of each element of the list corresponds to the one in pharmacophores list.
n_pharmacophores : int
Number of different pharmacophores in the trajectory.
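    Examples
    --------
    A hedged usage sketch; the trajectory file name, frame indices, and
    threshold below are assumed example values::

        dyno = Dynophore("trajectory.h5")
        dyno.pharmacophores_from_frames([0, 10, 20], load_ligand=True)
        model = dyno.pharmacophore_by_frequency(threshold=0.5)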
"""
def __init__(self, trajectory):
self.pharmacophores = []
self.pharmacophore_indices = []
self.n_pharmacophores = 0
self.unique_pharmacophoric_points = []
        # TODO: Load other types of file, including using a topology and a trajectory
if isinstance(trajectory, str):
self._trajectory = self._load_trajectory_file(trajectory)
elif isinstance(trajectory, mdt.Trajectory):
self._trajectory_type = "mdt"
self._trajectory = trajectory
self._n_frames = self._trajectory.n_frames
elif isinstance(trajectory, mda.Universe):
self._trajectory_type = "mda"
self._trajectory = trajectory
self._n_frames = trajectory.trajectory.n_frames
else:
raise TypeError("Trajectory must be of type string, mdtraj.Trajectory or MdAnalysis.Universe")
self._saved_ligand = False
self._averaged_coords = False
def common_hits_approach(self, frame_list=None):
""" Get a list of pharmacophore models from a trajectory using the common hits approach
method.
Notes
-----
This method is based on obtaining a list of representative pharmacophore models from a
trajectory and then validate and score them using virtual screening. The best performant
pharmacophore models are then returned.
References
----------
[1] Wieder, Marcus, Arthur Garon, Ugo Perricone, Stefan Boresch, Thomas Seidel, Anna Maria Almerico,
and Thierry Langer. "Common hits approach: combining pharmacophore modeling and molecular dynamics
simulations." Journal of chemical information and modeling 57, no. 2 (2017): 365-385
"""
if frame_list is None:
frame_list = list(range(0, self._n_frames))
self.pharmacophores_from_frames(frame_list, load_ligand=True)
self._get_unique_pharmacophoric_points(avg_coordinates=False)
        rpms = self.representative_pharmacophore_models()
        # TODO: validate and score the RPMs with virtual screening before
        # returning only the best-performing models.
        return rpms
    def draw(self, file_name: str, img_size: Tuple[int, int] = (500, 500),
legend: str = "", freq_threshold: float = 0.2) -> None:
""" Draw a 2d representation of the dynamic pharmacophore. This is a drawing of the
ligand with the pharmacophoric features highlighted and the frequency if each
one.
Parameters
----------
file_name : str
            Name or path of the file where the drawing will be saved. Must be a png file.
img_size : 2-tuple of int, optional
The size of the image (default=(500,500))
legend : str, optional
Image legend.
        freq_threshold : double, optional
            The minimum frequency of a pharmacophoric point to be drawn. Number
            between 0.0 and 1.0 (default=0.2).
"""
if freq_threshold < 0.0 or freq_threshold > 1.0:
raise ValueError("Freqency threshold must be a value between 0 and 1")
if not file_name.endswith(".png"):
raise InvalidFileFormat("File must be a png.")
# Extract a ligand
if self.pharmacophores[0].ligand is None:
raise NoLigandsError("Ligand could not be extracted")
ligand = copy.deepcopy(self.pharmacophores[0].ligand)
ligand.RemoveAllConformers()
atoms = []
bond_colors = {}
atom_highlights = defaultdict(list)
highlight_radius = {}
for up in self.unique_pharmacophoric_points:
if up.frequency < freq_threshold:
continue
indices = up.atom_indices
update_freq = True
for idx in indices:
# If an atom has more than one feature keep higher frequency value
if idx in atoms:
if ligand.GetAtomWithIdx(idx).HasProp("atomNote"):
freq = int(ligand.GetAtomWithIdx(idx).GetProp("atomNote")[2:])
if freq > up.frequency:
update_freq = False
atoms.append(idx)
if "hydrophobicity" in up.feature_name:
feat_name = "hydrophobicity"
else:
feat_name = " ".join(up.feature_name.split()[0:2])
atom_highlights[idx].append(get_color_from_palette_for_feature(feat_name))
highlight_radius[idx] = 0.6
# Draw aromatic rings bonds
if up.short_name == "R":
for neighbor in ligand.GetAtomWithIdx(idx).GetNeighbors():
nbr_idx = neighbor.GetIdx()
if nbr_idx not in indices:
continue
bond = ligand.GetBondBetweenAtoms(idx, nbr_idx).GetIdx()
bond_colors[bond] = [get_color_from_palette_for_feature("aromatic ring")]
if update_freq:
frequency = int(up.frequency * 100)
ligand.GetAtomWithIdx(idx).SetProp("atomNote", f"f={frequency}")
drawing = rdMolDraw2D.MolDraw2DCairo(img_size[0], img_size[1])
drawing.DrawMoleculeWithHighlights(ligand, legend, dict(atom_highlights), bond_colors, highlight_radius, {})
drawing.FinishDrawing()
drawing.WriteDrawingText(file_name)
def first_and_last_pharmacophore(self) -> None:
""" Derive a pharmacophore model for the first and last frames of a trajectory.
References
----------
[1] Wieder, Marcus, Ugo Perricone, Thomas Seidel, Stefan Boresch, and Thierry Langer.
"Comparing pharmacophore models derived from crystal structures and from molecular
dynamics simulations." Monatshefte für Chemie-Chemical Monthly 147, no. 3 (2016):
553-563.
"""
if self._trajectory_type == "mdt":
get_pharmacophore = self._pharmacophore_from_mdtraj
elif self._trajectory_type == "mda":
get_pharmacophore = self._pharmacohore_from_mdanalysis
initial_pharmacophore = get_pharmacophore(0, True, True)
end_pharmacophore = get_pharmacophore(-1, True, True)
last_frame_index = self._trajectory.n_frames
self.pharmacophores = [
initial_pharmacophore,
end_pharmacophore
]
self.pharmacophore_indices = [0, last_frame_index]
self.n_pharmacophores = 2
def pharmacophore_by_frequency(self, threshold: float) -> Pharmacophore:
""" Derive a unique pharmacophore model with the pharmacophoric points
        that have a frequency >= threshold.
Parameters
---------
threshold : float
The value of frequency from which points are considered part of
            the pharmacophore model. Must be a value between 0 and 1.
Returns
-------
        openpharmacophore.Pharmacophore
Pharmacophore model with the unique pharmacophoric points.
References
----------
[1] Wieder, Marcus, Ugo Perricone, Thomas Seidel, and Thierry Langer. "Pharmacophore models
derived from molecular dynamics simulations of protein-ligand complexes: A case study."
Natural product communications 11, no. 10 (2016): 1934578X1601101019.
"""
if threshold < 0 or threshold > 1:
raise ValueError("Threshold must be a number between 0 and 1")
if len(self.unique_pharmacophoric_points) == 0:
self._get_unique_pharmacophoric_points(avg_coordinates=True)
points = [p for p in self.unique_pharmacophoric_points if p.frequency >= threshold]
return Pharmacophore(points)
def pharmacophore_from_unique_points(self, unique_points: List[str]) -> Pharmacophore:
""" Get a pharmacophore which consists of the passed unique pharmacophoric
points.
Parameters
----------
unique_points: list of str
List with the name of the unique pharmacophoric points.
Returns
-------
        openpharmacophore.Pharmacophore
Pharmacophore model with the specified points.
"""
if len(self.unique_pharmacophoric_points) == 0 or not self._averaged_coords:
self._get_unique_pharmacophoric_points(avg_coordinates=True)
points = [point for point in self.unique_pharmacophoric_points if point.feature_name in unique_points]
return Pharmacophore(pharmacophoric_points=points)
def pharmacophores_from_frames(self, frames: List[int], load_ligand: bool = True) -> None:
""" Get pharmacophores for the specified frames in a trajectory
Parameters
----------
frames : list of int
Indices of the frames for which pharmacophores will be derived.
"""
if self._trajectory_type == "mdt":
get_pharmacophore = self._pharmacophore_from_mdtraj
elif self._trajectory_type == "mda":
get_pharmacophore = self._pharmacohore_from_mdanalysis
self.pharmacophores.clear()
self.pharmacophore_indices.clear()
for ii in tqdm(frames):
self.pharmacophores.append(get_pharmacophore(ii, load_ligand=load_ligand))
self.pharmacophore_indices.append(ii)
self.n_pharmacophores = len(self.pharmacophores)
def pharmacophoric_point_frequency(self) -> pd.DataFrame:
""" Get a dataframe with all unique pharmacophoric points and its frequency.
Returns
-------
pandas.DataFrame
Dataframe with the following columns: feature name, frequency and atom
indices.
"""
if len(self.unique_pharmacophoric_points) == 0 or not self._averaged_coords:
self._get_unique_pharmacophoric_points(avg_coordinates=True)
names = []
frequencies = []
indices = []
for point in self.unique_pharmacophoric_points:
names.append(point.feature_name)
frequencies.append(point.frequency)
indices.append(point.atom_indices)
frequency = pd.DataFrame().from_dict({
"Feature Name": names,
"Frequency": frequencies,
"Atoms Indices": indices
})
frequency.sort_values(by=["Frequency"], ascending=False, inplace=True)
frequency.reset_index(inplace=True)
frequency.drop(columns=["index"], inplace=True)
return frequency
def point_frequency_plot(self, threshold: float = 0.0, n_bins: int = 10,
ax: Optional[plt.Axes] = None):
""" Plot of pharmacophoric points frequency vs time.
Each pharmacophoric point will appear as a different line in the plot.
Parameters
----------
threshold : double, default=0.0
The value of overall frequency from which points will form part of the
plot. If there are a lot of points with really low frequency, setting
the threshold value can help with visualization.
n_bins : int, default=10
Number of bins to discretize the timesteps.
ax : matplotlib.axes._subplots.AxesSubplot, optional.
An axes object where the plot will be drawn.
"""
if len(self.unique_pharmacophoric_points) == 0 or not self._averaged_coords:
self._get_unique_pharmacophoric_points(avg_coordinates=True)
if threshold < 0 or threshold > 1:
raise ValueError("Threshold must be a number between 0 and 1")
if ax is None:
fig, ax = plt.subplots(figsize=(10, 7))
n_timesteps = self._n_frames
bins = np.arange(0, n_timesteps + 1, n_timesteps/n_bins)
for point in self.unique_pharmacophoric_points:
if point.frequency < threshold:
continue
point_timesteps = np.array(point.timesteps)
discretized_timesteps = np.digitize(point_timesteps, bins)
counts = np.zeros_like(bins)
for i in range(bins.shape[0]):
c = np.count_nonzero(discretized_timesteps == i)
counts[i] = c
ax.plot(bins, counts, label=point.feature_name)
ax.legend()
ax.set_xlabel("Timesteps")
ax.set_ylabel("Count")
plt.show()
return ax
def representative_pharmacophore_models(self) -> List[StructuredBasedPharmacophore]:
""" Get all representative pharmacophore models (RPM) in a trajectory.
        RPMs are pharmacophore models that share the same set of pharmacophoric points.
Returns
-------
rpms : list of openpharmacophore.StructuredBasedPharmacophore
The representative pharmacophore models
Note
-----
Pharmacophoric points are considered equal based only on feature type and the atoms to
        which the points belong. Coordinates are not taken into account.
The coordinates of the pharmacophoric points are those that belong to the median energy of
the ligand.
References
----------
[1] Wieder, Marcus, Arthur Garon, Ugo Perricone, Stefan Boresch, Thomas Seidel, Anna Maria Almerico,
and Thierry Langer. "Common hits approach: combining pharmacophore modeling and molecular dynamics
simulations." Journal of chemical information and modeling 57, no. 2 (2017): 365-385
"""
if len(self.unique_pharmacophoric_points) == 0 or self._averaged_coords:
self._get_unique_pharmacophoric_points(avg_coordinates=False)
self._averaged_coords = False
rpms_indices = self._get_rpms_indices()
return self._pharmacophores_from_ligand_median_energy(rpms_indices)
def _get_rpms_indices(self) -> List[List[int]]:
""" Get the indices of the representative pharmacophore models.
If an empty list is returned it means that all pharmacophore models in the trajectory are different.
Returns
--------
rpms_indices : list of list of int
A list where each sublist contains the indices of each representative pharmacophore
model. This indices correspond to the attribute pharmacophores of the Dynophore
class.
"""
# Compute a matrix where each row represents a feature vector of a pharmacophore
n_pharmacophores = self.n_pharmacophores
n_features = len(self.unique_pharmacophoric_points)
feature_matrix = np.zeros((n_pharmacophores, n_features), dtype=np.int32)
for ii, pharmacophore in enumerate(self.pharmacophores):
for point in pharmacophore:
for jj, unique_point in enumerate(self.unique_pharmacophoric_points):
if point.is_equal(unique_point):
feature_matrix[ii, jj] = 1
break
# Find similar pharmacophores in the matrix
rpms_indices = []
skip = []
for ii in range(n_pharmacophores):
rpm = [ii]
for jj in range(ii + 1, n_pharmacophores):
if jj in skip:
continue
if np.all(feature_matrix[ii, :] == feature_matrix[jj, :]):
rpm.append(jj)
skip.append(jj)
# Keep only models that have a frequency higher than 2
if len(rpm) > 2:
rpms_indices.append(rpm)
return rpms_indices
    def _pharmacophores_from_ligand_median_energy(self, rpms_indices) -> List[StructuredBasedPharmacophore]:
""" Get the representative pharmacophore models that correspond to the pharmacophore
with ligand median energy.
Parameters
----------
rpms_indices : list of list of int
A list where each sublist contains the indices of each representative pharmacophore
model. This indices correspond to the attribute pharmacophores of the Dynophore
class.
Returns
-------
rpms : list of openpharmacophore.StructuredBasedPharmacophore
The representative pharmacophore models
"""
rpms = []
for indices in rpms_indices:
energies = []
for index in indices:
energy = (conformer_energy(self.pharmacophores[index].ligand), index)
bisect.insort(energies, energy)
# Take the pharmacophore with median energy
median_energy_index = energies[int(len(energies) / 2)][1]
rpms.append(self.pharmacophores[median_energy_index])
return rpms
def _load_trajectory_file(self, file_name: str) -> mdt.Trajectory:
""" Load a trajectory file from a MD simulation
Parameters
----------
file_name : str
Name of the file containing the trajectory.
Returns
-------
        traj : mdtraj.Trajectory
            The trajectory object.
"""
if file_name.endswith("h5"):
traj = mdt.load(file_name)
self._trajectory_type = "mdt"
else:
raise NotImplementedError
return traj
def _get_unique_pharmacophoric_points(self, avg_coordinates: bool = True) -> None:
""" Get all unique pharmacophoric points across all the pharmacophore models
derived from the trajectory.
Parameters
----------
avg_coordinates : bool
Whether to average the coordinates of the pharmacophoric points.
Notes
-----
Two points are considered equal if they have the same feature type and
are associated with the same atom in the ligand.
"""
if avg_coordinates:
self._averaged_coords = True
if self.n_pharmacophores == 0:
self.pharmacophores_from_frames(list(range(0, self._n_frames)))
all_points = []
for ii, pharmacophore in enumerate(self.pharmacophores):
for pharmacophoric_point in pharmacophore:
pharmacophoric_point.pharmacophore_index = ii
all_points.append(pharmacophoric_point)
self.unique_pharmacophoric_points.clear()
        # Get all unique pharmacophoric points while also updating the count,
# timesteps where they appear and calculating the average centroid.
for point in all_points:
is_unique = True
for unique_p in self.unique_pharmacophoric_points:
if point.is_equal(unique_p):
timestep = point.pharmacophore_index
                    if timestep not in unique_p.timesteps:
unique_p.timesteps.append(timestep)
unique_p.count += 1
if avg_coordinates:
unique_p.center += point.center
is_unique = False
break
if is_unique:
self.unique_pharmacophoric_points.append(UniquePharmacophoricPoint(point, point.pharmacophore_index))
names = []
for point in self.unique_pharmacophoric_points:
if avg_coordinates:
# Normalize centroid
point.center /= point.count
point.frequency = point.count / self.n_pharmacophores
# Get a unique name for each point
feat_num = 1
full_name = point.feature_name + " " + str(feat_num)
if full_name not in names:
names.append(full_name)
point.feature_name = full_name
else:
while True:
feat_num += 1
full_name = point.feature_name + " " + str(feat_num)
if full_name not in names:
names.append(full_name)
point.feature_name = full_name
break
def _pharmacophore_from_mdtraj(self, frame_num: int, load_mol_system: bool=False,
load_ligand: bool=False) -> StructuredBasedPharmacophore:
""" Derive a pharmacophore for a single frame of an mdtraj Trajectory object.
Parameters
----------
frame_num : int
The index number of the frame from which the pharmacophore will be derived.
load_mol_system : bool, default=False
If true the receptor will be stored in the pharmacophore object.
load_ligand : bool, default=False
If true the ligand will be stored in the pharmacophore object.
"""
        # mdtraj trajectories cannot be passed to StringIO objects nor saved as strings. So with this
# method, temporary pdb files will be created that can be read by the StructuredBasedPharmacophore
# class.
if not isinstance(frame_num, int):
raise OpenPharmacophoreTypeError("Frame number must be an integer")
frame = self._trajectory[frame_num]
with tempfile.NamedTemporaryFile() as original_file:
frame.save_pdb(original_file.name)
original_file.seek(0)
lines_original = original_file.readlines()
            # The pdb file mdtraj generates needs to be edited so that pybel can read it.
# The third line that contains "MODEL" needs to be removed for the structured
# based pharmacophore to work.
with tempfile.NamedTemporaryFile() as modified_file:
for line in lines_original:
if not line.startswith(b'MODEL'):
modified_file.write(line)
modified_file.truncate()
modified_file.seek(0)
pharmacophore = StructuredBasedPharmacophore.from_pdb(modified_file,
radius=1.0, ligand_id=None, hydrophobics="plip",
load_mol_system=load_mol_system, load_ligand=load_ligand)
return pharmacophore
def _pharmacohore_from_mdanalysis(self, frame_num: int, load_mol_system: bool = False,
load_ligand: bool = False) -> StructuredBasedPharmacophore:
""" Derive a pharmacophore for a single frame of an MdAnalysis Universe object.
Parameters
----------
frame_num : int
The index number of the frame from which the pharmacophore will be derived.
load_mol_system: bool, default=False
If true the receptor will be stored in the pharmacophore object.
load_ligand: bool, default=False
If true the ligand will be stored in the pharmacophore object.
"""
if not isinstance(frame_num, int):
raise OpenPharmacophoreTypeError("Frame number must be an integer")
stream = StringIO()
pdb_stream = NamedStream(stream, "output.pdb")
atoms = self._trajectory.select_atoms("all")
atoms.write(pdb_stream, frames=self._trajectory.trajectory[[frame_num]])
pharmacophore = StructuredBasedPharmacophore.from_pdb(pdb_stream,
radius=1.0, ligand_id=None, hydrophobics="plip",
load_mol_system=load_mol_system, load_ligand=load_ligand)
return pharmacophore
def __repr__(self) -> str:
return f"{self.__class__.__name__}(n_pharmacophores={self.n_pharmacophores}; n_frames={self._n_frames})"
| 41.827974 | 117 | 0.611523 |
f7369619cd01529ee372590cea33f5f73b48f876 | 5,341 | py | Python | spikeforest_analysis/compare_sortings_with_truth.py | tjd2002/spikeforest2 | 2e393564b858b2995aa2ccccd9bd73065681b5de | [
"Apache-2.0"
] | null | null | null | spikeforest_analysis/compare_sortings_with_truth.py | tjd2002/spikeforest2 | 2e393564b858b2995aa2ccccd9bd73065681b5de | [
"Apache-2.0"
] | null | null | null | spikeforest_analysis/compare_sortings_with_truth.py | tjd2002/spikeforest2 | 2e393564b858b2995aa2ccccd9bd73065681b5de | [
"Apache-2.0"
] | null | null | null | import spikeextractors as si
#import spikewidgets as sw
import spiketoolkit as st
import mlprocessors as mlpr
import json
from cairio import client as ca
import numpy as np
from copy import deepcopy
def compare_sortings_with_truth(sortings, compute_resource, num_workers=None):
print('>>>>>> compare sortings with truth')
container='sha1://3b26155930cc4a4745c67b702ce297c9c968ac94/02-12-2019/mountaintools_basic.simg'
jobs_gen_table=[]
for sorting in sortings:
units_true=sorting.get('units_true',[])
firings=sorting['firings']
firings_true=sorting['firings_true']
units_true=units_true
job=GenSortingComparisonTable.createJob(
firings=firings,
firings_true=firings_true,
units_true=units_true,
json_out={'ext':'.json','upload':True},
html_out={'ext':'.html','upload':True},
_container=container
)
jobs_gen_table.append(job)
all_jobs=jobs_gen_table
label='Compare sortings with truth'
mlpr.executeBatch(jobs=all_jobs,label=label,num_workers=num_workers,compute_resource=compute_resource)
sortings_out=[]
for i,sorting in enumerate(sortings):
comparison_with_truth=dict()
comparison_with_truth['json']=jobs_gen_table[i]['result']['outputs']['json_out']
comparison_with_truth['html']=jobs_gen_table[i]['result']['outputs']['html_out']
sorting2=deepcopy(sorting)
sorting2['comparison_with_truth']=comparison_with_truth
sortings_out.append(sorting2)
return sortings_out
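# Hedged usage sketch (the sha1:// hashes, unit ids, and compute resource below
# are assumed placeholder values, not real data):
def _example_compare_sortings_with_truth():
    sortings = [dict(
        firings='sha1://<hash>/firings.mda',
        firings_true='sha1://<hash>/firings_true.mda',
        units_true=[1, 2, 3],
    )]
    results = compare_sortings_with_truth(sortings, compute_resource='default')
    return results[0]['comparison_with_truth']['json']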
class GenSortingComparisonTable(mlpr.Processor):
VERSION='0.2.0'
firings=mlpr.Input('Firings file (sorting)')
firings_true=mlpr.Input('True firings file')
units_true=mlpr.IntegerListParameter('List of true units to consider')
json_out=mlpr.Output('Table as .json file produced from pandas dataframe')
html_out=mlpr.Output('Table as .html file produced from pandas dataframe')
def run(self):
sorting=si.MdaSortingExtractor(firings_file=self.firings)
sorting_true=si.MdaSortingExtractor(firings_file=self.firings_true)
if (self.units_true is not None) and (len(self.units_true)>0):
sorting_true=si.SubSortingExtractor(parent_sorting=sorting_true,unit_ids=self.units_true)
SC=st.comparison.SortingComparison(sorting_true,sorting)
df=get_comparison_data_frame(comparison=SC)
#sw.SortingComparisonTable(comparison=SC).getDataframe()
json=df.transpose().to_dict()
html=df.to_html(index=False)
_write_json_file(json,self.json_out)
_write_json_file(html,self.html_out)
def get_comparison_data_frame(*,comparison):
import pandas as pd
SC=comparison
unit_properties=[] #snr, etc? these would need to be properties in the sortings of the comparison
# Compute events counts
sorting1=SC.getSorting1()
sorting2=SC.getSorting2()
unit1_ids = sorting1.getUnitIds()
unit2_ids = sorting2.getUnitIds()
N1 = len(unit1_ids)
N2 = len(unit2_ids)
event_counts1 = dict()
for i1, u1 in enumerate(unit1_ids):
times1 = sorting1.getUnitSpikeTrain(u1)
event_counts1[u1] = len(times1)
event_counts2 = dict()
for i2, u2 in enumerate(unit2_ids):
times2 = sorting2.getUnitSpikeTrain(u2)
event_counts2[u2] = len(times2)
rows = []
for u_1, unit1 in enumerate(unit1_ids):
unit2 = SC.getBestUnitMatch1(unit1)
if unit2>=0:
num_matches=SC.getMatchingEventCount(unit1, unit2)
num_false_negatives=event_counts1[unit1]-num_matches
num_false_positives=event_counts2[unit2]-num_matches
else:
num_matches=0
num_false_negatives=event_counts1[unit1]
num_false_positives=0
row0 = {
'unit_id': unit1,
'accuracy': _safe_frac(num_matches,num_false_positives+num_false_negatives+num_matches),
'best_unit': unit2,
'matched_unit': SC.getMappedSorting1().getMappedUnitIds(unit1),
'num_matches': num_matches,
'num_false_negatives': num_false_negatives,
'num_false_positives': num_false_positives,
'f_n': _safe_frac(num_false_negatives,num_false_negatives+num_matches),
'f_p': _safe_frac(num_false_positives,num_false_positives+num_matches)
}
for prop in unit_properties:
pname = prop['name']
row0[pname] = SC.getSorting1().getUnitProperty(unit_id=int(unit1), property_name=pname)
rows.append(row0)
df = pd.DataFrame(rows)
fields = ['unit_id']
fields = fields + ['accuracy', 'best_unit', 'matched_unit', 'num_matches', 'num_false_negatives', 'num_false_positives', 'f_n', 'f_p']
for prop in unit_properties:
pname = prop['name']
fields.append(pname)
df = df[fields]
df['accuracy'] = df['accuracy'].map('{:,.4f}'.format)
# df['Best match'] = df['Accuracy'].map('{:,.2f}'.format)
df['f_n'] = df['f_n'].map('{:,.4f}'.format)
df['f_p'] = df['f_p'].map('{:,.4f}'.format)
return df
def _safe_frac(numer, denom):
if denom == 0:
return 0
return float(numer) / denom
def _write_json_file(obj,path):
with open(path,'w') as f:
return json.dump(obj,f)
| 39.272059 | 138 | 0.67684 |
f736c82fdd13410b5006180443ed9fb5d2275d3b | 496 | py | Python | examples/cc/h2o_ccsd_t.py | seunghoonlee89/pyscf-ecCC-TCC | 2091566fb83c1474e40bf74f271be2ce4611f60c | [
"Apache-2.0"
] | 2 | 2021-09-17T06:10:17.000Z | 2022-01-22T23:37:22.000Z | examples/cc/h2o_ccsd_t.py | seunghoonlee89/pyscf-ecCC-TCC | 2091566fb83c1474e40bf74f271be2ce4611f60c | [
"Apache-2.0"
] | null | null | null | examples/cc/h2o_ccsd_t.py | seunghoonlee89/pyscf-ecCC-TCC | 2091566fb83c1474e40bf74f271be2ce4611f60c | [
"Apache-2.0"
] | 2 | 2021-09-16T23:37:42.000Z | 2021-10-14T23:00:39.000Z | #!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
A simple example to run CCSD(T) and UCCSD(T) calculation.
'''
import pyscf
mol = pyscf.M(
atom = 'O -0.26677564 -0.27872083 0.00000000;\
H -0.26677564 0.82127917 0.00000000;\
H -0.26677564 -0.64538753 1.03708994',
basis = 'ccpvtz')
mf = mol.RHF().run()
mycc = mf.CCSD().run()
et = mycc.ccsd_t()
print('CCSD(T) correlation energy', mycc.e_corr + et)
| 21.565217 | 63 | 0.574597 |
f7370d3a7ba6b398302266319fded87169772eec | 1,227 | py | Python | marqeta/response_models/address_verification_model.py | marqeta/marqeta-python | 66fa690eb910825c510a391720b0fe717fac0234 | [
"MIT"
] | 21 | 2019-04-12T09:02:17.000Z | 2022-02-18T11:39:06.000Z | marqeta/response_models/address_verification_model.py | marqeta/marqeta-python | 66fa690eb910825c510a391720b0fe717fac0234 | [
"MIT"
] | 1 | 2020-07-22T21:27:40.000Z | 2020-07-23T17:38:43.000Z | marqeta/response_models/address_verification_model.py | marqeta/marqeta-python | 66fa690eb910825c510a391720b0fe717fac0234 | [
"MIT"
] | 10 | 2019-05-08T14:20:37.000Z | 2021-09-20T18:09:26.000Z | from datetime import datetime, date
from marqeta.response_models.avs_information import AvsInformation
from marqeta.response_models.avs_information import AvsInformation
from marqeta.response_models import datetime_object
import json
import re
class AddressVerificationModel(object):
def __init__(self, json_response):
self.json_response = json_response
def __str__(self):
return json.dumps(self.json_response, default=self.json_serial)
@staticmethod
def json_serial(o):
if isinstance(o, datetime) or isinstance(o, date):
return o.__str__()
@property
def request(self):
if 'request' in self.json_response:
return AvsInformation(self.json_response['request'])
@property
def on_file(self):
if 'on_file' in self.json_response:
return AvsInformation(self.json_response['on_file'])
@property
def response(self):
if 'response' in self.json_response:
return Response(self.json_response['response'])
def __repr__(self):
return '<Marqeta.response_models.address_verification_model.AddressVerificationModel>' + self.__str__()
| 31.461538 | 112 | 0.723716 |
f7371d98ef6f603bc1192d540e496dcf869ac1ed | 4,335 | py | Python | experiments/ashvin/icml2020/hand/adaptive/clamp_easy1.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | null | null | null | experiments/ashvin/icml2020/hand/adaptive/clamp_easy1.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | null | null | null | experiments/ashvin/icml2020/hand/adaptive/clamp_easy1.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | null | null | null | """
AWR + SAC from demo experiment
"""
from rlkit.demos.source.dict_to_mdp_path_loader import DictToMDPPathLoader
from rlkit.launchers.experiments.awac.awac_rl import experiment, process_args
import rlkit.misc.hyperparameter as hyp
from rlkit.launchers.arglauncher import run_variants
from rlkit.torch.sac.policies import GaussianPolicy
from rlkit.torch.networks import Clamp
if __name__ == "__main__":
variant = dict(
num_epochs=5001,
num_eval_steps_per_epoch=1000,
num_trains_per_train_loop=1000,
num_expl_steps_per_train_loop=1000,
min_num_steps_before_training=1000,
max_path_length=1000,
batch_size=1024,
replay_buffer_size=int(1E6),
layer_size=256,
policy_class=GaussianPolicy,
policy_kwargs=dict(
hidden_sizes=[256, 256, 256, 256],
max_log_std=0,
min_log_std=-6,
std_architecture="values",
# num_gaussians=1,
),
algorithm="SAC",
version="normal",
collection_mode='batch',
trainer_kwargs=dict(
discount=0.99,
soft_target_tau=5e-3,
target_update_period=1,
policy_lr=3E-4,
qf_lr=3E-4,
reward_scale=1,
beta=1,
use_automatic_entropy_tuning=False,
alpha=0,
compute_bc=False,
bc_num_pretrain_steps=0,
q_num_pretrain1_steps=0,
q_num_pretrain2_steps=25000,
policy_weight_decay=1e-4,
q_weight_decay=0,
bc_loss_type="mse",
rl_weight=1.0,
use_awr_update=True,
use_reparam_update=False,
reparam_weight=0.0,
awr_weight=0.0,
bc_weight=1.0,
post_bc_pretrain_hyperparams=dict(
bc_weight=0.0,
compute_bc=False,
),
reward_transform_kwargs=None, # r' = r + 1
terminal_transform_kwargs=None, # t = 0
),
launcher_config=dict(
num_exps_per_instance=1,
region='us-west-2',
),
path_loader_class=DictToMDPPathLoader,
path_loader_kwargs=dict(
obs_key="state_observation",
demo_paths=[
# dict(
# path="demos/icml2020/hand/pen2_sparse.npy",
# obs_dict=True,
# is_demo=True,
# ),
# dict(
# path="demos/icml2020/hand/pen_bc5.npy",
# obs_dict=False,
# is_demo=False,
# train_split=0.9,
# ),
],
),
add_env_demos=True,
add_env_offpolicy_data=True,
# logger_variant=dict(
# tensorboard=True,
# ),
load_demos=True,
pretrain_policy=True,
pretrain_rl=True,
# save_pretrained_algorithm=True,
# snapshot_mode="all",
)
search_space = {
'env': ["pen-sparse-v0", "door-sparse-v0"],
# 'env': ["relocate-sparse-v0", ],
'trainer_kwargs.bc_loss_type': ["mle"],
'trainer_kwargs.awr_loss_type': ["mle"],
'seedid': range(3),
'trainer_kwargs.beta': [0.3, 0.5],
'trainer_kwargs.reparam_weight': [0.0, ],
'trainer_kwargs.awr_weight': [1.0],
'trainer_kwargs.bc_weight': [1.0, ],
'policy_kwargs.std_architecture': ["values", ],
# 'trainer_kwargs.compute_bc': [True, ],
'trainer_kwargs.awr_use_mle_for_vf': [True, ],
'trainer_kwargs.awr_sample_actions': [False, ],
'trainer_kwargs.awr_min_q': [True, ],
'trainer_kwargs.q_weight_decay': [0],
'trainer_kwargs.reward_transform_kwargs': [None, ],
'trainer_kwargs.terminal_transform_kwargs': [dict(m=0, b=0), ],
'qf_kwargs.output_activation': [Clamp(max=0)],
'trainer_kwargs.train_bc_on_rl_buffer':[True],
# 'policy_kwargs.num_gaussians': [1, ],
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
variants = []
for variant in sweeper.iterate_hyperparameters():
variants.append(variant)
run_variants(experiment, variants, process_args)
| 30.528169 | 77 | 0.570934 |
f7376f5ac6518f055d9d50110ef2dee4b566060b | 30,279 | py | Python | truncation_experiments/dnnlib/tflib/network.py | a7b23/CS236G-project | 8bde7d54ca3d22e13703fe72dfb998b38d9cd8b4 | [
"MIT"
] | 2 | 2021-02-23T07:07:32.000Z | 2021-03-11T21:13:26.000Z | truncation_experiments/dnnlib/tflib/network.py | a7b23/CS236G-project | 8bde7d54ca3d22e13703fe72dfb998b38d9cd8b4 | [
"MIT"
] | null | null | null | truncation_experiments/dnnlib/tflib/network.py | a7b23/CS236G-project | 8bde7d54ca3d22e13703fe72dfb998b38d9cd8b4 | [
"MIT"
] | 3 | 2021-02-23T09:02:48.000Z | 2021-02-27T06:14:42.000Z | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the Creative Commons Attribution-NonCommercial
# 4.0 International License. To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
"""Helper for managing networks."""
import types
import inspect
import re
import uuid
import sys
import numpy as np
import tensorflow as tf
from collections import OrderedDict
from typing import Any, List, Tuple, Union
from . import tfutil
from .. import util
from .tfutil import TfExpression, TfExpressionEx
_import_handlers = [] # Custom import handlers for dealing with legacy data in pickle import.
_import_module_src = dict() # Source code for temporary modules created during pickle import.
def import_handler(handler_func):
"""Function decorator for declaring custom import handlers."""
_import_handlers.append(handler_func)
return handler_func
class Network:
"""Generic network abstraction.
Acts as a convenience wrapper for a parameterized network construction
function, providing several utility methods and convenient access to
the inputs/outputs/weights.
Network objects can be safely pickled and unpickled for long-term
archival purposes. The pickling works reliably as long as the underlying
network construction function is defined in a standalone Python module
that has no side effects or application-specific imports.
Args:
name: Network name. Used to select TensorFlow name and variable scopes.
func_name: Fully qualified name of the underlying network construction function, or a top-level function object.
static_kwargs: Keyword arguments to be passed in to the network construction function.
Attributes:
name: User-specified name, defaults to build func name if None.
scope: Unique TensorFlow scope containing template graph and variables, derived from the user-specified name.
static_kwargs: Arguments passed to the user-supplied build func.
components: Container for sub-networks. Passed to the build func, and retained between calls.
num_inputs: Number of input tensors.
num_outputs: Number of output tensors.
input_shapes: Input tensor shapes (NC or NCHW), including minibatch dimension.
output_shapes: Output tensor shapes (NC or NCHW), including minibatch dimension.
input_shape: Short-hand for input_shapes[0].
output_shape: Short-hand for output_shapes[0].
input_templates: Input placeholders in the template graph.
output_templates: Output tensors in the template graph.
input_names: Name string for each input.
output_names: Name string for each output.
own_vars: Variables defined by this network (local_name => var), excluding sub-networks.
vars: All variables (local_name => var).
trainables: All trainable variables (local_name => var).
var_global_to_local: Mapping from variable global names to local names.
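    Example:
        # A hedged usage sketch; the build-function path and its keyword
        # arguments below are assumed example values.
        net = Network("G", func_name="my_networks.generator", fmap_base=8192)
        out_expr = net.get_output_for(latents_placeholder)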
"""
def __init__(self, name: str = None, func_name: Any = None, **static_kwargs):
tfutil.assert_tf_initialized()
assert isinstance(name, str) or name is None
assert func_name is not None
assert isinstance(func_name, str) or util.is_top_level_function(func_name)
assert util.is_pickleable(static_kwargs)
self._init_fields()
self.name = name
self.static_kwargs = util.EasyDict(static_kwargs)
# Locate the user-specified network build function.
if util.is_top_level_function(func_name):
func_name = util.get_top_level_function_name(func_name)
module, self._build_func_name = util.get_module_from_obj_name(func_name)
self._build_func = util.get_obj_from_module(module, self._build_func_name)
assert callable(self._build_func)
# Dig up source code for the module containing the build function.
self._build_module_src = _import_module_src.get(module, None)
if self._build_module_src is None:
self._build_module_src = inspect.getsource(module)
# Init TensorFlow graph.
self._init_graph()
self.reset_own_vars()
def _init_fields(self) -> None:
self.name = None
self.scope = None
self.static_kwargs = util.EasyDict()
self.components = util.EasyDict()
self.num_inputs = 0
self.num_outputs = 0
self.input_shapes = [[]]
self.output_shapes = [[]]
self.input_shape = []
self.output_shape = []
self.input_templates = []
self.output_templates = []
self.input_names = []
self.output_names = []
self.own_vars = OrderedDict()
self.vars = OrderedDict()
self.trainables = OrderedDict()
self.var_global_to_local = OrderedDict()
self._build_func = None # User-supplied build function that constructs the network.
self._build_func_name = None # Name of the build function.
self._build_module_src = None # Full source code of the module containing the build function.
self._run_cache = dict() # Cached graph data for Network.run().
def _init_graph(self) -> None:
# Collect inputs.
self.input_names = []
for param in inspect.signature(self._build_func).parameters.values():
if param.kind == param.POSITIONAL_OR_KEYWORD and param.default is param.empty:
self.input_names.append(param.name)
self.num_inputs = len(self.input_names)
assert self.num_inputs >= 1
# Choose name and scope.
if self.name is None:
self.name = self._build_func_name
assert re.match("^[A-Za-z0-9_.\\-]*$", self.name)
with tf.name_scope(None):
self.scope = tf.get_default_graph().unique_name(self.name, mark_as_used=True)
# Finalize build func kwargs.
build_kwargs = dict(self.static_kwargs)
build_kwargs["is_template_graph"] = True
build_kwargs["components"] = self.components
# Build template graph.
with tfutil.absolute_variable_scope(self.scope, reuse=tf.AUTO_REUSE), tfutil.absolute_name_scope(self.scope): # ignore surrounding scopes
assert tf.get_variable_scope().name == self.scope
assert tf.get_default_graph().get_name_scope() == self.scope
with tf.control_dependencies(None): # ignore surrounding control dependencies
self.input_templates = [tf.placeholder(tf.float32, name=name) for name in self.input_names]
out_expr = self._build_func(*self.input_templates, **build_kwargs)
# Collect outputs.
assert tfutil.is_tf_expression(out_expr) or isinstance(out_expr, tuple)
self.output_templates = [out_expr] if tfutil.is_tf_expression(out_expr) else list(out_expr)
self.num_outputs = len(self.output_templates)
assert self.num_outputs >= 1
assert all(tfutil.is_tf_expression(t) for t in self.output_templates)
# Perform sanity checks.
if any(t.shape.ndims is None for t in self.input_templates):
raise ValueError("Network input shapes not defined. Please call x.set_shape() for each input.")
if any(t.shape.ndims is None for t in self.output_templates):
raise ValueError("Network output shapes not defined. Please call x.set_shape() where applicable.")
if any(not isinstance(comp, Network) for comp in self.components.values()):
raise ValueError("Components of a Network must be Networks themselves.")
if len(self.components) != len(set(comp.name for comp in self.components.values())):
raise ValueError("Components of a Network must have unique names.")
# List inputs and outputs.
self.input_shapes = [tfutil.shape_to_list(t.shape) for t in self.input_templates]
self.output_shapes = [tfutil.shape_to_list(t.shape) for t in self.output_templates]
self.input_shape = self.input_shapes[0]
self.output_shape = self.output_shapes[0]
self.output_names = [t.name.split("/")[-1].split(":")[0] for t in self.output_templates]
# List variables.
self.own_vars = OrderedDict((var.name[len(self.scope) + 1:].split(":")[0], var) for var in tf.global_variables(self.scope + "/"))
self.vars = OrderedDict(self.own_vars)
self.vars.update((comp.name + "/" + name, var) for comp in self.components.values() for name, var in comp.vars.items())
self.trainables = OrderedDict((name, var) for name, var in self.vars.items() if var.trainable)
self.var_global_to_local = OrderedDict((var.name.split(":")[0], name) for name, var in self.vars.items())
def reset_own_vars(self) -> None:
"""Re-initialize all variables of this network, excluding sub-networks."""
tfutil.run([var.initializer for var in self.own_vars.values()])
def reset_vars(self) -> None:
"""Re-initialize all variables of this network, including sub-networks."""
tfutil.run([var.initializer for var in self.vars.values()])
def reset_trainables(self) -> None:
"""Re-initialize all trainable variables of this network, including sub-networks."""
tfutil.run([var.initializer for var in self.trainables.values()])
def get_output_for(self, *in_expr: TfExpression, return_as_list: bool = False, **dynamic_kwargs) -> Union[TfExpression, List[TfExpression]]:
"""Construct TensorFlow expression(s) for the output(s) of this network, given the input expression(s)."""
assert len(in_expr) == self.num_inputs
assert not all(expr is None for expr in in_expr)
# Finalize build func kwargs.
build_kwargs = dict(self.static_kwargs)
build_kwargs.update(dynamic_kwargs)
build_kwargs["is_template_graph"] = False
build_kwargs["components"] = self.components
# Build TensorFlow graph to evaluate the network.
with tfutil.absolute_variable_scope(self.scope, reuse=True), tf.name_scope(self.name):
assert tf.get_variable_scope().name == self.scope
valid_inputs = [expr for expr in in_expr if expr is not None]
final_inputs = []
for expr, name, shape in zip(in_expr, self.input_names, self.input_shapes):
if expr is not None:
expr = tf.identity(expr, name=name)
else:
expr = tf.zeros([tf.shape(valid_inputs[0])[0]] + shape[1:], name=name)
final_inputs.append(expr)
out_expr = self._build_func(*final_inputs, **build_kwargs)
# Propagate input shapes back to the user-specified expressions.
for expr, final in zip(in_expr, final_inputs):
if isinstance(expr, tf.Tensor):
expr.set_shape(final.shape)
# Express outputs in the desired format.
assert tfutil.is_tf_expression(out_expr) or isinstance(out_expr, tuple)
if return_as_list:
out_expr = [out_expr] if tfutil.is_tf_expression(out_expr) else list(out_expr)
return out_expr
def get_var_local_name(self, var_or_global_name: Union[TfExpression, str]) -> str:
"""Get the local name of a given variable, without any surrounding name scopes."""
assert tfutil.is_tf_expression(var_or_global_name) or isinstance(var_or_global_name, str)
global_name = var_or_global_name if isinstance(var_or_global_name, str) else var_or_global_name.name
return self.var_global_to_local[global_name]
def find_var(self, var_or_local_name: Union[TfExpression, str]) -> TfExpression:
"""Find variable by local or global name."""
assert tfutil.is_tf_expression(var_or_local_name) or isinstance(var_or_local_name, str)
return self.vars[var_or_local_name] if isinstance(var_or_local_name, str) else var_or_local_name
def get_var(self, var_or_local_name: Union[TfExpression, str]) -> np.ndarray:
"""Get the value of a given variable as NumPy array.
Note: This method is very inefficient -- prefer to use tflib.run(list_of_vars) whenever possible."""
return self.find_var(var_or_local_name).eval()
def set_var(self, var_or_local_name: Union[TfExpression, str], new_value: Union[int, float, np.ndarray]) -> None:
"""Set the value of a given variable based on the given NumPy array.
Note: This method is very inefficient -- prefer to use tflib.set_vars() whenever possible."""
tfutil.set_vars({self.find_var(var_or_local_name): new_value})
def __getstate__(self) -> dict:
"""Pickle export."""
state = dict()
state["version"] = 3
state["name"] = self.name
state["static_kwargs"] = dict(self.static_kwargs)
state["components"] = dict(self.components)
state["build_module_src"] = self._build_module_src
state["build_func_name"] = self._build_func_name
state["variables"] = list(zip(self.own_vars.keys(), tfutil.run(list(self.own_vars.values()))))
return state
def __setstate__(self, state: dict) -> None:
"""Pickle import."""
# pylint: disable=attribute-defined-outside-init
tfutil.assert_tf_initialized()
self._init_fields()
# Execute custom import handlers.
for handler in _import_handlers:
state = handler(state)
# Set basic fields.
assert state["version"] in [2, 3]
self.name = state["name"]
self.static_kwargs = util.EasyDict(state["static_kwargs"])
self.components = util.EasyDict(state.get("components", {}))
self._build_module_src = state["build_module_src"]
self._build_func_name = state["build_func_name"]
# Create temporary module from the imported source code.
module_name = "_tflib_network_import_" + uuid.uuid4().hex
module = types.ModuleType(module_name)
sys.modules[module_name] = module
_import_module_src[module] = self._build_module_src
exec(self._build_module_src, module.__dict__) # pylint: disable=exec-used
# Locate network build function in the temporary module.
self._build_func = util.get_obj_from_module(module, self._build_func_name)
assert callable(self._build_func)
# Init TensorFlow graph.
self._init_graph()
self.reset_own_vars()
tfutil.set_vars({self.find_var(name): value for name, value in state["variables"]})
def clone(self, name: str = None, **new_static_kwargs) -> "Network":
"""Create a clone of this network with its own copy of the variables."""
# pylint: disable=protected-access
net = object.__new__(Network)
net._init_fields()
net.name = name if name is not None else self.name
net.static_kwargs = util.EasyDict(self.static_kwargs)
net.static_kwargs.update(new_static_kwargs)
net._build_module_src = self._build_module_src
net._build_func_name = self._build_func_name
net._build_func = self._build_func
net._init_graph()
net.copy_vars_from(self)
return net
def copy_own_vars_from(self, src_net: "Network") -> None:
"""Copy the values of all variables from the given network, excluding sub-networks."""
names = [name for name in self.own_vars.keys() if name in src_net.own_vars]
tfutil.set_vars(tfutil.run({self.vars[name]: src_net.vars[name] for name in names}))
def copy_vars_from(self, src_net: "Network") -> None:
"""Copy the values of all variables from the given network, including sub-networks."""
names = [name for name in self.vars.keys() if name in src_net.vars]
tfutil.set_vars(tfutil.run({self.vars[name]: src_net.vars[name] for name in names}))
def copy_trainables_from(self, src_net: "Network") -> None:
"""Copy the values of all trainable variables from the given network, including sub-networks."""
names = [name for name in self.trainables.keys() if name in src_net.trainables]
tfutil.set_vars(tfutil.run({self.vars[name]: src_net.vars[name] for name in names}))
def convert(self, new_func_name: str, new_name: str = None, **new_static_kwargs) -> "Network":
"""Create new network with the given parameters, and copy all variables from this network."""
if new_name is None:
new_name = self.name
static_kwargs = dict(self.static_kwargs)
static_kwargs.update(new_static_kwargs)
net = Network(name=new_name, func_name=new_func_name, **static_kwargs)
net.copy_vars_from(self)
return net
def setup_as_moving_average_of(self, src_net: "Network", beta: TfExpressionEx = 0.99, beta_nontrainable: TfExpressionEx = 0.0) -> tf.Operation:
"""Construct a TensorFlow op that updates the variables of this network
to be slightly closer to those of the given network."""
with tfutil.absolute_name_scope(self.scope + "/_MovingAvg"):
ops = []
for name, var in self.vars.items():
if name in src_net.vars:
cur_beta = beta if name in self.trainables else beta_nontrainable
new_value = tfutil.lerp(src_net.vars[name], var, cur_beta)
ops.append(var.assign(new_value))
return tf.group(*ops)
def run(self,
*in_arrays: Tuple[Union[np.ndarray, None], ...],
input_transform: dict = None,
output_transform: dict = None,
return_as_list: bool = False,
print_progress: bool = False,
minibatch_size: int = None,
num_gpus: int = 1,
assume_frozen: bool = False,
sess: tf.Session = None,
**dynamic_kwargs) -> Union[np.ndarray, Tuple[np.ndarray, ...], List[np.ndarray]]:
"""Run this network for the given NumPy array(s), and return the output(s) as NumPy array(s).
Args:
input_transform: A dict specifying a custom transformation to be applied to the input tensor(s) before evaluating the network.
The dict must contain a 'func' field that points to a top-level function. The function is called with the input
TensorFlow expression(s) as positional arguments. Any remaining fields of the dict will be passed in as kwargs.
output_transform: A dict specifying a custom transformation to be applied to the output tensor(s) after evaluating the network.
The dict must contain a 'func' field that points to a top-level function. The function is called with the output
TensorFlow expression(s) as positional arguments. Any remaining fields of the dict will be passed in as kwargs.
return_as_list: True = return a list of NumPy arrays, False = return a single NumPy array, or a tuple if there are multiple outputs.
print_progress: Print progress to the console? Useful for very large input arrays.
minibatch_size: Maximum minibatch size to use, None = disable batching.
num_gpus: Number of GPUs to use.
            assume_frozen: Improve multi-GPU performance by assuming that the trainable parameters will remain unchanged between calls.
dynamic_kwargs: Additional keyword arguments to be passed into the network build function.
"""
assert len(in_arrays) == self.num_inputs
assert not all(arr is None for arr in in_arrays)
assert input_transform is None or util.is_top_level_function(input_transform["func"])
assert output_transform is None or util.is_top_level_function(output_transform["func"])
output_transform, dynamic_kwargs = _handle_legacy_output_transforms(output_transform, dynamic_kwargs)
num_items = in_arrays[0].shape[0]
if minibatch_size is None:
minibatch_size = num_items
# Construct unique hash key from all arguments that affect the TensorFlow graph.
key = dict(input_transform=input_transform, output_transform=output_transform, num_gpus=num_gpus, assume_frozen=assume_frozen, dynamic_kwargs=dynamic_kwargs)
def unwind_key(obj):
if isinstance(obj, dict):
return [(key, unwind_key(value)) for key, value in sorted(obj.items())]
if callable(obj):
return util.get_top_level_function_name(obj)
return obj
key = repr(unwind_key(key))
# Build graph.
if key not in self._run_cache:
with tfutil.absolute_name_scope(self.scope + "/_Run"), tf.control_dependencies(None):
with tf.device("/cpu:0"):
in_expr = [tf.placeholder(tf.float32, name=name) for name in self.input_names]
in_split = list(zip(*[tf.split(x, num_gpus) for x in in_expr]))
out_split = []
for gpu in range(num_gpus):
with tf.device("/gpu:%d" % gpu):
net_gpu = self.clone() if assume_frozen else self
in_gpu = in_split[gpu]
if input_transform is not None:
in_kwargs = dict(input_transform)
in_gpu = in_kwargs.pop("func")(*in_gpu, **in_kwargs)
in_gpu = [in_gpu] if tfutil.is_tf_expression(in_gpu) else list(in_gpu)
assert len(in_gpu) == self.num_inputs
out_gpu = net_gpu.get_output_for(*in_gpu, return_as_list=True, **dynamic_kwargs)
if output_transform is not None:
out_kwargs = dict(output_transform)
out_gpu = out_kwargs.pop("func")(*out_gpu, **out_kwargs)
out_gpu = [out_gpu] if tfutil.is_tf_expression(out_gpu) else list(out_gpu)
assert len(out_gpu) == self.num_outputs
out_split.append(out_gpu)
with tf.device("/cpu:0"):
out_expr = [tf.concat(outputs, axis=0) for outputs in zip(*out_split)]
self._run_cache[key] = in_expr, out_expr
# Run minibatches.
in_expr, out_expr = self._run_cache[key]
out_arrays = [np.empty([num_items] + tfutil.shape_to_list(expr.shape)[1:], expr.dtype.name) for expr in out_expr]
for mb_begin in range(0, num_items, minibatch_size):
if print_progress:
print("\r%d / %d" % (mb_begin, num_items), end="")
mb_end = min(mb_begin + minibatch_size, num_items)
mb_num = mb_end - mb_begin
mb_in = [src[mb_begin : mb_end] if src is not None else np.zeros([mb_num] + shape[1:]) for src, shape in zip(in_arrays, self.input_shapes)]
            if sess is None:
                mb_out = tf.get_default_session().run(out_expr, dict(zip(in_expr, mb_in)))
            else:
                mb_out = sess.run(out_expr, dict(zip(in_expr, mb_in)))
for dst, src in zip(out_arrays, mb_out):
dst[mb_begin: mb_end] = src
# Done.
if print_progress:
print("\r%d / %d" % (num_items, num_items))
if not return_as_list:
out_arrays = out_arrays[0] if len(out_arrays) == 1 else tuple(out_arrays)
return out_arrays
def list_ops(self) -> List[TfExpression]:
include_prefix = self.scope + "/"
exclude_prefix = include_prefix + "_"
ops = tf.get_default_graph().get_operations()
ops = [op for op in ops if op.name.startswith(include_prefix)]
ops = [op for op in ops if not op.name.startswith(exclude_prefix)]
return ops
def list_layers(self) -> List[Tuple[str, TfExpression, List[TfExpression]]]:
"""Returns a list of (layer_name, output_expr, trainable_vars) tuples corresponding to
individual layers of the network. Mainly intended to be used for reporting."""
layers = []
def recurse(scope, parent_ops, parent_vars, level):
# Ignore specific patterns.
if any(p in scope for p in ["/Shape", "/strided_slice", "/Cast", "/concat", "/Assign"]):
return
# Filter ops and vars by scope.
global_prefix = scope + "/"
local_prefix = global_prefix[len(self.scope) + 1:]
cur_ops = [op for op in parent_ops if op.name.startswith(global_prefix) or op.name == global_prefix[:-1]]
cur_vars = [(name, var) for name, var in parent_vars if name.startswith(local_prefix) or name == local_prefix[:-1]]
if not cur_ops and not cur_vars:
return
# Filter out all ops related to variables.
for var in [op for op in cur_ops if op.type.startswith("Variable")]:
var_prefix = var.name + "/"
cur_ops = [op for op in cur_ops if not op.name.startswith(var_prefix)]
# Scope does not contain ops as immediate children => recurse deeper.
contains_direct_ops = any("/" not in op.name[len(global_prefix):] and op.type != "Identity" for op in cur_ops)
if (level == 0 or not contains_direct_ops) and (len(cur_ops) + len(cur_vars)) > 1:
visited = set()
for rel_name in [op.name[len(global_prefix):] for op in cur_ops] + [name[len(local_prefix):] for name, _var in cur_vars]:
token = rel_name.split("/")[0]
if token not in visited:
recurse(global_prefix + token, cur_ops, cur_vars, level + 1)
visited.add(token)
return
# Report layer.
layer_name = scope[len(self.scope) + 1:]
layer_output = cur_ops[-1].outputs[0] if cur_ops else cur_vars[-1][1]
layer_trainables = [var for _name, var in cur_vars if var.trainable]
layers.append((layer_name, layer_output, layer_trainables))
recurse(self.scope, self.list_ops(), list(self.vars.items()), 0)
return layers
def print_layers(self, title: str = None, hide_layers_with_no_params: bool = False) -> None:
"""Print a summary table of the network structure."""
rows = [[title if title is not None else self.name, "Params", "OutputShape", "WeightShape"]]
rows += [["---"] * 4]
total_params = 0
for layer_name, layer_output, layer_trainables in self.list_layers():
num_params = sum(np.prod(tfutil.shape_to_list(var.shape)) for var in layer_trainables)
weights = [var for var in layer_trainables if var.name.endswith("/weight:0")]
weights.sort(key=lambda x: len(x.name))
if len(weights) == 0 and len(layer_trainables) == 1:
weights = layer_trainables
total_params += num_params
if not hide_layers_with_no_params or num_params != 0:
num_params_str = str(num_params) if num_params > 0 else "-"
output_shape_str = str(layer_output.shape)
weight_shape_str = str(weights[0].shape) if len(weights) >= 1 else "-"
rows += [[layer_name, num_params_str, output_shape_str, weight_shape_str]]
rows += [["---"] * 4]
rows += [["Total", str(total_params), "", ""]]
widths = [max(len(cell) for cell in column) for column in zip(*rows)]
print()
for row in rows:
print(" ".join(cell + " " * (width - len(cell)) for cell, width in zip(row, widths)))
print()
def setup_weight_histograms(self, title: str = None) -> None:
"""Construct summary ops to include histograms of all trainable parameters in TensorBoard."""
if title is None:
title = self.name
with tf.name_scope(None), tf.device(None), tf.control_dependencies(None):
for local_name, var in self.trainables.items():
if "/" in local_name:
p = local_name.split("/")
name = title + "_" + p[-1] + "/" + "_".join(p[:-1])
else:
name = title + "_toplevel/" + local_name
tf.summary.histogram(name, var)
#----------------------------------------------------------------------------
# Backwards-compatible emulation of legacy output transformation in Network.run().
_print_legacy_warning = True
def _handle_legacy_output_transforms(output_transform, dynamic_kwargs):
global _print_legacy_warning
legacy_kwargs = ["out_mul", "out_add", "out_shrink", "out_dtype"]
if not any(kwarg in dynamic_kwargs for kwarg in legacy_kwargs):
return output_transform, dynamic_kwargs
if _print_legacy_warning:
_print_legacy_warning = False
print()
print("WARNING: Old-style output transformations in Network.run() are deprecated.")
print("Consider using 'output_transform=dict(func=tflib.convert_images_to_uint8)'")
print("instead of 'out_mul=127.5, out_add=127.5, out_dtype=np.uint8'.")
print()
assert output_transform is None
new_kwargs = dict(dynamic_kwargs)
new_transform = {kwarg: new_kwargs.pop(kwarg) for kwarg in legacy_kwargs if kwarg in dynamic_kwargs}
new_transform["func"] = _legacy_output_transform_func
return new_transform, new_kwargs
def _legacy_output_transform_func(*expr, out_mul=1.0, out_add=0.0, out_shrink=1, out_dtype=None):
if out_mul != 1.0:
expr = [x * out_mul for x in expr]
if out_add != 0.0:
expr = [x + out_add for x in expr]
if out_shrink > 1:
ksize = [1, 1, out_shrink, out_shrink]
expr = [tf.nn.avg_pool(x, ksize=ksize, strides=ksize, padding="VALID", data_format="NCHW") for x in expr]
if out_dtype is not None:
if tf.as_dtype(out_dtype).is_integer:
expr = [tf.round(x) for x in expr]
expr = [tf.saturate_cast(x, out_dtype) for x in expr]
return expr
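#----------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module).
# It shows the typical Network lifecycle: construct from a top-level build
# function, run inference on NumPy arrays, and clone a frozen copy.
# "my_module.G_paper" and the "fmaps" kwarg are assumed placeholder names, and
# TensorFlow is assumed to be initialized first (tflib.init_tf() in the
# original StyleGAN code base).
#
#   import numpy as np
#   net = Network(name="G", func_name="my_module.G_paper", fmaps=64)
#   latents = np.random.randn(8, *net.input_shape[1:])
#   images = net.run(latents, minibatch_size=4, print_progress=True)
#   frozen = net.clone()        # independent copy of all variables
#   net.print_layers()          # tabular summary of the template graph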
| 50.889076 | 165 | 0.646719 |
f73777d7ca2d4d80693cb15dd56f511c05f1c49c | 3,163 | py | Python | alipay/aop/api/domain/AlipayUserFamilyShareAuthCheckModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | ["Apache-2.0"] | null | null | null | alipay/aop/api/domain/AlipayUserFamilyShareAuthCheckModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | ["Apache-2.0"] | null | null | null | alipay/aop/api/domain/AlipayUserFamilyShareAuthCheckModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayUserFamilyShareAuthCheckModel(object):
def __init__(self):
self._resource_id = None
self._scene_id = None
self._target_biz_user_id = None
self._target_user_biz_source = None
self._user_id = None
@property
def resource_id(self):
return self._resource_id
@resource_id.setter
def resource_id(self, value):
self._resource_id = value
@property
def scene_id(self):
return self._scene_id
@scene_id.setter
def scene_id(self, value):
self._scene_id = value
@property
def target_biz_user_id(self):
return self._target_biz_user_id
@target_biz_user_id.setter
def target_biz_user_id(self, value):
self._target_biz_user_id = value
@property
def target_user_biz_source(self):
return self._target_user_biz_source
@target_user_biz_source.setter
def target_user_biz_source(self, value):
self._target_user_biz_source = value
@property
def user_id(self):
return self._user_id
@user_id.setter
def user_id(self, value):
self._user_id = value
def to_alipay_dict(self):
params = dict()
if self.resource_id:
if hasattr(self.resource_id, 'to_alipay_dict'):
params['resource_id'] = self.resource_id.to_alipay_dict()
else:
params['resource_id'] = self.resource_id
if self.scene_id:
if hasattr(self.scene_id, 'to_alipay_dict'):
params['scene_id'] = self.scene_id.to_alipay_dict()
else:
params['scene_id'] = self.scene_id
if self.target_biz_user_id:
if hasattr(self.target_biz_user_id, 'to_alipay_dict'):
params['target_biz_user_id'] = self.target_biz_user_id.to_alipay_dict()
else:
params['target_biz_user_id'] = self.target_biz_user_id
if self.target_user_biz_source:
if hasattr(self.target_user_biz_source, 'to_alipay_dict'):
params['target_user_biz_source'] = self.target_user_biz_source.to_alipay_dict()
else:
params['target_user_biz_source'] = self.target_user_biz_source
if self.user_id:
if hasattr(self.user_id, 'to_alipay_dict'):
params['user_id'] = self.user_id.to_alipay_dict()
else:
params['user_id'] = self.user_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayUserFamilyShareAuthCheckModel()
if 'resource_id' in d:
o.resource_id = d['resource_id']
if 'scene_id' in d:
o.scene_id = d['scene_id']
if 'target_biz_user_id' in d:
o.target_biz_user_id = d['target_biz_user_id']
if 'target_user_biz_source' in d:
o.target_user_biz_source = d['target_user_biz_source']
if 'user_id' in d:
o.user_id = d['user_id']
return o
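# Round-trip sketch (added for illustration; not part of the generated SDK
# file). from_alipay_dict builds a model from a plain dict and to_alipay_dict
# serializes it back; the field values below are made-up placeholders.
#
#   m = AlipayUserFamilyShareAuthCheckModel.from_alipay_dict(
#       {'user_id': 'user_1', 'scene_id': 'scene_1'})
#   assert m.to_alipay_dict() == {'user_id': 'user_1', 'scene_id': 'scene_1'}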
| 31.316832 | 95 | 0.62251 |
f7378301a74dc9cd9e025b71204a6e6ed1b8e4b4 | 4,520 | py | Python | pypayd/interfaces/blockr.py | ser/pypayd-ng | 63d0e09ca5e966177874e6e2e82f6036898a56b0 | ["MIT"] | null | null | null | pypayd/interfaces/blockr.py | ser/pypayd-ng | 63d0e09ca5e966177874e6e2e82f6036898a56b0 | ["MIT"] | null | null | null | pypayd/interfaces/blockr.py | ser/pypayd-ng | 63d0e09ca5e966177874e6e2e82f6036898a56b0 | ["MIT"] | null | null | null |
"""
blockr.io
"""
import logging
import hashlib
from hashlib import sha256
import requests
from .. import config
from binascii import hexlify, unhexlify
def getUrl(request_string):
return requests.get(request_string).json()
def setHost():
config.BLOCKCHAIN_CONNECT = ('http://tbtc.blockr.io' if config.TESTNET else 'http://btc.blockr.io')
#And fix this
def check():
return getInfo()
def getInfo():
result = getUrl(config.BLOCKCHAIN_CONNECT + '/api/v1/coin/info', )
if 'status' in result and result['status'] == 'success':
return {
"info": {
"blocks": result['data']['last_block']['nb'],
"difficulty": result['data']['last_block']['difficulty']
}
}
return result
def getUtxo(address):
result = getUrl(config.BLOCKCHAIN_CONNECT + '/api/v1/address/unspent/{}/'.format(address))
if 'status' in result and result['status'] == 'success':
utxo = []
for txo in result['data']['unspent']:
newtxo = {
'address': address,
'txid': txo['tx'],
'vout': txo['n'],
'ts': 0,
'scriptPubKey': txo['script'],
'amount': float(txo['amount']),
'confirmations': txo['confirmations'],
'confirmationsFromCache': False
}
utxo.append(newtxo)
return utxo
return None
def getAddressInfo(address):
infos = getUrl(config.BLOCKCHAIN_CONNECT + '/api/v1/address/info/{}'.format(address), )
if 'status' in infos and infos['status'] == 'success':
txs = getUrl(config.BLOCKCHAIN_CONNECT + '/api/v1/address/txs/{}'.format(address), )
if 'status' in txs and txs['status'] == 'success':
transactions = []
for tx in txs['data']['txs']:
transactions.append(tx['tx'])
return {
'addrStr': address,
'balance': infos['data']['balance'],
'balanceSat': infos['data']['balance'] * config.UNIT,
'totalReceived': infos['data']['totalreceived'],
'totalReceivedSat': infos['data']['totalreceived'] * config.UNIT,
'unconfirmedBalance': 0,
'unconfirmedBalanceSat': 0,
'unconfirmedTxApperances': 0,
'txApperances': txs['data']['nb_txs'],
'transactions': transactions
}
return None
def getTxInfo(tx_hash):
tx = getUrl(config.BLOCKCHAIN_CONNECT + '/api/v1/tx/raw/{}'.format(tx_hash))
if tx.get('status') == 'success':
valueOut = 0
for vout in tx['data']['tx']['vout']:
valueOut += vout['value']
return {
'txid': tx_hash,
'version': tx['data']['tx']['version'],
'locktime': tx['data']['tx']['locktime'],
'blockhash': tx['data']['tx'].get('blockhash', None),
'confirmations': tx['data']['tx'].get('confirmations', None),
'time': tx['data']['tx'].get('time', None),
'blocktime': tx['data']['tx'].get('blocktime', None),
'valueOut': valueOut,
'vin': tx['data']['tx']['vin'],
'vout': tx['data']['tx']['vout']
}
return None
def sourceAddressesFromTX(tx_full):
'''Return source (outbound) addresses for a bitcoin tx'''
return [addressForPubKey(i['scriptSig']['asm'].split(" ")[1]) for i in tx_full['vin']]
#This can be replaced with the pycoin function
_b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
def addressForPubKey(pubkey_hex, testnet=None):
if testnet is None:
testnet = config.TESTNET
ripehash = hashlib.new('ripemd160')
step1 = unhexlify(pubkey_hex)
step2 = sha256(step1).digest()
ripehash.update(step2)
if testnet:
step4 = b'\x6F' + ripehash.digest()
else:
step4 = b'\x00' + ripehash.digest()
step5 = sha256(step4).digest()
step6 = sha256(step5).digest()
chksum = step6[:4]
address = step4 + chksum
addr_58 = encodeBase58(address)
return addr_58
def encodeBase58(v):
long_value = int.from_bytes(v, 'big')
result = ''
while long_value >= 58:
div, mod = divmod(long_value, 58)
result = _b58chars[mod] + result
long_value = div
result = _b58chars[long_value] + result
nPad = 0
for c in v:
if c == ord(b'\0'): nPad += 1
else: break
return (_b58chars[0]*nPad) + result
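# Example (added for illustration; not part of the original module): deriving
# a mainnet P2PKH address from a compressed public key with the helpers above.
# The key below is the well-known secp256k1 generator point, so the result is
# deterministic Base58Check text starting with '1'.
#
#   addr = addressForPubKey(
#       '0279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798',
#       testnet=False)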
| 34.503817 | 103 | 0.562389 |
f737ea22ff2ba4acb8102d13caef6197f4c5f1ca | 6,618 | py | Python | dev/local/data/pipeline.py | vguerra/fastai_docs | 95df902ef5cd08bcd58d5ca64bc8a6ea3f297531 | ["Apache-2.0"] | null | null | null | dev/local/data/pipeline.py | vguerra/fastai_docs | 95df902ef5cd08bcd58d5ca64bc8a6ea3f297531 | ["Apache-2.0"] | null | null | null | dev/local/data/pipeline.py | vguerra/fastai_docs | 95df902ef5cd08bcd58d5ca64bc8a6ea3f297531 | ["Apache-2.0"] | null | null | null |
#AUTOGENERATED! DO NOT EDIT! File to edit: dev/03_data_pipeline.ipynb (unless otherwise specified).
__all__ = ['get_func', 'Func', 'Sig', 'compose_tfms', 'batch_to_samples', 'mk_transform', 'Pipeline', 'TfmdBase',
'TfmdList', 'TfmdDS']
#Cell
from ..torch_basics import *
from ..test import *
from .transform import *
from ..notebook.showdoc import show_doc
#Cell
def get_func(t, name, *args, **kwargs):
"Get the `t.name` (potentially partial-ized with `args` and `kwargs`) or `noop` if not defined"
f = getattr(t, name, noop)
return f if not (args or kwargs) else partial(f, *args, **kwargs)
#Cell
class Func():
"Basic wrapper around a `name` with `args` and `kwargs` to call on a given type"
def __init__(self, name, *args, **kwargs): self.name,self.args,self.kwargs = name,args,kwargs
def __repr__(self): return f'sig: {self.name}({self.args}, {self.kwargs})'
def _get(self, t): return get_func(t, self.name, *self.args, **self.kwargs)
def __call__(self,t): return L(t).mapped(self._get) if is_listy(t) else self._get(t)
#Cell
class _Sig():
def __getattr__(self,k):
def _inner(*args, **kwargs): return Func(k, *args, **kwargs)
return _inner
Sig = _Sig()
#Cell
def compose_tfms(x, tfms, is_enc=True, reverse=False, **kwargs):
    "Apply all the transforms in `tfms` to `x`, maybe in `reverse` order; uses each tfm's `decode` when `is_enc=False`"
if reverse: tfms = reversed(tfms)
for f in tfms:
if not is_enc: f = f.decode
x = f(x, **kwargs)
return x
#Cell
def batch_to_samples(b, max_n=10):
"'Transposes' a batch to (at most `max_n`) samples"
if isinstance(b, Tensor): return b[:max_n]
else:
res = L(b).mapped(partial(batch_to_samples,max_n=max_n))
return L(retain_types(res.zipped(), [b]))
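# Example (added for illustration): a tuple batch of two stacked tensors is
# "transposed" into per-sample tuples and truncated to at most max_n, e.g.
#   batch_to_samples((tensor([1,2,3]), tensor([4,5,6])), max_n=2)
# yields [(1, 4), (2, 5)] (the elements remain scalar tensors).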
#Cell
def mk_transform(f, as_item=True):
"Convert function `f` to `Transform` if it isn't already one"
f = instantiate(f)
return f if isinstance(f,Transform) else Transform(f, as_item=as_item)
#Cell
class Pipeline:
"A pipeline of composed (for encode/decode) transforms, setup with types"
def __init__(self, funcs=None, as_item=False, filt=None):
if isinstance(funcs, Pipeline): funcs = funcs.fs
elif isinstance(funcs, Transform): funcs = [funcs]
self.filt,self.default = filt,None
self.fs = L(ifnone(funcs,[noop])).mapped(mk_transform).sorted(key='order')
self.set_as_item(as_item)
for f in self.fs:
name = camel2snake(type(f).__name__)
a = getattr(self,name,None)
if a is not None: f = L(a)+f
setattr(self, name, f)
def set_as_item(self, as_item):
self.as_item = as_item
for f in self.fs: f.as_item = as_item
def setup(self, items=None):
self.items = items
tfms,self.fs = self.fs,L()
for t in tfms: self.add(t,items)
def add(self,t, items=None):
t.setup(items)
self.fs.append(t)
def __call__(self, o): return compose_tfms(o, tfms=self.fs, filt=self.filt)
def decode (self, o): return compose_tfms(o, tfms=self.fs, is_enc=False, reverse=True, filt=self.filt)
def __repr__(self): return f"Pipeline: {self.fs}"
def __getitem__(self,i): return self.fs[i]
def decode_batch(self, b, max_n=10): return batch_to_samples(b, max_n=max_n).mapped(self.decode)
def __setstate__(self,data): self.__dict__.update(data)
def __getattr__(self,k):
if k.startswith('_') or k=='fs': raise AttributeError(k)
res = sum(self.fs.attrgot(k).mapped(L), [])
if not res: raise AttributeError(k)
return res[0] if len(res)==1 else res
def show(self, o, ctx=None, **kwargs):
for f in reversed(self.fs):
res = self._show(o, ctx, **kwargs)
if res is not None: return res
o = f.decode(o, filt=self.filt)
return self._show(o, ctx, **kwargs)
def _show(self, o, ctx, **kwargs):
o1 = [o] if self.as_item or not is_listy(o) else o
if not all(hasattr(o_, 'show') for o_ in o1): return
for o_ in o1: ctx = o_.show(ctx=ctx, **kwargs)
return ifnone(ctx,1)
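# Usage sketch (added for illustration; not an exported cell): a Pipeline
# composes its Transforms sorted by `order`, and decode() applies their
# decodes in reverse. `Neg` below is a made-up Transform for the example.
#
#   class Neg(Transform):
#       def encodes(self, x): return -x
#       def decodes(self, x): return -x
#   pipe = Pipeline([Neg()])
#   assert pipe(3) == -3 and pipe.decode(pipe(3)) == 3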
#Cell
class TfmdBase(L):
"Base class for transformed lists"
def _gets(self, i): return L(self._get(i_) for i_ in mask2idxs(i))
def subset(self, idxs): return self._new(super()._gets(idxs))
def decode_at(self, idx): return self.decode(self[idx])
def show_at(self, idx, **kwargs): return self.show(self[idx], **kwargs)
#Cell
class TfmdList(TfmdBase):
"A `Pipeline` of `tfms` applied to a collection of `items`"
def __init__(self, items, tfms, do_setup=True, as_item=True, use_list=None, filt=None):
super().__init__(items, use_list=use_list)
if isinstance(tfms,TfmdList): tfms = tfms.tfms
if isinstance(tfms,Pipeline): do_setup=False
self.tfms = Pipeline(tfms, as_item=as_item, filt=filt)
if do_setup: self.setup()
def _new(self, items, *args, **kwargs): return super()._new(items, tfms=self.tfms, do_setup=False, use_list=None, filt=self.filt)
def _get (self, i): return self.tfms(super()._get(i))
def __repr__(self): return f"{self.__class__.__name__}: {self.items}\ntfms - {self.tfms.fs}"
# Delegating to `self.tfms`
def show(self, o, **kwargs): return self.tfms.show(o, **kwargs)
def setup(self): self.tfms.setup(self)
def decode(self, x, **kwargs): return self.tfms.decode(x, **kwargs)
def __call__(self, x, **kwargs): return self.tfms.__call__(x, **kwargs)
@property
def filt(self): return self.tfms.filt
@filt.setter
def filt(self,v): self.tfms.filt = v
#Cell
@docs
class TfmdDS(TfmdBase):
"A dataset that creates a tuple from each `tfms`, passed thru `ds_tfms`"
def __init__(self, items, tfms=None, do_setup=True, use_list=None, filt=None):
super().__init__(items, use_list=use_list)
        if tfms is None: tfms = [None]
self.tls = [TfmdList(items, t, do_setup=do_setup, filt=filt, use_list=use_list) for t in L(tfms)]
def _get(self, it): return tuple(tl._get(it) for tl in self.tls)
def __repr__(self): return coll_repr(self)
def decode(self, o): return tuple(it.decode(o_) for o_,it in zip(o,self.tls))
def show(self, o, ctx=None, **kwargs):
for o_,it in zip(o,self.tls): ctx = it.show(o_, ctx=ctx, **kwargs)
return ctx
@property
def filt(self): return self.tls[0].filt
@filt.setter
def filt(self,v):
for tl in self.tls: tl.filt = v
_docs=dict(
decode="Compose `decode` of all `tuple_tfms` then all `tfms` on `i`",
        show="Show item `o` in `ctx`")
| 39.392857 | 133 | 0.643246 |
f73810b5a713532fd33d2666d1759247bd9d52f6 | 682 | py | Python | 2021/12/solution2.py | frenzymadness/aoc | c9018e757bae61a696e675a827aef873995abdd3 | ["WTFPL"] | 2 | 2020-12-04T09:45:38.000Z | 2020-12-07T14:06:12.000Z | 2021/12/solution2.py | frenzymadness/aoc | c9018e757bae61a696e675a827aef873995abdd3 | ["WTFPL"] | null | null | null | 2021/12/solution2.py | frenzymadness/aoc | c9018e757bae61a696e675a827aef873995abdd3 | ["WTFPL"] | null | null | null |
from collections import defaultdict, deque
with open("input.txt") as input_file:
lines = input_file.read().splitlines()
g = defaultdict(set)
for line in lines:
n1, n2 = line.split("-")
g[n1].add(n2)
g[n2].add(n1)
def walk(node, path, return_here):
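    # Depth-first enumeration of all paths from `node` to "end". `return_here`
    # is True while the one allowed second visit to a small (lowercase) cave is
    # still available; it flips to False once that visit is spent (AoC 2021
    # day 12, part 2 rule). "start" and "end" may never be revisited.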
if node == "end":
yield path
for next_node in g[node]:
if next_node not in path or next_node.isupper():
yield from walk(next_node, path + [next_node], return_here)
elif next_node.islower() and return_here and next_node not in ("start", "end"):
yield from walk(next_node, path + [next_node], False)
print(sum(1 for _ in walk("start", ["start"], True)))
| 28.416667 | 87 | 0.631965 |
f7381de4aebc9051177ffd55accf0b7d97283f70 | 2,547 | py | Python | elementally/tests/test.py | dem1995/elementally | 192990ad53580d62e278def6508c466589f38ecd | ["X11"] | null | null | null | elementally/tests/test.py | dem1995/elementally | 192990ad53580d62e278def6508c466589f38ecd | ["X11"] | null | null | null | elementally/tests/test.py | dem1995/elementally | 192990ad53580d62e278def6508c466589f38ecd | ["X11"] | null | null | null |
import elementally as elmy
import unittest
import itertools
pos_array = [1, 2, 3, 4, 5]
pos_array_2 = [5, 4, 3, 2, 1]
neg_array = [-10, -20, -30, -40, -50]
neg_array_2 = [-50, -40, -30, -20, -10]
def odd_generator():
i=1
while(True):
yield i
i+=2
def complex_generator():
i=1
while(True):
yield i
i+=2j
class TestBasicArithmetic(unittest.TestCase):
def test_sum_lists(self):
"""Checks whether two lists sum properly"""
self.assertListEqual(elmy.sum(pos_array, pos_array_2), [6, 6, 6, 6, 6])
self.assertListEqual(elmy.sum(pos_array, neg_array), [-9, -18, -27, -36, -45])
    def test_sum_list_with_generator(self):
        """Checks whether a list sums with a generator properly, and returns a list"""
list_odd_numbers_plus_index = elmy.sum(pos_array, odd_generator())
self.assertListEqual(list_odd_numbers_plus_index, [2, 5, 8, 11, 14])
def test_sum_generator_with_list(self):
"""Checks whether a generator sums with a list properly, and remains a generator"""
augend = odd_generator()
gen_odd_numbers_plus_index = elmy.sum(augend, pos_array)
self.assertEqual(type(augend), type(gen_odd_numbers_plus_index))
slice_of_summed_generator = itertools.islice(gen_odd_numbers_plus_index, 8)
self.assertSequenceEqual(list(slice_of_summed_generator), [2, 5, 8, 11, 14])
def test_sum_generator_with_generator(self):
"""Checks whether a generator sums with a generator properly, and returns a generator"""
augend = odd_generator()
summed = elmy.sum(augend, odd_generator())
self.assertSequenceEqual([2, 6, 10, 14], list(itertools.islice(summed, 4)))
self.assertEqual(type(augend), type(summed))
class TestMultistepOps(unittest.TestCase):
def test_negation_generator(self):
"""Checks whether adding a sequences to its negation yields 0s"""
operand = odd_generator()
negated = elmy.negation(odd_generator())
zeros = elmy.sum(operand, negated)
for i in itertools.islice(zeros, 1000):
self.assertEqual(i, 0)
def test_reciprocal_multiplication(self):
"""Checks whether multiplying a sequence by its reciprocal yields 1s"""
augend = complex_generator()
reciprocal = elmy.product(augend, elmy.reciprocal(complex_generator()))
for i in itertools.islice(reciprocal, 1000):
self.assertAlmostEqual(i, 1, 14)
if __name__ == '__main__':
unittest.main()
| 39.184615 | 96 | 0.669415 |
f7381dee751bc8ce42c7f5d24e881d37f73e6d1c | 2,733 | py | Python | cli/iotexetl/rpc/iotex_rpc.py | blockchain-etl/iotex-etl | bd350c3190acac35d17532eff383e05d08011e24 | ["MIT"] | 3 | 2020-07-04T13:53:38.000Z | 2020-07-30T15:07:35.000Z | cli/iotexetl/rpc/iotex_rpc.py | blockchain-etl/iotex-etl | bd350c3190acac35d17532eff383e05d08011e24 | ["MIT"] | 13 | 2020-07-16T06:07:33.000Z | 2020-08-20T10:35:10.000Z | cli/iotexetl/rpc/iotex_rpc.py | blockchain-etl/iotex-etl | bd350c3190acac35d17532eff383e05d08011e24 | ["MIT"] | 1 | 2021-01-20T10:06:20.000Z | 2021-01-20T10:06:20.000Z |
# The MIT License (MIT)
#
# Copyright (c) 2020 Evgeny Medvedev, evge.medvedev@gmail.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from urllib.parse import urlparse
import grpc
from iotexetl.rpc.iotexapi import api_pb2
from iotexetl.rpc.iotexapi import api_pb2_grpc
class IotexRpc:
def __init__(self, provider_uri, timeout=60):
self.timeout = timeout
channel = get_channel_from_uri_string(provider_uri)
self.stub = api_pb2_grpc.APIServiceStub(channel)
def get_raw_blocks(self, start_height, count):
return self.stub.GetRawBlocks(
api_pb2.GetRawBlocksRequest(startHeight=start_height, count=count, withReceipts=True), timeout=self.timeout)
def get_block_metas(self, start_height, count):
return self.stub.GetBlockMetas(api_pb2.GetBlockMetasRequest(
byIndex=api_pb2.GetBlockMetasByIndexRequest(start=start_height, count=count)
), timeout=self.timeout)
def get_transaction_logs(self, block_number):
return self.stub.GetTransactionLogByBlockHeight(
api_pb2.GetTransactionLogByBlockHeightRequest(blockHeight=block_number), timeout=self.timeout)
def get_chain_meta(self):
return self.stub.GetChainMeta(api_pb2.GetChainMetaRequest(), timeout=self.timeout)
def get_channel_from_uri_string(provider_uri):
uri = urlparse(provider_uri)
if uri.scheme == 'grpcs':
credentials = grpc.ssl_channel_credentials()
channel = grpc.secure_channel(uri.netloc, credentials)
elif uri.scheme == 'grpc':
channel = grpc.insecure_channel(uri.netloc)
else:
raise ValueError(f'The uri scheme {uri.scheme} is not recognized. Use grpc:// or grpcs://')
return channel
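# Usage sketch (added for illustration; not part of the original module). The
# endpoint below is an assumed example of a TLS-enabled IoTeX gateway.
#
#   rpc = IotexRpc('grpcs://api.iotex.one:443')
#   meta = rpc.get_chain_meta()
#   blocks = rpc.get_raw_blocks(start_height=1, count=10)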
| 42.046154 | 120 | 0.751921 |
f73842990d3c34bb39b1979920ca99697d282613 | 413 | py | Python | agent/indy_catalyst_agent/storage/tests/test__record.py | TelegramSam/indy-catalyst | 476f5a773d90d8b50960b49e63b823bd13325d4a | ["Apache-2.0"] | null | null | null | agent/indy_catalyst_agent/storage/tests/test__record.py | TelegramSam/indy-catalyst | 476f5a773d90d8b50960b49e63b823bd13325d4a | ["Apache-2.0"] | null | null | null | agent/indy_catalyst_agent/storage/tests/test__record.py | TelegramSam/indy-catalyst | 476f5a773d90d8b50960b49e63b823bd13325d4a | ["Apache-2.0"] | null | null | null |
import pytest
from indy_catalyst_agent.storage import StorageRecord
class TestStorageRecord:
def test_create(self):
record_type = "TYPE"
record_value = "VALUE"
record = StorageRecord(record_type, record_value)
assert record.type == record_type
assert record.value == record_value
assert record.id and type(record.id) is str
assert record.tags == {}
| 25.8125 | 57 | 0.680387 |
f7387ae9a9a7f58c3749489a080bb1a0fe1242b5 | 3,377 | py | Python | checkov/common/bridgecrew/image_scanning/image_scanner.py | graybrandonpfg/checkov | 3081a8560f6369465314ee8f4ac8a8ec01649d68 | ["Apache-2.0"] | 4,013 | 2019-12-09T13:16:54.000Z | 2022-03-31T14:31:01.000Z | checkov/common/bridgecrew/image_scanning/image_scanner.py | graybrandonpfg/checkov | 3081a8560f6369465314ee8f4ac8a8ec01649d68 | ["Apache-2.0"] | 1,258 | 2019-12-17T09:55:51.000Z | 2022-03-31T19:17:17.000Z | checkov/common/bridgecrew/image_scanning/image_scanner.py | graybrandonpfg/checkov | 3081a8560f6369465314ee8f4ac8a8ec01649d68 | ["Apache-2.0"] | 638 | 2019-12-19T08:57:38.000Z | 2022-03-30T21:38:37.000Z |
import logging
import subprocess # nosec
import docker
import json
import os
import time
from checkov.common.bridgecrew.image_scanning.docker_image_scanning_integration import docker_image_scanning_integration
TWISTCLI_FILE_NAME = 'twistcli'
DOCKER_IMAGE_SCAN_RESULT_FILE_NAME = 'docker-image-scan-results.json'
def _get_docker_image_name(docker_image_id):
try:
docker_client = docker.from_env()
return docker_client.images.get(docker_image_id).attrs['RepoDigests'][0].split('@')[0]
except Exception as e:
        logging.error("Docker image needs to have a repository")
raise e
def _get_dockerfile_content(dockerfile_path):
try:
with open(dockerfile_path) as f:
return f.read()
except FileNotFoundError as e:
logging.error(f"Path to Dockerfile is invalid\n{e}")
raise e
except Exception as e:
logging.error(f"Failed to read Dockerfile content\n{e}")
raise e
class ImageScanner:
def __init__(self):
self.docker_image_name = ''
self.dockerfile_content = ''
def setup_scan(self, docker_image_id, dockerfile_path, skip_extract_image_name):
try:
if skip_extract_image_name:
# Provide a default image name in case the image has not been tagged with a name
self.docker_image_name = f'repository/image{str(time.time() * 1000)}'
else:
self.docker_image_name = _get_docker_image_name(docker_image_id)
self.dockerfile_content = _get_dockerfile_content(dockerfile_path)
docker_image_scanning_integration.download_twistcli(TWISTCLI_FILE_NAME)
except Exception as e:
logging.error(f"Failed to setup docker image scanning\n{e}")
raise e
@staticmethod
def cleanup_scan():
os.remove(TWISTCLI_FILE_NAME)
logging.info(f'twistcli file removed')
@staticmethod
def run_image_scan(docker_image_id):
command_args = f"./{TWISTCLI_FILE_NAME} images scan --address {docker_image_scanning_integration.get_proxy_address()} --token {docker_image_scanning_integration.get_bc_api_key()} --details --output-file {DOCKER_IMAGE_SCAN_RESULT_FILE_NAME} {docker_image_id}".split()
        subprocess.run(command_args, check=True)  # nosec  (passing an argument list with shell=True would discard the arguments on POSIX)
logging.info(f'TwistCLI ran successfully on image {docker_image_id}')
with open(DOCKER_IMAGE_SCAN_RESULT_FILE_NAME) as docker_image_scan_result_file:
scan_result = json.load(docker_image_scan_result_file)
return scan_result
def scan(self, docker_image_id, dockerfile_path, skip_extract_image_name=False):
try:
self.setup_scan(docker_image_id, dockerfile_path, skip_extract_image_name)
scan_result = self.run_image_scan(docker_image_id)
docker_image_scanning_integration.report_results(self.docker_image_name, dockerfile_path,
self.dockerfile_content,
twistcli_scan_result=scan_result)
logging.info(f'Docker image scanning results reported to the platform')
self.cleanup_scan()
except Exception as e:
logging.error(f"Failed to scan docker image\n{e}")
raise e
image_scanner = ImageScanner()
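# Usage sketch (added for illustration): the module-level singleton is meant
# to be driven like this; the image id and Dockerfile path are placeholders.
#
#   image_scanner.scan('sha256:0123abcd', '/path/to/Dockerfile')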
| 40.686747 | 274 | 0.687296 |
f738ab779415b269bda9727a7fdc7c85dda2ca86 | 2,925 | py | Python | code_server/run_evaluate.py | Dragon-M-Ren/grad_code | d814b81adaec709d5dffd737f0c350953cc361fd | ["Apache-2.0"] | null | null | null | code_server/run_evaluate.py | Dragon-M-Ren/grad_code | d814b81adaec709d5dffd737f0c350953cc361fd | ["Apache-2.0"] | null | null | null | code_server/run_evaluate.py | Dragon-M-Ren/grad_code | d814b81adaec709d5dffd737f0c350953cc361fd | ["Apache-2.0"] | null | null | null |
from evaluate import *
import tensorflow as tf
from utils import *
from model.gcn import GCN
from model.mlp import MLP
from model.firstcheb import FirstCheb
from model.gat import GAT
from model.dcnn import DCNN
from model.spectralcnn import SpectralCNN
from model.chebnet import ChebNet
from model.graphsage import GraphSage
from model.graphsage_meanpool import GraphSageMeanPool
from model.graphsage_maxpool import GraphSageMaxPool
from hyperpara_optim import *
import scipy.sparse as sp
import numpy as np
import pickle as pkl
from process_data import *
from draw import *
import os
'''
This file will run the test script
Three kinds of file are saved
result_path/dataset_name: the original data
processed_result_path/dataset_name/: processed data
'''
#Model done
#MLP GCN FirstCheb GAT
#Model left
#SpectralCNN, DCNN, GraphSage,
model_list = [DCNN]
model_name_list = ['dcnn']
dataset_name_list = ['pubmed']
#dataset_name_list = ['citeseer', 'cora', 'pubmed']
dataset_numbers = 10
parameter_appendix_list = ['rand']
dataset_path = './data/evaluate'
parameter_path = './hyperparameter'
result_path = './direct_output'
processed_result_path = './processed_output'
evaluate_times = 2
train_size = 230
val_size = 500
for model, model_name in zip(model_list, model_name_list):
for dataset_name in dataset_name_list:
for parameter_appendix in parameter_appendix_list:
train_info_list, acc_list, time_list = evaluate_model(model,
model_name, dataset_path, dataset_name, dataset_numbers, parameter_path,
parameter_appendix, result_path, evaluate_times, train_size, val_size)
#save to file
save_path = os.path.join(result_path, dataset_name)
file_name = model_name + parameter_appendix
#make directory
if not os.path.exists(save_path):
os.makedirs(save_path)
save_file = open(os.path.join(save_path, file_name), 'wb')
pkl.dump((train_info_list, acc_list, time_list), save_file)
save_file.close()
#process output data
train_info, acc, time = process_output(train_info_list, acc_list, time_list)
#save processed data
save_path = os.path.join(processed_result_path, dataset_name)
file_name = model_name + parameter_appendix
#make directory
if not os.path.exists(save_path):
os.makedirs(save_path)
save_file = open(os.path.join(save_path, file_name), 'wb')
pkl.dump((train_info, acc, time), save_file)
save_file.close()
#save train image
save_path = os.path.join(processed_result_path, dataset_name)
plot_train(train_info['train_loss'], train_info['train_acc'],
train_info['val_loss'], train_info['val_acc'],
save_path, model_name, True)
| 31.117021 | 89 | 0.696068 |
f738af43ce1b3a193391862e705092eb5b0f05da | 2,489 | py | Python | unified/tests/test_xfs.py | LoganCook/reporting-unified | 9d2c7e083c5c400e9120bb8552348e41406a1bc1 | ["Apache-2.0"] | null | null | null | unified/tests/test_xfs.py | LoganCook/reporting-unified | 9d2c7e083c5c400e9120bb8552348e41406a1bc1 | ["Apache-2.0"] | null | null | null | unified/tests/test_xfs.py | LoganCook/reporting-unified | 9d2c7e083c5c400e9120bb8552348e41406a1bc1 | ["Apache-2.0"] | null | null | null |
import unittest
from flask import json
from ..apis.xfs import app
from . import client_get, now, now_minus_24hrs
get = client_get(app)
class XFSTestCase(unittest.TestCase):
def test_root_not_allowed(self):
rv = get('/')
self.assertEqual(rv.status_code, 404)
def test_all_top_objects_should_pass(self):
for route in app.url_map.iter_rules():
rule = route.rule
            # top-level objects have URL patterns like /<name>
            # /ingest only accepts PUT and OPTIONS
if rule not in ('/static/<path:filename>', '/ingest') and 'summary' not in rule and 'list' not in rule:
print('Testing %s' % rule)
resp = get('%s?count=10' % rule)
data = json.loads(resp.data)
self.assertEqual(resp.status_code, 200)
self.assertGreaterEqual(len(data), 1)
def test_filesystem_not_found(self):
rule = '/filesystem/not/summary'
        # Handles a non-UUID id quietly
resp = get(rule)
self.assertEqual(resp.status_code, 200)
data = json.loads(resp.data)
self.assertEqual(len(data), 0)
rule = '/filesystem/12345678123456781234567812345678/summary'
resp = get(rule)
self.assertEqual(resp.status_code, 200)
data = json.loads(resp.data)
self.assertEqual(len(data), 0)
def test_usage_summary(self):
resp = get('/usage/summary?start=%s&end=%s' % (now_minus_24hrs, now))
self.assertEqual(resp.status_code, 200)
data = json.loads(resp.data)
self.assertTrue(isinstance(data, list))
print(data)
def test_instance_methods(self):
instance_types = ('filesystem', 'owner')
methods = ('summary', 'list')
for itype in instance_types:
resp = get('/%s?count=1' % itype)
self.assertEqual(resp.status_code, 200)
data = json.loads(resp.data)
print(data)
self.assertTrue(isinstance(data, list))
self.assertGreater(len(data), 0)
target_id = data[0]['id']
for method in methods:
resp = get('/%s/%s/%s?start=%s&end=%s' % (itype, target_id, method, now_minus_24hrs, now))
self.assertEqual(resp.status_code, 200)
data = json.loads(resp.data)
print(data)
self.assertTrue(isinstance(data, list) or isinstance(data, dict))
self.assertGreater(len(data), 0)
| 37.149254 | 115 | 0.593009 |
f739129edf4dad67c2248bd06ec3d471d9ba24b7 | 1,126 | py | Python | almost-triangles/almost-triangles.py | cdstanford/curiosities | 511e55b9dbf2b49db4593be92a0e4fce07888eaf | [
"MIT"
] | 1 | 2022-01-18T06:02:01.000Z | 2022-01-18T06:02:01.000Z | almost-triangles/almost-triangles.py | cdstanford/curiosities | 511e55b9dbf2b49db4593be92a0e4fce07888eaf | [
"MIT"
] | null | null | null | almost-triangles/almost-triangles.py | cdstanford/curiosities | 511e55b9dbf2b49db4593be92a0e4fce07888eaf | [
"MIT"
] | null | null | null | """
A triangle is an "almost right triangle" if one of its angles differs from
90 degrees by at most 15 degrees. A triangle is an "almost isosceles
triangle" if two of its angles differ from each other by at most 15
degrees. Prove that all acute triangles are either almost right or almost
isosceles.
Note: if "at most 15" is replaced by "less than 15" in the problem statement
(change "<= 15" to "< 15" everywhere below), the formula becomes satisfiable
and we get the following counterexample: a triangle with angles 45, 60, and 75.
"""
import z3
def triangle(x, y, z):
return z3.And(x > 0, y > 0, z > 0, x + y + z == 180)
def acute(x, y, z):
return z3.And(x < 90, y < 90, z < 90)
def abs(x):
return z3.If(x > 0, x, -x)
def almost_right(x, y, z):
return z3.Or(abs(x - 90) <= 15, abs(y - 90) <= 15, abs(z - 90) <= 15)
def almost_isosceles(x, y, z):
return z3.Or(abs(x - y) <= 15, abs(x - z) <= 15, abs(y - z) <= 15)
x = z3.Real("x")
y = z3.Real("y")
z = z3.Real("z")
z3.solve(
triangle(x, y, z),
acute(x, y, z),
z3.Not(almost_right(x, y, z)),
z3.Not(almost_isosceles(x, y, z)),
)
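# Variant check (added for illustration, following the docstring above): with
# strict "< 15" bounds the formula becomes satisfiable, and z3 should report a
# model such as the 45/60/75 triangle.
def almost_right_strict(x, y, z):
    return z3.Or(abs(x - 90) < 15, abs(y - 90) < 15, abs(z - 90) < 15)

def almost_isosceles_strict(x, y, z):
    return z3.Or(abs(x - y) < 15, abs(x - z) < 15, abs(y - z) < 15)

z3.solve(
    triangle(x, y, z),
    acute(x, y, z),
    z3.Not(almost_right_strict(x, y, z)),
    z3.Not(almost_isosceles_strict(x, y, z)),
)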
| 28.15 | 79 | 0.626998 |
f73915cd2a13a0f75508d4f48f01ea62bb60222f | 3,177 | py | Python | py_segment/graph.py | RookieHong/python-objectness | 02af4d090502e1a7269b23673709cfea75da09c9 | [
"Apache-2.0"
] | 20 | 2020-09-27T08:41:38.000Z | 2022-03-09T10:31:27.000Z | py_segment/graph.py | RookieHong/python-objectness | 02af4d090502e1a7269b23673709cfea75da09c9 | [
"Apache-2.0"
] | 2 | 2021-12-02T07:48:41.000Z | 2021-12-21T07:14:25.000Z | py_segment/graph.py | RookieHong/python-objectness | 02af4d090502e1a7269b23673709cfea75da09c9 | [
"Apache-2.0"
] | 4 | 2020-11-19T13:38:52.000Z | 2022-03-09T14:40:28.000Z | class Node:
def __init__(self, parent, rank=0, size=1):
self.parent = parent
self.rank = rank
self.size = size
def __repr__(self):
return '(parent=%s, rank=%s, size=%s)' % (self.parent, self.rank, self.size)
class Forest:
def __init__(self, num_nodes):
self.nodes = [Node(i) for i in range(num_nodes)]
self.num_sets = num_nodes
def size_of(self, i):
return self.nodes[i].size
def find(self, n):
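        # Walk parent pointers up to the set's root, then point n directly at
        # the root (one-step path compression to speed up later lookups).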
temp = n
while temp != self.nodes[temp].parent:
temp = self.nodes[temp].parent
self.nodes[n].parent = temp
return temp
def merge(self, a, b):
if self.nodes[a].rank > self.nodes[b].rank:
self.nodes[b].parent = a
self.nodes[a].size = self.nodes[a].size + self.nodes[b].size
else:
self.nodes[a].parent = b
self.nodes[b].size = self.nodes[b].size + self.nodes[a].size
if self.nodes[a].rank == self.nodes[b].rank:
self.nodes[b].rank = self.nodes[b].rank + 1
self.num_sets = self.num_sets - 1
def print_nodes(self):
for node in self.nodes:
print(node)
def create_edge(img, width, x, y, x1, y1, diff):
vertex_id = lambda x, y: y * width + x
w = diff(img, x, y, x1, y1)
return (vertex_id(x, y), vertex_id(x1, y1), w)
def build_graph(img, width, height, diff, neighborhood_8=False):
graph_edges = []
for y in range(height):
for x in range(width):
if x > 0:
graph_edges.append(create_edge(img, width, x, y, x - 1, y, diff))
if y > 0:
graph_edges.append(create_edge(img, width, x, y, x, y - 1, diff))
if neighborhood_8:
if x > 0 and y > 0:
graph_edges.append(create_edge(img, width, x, y, x - 1, y - 1, diff))
if x > 0 and y < height - 1:
graph_edges.append(create_edge(img, width, x, y, x - 1, y + 1, diff))
return graph_edges
def remove_small_components(forest, graph, min_size):
for edge in graph:
a = forest.find(edge[0])
b = forest.find(edge[1])
if a != b and (forest.size_of(a) < min_size or forest.size_of(b) < min_size):
forest.merge(a, b)
return forest
def segment_graph(graph_edges, num_nodes, const, min_size, threshold_func):
# Step 1: initialization
forest = Forest(num_nodes)
weight = lambda edge: edge[2]
sorted_graph = sorted(graph_edges, key=weight)
threshold = [ threshold_func(1, const) for _ in range(num_nodes) ]
# Step 2: merging
for edge in sorted_graph:
parent_a = forest.find(edge[0])
parent_b = forest.find(edge[1])
a_condition = weight(edge) <= threshold[parent_a]
b_condition = weight(edge) <= threshold[parent_b]
if parent_a != parent_b and a_condition and b_condition:
forest.merge(parent_a, parent_b)
a = forest.find(parent_a)
threshold[a] = weight(edge) + threshold_func(forest.nodes[a].size, const)
return remove_small_components(forest, sorted_graph, min_size)
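# Minimal smoke test (added for illustration; not part of the original module).
# It segments a tiny 4x4 two-tone "image" stored as a flat list. The pixel
# diff and threshold functions are assumptions chosen to fit the signatures
# used above; tau implements Felzenszwalb's threshold tau(C) = k / |C|.
if __name__ == '__main__':
    width, height = 4, 4
    img = [10, 10, 200, 200] * height
    diff = lambda im, x, y, x1, y1: abs(im[y * width + x] - im[y1 * width + x1])
    tau = lambda size, const: const / size
    edges = build_graph(img, width, height, diff)
    forest = segment_graph(edges, width * height, const=300, min_size=1, threshold_func=tau)
    print('segments:', forest.num_sets)   # expected: 2 (left half vs right half)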
| 31.455446 | 89 | 0.579792 |
f7391612a071aa6a5a6f60058d789289104967d2 | 7,118 | py | Python | jnpy/app/pytdx_loader/engine.py | jojoquant/jonpy | 58692f8fbf398aab7be915a63d0a376e2e0e664c | [
"MIT"
] | 5 | 2020-05-19T07:32:39.000Z | 2022-03-14T09:09:48.000Z | jnpy/app/pytdx_loader/engine.py | jojoquant/jonpy | 58692f8fbf398aab7be915a63d0a376e2e0e664c | [
"MIT"
] | null | null | null | jnpy/app/pytdx_loader/engine.py | jojoquant/jonpy | 58692f8fbf398aab7be915a63d0a376e2e0e664c | [
"MIT"
] | 3 | 2020-04-02T08:30:17.000Z | 2020-05-03T12:12:05.000Z | import time
from typing import Callable
import pandas as pd
from datetime import datetime
from vnpy.event import EventEngine
from vnpy.trader.constant import Exchange, Interval
from vnpy.trader.engine import BaseEngine, MainEngine
from vnpy.trader.object import BarData
from vnpy.trader.utility import get_folder_path
from vnpy.trader.datafeed import BaseDatafeed, get_datafeed
from vnpy.trader.database import BaseDatabase, get_database
from jnpy.datasource.jotdx import ExhqAPI, IPsSource, FutureMarketCode, KBarType
APP_NAME = "PytdxLoader"
class PytdxLoaderEngine(BaseEngine):
""""""
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super().__init__(main_engine, event_engine, APP_NAME)
self.file_path: str = ""
self.symbol: str = ""
self.exchange: Exchange = Exchange.SSE
self.interval: Interval = Interval.MINUTE
self.datetime_head: str = ""
self.open_head: str = ""
self.close_head: str = ""
self.low_head: str = ""
self.high_head: str = ""
self.volume_head: str = ""
self.pytdx_ip_source = IPsSource()
self.ex_api = ExhqAPI()
self.datafeed: BaseDatafeed = get_datafeed()
self.database: BaseDatabase = get_database()
def to_bar_data(self, item,
symbol: str,
exchange: Exchange,
interval: Interval,
datetime_head: str,
open_head: str,
high_head: str,
low_head: str,
close_head: str,
volume_head: str,
open_interest_head: str
):
bar = BarData(
symbol=symbol,
exchange=exchange,
datetime=item[datetime_head].to_pydatetime(),
interval=interval,
volume=item[volume_head],
open_interest=item[open_interest_head],
open_price=item[open_head],
high_price=item[high_head],
low_price=item[low_head],
close_price=item[close_head],
gateway_name="DB"
)
return bar
def load_by_handle(
self,
data,
symbol: str,
exchange: Exchange,
interval: Interval,
datetime_head: str,
open_head: str,
high_head: str,
low_head: str,
close_head: str,
volume_head: str,
open_interest_head: str,
datetime_format: str,
update_qt_progress_bar: Callable,
opt_str: str
):
start_time = time.time()
try:
if isinstance(data[datetime_head][0], str):
data[datetime_head] = data[datetime_head].apply(
lambda x: datetime.strptime(x, datetime_format) if datetime_format else datetime.fromisoformat(x))
elif isinstance(data[datetime_head][0], pd.Timestamp):
self.write_log("datetime 格式为 pd.Timestamp, 不用处理.")
else:
self.write_log("未知datetime类型, 请检查")
self.write_log(f'df apply 处理日期时间 cost {time.time() - start_time:.2f}s')
except Exception:
self.write_log("通达信数据处理存在未知问题...")
return
if opt_str == "to_db":
start_time = time.time()
bars = data.apply(
self.to_bar_data,
args=(
symbol,
exchange,
interval,
datetime_head,
open_head,
high_head,
low_head,
close_head,
volume_head,
open_interest_head
),
axis=1).tolist()
            self.write_log(f'df apply bar conversion cost {time.time() - start_time:.2f}s')
# insert into database
self.database.save_bar_data(bars, update_qt_progress_bar)
elif opt_str == "high_to_db":
            start_time = time.time()  # same clock as the elapsed-time log below
            collection_str = f"{exchange.value}_{interval.value}_{symbol}"
            self.write_log(
                f"Start writing data into MongoDB"
                f"->{self.database.database}"
                f"->{collection_str}"
            )
            self.database.save_bar_df(
                df=data,
                table=collection_str,
                callback=update_qt_progress_bar
            )
            self.write_log(f'save_bar_df cost {time.time() - start_time:.2f}s')
elif opt_str == "to_csv":
csv_file_dir = get_folder_path("csv_files")
data.to_csv(f'{csv_file_dir}/{exchange.value}_{symbol}.csv', index=False)
start = data[datetime_head].iloc[0]
end = data[datetime_head].iloc[-1]
count = len(data)
return start, end, count
def load(
self,
symbol: str,
exchange: Exchange,
interval: Interval,
datetime_head: str,
open_head: str,
high_head: str,
low_head: str,
close_head: str,
volume_head: str,
open_interest_head: str,
datetime_format: str,
update_qt_progress_bar: Callable,
opt_str: str,
):
"""
        Fetch K-bars for the symbol from the fastest TDX extension server and dispatch them through load_by_handle.
"""
ip, port = self.pytdx_ip_source.get_fast_exhq_ip()
with self.ex_api.connect(ip, port):
params_dict = {
"category": KBarType[interval.name].value,
"market": FutureMarketCode[exchange.value].value,
"code": symbol,
}
data_df = self.ex_api.get_all_KBars_df(**params_dict)
# transform column name to vnpy format
data_df.rename(
columns={
"datetime": "Datetime",
"open": "Open",
"high": "High",
"low": "Low",
"close": "Close",
"position": "OpenInterest",
"trade": "Volume",
},
inplace=True
)
if data_df.empty:
return None, None, 0
else:
return self.load_by_handle(
data_df,
symbol=symbol,
exchange=exchange,
interval=interval,
datetime_head=datetime_head,
open_head=open_head,
high_head=high_head,
low_head=low_head,
close_head=close_head,
volume_head=volume_head,
open_interest_head=open_interest_head,
datetime_format=datetime_format,
update_qt_progress_bar=update_qt_progress_bar,
opt_str=opt_str
)
def write_log(self, msg: str):
self.main_engine.write_log(msg)
self.ex_api.info_log.write_log(msg)
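# --- Usage sketch (illustrative; not part of the original module) ---
# Driving PytdxLoaderEngine.load() by hand.  The column names match the
# rename() mapping above; the symbol and exchange are placeholders -- use
# whatever your FutureMarketCode/KBarType enums actually support.
def _demo_load(engine: "PytdxLoaderEngine"):
    result = engine.load(
        symbol="rb2110",                # hypothetical contract code
        exchange=Exchange.SHFE,         # assumed to be mapped in FutureMarketCode
        interval=Interval.MINUTE,
        datetime_head="Datetime",
        open_head="Open",
        high_head="High",
        low_head="Low",
        close_head="Close",
        volume_head="Volume",
        open_interest_head="OpenInterest",
        datetime_format="",             # empty -> ISO-format fallback above
        update_qt_progress_bar=lambda *_: None,  # no-op outside the Qt GUI
        opt_str="to_csv",               # write a CSV instead of hitting a DB
    )
    if result:
        start, end, count = result
        engine.write_log(f"loaded {count} bars from {start} to {end}")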
| 31.495575 | 118 | 0.52599 |
f7391676fb3e3868ff818f4de0d1cfe9e941e235 | 9,825 | py | Python | doc/conf.py | mikehulluk/morphforge | 2a95096f144ed4ea487decb735ce66706357d3c7 | [
"BSD-2-Clause"
] | 1 | 2021-01-21T11:31:59.000Z | 2021-01-21T11:31:59.000Z | doc/conf.py | mikehulluk/morphforge | 2a95096f144ed4ea487decb735ce66706357d3c7 | [
"BSD-2-Clause"
] | null | null | null | doc/conf.py | mikehulluk/morphforge | 2a95096f144ed4ea487decb735ce66706357d3c7 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# morphforge documentation build configuration file, created by
# sphinx-quickstart on Fri Mar 23 14:01:08 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# To allow stuff to build on RTD
import sys
class MockType(type):
def __init__(cls, name, bases, dct ):
super(MockType, cls).__init__(name, bases, dct)
pass
def __getattr__(cls, name):
return Mock()
def __str__(cls):
return 'custom str for %s' % (cls.__name__,)
class Mock(object):
__metaclass__ = MockType
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return Mock()
#@classmethod
def __getattr__(cls, name):
if name in ('__file__', '__path__'):
return '/dev/null'
elif name[0] == name[0].upper():
mockType = type(name, (), {})
mockType.__module__ = __name__
return mockType
else:
return Mock()
def __getitem__(self, key):
return Mock()
def __add__(self, rhs):
return Mock()
def __sub__(self, rhs):
return Mock()
def __mul__(self, rhs):
return Mock()
def __div__(self, rhs):
return Mock()
def __radd__(self, rhs):
return Mock()
def __rsub__(self, rhs):
return Mock()
def __rmul__(self, rhs):
return Mock()
def __rdiv__(self, rhs):
return Mock()
def __pow__(self, rhs):
return Mock()
MOCK_MODULES = ['numpy', 'pylab', 'scipy', 'mredoc', 'mreorg', 'quantities', 'matplotlib']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock
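# Illustrative check of the mock above: any attribute access, call, or
# arithmetic collapses back into a Mock, so module-level expressions inside
# the mocked packages (e.g. "numpy.pi * 2") cannot crash the docs build:
#   >>> isinstance((Mock().anything + 1) * Mock(), Mock)
#   True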
#URL to clean Read-The_Docs build dir:
#https://readthedocs.org/wipe/morphforge/latest/
sys.path.append('../src/')
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.pngmath',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.inheritance_diagram',
]
inheritance_graph_attrs = dict(rankdir="LR", size='"9.0, 8.0"',
fontsize=10, ratio='compress')
autodoc_default_flags =['undoc-members', 'members']
add_module_names = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'morphforge'
copyright = u'2012, Mike Hull'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1-alpha'
# The full version, including alpha/beta/rc tags.
release = '0.1-alpha'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "morphforge"
html_add_permalinks = False
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
html_use_opensearch = False
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'morphforgedoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'morphforge.tex', u'morphforge Documentation',
u'Mike Hull', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'morphforge', u'morphforge Documentation',
[u'Mike Hull'], 1)
]
html_theme = "haiku"
todo_include_todos=True
autosummary_generate = True
# AutoDoc:
def maybe_skip_member(app, what, name, obj, skip, options):
# Since we add 'toSWC', etc to MorphologyTree, we
# don't want this to show up in the documentation.
if 'members' in options:
if name.startswith('to') or name.startswith('from'):
return True
if name == "__weakref__":
return True
#print name
if name in ['__weakref__' ,'__dict__','__doc__','__module__']:
return True
return False
def setup(app):
app.connect('autodoc-skip-member', maybe_skip_member)
templates_path = ["_templates",]
rst_prolog = r"""
.. |MHThesis| replace:: :download:`Mike Hull's Ph.D Thesis </static/ThesisReducedToTools.pdf>`
"""
| 28.071429 | 94 | 0.688142 |
f739464879003de44eaf68232a6d4a41a29fc14e | 386 | py | Python | medium/Triangle/solution.py | ashutosh1919/leetcode-problems | 65f99a3694549af88c7702b598de1a8ccb7db5fb | [
"MIT"
] | 8 | 2021-08-21T19:10:04.000Z | 2022-03-11T14:30:02.000Z | medium/Triangle/solution.py | ashutosh1919/leetcode-problems | 65f99a3694549af88c7702b598de1a8ccb7db5fb | [
"MIT"
] | null | null | null | medium/Triangle/solution.py | ashutosh1919/leetcode-problems | 65f99a3694549af88c7702b598de1a8ccb7db5fb | [
"MIT"
] | 1 | 2021-08-24T06:29:02.000Z | 2021-08-24T06:29:02.000Z | # Time complexity: O(n^2) for a triangle with n rows (each cell is visited once).
# Approach: bottom-up dynamic programming -- fold each row into the minimum path sums of the row below, in place.
from typing import List
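# Worked example (illustrative): on the classic input
# [[2], [3, 4], [6, 5, 7], [4, 1, 8, 3]] the bottom row folds the third row
# into [7, 6, 10], then [9, 10], then [11] -- the minimum path 2+3+5+1 = 11.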
class Solution:
def minimumTotal(self, triangle: List[List[int]]) -> int:
n = len(triangle)
for i in range(n-1, 0, -1):
for j in range(0, len(triangle[i])-1):
triangle[i-1][j] += min(triangle[i][j], triangle[i][j+1])
        return triangle[0][0]
| 38.6 | 73 | 0.582902 |
f7398bf1eff15fbb3d9d6bd1f250fd98451b79cc | 20,748 | py | Python | scf/_vhf.py | gmwang18/pyscf | fcd6877751661c8a9743c1c872a4a2b65f6dd7ac | [
"BSD-2-Clause"
] | null | null | null | scf/_vhf.py | gmwang18/pyscf | fcd6877751661c8a9743c1c872a4a2b65f6dd7ac | [
"BSD-2-Clause"
] | null | null | null | scf/_vhf.py | gmwang18/pyscf | fcd6877751661c8a9743c1c872a4a2b65f6dd7ac | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
import sys
import ctypes
import _ctypes
import numpy
import pyscf.lib
from pyscf import gto
from pyscf.gto.moleintor import make_cintopt
libcvhf = pyscf.lib.load_library('libcvhf')
def _fpointer(name):
return ctypes.c_void_p(_ctypes.dlsym(libcvhf._handle, name))
class VHFOpt(object):
def __init__(self, mol, intor,
prescreen='CVHFnoscreen', qcondname=None, dmcondname=None):
self._this = ctypes.POINTER(_CVHFOpt)()
        # print(self._this.contents) here would raise "ValueError: NULL pointer access"
self._intor = _fpointer(intor)
self._cintopt = pyscf.lib.c_null_ptr()
self._dmcondname = dmcondname
self.init_cvhf_direct(mol, intor, prescreen, qcondname)
def init_cvhf_direct(self, mol, intor, prescreen, qcondname):
c_atm = numpy.asarray(mol._atm, dtype=numpy.int32, order='C')
c_bas = numpy.asarray(mol._bas, dtype=numpy.int32, order='C')
c_env = numpy.asarray(mol._env, dtype=numpy.double, order='C')
natm = ctypes.c_int(c_atm.shape[0])
nbas = ctypes.c_int(c_bas.shape[0])
self._cintopt = make_cintopt(c_atm, c_bas, c_env, intor)
# libcvhf.CVHFnr_optimizer(ctypes.byref(self._this),
# c_atm.ctypes.data_as(ctypes.c_void_p), natm,
# c_bas.ctypes.data_as(ctypes.c_void_p), nbas,
# c_env.ctypes.data_as(ctypes.c_void_p))
libcvhf.CVHFinit_optimizer(ctypes.byref(self._this),
c_atm.ctypes.data_as(ctypes.c_void_p), natm,
c_bas.ctypes.data_as(ctypes.c_void_p), nbas,
c_env.ctypes.data_as(ctypes.c_void_p))
self._this.contents.fprescreen = _fpointer(prescreen)
if prescreen != 'CVHFnoscreen':
fsetqcond = getattr(libcvhf, qcondname)
fsetqcond(self._this,
c_atm.ctypes.data_as(ctypes.c_void_p), natm,
c_bas.ctypes.data_as(ctypes.c_void_p), nbas,
c_env.ctypes.data_as(ctypes.c_void_p))
@property
def direct_scf_tol(self):
return self._this.contents.direct_scf_cutoff
@direct_scf_tol.setter
def direct_scf_tol(self, v):
self._this.contents.direct_scf_cutoff = v
def set_dm(self, dm, atm, bas, env):
if self._dmcondname is not None:
c_atm = numpy.asarray(atm, dtype=numpy.int32, order='C')
c_bas = numpy.asarray(bas, dtype=numpy.int32, order='C')
c_env = numpy.asarray(env, dtype=numpy.double, order='C')
natm = ctypes.c_int(c_atm.shape[0])
nbas = ctypes.c_int(c_bas.shape[0])
if isinstance(dm, numpy.ndarray) and dm.ndim == 2:
n_dm = 1
else:
n_dm = len(dm)
dm = numpy.asarray(dm, order='C')
fsetdm = getattr(libcvhf, self._dmcondname)
fsetdm(self._this,
dm.ctypes.data_as(ctypes.c_void_p), ctypes.c_int(n_dm),
c_atm.ctypes.data_as(ctypes.c_void_p), natm,
c_bas.ctypes.data_as(ctypes.c_void_p), nbas,
c_env.ctypes.data_as(ctypes.c_void_p))
class _CVHFOpt(ctypes.Structure):
_fields_ = [('nbas', ctypes.c_int),
('_padding', ctypes.c_int),
('direct_scf_cutoff', ctypes.c_double),
('q_cond', ctypes.c_void_p),
('dm_cond', ctypes.c_void_p),
('fprescreen', ctypes.c_void_p),
('r_vkscreen', ctypes.c_void_p)]
################################################
# for general DM
# hermi = 0 : arbitrary
# hermi = 1 : hermitian
# hermi = 2 : anti-hermitian
################################################
def incore(eri, dm, hermi=0):
assert(not numpy.iscomplexobj(eri))
eri = numpy.ascontiguousarray(eri)
dm = numpy.ascontiguousarray(dm)
nao = dm.shape[0]
vj = numpy.empty((nao,nao))
vk = numpy.empty((nao,nao))
npair = nao*(nao+1)//2
if eri.ndim == 2 and npair*npair == eri.size: # 4-fold symmetry eri
fdrv = getattr(libcvhf, 'CVHFnrs4_incore_drv')
# 'ijkl,kl->ij'
fvj = _fpointer('CVHFics4_kl_s2ij')
# 'ijkl,il->jk'
fvk = _fpointer('CVHFics4_il_s1jk')
# or
## 'ijkl,ij->kl'
#fvj = _fpointer('CVHFics4_ij_s2kl')
## 'ijkl,jk->il'
#fvk = _fpointer('CVHFics4_jk_s1il')
tridm = dm
elif eri.ndim == 1 and npair*(npair+1)//2 == eri.size: # 8-fold symmetry eri
fdrv = getattr(libcvhf, 'CVHFnrs8_incore_drv')
fvj = _fpointer('CVHFics8_tridm_vj')
if hermi == 1:
fvk = _fpointer('CVHFics8_jk_s2il')
else:
fvk = _fpointer('CVHFics8_jk_s1il')
tridm = pyscf.lib.pack_tril(pyscf.lib.transpose_sum(dm))
i = numpy.arange(nao)
tridm[i*(i+1)//2+i] *= .5
else:
raise RuntimeError('Array shape not consistent: DM %s, eri %s'
% (dm.shape, eri.shape))
fdrv(eri.ctypes.data_as(ctypes.c_void_p),
tridm.ctypes.data_as(ctypes.c_void_p),
vj.ctypes.data_as(ctypes.c_void_p),
dm.ctypes.data_as(ctypes.c_void_p),
vk.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(nao), fvj, fvk)
if hermi != 0:
vj = pyscf.lib.hermi_triu(vj, hermi)
vk = pyscf.lib.hermi_triu(vk, hermi)
else:
vj = pyscf.lib.hermi_triu(vj, 1)
return vj, vk
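# --- Usage sketch (illustrative; not part of the original module) ---
# incore() contracts a precomputed ERI array with a density matrix to give the
# Coulomb (vj) and exchange (vk) matrices.  The sketch below feeds it random
# numbers in the 4-fold packed shape just to show the calling convention; a
# real calculation would fill `eri` with int2e_sph() defined further down.
def _demo_incore():
    nao = 4
    npair = nao * (nao + 1) // 2
    eri = numpy.random.random((npair, npair))
    eri = eri + eri.T                    # symmetrize under (ij) <-> (kl)
    dm = numpy.eye(nao)                  # a trivially hermitian density matrix
    vj, vk = incore(eri, dm, hermi=1)
    return vj, vk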
# use cint2e_sph as cintor, CVHFnrs8_ij_s2kl, CVHFnrs8_jk_s2il as fjk to call
# direct_mapdm
def direct(dms, atm, bas, env, vhfopt=None, hermi=0):
c_atm = numpy.asarray(atm, dtype=numpy.int32, order='C')
c_bas = numpy.asarray(bas, dtype=numpy.int32, order='C')
c_env = numpy.asarray(env, dtype=numpy.double, order='C')
natm = ctypes.c_int(c_atm.shape[0])
nbas = ctypes.c_int(c_bas.shape[0])
if isinstance(dms, numpy.ndarray) and dms.ndim == 2:
n_dm = 1
nao = dms.shape[0]
dms = (numpy.asarray(dms, order='C'),)
else:
n_dm = len(dms)
nao = dms[0].shape[0]
dms = numpy.asarray(dms, order='C')
if vhfopt is None:
cintor = _fpointer('cint2e_sph')
cintopt = make_cintopt(c_atm, c_bas, c_env, 'cint2e_sph')
cvhfopt = pyscf.lib.c_null_ptr()
else:
vhfopt.set_dm(dms, atm, bas, env)
cvhfopt = vhfopt._this
cintopt = vhfopt._cintopt
cintor = vhfopt._intor
fdrv = getattr(libcvhf, 'CVHFnr_direct_drv')
fdot = _fpointer('CVHFdot_nrs8')
fvj = _fpointer('CVHFnrs8_ji_s2kl')
if hermi == 1:
fvk = _fpointer('CVHFnrs8_li_s2kj')
else:
fvk = _fpointer('CVHFnrs8_li_s1kj')
vjk = numpy.empty((2,n_dm,nao,nao))
fjk = (ctypes.c_void_p*(2*n_dm))()
dmsptr = (ctypes.c_void_p*(2*n_dm))()
vjkptr = (ctypes.c_void_p*(2*n_dm))()
for i in range(n_dm):
dmsptr[i] = dms[i].ctypes.data_as(ctypes.c_void_p)
vjkptr[i] = vjk[0,i].ctypes.data_as(ctypes.c_void_p)
fjk[i] = fvj
for i in range(n_dm):
dmsptr[n_dm+i] = dms[i].ctypes.data_as(ctypes.c_void_p)
vjkptr[n_dm+i] = vjk[1,i].ctypes.data_as(ctypes.c_void_p)
fjk[n_dm+i] = fvk
shls_slice = (ctypes.c_int*8)(*([0, c_bas.shape[0]]*4))
ao_loc = numpy.asarray(make_ao_loc(bas), dtype=numpy.int32)
fdrv(cintor, fdot, fjk, dmsptr, vjkptr,
ctypes.c_int(n_dm*2), ctypes.c_int(1),
shls_slice, ao_loc.ctypes.data_as(ctypes.c_void_p), cintopt, cvhfopt,
c_atm.ctypes.data_as(ctypes.c_void_p), natm,
c_bas.ctypes.data_as(ctypes.c_void_p), nbas,
c_env.ctypes.data_as(ctypes.c_void_p))
# vj must be symmetric
for idm in range(n_dm):
vjk[0,idm] = pyscf.lib.hermi_triu(vjk[0,idm], 1)
    if hermi != 0: # whether vk has (anti-)hermitian symmetry depends on hermi
for idm in range(n_dm):
vjk[1,idm] = pyscf.lib.hermi_triu(vjk[1,idm], hermi)
if n_dm == 1:
vjk = vjk.reshape(2,nao,nao)
return vjk
# call all fjk for each dm, the return array has len(dms)*len(jkdescript)*ncomp components
# jkdescript: 'ij->s1kl', 'kl->s2ij', ...
def direct_mapdm(intor, aosym, jkdescript,
dms, ncomp, atm, bas, env, vhfopt=None, shls_slice=None):
assert(aosym in ('s8', 's4', 's2ij', 's2kl', 's1',
'a4ij', 'a4kl', 'a2ij', 'a2kl'))
c_atm = numpy.asarray(atm, dtype=numpy.int32, order='C')
c_bas = numpy.asarray(bas, dtype=numpy.int32, order='C')
c_env = numpy.asarray(env, dtype=numpy.double, order='C')
natm = ctypes.c_int(c_atm.shape[0])
nbas = ctypes.c_int(c_bas.shape[0])
if isinstance(dms, numpy.ndarray) and dms.ndim == 2:
n_dm = 1
nao = dms.shape[0]
dms = (numpy.asarray(dms, order='C'),)
else:
n_dm = len(dms)
nao = dms[0].shape[0]
dms = [numpy.asarray(dm, order='C') for dm in dms]
if isinstance(jkdescript, str):
njk = 1
jkdescript = (jkdescript,)
else:
njk = len(jkdescript)
if vhfopt is None:
cintor = _fpointer(intor)
cintopt = make_cintopt(c_atm, c_bas, c_env, intor)
cvhfopt = pyscf.lib.c_null_ptr()
else:
vhfopt.set_dm(dms, atm, bas, env)
cvhfopt = vhfopt._this
cintopt = vhfopt._cintopt
cintor = vhfopt._intor
fdrv = getattr(libcvhf, 'CVHFnr_direct_drv')
dotsym = _INTSYMAP[aosym]
fdot = _fpointer('CVHFdot_nr'+dotsym)
if shls_slice is None:
shls_slice = (0, c_bas.shape[0])*4
ao_loc = numpy.asarray(make_ao_loc(bas), dtype=numpy.int32)
vjk = []
descr_sym = [x.split('->') for x in jkdescript]
fjk = (ctypes.c_void_p*(njk*n_dm))()
dmsptr = (ctypes.c_void_p*(njk*n_dm))()
vjkptr = (ctypes.c_void_p*(njk*n_dm))()
for i, (dmsym, vsym) in enumerate(descr_sym):
if dmsym in ('ij', 'kl', 'il', 'kj'):
            sys.stderr.write('unsupported DM description %s, transposing to %s\n' %
                             (dmsym, dmsym[::-1]))
dmsym = dmsym[::-1]
f1 = _fpointer('CVHFnr%s_%s_%s'%(aosym, dmsym, vsym))
vshape = (n_dm,ncomp) + get_dims(vsym[-2:], shls_slice, ao_loc)
vjk.append(numpy.empty(vshape))
for j in range(n_dm):
assert(dms[j].shape == get_dims(dmsym, shls_slice, ao_loc))
dmsptr[i*n_dm+j] = dms[j].ctypes.data_as(ctypes.c_void_p)
vjkptr[i*n_dm+j] = vjk[i][j].ctypes.data_as(ctypes.c_void_p)
fjk[i*n_dm+j] = f1
shls_slice = (ctypes.c_int*8)(*shls_slice)
fdrv(cintor, fdot, fjk, dmsptr, vjkptr,
ctypes.c_int(njk*n_dm), ctypes.c_int(ncomp),
shls_slice, ao_loc.ctypes.data_as(ctypes.c_void_p), cintopt, cvhfopt,
c_atm.ctypes.data_as(ctypes.c_void_p), natm,
c_bas.ctypes.data_as(ctypes.c_void_p), nbas,
c_env.ctypes.data_as(ctypes.c_void_p))
if n_dm * ncomp == 1:
vjk = [v.reshape(v.shape[2:]) for v in vjk]
elif n_dm == 1:
vjk = [v.reshape((ncomp,)+v.shape[2:]) for v in vjk]
elif ncomp == 1:
vjk = [v.reshape((n_dm,)+v.shape[2:]) for v in vjk]
if njk == 1:
vjk = vjk[0]
return vjk
# for density matrices in dms, bind each dm to a jk operator
# jkdescript: 'ij->s1kl', 'kl->s2ij', ...
def direct_bindm(intor, aosym, jkdescript,
dms, ncomp, atm, bas, env, vhfopt=None, shls_slice=None):
assert(aosym in ('s8', 's4', 's2ij', 's2kl', 's1',
'a4ij', 'a4kl', 'a2ij', 'a2kl'))
c_atm = numpy.asarray(atm, dtype=numpy.int32, order='C')
c_bas = numpy.asarray(bas, dtype=numpy.int32, order='C')
c_env = numpy.asarray(env, dtype=numpy.double, order='C')
natm = ctypes.c_int(c_atm.shape[0])
nbas = ctypes.c_int(c_bas.shape[0])
if isinstance(dms, numpy.ndarray) and dms.ndim == 2:
n_dm = 1
nao = dms.shape[0]
dms = (numpy.asarray(dms, order='C'),)
else:
n_dm = len(dms)
nao = dms[0].shape[0]
dms = [numpy.asarray(dm, order='C') for dm in dms]
if isinstance(jkdescript, str):
njk = 1
jkdescript = (jkdescript,)
else:
njk = len(jkdescript)
assert(njk == n_dm)
if vhfopt is None:
cintor = _fpointer(intor)
cintopt = make_cintopt(c_atm, c_bas, c_env, intor)
cvhfopt = pyscf.lib.c_null_ptr()
else:
vhfopt.set_dm(dms, atm, bas, env)
cvhfopt = vhfopt._this
cintopt = vhfopt._cintopt
cintor = vhfopt._intor
fdrv = getattr(libcvhf, 'CVHFnr_direct_drv')
dotsym = _INTSYMAP[aosym]
fdot = _fpointer('CVHFdot_nr'+dotsym)
if shls_slice is None:
shls_slice = (0, c_bas.shape[0])*4
ao_loc = numpy.asarray(make_ao_loc(bas), dtype=numpy.int32)
vjk = []
descr_sym = [x.split('->') for x in jkdescript]
fjk = (ctypes.c_void_p*(n_dm))()
dmsptr = (ctypes.c_void_p*(n_dm))()
vjkptr = (ctypes.c_void_p*(n_dm))()
for i, (dmsym, vsym) in enumerate(descr_sym):
if dmsym in ('ij', 'kl', 'il', 'kj'):
            sys.stderr.write('unsupported DM description %s, transposing to %s\n' %
                             (dmsym, dmsym[::-1]))
dmsym = dmsym[::-1]
f1 = _fpointer('CVHFnr%s_%s_%s'%(aosym, dmsym, vsym))
assert(dms[i].shape == get_dims(dmsym, shls_slice, ao_loc))
vshape = (ncomp,) + get_dims(vsym[-2:], shls_slice, ao_loc)
vjk.append(numpy.empty(vshape))
dmsptr[i] = dms[i].ctypes.data_as(ctypes.c_void_p)
vjkptr[i] = vjk[i].ctypes.data_as(ctypes.c_void_p)
fjk[i] = f1
shls_slice = (ctypes.c_int*8)(*shls_slice)
fdrv(cintor, fdot, fjk, dmsptr, vjkptr,
ctypes.c_int(n_dm), ctypes.c_int(ncomp),
shls_slice, ao_loc.ctypes.data_as(ctypes.c_void_p), cintopt, cvhfopt,
c_atm.ctypes.data_as(ctypes.c_void_p), natm,
c_bas.ctypes.data_as(ctypes.c_void_p), nbas,
c_env.ctypes.data_as(ctypes.c_void_p))
if ncomp == 1:
vjk = [v.reshape(v.shape[1:]) for v in vjk]
if njk == 1:
vjk = vjk[0]
return vjk
# 8-fold permutation symmetry
def int2e_sph(atm, bas, env):
c_atm = numpy.asarray(atm, dtype=numpy.int32, order='C')
c_bas = numpy.asarray(bas, dtype=numpy.int32, order='C')
c_env = numpy.asarray(env, dtype=numpy.double, order='C')
natm = ctypes.c_int(c_atm.shape[0])
nbas = ctypes.c_int(c_bas.shape[0])
libcvhf.CINTtot_cgto_spheric.restype = ctypes.c_int
nao = libcvhf.CINTtot_cgto_spheric(c_bas.ctypes.data_as(ctypes.c_void_p), nbas)
nao_pair = nao*(nao+1)//2
eri = numpy.empty((nao_pair*(nao_pair+1)//2))
libcvhf.int2e_sph(eri.ctypes.data_as(ctypes.c_void_p),
c_atm.ctypes.data_as(ctypes.c_void_p), natm,
c_bas.ctypes.data_as(ctypes.c_void_p), nbas,
c_env.ctypes.data_as(ctypes.c_void_p))
return eri
################################################################
# relativistic
def rdirect_mapdm(intor, aosym, jkdescript,
dms, ncomp, atm, bas, env, vhfopt=None):
assert(aosym in ('s8', 's4', 's2ij', 's2kl', 's1',
'a4ij', 'a4kl', 'a2ij', 'a2kl'))
c_atm = numpy.asarray(atm, dtype=numpy.int32, order='C')
c_bas = numpy.asarray(bas, dtype=numpy.int32, order='C')
c_env = numpy.asarray(env, dtype=numpy.double, order='C')
natm = ctypes.c_int(c_atm.shape[0])
nbas = ctypes.c_int(c_bas.shape[0])
if isinstance(dms, numpy.ndarray) and dms.ndim == 2:
n_dm = 1
nao = dms.shape[0]
dms = (numpy.asarray(dms, order='C', dtype=numpy.complex128),)
else:
n_dm = len(dms)
nao = dms[0].shape[0]
dms = numpy.asarray(dms, order='C', dtype=numpy.complex128)
if isinstance(jkdescript, str):
njk = 1
jkdescript = (jkdescript,)
else:
njk = len(jkdescript)
if vhfopt is None:
cintor = _fpointer(intor)
cintopt = make_cintopt(c_atm, c_bas, c_env, intor)
cvhfopt = pyscf.lib.c_null_ptr()
else:
vhfopt.set_dm(dms, atm, bas, env)
cvhfopt = vhfopt._this
cintopt = vhfopt._cintopt
cintor = vhfopt._intor
fdrv = getattr(libcvhf, 'CVHFr_direct_drv')
dotsym = _INTSYMAP[aosym]
fdot = _fpointer('CVHFdot_r'+dotsym)
unpackas = _INTUNPACKMAP_R[aosym]
descr_sym = [x.split('->') for x in jkdescript]
fjk = (ctypes.c_void_p*(njk*n_dm))()
dm1 = (ctypes.c_void_p*(njk*n_dm))()
for i, (dmsym, vsym) in enumerate(descr_sym):
f1 = _fpointer('CVHFr%s_%s_%s'%(unpackas, dmsym, vsym))
for j in range(n_dm):
dm1[i*n_dm+j] = dms[j].ctypes.data_as(ctypes.c_void_p)
fjk[i*n_dm+j] = f1
    vjk = numpy.empty((njk,n_dm*ncomp,nao,nao), dtype=numpy.complex128)
fdrv(cintor, fdot, fjk, dm1,
vjk.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(njk*n_dm), ctypes.c_int(ncomp),
cintopt, cvhfopt,
c_atm.ctypes.data_as(ctypes.c_void_p), natm,
c_bas.ctypes.data_as(ctypes.c_void_p), nbas,
c_env.ctypes.data_as(ctypes.c_void_p))
if n_dm * ncomp == 1:
vjk = vjk.reshape(njk,nao,nao)
if njk == 1:
vjk = vjk.reshape(vjk.shape[1:])
return vjk
# for density matrices in dms, bind each dm to a jk operator
def rdirect_bindm(intor, aosym, jkdescript,
dms, ncomp, atm, bas, env, vhfopt=None):
assert(aosym in ('s8', 's4', 's2ij', 's2kl', 's1',
'a4ij', 'a4kl', 'a2ij', 'a2kl'))
c_atm = numpy.asarray(atm, dtype=numpy.int32, order='C')
c_bas = numpy.asarray(bas, dtype=numpy.int32, order='C')
c_env = numpy.asarray(env, dtype=numpy.double, order='C')
natm = ctypes.c_int(c_atm.shape[0])
nbas = ctypes.c_int(c_bas.shape[0])
if isinstance(dms, numpy.ndarray) and dms.ndim == 2:
n_dm = 1
nao = dms.shape[0]
dms = (numpy.asarray(dms, order='C', dtype=numpy.complex128),)
else:
n_dm = len(dms)
nao = dms[0].shape[0]
dms = numpy.asarray(dms, order='C', dtype=numpy.complex128)
if isinstance(jkdescript, str):
njk = 1
jkdescript = (jkdescript,)
else:
njk = len(jkdescript)
assert(njk == n_dm)
if vhfopt is None:
cintor = _fpointer(intor)
cintopt = make_cintopt(c_atm, c_bas, c_env, intor)
cvhfopt = pyscf.lib.c_null_ptr()
else:
vhfopt.set_dm(dms, atm, bas, env)
cvhfopt = vhfopt._this
cintopt = vhfopt._cintopt
cintor = vhfopt._intor
fdrv = getattr(libcvhf, 'CVHFr_direct_drv')
dotsym = _INTSYMAP[aosym]
fdot = _fpointer('CVHFdot_r'+dotsym)
unpackas = _INTUNPACKMAP_R[aosym]
descr_sym = [x.split('->') for x in jkdescript]
fjk = (ctypes.c_void_p*(n_dm))()
dm1 = (ctypes.c_void_p*(n_dm))()
for i, (dmsym, vsym) in enumerate(descr_sym):
f1 = _fpointer('CVHFr%s_%s_%s'%(unpackas, dmsym, vsym))
dm1[i] = dms[i].ctypes.data_as(ctypes.c_void_p)
fjk[i] = f1
    vjk = numpy.empty((njk,ncomp,nao,nao), dtype=numpy.complex128)
fdrv(cintor, fdot, fjk, dm1,
vjk.ctypes.data_as(ctypes.c_void_p),
ctypes.c_int(n_dm), ctypes.c_int(ncomp),
cintopt, cvhfopt,
c_atm.ctypes.data_as(ctypes.c_void_p), natm,
c_bas.ctypes.data_as(ctypes.c_void_p), nbas,
c_env.ctypes.data_as(ctypes.c_void_p))
if ncomp == 1:
vjk = vjk.reshape(njk,nao,nao)
if njk == 1:
vjk = vjk.reshape(vjk.shape[1:])
return vjk
# 'a4ij': anti-symm between ij, symm between kl
# 'a4kl': anti-symm between kl, symm between ij
# 'a2ij': anti-symm between ij,
# 'a2kl': anti-symm between kl,
_INTSYMAP= {
's8' : 's8' ,
's4' : 's4' ,
's2ij': 's2ij',
's2kl': 's2kl',
's1' : 's1' ,
'a4ij': 's4' ,
'a4kl': 's4' ,
'a2ij': 's2ij',
'a2kl': 's2kl',
}
_INTUNPACKMAP_R = {
's8' : 's8' ,
's4' : 's4' ,
's2ij': 's2ij',
's2kl': 's2kl',
's1' : 's1' ,
'a4ij': 'ah4' ,
'a4kl': 'ha4' ,
'a2ij': 'ah2ij',
'a2kl': 'ha2kl',
}
def make_ao_loc(bas, cart=False):
l = bas[:,gto.ANG_OF]
if cart:
dims = (l+1)*(l+2)//2 * bas[:,gto.NCTR_OF]
else:
dims = (l*2+1) * bas[:,gto.NCTR_OF]
ao_loc = numpy.empty(len(bas)+1, dtype=numpy.int32)
ao_loc[0] = 0
dims.cumsum(dtype=numpy.int32, out=ao_loc[1:])
return ao_loc
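# --- Usage sketch (illustrative; not part of the original module) ---
# make_ao_loc() turns per-shell angular momenta into cumulative AO offsets.
# With one s shell (l=0) and one p shell (l=1), one contraction each, the
# spherical shell dimensions are 1 and 3, so the offsets come out [0, 1, 4].
def _demo_ao_loc():
    bas = numpy.zeros((2, 8), dtype=numpy.int32)   # 8 columns = gto.BAS_SLOTS
    bas[0, gto.ANG_OF] = 0     # s shell
    bas[1, gto.ANG_OF] = 1     # p shell
    bas[:, gto.NCTR_OF] = 1    # one contraction per shell
    ao_loc = make_ao_loc(bas)
    assert list(ao_loc) == [0, 1, 4]
    return ao_loc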
_SHLINDEX = {'i': 0, 'j': 2, 'k': 4, 'l': 6}
def get_dims(descr_sym, shls_slice, ao_loc):
i = _SHLINDEX[descr_sym[0]]
j = _SHLINDEX[descr_sym[1]]
di = ao_loc[shls_slice[i+1]] - ao_loc[shls_slice[i]]
dj = ao_loc[shls_slice[j+1]] - ao_loc[shls_slice[j]]
return (di,dj)
| 37.05 | 90 | 0.587623 |
f73a2ae3c540a9a90052d279a3881c4aaf86097f | 3,410 | py | Python | kopf/engines/probing.py | ankitdobhal/kopf | 2765eda2a08e7e42195446cc23f02ba91603db53 | [
"MIT"
] | null | null | null | kopf/engines/probing.py | ankitdobhal/kopf | 2765eda2a08e7e42195446cc23f02ba91603db53 | [
"MIT"
] | null | null | null | kopf/engines/probing.py | ankitdobhal/kopf | 2765eda2a08e7e42195446cc23f02ba91603db53 | [
"MIT"
] | null | null | null | import asyncio
import datetime
import logging
import urllib.parse
from typing import MutableMapping, Optional, Tuple
import aiohttp.web
from kopf.reactor import activities, lifecycles, registries
from kopf.structs import callbacks, configuration, handlers, memos
logger = logging.getLogger(__name__)
LOCALHOST: str = 'localhost'
HTTP_PORT: int = 80
_Key = Tuple[str, int] # hostname, port
async def health_reporter(
endpoint: str,
*,
memo: memos.AnyMemo,
registry: registries.OperatorRegistry,
settings: configuration.OperatorSettings,
ready_flag: Optional[asyncio.Event] = None, # used for testing
) -> None:
"""
Simple HTTP(S)/TCP server to report the operator's health to K8s probes.
Runs forever until cancelled (which happens if any other root task
    is cancelled or fails). Once it stops responding for any reason,
Kubernetes will assume the pod is not alive anymore, and will restart it.
"""
probing_container: MutableMapping[handlers.HandlerId, callbacks.Result] = {}
probing_timestamp: Optional[datetime.datetime] = None
probing_max_age = datetime.timedelta(seconds=10.0)
probing_lock = asyncio.Lock()
async def get_health(
request: aiohttp.web.Request,
) -> aiohttp.web.Response:
nonlocal probing_timestamp
        # Recollect the data on demand, and only if it is older than a reasonable caching period.
# Protect against multiple parallel requests performing the same heavy activity.
now = datetime.datetime.utcnow()
if probing_timestamp is None or now - probing_timestamp >= probing_max_age:
async with probing_lock:
now = datetime.datetime.utcnow()
if probing_timestamp is None or now - probing_timestamp >= probing_max_age:
activity_results = await activities.run_activity(
lifecycle=lifecycles.all_at_once,
registry=registry,
settings=settings,
activity=handlers.Activity.PROBE,
memo=memo,
)
probing_container.clear()
probing_container.update(activity_results)
probing_timestamp = datetime.datetime.utcnow()
return aiohttp.web.json_response(probing_container)
parts = urllib.parse.urlsplit(endpoint)
if parts.scheme == 'http':
host = parts.hostname or LOCALHOST
port = parts.port or HTTP_PORT
path = parts.path
else:
raise Exception(f"Unsupported scheme: {endpoint}")
app = aiohttp.web.Application()
app.add_routes([aiohttp.web.get(path, get_health)])
runner = aiohttp.web.AppRunner(app, handle_signals=False)
await runner.setup()
site = aiohttp.web.TCPSite(runner, host, port, shutdown_timeout=1.0)
await site.start()
# Log with the actual URL: normalised, with hostname/port set.
url = urllib.parse.urlunsplit([parts.scheme, f'{host}:{port}', path, '', ''])
logger.debug("Serving health status at %s", url)
if ready_flag is not None:
ready_flag.set()
try:
# Sleep forever. No activity is needed.
await asyncio.Event().wait()
finally:
# On any reason of exit, stop reporting the health.
await asyncio.shield(runner.cleanup())
| 35.894737 | 97 | 0.657478 |
f73a36ef236a0195fe5a8771954b392d3e16858c | 22,339 | py | Python | tensor2tensor/models/video/savp.py | shankharaj29/tensor2tensor | b89ba51a6fa9e0c20009cfb57ee8de04f7138392 | [
"Apache-2.0"
] | 2 | 2020-03-02T13:49:11.000Z | 2020-06-18T09:48:35.000Z | tensor2tensor/models/video/savp.py | PedroLelis/tensor2tensor | 5a867d031bd493eeb7d2776e1118d1594ff0a623 | [
"Apache-2.0"
] | 1 | 2019-01-21T10:57:47.000Z | 2019-01-21T10:57:47.000Z | tensor2tensor/models/video/savp.py | PedroLelis/tensor2tensor | 5a867d031bd493eeb7d2776e1118d1594ff0a623 | [
"Apache-2.0"
] | 3 | 2019-02-10T11:12:30.000Z | 2022-02-23T20:43:48.000Z | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Stochastic Adversarial Video Prediction model.
Reference: https://arxiv.org/abs/1804.01523
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numbers
import numpy as np
from tensor2tensor.layers import common_layers
from tensor2tensor.layers import common_video
from tensor2tensor.models.video import savp_params # pylint: disable=unused-import
from tensor2tensor.models.video import sv2p
from tensor2tensor.utils import registry
from tensor2tensor.utils import update_ops_hook
import tensorflow as tf
gan_losses = tf.contrib.gan.losses.wargs
class NextFrameSavpBase(object):
"""Main function for Stochastic Adversarial Video Prediction."""
def encoder(self, inputs, n_layers=3):
"""Convnet that encodes inputs into mean and std of a gaussian.
Args:
inputs: 5-D Tensor, shape (batch_size, num_frames, width, height, channels)
n_layers: Number of layers.
Returns:
z_mu: Mean of the latent gaussians.
z_log_var: log(var) of the latent gaussians.
Raises:
ValueError: If inputs is not a 5-D tensor or not float32.
"""
latent_dims = self.hparams.z_dim
shape_as_list = inputs.shape.as_list()
if len(shape_as_list) != 5:
raise ValueError("Expected inputs to be a 5-D, got %d" %
len(shape_as_list))
if inputs.dtype != tf.float32:
raise ValueError("Expected dtype tf.float32, got %s" % inputs.dtype)
# Flatten (N,T,W,H,C) into (NT,W,H,C)
batch_size, _ = shape_as_list[:2]
inputs = tf.reshape(inputs, [-1] + list(inputs.shape)[2:])
n_filters = 64
rectified = None
# Applies 3 layer conv-net with padding, instance normalization
# and leaky relu as per the encoder in
# https://github.com/alexlee-gk/video_prediction
padding = [[0, 0], [1, 1], [1, 1], [0, 0]]
for i in range(n_layers):
with tf.variable_scope("layer_%d" % (i + 1)):
n_filters *= 2**i
if i:
padded = tf.pad(rectified, padding)
else:
padded = tf.pad(inputs, padding)
convolved = tf.layers.conv2d(padded, filters=n_filters, kernel_size=4,
strides=2, padding="VALID")
normalized = tf.contrib.layers.instance_norm(convolved)
rectified = tf.nn.leaky_relu(normalized, alpha=0.2)
# Mean pooling across all spatial dimensions.
pooled = tf.nn.avg_pool(
rectified, [1] + rectified.shape[1:3].as_list() + [1],
strides=[1, 1, 1, 1], padding="VALID")
squeezed = tf.squeeze(pooled, [1, 2])
# Down-project and output the mean and log of the standard deviation of
# the latents.
with tf.variable_scope("z_mu"):
z_mu = tf.layers.dense(squeezed, latent_dims)
with tf.variable_scope("z_log_sigma_sq"):
z_log_var = tf.layers.dense(squeezed, latent_dims)
z_log_var = tf.clip_by_value(z_log_var, -10, 10)
# Reshape to (batch_size X num_frames X latent_dims)
z_mu = tf.reshape(z_mu, (batch_size, -1, latent_dims))
z_log_var = tf.reshape(
z_log_var, (batch_size, -1, latent_dims))
return z_mu, z_log_var
  def expected_output_shape(self, input_shape, stride, padding, kernel_size):
    # Standard convolution output-size rule: floor((n + 2p - k) / s) + 1.
    return (input_shape + 2*padding - kernel_size) // stride + 1
def get_fc_dimensions(self, strides, kernel_sizes):
"""Get expected fully connected shape after a series of convolutions."""
output_height, output_width, _ = self.hparams.problem.frame_shape
output_steps = self.hparams.video_num_target_frames
output_shape = np.array([output_steps, output_height, output_width])
for curr_stride, kernel_size in zip(strides, kernel_sizes):
output_shape = self.expected_output_shape(
output_shape, np.array(curr_stride), 1, kernel_size)
return np.prod(output_shape) * self.hparams.num_discriminator_filters * 8
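  # Worked example (illustrative): with the hard-coded padding of 1, the rule
  # in expected_output_shape() gives, along a 64-pixel axis:
  #   kernel 4, stride 2:  (64 + 2 - 4) // 2 + 1 = 32   (halves the axis)
  #   kernel 3, stride 1:  (32 + 2 - 3) // 1 + 1 = 32   (keeps it)
  # get_fc_dimensions() folds this rule over the seven layers declared in
  # discriminator() below to size the final fully-connected layer.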
def discriminator(self, frames):
"""3-D SNGAN discriminator.
Args:
frames: a list of batch-major tensors indexed by time.
Returns:
logits: 1-D Tensor with shape=batch_size.
Positive logits imply that the discriminator thinks that it
belongs to the true class.
"""
ndf = self.hparams.num_discriminator_filters
frames = tf.stack(frames)
# Switch from time-major axis to batch-major axis.
frames = common_video.swap_time_and_batch_axes(frames)
# 3-D Conv-net mapping inputs to activations.
num_outputs = [ndf, ndf*2, ndf*2, ndf*4, ndf*4, ndf*8, ndf*8]
kernel_sizes = [3, 4, 3, 4, 3, 4, 3]
strides = [[1, 1, 1], [1, 2, 2], [1, 1, 1], [1, 2, 2], [1, 1, 1],
[2, 2, 2], [1, 1, 1]]
names = ["video_sn_conv0_0", "video_sn_conv0_1", "video_sn_conv1_0",
"video_sn_conv1_1", "video_sn_conv2_0", "video_sn_conv2_1",
"video_sn_conv3_0"]
iterable = zip(num_outputs, kernel_sizes, strides, names)
activations = frames
for num_filters, kernel_size, stride, name in iterable:
activations = self.pad_conv3d_lrelu(activations, num_filters, kernel_size,
stride, name)
num_fc_dimensions = self.get_fc_dimensions(strides, kernel_sizes)
activations = tf.reshape(activations, (-1, num_fc_dimensions))
return tf.squeeze(tf.layers.dense(activations, 1))
def d_step(self, true_frames, gen_frames):
"""Performs the discriminator step in computing the GAN loss.
Applies stop-gradient to the generated frames while computing the
discriminator loss to make sure that the gradients are not back-propagated
to the generator. This makes sure that only the discriminator is updated.
Args:
true_frames: True outputs
gen_frames: Generated frames.
Returns:
d_loss: Loss component due to the discriminator.
"""
hparam_to_disc_loss = {
"least_squares": gan_losses.least_squares_discriminator_loss,
"cross_entropy": gan_losses.modified_discriminator_loss,
"wasserstein": gan_losses.wasserstein_discriminator_loss}
# Concat across batch-axis.
_, batch_size, _, _, _ = common_layers.shape_list(true_frames)
all_frames = tf.concat(
[true_frames, tf.stop_gradient(gen_frames)], axis=1)
all_logits = self.discriminator(all_frames)
true_logits, fake_logits_stop = \
all_logits[:batch_size], all_logits[batch_size:]
mean_true_logits = tf.reduce_mean(true_logits)
tf.summary.scalar("mean_true_logits", mean_true_logits)
mean_fake_logits_stop = tf.reduce_mean(fake_logits_stop)
tf.summary.scalar("mean_fake_logits_stop", mean_fake_logits_stop)
discriminator_loss_func = hparam_to_disc_loss[self.hparams.gan_loss]
gan_d_loss = discriminator_loss_func(
discriminator_real_outputs=true_logits,
discriminator_gen_outputs=fake_logits_stop,
add_summaries=True)
return gan_d_loss, true_logits, fake_logits_stop
def g_step(self, gen_frames, fake_logits_stop):
"""Performs the generator step in computing the GAN loss.
Args:
gen_frames: Generated frames
fake_logits_stop: Logits corresponding to the generated frames as per
the discriminator. Assumed to have a stop-gradient term.
Returns:
gan_g_loss_pos_d: Loss.
gan_g_loss_neg_d: -gan_g_loss_pos_d but with a stop gradient on generator.
"""
hparam_to_gen_loss = {
"least_squares": gan_losses.least_squares_generator_loss,
"cross_entropy": gan_losses.modified_generator_loss,
"wasserstein": gan_losses.wasserstein_generator_loss
}
fake_logits = self.discriminator(gen_frames)
mean_fake_logits = tf.reduce_mean(fake_logits)
tf.summary.scalar("mean_fake_logits", mean_fake_logits)
# Generator loss.
# Using gan_g_loss_pos_d updates the discriminator as well.
# To avoid this add gan_g_loss_neg_d = -gan_g_loss_pos_d
# but with stop gradient on the generator.
# This makes sure that the net gradient on the discriminator is zero and
# net-gradient on the generator is just due to the gan_g_loss_pos_d.
generator_loss_func = hparam_to_gen_loss[self.hparams.gan_loss]
gan_g_loss_pos_d = generator_loss_func(
discriminator_gen_outputs=fake_logits, add_summaries=True)
gan_g_loss_neg_d = -generator_loss_func(
discriminator_gen_outputs=fake_logits_stop, add_summaries=True)
return gan_g_loss_pos_d, gan_g_loss_neg_d
def get_gan_loss(self, true_frames, gen_frames, name):
"""Get the discriminator + generator loss at every step.
    This performs a 1:1 update of the discriminator and generator at every
step.
Args:
true_frames: 5-D Tensor of shape (num_steps, batch_size, H, W, C)
Assumed to be ground truth.
gen_frames: 5-D Tensor of shape (num_steps, batch_size, H, W, C)
Assumed to be fake.
name: discriminator scope.
Returns:
loss: 0-D Tensor, with d_loss + g_loss
"""
# D - STEP
with tf.variable_scope("%s_discriminator" % name, reuse=tf.AUTO_REUSE):
gan_d_loss, _, fake_logits_stop = self.d_step(
true_frames, gen_frames)
# G - STEP
with tf.variable_scope("%s_discriminator" % name, reuse=True):
gan_g_loss_pos_d, gan_g_loss_neg_d = self.g_step(
gen_frames, fake_logits_stop)
gan_g_loss = gan_g_loss_pos_d + gan_g_loss_neg_d
tf.summary.scalar("gan_loss_%s" % name, gan_g_loss_pos_d + gan_d_loss)
if self.hparams.gan_optimization == "joint":
gan_loss = gan_g_loss + gan_d_loss
else:
curr_step = self.get_iteration_num()
gan_loss = tf.cond(
tf.logical_not(curr_step % 2 == 0), lambda: gan_g_loss,
lambda: gan_d_loss)
return gan_loss
def get_extra_loss(self, latent_means=None, latent_stds=None,
true_frames=None, gen_frames=None):
"""Gets extra loss from VAE and GAN."""
if not self.is_training:
return 0.0
vae_loss, d_vae_loss, d_gan_loss = 0.0, 0.0, 0.0
# Use sv2p's KL divergence computation.
if self.hparams.use_vae:
vae_loss = super(NextFrameSavpBase, self).get_extra_loss(
latent_means=latent_means, latent_stds=latent_stds)
if self.hparams.use_gan:
# Strip out the first context_frames for the true_frames
# Strip out the first context_frames - 1 for the gen_frames
context_frames = self.hparams.video_num_input_frames
true_frames = tf.stack(
tf.unstack(true_frames, axis=0)[context_frames:])
# discriminator for VAE.
if self.hparams.use_vae:
gen_enc_frames = tf.stack(
tf.unstack(gen_frames, axis=0)[context_frames-1:])
d_vae_loss = self.get_gan_loss(true_frames, gen_enc_frames, name="vae")
# discriminator for GAN.
gen_prior_frames = tf.stack(
tf.unstack(self.gen_prior_video, axis=0)[context_frames-1:])
d_gan_loss = self.get_gan_loss(true_frames, gen_prior_frames, name="gan")
return (
vae_loss + self.hparams.gan_loss_multiplier * d_gan_loss +
self.hparams.gan_vae_loss_multiplier * d_vae_loss)
def pad_conv3d_lrelu(self, activations, n_filters, kernel_size, strides,
scope):
"""Pad, apply 3-D convolution and leaky relu."""
padding = [[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]]
# tf.nn.conv3d accepts a list of 5 values for strides
# with first and last value equal to 1
if isinstance(strides, numbers.Integral):
strides = [strides] * 3
strides = [1] + strides + [1]
# Filter_shape = [K, K, K, num_input, num_output]
filter_shape = (
[kernel_size]*3 + activations.shape[-1:].as_list() + [n_filters])
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
conv_filter = tf.get_variable(
"conv_filter", shape=filter_shape,
initializer=tf.truncated_normal_initializer(stddev=0.02))
if self.hparams.use_spectral_norm:
conv_filter, assign_op = common_layers.apply_spectral_norm(conv_filter)
if self.is_training:
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, assign_op)
padded = tf.pad(activations, padding)
convolved = tf.nn.conv3d(
padded, conv_filter, strides=strides, padding="VALID")
rectified = tf.nn.leaky_relu(convolved, alpha=0.2)
return rectified
@staticmethod
def train_hooks(hook_context):
del hook_context
return [update_ops_hook.UpdateOpsHook()]
@registry.register_model
class NextFrameSAVP(NextFrameSavpBase, sv2p.NextFrameSv2pLegacy):
"""Stochastic Adversarial Video Prediction."""
def construct_model(self, images, actions, rewards):
"""Model that takes in images and returns predictions.
Args:
images: list of 4-D Tensors indexed by time.
(batch_size, width, height, channels)
actions: list of action tensors
each action should be in the shape ?x1xZ
rewards: list of reward tensors
each reward should be in the shape ?x1xZ
Returns:
video: list of 4-D predicted frames.
all_rewards: predicted rewards.
latent_means: list of gaussian means conditioned on the input at
every frame.
latent_stds: list of gaussian stds conditioned on the input at
every frame.
Raises:
ValueError: If not exactly one of self.hparams.vae or self.hparams.gan
is set to True.
"""
if not self.hparams.use_vae and not self.hparams.use_gan:
raise ValueError("Set at least one of use_vae or use_gan to be True")
if self.hparams.gan_optimization not in ["joint", "sequential"]:
raise ValueError("self.hparams.gan_optimization should be either joint "
"or sequential got %s" % self.hparams.gan_optimization)
images = tf.unstack(images, axis=0)
actions = tf.unstack(actions, axis=0)
rewards = tf.unstack(rewards, axis=0)
latent_dims = self.hparams.z_dim
context_frames = self.hparams.video_num_input_frames
seq_len = len(images)
input_shape = common_layers.shape_list(images[0])
batch_size = input_shape[0]
# Model does not support reward-conditioned frame generation.
fake_rewards = rewards[:-1]
# Concatenate x_{t-1} and x_{t} along depth and encode it to
# produce the mean and standard deviation of z_{t-1}
image_pairs = tf.concat([images[:seq_len - 1],
images[1:seq_len]], axis=-1)
z_mu, z_log_sigma_sq = self.encoder(image_pairs)
# Unstack z_mu and z_log_sigma_sq along the time dimension.
z_mu = tf.unstack(z_mu, axis=0)
z_log_sigma_sq = tf.unstack(z_log_sigma_sq, axis=0)
iterable = zip(images[:-1], actions[:-1], fake_rewards,
z_mu, z_log_sigma_sq)
# Initialize LSTM State
lstm_state = [None] * 7
gen_cond_video, gen_prior_video, all_rewards, latent_means, latent_stds = \
[], [], [], [], []
pred_image = tf.zeros_like(images[0])
prior_latent_state, cond_latent_state = None, None
train_mode = self.hparams.mode == tf.estimator.ModeKeys.TRAIN
# Create scheduled sampling function
ss_func = self.get_scheduled_sample_func(batch_size)
with tf.variable_scope("prediction", reuse=tf.AUTO_REUSE):
for step, (image, action, reward, mu, log_sigma_sq) in enumerate(iterable): # pylint:disable=line-too-long
# Sample latents using a gaussian centered at conditional mu and std.
latent = common_video.get_gaussian_tensor(mu, log_sigma_sq)
# Sample prior latents from isotropic normal distribution.
prior_latent = tf.random_normal(tf.shape(latent), dtype=tf.float32)
# LSTM that encodes correlations between conditional latents.
# Pg 22 in https://arxiv.org/pdf/1804.01523.pdf
enc_cond_latent, cond_latent_state = common_video.basic_lstm(
latent, cond_latent_state, latent_dims, name="cond_latent")
# LSTM that encodes correlations between prior latents.
enc_prior_latent, prior_latent_state = common_video.basic_lstm(
prior_latent, prior_latent_state, latent_dims, name="prior_latent")
# Scheduled Sampling
done_warm_start = step > context_frames - 1
groundtruth_items = [image]
generated_items = [pred_image]
input_image, = self.get_scheduled_sample_inputs(
done_warm_start, groundtruth_items, generated_items, ss_func)
all_latents = tf.concat([enc_cond_latent, enc_prior_latent], axis=0)
all_image = tf.concat([input_image, input_image], axis=0)
all_action = tf.concat([action, action], axis=0)
all_rewards = tf.concat([reward, reward], axis=0)
all_pred_images, lstm_state, _ = self.construct_predictive_tower(
all_image, all_rewards, all_action, lstm_state, all_latents,
concat_latent=True)
cond_pred_images, prior_pred_images = \
all_pred_images[:batch_size], all_pred_images[batch_size:]
if train_mode and self.hparams.use_vae:
pred_image = cond_pred_images
else:
pred_image = prior_pred_images
gen_cond_video.append(cond_pred_images)
gen_prior_video.append(prior_pred_images)
latent_means.append(mu)
latent_stds.append(log_sigma_sq)
gen_cond_video = tf.stack(gen_cond_video, axis=0)
self.gen_prior_video = tf.stack(gen_prior_video, axis=0)
fake_rewards = tf.stack(fake_rewards, axis=0)
if train_mode and self.hparams.use_vae:
return gen_cond_video, fake_rewards, latent_means, latent_stds
else:
return self.gen_prior_video, fake_rewards, latent_means, latent_stds
@registry.register_model
class NextFrameSavpRl(NextFrameSavpBase, sv2p.NextFrameSv2p):
"""Stochastic Adversarial Video Prediction for RL pipeline."""
def video_features(
self, all_frames, all_actions, all_rewards, all_raw_frames):
"""No video wide feature."""
del all_actions, all_rewards, all_raw_frames
# Concatenate x_{t-1} and x_{t} along depth and encode it to
# produce the mean and standard deviation of z_{t-1}
seq_len = len(all_frames)
image_pairs = tf.concat([all_frames[:seq_len-1],
all_frames[1:seq_len]], axis=-1)
z_mu, z_log_sigma_sq = self.encoder(image_pairs)
# Unstack z_mu and z_log_sigma_sq along the time dimension.
z_mu = tf.unstack(z_mu, axis=0)
z_log_sigma_sq = tf.unstack(z_log_sigma_sq, axis=0)
return [z_mu, z_log_sigma_sq]
def video_extra_loss(self, frames_predicted, frames_target,
internal_states, video_features):
if not self.is_training:
return 0.0
latent_means, latent_stds = video_features
true_frames, gen_frames = frames_target, frames_predicted
loss = super(NextFrameSavpRl, self).get_extra_loss(
latent_means=latent_means, latent_stds=latent_stds,
true_frames=true_frames, gen_frames=gen_frames)
return loss
def next_frame(self, frames, actions, rewards, target_frame,
internal_states, video_features):
del target_frame
if not self.hparams.use_vae or self.hparams.use_gan:
raise NotImplementedError("Only supporting VAE for now.")
if self.has_pred_actions or self.has_values:
raise NotImplementedError("Parameter sharing with policy not supported.")
image, action, reward = frames[0], actions[0], rewards[0]
latent_dims = self.hparams.z_dim
batch_size = common_layers.shape_list(image)[0]
if internal_states is None:
# Initialize LSTM State
frame_index = 0
lstm_state = [None] * 7
cond_latent_state, prior_latent_state = None, None
gen_prior_video = []
else:
(frame_index, lstm_state, cond_latent_state,
prior_latent_state, gen_prior_video) = internal_states
z_mu, log_sigma_sq = video_features
z_mu, log_sigma_sq = z_mu[frame_index], log_sigma_sq[frame_index]
# Sample latents using a gaussian centered at conditional mu and std.
latent = common_video.get_gaussian_tensor(z_mu, log_sigma_sq)
# Sample prior latents from isotropic normal distribution.
prior_latent = tf.random_normal(tf.shape(latent), dtype=tf.float32)
    # LSTM that encodes correlations between conditional latents.
    # Pg 22 in https://arxiv.org/pdf/1804.01523.pdf
enc_cond_latent, cond_latent_state = common_video.basic_lstm(
latent, cond_latent_state, latent_dims, name="cond_latent")
# LSTM that encodes correlations between prior latents.
enc_prior_latent, prior_latent_state = common_video.basic_lstm(
prior_latent, prior_latent_state, latent_dims, name="prior_latent")
all_latents = tf.concat([enc_cond_latent, enc_prior_latent], axis=0)
all_image = tf.concat([image, image], 0)
all_action = tf.concat([action, action], 0) if self.has_actions else None
all_pred_images, lstm_state = self.construct_predictive_tower(
all_image, None, all_action, lstm_state, all_latents,
concat_latent=True)
cond_pred_images, prior_pred_images = \
all_pred_images[:batch_size], all_pred_images[batch_size:]
if self.is_training and self.hparams.use_vae:
pred_image = cond_pred_images
else:
pred_image = prior_pred_images
gen_prior_video.append(prior_pred_images)
internal_states = (frame_index + 1, lstm_state, cond_latent_state,
prior_latent_state, gen_prior_video)
if not self.has_rewards:
      return pred_image, None, None, None, 0.0, internal_states  # match the 6-tuple below
pred_reward = self.reward_prediction(
pred_image, action, reward, latent)
return pred_image, pred_reward, None, None, 0.0, internal_states
| 39.74911 | 113 | 0.696316 |
f73ab2948daa58260ac313e2f31c7dce7b616281 | 6,411 | py | Python | toontown/betaevent/DistributedBetaEvent.py | LittleNed/toontown-stride | 1252a8f9a8816c1810106006d09c8bdfe6ad1e57 | [
"Apache-2.0"
] | 1 | 2018-06-16T23:06:38.000Z | 2018-06-16T23:06:38.000Z | toontown/betaevent/DistributedBetaEvent.py | NoraTT/Historical-Commits-Project-Altis-Source | fe88e6d07edf418f7de6ad5b3d9ecb3d0d285179 | [
"Apache-2.0"
] | null | null | null | toontown/betaevent/DistributedBetaEvent.py | NoraTT/Historical-Commits-Project-Altis-Source | fe88e6d07edf418f7de6ad5b3d9ecb3d0d285179 | [
"Apache-2.0"
] | 4 | 2019-06-20T23:45:23.000Z | 2020-10-14T20:30:15.000Z | from panda3d.core import Point3, VBase3, Vec3, Vec4
from toontown.betaevent.DistributedEvent import DistributedEvent
from toontown.betaevent import CogTV
from toontown.hood import ZoneUtil
from direct.fsm import ClassicFSM, State
from direct.interval.IntervalGlobal import *
from toontown.toon import Toon, ToonDNA
from direct.actor.Actor import Actor
from otp.avatar import Avatar
from toontown.chat.ChatGlobals import *
from toontown.nametag.NametagGroup import *
from toontown.suit import DistributedSuitBase, SuitDNA
from toontown.toon import NPCToons
from toontown.betaevent import BetaEventGlobals as BEGlobals
from toontown.battle import BattleParticles
class DistributedBetaEvent(DistributedEvent):
notify = directNotify.newCategory('DistributedBetaEvent')
def __init__(self, cr):
DistributedEvent.__init__(self, cr)
self.cr = cr
        self.spark = loader.loadSfx('phase_11/audio/sfx/LB_sparks_1.ogg') # NOTE: loaded but not yet used anywhere
# Create prepostera
self.prepostera = Toon.Toon()
self.prepostera.setName('Professor Prepostera')
self.prepostera.setPickable(0)
self.prepostera.setPlayerType(NametagGlobals.CCNonPlayer)
dna = ToonDNA.ToonDNA()
        dna.newToonFromProperties('hss', 'ms', 'm', 'm', 20, 0, 20, 20, 97, 27, 86, 27, 37, 27)
self.prepostera.setDNA(dna)
self.prepostera.loop('scientistEmcee')
self.prepostera.reparentTo(render)
self.prepostera.setPosHpr(4, -3, 1, 0, 0, 0)
self.prepostera.blinkEyes()
self.prepostera.head = self.prepostera.find('**/__Actor_head')
self.prepostera.initializeBodyCollisions('toon')
self.headHoncho1 = DistributedSuitBase.DistributedSuitBase(self.cr)
headHoncho1suitDNA = SuitDNA.SuitDNA()
headHoncho1suitDNA.newSuit('hho')
self.headHoncho1.setDNA(headHoncho1suitDNA)
self.headHoncho1.setDisplayName('???')
self.headHoncho1.setPickable(0)
self.headHoncho1.setPosHpr(0, 0, 0, 0, 0, 0)
self.headHoncho1.reparentTo(render)
self.headHoncho1.doId = 0
self.headHoncho1.hide()
self.headHoncho1.initializeBodyCollisions('toon')
middlemanDNA = SuitDNA.SuitDNA()
middlemanDNA.newSuit('mdm')
self.middleman1 = DistributedSuitBase.DistributedSuitBase(self.cr)
self.middleman1.setDNA(middlemanDNA)
self.middleman1.setDisplayName('Middleman')
self.middleman1.setPickable(0)
self.middleman1.setPosHpr(0, 0, 0, 0, 0, 0)
self.middleman1.reparentTo(render)
self.middleman1.doId = 1
self.middleman1.hide()
self.middleman1.initializeBodyCollisions('toon')
self.middleman2 = DistributedSuitBase.DistributedSuitBase(self.cr)
self.middleman2.setDNA(middlemanDNA)
self.middleman2.setDisplayName('Middleman')
self.middleman2.setPickable(0)
self.middleman2.setPosHpr(0, 0, 0, 0, 0, 0)
self.middleman2.reparentTo(render)
self.middleman2.doId = 2
self.middleman2.hide()
self.middleman2.initializeBodyCollisions('toon')
#base.musicManager.stopAllSounds()
self.toonMusic = loader.loadMusic('phase_14/audio/bgm/tt2_ambient_1.mp3') # Placeholder
#base.playMusic(self.toonMusic, looping = 1)
def announceGenerate(self):
DistributedEvent.announceGenerate(self)
def start(self):
pass
def delete(self):
DistributedEvent.delete(self)
self.prepostera.delete()
def enterStartBd(self, timestamp):
self.prepostera.animFSM.request('TeleportIn')
def exitStartBd(self):
pass
def enterCogInvade(self, timestamp):
self.headHoncho1.setPosHpr(0, 0, 0, 0, 0, 0)
self.headHoncho1.show()
Sequence(
self.headHoncho1.beginSupaFlyMove(Vec3(12, -4, 1), True, "firstCogInvadeFlyIn", walkAfterLanding=False),
Func(self.headHoncho1.loop, 'walk'),
self.headHoncho1.hprInterval(2, VBase3(90, 0, 0)),
Func(self.headHoncho1.loop, 'neutral'),
Wait(1),
Func(self.headHoncho1.setChatAbsolute, 'Hello Toon...', CFSpeech|CFTimeout),
Wait(4),
Func(self.headHoncho1.setChatAbsolute, "I'd hate to crash the party...", CFSpeech|CFTimeout),
Wait(4),
Func(self.headHoncho1.setChatAbsolute, "Actually... I'd love to!", CFSpeech|CFTimeout)
).start()
def exitCogInvade(self):
pass
def enterCogTalk(self, timestamp):
self.middleman1.show()
self.middleman2.show()
Sequence(
Func(self.headHoncho1.setChatAbsolute, 'I hear you wanted to open Loony Labs...', CFSpeech|CFTimeout),
Wait(4),
Parallel(
self.middleman1.beginSupaFlyMove(Vec3(-8, -4, 1), True, "firstCogInvadeFlyIn", walkAfterLanding=False),
self.middleman2.beginSupaFlyMove(Vec3(4, -12, 1), True, "firstCogInvadeFlyIn", walkAfterLanding=False)
),
Func(self.middleman2.loop, 'neutral'),
Parallel(
Sequence(
Func(self.middleman1.loop, 'walk'),
self.middleman1.hprInterval(2, VBase3(-90, 0, 0)),
Func(self.middleman1.loop, 'neutral')),
Func(self.headHoncho1.setChatAbsolute, "How well did that go for you?", CFSpeech|CFTimeout))
).start()
def exitCogTalk(self):
pass
def enterCogTakeover(self, timestamp):
pass
def exitCogTakeover(self):
pass
def enterCredits(self, timestamp):
import CreditsScreen
self.credits = CreditsScreen.CreditsScreen()
self.credits.startCredits()
def exitCredits(self):
pass
def toonTalk(self, phrase, toon):
toon.setChatAbsolute(phrase, CFSpeech|CFTimeout)
| 42.177632 | 130 | 0.610357 |
f73ab6176ab8fe472d5242378c8bc7c181ac542c | 4,666 | py | Python | tests/test_chi_citycouncil.py | danielahuang/city-scrapers | 711d1995f59100793e771068a6f5d9149e773412 | ["MIT"] | null | null | null | tests/test_chi_citycouncil.py | danielahuang/city-scrapers | 711d1995f59100793e771068a6f5d9149e773412 | ["MIT"] | null | null | null | tests/test_chi_citycouncil.py | danielahuang/city-scrapers | 711d1995f59100793e771068a6f5d9149e773412 | ["MIT"] | null | null | null |
import json
from datetime import date, time
from urllib.parse import parse_qs
import pytest
from freezegun import freeze_time
from tests.utils import file_response, read_test_file_content
from city_scrapers.constants import CITY_COUNCIL
from city_scrapers.spiders.chi_citycouncil import ChiCityCouncilSpider
INITIAL_REQUEST = 'https://ocd.datamade.us/events/?' \
'start_date__gt=2017-10-16&' \
'jurisdiction=ocd-jurisdiction/country:us/state:il/place:chicago/government'
spider = ChiCityCouncilSpider()
@pytest.fixture('module')
def parsed_item():
freezer = freeze_time('2018-01-01 12:00:01')
freezer.start()
item = file_response('files/chi_citycouncil_event.json', url=INITIAL_REQUEST)
parsed = spider._parse_item(item)
freezer.stop()
return parsed
def test_parse():
response = file_response('files/chi_citycouncil_feed.json', url=INITIAL_REQUEST)
requests = list(spider.parse(response))
assert len(requests) == 2
def test_gen_requests():
test_response = json.loads(read_test_file_content('files/chi_citycouncil_feed.json'))
event_requests = [item for item in spider._gen_requests(test_response)]
assert event_requests == [
'https://ocd.datamade.us/ocd-event/86094f46-cf45-46f8-89e2-0bf783e7aa12/',
'https://ocd.datamade.us/ocd-event/93d62d20-b1dc-4d71-9e96-60c99c837e90/',
]
def test_addtl_pages():
more = json.loads(
'{"meta": {"page": 1, "per_page": 100, "total_count": 160, "count": 100, "max_page": 2}}'
)
assert spider._addtl_pages(more) is True
no_more = json.loads(
'{"meta": {"page": 1, "per_page": 100, "total_count": 2, "count": 2, "max_page": 1}}'
)
assert spider._addtl_pages(no_more) is False
def test_next_page():
more = json.loads(
'{"meta": {"page": 1, "per_page": 100, "total_count": 160, "count": 100, "max_page": 2}}'
)
original_params = parse_qs(INITIAL_REQUEST)
next_page = spider._next_page(more)
static_params = {k: v for k, v in original_params.items() if k != 'page'}
assert static_params == original_params
assert next_page == 2
def test_parse_documents():
documents = [{
"date": "",
"note": "Notice",
"links": [{
"url": (
"http://media.legistar.com/chic/meetings/633C3556-29C4-4645-A916-E767E00A98CC/"
"Notice,%2003-22-2018.pdf"
),
"media_type": "application/pdf"
}]
}]
assert spider._parse_documents(documents)[0] == {
'url': documents[0]['links'][0]['url'],
'note': "Notice"
}
# Item fields
def test_start(parsed_item):
expected_start = {'date': date(2017, 10, 16), 'time': time(10, 00), 'note': ''}
assert parsed_item['start'] == expected_start
def test_end(parsed_item):
expected_end = {'date': date(2017, 10, 16), 'time': None, 'note': ''}
assert parsed_item['end'] == expected_end
def test_name(parsed_item):
assert parsed_item['name'] == 'Joint Committee: Finance; Transportation and Public Way'
def test_description(parsed_item):
assert parsed_item['event_description'] == ""
def test_location(parsed_item):
expected_location = {
'address': '121 N LaSalle Dr, Chicago, IL',
'name': 'Council Chambers , City Hall'
}
assert parsed_item['location'] == expected_location
def test_documents(parsed_item):
assert parsed_item['documents'] == [{
"url":
"http://media.legistar.com/chic/meetings/B5103C52-1793-4B07-9F28-E0A1223E1540/Fin%20CANCELLED%2010-16_20171010085450.pdf", # noqa
"note": "Cancellation Notice",
}]
def test_id(parsed_item):
assert parsed_item['id'] == \
'chi_citycouncil/201710161000/ocd-event-86094f46-cf45-46f8-89e2-0bf783e7aa12/joint_committee_finance_transportation_and_public_way' # noqa
def test_all_day(parsed_item):
assert parsed_item['all_day'] is False
def test_classification(parsed_item):
assert parsed_item['classification'] == CITY_COUNCIL
def test_status(parsed_item):
assert parsed_item['status'] == 'cancelled'
def test__type(parsed_item):
assert parsed_item['_type'] == 'event'
def test_sources(parsed_item):
expected_sources = [
{
"url": "http://webapi.legistar.com/v1/chicago/events/4954",
"note": "api"
},
{
"url":
"https://chicago.legistar.com/MeetingDetail.aspx?ID=565455&GUID=B5103C52-1793-4B07-9F28-E0A1223E1540&Options=info&Search=", # noqa
"note": "web"
}
]
assert parsed_item['sources'] == expected_sources
| 30.496732 | 150 | 0.658594 |
f73ae27acfddb97a5a793e676d74a4d7a58eef84 | 22,342 | py | Python | cartridge/shop/migrations/0016_add_field_product__meta_title.py | AlexHill/cartridge | cb8599d43600442a223a484dc75726bfbbec68a0 | ["BSD-2-Clause"] | null | null | null | cartridge/shop/migrations/0016_add_field_product__meta_title.py | AlexHill/cartridge | cb8599d43600442a223a484dc75726bfbbec68a0 | ["BSD-2-Clause"] | null | null | null | cartridge/shop/migrations/0016_add_field_product__meta_title.py | AlexHill/cartridge | cb8599d43600442a223a484dc75726bfbbec68a0 | ["BSD-2-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Product._meta_title'
db.add_column('shop_product', '_meta_title',
self.gf('django.db.models.fields.CharField')(max_length=500, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Product._meta_title'
db.delete_column('shop_product', '_meta_title')
models = {
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'generic.assignedkeyword': {
'Meta': {'ordering': "('_order',)", 'object_name': 'AssignedKeyword'},
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keyword': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'assignments'", 'to': "orm['generic.Keyword']"}),
'object_pk': ('django.db.models.fields.IntegerField', [], {})
},
'generic.keyword': {
'Meta': {'object_name': 'Keyword'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'generic.rating': {
'Meta': {'object_name': 'Rating'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_pk': ('django.db.models.fields.IntegerField', [], {}),
'value': ('django.db.models.fields.IntegerField', [], {})
},
'pages.page': {
'Meta': {'ordering': "('titles',)", 'object_name': 'Page'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'content_model': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_menus': ('mezzanine.pages.fields.MenusField', [], {'default': '[1, 2, 3]', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
#'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': "orm['generic.AssignedKeyword']", 'frozen_by_south': 'True'}),
'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['pages.Page']"}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'titles': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'})
},
'shop.cart': {
'Meta': {'object_name': 'Cart'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'shop.cartitem': {
'Meta': {'object_name': 'CartItem'},
'cart': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['shop.Cart']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'quantity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'sku': ('cartridge.shop.fields.SKUField', [], {'max_length': '20'}),
'total_price': ('cartridge.shop.fields.MoneyField', [], {'default': "'0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'unit_price': ('cartridge.shop.fields.MoneyField', [], {'default': "'0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'shop.category': {
'Meta': {'ordering': "('_order',)", 'object_name': 'Category', '_ormbases': ['pages.Page']},
'combined': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'product_options'", 'blank': 'True', 'to': "orm['shop.ProductOption']"}),
'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['pages.Page']", 'unique': 'True', 'primary_key': 'True'}),
'price_max': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'price_min': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['shop.Product']", 'symmetrical': 'False', 'blank': 'True'}),
'sale': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shop.Sale']", 'null': 'True', 'blank': 'True'})
},
'shop.discountcode': {
'Meta': {'object_name': 'DiscountCode'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'discountcode_related'", 'blank': 'True', 'to': "orm['shop.Category']"}),
'code': ('cartridge.shop.fields.DiscountCodeField', [], {'unique': 'True', 'max_length': '20'}),
'discount_deduct': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'discount_exact': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'discount_percent': ('cartridge.shop.fields.PercentageField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '2', 'blank': 'True'}),
'free_shipping': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'min_purchase': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['shop.Product']", 'symmetrical': 'False', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'uses_remaining': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'valid_from': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'valid_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'shop.order': {
'Meta': {'ordering': "('-id',)", 'object_name': 'Order'},
'additional_instructions': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'billing_detail_city': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_detail_country': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_detail_email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'billing_detail_first_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_detail_last_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_detail_phone': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'billing_detail_postcode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'billing_detail_state': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_detail_street': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'discount_code': ('cartridge.shop.fields.DiscountCodeField', [], {'max_length': '20', 'blank': 'True'}),
'discount_total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'shipping_detail_city': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_detail_country': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_detail_first_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_detail_last_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_detail_phone': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'shipping_detail_postcode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'shipping_detail_state': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_detail_street': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'shipping_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'transaction_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'user_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'shop.orderitem': {
'Meta': {'object_name': 'OrderItem'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['shop.Order']"}),
'quantity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'sku': ('cartridge.shop.fields.SKUField', [], {'max_length': '20'}),
'total_price': ('cartridge.shop.fields.MoneyField', [], {'default': "'0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'unit_price': ('cartridge.shop.fields.MoneyField', [], {'default': "'0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
},
'shop.product': {
'Meta': {'object_name': 'Product'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'available': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['shop.Category']", 'symmetrical': 'False', 'blank': 'True'}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
#'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': "orm['generic.AssignedKeyword']", 'frozen_by_south': 'True'}),
'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'num_in_stock': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
#'rating': ('mezzanine.generic.fields.RatingField', [], {'object_id_field': "'object_pk'", 'to': "orm['generic.Rating']", 'frozen_by_south': 'True'}),
'rating_average': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'rating_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'related_products': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_products_rel_+'", 'blank': 'True', 'to': "orm['shop.Product']"}),
'sale_from': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sale_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'sale_price': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'sale_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'sku': ('cartridge.shop.fields.SKUField', [], {'max_length': '20', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'unit_price': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'upsell_products': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'upsell_products_rel_+'", 'blank': 'True', 'to': "orm['shop.Product']"})
},
'shop.productaction': {
'Meta': {'unique_together': "(('product', 'timestamp'),)", 'object_name': 'ProductAction'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'actions'", 'to': "orm['shop.Product']"}),
'timestamp': ('django.db.models.fields.IntegerField', [], {}),
'total_cart': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'total_purchase': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'shop.productimage': {
'Meta': {'ordering': "('_order',)", 'object_name': 'ProductImage'},
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'file': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'images'", 'to': "orm['shop.Product']"})
},
'shop.productoption': {
'Meta': {'object_name': 'ProductOption'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('cartridge.shop.fields.OptionField', [], {'max_length': '50', 'null': 'True'}),
'type': ('django.db.models.fields.IntegerField', [], {})
},
'shop.productvariation': {
'Meta': {'ordering': "('-default',)", 'object_name': 'ProductVariation'},
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shop.ProductImage']", 'null': 'True', 'blank': 'True'}),
'num_in_stock': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'option1': ('cartridge.shop.fields.OptionField', [], {'max_length': '50', 'null': 'True'}),
'option2': ('cartridge.shop.fields.OptionField', [], {'max_length': '50', 'null': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'variations'", 'to': "orm['shop.Product']"}),
'sale_from': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sale_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'sale_price': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'sale_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sku': ('cartridge.shop.fields.SKUField', [], {'max_length': '20', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'unit_price': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
},
'shop.sale': {
'Meta': {'object_name': 'Sale'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'sale_related'", 'blank': 'True', 'to': "orm['shop.Category']"}),
'discount_deduct': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'discount_exact': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'discount_percent': ('cartridge.shop.fields.PercentageField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '2', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['shop.Product']", 'symmetrical': 'False', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'valid_from': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'valid_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['shop']
| 87.273438 | 197 | 0.561409 |
f73afdcad8e818812662d41e891fdcf0eaf1cc95 | 7,622 | py | Python | src/api/bkuser_core/categories/plugins/ldap/adaptor.py | Canway-shiisa/bk-user | a049e80d12082960828015742cea4b041f4af796 | ["MIT"] | null | null | null | src/api/bkuser_core/categories/plugins/ldap/adaptor.py | Canway-shiisa/bk-user | a049e80d12082960828015742cea4b041f4af796 | ["MIT"] | null | null | null | src/api/bkuser_core/categories/plugins/ldap/adaptor.py | Canway-shiisa/bk-user | a049e80d12082960828015742cea4b041f4af796 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import logging
from dataclasses import dataclass, field
from typing import Any, Dict, List, NamedTuple, Optional
from django.utils.encoding import force_str
from ldap3.utils import dn as dn_utils
from bkuser_core.categories.plugins.constants import DYNAMIC_FIELDS_SETTING_KEY
from bkuser_core.categories.plugins.ldap.models import LdapDepartment, LdapUserProfile
from bkuser_core.user_settings.loader import ConfigProvider
logger = logging.getLogger(__name__)
@dataclass
class ProfileFieldMapper:
"""从 ldap 对象属性中获取用户字段"""
config_loader: ConfigProvider
embed_fields = [
"username",
"display_name",
"email",
"telephone",
]
dynamic_fields: List = field(default_factory=list)
def __post_init__(self):
self.dynamic_fields_mapping = self.config_loader.get(DYNAMIC_FIELDS_SETTING_KEY)
self.dynamic_fields = list(self.dynamic_fields_mapping.keys()) if self.dynamic_fields_mapping else []
def get_value(
self, field_name: str, user_meta: Dict[str, List[bytes]], remain_raw: bool = False, dynamic_field: bool = False
) -> Any:
"""通过 field_name 从 ldap 数据中获取具体值"""
# 获取自定义字段对应的属性值
if dynamic_field:
ldap_field_name = field_name
if ldap_field_name not in self.dynamic_fields_mapping.values():
logger.info("no config[%s] in configs of dynamic_fields_mapping", field_name)
return ""
else:
            # Fetch the LDAP attribute name from the category settings
ldap_field_name = self.config_loader.get(field_name)
if not ldap_field_name:
logger.info("no config[%s] in configs of category", field_name)
return ""
        # 1. Look up the concrete value by attribute name
if ldap_field_name not in user_meta or not user_meta[ldap_field_name]:
logger.info("field[%s] is missing in raw attributes of user data from ldap", field_name)
return ""
        # 2. Fields like memberOf should return the raw list of values
if remain_raw:
return user_meta[ldap_field_name]
return force_str(user_meta[ldap_field_name][0])
def get_values(self, user_meta: Dict[str, List[bytes]]) -> Dict[str, Any]:
"""根据字段映射关系, 从 ldap 中获取 `field_name` 的值"""
values = {}
for field_name in self.embed_fields:
values.update({field_name: self.get_value(field_name, user_meta)})
return values
def get_dynamic_values(self, user_meta: Dict[str, List[bytes]]) -> Dict[str, Any]:
"""获取自定义字段 在ldap中的对应值"""
values = {}
if self.dynamic_fields:
values.update(
{
field_name: self.get_value(
field_name=self.dynamic_fields_mapping[field_name], user_meta=user_meta, dynamic_field=True
)
for field_name in self.dynamic_fields
}
)
return values
def get_user_attributes(self) -> list:
"""获取远端属性名列表"""
user_attributes = [self.config_loader[x] for x in self.embed_fields if self.config_loader.get(x)]
user_attributes.extend(
[self.dynamic_fields_mapping[x] for x in self.dynamic_fields if self.dynamic_fields_mapping.get(x)]
)
return user_attributes
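
# Illustrative sketch (added for clarity; not part of the original module).
# With category settings such as {"username": "sAMAccountName"} and a
# dynamic-fields mapping like {"job_title": "title"} (all names here are
# hypothetical), the mapper reads the matching raw LDAP attributes:
#
#     user_meta = {"sAMAccountName": [b"jdoe"], "title": [b"Engineer"]}
#     mapper.get_values(user_meta)          # -> {"username": "jdoe", ...}
#     mapper.get_dynamic_values(user_meta)  # -> {"job_title": "Engineer"}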
def user_adapter(
code: str, user_meta: Dict[str, Any], field_mapper: ProfileFieldMapper, restrict_types: List[str]
) -> LdapUserProfile:
groups = field_mapper.get_value("user_member_of", user_meta["raw_attributes"], True) or []
return LdapUserProfile(
**field_mapper.get_values(user_meta["raw_attributes"]),
code=code,
extras=field_mapper.get_dynamic_values(user_meta["raw_attributes"]),
        # TODO: finish the departments conversion logic
departments=[
            # By convention, everything in the dn except the first component is the user's department, hence [1:]
list(reversed(parse_dn_value_list(user_meta["dn"], restrict_types)[1:])),
            # Relations between the user and its groups
*[list(reversed(parse_dn_value_list(force_str(group), restrict_types))) for group in groups],
],
)
def department_adapter(code: str, dept_meta: Dict, is_group: bool, restrict_types: List[str]) -> LdapDepartment:
dn = dept_meta["dn"]
dn_values = parse_dn_value_list(dn, restrict_types=restrict_types)
parent_dept: Optional[LdapDepartment] = None
for dept_name in reversed(dn_values):
parent_dept = LdapDepartment(
name=dept_name,
parent=parent_dept,
is_group=is_group,
)
    assert parent_dept is not None, "no department information could be extracted from the dn"
parent_dept.code = code
return parent_dept
class RDN(NamedTuple):
"""RelativeDistinguishedName"""
type: str
value: str
separator: str
def parse_dn_tree(dn: str, restrict_types: List[str] = None) -> List[RDN]:
"""A DN is a sequence of relative distinguished names (RDN) connected by commas, For examples:
we have a dn = "CN=Jeff Smith,OU=Sales,DC=Fabrikam,DC=COM", this method will parse the dn to:
>>> parse_dn_tree("CN=Jeff Smith,OU=Sales,DC=Fabrikam,DC=COM")
[RDN(type='CN', value='Jeff Smith', separator=','),
RDN(type='OU', value='Sales', separator=','),
RDN(type='DC', value='Fabrikam', separator=','),
RDN(type='DC', value='COM', separator='')]
    If restrict_types is provided, this method ignores attributes whose type is not in restrict_types. For example:
>>> parse_dn_tree("CN=Jeff Smith,OU=Sales,DC=Fabrikam,DC=COM", restrict_types=["DC"])
[RDN(type='DC', value='Fabrikam', separator=','), RDN(type='DC', value='COM', separator='')]
    Furthermore, restrict_types is case-insensitive: ["DC"], ["dc"] and ["Dc"] are exactly equivalent.
>>> parse_dn_tree("CN=Jeff Smith,OU=Sales,DC=Fabrikam,DC=COM", restrict_types=["dc"])
[RDN(type='DC', value='Fabrikam', separator=','), RDN(type='DC', value='COM', separator='')]
See Also: https://docs.microsoft.com/en-us/previous-versions/windows/desktop/ldap/distinguished-names
"""
restrict_types = [type_.upper() for type_ in (restrict_types or [])]
items = dn_utils.parse_dn(dn, escape=True)
if restrict_types:
parts = [RDN(*i) for i in items if i[0].upper() in restrict_types]
else:
parts = [RDN(*i) for i in items]
return parts
def parse_dn_value_list(dn: str, restrict_types: List[str] = None) -> List[str]:
"""this method work like parse_dn_tree, be only return values of those attributes, For examples:
>>> parse_dn_value_list("CN=Jeff Smith,OU=Sales,DC=Fabrikam,DC=COM")
['Jeff Smith', 'Sales', 'Fabrikam', 'COM']
    If restrict_types is provided, this method ignores attributes whose type is not in restrict_types. For example:
>>> parse_dn_value_list("CN=Jeff Smith,OU=Sales,DC=Fabrikam,DC=COM", restrict_types=["DC"])
['Fabrikam', 'COM']
"""
tree = parse_dn_tree(dn, restrict_types)
parts = []
for part in tree:
parts.append(part.value)
return parts
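
if __name__ == "__main__":
    # Minimal usage sketch (added for illustration; not part of the original
    # module). The dn and code below are made-up sample values.
    sample_dn = "CN=Jeff Smith,OU=Sales,DC=Fabrikam,DC=COM"
    print(parse_dn_value_list(sample_dn, restrict_types=["OU", "DC"]))  # -> ['Sales', 'Fabrikam', 'COM']
    # department_adapter builds the chain root-first, so 'Sales' is the leaf:
    dept = department_adapter(
        code="dept-1", dept_meta={"dn": sample_dn}, is_group=False, restrict_types=["OU", "DC"]
    )
    print(dept.name, dept.parent.name, dept.parent.parent.name)  # -> Sales Fabrikam COM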
| 38.301508 | 119 | 0.66846 |
f73afef58d65deabe13b39d87236bc23035e79be | 1,568 | py | Python | wrappers/python/tests/wallet/test_open_wallet.py | Diiaablo95/indy-sdk | 0ef7321f5902683af928cdc7ea94d522bee33d30 | ["Apache-2.0"] | null | null | null | wrappers/python/tests/wallet/test_open_wallet.py | Diiaablo95/indy-sdk | 0ef7321f5902683af928cdc7ea94d522bee33d30 | ["Apache-2.0"] | null | null | null | wrappers/python/tests/wallet/test_open_wallet.py | Diiaablo95/indy-sdk | 0ef7321f5902683af928cdc7ea94d522bee33d30 | ["Apache-2.0"] | null | null | null |
import pytest
from indy import IndyError
from indy import wallet
from indy.error import ErrorCode
@pytest.mark.asyncio
@pytest.mark.parametrize("wallet_config", [None, '{"freshness_time":1000}'])
async def test_open_wallet_works(wallet_config, wallet_handle):
pass
@pytest.mark.asyncio
async def test_open_wallet_works_for_not_created_wallet(credentials):
with pytest.raises(IndyError) as e:
await wallet.open_wallet('wallet_not_created', None, credentials)
assert ErrorCode.WalletNotFoundError == e.value.error_code
@pytest.mark.asyncio
async def test_open_wallet_works_for_twice(wallet_name, wallet_handle, credentials):
with pytest.raises(IndyError) as e:
await wallet.open_wallet(wallet_name, None, credentials)
assert ErrorCode.WalletAlreadyOpenedError == e.value.error_code
@pytest.mark.asyncio
async def test_open_wallet_works_for_missed_key(xwallet, wallet_name):
with pytest.raises(IndyError) as e:
await wallet.open_wallet(wallet_name, None, "{}")
assert ErrorCode.WalletInputError == e.value.error_code
@pytest.mark.asyncio
async def test_open_wallet_works_for_changing_credentials(pool_name):
await wallet.create_wallet(pool_name, 'works_for_changing_credentials', None, None, '{"key":"key"}')
handle = await wallet.open_wallet('works_for_changing_credentials', None, '{"key":"key", "rekey":"other_key"}')
await wallet.close_wallet(handle)
handle = await wallet.open_wallet('works_for_changing_credentials', None, '{"key":"other_key"}')
await wallet.close_wallet(handle)
| 35.636364 | 115 | 0.776148 |
f73b0549a8520f8a1b9f80ee2a9d44368bb49989 | 7,159 | py | Python | soaplib/core/zope2.py | divaliu1408/overfit | 083dcfaa758391092933e19544462cd831e73ef0 | ["Apache-2.0"] | null | null | null | soaplib/core/zope2.py | divaliu1408/overfit | 083dcfaa758391092933e19544462cd831e73ef0 | ["Apache-2.0"] | null | null | null | soaplib/core/zope2.py | divaliu1408/overfit | 083dcfaa758391092933e19544462cd831e73ef0 | ["Apache-2.0"] | null | null | null |
"""Mix-In Class to Export Decorated Methods using SOAP
This file is not a Zope2 Product nor a Zope3 component. It is a simple
Python module that adds two elements: a hook into the Zope publisher to
intercept SOAP requests, and a mix-in class to your Zope2 folder classes
that make them SOAP-aware, with queryable WSDL descriptions of any methods
decorated with @soapmethod.
A tiny bit of code needs to be invoked from within ZPublisher during the
parsing of an HTTP request that is a SOAP request. To make this happen,
change the code in the file lib/python/ZPublisher/HTTPRequest.py, within
the processInputs() method of the HTTPRequest class, from this:
fs=FieldStorage(fp=fp,environ=environ,keep_blank_values=1)
if not hasattr(fs,'list') or fs.list is None:
# Hm, maybe it's an XML-RPC
if (fs.headers.has_key('content-type') and
to this:
fs=FieldStorage(fp=fp,environ=environ,keep_blank_values=1)
if not hasattr(fs,'list') or fs.list is None:
if environ.has_key('HTTP_SOAPACTION'): #ADDED
other['SOAPXML'] = fs.value #ADDED
# Hm, maybe it's an XML-RPC
elif (fs.headers.has_key('content-type') and #CHANGED
"""
import cgi
from xml.etree import ElementTree
from lxml import etree
from soaplib.core._base import MethodContext
from soaplib.core.mime import collapse_swa
from soaplib.core.model.exception import Fault
from soaplib.core.service import DefinitionBase, soap
from soaplib.core.model.primitive import string_encoding
from soaplib.core import Application
from soaplib.core.server._base import Base as BaseServer
from zope.interface import implements
from zope.interface.common.interfaces import IException
from zope.app.testing import ztapi
# public symbols
__all__ = [
'SoapFolder', # to mix into your Zope Folders to make them into SOAP Service Points
'AccessDeniedSOAP', # exception object to signal a failed SOAP call
]
class SoapFolder(DefinitionBase):
"""Mix-In Class to Make a Folder into a SOAP Service Point
Import this class into your Zope2 folder classes to make them SOAP-aware.
Any methods in your folder class decorated with @soapmethod() will become
callable over SOAP and the signature and docstring of that method will be
reported as WSDL by a call to the index_html() method of that folder.
Your class should also define a class attribute indicating the 'toplevel
namespace' of your SOAP Service Point. This name is arbitrary as far as
this code goes, but appears in the SOAP response and the WSDL description
generated. This attribute looks like: __tns__ = "PatientServices"
"""
_v_soap_methods = None
_v_cached_wsdl = None
__wsdl__ = None
def __init__(self, tns, environ=None):
        DefinitionBase.__init__(self, environ)
self.soap_app = Application(self, tns, False)
self.soap_handler = BaseServer(self.soap_app)
def methods(self):
"""Returns a list of method descriptors for this object"""
if self._v_soap_methods is None:
self._v_soap_methods = self.build_public_methods()
return self._v_soap_methods
def service_description(self, REQUEST, RESPONSE):
""" """
if getattr(self, '__tns__', None) is None:
self.__tns__ = self.get_tns(self.__class__)
if self._v_soap_methods is None:
self._v_soap_methods = self.build_public_methods()
if self._v_cached_wsdl is None:
self._v_cached_wsdl = self.soap_app.get_wsdl(self.absolute_url())
self.__wsdl__ = None
RESPONSE.setHeader('Content-Type', 'text/xml')
return self._v_cached_wsdl
def index_html(self, REQUEST, RESPONSE):
"""Handle an incoming SOAP request or a non-SOAP WSDL query."""
        if REQUEST.get('SOAPXML', None) is None:  # Not a SOAP Request, return WSDL
return self.service_description(REQUEST, RESPONSE)
try:
# Deserialize the Body of the SOAP Request
from soaplib.core._base import _from_soap
header, payload = _from_soap(REQUEST.SOAPXML)
# TODO: At this point I need dispatch method calls to the soaplib.Application
# somehow....... :)
ctx = MethodContext()
content_type = cgi.parse_header(REQUEST.get("Content-Type"))
charset = content_type[1].get('charset',None)
length = REQUEST.get("Content-Length")
http_payload = REQUEST.read(int(length))
if not charset:
charset = "ascii"
in_string = collapse_swa(content_type, http_payload)
in_obj = self.soap_handler.get_in_object(ctx, in_string, charset)
out_obj = self.soap_handler.get_out_object(ctx, in_obj)
out_string = self.soap_handler.get_out_string(ctx, out_obj)
return out_string
except Exception, e:
            faultstring = str(e)
            fault = Fault(faultstring=faultstring)
            resp = etree.tostring(fault, encoding=string_encoding)
            RESPONSE.setStatus('InternalServerError', reason=faultstring)
RESPONSE.setHeader('Content-Type', 'text/xml')
return resp
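
# Illustrative usage sketch (added; not part of the original module). A
# concrete service folder might look like the following -- the class, method
# and namespace names here are hypothetical:
#
#     from OFS.Folder import Folder
#     from soaplib.core.model.primitive import String
#
#     class PatientServices(Folder, SoapFolder):
#         __tns__ = "PatientServices"
#
#         @soap(String, _returns=String)
#         def echo(self, message):
#             """Return the message unchanged."""
#             return message
#
# A plain GET on the folder then returns the generated WSDL, while a POST
# carrying a SOAPAction header is dispatched through index_html() above.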
class ISOAPException(IException):
pass
class SOAPException(Exception):
"""Base exception class for all derived exceptions for SOAP"""
implements(ISOAPException)
def __init__(self, request):
self.request = request
self.request['faultexc'] = self
def __str__(self):
return self.__class__.__name__
class AccessDeniedSOAP(SOAPException):
"""An exception to raise in a SOAP method if access is being denied."""
class SOAPExceptionView:
"""Adapts an (ISOAPException, IRequest) to a View
This view provides the XML representation of a SOAP fault that is
returned to a caller. To use it, register this view with Zope at some
initialization point:
from zope.app.testing import ztapi
from dummy import ISOAPException, SOAPExceptionView
ztapi.browserView(ISOAPException, u'index.html', SOAPExceptionView)
and then within your SOAP logic raise a SOAP exception where needed:
from dummy import SOAPException
raise SOAPException(request)
"""
def __init__(self, context, request):
self.context = context
self.request = request
def __call__(self):
faultstring = self.request['faultexc'].__class__.__name__
self.request.response.setStatus('InternalServerError', reason=faultstring)
faultcode = 'Server'
fault = make_soap_fault(faultstring, faultcode, detail=None)
self.request.response.setHeader('Content-Type', 'text/xml')
return ElementTree.tostring(fault, encoding=string_encoding)
# The following registers 'SOAPExceptionView' as an adapter that knows how to
# display (generate and return the XML source for a SOAP fault) for anything
# that implements the 'ISOAPException' interface.
ztapi.browserView(ISOAPException, u'index.html', SOAPExceptionView)
| 36.340102 | 89 | 0.69032 |
f73b2d639d0e65b873732c9f73125c0faa298413 | 1,867 | py | Python | hooks/post_gen_project.py | Everyday-Future/cookiecutter-devops | 316948eed75c95ac5768a1bf1d504807069aaa6a | ["BSD-3-Clause"] | null | null | null | hooks/post_gen_project.py | Everyday-Future/cookiecutter-devops | 316948eed75c95ac5768a1bf1d504807069aaa6a | ["BSD-3-Clause"] | null | null | null | hooks/post_gen_project.py | Everyday-Future/cookiecutter-devops | 316948eed75c95ac5768a1bf1d504807069aaa6a | ["BSD-3-Clause"] | null | null | null |
#!/usr/bin/env python
import os
import stat
PROJECT_DIRECTORY = os.path.realpath(os.path.curdir)
def remove_file(filepath):
os.remove(os.path.join(PROJECT_DIRECTORY, filepath))
if __name__ == '__main__':
if 'no' in '{{ cookiecutter.command_line_interface|lower }}':
cli_file = os.path.join('{{ cookiecutter.project_slug }}', 'cli.py')
remove_file(cli_file)
if 'Not open source' == '{{ cookiecutter.open_source_license }}':
remove_file('LICENSE')
# Create secret envs
os.rename(os.path.join(PROJECT_DIRECTORY, 'secret--template.env.txt'),
os.path.join(PROJECT_DIRECTORY, 'secret--template.env'))
os.rename(os.path.join(PROJECT_DIRECTORY, 'secret--template-values.env.txt'),
os.path.join(PROJECT_DIRECTORY, 'secret--template-values.env'))
os.rename(os.path.join(PROJECT_DIRECTORY, 'frontend', '.env.txt'),
os.path.join(PROJECT_DIRECTORY, 'frontend', '.env'))
os.rename(os.path.join(PROJECT_DIRECTORY, 'frontend', 'docker.env.txt'),
os.path.join(PROJECT_DIRECTORY, 'frontend', 'docker.env'))
    # Normalize Windows line endings (CRLF -> LF) in shell scripts
shell_scripts = [os.path.join(PROJECT_DIRECTORY, '.__run_cli.sh'),
os.path.join(PROJECT_DIRECTORY, 'boot.sh'),
os.path.join(PROJECT_DIRECTORY, 'host', 'test_loop.sh')]
for shell_script in shell_scripts:
with open(shell_script, "r") as fin:
lines = []
for line in fin:
lines.append(line.replace('\r\n', '\n'))
with open(shell_script, "w") as fout:
for line in lines:
fout.write(line)
# Make shell scripts executable
for shell_script in shell_scripts:
st = os.stat(shell_script)
os.chmod(shell_script, st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
| 38.102041 | 87 | 0.637386 |
f73b366b012fea66ac00ee79b22b61b0d63bffa5 | 541 | py | Python | Courses/Udacity/CS101/Lesson_4_Problem_Set_(Optional)/01-Word_Count/supplied/studentMain.py | leparrav/Playground | dcb90a2dd2bc1867511cfe621eb21248a60e357f | ["Unlicense"] | 1 | 2019-02-13T12:02:26.000Z | 2019-02-13T12:02:26.000Z | Courses/Udacity/CS101/Lesson_4_Problem_Set_(Optional)/01-Word_Count/supplied/studentMain.py | leparrav/Playground | dcb90a2dd2bc1867511cfe621eb21248a60e357f | ["Unlicense"] | 1 | 2018-08-13T15:58:33.000Z | 2018-08-13T15:58:33.000Z | Courses/Udacity/CS101/Lesson_4_Problem_Set_(Optional)/01-Word_Count/supplied/studentMain.py | leparrav/Playground | dcb90a2dd2bc1867511cfe621eb21248a60e357f | ["Unlicense"] | 2 | 2017-08-10T20:01:29.000Z | 2021-07-01T08:39:13.000Z |
# Write a procedure, count_words, which takes as input a string
# and returns the number of words in the string. You may consider words
# as strings of characters separated by spaces.
def count_words(passage):
    # Words are whitespace-separated runs of characters.
    return len(passage.split())

passage = ("The number of orderings of the 52 cards in a deck of cards "
           "is so great that if every one of the almost 7 billion people alive "
           "today dealt one ordering of the cards per second, it would take "
           "2.5 * 10**40 times the age of the universe to order the cards in every "
           "possible way.")
print count_words(passage)
#>>>56
| 33.8125 | 73 | 0.744917 |
f73b6022673fcd9dfb2e398bb51a7bbf33d4bb06 | 2,467 | py | Python | alien_tag.py | sacherjj/python-AlienRFID | aaddd846d46cca533dca43c256890c072e8f5ec5 | ["MIT"] | 1 | 2021-03-21T13:52:00.000Z | 2021-03-21T13:52:00.000Z | alien_tag.py | sacherjj/python-AlienRFID | aaddd846d46cca533dca43c256890c072e8f5ec5 | ["MIT"] | null | null | null | alien_tag.py | sacherjj/python-AlienRFID | aaddd846d46cca533dca43c256890c072e8f5ec5 | ["MIT"] | 2 | 2015-10-12T10:02:50.000Z | 2020-03-09T13:30:12.000Z |
__author__ = 'joesacher'
import datetime as dt
class AlienTag(object):
def __init__(self, taglist_entry):
self.disc = 0
self.last = 0
self.last_last = 0
self.id = 0
self.ant = 0
self.count = 0
self.proto = 0
self.rssi = 0
self.freq = 0
# self.speed = 0
# self.speed_smooth = 0
# self.speed_last = 0
# self.pos_smooth = 0
# self.pos_last = 0
# self.pos_min = 0
self.create(taglist_entry)
def __str__(self):
return self.id
def __gt__(self, other):
return self.id > other.id
def create(self, taglist_entry):
"""
Try to parse a taglist entry into a set of Tag object variables.
Uses a simple mapping from Alien 'text' format:
Tag:0102 0304 0506 0708 0900 0A0B, Disc:2008/10/28 10:49:35, Last:2008/10/28 10:49:35, Count:1, Ant:3, Proto:2
*rssi* and *speed* attributes are not included in the default text format.
In order to have them parsed correctly the _TagListFormat_ must be set to _custom_ and
the _TagListCustomFormat_ fields must be separated by the following text tokens:
'tag:', 'disc:', 'last:', 'count:', 'ant:', 'proto:', 'speed:', 'rssi:'
For example:
@rdr.taglistcustomformat("Tag:%i, Disc:${DATE1} ${TIME1}, Last:${DATE2} ${TIME2}, Count:${COUNT}, Ant:${TX}, Proto:${PROTO#}, Speed:${SPEED}, rssi:${RSSI})"
@rdr.taglistformat("custom")
"""
self.id = ""
if taglist_entry == "(No Tags)":
return
tagline = taglist_entry.split('\r\n')[0]
tagbits = {}
for keyval in tagline.split(", "):
key, val = keyval.split(":", 1)
# TODO: Raise Error on Bad Key Val parse
tagbits[key.lower()] = val
self.id = tagbits.get('tag', 'NO TAG ID')
self.ant = tagbits.get('ant', 0)
self.count = tagbits.get('count', 0)
self.disc = tagbits.get('disc', 0)
self.last = tagbits.get('last', 0)
# TODO: Convert self.last into datetime
self.last_last = self.last
self.proto = tagbits.get('proto', 0)
self.rssi = tagbits.get('rssi', 0)
self.freq = tagbits.get('freq', 0)
self.speed = tagbits.get('speed', 0)
    def update(self, new_tag):
        # Preserve the previous sighting before overwriting it.
        self.last_last = self.last
        self.last = new_tag.last
        self.count += new_tag.count
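
if __name__ == '__main__':
    # Minimal parsing sketch (added for illustration; not part of the original
    # module). The entry below is the sample line from the create() docstring.
    entry = ("Tag:0102 0304 0506 0708 0900 0A0B, Disc:2008/10/28 10:49:35, "
             "Last:2008/10/28 10:49:35, Count:1, Ant:3, Proto:2")
    tag = AlienTag(entry)
    print("%s ant=%s count=%s" % (tag.id, tag.ant, tag.count))
    # -> 0102 0304 0506 0708 0900 0A0B ant=3 count=1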
| 31.628205 | 165 | 0.565464 |
f73b6f6d7d6955528eef744b500e27d5eca877ef | 1,000 | py | Python | vyperlogix/django/forms/fields.py | raychorn/chrome_gui | f1fade70b61af12ee43c55c075aa9cfd32caa962 | ["CC0-1.0"] | 1 | 2020-09-29T01:36:33.000Z | 2020-09-29T01:36:33.000Z | vyperlogix/django/forms/fields.py | raychorn/chrome_gui | f1fade70b61af12ee43c55c075aa9cfd32caa962 | ["CC0-1.0"] | null | null | null | vyperlogix/django/forms/fields.py | raychorn/chrome_gui | f1fade70b61af12ee43c55c075aa9cfd32caa962 | ["CC0-1.0"] | null | null | null |
import sys
from vyperlogix.misc import _utils
from django.utils.datastructures import SortedDict as SortedDictFromList
from vyperlogix.classes.SmartObject import SmartObject
def fields_for_model(model, formfield_callback=lambda f: f.formfield()):
"""
    Returns a ``SortedDict`` mapping field names to form fields for the given Django model class.
Provide ``formfield_callback`` if you want to define different logic for
determining the formfield for a given database field. It's a callable that
takes a database Field instance and returns a form Field instance.
"""
field_list = []
try:
opts = model._meta
for f in opts.fields + opts.many_to_many:
if not f.editable:
continue
formfield = formfield_callback(f)
if formfield:
field_list.append((f.name, formfield))
except Exception as details:
print >>sys.stderr, _utils.formattedException(details=details)
    return SortedDictFromList(field_list)  # pass the (name, field) pairs directly so field order is preserved
| 33.333333 | 78 | 0.698 |
f73bbcedcd25303f7209490e28a550849e1245fb | 3,711 | py | Python | supriya/ugens/MulAdd.py | deeuu/supriya | 14fcb5316eccb4dafbe498932ceff56e1abb9d27 | ["MIT"] | null | null | null | supriya/ugens/MulAdd.py | deeuu/supriya | 14fcb5316eccb4dafbe498932ceff56e1abb9d27 | ["MIT"] | null | null | null | supriya/ugens/MulAdd.py | deeuu/supriya | 14fcb5316eccb4dafbe498932ceff56e1abb9d27 | ["MIT"] | null | null | null |
import collections
from supriya import CalculationRate
from supriya.synthdefs import UGen
class MulAdd(UGen):
"""
    An optimized multiplication / addition ugen.
::
>>> source = supriya.ugens.SinOsc.ar()
>>> mul_add = supriya.ugens.MulAdd.new(
... addend=0.5,
... multiplier=-1.5,
... source=source,
... )
>>> mul_add
MulAdd.ar()
"""
### CLASS VARIABLES ###
__documentation_section__ = "Basic Operator UGens"
_ordered_input_names = collections.OrderedDict(
[("source", None), ("multiplier", 1.0), ("addend", 0.0)]
)
### INITIALIZER ###
def __init__(self, addend=0.0, multiplier=1.0, calculation_rate=None, source=None):
UGen.__init__(
self,
addend=addend,
multiplier=multiplier,
calculation_rate=calculation_rate,
source=source,
)
### PRIVATE METHODS ###
@staticmethod
def _inputs_are_valid(source, multiplier, addend):
if CalculationRate.from_expr(source) == CalculationRate.AUDIO:
return True
if CalculationRate.from_expr(source) == CalculationRate.CONTROL:
if CalculationRate.from_expr(multiplier) in (
CalculationRate.CONTROL,
CalculationRate.SCALAR,
):
if CalculationRate.from_expr(addend) in (
CalculationRate.CONTROL,
CalculationRate.SCALAR,
):
return True
return False
@classmethod
def _new_single(
cls, addend=None, multiplier=None, calculation_rate=None, source=None
):
if multiplier == 0.0:
return addend
minus = multiplier == -1
no_multiplier = multiplier == 1
no_addend = addend == 0
if no_multiplier and no_addend:
return source
if minus and no_addend:
return -source
if no_addend:
return source * multiplier
if minus:
return addend - source
if no_multiplier:
return source + addend
if cls._inputs_are_valid(source, multiplier, addend):
return cls(
addend=addend,
multiplier=multiplier,
calculation_rate=calculation_rate,
source=source,
)
if cls._inputs_are_valid(multiplier, source, addend):
return cls(
addend=addend,
multiplier=source,
calculation_rate=calculation_rate,
source=multiplier,
)
return (source * multiplier) + addend
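
    # Illustrative summary (added for clarity; not in the original source) of
    # the rewrites above -- trivial multiplier/addend combinations never
    # allocate a real MulAdd ugen:
    #
    #     multiplier == 0               ->  addend
    #     multiplier == 1, addend == 0  ->  source
    #     multiplier == -1, addend == 0 ->  -source
    #     addend == 0                   ->  source * multiplier
    #     multiplier == -1              ->  addend - source
    #     multiplier == 1               ->  source + addend
    #
    # so e.g. MulAdd.new(source=sine, multiplier=1.0, addend=0.0) should just
    # return `sine` unchanged (assuming a single-channel source).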
### PUBLIC METHODS ###
@classmethod
def new(cls, source=None, multiplier=1.0, addend=0.0):
"""
Constructs a multiplication / addition ugen.
::
>>> addend = 0.5
>>> multiplier = 1.5
>>> source = supriya.ugens.SinOsc.ar(frequency=[440, 442])
>>> mul_add = supriya.ugens.MulAdd.new(
... addend=addend,
... multiplier=multiplier,
... source=source,
... )
>>> mul_add
UGenArray({2})
Returns ugen graph.
"""
import supriya.synthdefs
# TODO: handle case of array as source
calculation_rate = supriya.CalculationRate.from_expr(
(source, multiplier, addend)
)
ugen = cls._new_expanded(
addend=addend,
multiplier=multiplier,
calculation_rate=calculation_rate,
source=source,
)
return ugen
| 28.328244 | 87 | 0.537591 |
f73bcc307906ada8eceee6a21267b925ade3c652 | 13,646 | py | Python | deeppavlov/core/layers/tf_csoftmax_attention.py | ineersa/DeepPavlov | 8200bf9a0f0b378baad4ee0eb75b59453f516004 | ["Apache-2.0"] | 3 | 2020-04-16T04:25:10.000Z | 2021-05-07T23:04:43.000Z | deeppavlov/core/layers/tf_csoftmax_attention.py | ineersa/DeepPavlov | 8200bf9a0f0b378baad4ee0eb75b59453f516004 | ["Apache-2.0"] | 12 | 2020-01-28T22:14:04.000Z | 2022-02-10T00:10:17.000Z | deeppavlov/core/layers/tf_csoftmax_attention.py | ineersa/DeepPavlov | 8200bf9a0f0b378baad4ee0eb75b59453f516004 | ["Apache-2.0"] | 1 | 2021-02-05T13:01:48.000Z | 2021-02-05T13:01:48.000Z |
# Copyright 2017 Neural Networks and Deep Learning lab, MIPT
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
def csoftmax_for_slice(input):
""" It is a implementation of the constrained softmax (csoftmax) for slice.
Based on the paper:
https://andre-martins.github.io/docs/emnlp2017_final.pdf "Learning What's Easy: Fully Differentiable Neural Easy-First Taggers" (page 4)
Args:
input: A list of [input tensor, cumulative attention].
Returns:
output: A list of [csoftmax results, masks]
"""
[ten, u] = input
shape_t = ten.shape
shape_u = u.shape
ten -= tf.reduce_mean(ten)
q = tf.exp(ten)
active = tf.ones_like(u, dtype=tf.int32)
mass = tf.constant(0, dtype=tf.float32)
found = tf.constant(True, dtype=tf.bool)
def loop(q_, mask, mass_, found_):
q_list = tf.dynamic_partition(q_, mask, 2)
        condition_indices = tf.dynamic_partition(tf.range(tf.shape(q_)[0]), mask, 2)  # element 0 if False,
        # element 1 if True
p = q_list[1] * (1.0 - mass_) / tf.reduce_sum(q_list[1])
p_new = tf.dynamic_stitch(condition_indices, [q_list[0], p])
# condition verification and mask modification
less_mask = tf.cast(tf.less(u, p_new), tf.int32) # 0 when u is bigger than p, 1 when u is less than p
condition_indices = tf.dynamic_partition(tf.range(tf.shape(p_new)[0]), less_mask,
2) # 0 when u is bigger than p, 1 when u is less than p
split_p_new = tf.dynamic_partition(p_new, less_mask, 2)
split_u = tf.dynamic_partition(u, less_mask, 2)
alpha = tf.dynamic_stitch(condition_indices, [split_p_new[0], split_u[1]])
mass_ += tf.reduce_sum(split_u[1])
mask = mask * (tf.ones_like(less_mask) - less_mask)
found_ = tf.cond(tf.equal(tf.reduce_sum(less_mask), 0),
lambda: False,
lambda: True)
alpha = tf.reshape(alpha, q_.shape)
return alpha, mask, mass_, found_
(csoft, mask_, _, _) = tf.while_loop(cond=lambda _0, _1, _2, f: f,
body=loop,
loop_vars=(q, active, mass, found))
return [csoft, mask_]
def csoftmax(tensor, inv_cumulative_att):
""" It is a implementation of the constrained softmax (csoftmax).
Based on the paper:
https://andre-martins.github.io/docs/emnlp2017_final.pdf "Learning What's Easy: Fully Differentiable Neural Easy-First Taggers"
Args:
        tensor: A TensorFlow tensor of scores, with dimensionality [None, n_tokens]
        inv_cumulative_att: An inverse cumulative attention tensor with dimensionality [None, n_tokens]
Returns:
cs: Tensor at the output with dimensionality [None, n_tokens]
"""
shape_ten = tensor.shape
shape_cum = inv_cumulative_att.shape
merge_tensor = [tensor, inv_cumulative_att]
cs, _ = tf.map_fn(csoftmax_for_slice, merge_tensor, dtype=[tf.float32, tf.float32]) # [bs, L]
return cs
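# Illustrative usage sketch (added comment, not part of the original module;
# the shapes below are assumptions chosen for demonstration only):
#
#     scores = tf.placeholder(tf.float32, [None, 10])  # [batch, n_tokens]
#     budget = tf.ones_like(scores)                    # inverse cumulative attention u
#     att = csoftmax(scores, budget)                   # elementwise att <= budget
#
# With a budget of 1 everywhere csoftmax reduces to an ordinary softmax; a
# smaller remaining budget caps how much probability mass each token may still
# receive across the sketch steps.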
def attention_gen_step(hidden_for_sketch, hidden_for_attn_alignment, sketch, key, cum_att):
""" It is a implementation one step of block of the Luong et al. attention mechanism with general score and the constrained softmax (csoftmax).
Based on the papers:
https://arxiv.org/abs/1508.04025 "Effective Approaches to Attention-based Neural Machine Translation"
https://andre-martins.github.io/docs/emnlp2017_final.pdf "Learning What's Easy: Fully Differentiable Neural Easy-First Taggers"
Args:
        hidden_for_sketch: A TensorFlow tensor used for sketch computation, with dimensionality [None, max_num_tokens, sketch_hidden_size]
        hidden_for_attn_alignment: A TensorFlow tensor aligned for output, with dimensionality [None, max_num_tokens, hidden_size_for_attn_alignment]
        sketch: The previous step's sketch tensor, with dimensionality [None, sketch_hidden_size]
key: A tensorflow tensor with dimensionality [None, None, key_size]
cum_att: A cumulative attention tensor with dimensionality [None, max_num_tokens]
Returns:
next_sketch: Tensor of the current step sketch with dimensionality [None, sketch_hidden_size]
att: Tensor of the current step attention with dimensionality [None, max_num_tokens]
aligned_hidden_sketch: Tensor of aligned hidden state of current step with dimensionality [None, hidden_size_for_attn_alignment]
"""
with tf.name_scope('attention_step'):
sketch_dims = hidden_for_sketch.get_shape().as_list()
batch_size = sketch_dims[0]
num_tokens = sketch_dims[1]
hidden_size = sketch_dims[2]
attn_alignment_dims = hidden_for_attn_alignment.get_shape().as_list()
attn_alignment_hidden_size = attn_alignment_dims[2]
repeated_sketch = tf.tile(tf.reshape(sketch, [-1, 1, hidden_size]), (1,num_tokens, 1))
concat_mem = tf.concat([hidden_for_sketch, repeated_sketch],-1)
concat_mem = tf.reshape(concat_mem, [-1, num_tokens, 2*hidden_size]) # dirty trick
reduce_mem = tf.layers.dense(concat_mem, hidden_size)
projected_key = tf.layers.dense(key, hidden_size)
t_key = tf.reshape(projected_key,[-1, hidden_size, 1])
score = tf.reshape(tf.matmul(reduce_mem, t_key), [-1, num_tokens])
inv_cum_att = tf.reshape(tf.ones_like(cum_att) - cum_att, [-1, num_tokens])
att = csoftmax(score, inv_cum_att)
t_reduce_mem = tf.transpose(reduce_mem, [0,2,1])
t_hidden_for_attn_alignment = tf.transpose(hidden_for_attn_alignment, [0,2,1])
r_att = tf.reshape(att, [-1, num_tokens, 1])
next_sketch = tf.squeeze(tf.matmul(t_reduce_mem,r_att),-1)
aligned_hidden_sketch = tf.squeeze(tf.matmul(t_hidden_for_attn_alignment,r_att),-1)
return next_sketch, att, aligned_hidden_sketch
def attention_gen_block(hidden_for_sketch, hidden_for_attn_alignment, key, attention_depth):
""" It is a implementation of the Luong et al. attention mechanism with general score and the constrained softmax (csoftmax).
Based on the papers:
https://arxiv.org/abs/1508.04025 "Effective Approaches to Attention-based Neural Machine Translation"
https://andre-martins.github.io/docs/emnlp2017_final.pdf "Learning What's Easy: Fully Differentiable Neural Easy-First Taggers"
Args:
        hidden_for_sketch: A TensorFlow tensor used for sketch computation, with dimensionality [None, max_num_tokens, sketch_hidden_size]
        hidden_for_attn_alignment: A TensorFlow tensor aligned for output, with dimensionality [None, max_num_tokens, hidden_size_for_attn_alignment]
        key: A TensorFlow tensor with dimensionality [None, None, key_size]
        attention_depth: Number of csoftmax applications (number of sketch steps)
Returns:
final_aligned_hiddens: Tensor at the output with dimensionality [1, attention_depth, hidden_size_for_attn_alignment]
"""
with tf.name_scope('attention_block'):
sketch_dims = tf.shape(hidden_for_sketch)
batch_size = sketch_dims[0]
num_tokens = sketch_dims[1]
hidden_size = sketch_dims[2]
attn_alignment_dims = tf.shape(hidden_for_attn_alignment)
attn_alignment_hidden_size = attn_alignment_dims[2]
sketches = [tf.zeros(shape=[batch_size, hidden_size], dtype=tf.float32)]
aligned_hiddens = []
cum_att = tf.zeros(shape=[batch_size, num_tokens]) # cumulative attention
for i in range(attention_depth):
sketch, cum_att_, aligned_hidden = attention_gen_step(hidden_for_sketch, hidden_for_attn_alignment, sketches[-1], key, cum_att)
sketches.append(sketch) #sketch
aligned_hiddens.append(aligned_hidden) #sketch
cum_att += cum_att_
final_aligned_hiddens = tf.reshape(tf.transpose(tf.stack(aligned_hiddens), [1, 0, 2]),[1, attention_depth, attn_alignment_hidden_size])
return final_aligned_hiddens
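# Shape walk-through (added comment, not part of the original module): given
# hidden_for_sketch of shape [B, T, H], hidden_for_attn_alignment of shape
# [B, T, A], a key tensor and attention_depth = D, the block runs D
# csoftmax-attention steps whose per-token attentions accumulate in `cum_att`
# (so they sum to at most 1 per token) and returns the D aligned hidden states
# stacked into a tensor of shape [1, D, A].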
def attention_bah_step(hidden_for_sketch, hidden_for_attn_alignment, sketch, cum_att):
""" It is a implementation one step of block of the Bahdanau et al. attention mechanism with concat score and the constrained softmax (csoftmax).
Based on the papers:
https://arxiv.org/abs/1409.0473 "Neural Machine Translation by Jointly Learning to Align and Translate"
https://andre-martins.github.io/docs/emnlp2017_final.pdf "Learning What's Easy: Fully Differentiable Neural Easy-First Taggers"
Args:
        hidden_for_sketch: A TensorFlow tensor used for sketch computation, with dimensionality [None, max_num_tokens, sketch_hidden_size]
        hidden_for_attn_alignment: A TensorFlow tensor aligned for output, with dimensionality [None, max_num_tokens, hidden_size_for_attn_alignment]
        sketch: The previous step's sketch tensor, with dimensionality [None, sketch_hidden_size]
        cum_att: A cumulative attention tensor with dimensionality [None, max_num_tokens]
Returns:
next_sketch: Tensor of the current step sketch with dimensionality [None, sketch_hidden_size]
att: Tensor of the current step attention with dimensionality [None, max_num_tokens]
aligned_hidden_sketch: Tensor of aligned hidden state of current step with dimensionality [None, hidden_size_for_attn_alignment]
"""
with tf.name_scope('attention_step'):
sketch_dims = hidden_for_sketch.get_shape().as_list()
batch_size = sketch_dims[0]
num_tokens = sketch_dims[1]
hidden_size = sketch_dims[2]
attn_alignment_dims = hidden_for_attn_alignment.get_shape().as_list()
attn_alignment_hidden_size = attn_alignment_dims[2]
repeated_sketch = tf.tile(tf.reshape(sketch, [-1, 1, hidden_size]), (1,num_tokens, 1))
concat_mem = tf.concat([hidden_for_sketch, repeated_sketch],-1)
concat_mem = tf.reshape(concat_mem, [-1, num_tokens, 2*hidden_size]) # dirty trick
reduce_mem = tf.layers.dense(concat_mem, hidden_size)
score = tf.squeeze(tf.layers.dense(reduce_mem, units = 1,
use_bias=False),-1)
inv_cum_att = tf.reshape(tf.ones_like(cum_att) - cum_att, [-1, num_tokens])
att = csoftmax(score, inv_cum_att)
t_reduce_mem = tf.transpose(reduce_mem, [0,2,1])
t_hidden_for_attn_alignment = tf.transpose(hidden_for_attn_alignment, [0,2,1])
r_att = tf.reshape(att, [-1, num_tokens, 1])
next_sketch = tf.squeeze(tf.matmul(t_reduce_mem,r_att),-1)
aligned_hidden_sketch = tf.squeeze(tf.matmul(t_hidden_for_attn_alignment,r_att),-1)
return next_sketch, att, aligned_hidden_sketch
def attention_bah_block(hidden_for_sketch, hidden_for_attn_alignment, attention_depth):
""" It is a implementation of the Bahdanau et al. attention mechanism with concat score and the constrained softmax (csoftmax).
Based on the papers:
https://arxiv.org/abs/1409.0473 "Neural Machine Translation by Jointly Learning to Align and Translate"
https://andre-martins.github.io/docs/emnlp2017_final.pdf "Learning What's Easy: Fully Differentiable Neural Easy-First Taggers"
Args:
        hidden_for_sketch: A TensorFlow tensor used for sketch computation, with dimensionality [None, max_num_tokens, sketch_hidden_size]
        hidden_for_attn_alignment: A TensorFlow tensor aligned for output, with dimensionality [None, max_num_tokens, hidden_size_for_attn_alignment]
        attention_depth: Number of csoftmax applications (number of sketch steps)
Returns:
final_aligned_hiddens: Tensor at the output with dimensionality [1, attention_depth, hidden_size_for_attn_alignment]
"""
with tf.name_scope('attention_block'):
sketch_dims = tf.shape(hidden_for_sketch)
batch_size = sketch_dims[0]
num_tokens = sketch_dims[1]
hidden_size = sketch_dims[2]
attn_alignment_dims = tf.shape(hidden_for_attn_alignment)
attn_alignment_hidden_size = attn_alignment_dims[2]
sketches = [tf.zeros(shape=[batch_size, hidden_size], dtype=tf.float32)]
aligned_hiddens = []
cum_att = tf.zeros(shape=[batch_size, num_tokens]) # cumulative attention
for i in range(attention_depth):
sketch, cum_att_, aligned_hidden = attention_bah_step(hidden_for_sketch, hidden_for_attn_alignment, sketches[-1], cum_att)
sketches.append(sketch) #sketch
aligned_hiddens.append(aligned_hidden) #sketch
cum_att += cum_att_
final_aligned_hiddens = tf.reshape(tf.transpose(tf.stack(aligned_hiddens), [1, 0, 2]),[1, attention_depth, attn_alignment_hidden_size])
return final_aligned_hiddens
| 53.724409 | 184 | 0.708413 |
f73bd9fef40d9af195f1e1eb4ae5b47154f56eb1 | 461 | py | Python | sim/Simulator.py | adhocmaster/pyns | 607feb56baf0900535130195163eac331e131a2e | [
"MIT"
] | 1 | 2021-06-15T06:21:14.000Z | 2021-06-15T06:21:14.000Z | sim/Simulator.py | adhocmaster/pyns | 607feb56baf0900535130195163eac331e131a2e | [
"MIT"
] | null | null | null | sim/Simulator.py | adhocmaster/pyns | 607feb56baf0900535130195163eac331e131a2e | [
"MIT"
] | 1 | 2021-06-15T06:21:18.000Z | 2021-06-15T06:21:18.000Z | from abc import ABC
class Simulator(ABC):
def __init__(self):
self.stats = {}
self.stats['dataInFlight'] = []
self.stats['dataInQueue'] = []
self.stats['packetsInFlight'] = []
self.stats['packetsInQueue'] = []
self.stats['queueSize'] = []
self.stats['packetsSent'] = []
self.stats['packetsAcked'] = []
self.stats['totalPacketsSent'] = []
self.stats['totalPacketsAcked'] = [] | 28.8125 | 44 | 0.553145 |
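# Minimal usage sketch (added comment, not part of the original module; the
# subclass and its step() method below are hypothetical):
#
#     class TcpSimulator(Simulator):
#         def step(self, tick):
#             self.stats['packetsSent'].append(1)
#             self.stats['totalPacketsSent'].append(sum(self.stats['packetsSent']))
#
# i.e. concrete simulators presumably append one entry per simulation tick to
# the per-metric lists initialized in __init__.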
f73be5427fbe24030a0f1a642b8b36c5bc757baa | 7,447 | py | Python | git/objects/fun.py | yarikoptic/GitPython | 7576b282013249a2b20ccda4acacefd5e625ea39 | [
"BSD-3-Clause"
] | 1 | 2020-10-15T06:16:48.000Z | 2020-10-15T06:16:48.000Z | git/objects/fun.py | yarikoptic/GitPython | 7576b282013249a2b20ccda4acacefd5e625ea39 | [
"BSD-3-Clause"
] | null | null | null | git/objects/fun.py | yarikoptic/GitPython | 7576b282013249a2b20ccda4acacefd5e625ea39 | [
"BSD-3-Clause"
] | null | null | null | """Module with functions which are supposed to be as fast as possible"""
from stat import S_ISDIR
__all__ = ('tree_to_stream', 'tree_entries_from_data', 'traverse_trees_recursive',
'traverse_tree_recursive')
def tree_to_stream(entries, write):
"""Write the give list of entries into a stream using its write method
:param entries: **sorted** list of tuples with (binsha, mode, name)
:param write: write method which takes a data string"""
ord_zero = ord('0')
bit_mask = 7 # 3 bits set
for binsha, mode, name in entries:
mode_str = ''
for i in xrange(6):
mode_str = chr(((mode >> (i*3)) & bit_mask) + ord_zero) + mode_str
# END for each 8 octal value
# git slices away the first octal if its zero
if mode_str[0] == '0':
mode_str = mode_str[1:]
# END save a byte
# here it comes: if the name is actually unicode, the replacement below
# will not work as the binsha is not part of the ascii unicode encoding -
# hence we must convert to an utf8 string for it to work properly.
# According to my tests, this is exactly what git does, that is it just
# takes the input literally, which appears to be utf8 on linux.
if isinstance(name, unicode):
name = name.encode("utf8")
write("%s %s\0%s" % (mode_str, name, binsha))
# END for each item
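# Worked example (added comment, not part of the original module): the digit
# loop above serializes a directory mode 0o040000 as the six octal digits
# "040000", and the leading "0" is then stripped to give "40000"; a regular
# file mode 0o100644 stays "100644". Each entry is thus written in git's tree
# format "<mode> <name>\0<20-byte binary sha>".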
def tree_entries_from_data(data):
"""Reads the binary representation of a tree and returns tuples of Tree items
:param data: data block with tree data
:return: list(tuple(binsha, mode, tree_relative_path), ...)"""
ord_zero = ord('0')
len_data = len(data)
i = 0
out = list()
while i < len_data:
mode = 0
# read mode
# Some git versions truncate the leading 0, some don't
# The type will be extracted from the mode later
while data[i] != ' ':
# move existing mode integer up one level being 3 bits
# and add the actual ordinal value of the character
mode = (mode << 3) + (ord(data[i]) - ord_zero)
i += 1
# END while reading mode
# byte is space now, skip it
i += 1
# parse name, it is NULL separated
ns = i
while data[i] != '\0':
i += 1
# END while not reached NULL
# default encoding for strings in git is utf8
# Only use the respective unicode object if the byte stream was encoded
name = data[ns:i]
name_enc = name.decode("utf-8")
if len(name) > len(name_enc):
name = name_enc
# END handle encoding
# byte is NULL, get next 20
i += 1
sha = data[i:i+20]
i = i + 20
out.append((sha, mode, name))
# END for each byte in data stream
return out
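# Format note (added comment, not part of the original module): each parsed
# entry is a (binsha, mode, name) tuple; e.g. the raw bytes
# "100644 README\0" followed by a 20-byte binary sha parse to
# (<20-byte sha>, 33188, 'README'), where 33188 == 0o100644.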
def _find_by_name(tree_data, name, is_dir, start_at):
"""return data entry matching the given name and tree mode
or None.
Before the item is returned, the respective data item is set
None in the tree_data list to mark it done"""
try:
item = tree_data[start_at]
if item and item[2] == name and S_ISDIR(item[1]) == is_dir:
tree_data[start_at] = None
return item
except IndexError:
pass
# END exception handling
for index, item in enumerate(tree_data):
if item and item[2] == name and S_ISDIR(item[1]) == is_dir:
tree_data[index] = None
return item
# END if item matches
# END for each item
return None
def _to_full_path(item, path_prefix):
"""Rebuild entry with given path prefix"""
if not item:
return item
return (item[0], item[1], path_prefix+item[2])
def traverse_trees_recursive(odb, tree_shas, path_prefix):
"""
:return: list with entries according to the given binary tree-shas.
The result is encoded in a list
of n tuple|None per blob/commit, (n == len(tree_shas)), where
* [0] == 20 byte sha
* [1] == mode as int
* [2] == path relative to working tree root
The entry tuple is None if the respective blob/commit did not
exist in the given tree.
:param tree_shas: iterable of shas pointing to trees. All trees must
        be on the same level. A tree-sha may be None, in which case None is used in its place
:param path_prefix: a prefix to be added to the returned paths on this level,
set it '' for the first iteration
:note: The ordering of the returned items will be partially lost"""
trees_data = list()
nt = len(tree_shas)
for tree_sha in tree_shas:
if tree_sha is None:
data = list()
else:
data = tree_entries_from_data(odb.stream(tree_sha).read())
# END handle muted trees
trees_data.append(data)
# END for each sha to get data for
out = list()
out_append = out.append
# find all matching entries and recursively process them together if the match
# is a tree. If the match is a non-tree item, put it into the result.
# Processed items will be set None
for ti, tree_data in enumerate(trees_data):
for ii, item in enumerate(tree_data):
if not item:
continue
# END skip already done items
entries = [ None for n in range(nt) ]
entries[ti] = item
sha, mode, name = item # its faster to unpack
is_dir = S_ISDIR(mode) # type mode bits
# find this item in all other tree data items
# wrap around, but stop one before our current index, hence
# ti+nt, not ti+1+nt
for tio in range(ti+1, ti+nt):
tio = tio % nt
entries[tio] = _find_by_name(trees_data[tio], name, is_dir, ii)
# END for each other item data
# if we are a directory, enter recursion
if is_dir:
out.extend(traverse_trees_recursive(odb, [((ei and ei[0]) or None) for ei in entries], path_prefix+name+'/'))
else:
out_append(tuple(_to_full_path(e, path_prefix) for e in entries))
# END handle recursion
# finally mark it done
tree_data[ii] = None
# END for each item
# we are done with one tree, set all its data empty
del(tree_data[:])
# END for each tree_data chunk
return out
def traverse_tree_recursive(odb, tree_sha, path_prefix):
"""
:return: list of entries of the tree pointed to by the binary tree_sha. An entry
has the following format:
* [0] 20 byte sha
* [1] mode as int
* [2] path relative to the repository
:param path_prefix: prefix to prepend to the front of all returned paths"""
entries = list()
data = tree_entries_from_data(odb.stream(tree_sha).read())
# unpacking/packing is faster than accessing individual items
for sha, mode, name in data:
if S_ISDIR(mode):
entries.extend(traverse_tree_recursive(odb, sha, path_prefix+name+'/'))
else:
entries.append((sha, mode, path_prefix+name))
# END for each item
return entries
| 37.235 | 125 | 0.587485 |
f73c21ab8ff31d42eefafa43012f748feffc581f | 20,942 | py | Python | acq4/pyqtgraph/widgets/SpinBox.py | tropp/ACQ4 | 792e05e99cedfc175593d200aeabecd6fa6304ce | [
"MIT"
] | null | null | null | acq4/pyqtgraph/widgets/SpinBox.py | tropp/ACQ4 | 792e05e99cedfc175593d200aeabecd6fa6304ce | [
"MIT"
] | null | null | null | acq4/pyqtgraph/widgets/SpinBox.py | tropp/ACQ4 | 792e05e99cedfc175593d200aeabecd6fa6304ce | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from ..Qt import QtGui, QtCore
from ..python2_3 import asUnicode
from ..SignalProxy import SignalProxy
from .. import functions as fn
from math import log
from decimal import Decimal as D, ROUND_FLOOR ## Use decimal to avoid accumulating floating-point errors
import weakref
__all__ = ['SpinBox']
class SpinBox(QtGui.QAbstractSpinBox):
"""
**Bases:** QtGui.QAbstractSpinBox
QSpinBox widget on steroids. Allows selection of numerical value, with extra features:
- SI prefix notation (eg, automatically display "300 mV" instead of "0.003 V")
- Float values with linear and decimal stepping (1-9, 10-90, 100-900, etc.)
- Option for unbounded values
- Delayed signals (allows multiple rapid changes with only one change signal)
============================= ==============================================
**Signals:**
valueChanged(value) Same as QSpinBox; emitted every time the value
has changed.
sigValueChanged(self) Emitted when value has changed, but also combines
multiple rapid changes into one signal (eg,
when rolling the mouse wheel).
sigValueChanging(self, value) Emitted immediately for all value changes.
============================= ==============================================
"""
## There's a PyQt bug that leaks a reference to the
## QLineEdit returned from QAbstractSpinBox.lineEdit()
## This makes it possible to crash the entire program
## by making accesses to the LineEdit after the spinBox has been deleted.
## I have no idea how to get around this..
valueChanged = QtCore.Signal(object) # (value) for compatibility with QSpinBox
sigValueChanged = QtCore.Signal(object) # (self)
sigValueChanging = QtCore.Signal(object, object) # (self, value) sent immediately; no delay.
def __init__(self, parent=None, value=0.0, **kwargs):
"""
============== ========================================================================
**Arguments:**
parent Sets the parent widget for this SpinBox (optional). Default is None.
value (float/int) initial value. Default is 0.0.
bounds (min,max) Minimum and maximum values allowed in the SpinBox.
Either may be None to leave the value unbounded. By default, values are unbounded.
suffix (str) suffix (units) to display after the numerical value. By default, suffix is an empty str.
siPrefix (bool) If True, then an SI prefix is automatically prepended
to the units and the value is scaled accordingly. For example,
if value=0.003 and suffix='V', then the SpinBox will display
"300 mV" (but a call to SpinBox.value will still return 0.003). Default is False.
step (float) The size of a single step. This is used when clicking the up/
down arrows, when rolling the mouse wheel, or when pressing
keyboard arrows while the widget has keyboard focus. Note that
the interpretation of this value is different when specifying
the 'dec' argument. Default is 0.01.
dec (bool) If True, then the step value will be adjusted to match
the current size of the variable (for example, a value of 15
might step in increments of 1 whereas a value of 1500 would
step in increments of 100). In this case, the 'step' argument
is interpreted *relative* to the current value. The most common
'step' values when dec=True are 0.1, 0.2, 0.5, and 1.0. Default is False.
minStep (float) When dec=True, this specifies the minimum allowable step size.
int (bool) if True, the value is forced to integer type. Default is False
precision (int) Number of significant digits to display. Default is 3.
============== ========================================================================
"""
QtGui.QAbstractSpinBox.__init__(self, parent)
self.lastValEmitted = None
self.lastText = ''
self.textValid = True ## If false, we draw a red border
self.setMinimumWidth(0)
self.setMaximumHeight(20)
self.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
self.opts = {
'bounds': [None, None],
## Log scaling options #### Log mode is no longer supported.
#'step': 0.1,
#'minStep': 0.001,
#'log': True,
#'dec': False,
## decimal scaling option - example
#'step': 0.1,
#'minStep': .001,
#'log': False,
#'dec': True,
## normal arithmetic step
'step': D('0.01'), ## if 'dec' is false, the spinBox steps by 'step' every time
## if 'dec' is True, the step size is relative to the value
## 'step' needs to be an integral divisor of ten, ie 'step'*n=10 for some integer value of n (but only if dec is True)
'log': False,
'dec': False, ## if true, does decimal stepping. ie from 1-10 it steps by 'step', from 10 to 100 it steps by 10*'step', etc.
## if true, minStep must be set in order to cross zero.
'int': False, ## Set True to force value to be integer
'suffix': '',
'siPrefix': False, ## Set to True to display numbers with SI prefix (ie, 100pA instead of 1e-10A)
'delay': 0.3, ## delay sending wheel update signals for 300ms
'delayUntilEditFinished': True, ## do not send signals until text editing has finished
'precision': 3,
## for compatibility with QDoubleSpinBox and QSpinBox
'decimals': None,
}
self.decOpts = ['step', 'minStep']
self.val = D(asUnicode(value)) ## Value is precise decimal. Ordinary math not allowed.
self.updateText()
self.skipValidate = False
self.setCorrectionMode(self.CorrectToPreviousValue)
self.setKeyboardTracking(False)
self.setOpts(**kwargs)
self.editingFinished.connect(self.editingFinishedEvent)
self.proxy = SignalProxy(self.sigValueChanging, slot=self.delayedChange, delay=self.opts['delay'])
def event(self, ev):
ret = QtGui.QAbstractSpinBox.event(self, ev)
if ev.type() == QtCore.QEvent.KeyPress and ev.key() == QtCore.Qt.Key_Return:
ret = True ## For some reason, spinbox pretends to ignore return key press
return ret
##lots of config options, just gonna stuff 'em all in here rather than do the get/set crap.
def setOpts(self, **opts):
"""
Changes the behavior of the SpinBox. Accepts most of the arguments
allowed in :func:`__init__ <pyqtgraph.SpinBox.__init__>`.
"""
#print opts
for k in opts:
if k == 'bounds':
#print opts[k]
self.setMinimum(opts[k][0], update=False)
self.setMaximum(opts[k][1], update=False)
#for i in [0,1]:
#if opts[k][i] is None:
#self.opts[k][i] = None
#else:
#self.opts[k][i] = D(unicode(opts[k][i]))
elif k in ['step', 'minStep']:
self.opts[k] = D(asUnicode(opts[k]))
elif k == 'value':
pass ## don't set value until bounds have been set
else:
self.opts[k] = opts[k]
if 'value' in opts:
self.setValue(opts['value'])
## If bounds have changed, update value to match
if 'bounds' in opts and 'value' not in opts:
self.setValue()
## sanity checks:
if self.opts['int']:
if 'step' in opts:
step = opts['step']
## not necessary..
#if int(step) != step:
#raise Exception('Integer SpinBox must have integer step size.')
else:
self.opts['step'] = int(self.opts['step'])
if 'minStep' in opts:
step = opts['minStep']
if int(step) != step:
raise Exception('Integer SpinBox must have integer minStep size.')
else:
ms = int(self.opts.get('minStep', 1))
if ms < 1:
ms = 1
self.opts['minStep'] = ms
if 'delay' in opts:
self.proxy.setDelay(opts['delay'])
self.updateText()
def setMaximum(self, m, update=True):
"""Set the maximum allowed value (or None for no limit)"""
if m is not None:
m = D(asUnicode(m))
self.opts['bounds'][1] = m
if update:
self.setValue()
def setMinimum(self, m, update=True):
"""Set the minimum allowed value (or None for no limit)"""
if m is not None:
m = D(asUnicode(m))
self.opts['bounds'][0] = m
if update:
self.setValue()
def setPrefix(self, p):
self.setOpts(prefix=p)
def setRange(self, r0, r1):
self.setOpts(bounds = [r0,r1])
def setProperty(self, prop, val):
## for QSpinBox compatibility
if prop == 'value':
#if type(val) is QtCore.QVariant:
#val = val.toDouble()[0]
self.setValue(val)
else:
print("Warning: SpinBox.setProperty('%s', ..) not supported." % prop)
def setSuffix(self, suf):
self.setOpts(suffix=suf)
def setSingleStep(self, step):
self.setOpts(step=step)
def setPrecision(self, p):
"""Set the number of significant digits to display.
"""
self.setOpts(precision=p)
def setDecimals(self, decimals):
# Note: non-functional for now; provided as workaround for uic files that set this property.
self.setOpts(decimals=decimals)
def selectNumber(self):
"""
Select the numerical portion of the text to allow quick editing by the user.
"""
le = self.lineEdit()
text = asUnicode(le.text())
if self.opts['suffix'] == '':
le.setSelection(0, len(text))
else:
try:
index = text.index(' ')
except ValueError:
return
le.setSelection(0, index)
def value(self):
"""
Return the value of this SpinBox.
"""
if self.opts['int']:
return int(self.val)
else:
return float(self.val)
def setValue(self, value=None, update=True, delaySignal=False):
"""
Set the value of this spin.
If the value is out of bounds, it will be clipped to the nearest boundary.
If the spin is integer type, the value will be coerced to int.
Returns the actual value set.
If value is None, then the current value is used (this is for resetting
the value after bounds, etc. have changed)
"""
if value is None:
value = self.value()
bounds = self.opts['bounds']
if bounds[0] is not None and value < bounds[0]:
value = bounds[0]
if bounds[1] is not None and value > bounds[1]:
value = bounds[1]
if self.opts['int']:
value = int(value)
value = D(asUnicode(value))
if value == self.val:
return
prev = self.val
self.val = value
if update:
self.updateText(prev=prev)
self.sigValueChanging.emit(self, float(self.val)) ## change will be emitted in 300ms if there are no subsequent changes.
if not delaySignal:
self.emitChanged()
return value
def emitChanged(self):
self.lastValEmitted = self.val
self.valueChanged.emit(float(self.val))
self.sigValueChanged.emit(self)
def delayedChange(self):
try:
if self.val != self.lastValEmitted:
self.emitChanged()
except RuntimeError:
pass ## This can happen if we try to handle a delayed signal after someone else has already deleted the underlying C++ object.
def widgetGroupInterface(self):
return (self.valueChanged, SpinBox.value, SpinBox.setValue)
def sizeHint(self):
return QtCore.QSize(120, 0)
def stepEnabled(self):
return self.StepUpEnabled | self.StepDownEnabled
#def fixup(self, *args):
#print "fixup:", args
def stepBy(self, n):
n = D(int(n)) ## n must be integral number of steps.
s = [D(-1), D(1)][n >= 0] ## determine sign of step
val = self.val
for i in range(int(abs(n))):
if self.opts['log']:
raise Exception("Log mode no longer supported.")
# step = abs(val) * self.opts['step']
# if 'minStep' in self.opts:
# step = max(step, self.opts['minStep'])
# val += step * s
if self.opts['dec']:
if val == 0:
step = self.opts['minStep']
exp = None
else:
vs = [D(-1), D(1)][val >= 0]
#exp = D(int(abs(val*(D('1.01')**(s*vs))).log10()))
fudge = D('1.01')**(s*vs) ## fudge factor. at some places, the step size depends on the step sign.
exp = abs(val * fudge).log10().quantize(1, ROUND_FLOOR)
step = self.opts['step'] * D(10)**exp
if 'minStep' in self.opts:
step = max(step, self.opts['minStep'])
val += s * step
#print "Exp:", exp, "step", step, "val", val
else:
val += s*self.opts['step']
if 'minStep' in self.opts and abs(val) < self.opts['minStep']:
val = D(0)
self.setValue(val, delaySignal=True) ## note all steps (arrow buttons, wheel, up/down keys..) emit delayed signals only.
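    # Worked example for the dec=True branch above (added comment, not part of
    # the original source): with val = 150, step = 0.1 and a positive step,
    # exp = floor(log10(150 * 1.01)) = 2, so the effective step is
    # 0.1 * 10**2 = 10 and the value moves 150 -> 160. The fudge factor
    # 1.01**(s*vs) nudges values sitting exactly on a decade boundary (e.g.
    # 100) into the decade matching the step direction, keeping up/down
    # stepping symmetric around those boundaries.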
def valueInRange(self, value):
bounds = self.opts['bounds']
if bounds[0] is not None and value < bounds[0]:
return False
if bounds[1] is not None and value > bounds[1]:
return False
if self.opts.get('int', False):
if int(value) != value:
return False
return True
def updateText(self, prev=None):
#print "Update text."
self.skipValidate = True
if self.opts['siPrefix']:
if self.val == 0 and prev is not None:
(s, p) = fn.siScale(prev)
txt = "0.0 %s%s" % (p, self.opts['suffix'])
else:
txt = fn.siFormat(float(self.val), suffix=self.opts['suffix'], precision=self.opts['precision'])
else:
txt = '%g%s' % (self.val , self.opts['suffix'])
self.lineEdit().setText(txt)
self.lastText = txt
self.skipValidate = False
def validate(self, strn, pos):
if self.skipValidate:
#print "skip validate"
#self.textValid = False
ret = QtGui.QValidator.Acceptable
else:
try:
## first make sure we didn't mess with the suffix
suff = self.opts.get('suffix', '')
if len(suff) > 0 and asUnicode(strn)[-len(suff):] != suff:
#print '"%s" != "%s"' % (unicode(strn)[-len(suff):], suff)
ret = QtGui.QValidator.Invalid
## next see if we actually have an interpretable value
else:
val = self.interpret()
if val is False:
#print "can't interpret"
#self.setStyleSheet('SpinBox {border: 2px solid #C55;}')
#self.textValid = False
ret = QtGui.QValidator.Intermediate
else:
if self.valueInRange(val):
if not self.opts['delayUntilEditFinished']:
self.setValue(val, update=False)
#print " OK:", self.val
#self.setStyleSheet('')
#self.textValid = True
ret = QtGui.QValidator.Acceptable
else:
ret = QtGui.QValidator.Intermediate
except:
#print " BAD"
#import sys
#sys.excepthook(*sys.exc_info())
#self.textValid = False
#self.setStyleSheet('SpinBox {border: 2px solid #C55;}')
ret = QtGui.QValidator.Intermediate
## draw / clear border
if ret == QtGui.QValidator.Intermediate:
self.textValid = False
elif ret == QtGui.QValidator.Acceptable:
self.textValid = True
## note: if text is invalid, we don't change the textValid flag
## since the text will be forced to its previous state anyway
self.update()
## support 2 different pyqt APIs. Bleh.
if hasattr(QtCore, 'QString'):
return (ret, pos)
else:
return (ret, strn, pos)
def paintEvent(self, ev):
QtGui.QAbstractSpinBox.paintEvent(self, ev)
## draw red border if text is invalid
if not self.textValid:
p = QtGui.QPainter(self)
p.setRenderHint(p.Antialiasing)
p.setPen(fn.mkPen((200,50,50), width=2))
p.drawRoundedRect(self.rect().adjusted(2, 2, -2, -2), 4, 4)
p.end()
def interpret(self):
"""Return value of text. Return False if text is invalid, raise exception if text is intermediate"""
strn = self.lineEdit().text()
suf = self.opts['suffix']
if len(suf) > 0:
if strn[-len(suf):] != suf:
return False
#raise Exception("Units are invalid.")
strn = strn[:-len(suf)]
try:
val = fn.siEval(strn)
except:
#sys.excepthook(*sys.exc_info())
#print "invalid"
return False
#print val
return val
#def interpretText(self, strn=None):
#print "Interpret:", strn
#if strn is None:
#strn = self.lineEdit().text()
#self.setValue(siEval(strn), update=False)
##QtGui.QAbstractSpinBox.interpretText(self)
def editingFinishedEvent(self):
"""Edit has finished; set value."""
#print "Edit finished."
if asUnicode(self.lineEdit().text()) == self.lastText:
#print "no text change."
return
try:
val = self.interpret()
except:
return
if val is False:
#print "value invalid:", str(self.lineEdit().text())
return
if val == self.val:
#print "no value change:", val, self.val
return
self.setValue(val, delaySignal=False) ## allow text update so that values are reformatted pretty-like
#def textChanged(self):
#print "Text changed."
### Drop-in replacement for SpinBox; just for crash-testing
#class SpinBox(QtGui.QDoubleSpinBox):
#valueChanged = QtCore.Signal(object) # (value) for compatibility with QSpinBox
#sigValueChanged = QtCore.Signal(object) # (self)
#sigValueChanging = QtCore.Signal(object) # (value)
#def __init__(self, parent=None, *args, **kargs):
#QtGui.QSpinBox.__init__(self, parent)
#def __getattr__(self, attr):
#return lambda *args, **kargs: None
#def widgetGroupInterface(self):
#return (self.valueChanged, SpinBox.value, SpinBox.setValue)
| 39.813688 | 150 | 0.518671 |
f73c24cd61c6f29f2dd100f7e4af4609b7bb9113 | 17,366 | py | Python | XY_Model_propare_state3_chi64_A0.py | StudentsZhouPengfei/Automatically-Differentiable-Quantum-Circuit-for-Many-qubit-State-Preparation | 42d3a77380e78819375c9fb2c5600ddc89a3ae3f | [
"MIT"
] | 3 | 2021-05-10T01:49:59.000Z | 2021-06-13T19:03:40.000Z | XY_Model_propare_state3_chi64_A0.py | StudentsZhouPengfei/Automatically-Differentiable-Quantum-Circuit-for-Many-qubit-State-Preparation | 42d3a77380e78819375c9fb2c5600ddc89a3ae3f | [
"MIT"
] | null | null | null | XY_Model_propare_state3_chi64_A0.py | StudentsZhouPengfei/Automatically-Differentiable-Quantum-Circuit-for-Many-qubit-State-Preparation | 42d3a77380e78819375c9fb2c5600ddc89a3ae3f | [
"MIT"
] | null | null | null | import torch as tc
import numpy as np
import copy
import os,sys
import Circle_Function_Class_A0 as ev
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('Agg')
from torch.optim.lr_scheduler import StepLR
import BasicFunSJR as bfs
from CNNBTN import Paras_VL_CNN_BTN_Collected1chg1
import BasicFun as bf
tmp = sys.argv[0][sys.argv[0].rfind(os.sep) + 1:] # extract the file name
mark = tmp[-5]
which_gpu = tmp[-4] # GPU selection is fixed by the file name
para = Paras_VL_CNN_BTN_Collected1chg1()
para['dataset'] = 'fashion-mnist'
para['device'] = bf.choose_device(which_gpu)
para['log_name'] = './record' + mark + which_gpu
start = tc.cuda.Event(enable_timing=True)
end = tc.cuda.Event(enable_timing=True)
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'
os.environ['CUDA_VISIBLE_DEVICE'] = '0'
# tc.manual_seed(7) # fix the random seed so that the generated random numbers are reproducible
dtype = tc.float32 # float32; used to monitor the norm
mps_num = 48
lr = 1e-2
it_time = 50
pt_time = 50 # number of alternating-optimization rounds
dt_print = 10
step_size = it_time * pt_time // 5 # epoch interval at which the learning rate decays
x1_axis = list() # x-axis for plotting: number of optimization steps
identity_4 = tc.eye(4, dtype=dtype).to(para['device']) # identity gate used to build the slightly perturbed second-layer evolution gates
vol = tc.tensor(1e-3, dtype=dtype).to(para['device']) # magnitude of the small perturbation; has little effect on the number of optimization steps
con_vol = tc.tensor(1e-5, dtype=dtype).to(para['device'])
entropy_list = list()
average = tc.tensor(0, dtype=dtype).to(para['device']) # initial value used when computing the entanglement entropy
k_bood = 64
file_name = r'./tar_data.npz'
out_file_name = r'./layer_out_data.npz'
Loss_accuracy_range = 0.0001 # loss precision threshold; the loop exits automatically once this precision is reached
base_it_time = it_time//3 # minimum number of optimization steps, related to the layer-wise optimization
center_position = 24
layer_num = 3 # number of circuit layers whose gates are optimized
gatenum = (mps_num - 1)*layer_num # number of variational quantum gates
tar_mpslist = list()
ini_state = list()
y_loss_layer = list() # per-layer loss of the alternating layer-wise optimization
y_loss_conda = list() # loss of the joint optimization
read_gatenum = (mps_num - 1)*(layer_num -1)
zero_gatetensor = tc.zeros(gatenum, 4, 4)
conba_gatalist = list()
layer_gatelist = list() # later reshaped into rank-3 tensors of shape (2, 4, 2)
layer_gatelist_0 = list() # gates stored per layer
layer_gatelist_1 = list() # gates stored per layer
layer_gatelist_2 = list() # gates stored per layer
layer_gatelist_3 = list() # gates stored per layer
layer_gatelist_4 = list() # gates stored per layer
layer_gatelist_5 = list() # gates stored per layer
layer_optimize = list() # optimizers stored per layer
loss_ = list([list([]), list([]), list([]), list([]), list([]), list([]), list([]), list([]), list([]), list([]),
list([]), list([]), list([])])
half_entropy_list = list([]) # for building the heat map
half_entropy_list.append(tc.zeros([pt_time+1, mps_num-1])) # the last row holds the target entanglement entropy
number_list = list([0])
print('The quantum circuit depth is ' + str(layer_num))
print('lr=:' + str(lr) + ', k_bood=: ' + str(k_bood) + ', the small perturbation per unitary gate is: ' + str(vol))
data = np.load(file_name)
tar_mpslist.append(tc.from_numpy(data['tar_mpslist0']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist1']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist2']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist3']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist4']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist5']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist6']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist7']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist8']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist9']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist10']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist11']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist12']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist13']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist14']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist15']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist16']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist17']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist18']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist19']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist20']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist21']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist22']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist23']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist24']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist25']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist26']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist27']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist28']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist29']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist30']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist31']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist32']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist33']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist34']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist35']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist36']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist37']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist38']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist39']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist40']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist41']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist42']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist43']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist44']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist45']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist46']).to(para['device']))
tar_mpslist.append(tc.from_numpy(data['tar_mpslist47']).to(para['device']))
def fprint(content, file=None, print_screen=True, append=True):
if file is None:
file = './record.log'
if append:
way = 'ab'
else:
way = 'wb'
with open(file, way, buffering=0) as log:
log.write((content + '\n').encode(encoding='utf-8'))
if print_screen:
print(content)
def mps_norm(tar_tensor_): # normalize the target quantum state (log normalization)
tv = tc.einsum('asb,asd->bd', tar_tensor_[0].data, tar_tensor_[0].data)
t_norm = tc.norm(tv)
tv = tv / t_norm
tar_tensor_[0] = tar_tensor_[0].data / tc.sqrt(t_norm)
for gt in range(1, mps_num):
if gt < mps_num - 1:
tv = tc.einsum('ac,asb,csd->bd', tv, tar_tensor_[gt].data, tar_tensor_[gt].data)
else:
tv = tc.einsum('ac,asb,csd->bd', tv, tar_tensor_[gt].data, tar_tensor_[gt].data)
norm_t = tc.norm(tv)
tv = tv / norm_t
tar_tensor_[gt] = tar_tensor_[gt] / tc.sqrt(norm_t)
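# Note (added comment, not part of the original script): mps_norm sweeps the
# chain left to right, contracting each tensor with itself over the physical
# index and rescaling it by the square root of the running norm, so that
# afterwards the MPS represents a normalized state (<psi|psi> = 1) while the
# intermediate contractions stay numerically stable.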
def qr_left_and_right_location(MPS_list, location, vol, feature_num=2): # orthogonalize the target MPS and compute its entanglement entropy
# print('location', location)
for k in range(location):
# print('k', k)
q, r = tc.qr(MPS_list[k].reshape(-1, MPS_list[k].shape[2]))
r = r
MPS_list[k] = q.reshape(-1, feature_num, q.shape[1])
MPS_list[k + 1] = tc.einsum('nl, lmk-> nmk', [r, MPS_list[k + 1]])
for i in range(len(MPS_list) - 1, location, -1):
# print('i', i)
q, r = tc.qr(MPS_list[i].reshape(MPS_list[i].shape[0], -1).t())
q_shape = q.t().shape
MPS_list[i] = q.t().reshape(q_shape[0], feature_num, -1)
r = r
MPS_list[i - 1] = tc.einsum('ldk, nk-> ldn', [MPS_list[i - 1], r])
MPS_list[location] = MPS_list[location]/tc.norm(MPS_list[location])
# u, s, v = tc.svd(MPS_list[location].reshape(-1, MPS_list[location].shape[2]))
u, s, v = tc.svd(MPS_list[location].reshape(MPS_list[location].shape[0], -1))
s = s[s > vol]
y = (-1) * tc.sum(tc.pow(s, 2) * tc.log(tc.pow(s, 2)), dim=0).item()
    return y, MPS_list # y: the entanglement entropy; MPS_list: the orthogonalized target MPS as a list()
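# Note (added comment, not part of the original script): the entropy returned
# above is the von Neumann entanglement entropy of the bipartition at
# `location`, S = -sum_i s_i^2 * log(s_i^2), where the s_i are the singular
# values of the orthogonality-center tensor and values below `vol` are
# discarded as numerical noise.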
def half_entropy(out_mps):
for ht in range(1, mps_num):
h_entropy = qr_left_and_right_location(out_mps, ht, 1e-16)[0]
half_entropy_list[0][number_list[0], ht-1] = h_entropy
number_list[0] = number_list[0] + 1
entro_tar = copy.deepcopy(tar_mpslist)
for et in range(1, mps_num):
entropy = qr_left_and_right_location(entro_tar, et, 1e-16)[0]
entropy_list.append(entropy)
for m in range(mps_num - 2):
average_ = entropy_list[m]
average = average + average_
average = average / (mps_num - 1) # compute the average entanglement entropy
center_entropy = qr_left_and_right_location(entro_tar, center_position, 1e-16)[0]
print('The average entanglement entropy is: {}'.format(average))
print('The MPS entanglement entropy with the orthogonality center at tensor ' + str(center_position) + ' is: {}'.format(center_entropy))
for nn in range(mps_num): # initial vacuum zero state
ini_state.append(tc.tensor([1, 0], dtype=dtype).reshape(1, 2, 1).to(para['device']))
read_memory_gate = bfs.load('read_memory_gate_data', 'gate')
for vt in range(read_gatenum): # randomly initialize identities with small perturbations so that the next layer of the layer-wise optimization beats a single layer
unitary_gate = read_memory_gate[vt].to(para['device'])
unitary_gate.requires_grad = True
layer_gatelist.append(unitary_gate)
for jt in range(gatenum//layer_num):
vol_gate = tc.mul(tc.rand((4, 4), dtype=dtype).to(para['device']), vol)
unitary_gate = tc.add(vol_gate, identity_4)
unitary_gate.requires_grad = True
layer_gatelist.append(unitary_gate)
mps_norm(ini_state) # normalize the initial quantum state
# lay_optimize_1 = tc.optim.Adam(layer_gatelist, lr=lr) # gate parameters for layer-wise optimization; joint optimization follows once it finishes
print('Storing the layer-wise optimizers into a list')
for it in range(gatenum): # split the gates of the layer-wise optimization into per-layer lists
if it < (gatenum//layer_num)*1:
layer_gatelist_0.append(layer_gatelist[it])
else:
if it < (gatenum//layer_num)*2:
layer_gatelist_1.append(layer_gatelist[it])
else:
if it < (gatenum//layer_num)*3:
layer_gatelist_2.append(layer_gatelist[it])
else:
if it < (gatenum//layer_num)*4:
layer_gatelist_3.append(layer_gatelist[it])
else:
if it < (gatenum//layer_num)*5:
layer_gatelist_4.append(layer_gatelist[it])
else:
layer_gatelist_5.append(layer_gatelist[it])
lay_optimize_0 = tc.optim.Adam(layer_gatelist_0, lr=lr) # gate parameters for layer-wise optimization; joint optimization follows once it finishes
lay_optimize_1 = tc.optim.Adam(layer_gatelist_1, lr=lr)
lay_optimize_2 = tc.optim.Adam(layer_gatelist_2, lr=lr)
layer_optimize.append(lay_optimize_0) # collect the three layer optimizers
layer_optimize.append(lay_optimize_1)
layer_optimize.append(lay_optimize_2)
scheduler_0 = StepLR(lay_optimize_0, step_size=step_size, gamma=0.1)
scheduler_1 = StepLR(lay_optimize_1, step_size=step_size, gamma=0.1)
scheduler_2 = StepLR(lay_optimize_2, step_size=step_size, gamma=0.1)
scheduler = list()
scheduler.append(scheduler_0)
scheduler.append(scheduler_1)
scheduler.append(scheduler_2)
evo = ev.Evolve(mps_num, k_bood, 2, gatenum, layer_num)
evo.init_tensor_list(copy.deepcopy(ini_state))
for bt in range(layer_num):
    print('Initial learning rate of layer ' + str(bt) + ':', layer_optimize[bt].defaults['lr'])
start.record() # start timing the model computation
for pt in range(pt_time): # alternating-optimization rounds
    fprint('Circle optimization, round ' + str(pt), file=para['log_name'])
for lay_num in range(layer_num):
        fprint('Circle optimization, layer ' + str(lay_num), file=para['log_name'])
for vt in range(it_time):
            for llt in range(lay_num, lay_num + 1): # evolve the layer being optimized first; after evolution, store it in a new list as the next layer's initialization
evo.layered_evolve_mps(layer_gatelist, llt)
if vt == it_time - 1:
evo.storage_layer_out_optimization(llt, 0)
            for at in range(lay_num + 1, layer_num): # evolve the non-variational quantum gates through the circuit
evo.layered_evolve_mps(layer_gatelist, at)
            lay_loss = evo.log_fidelity(tar_mpslist) # exploits the MPS form to sidestep the exponential complexity
if ((vt + 1) % dt_print) == 0:
if vt == 0:
fprint('block')
else:
fprint('At t = ' + str(vt) + ', loss = ' + str(lay_loss.item()), file=para['log_name'])
loss_[lay_num].append(lay_loss.item())
lay_loss.backward()
layer_optimize[lay_num].step()
layer_optimize[lay_num].zero_grad()
if ((vt + 1) % dt_print) == 0:
fprint("第%d个epoch的学习率:%f" % (vt, layer_optimize[lay_num].param_groups[0]['lr']),
file=para['log_name'])
scheduler[lay_num].step()
            tc.cuda.empty_cache() # free memory held by unneeded variables
if lay_num == layer_num-1:
if vt == it_time - 1:
half_entropy(evo.out_optimization())
if vt == it_time - 1:
evo.read_layer_out_optimization(lay_num, 0)
else:
evo.read_layer_out_optimization(lay_num, 1)
half_entropy(tar_mpslist) # the last row of the heat map holds the target-state entanglement information
bfs.save('.', 'out_memory_half_entropy_data', [half_entropy_list], ['half_entropy'])
for dt in range(gatenum):
zero_gatetensor[dt, :, :] = layer_gatelist[dt].data
bfs.save('.', 'out_memory_gate_data', [zero_gatetensor], ['gate'])
out_layer = evo.out_optimization()
out_layer_numpy = list()
for nt in range(mps_num): # convert the output MPS into numpy arrays
out_layer_numpy.append(out_layer[nt].numpy())
np.savez(out_file_name,
tar_mpslist0=out_layer_numpy[0], tar_mpslist1=out_layer_numpy[1], tar_mpslist2=out_layer_numpy[2],
tar_mpslist3=out_layer_numpy[3], tar_mpslist4=out_layer_numpy[4], tar_mpslist5=out_layer_numpy[5],
tar_mpslist6=out_layer_numpy[6], tar_mpslist7=out_layer_numpy[7], tar_mpslist8=out_layer_numpy[8],
tar_mpslist9=out_layer_numpy[9],
tar_mpslist10=out_layer_numpy[10], tar_mpslist11=out_layer_numpy[11], tar_mpslist12=out_layer_numpy[12],
tar_mpslist13=out_layer_numpy[13], tar_mpslist14=out_layer_numpy[14], tar_mpslist15=out_layer_numpy[15],
tar_mpslist16=out_layer_numpy[16], tar_mpslist17=out_layer_numpy[17], tar_mpslist18=out_layer_numpy[18],
tar_mpslist19=out_layer_numpy[19],
tar_mpslist20=out_layer_numpy[20], tar_mpslist21=out_layer_numpy[21], tar_mpslist22=out_layer_numpy[22],
tar_mpslist23=out_layer_numpy[23], tar_mpslist24=out_layer_numpy[24], tar_mpslist25=out_layer_numpy[25],
tar_mpslist26=out_layer_numpy[26], tar_mpslist27=out_layer_numpy[27], tar_mpslist28=out_layer_numpy[28],
tar_mpslist29=out_layer_numpy[29],
tar_mpslist30=out_layer_numpy[30], tar_mpslist31=out_layer_numpy[31], tar_mpslist32=out_layer_numpy[32],
tar_mpslist33=out_layer_numpy[33], tar_mpslist34=out_layer_numpy[34], tar_mpslist35=out_layer_numpy[35],
tar_mpslist36=out_layer_numpy[36], tar_mpslist37=out_layer_numpy[37], tar_mpslist38=out_layer_numpy[38],
tar_mpslist39=out_layer_numpy[39],
tar_mpslist40=out_layer_numpy[40], tar_mpslist41=out_layer_numpy[41], tar_mpslist42=out_layer_numpy[42],
tar_mpslist43=out_layer_numpy[43], tar_mpslist44=out_layer_numpy[44], tar_mpslist45=out_layer_numpy[45],
tar_mpslist46=out_layer_numpy[46], tar_mpslist47=out_layer_numpy[47])
for nt in range(mps_num): # convert the target MPS into numpy arrays
tar_mpslist[nt] = tar_mpslist[nt].cpu().numpy()
end.record() # stop timing the model computation
# Waits for everything to finish running
tc.cuda.synchronize() # wait until all kernels in all streams on the current device have finished
print('Runtime: ', start.elapsed_time(end))
for i in range(pt_time*5):
x1_axis.append(i*10)
color_list = list(['deeppink', 'red', 'gold', 'black', 'lime', 'peru', 'purple', 'blue'])
plt.figure(num=1, figsize=(16, 12), dpi=100)
plt.tick_params(labelsize=16)
plt.xlabel("num of optimize", fontsize=20) # x轴上的名字
plt.ylabel("negative-logarithmic fidelities (NLFs) per site", fontsize=20)
plt.grid(axis='x', c='g', linestyle='--', alpha=0.5)
for kt in range(layer_num):
plt.plot(x1_axis, loss_[kt], color=color_list[kt], linewidth=3, label=' Circle layered Optimize' + str(kt))
plt.legend(prop={'family': 'Times New Roman', 'size': 16}, loc='upper right')
plt.savefig('./MPS_Step_3layer_Circle.jpg')
| 47.190217 | 114 | 0.651273 |
f73c36a9e617dd10e21800879e5f188acdb69937 | 4,355 | py | Python | src/POPULARITY_MODULE/popularity_predictor.py | cristinalunaj/WI-IAT20_PopularityModule | 0a4894e2b889bf31ea1a8beab3025d5dd0b1ed47 | [
"MIT"
] | null | null | null | src/POPULARITY_MODULE/popularity_predictor.py | cristinalunaj/WI-IAT20_PopularityModule | 0a4894e2b889bf31ea1a8beab3025d5dd0b1ed47 | [
"MIT"
] | null | null | null | src/POPULARITY_MODULE/popularity_predictor.py | cristinalunaj/WI-IAT20_PopularityModule | 0a4894e2b889bf31ea1a8beab3025d5dd0b1ed47 | [
"MIT"
] | null | null | null | import pandas as pd
import subprocess, os
import src.utils.loader as loader
def create_test_arff(participant, test_df, aux_path):
arff_text = "@relation summary_features \n\n" \
"@attribute n_faces numeric\n" \
"@attribute avg_confidence_faces numeric\n" \
"@attribute std_confidence_faces numeric\n" \
"@attribute avg_relativeSize_faces numeric\n" \
"@attribute std_relativeSize_faces numeric\n" \
"@attribute avg_thirdRule_x numeric\n" \
"@attribute std_thirdRule_x numeric\n" \
"@attribute avg_thirdRule_y numeric\n" \
"@attribute std_thirdRule_y numeric\n" \
"@attribute num_clts numeric\n" \
"@attribute avg_silhouette numeric\n" \
"@attribute avg_intra_clt_dist numeric\n" \
"@attribute avg_inter_clt_dist numeric\n" \
"@attribute faces_in_noise_clt numeric\n" \
"@attribute num_core_samples numeric\n" \
"@attribute avg_imgs_clt numeric\n" \
"@attribute avg_std_silhouette numeric\n" \
"@attribute avg_std_intra_clt_dist numeric\n" \
"@attribute avg_std_inter_clt_dist numeric\n" \
"@attribute avg_n_core_samples numeric\n" \
"@attribute std_n_core_samples numeric\n" \
"@attribute GTrends_popularity numeric\n" \
"@attribute label {1,0}\n\n" \
"@data\n"
data = test_df.loc[test_df["id"]==participant]
data = data.drop(columns="id")
data_str = ""
for ele in data.values[0]:
data_str += str(ele)+","
data_str = data_str[0:-3]
arff_text+=data_str
print(arff_text)
f = open(aux_path, "w")
f.write(arff_text)
def evaluate_test_arff(model_path, test_arff_path, out_path):
"""
    Obtain predictions for the test ARFF file using the trained model in model_path
    :param model_path: path to the trained Weka model
    :param test_arff_path: path to the test set in ARFF format
    :param out_path: path of the CSV file where the predictions are written
"""
# PREDICTIONS FILE HEADERS: INSTANCE, ACTUAL, PREDICTED, ERROR
bash_file_path = "../../data/bash_scripts/explorer_test_model.sh "
with open(out_path, 'w') as fi:
fi.close()
command = "".join([bash_file_path, test_arff_path, " ", model_path, " ", out_path])
print(command)
subprocess.call(command, shell=True)
remove_lines(out_path) # remove headers of prediction file
df_participant = pd.read_csv(out_path, header=0, sep=",")
return df_participant
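# Usage sketch (added comment, not part of the original script; the paths are
# placeholders, see the __main__ block below for the real ones):
#
#     df = evaluate_test_arff('model.model', 'test.arff', 'prediction.csv')
#
# Weka's prediction file provides the columns instance, actual, predicted and
# error (see the header note above), which remove_lines() exposes by dropping
# the first four header lines.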
def remove_lines(path_csv):
with open(path_csv, 'r') as fin:
data = fin.read().splitlines(True)
with open(path_csv, 'w') as fout:
        fout.writelines(data[4:]) # the first 4 lines are headers
fout.close()
if __name__ == "__main__":
th = "05"
path_model = "../../data/models/popularity_module/CLASIF/th"+th+"/RandomForest.model"
complete_df_ids = "../../data/datasets/popularity_module_features/train/summary_features_participants_classification_th"+th+".csv"
aux_path = "../../data/datasets/popularity_module_features/aux_test.arff"
out_path_prediction = "../../data/datasets/popularity_module_features/aux_prediction.csv"
complete_df = pd.read_csv(complete_df_ids, header=0, sep=",")
bash_test_model = ""
path_participants = "../../data/datasets/DATASET_GOOGLE_IMGS/participants/"
list_participants = loader.load_list_of_tertulianos(path_participants, "participants_complete_rtve2018",".csv")
#list_participants = [participant.replace(" ", "_") for participant in part]
df_popularity = pd.DataFrame([], columns=["prediction", "popular", "id"])
out_path_popularity_df = "../../data/results/popularity_models_output/popularity_df_th"+th+".csv"
for participant in list_participants:
participant = participant.replace("_", " ")
create_test_arff(participant, complete_df, aux_path)
df_participant = evaluate_test_arff(path_model, aux_path, out_path_prediction)
df_popularity = df_popularity.append(pd.DataFrame([[df_participant["predicted"][0].split(":")[-1], df_participant["predicted"][0].split(":")[-1]=="1", participant
]], columns=["prediction", "popular", "id"]))
df_popularity.to_csv(out_path_popularity_df, sep=";", header=True, index=False)
| 45.364583 | 170 | 0.653961 |
f73cd7659f96363d6da4f7c2bb9631d7abb9b9f3 | 5,938 | py | Python | salt/states/cabal.py | rfairburn/salt | 7e44444e873e1cb1d2fe13e39b0edea3779a2b5e | [
"Apache-2.0"
] | 2 | 2015-08-04T21:54:38.000Z | 2019-04-25T21:47:08.000Z | salt/states/cabal.py | rfairburn/salt | 7e44444e873e1cb1d2fe13e39b0edea3779a2b5e | [
"Apache-2.0"
] | 1 | 2015-09-02T12:49:48.000Z | 2015-09-02T19:22:58.000Z | salt/states/cabal.py | rfairburn/salt | 7e44444e873e1cb1d2fe13e39b0edea3779a2b5e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Installation of Cabal Packages
==============================
.. versionadded:: 2015.8.0
These states manage the installed packages for Haskell using
cabal. Note that cabal-install must be installed for these states to
be available, so cabal states should include a requisite to a
pkg.installed state for the package which provides cabal
(``cabal-install`` in case of Debian based distributions). Example::
.. code-block:: yaml
cabal-install:
pkg.installed
ShellCheck:
cabal.installed:
- require:
- pkg: cabal-install
'''
from __future__ import absolute_import
from salt.exceptions import CommandExecutionError, CommandNotFoundError
import salt.utils
def __virtual__():
'''
Only work when cabal-install is installed.
'''
return (salt.utils.which('cabal') is not None) and \
(salt.utils.which('ghc-pkg') is not None)
def _parse_pkg_string(pkg):
'''
    Parse a pkg string and return a tuple of package name, separator, and
package version.
    Cabal supports installing packages in the following formats:
* foo-1.0
* foo < 1.2
* foo > 1.3
For the sake of simplicity only the first form is supported,
support for other forms can be added later.
'''
pkg_name, separator, pkg_ver = pkg.partition('-')
return (pkg_name.strip(), separator, pkg_ver.strip())
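# Doctest-style illustration (added comment, not part of the original module):
#
#     _parse_pkg_string('ShellCheck-0.3.5')  ->  ('ShellCheck', '-', '0.3.5')
#     _parse_pkg_string('ShellCheck')        ->  ('ShellCheck', '', '')
#
# Note that str.partition splits on the *first* hyphen, so a hyphenated name
# such as 'cabal-install-1.24' parses as ('cabal', '-', 'install-1.24').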
def installed(name,
pkgs=None,
user=None,
install_global=False,
env=None):
'''
Verify that the given package is installed and is at the correct version
(if specified).
.. code-block:: yaml
ShellCheck-0.3.5:
cabal:
- installed:
name
The package to install
user
The user to run cabal install with
install_global
Install package globally instead of locally
env
A list of environment variables to be set prior to execution. The
format is the same as the :py:func:`cmd.run <salt.states.cmd.run>`.
state function.
'''
ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
try:
call = __salt__['cabal.update']()
except (CommandNotFoundError, CommandExecutionError) as err:
ret['result'] = False
        ret['comment'] = 'Could not run cabal update {0}'.format(err)
        return ret
if pkgs is not None:
pkg_list = pkgs
else:
pkg_list = [name]
try:
installed_pkgs = __salt__['cabal.list'](
user=user, installed=True, env=env)
except (CommandNotFoundError, CommandExecutionError) as err:
ret['result'] = False
        ret['comment'] = 'Error looking up {0!r}: {1}'.format(name, err)
        return ret
pkgs_satisfied = []
pkgs_to_install = []
for pkg in pkg_list:
pkg_name, _, pkg_ver = _parse_pkg_string(pkg)
if pkg_name not in installed_pkgs:
pkgs_to_install.append(pkg)
else:
if pkg_ver: # version is specified
if installed_pkgs[pkg_name] != pkg_ver:
pkgs_to_install.append(pkg)
else:
pkgs_satisfied.append(pkg)
else:
pkgs_satisfied.append(pkg)
if __opts__['test']:
ret['result'] = None
comment_msg = []
if pkgs_to_install:
comment_msg.append(
'Packages(s) {0!r} are set to be installed'.format(
', '.join(pkgs_to_install)))
if pkgs_satisfied:
comment_msg.append(
'Packages(s) {0!r} satisfied by {1}'.format(
', '.join(pkg_list), ', '.join(pkgs_satisfied)))
ret['comment'] = '. '.join(comment_msg)
return ret
if not pkgs_to_install:
ret['result'] = True
ret['comment'] = ('Packages(s) {0!r} satisfied by {1}'.format(
', '.join(pkg_list), ', '.join(pkgs_satisfied)))
return ret
try:
call = __salt__['cabal.install'](pkgs=pkg_list,
user=user,
install_global=install_global,
env=env)
except (CommandNotFoundError, CommandExecutionError) as err:
ret['result'] = False
ret['comment'] = 'Error installing {0!r}: {1}'.format(
', '.join(pkg_list), err)
return ret
if call and isinstance(call, dict):
ret['result'] = True
ret['changes'] = {'old': [], 'new': pkgs_to_install}
ret['comment'] = 'Packages(s) {0!r} successfully installed'.format(
', '.join(pkgs_to_install))
else:
ret['result'] = False
ret['comment'] = 'Could not install packages(s) {0!r}'.format(
', '.join(pkg_list))
return ret
def removed(name,
user=None,
env=None):
'''
Verify that given package is not installed.
'''
ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
try:
installed_pkgs = __salt__['cabal.list'](
user=user, installed=True, env=env)
except (CommandNotFoundError, CommandExecutionError) as err:
ret['result'] = False
        ret['comment'] = 'Error looking up {0!r}: {1}'.format(name, err)
        return ret
if name not in installed_pkgs:
ret['result'] = True
ret['comment'] = 'Package {0!r} is not installed'.format(name)
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Package {0!r} is set to be removed'.format(name)
return ret
if __salt__['cabal.uninstall'](pkg=name, user=user, env=env):
ret['result'] = True
ret['changes'][name] = 'Removed'
ret['comment'] = 'Package {0!r} was successfully removed'.format(name)
else:
ret['result'] = False
ret['comment'] = 'Error removing package {0!r}'.format(name)
return ret
| 28.965854 | 78 | 0.573594 |
f73ce815f021946c17d274a89a2a9a2f8ef1867c | 1,388 | py | Python | setup.py | shx2/apegears | 3fa0408a15df3817fd206d1086d7e49e1b60594c | [
"MIT"
] | 5 | 2020-08-31T20:04:04.000Z | 2022-01-15T17:09:42.000Z | setup.py | shx2/apegears | 3fa0408a15df3817fd206d1086d7e49e1b60594c | [
"MIT"
] | 5 | 2020-10-23T02:41:45.000Z | 2020-11-03T02:27:57.000Z | setup.py | shx2/apegears | 3fa0408a15df3817fd206d1086d7e49e1b60594c | [
"MIT"
] | 1 | 2020-10-23T02:48:08.000Z | 2020-10-23T02:48:08.000Z | #!/usr/bin/env python3
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
# Read version info from apegears/version.py
version_vars = {}
with open(path.join("apegears", "version.py")) as fp:
exec(fp.read(), version_vars)
version_string = version_vars['__version_string__']
setup(
name='apegears',
version=version_string,
description='An improved ArgumentParser, fully compatible with argparse.',
long_description=long_description,
url='https://github.com/shx2/apegears',
author='shx2',
author_email='shx222@gmail.com',
license='MIT',
packages=find_packages(exclude=['tests*']),
platforms = ["POSIX", "Windows"],
install_requires=[],
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Programming Language :: Python :: 3',
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development',
'License :: OSI Approved :: MIT License',
"Operating System :: POSIX",
"Operating System :: Microsoft :: Windows",
],
keywords='CLI argparse ArgumentParser optparse func_argparse',
)
| 30.173913 | 78 | 0.682277 |
f73cf0e3aa3957672cd28f652a1f0461874ddcc9 | 14,332 | py | Python | policy_gradient.py | rey-allan/rl | 6124bcfe5de8a9a316c41fb75b50a3e9babfc970 | [
"MIT"
] | null | null | null | policy_gradient.py | rey-allan/rl | 6124bcfe5de8a9a316c41fb75b50a3e9babfc970 | [
"MIT"
] | null | null | null | policy_gradient.py | rey-allan/rl | 6124bcfe5de8a9a316c41fb75b50a3e9babfc970 | [
"MIT"
] | null | null | null | """Implementation of different policy gradient methods"""
import argparse
import numpy as np
import plot as plt
import random
from collections import namedtuple
from env import Action, Easy21, State
from tqdm import tqdm
from typing import Callable, List
# For reproducibility
random.seed(0)
np.random.seed(0)
Trajectory = namedtuple("Trajectory", ["state", "action", "reward"])
def encode(s: State, a: Action) -> np.ndarray:
"""
Encodes the given state-action pair using coarse coding as specified in the Easy21 assignment:
A binary feature vector rho(s, a) with 3 ∗ 6 ∗ 2 = 36 features. Each binary feature
has a value of 1 iff (s, a) lies within the cuboid of state-space corresponding to
that feature, and the action corresponding to that feature. The cuboids have the
following overlapping intervals:
- dealer(s) = {[1, 4], [4, 7], [7, 10]}
- player(s) = {[1, 6], [4, 9], [7, 12], [10, 15], [13, 18], [16, 21]}
- a = {hit, stick}
:param State s: The state to encode
:param Action a: The action to encode
:return: A binary feature vector representing the encoded state-action pair
:rtype: np.ndarray
"""
    # `range` is end-exclusive so we add 1 to make sure we capture the intervals' inclusive ends
dealer = [range(1, 5), range(4, 8), range(7, 11)]
player = [range(1, 7), range(4, 10), range(7, 13), range(10, 16), range(13, 19), range(16, 22)]
encoded = np.zeros((3, 6, 2))
for i, d in enumerate(dealer):
for j, p in enumerate(player):
for k, action in enumerate([Action.hit, Action.stick]):
if s.dealer_first_card in d and s.player_sum in p and a == action:
encoded[i, j, k] = 1
return encoded.flatten()
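# Illustrative check of the coarse coding above (assumption: State takes
# (dealer_first_card, player_sum) positionally, as used elsewhere in this file):
#
#     >>> x = encode(State(1, 1), Action.hit)
#     >>> x.shape
#     (36,)
#     >>> int(x.sum())  # (1, 1) lies in exactly one dealer and one player cuboid
#     1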
def softmax(x: np.ndarray) -> np.ndarray:
"""
Computes the softmax of the given array
:param np.ndarray x: The input array
:return: The softmax of each element of the input array
:rtype: np.ndarray
"""
return np.exp(x) / np.sum(np.exp(x))
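# Illustrative usage (assumption: inputs stay small enough that the naive
# exponentiation above does not overflow; subtracting max(x) first would make
# it numerically stable):
#
#     >>> softmax(np.array([0.0, 0.0]))
#     array([0.5, 0.5])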
class REINFORCEWithBaseline:
"""
REINFORCE algorithm with baseline
Uses softmax on linear action preferences for the policy, and
linear approximation for the value function. Feature vectors
are computed using coarse coding as described in the Easy21
assignment.
"""
def __init__(self):
self._env = Easy21(seed=24)
def learn(self, epochs=200, alpha_policy=0.01, alpha_value=0.01, gamma=0.9, verbose=False, **kwargs) -> np.ndarray:
"""
Learns the optimal value function.
:param int epochs: The number of epochs to take to learn the value function
:param float alpha_policy: The learning rate for the policy approximation
:param float alpha_value: The learning rate for the value approximation
:param float gamma: The discount factor
:param bool verbose: Whether to use verbose mode or not
:return: The optimal value function
:rtype: np.ndarray
"""
# Value function
w = np.random.rand(36)
value_approximator = lambda s: [np.dot(w, encode(s, a)) for a in [Action.hit, Action.stick]]
# Policy function
theta = np.random.rand(36)
pi = lambda s, theta: softmax(np.array([np.dot(theta, encode(s, a)) for a in [Action.hit, Action.stick]]))
for _ in tqdm(range(epochs), disable=not verbose):
trajectories = self._sample_episode(pi, theta)
            # Reverse the list so we accumulate the return backwards from the last time step
trajectories.reverse()
# Learn from the episode
g = 0
for i, t in enumerate(trajectories):
g = t.reward + gamma * g
x = encode(t.state, t.action)
# Baseline
v = np.dot(w, x)
delta = g - v
# SGD update of the value function
w += alpha_value * delta * x
# SGD update of the policy function
probs = pi(t.state, theta)
                # Sum the expected feature vector over actions (axis=0 keeps it a vector)
                eligibility_vector = x - np.sum([p * encode(t.state, a) for a, p in enumerate(probs)], axis=0)
theta += alpha_policy * gamma ** i * delta * eligibility_vector
# Compute the optimal value function which is simply the value of the best action in each state
values = np.zeros(self._env.state_space)
for d in range(self._env.state_space[0]):
for p in range(self._env.state_space[1]):
values[d, p] = np.max(value_approximator(State(d, p)))
return values
def _sample_episode(self, pi: Callable[[State, Action, np.ndarray], float], theta: np.ndarray) -> List[Trajectory]:
# Samples trajectories following policy `pi` with an optional starting state-action pair
trajectories = []
s = self._env.reset()
# The policy selects the action with some constant exploration as in the Easy21 assignment
policy = (
lambda s: random.choice([Action.hit, Action.stick]) if random.random() < 0.05 else np.argmax(pi(s, theta))
)
a = policy(s)
while True:
s_prime, r, done = self._env.step(a)
trajectories.append(Trajectory(s, a, r))
if done:
break
s = s_prime
a = policy(s)
return trajectories
class OneStepActorCritic:
"""
One-step Actor-Critic
Uses softmax on linear action preferences for the policy, and
linear approximation for the value function. Feature vectors
are computed using coarse coding as described in the Easy21
assignment.
"""
def __init__(self):
self._env = Easy21(seed=24)
def learn(self, epochs=200, alpha_policy=0.01, alpha_value=0.01, gamma=0.9, verbose=False, **kwargs) -> np.ndarray:
"""
Learns the optimal value function.
:param int epochs: The number of epochs to take to learn the value function
:param float alpha_policy: The learning rate for the policy approximation
:param float alpha_value: The learning rate for the value approximation
:param float gamma: The discount factor
:param bool verbose: Whether to use verbose mode or not
:return: The optimal value function
:rtype: np.ndarray
"""
# Value function
w = np.random.rand(36)
value_approximator = lambda s: [np.dot(w, encode(s, a)) for a in [Action.hit, Action.stick]]
# Policy function
theta = np.random.rand(36)
pi = lambda s, theta: softmax(np.array([np.dot(theta, encode(s, a)) for a in [Action.hit, Action.stick]]))
# The policy selects the action with some constant exploration as in the Easy21 assignment
policy = (
lambda s: random.choice([Action.hit, Action.stick]) if random.random() < 0.05 else np.argmax(pi(s, theta))
)
for _ in tqdm(range(epochs), disable=not verbose):
I = 1
s = self._env.reset()
done = False
while not done:
a = policy(s)
s_prime, r, done = self._env.step(a)
# Compute the delta
if done:
delta = r - np.dot(w, encode(s, a))
else:
delta = r + gamma * np.max(value_approximator(s_prime)) - np.dot(w, encode(s, a))
# SGD update of the value function
x = encode(s, a)
w += alpha_value * delta * x
# SGD update of the policy function
probs = pi(s, theta)
                # Sum the expected feature vector over actions (axis=0 keeps it a vector)
                eligibility_vector = x - np.sum([p * encode(s, a) for a, p in enumerate(probs)], axis=0)
theta += alpha_policy * I * delta * eligibility_vector
I *= gamma
s = s_prime
# Compute the optimal value function which is simply the value of the best action in each state
values = np.zeros(self._env.state_space)
for d in range(self._env.state_space[0]):
for p in range(self._env.state_space[1]):
values[d, p] = np.max(value_approximator(State(d, p)))
return values
class ActorCriticWithEligibilityTraces:
"""
Actor-Critic with eligibility traces
Uses softmax on linear action preferences for the policy, and
linear approximation for the value function. Feature vectors
are computed using coarse coding as described in the Easy21
assignment.
"""
def __init__(self):
self._env = Easy21(seed=24)
def learn(
self,
epochs=200,
alpha_policy=0.01,
alpha_value=0.01,
gamma=0.9,
lambda_value=1.0,
lambda_policy=1.0,
verbose=False,
**kwargs
) -> np.ndarray:
"""
Learns the optimal value function.
:param int epochs: The number of epochs to take to learn the value function
:param float alpha_policy: The learning rate for the policy approximation
:param float alpha_value: The learning rate for the value approximation
:param float gamma: The discount factor
:param float lambda_value: The trace decay rate for the value approximation
:param float lambda_policy: The trace decay rate for the policy approximation
:param bool verbose: Whether to use verbose mode or not
:return: The optimal value function
:rtype: np.ndarray
"""
# Value function
w = np.random.rand(36)
value_approximator = lambda s: [np.dot(w, encode(s, a)) for a in [Action.hit, Action.stick]]
# Policy function
theta = np.random.rand(36)
pi = lambda s, theta: softmax(np.array([np.dot(theta, encode(s, a)) for a in [Action.hit, Action.stick]]))
# The policy selects the action with some constant exploration as in the Easy21 assignment
policy = (
lambda s: random.choice([Action.hit, Action.stick]) if random.random() < 0.05 else np.argmax(pi(s, theta))
)
for _ in tqdm(range(epochs), disable=not verbose):
I = 1
s = self._env.reset()
done = False
z_w = np.zeros_like(w)
z_theta = np.zeros_like(theta)
while not done:
a = policy(s)
s_prime, r, done = self._env.step(a)
# Compute the delta
if done:
delta = r - np.dot(w, encode(s, a))
else:
delta = r + gamma * np.max(value_approximator(s_prime)) - np.dot(w, encode(s, a))
# SGD update of the value function
x = encode(s, a)
z_w = gamma * lambda_value * z_w + x
w += alpha_value * delta * z_w
# SGD update of the policy function
probs = pi(s, theta)
                # Sum the expected feature vector over actions (axis=0 keeps it a vector)
                eligibility_vector = x - np.sum([p * encode(s, a) for a, p in enumerate(probs)], axis=0)
z_theta = gamma * lambda_policy * z_theta + I * eligibility_vector
theta += alpha_policy * delta * z_theta
I *= gamma
s = s_prime
# Compute the optimal value function which is simply the value of the best action in each state
values = np.zeros(self._env.state_space)
for d in range(self._env.state_space[0]):
for p in range(self._env.state_space[1]):
values[d, p] = np.max(value_approximator(State(d, p)))
return values
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run policy gradient methods")
parser.add_argument("--reinforce-with-baseline", action="store_true", help="Execute REINFORCE with Baseline")
parser.add_argument("--one-step-ac", action="store_true", help="Execute One-step Actor-Critic")
parser.add_argument(
"--ac-eligibility-traces", action="store_true", help="Execute Actor-Critic with eligibility traces"
)
parser.add_argument("--epochs", type=int, default=200, help="Epochs to train")
parser.add_argument(
"--alpha-value", type=float, default=0.01, help="Learning rate to use for the value function approximation"
)
parser.add_argument(
"--alpha-policy", type=float, default=0.01, help="Learning rate to use for the policy function approximation"
)
parser.add_argument(
"--lambda-value", type=float, default=1.0, help="Trace decay rate to use for the value function approximation"
)
parser.add_argument(
"--lambda-policy", type=float, default=1.0, help="Trace decay rate to use for the policy function approximation"
)
parser.add_argument("--gamma", type=float, default=0.9, help="Discount factor")
parser.add_argument("--verbose", action="store_true", help="Run in verbose mode")
args = parser.parse_args()
# The optimal value function obtained
V = None
# The algorithm to run
policy_grad = None
# The title of the plot
title = None
if args.reinforce_with_baseline:
print("Running REINFORCE with Baseline")
policy_grad = REINFORCEWithBaseline()
title = "reinforce_with_baseline"
elif args.one_step_ac:
print("Running One-step Actor-Critic")
policy_grad = OneStepActorCritic()
title = "one_step_actor_critic"
elif args.ac_eligibility_traces:
print("Running Actor-Critic with eligibility traces")
policy_grad = ActorCriticWithEligibilityTraces()
title = "actor_critic_eligibility_traces"
if policy_grad is not None:
V = policy_grad.learn(
epochs=args.epochs,
alpha_value=args.alpha_value,
alpha_policy=args.alpha_policy,
lambda_value=args.lambda_value,
lambda_policy=args.lambda_policy,
gamma=args.gamma,
verbose=args.verbose,
)
if V is not None:
# Plot the value function as a surface
# Remove the state where the dealer's first card is 0 and the player's sum is 0 because these are not possible
# They were kept in the value function to avoid having to deal with 0-index vs 1-index
plt.plot_value_function(range(1, Easy21.state_space[0]), range(1, Easy21.state_space[1]), V[1:, 1:], title)
| 38.840108 | 120 | 0.612824 |
f73d327e2f83cfcb8714c4fa54d314b74ae3fd9f | 432 | py | Python | djangocms_versioning/test_utils/people/models.py | webbyfox/djangocms-versioning | a466ff0f8d109a22ec2f567cace6ef69d332180c | [
"BSD-3-Clause"
] | null | null | null | djangocms_versioning/test_utils/people/models.py | webbyfox/djangocms-versioning | a466ff0f8d109a22ec2f567cace6ef69d332180c | [
"BSD-3-Clause"
] | null | null | null | djangocms_versioning/test_utils/people/models.py | webbyfox/djangocms-versioning | a466ff0f8d109a22ec2f567cace6ef69d332180c | [
"BSD-3-Clause"
] | null | null | null | from django.db import models
class Person(models.Model):
name = models.TextField()
def __str__(self):
return "{} ({})".format(self.name, self.pk)
class PersonContent(models.Model):
person = models.ForeignKey(Person, on_delete=models.CASCADE)
language = models.TextField()
text = models.TextField()
def __str__(self):
return self.text
def get_absolute_url(self):
return '/'
| 20.571429 | 64 | 0.655093 |
f73d453812bcfc25c8bbd44be64d84d895cd81c9 | 76 | py | Python | main.py | sebanie15/simple_clinic | 4dc942b0549ee6397a0e89dd7aa03eb8580b4a5a | [
"MIT"
] | null | null | null | main.py | sebanie15/simple_clinic | 4dc942b0549ee6397a0e89dd7aa03eb8580b4a5a | [
"MIT"
] | null | null | null | main.py | sebanie15/simple_clinic | 4dc942b0549ee6397a0e89dd7aa03eb8580b4a5a | [
"MIT"
] | null | null | null | # !/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: sebanie15
"""
| 10.857143 | 23 | 0.539474 |
f73d56274bb5a4e9fa2a9fe2f5ce429d2af7de69 | 2,511 | py | Python | deeppavlov/core/common/log.py | ineersa/DeepPavlov | 8200bf9a0f0b378baad4ee0eb75b59453f516004 | [
"Apache-2.0"
] | 1 | 2019-05-22T08:34:33.000Z | 2019-05-22T08:34:33.000Z | deeppavlov/core/common/log.py | ineersa/DeepPavlov | 8200bf9a0f0b378baad4ee0eb75b59453f516004 | [
"Apache-2.0"
] | null | null | null | deeppavlov/core/common/log.py | ineersa/DeepPavlov | 8200bf9a0f0b378baad4ee0eb75b59453f516004 | [
"Apache-2.0"
] | 1 | 2019-03-17T13:47:44.000Z | 2019-03-17T13:47:44.000Z | # Copyright 2017 Neural Networks and Deep Learning lab, MIPT
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import logging.config
import sys
from pathlib import Path
from .paths import get_settings_path
LOG_CONFIG_FILENAME = 'log_config.json'
TRACEBACK_LOGGER_ERRORS = True
root_path = Path(__file__).resolve().parents[3]
logging.getLogger('matplotlib').setLevel(logging.WARNING)
def get_logger(logger_name):
try:
log_config_path = get_settings_path() / LOG_CONFIG_FILENAME
with log_config_path.open(encoding='utf8') as log_config_json:
log_config = json.load(log_config_json)
configured_loggers = [log_config.get('root', {})] + log_config.get('loggers', [])
used_handlers = {handler for log in configured_loggers for handler in log.get('handlers', [])}
for handler_id, handler in list(log_config['handlers'].items()):
if handler_id not in used_handlers:
del log_config['handlers'][handler_id]
elif 'filename' in handler.keys():
filename = handler['filename']
logfile_path = Path(filename).expanduser().resolve()
handler['filename'] = str(logfile_path)
logging.config.dictConfig(log_config)
logger = logging.getLogger(logger_name)
except Exception:
logger = logging.getLogger(logger_name)
logger.setLevel(logging.WARNING)
formatter = logging.Formatter(
'%(asctime)s.%(msecs)d %(levelname)s in \'%(name)s\'[\'%(module)s\'] at line %(lineno)d: %(message)s',
'%Y-%m-%d %H:%M:%S')
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(formatter)
handler.setLevel(logging.WARNING)
logger.addHandler(handler)
logger.error(
'LOGGER ERROR: Can not initialise {} logger, '
'logging to the stderr. Error traceback:\n'.format(logger_name), exc_info=TRACEBACK_LOGGER_ERRORS)
return logger
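# Minimal usage sketch (assumes a valid log_config.json lives in the settings
# directory; otherwise the stderr fallback configured above is used):
#
#     log = get_logger(__name__)
#     log.warning('routed through the configured (or fallback) handlers')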
| 35.366197 | 114 | 0.683791 |
f73d6cd897ef8812b769f1d9383c8e52951c6300 | 1,705 | py | Python | tests/test_utils.py | nhoffman/uwgroups | fab9cb266d2e0c794370c1bdf62b26610dd33aef | [
"MIT",
"Unlicense"
] | 1 | 2018-11-30T00:43:06.000Z | 2018-11-30T00:43:06.000Z | tests/test_utils.py | nhoffman/uwgroups | fab9cb266d2e0c794370c1bdf62b26610dd33aef | [
"MIT",
"Unlicense"
] | 2 | 2018-08-20T17:00:18.000Z | 2018-08-27T17:30:54.000Z | tests/test_utils.py | nhoffman/uwgroups | fab9cb266d2e0c794370c1bdf62b26610dd33aef | [
"MIT",
"Unlicense"
] | 1 | 2018-08-21T15:06:54.000Z | 2018-08-21T15:06:54.000Z | """
Test utils module.
"""
import logging
from uwgroups.utils import reconcile, grouper
from .__init__ import TestBase
log = logging.getLogger(__name__)
class TestReconcile(TestBase):
def test01(self):
to_add, to_remove = reconcile(
current=set(),
desired=set())
self.assertSetEqual(to_add, set())
self.assertSetEqual(to_remove, set())
def test02(self):
to_add, to_remove = reconcile(
current={'a', 'b', 'c'},
desired={'a', 'b', 'c'})
self.assertSetEqual(to_add, set())
self.assertSetEqual(to_remove, set())
def test03(self):
to_add, to_remove = reconcile(
current={'a', 'b', 'c'},
desired={'a', 'b'})
self.assertSetEqual(to_add, set())
self.assertSetEqual(to_remove, {'c'})
def test04(self):
to_add, to_remove = reconcile(
current={'a', 'b'},
desired={'a', 'b', 'c'})
self.assertSetEqual(to_add, {'c'})
self.assertSetEqual(to_remove, set())
def test05(self):
to_add, to_remove = reconcile(
current={'b', 'd'},
desired={'a', 'b', 'c'})
self.assertSetEqual(to_add, {'a', 'c'})
self.assertSetEqual(to_remove, {'d'})
def test06(self):
self.assertRaises(TypeError, reconcile, set(), [])
class TestGrouper(TestBase):
def test01(self):
items = list(range(10))
chunks = list(grouper(items, 4))
self.assertEqual(chunks[-1], (8, 9))
def test02(self):
items = list(range(10))
chunks = list(grouper(items, 4, fill=True))
self.assertEqual(chunks[-1], (8, 9, None, None))
| 25.073529 | 58 | 0.556012 |
f73d8ed23868ba52f022e4f17ca43ad0a39cd922 | 287 | py | Python | hiicart/gateway/amazon/urls.py | kbourgoin/hiicart | 151d64be60ffa5e09b4abc21bf42fd235bf87eea | [
"MIT"
] | null | null | null | hiicart/gateway/amazon/urls.py | kbourgoin/hiicart | 151d64be60ffa5e09b4abc21bf42fd235bf87eea | [
"MIT"
] | 5 | 2020-10-29T01:05:05.000Z | 2020-10-29T01:05:19.000Z | hiicart/gateway/amazon/urls.py | kbourgoin/hiicart | 151d64be60ffa5e09b4abc21bf42fd235bf87eea | [
"MIT"
] | null | null | null | import hiicart.gateway.amazon.views
from django.conf.urls.defaults import *
urlpatterns = patterns('',
(r'cbui/?$', 'hiicart.gateway.amazon.views.cbui'),
(r'ipn/?$', 'hiicart.gateway.amazon.views.ipn'),
)
| 28.7 | 88 | 0.515679 |
f73de0cf78faa821e681ebb950cc71213af5e281 | 1,630 | py | Python | pinax/apps/tasks/tests/test_client.py | peiwei/pinax | 34f95b1df4318655fe9bd90dcda8fe824e0c4117 | [
"MIT"
] | 1 | 2019-02-12T04:45:09.000Z | 2019-02-12T04:45:09.000Z | pinax/apps/tasks/tests/test_client.py | alex/pinax | 37e17ee2e2eb0e387d8809c12e55c20194a7118a | [
"MIT"
] | null | null | null | pinax/apps/tasks/tests/test_client.py | alex/pinax | 37e17ee2e2eb0e387d8809c12e55c20194a7118a | [
"MIT"
] | 1 | 2019-02-12T04:45:40.000Z | 2019-02-12T04:45:40.000Z | # coding: utf-8
from django.test import TestCase
rst_markup = """
Sample Header
===============
Blah blah blah
Lower Header
-------------
Blah blah blah
"""
class TestAddForm(TestCase):
fixtures = ["test_tasks.json"]
urls = "tasks.tests.tasks_urls"
def setUp(self):
self.client.login(username="admin", password="test")
def tearDown(self):
pass
def test_add_buttons(self):
response = self.client.get("/tasks/add/")
# Check that the response is 200 OK.
self.failUnlessEqual(response.status_code, 200)
# check that there is an add button
self.assertContains(response, '<input type="submit" value="Add task"/>')
# check that there is an add another task button
self.assertContains(response, "add-another-task")
def test_markup(self):
# create some sample form data
form_data = {
"summary": "my simple test",
"detail": rst_markup,
"markup": "rst",
"assignee": "",
"tags": ""
}
# post the form
response = self.client.post("/tasks/add/", form_data)
# display the resultant task
response = self.client.get("/tasks/task/3/")
# test the markup
self.assertContains(response, '<h1 class="title">Sample Header</h1>')
def test_tag_for_rel(self):
# checking for tag
response = self.client.get("/tasks/")
self.assertContains(response, '<a rel="tag" href="/tasks/tag/test/">test</a>')
| 24.69697 | 86 | 0.556442 |
f73dfbb3ad628d6d9456e84ab79764156db7292d | 7,508 | py | Python | mtp_noms_ops/apps/settings/views.py | uk-gov-mirror/ministryofjustice.money-to-prisoners-noms-ops | eb537fb8a8e3adc588d50af1b000402c957b32a7 | [
"MIT"
] | 3 | 2016-12-22T15:56:57.000Z | 2020-03-10T10:37:40.000Z | mtp_noms_ops/apps/settings/views.py | uk-gov-mirror/ministryofjustice.money-to-prisoners-noms-ops | eb537fb8a8e3adc588d50af1b000402c957b32a7 | [
"MIT"
] | 61 | 2016-06-10T08:37:23.000Z | 2022-01-28T12:41:29.000Z | mtp_noms_ops/apps/settings/views.py | uk-gov-mirror/ministryofjustice.money-to-prisoners-noms-ops | eb537fb8a8e3adc588d50af1b000402c957b32a7 | [
"MIT"
] | 1 | 2021-04-11T06:13:53.000Z | 2021-04-11T06:13:53.000Z | from urllib.parse import urlencode
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth.views import SuccessURLAllowedHostsMixin
from django.shortcuts import redirect
from django.urls import reverse, reverse_lazy
from django.utils.http import is_safe_url
from django.utils.translation import gettext_lazy as _
from django.views.generic import FormView, TemplateView
from mtp_common.auth.api_client import get_api_session
from mtp_common.views import SettingsView
from security import confirmed_prisons_flag, provided_job_info_flag
from settings.forms import ConfirmPrisonForm, ChangePrisonForm, ALL_PRISONS_CODE, JobInformationForm
from security.models import EmailNotifications
from security.utils import save_user_flags, can_skip_confirming_prisons, has_provided_job_information
class NomsOpsSettingsView(SettingsView):
template_name = 'settings/settings.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
if self.request.can_access_security:
session = get_api_session(self.request)
email_preferences = session.get('/emailpreferences/').json()
context['email_notifications'] = email_preferences['frequency'] != EmailNotifications.never
return context
def post(self, *args, **kwargs):
if self.request.can_access_security and 'email_notifications' in self.request.POST:
session = get_api_session(self.request)
if self.request.POST['email_notifications'] == 'True':
session.post('/emailpreferences/', json={'frequency': EmailNotifications.daily})
else:
session.post('/emailpreferences/', json={'frequency': EmailNotifications.never})
return redirect(reverse_lazy('settings'))
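    # Illustrative interaction with post() above (a sketch; the URL name is
    # taken from the redirect, the payload key from the branch logic):
    #
    #     client.post(reverse('settings'), {'email_notifications': 'True'})
    #     # -> API email preference switched to EmailNotifications.daily;
    #     # any other value disables notifications.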
class ConfirmPrisonsView(FormView):
title = _('Confirm your prisons')
template_name = 'settings/confirm-prisons.html'
form_class = ConfirmPrisonForm
success_url = reverse_lazy('confirm_prisons_confirmation')
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['current_prisons'] = ','.join([
p['nomis_id'] for p in self.request.user.user_data['prisons']
] if self.request.user.user_data.get('prisons') else ['ALL'])
selected_prisons = self.request.GET.getlist('prisons')
if not selected_prisons:
selected_prisons = [
p['nomis_id'] for p in self.request.user.user_data['prisons']
]
if not selected_prisons:
selected_prisons = [ALL_PRISONS_CODE]
query_dict = self.request.GET.copy()
query_dict['prisons'] = selected_prisons
context['change_prison_query'] = urlencode(query_dict, doseq=True)
self.request.cannot_navigate_away = not can_skip_confirming_prisons(self.request.user)
return context
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['request'] = self.request
return kwargs
def form_valid(self, form):
form.save()
save_user_flags(self.request, confirmed_prisons_flag)
return redirect(self.get_success_url())
def get_success_url(self):
if 'next' in self.request.GET:
return '{path}?{query}'.format(
path=self.success_url,
query=urlencode({'next': self.request.GET['next']})
)
return self.success_url
class ChangePrisonsView(SuccessURLAllowedHostsMixin, FormView):
title = _('Change prisons')
template_name = 'settings/confirm-prisons-change.html'
form_class = ChangePrisonForm
def get_success_url(self):
"""
Returns the REDIRECT_FIELD_NAME value in GET if it exists and it's valid
or the url to the settings page otherwise.
"""
if REDIRECT_FIELD_NAME in self.request.GET:
next_page = self.request.GET[REDIRECT_FIELD_NAME]
url_is_safe = is_safe_url(
url=next_page,
allowed_hosts=self.get_success_url_allowed_hosts(),
require_https=self.request.is_secure(),
)
if url_is_safe:
return next_page
return reverse('settings')
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['data_attrs'] = {
'data-autocomplete-error-empty': _('Type a prison name'),
'data-autocomplete-error-summary': _('There was a problem'),
'data-event-category': 'PrisonConfirmation',
}
context['current_prisons'] = ','.join([
p['nomis_id'] for p in self.request.user.user_data['prisons']
] if self.request.user.user_data.get('prisons') else ['ALL'])
return context
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['request'] = self.request
return kwargs
def form_valid(self, form):
form.save()
save_user_flags(self.request, confirmed_prisons_flag)
return redirect(self.get_success_url())
class AddOrRemovePrisonsView(ChangePrisonsView):
title = _('Add or remove prisons')
template_name = 'settings/confirm-prisons-change.html'
form_class = ChangePrisonForm
success_url = reverse_lazy('confirm_prisons')
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
self.request.cannot_navigate_away = not can_skip_confirming_prisons(self.request.user)
return context
def form_valid(self, form):
return redirect('{path}?{query}'.format(
path=self.get_success_url(),
query=form.get_confirmation_query_string()
))
class ConfirmPrisonsConfirmationView(TemplateView):
title = _('Your prisons have been saved')
template_name = 'settings/confirm-prisons-confirmation.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['prisons'] = self.request.user_prisons
return context
class JobInformationView(SuccessURLAllowedHostsMixin, FormView):
title = _('Help us improve this service')
template_name = 'settings/job-information.html'
form_class = JobInformationForm
def dispatch(self, request, *args, **kwargs):
request.cannot_navigate_away = True
return super().dispatch(request, *args, **kwargs)
def get_success_url(self):
if REDIRECT_FIELD_NAME in self.request.GET:
next_page = self.request.GET[REDIRECT_FIELD_NAME]
url_is_safe = is_safe_url(
url=next_page,
allowed_hosts=self.get_success_url_allowed_hosts(),
require_https=self.request.is_secure(),
)
if url_is_safe:
return next_page
return reverse('security:dashboard')
def form_valid(self, form):
if has_provided_job_information(self.request.user):
return redirect(self.get_success_url())
session = get_api_session(self.request)
session.post('/job-information/', json={'title': form.cleaned_data['job_title_or_other'],
'prison_estate': form.cleaned_data['prison_estate'],
'tasks': form.cleaned_data['tasks']})
save_user_flags(self.request, provided_job_info_flag)
return super().form_valid(form)
| 39.515789 | 103 | 0.668221 |
f73e12a1c014661754813b808c8c50782a8dc99d | 4,282 | py | Python | colour_datasets/loaders/jakob2019.py | colour-science/colour-datasets | 464c387c17739f08a0cceb5185f6b225872adb6c | [
"BSD-3-Clause"
] | 28 | 2019-06-15T03:07:28.000Z | 2022-03-28T14:11:51.000Z | colour_datasets/loaders/jakob2019.py | JGoldstone/colour-datasets | 8e0b52870c63c0e9b72d8b848720e0c28e0cbfa4 | [
"BSD-3-Clause"
] | 12 | 2020-03-24T17:35:36.000Z | 2021-11-09T08:49:39.000Z | colour_datasets/loaders/jakob2019.py | JGoldstone/colour-datasets | 8e0b52870c63c0e9b72d8b848720e0c28e0cbfa4 | [
"BSD-3-Clause"
] | 8 | 2019-10-27T15:00:52.000Z | 2022-01-26T15:29:38.000Z | # -*- coding: utf-8 -*-
"""
Spectral Upsampling Coefficient Tables - Jakob and Hanika (2019)
================================================================
Defines the objects implementing support for *Jakob and Hanika (2019)*
*Spectral Upsampling Coefficient Tables* dataset loading:
- :class:`colour_datasets.loaders.DatasetLoader_Jakob2019`
- :func:`colour_datasets.loaders.build_Jakob2019`
References
----------
- :cite:`Jakob2019` : Jakob, W., & Hanika, J. (2019). A Low‐Dimensional
Function Space for Efficient Spectral Upsampling. Computer Graphics Forum,
38(2), 147-155. doi:10.1111/cgf.13626
"""
import glob
import os
from collections import OrderedDict
from colour.recovery import LUT3D_Jakob2019
from colour_datasets.loaders import AbstractDatasetLoader
from colour_datasets.records import datasets
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2019-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-developers@colour-science.org'
__status__ = 'Production'
__all__ = ['DatasetLoader_Jakob2019', 'build_Jakob2019']
class DatasetLoader_Jakob2019(AbstractDatasetLoader):
"""
Defines the *Jakob and Hanika (2019)*
*Spectral Upsampling Coefficient Tables* dataset loader.
Attributes
----------
- :attr:`colour_datasets.loaders.DatasetLoader_Jakob2019.ID`
Methods
-------
- :meth:`colour_datasets.loaders.DatasetLoader_Jakob2019.__init__`
- :meth:`colour_datasets.loaders.DatasetLoader_Jakob2019.load`
References
----------
:cite:`Jakob2019`
"""
ID = '4050598'
"""
Dataset record id, i.e. the *Zenodo* record number.
ID : unicode
"""
def __init__(self):
super(DatasetLoader_Jakob2019,
self).__init__(datasets()[DatasetLoader_Jakob2019.ID])
def load(self):
"""
Syncs, parses, converts and returns the *Jakob and Hanika (2019)*
*Spectral Upsampling Coefficient Tables* dataset content.
Returns
-------
OrderedDict
*Jakob and Hanika (2019)* *Spectral Upsampling Coefficient Tables*
dataset content.
Examples
--------
>>> from colour_datasets.utilities import suppress_stdout
>>> dataset = DatasetLoader_Jakob2019()
>>> with suppress_stdout():
... dataset.load()
>>> len(dataset.content.keys())
4
"""
super(DatasetLoader_Jakob2019, self).sync()
self._content = OrderedDict()
tables_path = os.path.join(self.record.repository, 'dataset',
'Jakob2019Spectral', 'supplement', 'tables')
coeff_file_to_RGB_colourspace = {
'rec2020': 'ITU-R BT.2020',
'srgb': 'sRGB',
'aces2065_1': 'ACES2065-1',
'prophotorgb': 'ProPhoto RGB',
}
for coeff_file in glob.glob('{0}/*.coeff'.format(tables_path)):
key = os.path.splitext(os.path.basename(coeff_file))[0]
key = coeff_file_to_RGB_colourspace.get(key, key)
LUT = LUT3D_Jakob2019()
LUT.read(coeff_file)
self._content[key] = LUT
return self._content
_DATASET_LOADER_JAKOB2019 = None
"""
Singleton instance of the *Jakob and Hanika (2019)*
*Spectral Upsampling Coefficient Tables* dataset loader.
_DATASET_LOADER_JAKOB2019 : DatasetLoader_Jakob2019
"""
def build_Jakob2019(load=True):
"""
Singleton factory that builds the *Jakob and Hanika (2019)*
*Spectral Upsampling Coefficient Tables* dataset loader.
Parameters
----------
load : bool, optional
Whether to load the dataset upon instantiation.
Returns
-------
DatasetLoader_Jakob2019
Singleton instance of the *Jakob and Hanika (2019)*
*Spectral Upsampling Coefficient Tables* dataset loader.
References
----------
:cite:`Jakob2019`
"""
global _DATASET_LOADER_JAKOB2019
if _DATASET_LOADER_JAKOB2019 is None:
_DATASET_LOADER_JAKOB2019 = DatasetLoader_Jakob2019()
if load:
_DATASET_LOADER_JAKOB2019.load()
return _DATASET_LOADER_JAKOB2019
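# Illustrative usage (syncs the Zenodo record on first call; the 'sRGB' key
# follows the coeff_file_to_RGB_colourspace mapping above):
#
#     >>> loader = build_Jakob2019()  # doctest: +SKIP
#     >>> LUT_sRGB = loader.content['sRGB']  # doctest: +SKIP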
| 27.986928 | 79 | 0.64596 |
f73e20489d20cf4f5c8057aeb413fc2f1f957f89 | 3,355 | py | Python | setup.py | albert118/jaffle | 55da4d75ad3a9ca633af3865cc35b73e3406a4ef | [
"BSD-3-Clause"
] | null | null | null | setup.py | albert118/jaffle | 55da4d75ad3a9ca633af3865cc35b73e3406a4ef | [
"BSD-3-Clause"
] | null | null | null | setup.py | albert118/jaffle | 55da4d75ad3a9ca633af3865cc35b73e3406a4ef | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# flake8: noqa
import os
from setuptools import find_packages, setup
from jaffle import __version__
long_description = '''
Jaffle is an automation tool for Python software development, which does:
- Instantiate Python applications in a Jupyter kernel and allows them to call
each other
- Launch external processes
- Combine log messages of all Python applications and external processes
enabling filtering and reformatting
Jaffle contains WatchdogApp that can watch filesystem events and call
arbitrary code or command. That allows you to automate testing, reloading
applications, etc.
Examples
========
- `Auto-testing with pytest`_
- `Automatic Sphinx Document Build`_
- `Web Development with Tornado and React`_
- `Jupyter Extension Development`_
.. _`Auto-testing with pytest`: http://jaffle.readthedocs.io/en/latest/cookbook/pytest.html
.. _`Automatic Sphinx Document Build`: http://jaffle.readthedocs.io/en/latest/cookbook/sphinx.html
.. _`Web Development with Tornado and React`: http://jaffle.readthedocs.io/en/latest/cookbook/tornado_spa.html
.. _`Jupyter Extension Development`: http://jaffle.readthedocs.io/en/latest/cookbook/jupyter_ext.html
GitHub Respository
==================
`yatsu/jaffle`_
.. _`yatsu/jaffle`: https://github.com/yatsu/jaffle
Documentation
=============
`Jaffle documentation`_
.. _`Jaffle documentation`: http://jaffle.readthedocs.io
'''.strip()
requirements = [
"filelock>=3.0.0,<4",
"ipython",
"jupyter-client",
"jupyter-console",
"jupyter-core",
"jsonschema>=2.0.0,<3",
"mako>=1.0.0,<2",
"notebook>=5.0.0,<6",
"prompt-toolkit<2",
"pygments",
"pyyaml",
"pyzmq",
"setuptools",
"tornado>=4.5,<5",
"traitlets",
"watchdog>=0.8.0"
]
dev_requirements = [
"flake8>=3.5.0",
"pip",
"pytest>=3.4.0",
"pytest-cov>=2.5.0",
"pytest-tornado>=0.4.0",
"watchdog>=0.8.0"
]
setup(
name='jaffle',
version=__version__,
description='Python app and process orchestration tool for development environment',
long_description=long_description,
author='Jaffle Development Team',
author_email='jaffle@yatsu.info',
url='https://github.com/yatsu/jaffle',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: MacOS',
'Operating System :: POSIX',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Testing',
'Topic :: System :: Monitoring',
'Topic :: System :: Filesystems',
'Topic :: System :: Shells',
'Topic :: Utilities'
],
keywords='orchestration interactive process test pytest watchdog',
packages=find_packages(),
install_requires=requirements,
extras_require={
'dev': dev_requirements,
'pytest': ['pytest>=3.4.0']
},
include_package_data=True,
entry_points={
'console_scripts': [
'jaffle = jaffle.command:main'
]
}
)
| 27.958333 | 110 | 0.653949 |
f73e2b8f54d3b3fc1bcc24ef1b0f9ce11a9ce9c5 | 2,360 | py | Python | city_scrapers/spiders/det_great_lakes_water_authority.py | noahkconley/city-scrapers | 37420ce3a9295c2aac68c0fb4a957ad41394a801 | [
"MIT"
] | null | null | null | city_scrapers/spiders/det_great_lakes_water_authority.py | noahkconley/city-scrapers | 37420ce3a9295c2aac68c0fb4a957ad41394a801 | [
"MIT"
] | null | null | null | city_scrapers/spiders/det_great_lakes_water_authority.py | noahkconley/city-scrapers | 37420ce3a9295c2aac68c0fb4a957ad41394a801 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import re
import scrapy
from ics import Calendar
from city_scrapers.spider import Spider
class DetGreatLakesWaterAuthoritySpider(Spider):
name = 'det_great_lakes_water_authority'
agency_id = 'Great Lakes Water Authority'
timezone = 'America/Detroit'
allowed_domains = ['www.glwater.org']
start_urls = ['http://www.glwater.org/events/']
def parse(self, response):
"""
`parse` should always `yield` a dict that follows the Event Schema
<https://city-bureau.github.io/city-scrapers/06_event_schema.html>.
Change the `_parse_id`, `_parse_name`, etc methods to fit your scraping
needs.
"""
next_page = response.css('.tribe-events-nav-next')[0].xpath('a/@href').extract_first()
if next_page:
yield scrapy.Request(next_page, callback=self.parse)
yield scrapy.Request(response.url + '?ical=1&tribe_display=month', callback=self._parse_ical)
def _parse_ical(self, ical_event):
cal = Calendar(ical_event.text)
for event in cal.events:
# Meetings parens to indicate status (e.g. (Canceled))
desc = re.search(r'(?P<name>[^()]+)(?P<status>\(([^()]+)\))?', event.name)
data = {
'_type': 'event',
'name': desc.group('name').strip(),
'event_description': event.description,
'classification': self._parse_classification(desc.group('name')),
'start': {'date': event.begin.date(), 'time': event.begin.time(), 'note': ''},
'end': {'date': event.end.date(), 'time': event.end.time(), 'note': ''},
'all_day': event.all_day,
'location': {'name': '', 'address': event.location, 'neighborhood': ''},
'documents': [],
'sources': [{'url': event.url, 'note': ''}]
}
data['id'] = self._generate_id(data)
data['status'] = self._generate_status(data, desc.group(0))
yield data
@staticmethod
def _parse_classification(name):
"""
Parse or generate classification (e.g. public health, education, etc).
"""
if 'BOARD' in name.upper():
return 'Board'
if 'COMMITTEE' in name.upper():
return 'Committee'
return ''
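    # Illustrative behaviour (not part of the original spider):
    #
    #     >>> DetGreatLakesWaterAuthoritySpider._parse_classification('Audit Committee')
    #     'Committee'
    #     >>> DetGreatLakesWaterAuthoritySpider._parse_classification('Board of Directors')
    #     'Board'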
| 38.688525 | 101 | 0.572458 |
f73e2cae7ae81dc17b787e8465ce0f4ee2ea9092 | 9,839 | py | Python | test/dynamics/solvers/test_solver_classes.py | haggaila/qiskit-dynamics | fd20314e2b591c35323782bc429d9f928fdb9a12 | [
"Apache-2.0"
] | 1 | 2022-01-21T01:47:40.000Z | 2022-01-21T01:47:40.000Z | test/dynamics/solvers/test_solver_classes.py | haggaila/qiskit-dynamics | fd20314e2b591c35323782bc429d9f928fdb9a12 | [
"Apache-2.0"
] | null | null | null | test/dynamics/solvers/test_solver_classes.py | haggaila/qiskit-dynamics | fd20314e2b591c35323782bc429d9f928fdb9a12 | [
"Apache-2.0"
] | null | null | null | # This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"""
Tests for solver classes module.
"""
import numpy as np
from qiskit import QiskitError
from qiskit.quantum_info import Operator, Statevector, SuperOp, DensityMatrix
from qiskit_dynamics import Solver
from qiskit_dynamics.signals import Signal
from ..common import QiskitDynamicsTestCase, TestJaxBase
class TestSolverExceptions(QiskitDynamicsTestCase):
"""Tests for Solver exception raising based on input types."""
def setUp(self):
X = Operator.from_label("X")
self.ham_solver = Solver(hamiltonian_operators=[X], hamiltonian_signals=[1.0])
self.lindblad_solver = Solver(
hamiltonian_operators=[X], hamiltonian_signals=[1.0], dissipator_operators=[X]
)
self.vec_lindblad_solver = Solver(
hamiltonian_operators=[X],
hamiltonian_signals=[1.0],
dissipator_operators=[X],
evaluation_mode="dense_vectorized",
)
def test_hamiltonian_shape_error(self):
"""Test error raising if invalid shape for Hamiltonian model."""
with self.assertRaises(QiskitError) as qe:
self.ham_solver.solve([0.0, 1.0], np.array([1.0, 0.0, 0.0]))
self.assertTrue("Shape mismatch" in str(qe.exception))
with self.assertRaises(QiskitError) as qe:
self.ham_solver.solve([0.0, 1.0], np.array([[[1.0, 0.0, 0.0]]]))
self.assertTrue("Shape mismatch" in str(qe.exception))
with self.assertRaises(QiskitError) as qe:
self.ham_solver.solve([0.0, 1.0], Statevector(np.array([1.0, 0.0, 0.0])))
self.assertTrue("Shape mismatch" in str(qe.exception))
def test_lindblad_shape_error(self):
"""Test error raising if invalid shape for Lindblad model."""
with self.assertRaises(QiskitError) as qe:
self.lindblad_solver.solve([0.0, 1.0], np.array([1.0, 0.0, 0.0]))
self.assertTrue("Shape mismatch" in str(qe.exception))
with self.assertRaises(QiskitError) as qe:
self.lindblad_solver.solve([0.0, 1.0], np.array([[[1.0, 0.0, 0.0]]]))
self.assertTrue("Shape mismatch" in str(qe.exception))
with self.assertRaises(QiskitError) as qe:
self.lindblad_solver.solve([0.0, 1.0], Statevector(np.array([1.0, 0.0, 0.0])))
self.assertTrue("Shape mismatch" in str(qe.exception))
def test_vectorized_lindblad_shape_error(self):
"""Test error raising if invalid shape for vectorized Lindblad model."""
with self.assertRaises(QiskitError) as qe:
self.vec_lindblad_solver.solve([0.0, 1.0], np.array([[1.0, 0.0], [0.0, 1.0]]))
self.assertTrue("Shape mismatch" in str(qe.exception))
with self.assertRaises(QiskitError) as qe:
self.vec_lindblad_solver.solve([0.0, 1.0], DensityMatrix(np.array([1.0, 0.0, 0.0])))
self.assertTrue("Shape mismatch" in str(qe.exception))
with self.assertRaises(QiskitError) as qe:
self.vec_lindblad_solver.solve([0.0, 1.0], Statevector(np.array([1.0, 0.0, 0.0])))
self.assertTrue("Shape mismatch" in str(qe.exception))
def test_non_vectorized_SuperOp_error(self):
"""Test SuperOp simulation attempt for non-vectorized Lindblad model."""
with self.assertRaises(QiskitError) as qe:
self.lindblad_solver.solve([0.0, 1.0], SuperOp(np.eye(4)))
self.assertTrue("Simulating SuperOp" in str(qe.exception))
class TestSolver(QiskitDynamicsTestCase):
"""Tests for Solver class."""
def setUp(self):
"""Set up some simple models."""
X = 2 * np.pi * Operator.from_label("X") / 2
Z = 2 * np.pi * Operator.from_label("Z") / 2
self.ham_solver = Solver(
hamiltonian_operators=[X],
hamiltonian_signals=[Signal(1.0, 5.0)],
drift=5 * Z,
rotating_frame=5 * Z,
)
self.rwa_ham_solver = Solver(
hamiltonian_operators=[X],
hamiltonian_signals=[Signal(1.0, 5.0)],
drift=5 * Z,
rotating_frame=5 * Z,
rwa_cutoff_freq=2 * 5.0,
)
self.lindblad_solver = Solver(
hamiltonian_operators=[X],
hamiltonian_signals=[Signal(1.0, 5.0)],
dissipator_operators=[0.01 * X],
drift=5 * Z,
rotating_frame=5 * Z,
)
self.vec_lindblad_solver = Solver(
hamiltonian_operators=[X],
hamiltonian_signals=[Signal(1.0, 5.0)],
dissipator_operators=[0.01 * X],
drift=5 * Z,
rotating_frame=5 * Z,
evaluation_mode="dense_vectorized",
)
# lindblad solver with no dissipation for testing
self.vec_lindblad_solver_no_diss = Solver(
hamiltonian_operators=[X],
hamiltonian_signals=[Signal(1.0, 5.0)],
dissipator_operators=[0.0 * X],
drift=5 * Z,
rotating_frame=5 * Z,
evaluation_mode="dense_vectorized",
)
self.method = "DOP853"
def test_lindblad_solve_statevector(self):
"""Test correct conversion of Statevector to DensityMatrix."""
results = self.lindblad_solver.solve(
[0.0, 1.0], y0=Statevector([0.0, 1.0]), method=self.method
)
self.assertTrue(isinstance(results.y[-1], DensityMatrix))
self.assertTrue(results.y[-1].data[0, 0] > 0.99 and results.y[-1].data[0, 0] < 0.999)
def test_vec_lindblad_statevector(self):
"""Test correct conversion of Statevector to DensityMatrix and vectorized solving."""
results = self.vec_lindblad_solver.solve(
[0.0, 1.0], y0=Statevector([0.0, 1.0]), method=self.method
)
results2 = self.lindblad_solver.solve(
[0.0, 1.0], y0=Statevector([0.0, 1.0]), method=self.method
)
self.assertTrue(isinstance(results.y[-1], DensityMatrix))
self.assertAllClose(results.y[-1].data, results2.y[-1].data)
def test_array_vectorized_lindblad(self):
"""Test Lindblad solver is array-vectorized."""
results = self.lindblad_solver.solve(
[0.0, 1.0],
y0=np.array([[[0.0, 0.0], [0.0, 1.0]], [[1.0, 0.0], [0.0, 0.0]]]),
method=self.method,
)
self.assertTrue(results.y[-1][0, 0, 0] > 0.99 and results.y[-1][0, 0, 0] < 0.999)
self.assertTrue(results.y[-1][1, 1, 1] > 0.99 and results.y[-1][1, 1, 1] < 0.999)
def test_rwa_hamiltonian(self):
"""Test perfect inversion for pi pulse with RWA."""
results = self.rwa_ham_solver.solve(
[0.0, 1.0], y0=np.array([0.0, 1.0]), atol=1e-10, rtol=1e-10, method=self.method
)
self.assertTrue(np.abs(results.y[-1][0]) > (1 - 1e-8))
def test_hamiltonian_DensityMatrix(self):
"""Test correct conjugation of Hamiltonian-based density matrix simulation."""
results = self.ham_solver.solve(
[0.0, 1.0],
y0=DensityMatrix(np.array([0.0, 1.0])),
atol=1e-10,
rtol=1e-10,
method=self.method,
)
self.assertTrue(isinstance(results.y[-1], DensityMatrix))
self.assertTrue(np.abs(results.y[-1].data[0, 0]) > 0.999)
def test_hamiltonian_SuperOp(self):
"""Test Hamiltonian-based SuperOp simulation."""
results = self.rwa_ham_solver.solve(
[0.0, 1.0], y0=SuperOp(np.eye(4)), atol=1e-10, rtol=1e-10, method=self.method
)
self.assertTrue(isinstance(results.y[-1], SuperOp))
X = np.array([[0.0, 1.0], [1.0, 0.0]])
self.assertAllClose(results.y[-1].data, np.kron(X, X))
def test_hamiltonian_lindblad_SuperOp_consistency(self):
"""Test Hamiltonian-based SuperOp simulation."""
results = self.ham_solver.solve(
[0.0, 0.432], y0=SuperOp(np.eye(4)), atol=1e-10, rtol=1e-10, method=self.method
)
results2 = self.vec_lindblad_solver_no_diss.solve(
[0.0, 0.432], y0=SuperOp(np.eye(4)), atol=1e-10, rtol=1e-10
)
self.assertAllClose(results.y[-1].data, results2.y[-1].data)
class TestSolverJax(TestSolver, TestJaxBase):
"""JAX version of TestSolver."""
def setUp(self):
"""Set method to 'jax_odeint' to speed up running of jax version of tests."""
super().setUp()
self.method = "jax_odeint"
def test_jit_solve(self):
"""Test jitting setting signals and solving."""
def func(a):
ham_solver = self.ham_solver.copy()
ham_solver.signals = [Signal(lambda t: a, 5.0)]
yf = ham_solver.solve(
np.array([0.0, 1.0]), y0=np.array([0.0, 1.0]), method=self.method
).y[-1]
return yf
jit_func = self.jit_wrap(func)
self.assertAllClose(jit_func(2.0), func(2.0))
def test_jit_grad_solve(self):
"""Test jitting setting signals and solving."""
def func(a):
lindblad_solver = self.lindblad_solver.copy()
lindblad_solver.signals = [[Signal(lambda t: a, 5.0)], [1.0]]
yf = lindblad_solver.solve(
[0.0, 1.0], y0=np.array([[0.0, 1.0], [0.0, 1.0]]), method=self.method
).y[-1]
return yf
jit_grad_func = self.jit_grad_wrap(func)
jit_grad_func(1.0)
| 38.584314 | 96 | 0.6087 |
f73e4d16b2cd8fa1026b82af5d125e7099e43365 | 9,695 | py | Python | siteprefs/utils.py | idlesign/django-siteprefs | dbc040b96800a73e35a3d436a5207dd658ce0c58 | [
"BSD-3-Clause"
] | 8 | 2015-01-30T11:57:45.000Z | 2021-11-07T01:21:05.000Z | siteprefs/utils.py | idlesign/django-siteprefs | dbc040b96800a73e35a3d436a5207dd658ce0c58 | [
"BSD-3-Clause"
] | 17 | 2015-03-28T18:26:40.000Z | 2020-06-05T04:35:15.000Z | siteprefs/utils.py | idlesign/django-siteprefs | dbc040b96800a73e35a3d436a5207dd658ce0c58 | [
"BSD-3-Clause"
] | 7 | 2015-03-29T10:06:14.000Z | 2020-05-29T05:22:39.000Z |
import inspect
import os
from collections import OrderedDict
from datetime import datetime
from typing import Any, Callable, Type, Generator, Tuple
from warnings import warn
from django.contrib import admin
from django.db import models
from django.utils.translation import gettext_lazy as _
from etc.toolbox import import_app_module, import_project_modules
from .settings import PREFS_MODULE_NAME
from .signals import prefs_save
class Frame:
"""Represents a frame object at a definite level of hierarchy.
To be used as context manager:
        with Frame() as f:
...
"""
def __init__(self, stepback: int = 0):
self.depth = stepback
def __enter__(self):
frame = inspect.currentframe().f_back
for __ in range(self.depth):
frame = frame.f_back
self.frame = frame
return self.frame
def __exit__(self, exc_type, exc_val, exc_tb):
del self.frame
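# Illustrative usage of Frame (a sketch): with stepback=1, code inside a
# helper can inspect the locals of the helper's caller.
#
#     def callers_locals():
#         with Frame(stepback=1) as frame:
#             return frame.f_locals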
class PatchedLocal:
"""Object of this class temporarily replace all module variables
considered preferences.
"""
def __init__(self, key: str, val: Any):
self.key = key
self.val = val
class Mimic:
"""Mimics other types by implementation of various special methods.
    This one is deprecated in favor of settings module proxying (proxy_settings_module()).
"""
value: Any = None
def __call__(self, *args, **kwargs):
return self.value
def __str__(self):
return self.value.__str__()
def __bool__(self):
return bool(self.value)
def __int__(self):
return int(self.value)
def __float__(self):
return float(self.value)
def __len__(self):
return self.value.__len__()
def __contains__(self, item):
return self.value.__contains__(item)
def __sub__(self, other):
return self.value.__sub__(other)
def __rsub__(self, other):
return self.value.__rsub__(other)
def __add__(self, other):
return self.value.__add__(other)
def __radd__(self, other):
return self.value.__radd__(other)
def __mul__(self, other):
return self.value.__mul__(other)
def __rmul__(self, other):
return self.value.__rmul__(other)
def __lt__(self, other):
return self.value.__lt__(other)
def __le__(self, other):
return self.value.__le__(other)
def __gt__(self, other):
return self.value.__gt__(other)
def __ge__(self, other):
return self.value.__ge__(other)
def __eq__(self, other):
return self.value.__eq__(other)
def __ne__(self, other):
return self.value.__ne__(other)
class PrefProxy(Mimic):
"""Objects of this class replace app preferences."""
def __init__(
self,
name: str,
default: Any,
category: str = None,
field: models.Field = None,
verbose_name: str = None,
help_text: str = '',
static: bool = True,
readonly: bool = False
):
"""
:param name: Preference name.
:param default: Default (initial) value.
:param category: Category name the preference belongs to.
:param field: Django model field to represent this preference.
:param verbose_name: Field verbose name.
:param help_text: Field help text.
:param static: Leave this preference static (do not store in DB).
:param readonly: Make this field read only.
"""
self.name = name
self.category = category
self.default = default
self.static = static
self.help_text = help_text
if static:
readonly = True
self.readonly = readonly
if verbose_name is None:
verbose_name = name.replace('_', ' ').capitalize()
self.verbose_name = verbose_name
if field is None:
self.field = get_field_for_proxy(self)
else:
self.field = field
update_field_from_proxy(self.field, self)
@property
def value(self) -> Any:
if self.static:
val = self.default
else:
try:
val = getattr(self, 'db_value')
except AttributeError:
val = self.default
return self.field.to_python(val)
def get_value(self) -> Any:
warn('Please use .value instead .get_value().', DeprecationWarning, stacklevel=2)
return self.value
def __repr__(self):
return f'{self.name} = {self.value}'
def get_field_for_proxy(pref_proxy: PrefProxy) -> models.Field:
"""Returns a field object instance for a given PrefProxy object.
:param pref_proxy:
"""
field = {
bool: models.BooleanField,
int: models.IntegerField,
float: models.FloatField,
datetime: models.DateTimeField,
}.get(type(pref_proxy.default), models.TextField)()
update_field_from_proxy(field, pref_proxy)
return field
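# Illustrative mapping performed above (a sketch, not part of the original
# module):
#
#     >>> PrefProxy('MY_FLAG', True).field.__class__.__name__
#     'BooleanField'
#     >>> PrefProxy('GREETING', 'hi').field.__class__.__name__  # fallback type
#     'TextField'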
def update_field_from_proxy(field_obj: models.Field, pref_proxy: PrefProxy):
"""Updates field object with data from a PrefProxy object.
:param field_obj:
:param pref_proxy:
"""
attr_names = ('verbose_name', 'help_text', 'default')
for attr_name in attr_names:
setattr(field_obj, attr_name, getattr(pref_proxy, attr_name))
def get_pref_model_class(app: str, prefs: dict, get_prefs_func: Callable) -> Type[models.Model]:
"""Returns preferences model class dynamically crated for a given app or None on conflict."""
module = f'{app}.{PREFS_MODULE_NAME}'
model_dict = {
'_prefs_app': app,
'_get_prefs': staticmethod(get_prefs_func),
'__module__': module,
'Meta': type('Meta', (models.options.Options,), {
'verbose_name': _('Preference'),
'verbose_name_plural': _('Preferences'),
'app_label': app,
'managed': False,
})
}
for field_name, val_proxy in prefs.items():
model_dict[field_name] = val_proxy.field
model = type('Preferences', (models.Model,), model_dict)
def fake_save_base(self, *args, **kwargs):
updated_prefs = {
f.name: getattr(self, f.name)
for f in self._meta.fields
if not isinstance(f, models.fields.AutoField)
}
app_prefs = self._get_prefs(self._prefs_app)
for pref in app_prefs.keys():
if pref in updated_prefs:
app_prefs[pref].db_value = updated_prefs[pref]
self.pk = self._prefs_app # Make Django 1.7 happy.
prefs_save.send(sender=self, app=self._prefs_app, updated_prefs=updated_prefs)
return True
model.save_base = fake_save_base
return model
def get_pref_model_admin_class(prefs: dict) -> Type[admin.ModelAdmin]:
by_category = OrderedDict()
readonly_fields = []
for field_name, val_proxy in prefs.items():
if val_proxy.readonly:
readonly_fields.append(field_name)
if val_proxy.category not in by_category:
by_category[val_proxy.category] = []
by_category[val_proxy.category].append(field_name)
cl_model_admin_dict = {
'has_add_permission': lambda *args: False,
'has_delete_permission': lambda *args: False
}
if readonly_fields:
cl_model_admin_dict['readonly_fields'] = readonly_fields
fieldsets = []
for category, cat_prefs in by_category.items():
fieldsets.append((category, {'fields': cat_prefs}))
if fieldsets:
cl_model_admin_dict['fieldsets'] = fieldsets
model = type('PreferencesAdmin', (admin.ModelAdmin,), cl_model_admin_dict)
model.changelist_view = lambda self, request, **kwargs: self.change_view(request, '', **kwargs)
model.get_object = lambda self, *args: (
self.model(
**{
field_name: val_proxy.get_value() for field_name, val_proxy in
self.model._get_prefs(self.model._prefs_app).items()
}
)
)
return model
def get_frame_locals(stepback: int = 0) -> dict:
"""Returns locals dictionary from a given frame.
:param stepback:
"""
with Frame(stepback=stepback) as frame:
locals_dict = frame.f_locals
return locals_dict
def traverse_local_prefs(stepback: int = 0) -> Generator[Tuple[str, dict], None, None]:
"""Generator to walk through variables considered as preferences
in locals dict of a given frame.
:param stepback:
"""
locals_dict = get_frame_locals(stepback+1)
for k in locals_dict:
if not k.startswith('_') and k.upper() == k:
yield k, locals_dict
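# Editorial note: only names that are fully upper-case and do not start with
# an underscore are treated as preferences (the predicate above). A hedged,
# self-contained check of that predicate:
def _pref_name_filter_demo():
    is_pref = lambda k: not k.startswith('_') and k.upper() == k
    assert is_pref('MY_OPTION')         # upper-case -> preference
    assert not is_pref('_PRIVATE')      # leading underscore -> skipped
    assert not is_pref('regular_var')   # lower-case -> skipped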
def import_module(package: str, module_name: str):
"""Imports a module from a given package.
:param package:
:param module_name:
"""
import_app_module(package, module_name)
def import_prefs():
"""Imports preferences modules from packages (apps) and project root."""
# settings.py locals if autodiscover_siteprefs() is in urls.py
settings_locals = get_frame_locals(3)
if 'self' not in settings_locals: # If not SiteprefsConfig.ready()
# Try to import project-wide prefs.
project_package = settings_locals['__package__'] # Expected project layout introduced in Django 1.4
if not project_package:
# Fallback to old layout.
project_package = os.path.split(os.path.dirname(settings_locals['__file__']))[-1]
import_module(project_package, PREFS_MODULE_NAME)
import_project_modules(PREFS_MODULE_NAME)
| 25.580475 | 108 | 0.633213 |
f73e5bfa9700cd4b4cdfe40b8976cb6923234da6 | 4,234 | py | Python | simplejson/tests/test_unicode.py | gitdaniel228/realtor | 4366d57b064be87b31c8a036b3ed7a99b2036461 | ["BSD-3-Clause"] | 20 | 2015-01-26T01:39:44.000Z | 2020-05-30T19:04:14.000Z | lib/simplejson/tests/test_unicode.py | motord/Motorcycle-Diaries | bb5e5e2d4d79573b4231e760d7662db26c03a55e | ["BSD-3-Clause"] | 6 | 2015-02-23T06:47:09.000Z | 2015-06-04T20:31:30.000Z | lib/simplejson/tests/test_unicode.py | motord/Motorcycle-Diaries | bb5e5e2d4d79573b4231e760d7662db26c03a55e | ["BSD-3-Clause"] | 13 | 2015-01-26T01:39:45.000Z | 2022-03-09T16:45:09.000Z |
from unittest import TestCase
import simplejson as json
class TestUnicode(TestCase):
def test_encoding1(self):
encoder = json.JSONEncoder(encoding='utf-8')
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
s = u.encode('utf-8')
ju = encoder.encode(u)
js = encoder.encode(s)
self.assertEquals(ju, js)
def test_encoding2(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
s = u.encode('utf-8')
ju = json.dumps(u, encoding='utf-8')
js = json.dumps(s, encoding='utf-8')
self.assertEquals(ju, js)
def test_encoding3(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
j = json.dumps(u)
self.assertEquals(j, '"\\u03b1\\u03a9"')
def test_encoding4(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
j = json.dumps([u])
self.assertEquals(j, '["\\u03b1\\u03a9"]')
def test_encoding5(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
j = json.dumps(u, ensure_ascii=False)
self.assertEquals(j, u'"' + u + u'"')
def test_encoding6(self):
u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
j = json.dumps([u], ensure_ascii=False)
self.assertEquals(j, u'["' + u + u'"]')
def test_big_unicode_encode(self):
u = u'\U0001d120'
self.assertEquals(json.dumps(u), '"\\ud834\\udd20"')
self.assertEquals(json.dumps(u, ensure_ascii=False), u'"\U0001d120"')
def test_big_unicode_decode(self):
u = u'z\U0001d120x'
self.assertEquals(json.loads('"' + u + '"'), u)
self.assertEquals(json.loads('"z\\ud834\\udd20x"'), u)
def test_unicode_decode(self):
for i in range(0, 0xd7ff):
u = unichr(i)
#s = '"\\u{0:04x}"'.format(i)
s = '"\\u%04x"' % (i,)
self.assertEquals(json.loads(s), u)
def test_object_pairs_hook_with_unicode(self):
s = u'{"xkd":1, "kcw":2, "art":3, "hxm":4, "qrt":5, "pad":6, "hoy":7}'
p = [(u"xkd", 1), (u"kcw", 2), (u"art", 3), (u"hxm", 4),
(u"qrt", 5), (u"pad", 6), (u"hoy", 7)]
self.assertEqual(json.loads(s), eval(s))
self.assertEqual(json.loads(s, object_pairs_hook=lambda x: x), p)
od = json.loads(s, object_pairs_hook=json.OrderedDict)
self.assertEqual(od, json.OrderedDict(p))
self.assertEqual(type(od), json.OrderedDict)
# the object_pairs_hook takes priority over the object_hook
self.assertEqual(json.loads(s,
object_pairs_hook=json.OrderedDict,
object_hook=lambda x: None),
json.OrderedDict(p))
def test_default_encoding(self):
self.assertEquals(json.loads(u'{"a": "\xe9"}'.encode('utf-8')),
{'a': u'\xe9'})
def test_unicode_preservation(self):
self.assertEquals(type(json.loads(u'""')), unicode)
self.assertEquals(type(json.loads(u'"a"')), unicode)
self.assertEquals(type(json.loads(u'["a"]')[0]), unicode)
def test_ensure_ascii_false_returns_unicode(self):
# http://code.google.com/p/simplejson/issues/detail?id=48
self.assertEquals(type(json.dumps([], ensure_ascii=False)), unicode)
self.assertEquals(type(json.dumps(0, ensure_ascii=False)), unicode)
self.assertEquals(type(json.dumps({}, ensure_ascii=False)), unicode)
self.assertEquals(type(json.dumps("", ensure_ascii=False)), unicode)
def test_ensure_ascii_false_bytestring_encoding(self):
# http://code.google.com/p/simplejson/issues/detail?id=48
doc1 = {u'quux': 'Arr\xc3\xaat sur images'}
doc2 = {u'quux': u'Arr\xeat sur images'}
doc_ascii = '{"quux": "Arr\\u00eat sur images"}'
doc_unicode = u'{"quux": "Arr\xeat sur images"}'
self.assertEquals(json.dumps(doc1), doc_ascii)
self.assertEquals(json.dumps(doc2), doc_ascii)
self.assertEquals(json.dumps(doc1, ensure_ascii=False), doc_unicode)
self.assertEquals(json.dumps(doc2, ensure_ascii=False), doc_unicode)
| 42.34 | 78 | 0.600614 |
f73e7006359c26e3644c0d15ff717fc875f2b778 | 88 | py | Python | helpers/__init__.py | r-luo/QA-Summary | 219dbbd306a85f73c126ad73ef7421c5450afbfc | ["MIT"] | null | null | null | helpers/__init__.py | r-luo/QA-Summary | 219dbbd306a85f73c126ad73ef7421c5450afbfc | ["MIT"] | null | null | null | helpers/__init__.py | r-luo/QA-Summary | 219dbbd306a85f73c126ad73ef7421c5450afbfc | ["MIT"] | null | null | null |
from . import utils
from . import data_prep
from . import seq2seq
from . import word2vec
| 22 | 23 | 0.784091 |
f73e70ef65fd2d764ecf1cc60292802342d9661f | 5,821 | py | Python | NeuralATT/train.py | INK-USC/shifted-label-distribution | 3cf2b7ced3b2e18234db405f6014f049c4830d71 | ["Apache-2.0"] | 37 | 2019-10-29T13:12:41.000Z | 2022-01-20T02:42:28.000Z | NeuralATT/train.py | INK-USC/shifted-label-distribution | 3cf2b7ced3b2e18234db405f6014f049c4830d71 | ["Apache-2.0"] | 5 | 2020-07-23T10:32:59.000Z | 2021-09-01T11:37:15.000Z | NeuralATT/train.py | INK-USC/shifted-label-distribution | 3cf2b7ced3b2e18234db405f6014f049c4830d71 | ["Apache-2.0"] | 2 | 2020-05-27T06:00:56.000Z | 2021-02-08T10:45:41.000Z |
'''
Training script with a randomly split dev set
'''
__author__ = 'Maosen'
import torch
from model import Model, Wrapper
import utils
from utils import Dataset
import argparse
import pickle
import numpy as np
from tqdm import tqdm
import logging
import os
import random
torch.backends.cudnn.deterministic = True
def train(args):
# Training
logging.info(str(args))
model = Model(args, device, rel2id, emb_matrix)
wrapper = Wrapper(model, args, device, train_dset.rel2id)
max_dev_f1 = 0.0
test_result_on_max_dev_f1 = (0.0, 0.0, 0.0)
for iter in range(niter):
# print('Iteration %d:' % iter)
loss = 0.0
for idx, batch in enumerate(tqdm(train_dset.batched_data)):
scope = train_dset.batched_scope[idx]
loss_batch = wrapper.update(batch, scope)
loss += loss_batch
loss /= len(train_dset.batched_data)
valid_loss, (dev_prec, dev_recall, dev_f1), _, _, _ = wrapper.eval(dev_dset)
logging.info('Iteration %d, Train loss %f' % (iter, loss))
logging.info(
'Dev loss: {:.4f}, P: {:.4f}, R: {:.4f}, F1: {:.4f}'.format(valid_loss, dev_prec, dev_recall,
dev_f1))
test_loss, (test_prec, test_recall, test_f1), _, _, _ = wrapper.eval(test_dset)
logging.info(
'Test loss: {:.4f}, P: {:.4f}, R: {:.4f}, F1: {:.4f}'.format(test_loss, test_prec, test_recall,
test_f1))
if dev_f1 > max_dev_f1:
max_dev_f1 = dev_f1
test_result_on_max_dev_f1 = (test_prec, test_recall, test_f1)
save_filename = os.path.join(args.save_dir, '%s_%d.pkl' % (args.info, runid))
wrapper.save(save_filename, iter)
wrapper.update_lr(valid_loss)
logging.info('Max dev F1: %f' % max_dev_f1)
test_p, test_r, test_f1 = test_result_on_max_dev_f1
logging.info('Test P, R, F1 on best epoch: {:.4f}, {:.4f}, {:.4f}'.format(test_p, test_r, test_f1))
logging.info('\n')
return max_dev_f1, test_result_on_max_dev_f1
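# Editorial note (illustrative invocation; paths are assumptions): the script
# is driven from the command line using the argparse flags defined below, e.g.
#
#   python train.py --data_dir data/neural_att/KBP --encoder pcnn \
#       --num_epoch 30 --repeat 5 --info KBP_default_ATT
#
# Each repeat trains a fresh model, tracks the best dev F1, and reports the
# test P/R/F1 obtained at the epoch with the best dev F1.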
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='data/neural_att/KBP')
parser.add_argument('--vocab_dir', type=str, default='data/neural/vocab')
parser.add_argument('--encoder', type=str, default='pcnn', help='Model')
parser.add_argument('--emb_dim', type=int, default=300, help='Word embedding dimension.')
parser.add_argument('--ner_dim', type=int, default=30, help='NER embedding dimension.')
parser.add_argument('--pos_dim', type=int, default=30, help='POS embedding dimension.')
parser.add_argument('--attn_dim', type=int, default=200, help='Attention size.')
parser.add_argument('--position_dim', type=int, default=30, help='Position encoding dimension.')
parser.add_argument('--hidden', type=int, default=230, help='RNN hidden state size.')
parser.add_argument('--window_size', type=int, default=3, help='Convolution window size')
parser.add_argument('--num_layers', type=int, default=2, help='Num of RNN layers.')
parser.add_argument('--bidirectional', dest='bidirectional', action='store_true', help='Bidirectional RNN.')
parser.set_defaults(bidirectional=True)
# Data Loading & Pre-processing
parser.add_argument('--lower', dest='lower', action='store_true', help='Lowercase all words.')
parser.add_argument('--no-lower', dest='lower', action='store_false')
parser.set_defaults(lower=True)
parser.add_argument('--batch_size', type=int, default=64)
# Optimization
parser.add_argument('--lr', type=float, default=1.0, help='Applies to SGD and Adagrad.')
parser.add_argument('--lr_decay', type=float, default=0.9)
parser.add_argument('--num_epoch', type=int, default=30)
parser.add_argument('--max_grad_norm', type=float, default=5.0, help='Gradient clipping.')
# Optimization - Dropout
parser.add_argument('--dropout', type=float, default=0.5, help='Input and RNN dropout rate.')
parser.add_argument('--in_drop', type=float, default=0.5, help='Input dropout rate.')
parser.add_argument('--intra_drop', type=float, default=0.3, help='Intra-layer dropout rate.')
parser.add_argument('--out_drop', type=float, default=0.7, help='Output dropout rate.')
# Other options
parser.add_argument('--seed', type=int, default=7698)
parser.add_argument('--repeat', type=int, default=5)
parser.add_argument('--save_dir', type=str, default='./dumped_models', help='Root dir for saving models.')
parser.add_argument('--info', type=str, default='KBP_default_ATT', help='Optional info for the experiment.')
args = parser.parse_args()
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Load vocab file (id2word)
with open(args.vocab_dir + '/vocab.pkl', 'rb') as f:
vocab = pickle.load(f)
word2id = {}
for idx, word in enumerate(vocab):
word2id[word] = idx
# Load word embedding
emb_file = args.vocab_dir + '/embedding.npy'
emb_matrix = np.load(emb_file)
assert emb_matrix.shape[0] == len(vocab)
assert emb_matrix.shape[1] == args.emb_dim
args.vocab_size = len(vocab)
niter = args.num_epoch
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print('Using device: %s' % device.type)
print('Reading data......')
rel2id = utils.load_rel2id('%s/relation2id.json' % args.data_dir)
train_filename = '%s/train.json' % args.data_dir
test_filename = '%s/test.json' % args.data_dir
dev_filename = '%s/dev.json' % args.data_dir
train_dset = Dataset(train_filename, args, word2id, device, rel2id=rel2id, shuffle=True, use_bag=True)
test_dset = Dataset(test_filename, args, word2id, device, rel2id=rel2id, use_bag=False)
dev_dset = Dataset(dev_filename, args, word2id, device, rel2id=rel2id, use_bag=False)
if not os.path.isdir(args.save_dir):
os.makedirs(args.save_dir)
for runid in range(1, args.repeat + 1):
logging.info('Run model %d times......' % runid)
dev_f1, test_result = train(args)
logging.info('')
| 38.296053 | 109 | 0.716715 |
f73e7218a3eaf4998f4b32f6977df96bbf9d95ba | 70,207 | py | Python | ThirdParty/Twisted/twisted/internet/test/test_tcp.py | OpenGeoscience/VTK | a373e975b9284a022f43a062ebf5042bb17b4e44 | ["BSD-3-Clause"] | 1 | 2021-10-13T01:57:14.000Z | 2021-10-13T01:57:14.000Z | ThirdParty/Twisted/twisted/internet/test/test_tcp.py | OpenGeoscience/VTK | a373e975b9284a022f43a062ebf5042bb17b4e44 | ["BSD-3-Clause"] | null | null | null | ThirdParty/Twisted/twisted/internet/test/test_tcp.py | OpenGeoscience/VTK | a373e975b9284a022f43a062ebf5042bb17b4e44 | ["BSD-3-Clause"] | 5 | 2015-10-09T04:12:29.000Z | 2021-12-15T16:57:11.000Z |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for implementations of L{IReactorTCP} and the TCP parts of
L{IReactorSocket}.
"""
from __future__ import division, absolute_import
__metaclass__ = type
import socket, errno
from zope.interface import implementer
from twisted.python.compat import _PY3
from twisted.python.runtime import platform
from twisted.python.failure import Failure
from twisted.python import log
from twisted.trial.unittest import SkipTest, TestCase
from twisted.internet.test.reactormixins import ReactorBuilder
from twisted.internet.error import (
ConnectionLost, UserError, ConnectionRefusedError, ConnectionDone,
ConnectionAborted)
from twisted.internet.interfaces import (
ILoggingContext, IConnector, IReactorFDSet, IReactorSocket, IReactorTCP)
from twisted.internet.address import IPv4Address, IPv6Address
from twisted.internet.defer import (
Deferred, DeferredList, maybeDeferred, gatherResults)
from twisted.internet._endpointspy3 import (
TCP4ServerEndpoint, TCP4ClientEndpoint)
from twisted.internet.protocol import ServerFactory, ClientFactory, Protocol
from twisted.internet.interfaces import (
IPushProducer, IPullProducer, IHalfCloseableProtocol)
from twisted.internet.tcp import Connection, Server, _resolveIPv6
from twisted.internet.test.connectionmixins import (
LogObserverMixin, ConnectionTestsMixin, TCPClientTestsMixin, findFreePort,
ConnectableProtocol, EndpointCreator, runProtocolsWithReactor)
from twisted.internet.test.test_core import ObjectModelIntegrationMixin
from twisted.test.test_tcp import MyClientFactory, MyServerFactory
from twisted.test.test_tcp import ClosingFactory, ClientStartStopFactory
try:
from OpenSSL import SSL
except ImportError:
useSSL = False
else:
from twisted.internet.ssl import ClientContextFactory
useSSL = True
try:
socket.socket(socket.AF_INET6, socket.SOCK_STREAM).close()
except socket.error as e:
ipv6Skip = str(e)
else:
ipv6Skip = None
if platform.isWindows():
from twisted.internet.test import _win32ifaces
getLinkLocalIPv6Addresses = _win32ifaces.win32GetLinkLocalIPv6Addresses
else:
try:
from twisted.internet.test import _posixifaces
except ImportError:
getLinkLocalIPv6Addresses = lambda: []
else:
getLinkLocalIPv6Addresses = _posixifaces.posixGetLinkLocalIPv6Addresses
def getLinkLocalIPv6Address():
"""
Find and return a configured link local IPv6 address including a scope
identifier using the % separation syntax. If the system has no link local
IPv6 addresses, raise L{SkipTest} instead.
@raise SkipTest: if no link local address can be found or if the
C{netifaces} module is not available.
@return: a C{str} giving the address
"""
addresses = getLinkLocalIPv6Addresses()
if addresses:
return addresses[0]
raise SkipTest("Link local IPv6 address unavailable")
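# Editorial sketch (the interface name 'eth0' is an illustrative assumption):
# link-local IPv6 addresses carry a scope identifier after a '%' separator,
# and the tests below rely on that form surviving address resolution.
def _scope_id_demo():
    addr = 'fe80::1%eth0'
    host, _, scope = addr.partition('%')
    assert host == 'fe80::1' and scope == 'eth0'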
def connect(client, destination):
"""
Connect a socket to the given destination.
@param client: A C{socket.socket}.
@param destination: A tuple of (host, port). The host is a C{str}, the
port a C{int}. If the C{host} is an IPv6 IP, the address is resolved
using C{getaddrinfo} and the first version found is used.
"""
(host, port) = destination
if '%' in host or ':' in host:
address = socket.getaddrinfo(host, port)[0][4]
else:
address = (host, port)
client.connect(address)
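# Editorial sketch (address and port below are assumptions): plain IPv4
# destinations skip the getaddrinfo() branch above, while IPv6 literals and
# scoped addresses are resolved first, e.g.:
#
#   client = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
#   connect(client, ('::1', 8080))  # resolved via getaddrinfo()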
class FakeSocket(object):
"""
A fake for L{socket.socket} objects.
@ivar data: A C{str} giving the data which will be returned from
L{FakeSocket.recv}.
@ivar sendBuffer: A C{list} of the objects passed to L{FakeSocket.send}.
"""
def __init__(self, data):
self.data = data
self.sendBuffer = []
def setblocking(self, blocking):
self.blocking = blocking
def recv(self, size):
return self.data
def send(self, bytes):
"""
I{Send} all of C{bytes} by accumulating it into C{self.sendBuffer}.
@return: The length of C{bytes}, indicating all the data has been
accepted.
"""
self.sendBuffer.append(bytes)
return len(bytes)
def shutdown(self, how):
"""
Shutdown is not implemented. The method is provided since real sockets
have it and some code expects it. No behavior of L{FakeSocket} is
affected by a call to it.
"""
def close(self):
"""
Close is not implemented. The method is provided since real sockets
have it and some code expects it. No behavior of L{FakeSocket} is
affected by a call to it.
"""
def setsockopt(self, *args):
"""
Setsockopt is not implemented. The method is provided since
real sockets have it and some code expects it. No behavior of
L{FakeSocket} is affected by a call to it.
"""
def fileno(self):
"""
Return a fake file descriptor. If actually used, this will have no
connection to this L{FakeSocket} and will probably cause surprising
results.
"""
return 1
class TestFakeSocket(TestCase):
"""
Test that the FakeSocket can be used by the doRead method of L{Connection}
"""
def test_blocking(self):
skt = FakeSocket(b"someData")
skt.setblocking(0)
self.assertEqual(skt.blocking, 0)
def test_recv(self):
skt = FakeSocket(b"someData")
self.assertEqual(skt.recv(10), b"someData")
def test_send(self):
"""
L{FakeSocket.send} accepts the entire string passed to it, adds it to
its send buffer, and returns its length.
"""
skt = FakeSocket(b"")
count = skt.send(b"foo")
self.assertEqual(count, 3)
self.assertEqual(skt.sendBuffer, [b"foo"])
class FakeProtocol(Protocol):
"""
An L{IProtocol} that returns a value from its dataReceived method.
"""
def dataReceived(self, data):
"""
Return something other than C{None} to trigger a deprecation warning for
that behavior.
"""
return ()
@implementer(IReactorFDSet)
class _FakeFDSetReactor(object):
"""
A no-op implementation of L{IReactorFDSet}, which ignores all adds and
removes.
"""
addReader = addWriter = removeReader = removeWriter = (
lambda self, desc: None)
class TCPServerTests(TestCase):
"""
Whitebox tests for L{twisted.internet.tcp.Server}.
"""
def setUp(self):
self.reactor = _FakeFDSetReactor()
class FakePort(object):
_realPortNumber = 3
self.skt = FakeSocket(b"")
self.protocol = Protocol()
self.server = Server(
self.skt, self.protocol, ("", 0), FakePort(), None, self.reactor)
def test_writeAfterDisconnect(self):
"""
L{Server.write} discards bytes passed to it if called after it has lost
its connection.
"""
self.server.connectionLost(
Failure(Exception("Simulated lost connection")))
self.server.write(b"hello world")
self.assertEqual(self.skt.sendBuffer, [])
    def test_writeAfterDisconnectAfterTLS(self):
"""
L{Server.write} discards bytes passed to it if called after it has lost
its connection when the connection had started TLS.
"""
self.server.TLS = True
self.test_writeAfterDisconnect()
def test_writeSequenceAfterDisconnect(self):
"""
L{Server.writeSequence} discards bytes passed to it if called after it
has lost its connection.
"""
self.server.connectionLost(
Failure(Exception("Simulated lost connection")))
self.server.writeSequence([b"hello world"])
self.assertEqual(self.skt.sendBuffer, [])
    def test_writeSequenceAfterDisconnectAfterTLS(self):
"""
L{Server.writeSequence} discards bytes passed to it if called after it
has lost its connection when the connection had started TLS.
"""
self.server.TLS = True
self.test_writeSequenceAfterDisconnect()
class TCPConnectionTests(TestCase):
"""
Whitebox tests for L{twisted.internet.tcp.Connection}.
"""
def test_doReadWarningIsRaised(self):
"""
        When an L{IProtocol} implementation returns a value from its
        C{dataReceived} method, a deprecation warning is emitted.
"""
skt = FakeSocket(b"someData")
protocol = FakeProtocol()
conn = Connection(skt, protocol)
conn.doRead()
warnings = self.flushWarnings([FakeProtocol.dataReceived])
self.assertEqual(warnings[0]['category'], DeprecationWarning)
self.assertEqual(
warnings[0]["message"],
"Returning a value other than None from "
"twisted.internet.test.test_tcp.FakeProtocol.dataReceived "
"is deprecated since Twisted 11.0.0.")
self.assertEqual(len(warnings), 1)
def test_noTLSBeforeStartTLS(self):
"""
The C{TLS} attribute of a L{Connection} instance is C{False} before
L{Connection.startTLS} is called.
"""
skt = FakeSocket(b"")
protocol = FakeProtocol()
conn = Connection(skt, protocol)
self.assertFalse(conn.TLS)
def test_tlsAfterStartTLS(self):
"""
The C{TLS} attribute of a L{Connection} instance is C{True} after
L{Connection.startTLS} is called.
"""
skt = FakeSocket(b"")
protocol = FakeProtocol()
conn = Connection(skt, protocol, reactor=_FakeFDSetReactor())
conn._tlsClientDefault = True
conn.startTLS(ClientContextFactory(), True)
self.assertTrue(conn.TLS)
if not useSSL:
test_tlsAfterStartTLS.skip = "No SSL support available"
class TCPCreator(EndpointCreator):
"""
Create IPv4 TCP endpoints for L{runProtocolsWithReactor}-based tests.
"""
interface = "127.0.0.1"
def server(self, reactor):
"""
Create a server-side TCP endpoint.
"""
return TCP4ServerEndpoint(reactor, 0, interface=self.interface)
def client(self, reactor, serverAddress):
"""
Create a client end point that will connect to the given address.
@type serverAddress: L{IPv4Address}
"""
return TCP4ClientEndpoint(reactor, self.interface, serverAddress.port)
class TCP6Creator(TCPCreator):
"""
Create IPv6 TCP endpoints for
C{ReactorBuilder.runProtocolsWithReactor}-based tests.
The endpoint types in question here are still the TCP4 variety, since
these simply pass through IPv6 address literals to the reactor, and we are
only testing address literals, not name resolution (as name resolution has
not yet been implemented). See http://twistedmatrix.com/trac/ticket/4470
for more specific information about new endpoint classes. The naming is
slightly misleading, but presumably if you're passing an IPv6 literal, you
know what you're asking for.
"""
def __init__(self):
self.interface = getLinkLocalIPv6Address()
class TCPClientTestsBase(ReactorBuilder, ConnectionTestsMixin,
TCPClientTestsMixin):
"""
Base class for builders defining tests related to L{IReactorTCP.connectTCP}.
"""
requiredInterfaces = (IReactorTCP,)
port = 1234
@property
def interface(self):
"""
Return the interface attribute from the endpoints object.
"""
return self.endpoints.interface
class TCP4ClientTestsBuilder(TCPClientTestsBase):
"""
Builder configured with IPv4 parameters for tests related to
L{IReactorTCP.connectTCP}.
"""
fakeDomainName = 'some-fake.domain.example.com'
family = socket.AF_INET
addressClass = IPv4Address
endpoints = TCPCreator()
class TCP6ClientTestsBuilder(TCPClientTestsBase):
"""
Builder configured with IPv6 parameters for tests related to
L{IReactorTCP.connectTCP}.
"""
if ipv6Skip:
skip = ipv6Skip
family = socket.AF_INET6
addressClass = IPv6Address
def setUp(self):
# Only create this object here, so that it won't be created if tests
# are being skipped:
self.endpoints = TCP6Creator()
# This is used by test_addresses to test the distinction between the
# resolved name and the name on the socket itself. All the same
# invariants should hold, but giving back an IPv6 address from a
# resolver is not something the reactor can handle, so instead, we make
# it so that the connect call for the IPv6 address test simply uses an
# address literal.
self.fakeDomainName = self.endpoints.interface
class TCPConnectorTestsBuilder(ReactorBuilder):
"""
Tests for the L{IConnector} provider returned by L{IReactorTCP.connectTCP}.
"""
requiredInterfaces = (IReactorTCP,)
def test_connectorIdentity(self):
"""
L{IReactorTCP.connectTCP} returns an object which provides
L{IConnector}. The destination of the connector is the address which
was passed to C{connectTCP}. The same connector object is passed to
the factory's C{startedConnecting} method as to the factory's
C{clientConnectionLost} method.
"""
serverFactory = ClosingFactory()
reactor = self.buildReactor()
tcpPort = reactor.listenTCP(0, serverFactory, interface=self.interface)
serverFactory.port = tcpPort
portNumber = tcpPort.getHost().port
seenConnectors = []
seenFailures = []
clientFactory = ClientStartStopFactory()
clientFactory.clientConnectionLost = (
lambda connector, reason: (seenConnectors.append(connector),
seenFailures.append(reason)))
clientFactory.startedConnecting = seenConnectors.append
connector = reactor.connectTCP(self.interface, portNumber,
clientFactory)
self.assertTrue(IConnector.providedBy(connector))
dest = connector.getDestination()
self.assertEqual(dest.type, "TCP")
self.assertEqual(dest.host, self.interface)
self.assertEqual(dest.port, portNumber)
clientFactory.whenStopped.addBoth(lambda _: reactor.stop())
self.runReactor(reactor)
seenFailures[0].trap(ConnectionDone)
self.assertEqual(seenConnectors, [connector, connector])
def test_userFail(self):
"""
Calling L{IConnector.stopConnecting} in C{Factory.startedConnecting}
results in C{Factory.clientConnectionFailed} being called with
L{error.UserError} as the reason.
"""
serverFactory = MyServerFactory()
reactor = self.buildReactor()
tcpPort = reactor.listenTCP(0, serverFactory, interface=self.interface)
portNumber = tcpPort.getHost().port
fatalErrors = []
def startedConnecting(connector):
try:
connector.stopConnecting()
except Exception:
fatalErrors.append(Failure())
reactor.stop()
clientFactory = ClientStartStopFactory()
clientFactory.startedConnecting = startedConnecting
clientFactory.whenStopped.addBoth(lambda _: reactor.stop())
reactor.callWhenRunning(lambda: reactor.connectTCP(self.interface,
portNumber,
clientFactory))
self.runReactor(reactor)
if fatalErrors:
self.fail(fatalErrors[0].getTraceback())
clientFactory.reason.trap(UserError)
self.assertEqual(clientFactory.failed, 1)
def test_reconnect(self):
"""
Calling L{IConnector.connect} in C{Factory.clientConnectionLost} causes
a new connection attempt to be made.
"""
serverFactory = ClosingFactory()
reactor = self.buildReactor()
tcpPort = reactor.listenTCP(0, serverFactory, interface=self.interface)
serverFactory.port = tcpPort
portNumber = tcpPort.getHost().port
clientFactory = MyClientFactory()
def clientConnectionLost(connector, reason):
connector.connect()
clientFactory.clientConnectionLost = clientConnectionLost
reactor.connectTCP(self.interface, portNumber, clientFactory)
protocolMadeAndClosed = []
def reconnectFailed(ignored):
p = clientFactory.protocol
protocolMadeAndClosed.append((p.made, p.closed))
reactor.stop()
clientFactory.failDeferred.addCallback(reconnectFailed)
self.runReactor(reactor)
clientFactory.reason.trap(ConnectionRefusedError)
self.assertEqual(protocolMadeAndClosed, [(1, 1)])
class TCP4ConnectorTestsBuilder(TCPConnectorTestsBuilder):
interface = '127.0.0.1'
family = socket.AF_INET
addressClass = IPv4Address
class TCP6ConnectorTestsBuilder(TCPConnectorTestsBuilder):
family = socket.AF_INET6
addressClass = IPv6Address
if ipv6Skip:
skip = ipv6Skip
def setUp(self):
self.interface = getLinkLocalIPv6Address()
def createTestSocket(test, addressFamily, socketType):
"""
Create a socket for the duration of the given test.
@param test: the test to add cleanup to.
@param addressFamily: an C{AF_*} constant
@param socketType: a C{SOCK_*} constant.
@return: a socket object.
"""
skt = socket.socket(addressFamily, socketType)
test.addCleanup(skt.close)
return skt
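# Editorial sketch (helper name and usage are assumptions, not Twisted API):
# createTestSocket ties the socket's lifetime to the test via addCleanup, so
# test methods cannot leak file descriptors even when an assertion fails.
def _createTestSocketExample(testCase):
    # Returns an IPv4 stream socket that `testCase` will close in cleanup.
    return createTestSocket(testCase, socket.AF_INET, socket.SOCK_STREAM)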
class StreamTransportTestsMixin(LogObserverMixin):
"""
Mixin defining tests which apply to any port/connection based transport.
"""
def test_startedListeningLogMessage(self):
"""
When a port starts, a message including a description of the associated
factory is logged.
"""
loggedMessages = self.observe()
reactor = self.buildReactor()
@implementer(ILoggingContext)
class SomeFactory(ServerFactory):
def logPrefix(self):
return "Crazy Factory"
factory = SomeFactory()
p = self.getListeningPort(reactor, factory)
expectedMessage = self.getExpectedStartListeningLogMessage(
p, "Crazy Factory")
self.assertEqual((expectedMessage,), loggedMessages[0]['message'])
def test_connectionLostLogMsg(self):
"""
When a connection is lost, an informative message should be logged
(see L{getExpectedConnectionLostLogMsg}): an address identifying
the port and the fact that it was closed.
"""
loggedMessages = []
def logConnectionLostMsg(eventDict):
loggedMessages.append(log.textFromEventDict(eventDict))
reactor = self.buildReactor()
p = self.getListeningPort(reactor, ServerFactory())
expectedMessage = self.getExpectedConnectionLostLogMsg(p)
log.addObserver(logConnectionLostMsg)
def stopReactor(ignored):
log.removeObserver(logConnectionLostMsg)
reactor.stop()
def doStopListening():
log.addObserver(logConnectionLostMsg)
maybeDeferred(p.stopListening).addCallback(stopReactor)
reactor.callWhenRunning(doStopListening)
reactor.run()
self.assertIn(expectedMessage, loggedMessages)
def test_allNewStyle(self):
"""
The L{IListeningPort} object is an instance of a class with no
classic classes in its hierarchy.
"""
reactor = self.buildReactor()
port = self.getListeningPort(reactor, ServerFactory())
self.assertFullyNewStyle(port)
class ListenTCPMixin(object):
"""
Mixin which uses L{IReactorTCP.listenTCP} to hand out listening TCP ports.
"""
def getListeningPort(self, reactor, factory, port=0, interface=''):
"""
Get a TCP port from a reactor.
"""
return reactor.listenTCP(port, factory, interface=interface)
class SocketTCPMixin(object):
"""
Mixin which uses L{IReactorSocket.adoptStreamPort} to hand out listening TCP
ports.
"""
def getListeningPort(self, reactor, factory, port=0, interface=''):
"""
Get a TCP port from a reactor, wrapping an already-initialized file
descriptor.
"""
if IReactorSocket.providedBy(reactor):
if ':' in interface:
domain = socket.AF_INET6
address = socket.getaddrinfo(interface, port)[0][4]
else:
domain = socket.AF_INET
address = (interface, port)
portSock = socket.socket(domain)
portSock.bind(address)
portSock.listen(3)
portSock.setblocking(False)
try:
return reactor.adoptStreamPort(
portSock.fileno(), portSock.family, factory)
finally:
# The socket should still be open; fileno will raise if it is
# not.
portSock.fileno()
# Now clean it up, because the rest of the test does not need
# it.
portSock.close()
else:
raise SkipTest("Reactor does not provide IReactorSocket")
class TCPPortTestsMixin(object):
"""
Tests for L{IReactorTCP.listenTCP}
"""
requiredInterfaces = (IReactorTCP,)
def getExpectedStartListeningLogMessage(self, port, factory):
"""
Get the message expected to be logged when a TCP port starts listening.
"""
return "%s starting on %d" % (
factory, port.getHost().port)
def getExpectedConnectionLostLogMsg(self, port):
"""
Get the expected connection lost message for a TCP port.
"""
return "(TCP Port %s Closed)" % (port.getHost().port,)
def test_portGetHostOnIPv4(self):
"""
When no interface is passed to L{IReactorTCP.listenTCP}, the returned
listening port listens on an IPv4 address.
"""
reactor = self.buildReactor()
port = self.getListeningPort(reactor, ServerFactory())
address = port.getHost()
self.assertIsInstance(address, IPv4Address)
def test_portGetHostOnIPv6(self):
"""
When listening on an IPv6 address, L{IListeningPort.getHost} returns
an L{IPv6Address} with C{host} and C{port} attributes reflecting the
address the port is bound to.
"""
reactor = self.buildReactor()
host, portNumber = findFreePort(
family=socket.AF_INET6, interface='::1')[:2]
port = self.getListeningPort(
reactor, ServerFactory(), portNumber, host)
address = port.getHost()
self.assertIsInstance(address, IPv6Address)
self.assertEqual('::1', address.host)
self.assertEqual(portNumber, address.port)
if ipv6Skip:
test_portGetHostOnIPv6.skip = ipv6Skip
def test_portGetHostOnIPv6ScopeID(self):
"""
When a link-local IPv6 address including a scope identifier is passed as
the C{interface} argument to L{IReactorTCP.listenTCP}, the resulting
L{IListeningPort} reports its address as an L{IPv6Address} with a host
value that includes the scope identifier.
"""
linkLocal = getLinkLocalIPv6Address()
reactor = self.buildReactor()
port = self.getListeningPort(reactor, ServerFactory(), 0, linkLocal)
address = port.getHost()
self.assertIsInstance(address, IPv6Address)
self.assertEqual(linkLocal, address.host)
if ipv6Skip:
test_portGetHostOnIPv6ScopeID.skip = ipv6Skip
def _buildProtocolAddressTest(self, client, interface):
"""
Connect C{client} to a server listening on C{interface} started with
L{IReactorTCP.listenTCP} and return the address passed to the factory's
C{buildProtocol} method.
@param client: A C{SOCK_STREAM} L{socket.socket} created with an address
family such that it will be able to connect to a server listening on
C{interface}.
@param interface: A C{str} giving an address for a server to listen on.
This should almost certainly be the loopback address for some
address family supported by L{IReactorTCP.listenTCP}.
@return: Whatever object, probably an L{IAddress} provider, is passed to
a server factory's C{buildProtocol} method when C{client}
establishes a connection.
"""
class ObserveAddress(ServerFactory):
def buildProtocol(self, address):
reactor.stop()
self.observedAddress = address
return Protocol()
factory = ObserveAddress()
reactor = self.buildReactor()
port = self.getListeningPort(reactor, factory, 0, interface)
client.setblocking(False)
try:
connect(client, (port.getHost().host, port.getHost().port))
except socket.error as e:
errnum, message = e.args
self.assertIn(errnum, (errno.EINPROGRESS, errno.EWOULDBLOCK))
self.runReactor(reactor)
return factory.observedAddress
def test_buildProtocolIPv4Address(self):
"""
When a connection is accepted over IPv4, an L{IPv4Address} is passed
to the factory's C{buildProtocol} method giving the peer's address.
"""
interface = '127.0.0.1'
client = createTestSocket(self, socket.AF_INET, socket.SOCK_STREAM)
observedAddress = self._buildProtocolAddressTest(client, interface)
self.assertEqual(
IPv4Address('TCP', *client.getsockname()), observedAddress)
def test_buildProtocolIPv6Address(self):
"""
When a connection is accepted to an IPv6 address, an L{IPv6Address} is
passed to the factory's C{buildProtocol} method giving the peer's
address.
"""
interface = '::1'
client = createTestSocket(self, socket.AF_INET6, socket.SOCK_STREAM)
observedAddress = self._buildProtocolAddressTest(client, interface)
self.assertEqual(
IPv6Address('TCP', *client.getsockname()[:2]), observedAddress)
if ipv6Skip:
test_buildProtocolIPv6Address.skip = ipv6Skip
def test_buildProtocolIPv6AddressScopeID(self):
"""
When a connection is accepted to a link-local IPv6 address, an
L{IPv6Address} is passed to the factory's C{buildProtocol} method
giving the peer's address, including a scope identifier.
"""
interface = getLinkLocalIPv6Address()
client = createTestSocket(self, socket.AF_INET6, socket.SOCK_STREAM)
observedAddress = self._buildProtocolAddressTest(client, interface)
self.assertEqual(
IPv6Address('TCP', *client.getsockname()[:2]), observedAddress)
if ipv6Skip:
test_buildProtocolIPv6AddressScopeID.skip = ipv6Skip
def _serverGetConnectionAddressTest(self, client, interface, which):
"""
Connect C{client} to a server listening on C{interface} started with
L{IReactorTCP.listenTCP} and return the address returned by one of the
server transport's address lookup methods, C{getHost} or C{getPeer}.
@param client: A C{SOCK_STREAM} L{socket.socket} created with an address
family such that it will be able to connect to a server listening on
C{interface}.
@param interface: A C{str} giving an address for a server to listen on.
This should almost certainly be the loopback address for some
address family supported by L{IReactorTCP.listenTCP}.
@param which: A C{str} equal to either C{"getHost"} or C{"getPeer"}
determining which address will be returned.
@return: Whatever object, probably an L{IAddress} provider, is returned
from the method indicated by C{which}.
"""
class ObserveAddress(Protocol):
def makeConnection(self, transport):
reactor.stop()
self.factory.address = getattr(transport, which)()
reactor = self.buildReactor()
factory = ServerFactory()
factory.protocol = ObserveAddress
port = self.getListeningPort(reactor, factory, 0, interface)
client.setblocking(False)
try:
connect(client, (port.getHost().host, port.getHost().port))
except socket.error as e:
errnum, message = e.args
self.assertIn(errnum, (errno.EINPROGRESS, errno.EWOULDBLOCK))
self.runReactor(reactor)
return factory.address
def test_serverGetHostOnIPv4(self):
"""
When a connection is accepted over IPv4, the server
L{ITransport.getHost} method returns an L{IPv4Address} giving the
address on which the server accepted the connection.
"""
interface = '127.0.0.1'
client = createTestSocket(self, socket.AF_INET, socket.SOCK_STREAM)
hostAddress = self._serverGetConnectionAddressTest(
client, interface, 'getHost')
self.assertEqual(
IPv4Address('TCP', *client.getpeername()), hostAddress)
def test_serverGetHostOnIPv6(self):
"""
When a connection is accepted over IPv6, the server
L{ITransport.getHost} method returns an L{IPv6Address} giving the
address on which the server accepted the connection.
"""
interface = '::1'
client = createTestSocket(self, socket.AF_INET6, socket.SOCK_STREAM)
hostAddress = self._serverGetConnectionAddressTest(
client, interface, 'getHost')
self.assertEqual(
IPv6Address('TCP', *client.getpeername()[:2]), hostAddress)
if ipv6Skip:
test_serverGetHostOnIPv6.skip = ipv6Skip
def test_serverGetHostOnIPv6ScopeID(self):
"""
When a connection is accepted over IPv6, the server
L{ITransport.getHost} method returns an L{IPv6Address} giving the
address on which the server accepted the connection, including the scope
identifier.
"""
interface = getLinkLocalIPv6Address()
client = createTestSocket(self, socket.AF_INET6, socket.SOCK_STREAM)
hostAddress = self._serverGetConnectionAddressTest(
client, interface, 'getHost')
self.assertEqual(
IPv6Address('TCP', *client.getpeername()[:2]), hostAddress)
if ipv6Skip:
test_serverGetHostOnIPv6ScopeID.skip = ipv6Skip
def test_serverGetPeerOnIPv4(self):
"""
When a connection is accepted over IPv4, the server
L{ITransport.getPeer} method returns an L{IPv4Address} giving the
address of the remote end of the connection.
"""
interface = '127.0.0.1'
client = createTestSocket(self, socket.AF_INET, socket.SOCK_STREAM)
peerAddress = self._serverGetConnectionAddressTest(
client, interface, 'getPeer')
self.assertEqual(
IPv4Address('TCP', *client.getsockname()), peerAddress)
def test_serverGetPeerOnIPv6(self):
"""
When a connection is accepted over IPv6, the server
L{ITransport.getPeer} method returns an L{IPv6Address} giving the
address on the remote end of the connection.
"""
interface = '::1'
client = createTestSocket(self, socket.AF_INET6, socket.SOCK_STREAM)
peerAddress = self._serverGetConnectionAddressTest(
client, interface, 'getPeer')
self.assertEqual(
IPv6Address('TCP', *client.getsockname()[:2]), peerAddress)
if ipv6Skip:
test_serverGetPeerOnIPv6.skip = ipv6Skip
def test_serverGetPeerOnIPv6ScopeID(self):
"""
When a connection is accepted over IPv6, the server
L{ITransport.getPeer} method returns an L{IPv6Address} giving the
address on the remote end of the connection, including the scope
identifier.
"""
interface = getLinkLocalIPv6Address()
client = createTestSocket(self, socket.AF_INET6, socket.SOCK_STREAM)
peerAddress = self._serverGetConnectionAddressTest(
client, interface, 'getPeer')
self.assertEqual(
IPv6Address('TCP', *client.getsockname()[:2]), peerAddress)
if ipv6Skip:
test_serverGetPeerOnIPv6ScopeID.skip = ipv6Skip
class TCPPortTestsBuilder(ReactorBuilder, ListenTCPMixin, TCPPortTestsMixin,
ObjectModelIntegrationMixin,
StreamTransportTestsMixin):
pass
class TCPFDPortTestsBuilder(ReactorBuilder, SocketTCPMixin, TCPPortTestsMixin,
ObjectModelIntegrationMixin,
StreamTransportTestsMixin):
pass
class StopStartReadingProtocol(Protocol):
"""
Protocol that pauses and resumes the transport a few times
"""
def connectionMade(self):
self.data = b''
self.pauseResumeProducing(3)
def pauseResumeProducing(self, counter):
"""
Toggle transport read state, then count down.
"""
self.transport.pauseProducing()
self.transport.resumeProducing()
if counter:
self.factory.reactor.callLater(0,
self.pauseResumeProducing, counter - 1)
else:
self.factory.reactor.callLater(0,
self.factory.ready.callback, self)
def dataReceived(self, data):
log.msg('got data', len(data))
self.data += data
if len(self.data) == 4*4096:
self.factory.stop.callback(self.data)
class TCPConnectionTestsBuilder(ReactorBuilder):
"""
Builder defining tests relating to L{twisted.internet.tcp.Connection}.
"""
requiredInterfaces = (IReactorTCP,)
def test_stopStartReading(self):
"""
This test verifies transport socket read state after multiple
pause/resumeProducing calls.
"""
sf = ServerFactory()
reactor = sf.reactor = self.buildReactor()
skippedReactors = ["Glib2Reactor", "Gtk2Reactor"]
reactorClassName = reactor.__class__.__name__
if reactorClassName in skippedReactors and platform.isWindows():
raise SkipTest(
"This test is broken on gtk/glib under Windows.")
sf.protocol = StopStartReadingProtocol
sf.ready = Deferred()
sf.stop = Deferred()
p = reactor.listenTCP(0, sf)
port = p.getHost().port
def proceed(protos, port):
"""
Send several IOCPReactor's buffers' worth of data.
"""
self.assertTrue(protos[0])
self.assertTrue(protos[1])
protos = protos[0][1], protos[1][1]
protos[0].transport.write(b'x' * (2 * 4096) + b'y' * (2 * 4096))
return (sf.stop.addCallback(cleanup, protos, port)
.addCallback(lambda ign: reactor.stop()))
def cleanup(data, protos, port):
"""
Make sure IOCPReactor didn't start several WSARecv operations
that clobbered each other's results.
"""
self.assertEqual(data, b'x'*(2*4096) + b'y'*(2*4096),
'did not get the right data')
return DeferredList([
maybeDeferred(protos[0].transport.loseConnection),
maybeDeferred(protos[1].transport.loseConnection),
maybeDeferred(port.stopListening)])
cc = TCP4ClientEndpoint(reactor, '127.0.0.1', port)
cf = ClientFactory()
cf.protocol = Protocol
d = DeferredList([cc.connect(cf), sf.ready]).addCallback(proceed, p)
d.addErrback(log.err)
self.runReactor(reactor)
def test_connectionLostAfterPausedTransport(self):
"""
Alice connects to Bob. Alice writes some bytes and then shuts down the
connection. Bob receives the bytes from the connection and then pauses
the transport object. Shortly afterwards Bob resumes the transport
object. At that point, Bob is notified that the connection has been
closed.
This is no problem for most reactors. The underlying event notification
API will probably just remind them that the connection has been closed.
It is a little tricky for win32eventreactor (MsgWaitForMultipleObjects).
MsgWaitForMultipleObjects will only deliver the close notification once.
The reactor needs to remember that notification until Bob resumes the
transport.
"""
class Pauser(ConnectableProtocol):
def __init__(self):
self.events = []
def dataReceived(self, bytes):
self.events.append("paused")
self.transport.pauseProducing()
self.reactor.callLater(0, self.resume)
def resume(self):
self.events.append("resumed")
self.transport.resumeProducing()
def connectionLost(self, reason):
# This is the event you have been waiting for.
self.events.append("lost")
ConnectableProtocol.connectionLost(self, reason)
class Client(ConnectableProtocol):
def connectionMade(self):
self.transport.write(b"some bytes for you")
self.transport.loseConnection()
pauser = Pauser()
runProtocolsWithReactor(self, pauser, Client(), TCPCreator())
self.assertEqual(pauser.events, ["paused", "resumed", "lost"])
def test_doubleHalfClose(self):
"""
If one side half-closes its connection, and then the other side of the
connection calls C{loseWriteConnection}, and then C{loseConnection} in
        C{writeConnectionLost}, the connection is closed correctly.
This rather obscure case used to fail (see ticket #3037).
"""
@implementer(IHalfCloseableProtocol)
class ListenerProtocol(ConnectableProtocol):
def readConnectionLost(self):
self.transport.loseWriteConnection()
def writeConnectionLost(self):
self.transport.loseConnection()
class Client(ConnectableProtocol):
def connectionMade(self):
self.transport.loseConnection()
# If test fails, reactor won't stop and we'll hit timeout:
runProtocolsWithReactor(
self, ListenerProtocol(), Client(), TCPCreator())
class WriteSequenceTestsMixin(object):
"""
Test for L{twisted.internet.abstract.FileDescriptor.writeSequence}.
"""
requiredInterfaces = (IReactorTCP,)
def setWriteBufferSize(self, transport, value):
"""
        Set the write buffer size for the given transport, managing possible
        differences (i.e., IOCP). Bug #4322 should remove the need for this hack.
"""
if getattr(transport, "writeBufferSize", None) is not None:
transport.writeBufferSize = value
else:
transport.bufferSize = value
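    # Editorial note: IOCP-based transports expose C{writeBufferSize} while
    # the select/poll transports expose C{bufferSize}; the helper above probes
    # for the first and falls back to the second so the producer tests below
    # can shrink the buffer (e.g. to 500 bytes) and force a quick
    # pauseProducing().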
    def test_writeSequenceWithoutWrite(self):
"""
C{writeSequence} sends the data even if C{write} hasn't been called.
"""
def connected(protocols):
client, server, port = protocols
def dataReceived(data):
log.msg("data received: %r" % data)
self.assertEqual(data, b"Some sequence splitted")
client.transport.loseConnection()
server.dataReceived = dataReceived
client.transport.writeSequence([b"Some ", b"sequence ", b"splitted"])
reactor = self.buildReactor()
d = self.getConnectedClientAndServer(reactor, "127.0.0.1",
socket.AF_INET)
d.addCallback(connected)
d.addErrback(log.err)
self.runReactor(reactor)
def test_writeSequenceWithUnicodeRaisesException(self):
"""
C{writeSequence} with an element in the sequence of type unicode raises
C{TypeError}.
"""
def connected(protocols):
client, server, port = protocols
exc = self.assertRaises(
TypeError,
server.transport.writeSequence, [u"Unicode is not kosher"])
self.assertEqual(str(exc), "Data must not be unicode")
server.transport.loseConnection()
reactor = self.buildReactor()
d = self.getConnectedClientAndServer(reactor, "127.0.0.1",
socket.AF_INET)
d.addCallback(connected)
d.addErrback(log.err)
self.runReactor(reactor)
def test_streamingProducer(self):
"""
C{writeSequence} pauses its streaming producer if too much data is
buffered, and then resumes it.
"""
@implementer(IPushProducer)
class SaveActionProducer(object):
client = None
server = None
def __init__(self):
self.actions = []
def pauseProducing(self):
self.actions.append("pause")
def resumeProducing(self):
self.actions.append("resume")
# Unregister the producer so the connection can close
self.client.transport.unregisterProducer()
# This is why the code below waits for the server connection
# first - so we have it to close here. We close the server
                # side because win32eventreactor cannot reliably observe us
# closing the client side (#5285).
self.server.transport.loseConnection()
def stopProducing(self):
self.actions.append("stop")
producer = SaveActionProducer()
def connected(protocols):
client, server = protocols[:2]
producer.client = client
producer.server = server
# Register a streaming producer and verify that it gets paused
# after it writes more than the local send buffer can hold.
client.transport.registerProducer(producer, True)
self.assertEqual(producer.actions, [])
self.setWriteBufferSize(client.transport, 500)
client.transport.writeSequence([b"x" * 50] * 20)
self.assertEqual(producer.actions, ["pause"])
reactor = self.buildReactor()
d = self.getConnectedClientAndServer(reactor, "127.0.0.1",
socket.AF_INET)
d.addCallback(connected)
d.addErrback(log.err)
self.runReactor(reactor)
# After the send buffer gets a chance to empty out a bit, the producer
# should be resumed.
self.assertEqual(producer.actions, ["pause", "resume"])
def test_nonStreamingProducer(self):
"""
C{writeSequence} pauses its producer if too much data is buffered only
if this is a streaming producer.
"""
test = self
@implementer(IPullProducer)
class SaveActionProducer(object):
client = None
def __init__(self):
self.actions = []
def resumeProducing(self):
self.actions.append("resume")
if self.actions.count("resume") == 2:
self.client.transport.stopConsuming()
else:
test.setWriteBufferSize(self.client.transport, 500)
self.client.transport.writeSequence([b"x" * 50] * 20)
def stopProducing(self):
self.actions.append("stop")
producer = SaveActionProducer()
def connected(protocols):
client = protocols[0]
producer.client = client
# Register a non-streaming producer and verify that it is resumed
# immediately.
client.transport.registerProducer(producer, False)
self.assertEqual(producer.actions, ["resume"])
reactor = self.buildReactor()
d = self.getConnectedClientAndServer(reactor, "127.0.0.1",
socket.AF_INET)
d.addCallback(connected)
d.addErrback(log.err)
self.runReactor(reactor)
# After the local send buffer empties out, the producer should be
# resumed again.
self.assertEqual(producer.actions, ["resume", "resume"])
class TCPTransportServerAddressTestMixin(object):
"""
    Test mixin for TCP server address building and log prefix.
"""
def getConnectedClientAndServer(self, reactor, interface, addressFamily):
"""
        Helper method returning a L{Deferred} firing with a tuple of a client
protocol, a server protocol, and a running TCP port.
"""
raise NotImplementedError()
    def _testServerAddress(self, interface, addressFamily, addressClass):
"""
Helper method to test TCP server addresses on either IPv4 or IPv6.
"""
def connected(protocols):
client, server, port = protocols
try:
self.assertEqual(
"<AccumulatingProtocol #%s on %s>" %
(server.transport.sessionno, port.getHost().port),
str(server.transport))
self.assertEqual(
"AccumulatingProtocol,%s,%s" %
(server.transport.sessionno, interface),
server.transport.logstr)
[peerAddress] = server.factory.peerAddresses
                self.assertIsInstance(peerAddress, addressClass)
self.assertEqual('TCP', peerAddress.type)
self.assertEqual(interface, peerAddress.host)
finally:
# Be certain to drop the connection so the test completes.
server.transport.loseConnection()
reactor = self.buildReactor()
d = self.getConnectedClientAndServer(reactor, interface, addressFamily)
d.addCallback(connected)
d.addErrback(log.err)
self.runReactor(reactor)
def test_serverAddressTCP4(self):
"""
L{Server} instances have a string representation indicating on which
port they're running, and the connected address is stored on the
C{peerAddresses} attribute of the factory.
"""
return self._testServerAddress("127.0.0.1", socket.AF_INET,
IPv4Address)
def test_serverAddressTCP6(self):
"""
IPv6 L{Server} instances have a string representation indicating on
which port they're running, and the connected address is stored on the
C{peerAddresses} attribute of the factory.
"""
return self._testServerAddress(getLinkLocalIPv6Address(),
socket.AF_INET6, IPv6Address)
if ipv6Skip:
test_serverAddressTCP6.skip = ipv6Skip
class TCPTransportTestsBuilder(TCPTransportServerAddressTestMixin,
WriteSequenceTestsMixin, ReactorBuilder):
"""
Test standard L{ITCPTransport}s built with C{listenTCP} and C{connectTCP}.
"""
def getConnectedClientAndServer(self, reactor, interface, addressFamily):
"""
Return a L{Deferred} firing with a L{MyClientFactory} and
L{MyServerFactory} connected pair, and the listening C{Port}.
"""
server = MyServerFactory()
server.protocolConnectionMade = Deferred()
server.protocolConnectionLost = Deferred()
client = MyClientFactory()
client.protocolConnectionMade = Deferred()
client.protocolConnectionLost = Deferred()
port = reactor.listenTCP(0, server, interface=interface)
lostDeferred = gatherResults([client.protocolConnectionLost,
server.protocolConnectionLost])
def stop(result):
reactor.stop()
return result
lostDeferred.addBoth(stop)
startDeferred = gatherResults([client.protocolConnectionMade,
server.protocolConnectionMade])
deferred = Deferred()
def start(protocols):
client, server = protocols
log.msg("client connected %s" % client)
log.msg("server connected %s" % server)
deferred.callback((client, server, port))
startDeferred.addCallback(start)
reactor.connectTCP(interface, port.getHost().port, client)
return deferred
class AdoptStreamConnectionTestsBuilder(TCPTransportServerAddressTestMixin,
WriteSequenceTestsMixin,
ReactorBuilder):
"""
Test server transports built using C{adoptStreamConnection}.
"""
requiredInterfaces = (IReactorFDSet, IReactorSocket)
def getConnectedClientAndServer(self, reactor, interface, addressFamily):
"""
Return a L{Deferred} firing with a L{MyClientFactory} and
L{MyServerFactory} connected pair, and the listening C{Port}. The
particularity is that the server protocol has been obtained after doing
a C{adoptStreamConnection} against the original server connection.
"""
firstServer = MyServerFactory()
firstServer.protocolConnectionMade = Deferred()
server = MyServerFactory()
server.protocolConnectionMade = Deferred()
server.protocolConnectionLost = Deferred()
client = MyClientFactory()
client.protocolConnectionMade = Deferred()
client.protocolConnectionLost = Deferred()
port = reactor.listenTCP(0, firstServer, interface=interface)
        def firstServerConnected(proto):
reactor.removeReader(proto.transport)
reactor.removeWriter(proto.transport)
reactor.adoptStreamConnection(
proto.transport.fileno(), addressFamily, server)
        firstServer.protocolConnectionMade.addCallback(firstServerConnected)
lostDeferred = gatherResults([client.protocolConnectionLost,
server.protocolConnectionLost])
def stop(result):
if reactor.running:
reactor.stop()
return result
lostDeferred.addBoth(stop)
deferred = Deferred()
deferred.addErrback(stop)
startDeferred = gatherResults([client.protocolConnectionMade,
server.protocolConnectionMade])
def start(protocols):
client, server = protocols
log.msg("client connected %s" % client)
log.msg("server connected %s" % server)
deferred.callback((client, server, port))
startDeferred.addCallback(start)
reactor.connectTCP(interface, port.getHost().port, client)
return deferred
globals().update(TCP4ClientTestsBuilder.makeTestCaseClasses())
globals().update(TCP6ClientTestsBuilder.makeTestCaseClasses())
globals().update(TCPPortTestsBuilder.makeTestCaseClasses())
globals().update(TCPFDPortTestsBuilder.makeTestCaseClasses())
globals().update(TCPConnectionTestsBuilder.makeTestCaseClasses())
globals().update(TCP4ConnectorTestsBuilder.makeTestCaseClasses())
globals().update(TCP6ConnectorTestsBuilder.makeTestCaseClasses())
globals().update(TCPTransportTestsBuilder.makeTestCaseClasses())
globals().update(AdoptStreamConnectionTestsBuilder.makeTestCaseClasses())
class ServerAbortsTwice(ConnectableProtocol):
"""
Call abortConnection() twice.
"""
def dataReceived(self, data):
self.transport.abortConnection()
self.transport.abortConnection()
class ServerAbortsThenLoses(ConnectableProtocol):
"""
Call abortConnection() followed by loseConnection().
"""
def dataReceived(self, data):
self.transport.abortConnection()
self.transport.loseConnection()
class AbortServerWritingProtocol(ConnectableProtocol):
"""
Protocol that writes data upon connection.
"""
def connectionMade(self):
"""
Tell the client that the connection is set up and it's time to abort.
"""
self.transport.write(b"ready")
class ReadAbortServerProtocol(AbortServerWritingProtocol):
"""
Server that should never receive any data, except 'X's which are written
by the other side of the connection before abortConnection, and so might
possibly arrive.
"""
def dataReceived(self, data):
if data.replace(b'X', b''):
raise Exception("Unexpectedly received data.")
class NoReadServer(ConnectableProtocol):
"""
Stop reading immediately on connection.
This simulates a lost connection that will cause the other side to time
out, and therefore call abortConnection().
"""
def connectionMade(self):
self.transport.stopReading()
class EventualNoReadServer(ConnectableProtocol):
"""
    Like NoReadServer, except we wait until some bytes have been delivered
    before stopping reading. This means the TLS handshake has finished, where
applicable.
"""
gotData = False
stoppedReading = False
def dataReceived(self, data):
if not self.gotData:
self.gotData = True
self.transport.registerProducer(self, False)
self.transport.write(b"hello")
def resumeProducing(self):
if self.stoppedReading:
return
self.stoppedReading = True
# We've written out the data:
self.transport.stopReading()
def pauseProducing(self):
pass
def stopProducing(self):
pass
class BaseAbortingClient(ConnectableProtocol):
"""
Base class for abort-testing clients.
"""
inReactorMethod = False
def connectionLost(self, reason):
if self.inReactorMethod:
raise RuntimeError("BUG: connectionLost was called re-entrantly!")
ConnectableProtocol.connectionLost(self, reason)
class WritingButNotAbortingClient(BaseAbortingClient):
"""
Write data, but don't abort.
"""
def connectionMade(self):
self.transport.write(b"hello")
class AbortingClient(BaseAbortingClient):
"""
Call abortConnection() after writing some data.
"""
def dataReceived(self, data):
"""
Some data was received, so the connection is set up.
"""
self.inReactorMethod = True
self.writeAndAbort()
self.inReactorMethod = False
def writeAndAbort(self):
# X is written before abortConnection, and so there is a chance it
# might arrive. Y is written after, and so no Ys should ever be
# delivered:
self.transport.write(b"X" * 10000)
self.transport.abortConnection()
self.transport.write(b"Y" * 10000)
class AbortingTwiceClient(AbortingClient):
"""
Call abortConnection() twice, after writing some data.
"""
def writeAndAbort(self):
AbortingClient.writeAndAbort(self)
self.transport.abortConnection()
class AbortingThenLosingClient(AbortingClient):
"""
Call abortConnection() and then loseConnection().
"""
def writeAndAbort(self):
AbortingClient.writeAndAbort(self)
self.transport.loseConnection()
class ProducerAbortingClient(ConnectableProtocol):
"""
Call abortConnection from doWrite, via resumeProducing.
"""
inReactorMethod = True
producerStopped = False
def write(self):
self.transport.write(b"lalala" * 127000)
self.inRegisterProducer = True
self.transport.registerProducer(self, False)
self.inRegisterProducer = False
def connectionMade(self):
self.write()
def resumeProducing(self):
self.inReactorMethod = True
if not self.inRegisterProducer:
self.transport.abortConnection()
self.inReactorMethod = False
def stopProducing(self):
self.producerStopped = True
def connectionLost(self, reason):
if not self.producerStopped:
raise RuntimeError("BUG: stopProducing() was never called.")
if self.inReactorMethod:
raise RuntimeError("BUG: connectionLost called re-entrantly!")
ConnectableProtocol.connectionLost(self, reason)
class StreamingProducerClient(ConnectableProtocol):
"""
Call abortConnection() when the other side has stopped reading.
In particular, we want to call abortConnection() only once our local
socket hits a state where it is no longer writeable. This helps emulate
the most common use case for abortConnection(), closing a connection after
a timeout, with write buffers being full.
Since it's very difficult to know when this actually happens, we just
write a lot of data, and assume at that point no more writes will happen.
"""
paused = False
extraWrites = 0
inReactorMethod = False
def connectionMade(self):
self.write()
def write(self):
"""
Write large amount to transport, then wait for a while for buffers to
fill up.
"""
self.transport.registerProducer(self, True)
for i in range(100):
self.transport.write(b"1234567890" * 32000)
def resumeProducing(self):
self.paused = False
def stopProducing(self):
pass
def pauseProducing(self):
"""
Called when local buffer fills up.
The goal is to hit the point where the local file descriptor is not
writeable (or the moral equivalent). The fact that pauseProducing has
been called is not sufficient, since that can happen when Twisted's
        buffers fill up but the OS hasn't gotten any writes yet. We want to be as
close as possible to every buffer (including OS buffers) being full.
So, we wait a bit more after this for Twisted to write out a few
chunks, then abortConnection.
"""
if self.paused:
return
self.paused = True
# The amount we wait is arbitrary, we just want to make sure some
# writes have happened and outgoing OS buffers filled up -- see
# http://twistedmatrix.com/trac/ticket/5303 for details:
self.reactor.callLater(0.01, self.doAbort)
def doAbort(self):
if not self.paused:
            log.err(RuntimeError("BUG: We should be paused at this point."))
self.inReactorMethod = True
self.transport.abortConnection()
self.inReactorMethod = False
def connectionLost(self, reason):
# Tell server to start reading again so it knows to go away:
self.otherProtocol.transport.startReading()
ConnectableProtocol.connectionLost(self, reason)
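# StreamingProducerClient emulates the most common production use of
# abortConnection(): a timeout fires while the write buffers are still full.
# A rough sketch of that pattern (the timeout value and protocol are
# illustrative, not part of this module):
#
#   def connectionMade(self):
#       self._timeout = reactor.callLater(30, self.transport.abortConnection)
#
# loseConnection() would not help in that situation, since it waits for
# buffered data to be flushed before disconnecting; abortConnection() does not.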
class StreamingProducerClientLater(StreamingProducerClient):
"""
Call abortConnection() from dataReceived, after bytes have been
exchanged.
"""
def connectionMade(self):
self.transport.write(b"hello")
self.gotData = False
def dataReceived(self, data):
if not self.gotData:
self.gotData = True
self.write()
class ProducerAbortingClientLater(ProducerAbortingClient):
"""
Call abortConnection from doWrite, via resumeProducing.
Try to do so after some bytes have already been exchanged, so we
don't interrupt SSL handshake.
"""
def connectionMade(self):
# Override base class connectionMade().
pass
def dataReceived(self, data):
self.write()
class DataReceivedRaisingClient(AbortingClient):
"""
Call abortConnection(), and then throw exception, from dataReceived.
"""
def dataReceived(self, data):
self.transport.abortConnection()
raise ZeroDivisionError("ONO")
class ResumeThrowsClient(ProducerAbortingClient):
"""
Call abortConnection() and throw exception from resumeProducing().
"""
def resumeProducing(self):
if not self.inRegisterProducer:
self.transport.abortConnection()
raise ZeroDivisionError("ono!")
def connectionLost(self, reason):
# Base class assertion about stopProducing being called isn't valid;
        # if we blew up in resumeProducing, consumers are justified in
# giving up on the producer and not calling stopProducing.
ConnectableProtocol.connectionLost(self, reason)
class AbortConnectionMixin(object):
"""
Unit tests for L{ITransport.abortConnection}.
"""
    # Override in subclasses, should be an EndpointCreator instance:
endpoints = None
def runAbortTest(self, clientClass, serverClass,
clientConnectionLostReason=None):
"""
A test runner utility function, which hooks up a matched pair of client
and server protocols.
We then run the reactor until both sides have disconnected, and then
verify that the right exception resulted.
"""
clientExpectedExceptions = (ConnectionAborted, ConnectionLost)
serverExpectedExceptions = (ConnectionLost, ConnectionDone)
# In TLS tests we may get SSL.Error instead of ConnectionLost,
# since we're trashing the TLS protocol layer.
if useSSL:
clientExpectedExceptions = clientExpectedExceptions + (SSL.Error,)
serverExpectedExceptions = serverExpectedExceptions + (SSL.Error,)
client = clientClass()
server = serverClass()
client.otherProtocol = server
server.otherProtocol = client
reactor = runProtocolsWithReactor(self, server, client, self.endpoints)
# Make sure everything was shutdown correctly:
self.assertEqual(reactor.removeAll(), [])
# The reactor always has a timeout added in runReactor():
delayedCalls = reactor.getDelayedCalls()
self.assertEqual(len(delayedCalls), 1, map(str, delayedCalls))
if clientConnectionLostReason is not None:
self.assertIsInstance(
client.disconnectReason.value,
(clientConnectionLostReason,) + clientExpectedExceptions)
else:
self.assertIsInstance(client.disconnectReason.value,
clientExpectedExceptions)
self.assertIsInstance(server.disconnectReason.value, serverExpectedExceptions)
def test_dataReceivedAbort(self):
"""
abortConnection() is called in dataReceived. The protocol should be
disconnected, but connectionLost should not be called re-entrantly.
"""
return self.runAbortTest(AbortingClient, ReadAbortServerProtocol)
def test_clientAbortsConnectionTwice(self):
"""
abortConnection() is called twice by client.
No exception should be thrown, and the connection will be closed.
"""
return self.runAbortTest(AbortingTwiceClient, ReadAbortServerProtocol)
def test_clientAbortsConnectionThenLosesConnection(self):
"""
Client calls abortConnection(), followed by loseConnection().
No exception should be thrown, and the connection will be closed.
"""
return self.runAbortTest(AbortingThenLosingClient,
ReadAbortServerProtocol)
def test_serverAbortsConnectionTwice(self):
"""
abortConnection() is called twice by server.
No exception should be thrown, and the connection will be closed.
"""
return self.runAbortTest(WritingButNotAbortingClient, ServerAbortsTwice,
clientConnectionLostReason=ConnectionLost)
def test_serverAbortsConnectionThenLosesConnection(self):
"""
Server calls abortConnection(), followed by loseConnection().
No exception should be thrown, and the connection will be closed.
"""
return self.runAbortTest(WritingButNotAbortingClient,
ServerAbortsThenLoses,
clientConnectionLostReason=ConnectionLost)
def test_resumeProducingAbort(self):
"""
abortConnection() is called in resumeProducing, before any bytes have
been exchanged. The protocol should be disconnected, but
connectionLost should not be called re-entrantly.
"""
self.runAbortTest(ProducerAbortingClient,
ConnectableProtocol)
def test_resumeProducingAbortLater(self):
"""
abortConnection() is called in resumeProducing, after some
bytes have been exchanged. The protocol should be disconnected.
"""
return self.runAbortTest(ProducerAbortingClientLater,
AbortServerWritingProtocol)
def test_fullWriteBuffer(self):
"""
abortConnection() triggered by the write buffer being full.
In particular, the server side stops reading. This is supposed
to simulate a realistic timeout scenario where the client
notices the server is no longer accepting data.
The protocol should be disconnected, but connectionLost should not be
called re-entrantly.
"""
self.runAbortTest(StreamingProducerClient,
NoReadServer)
def test_fullWriteBufferAfterByteExchange(self):
"""
abortConnection() is triggered by a write buffer being full.
However, this buffer is filled after some bytes have been exchanged,
allowing a TLS handshake if we're testing TLS. The connection will
then be lost.
"""
return self.runAbortTest(StreamingProducerClientLater,
EventualNoReadServer)
def test_dataReceivedThrows(self):
"""
dataReceived calls abortConnection(), and then raises an exception.
The connection will be lost, with the thrown exception
(C{ZeroDivisionError}) as the reason on the client. The idea here is
that bugs should not be masked by abortConnection, in particular
unexpected exceptions.
"""
self.runAbortTest(DataReceivedRaisingClient,
AbortServerWritingProtocol,
clientConnectionLostReason=ZeroDivisionError)
errors = self.flushLoggedErrors(ZeroDivisionError)
self.assertEqual(len(errors), 1)
def test_resumeProducingThrows(self):
"""
resumeProducing calls abortConnection(), and then raises an exception.
The connection will be lost, with the thrown exception
(C{ZeroDivisionError}) as the reason on the client. The idea here is
that bugs should not be masked by abortConnection, in particular
unexpected exceptions.
"""
self.runAbortTest(ResumeThrowsClient,
ConnectableProtocol,
clientConnectionLostReason=ZeroDivisionError)
errors = self.flushLoggedErrors(ZeroDivisionError)
self.assertEqual(len(errors), 1)
class AbortConnectionTestCase(ReactorBuilder, AbortConnectionMixin):
"""
TCP-specific L{AbortConnectionMixin} tests.
"""
requiredInterfaces = (IReactorTCP,)
endpoints = TCPCreator()
globals().update(AbortConnectionTestCase.makeTestCaseClasses())
class SimpleUtilityTestCase(TestCase):
"""
Simple, direct tests for helpers within L{twisted.internet.tcp}.
"""
if ipv6Skip:
skip = ipv6Skip
def test_resolveNumericHost(self):
"""
L{_resolveIPv6} raises a L{socket.gaierror} (L{socket.EAI_NONAME}) when
invoked with a non-numeric host. (In other words, it is passing
L{socket.AI_NUMERICHOST} to L{socket.getaddrinfo} and will not
accidentally block if it receives bad input.)
"""
err = self.assertRaises(socket.gaierror, _resolveIPv6, "localhost", 1)
self.assertEqual(err.args[0], socket.EAI_NONAME)
def test_resolveNumericService(self):
"""
L{_resolveIPv6} raises a L{socket.gaierror} (L{socket.EAI_NONAME}) when
invoked with a non-numeric port. (In other words, it is passing
L{socket.AI_NUMERICSERV} to L{socket.getaddrinfo} and will not
accidentally block if it receives bad input.)
"""
err = self.assertRaises(socket.gaierror, _resolveIPv6, "::1", "http")
self.assertEqual(err.args[0], socket.EAI_NONAME)
if platform.isWindows():
test_resolveNumericService.skip = ("The AI_NUMERICSERV flag is not "
"supported by Microsoft providers.")
# http://msdn.microsoft.com/en-us/library/windows/desktop/ms738520.aspx
def test_resolveIPv6(self):
"""
L{_resolveIPv6} discovers the flow info and scope ID of an IPv6
address.
"""
result = _resolveIPv6("::1", 2)
self.assertEqual(len(result), 4)
# We can't say anything more useful about these than that they're
# integers, because the whole point of getaddrinfo is that you can never
        # know a priori _anything_ about the network interfaces of the
# computer that you're on and you have to ask it.
self.assertIsInstance(result[2], int) # flow info
self.assertIsInstance(result[3], int) # scope id
# but, luckily, IP presentation format and what it means to be a port
# number are a little better specified.
self.assertEqual(result[:2], ("::1", 2))
# File: test/rules/test_drowning_adult.py (repo: rileyhazard/SmartVA-Analyze-1, license: MIT)
from smartva.rules import drowning_adult as drowning
from smartva.data.constants import *
VA = Adult
def test_pass():
row = {
VA.DROWNING: YES,
VA.INJURY_DAYS: 0,
}
assert drowning.logic_rule(row) is True
def test_fail_drowning():
row = {
VA.DROWNING: NO,
}
assert drowning.logic_rule(row) is False
def test_fail_no_data():
row = {}
assert drowning.logic_rule(row) is False
# File: devices/master/plugins/epg_sat/read_epg_json.py (repo: stko/Schnipsl, license: MIT)
import sys
import json
from datetime import datetime
lastEnd=0
with open(sys.argv[1]) as json_file:
data = json.load(json_file)
times=sorted(list(data['details']))
for time in times:
p=data['details'][time]
print('{0} {1} {2}-{3}'.format(
datetime.utcfromtimestamp(p['unixTimeBegin']).strftime('%Y-%m-%d %H:%M'),
datetime.utcfromtimestamp(p['unixTimeEnd']-p['unixTimeBegin']).strftime('%H:%M'),
p['name'],
p['title']
)
)
if lastEnd != p['unixTimeBegin']:
print('--------------------')
lastEnd=p['unixTimeEnd']
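# Example invocation (the file name is illustrative; any EPG JSON dump with a
# top-level 'details' mapping works):
#
#   python read_epg_json.py epg.json
#
# Each line prints start time, duration, and "channel-title"; a dashed
# separator marks a discontinuity between consecutive programmes.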
# File: python/dgl/backend/pytorch/tensor.py (repo: m30m/dgl, license: Apache-2.0)
from __future__ import absolute_import
from distutils.version import LooseVersion
import scipy # Weird bug in new pytorch when import scipy after import torch
import torch as th
import builtins
from torch.utils import dlpack
from ... import ndarray as nd
from ... import kernel as K
from ...function.base import TargetCode
from ...base import dgl_warning
if LooseVersion(th.__version__) < LooseVersion("1.2.0"):
dgl_warning("Detected an old version of PyTorch. Suggest using torch>=1.2.0 "
"for the best experience.")
def data_type_dict():
return {'float16' : th.float16,
'float32' : th.float32,
'float64' : th.float64,
'uint8' : th.uint8,
'int8' : th.int8,
'int16' : th.int16,
'int32' : th.int32,
'int64' : th.int64,
'bool' : th.bool}
def cpu():
return th.device('cpu')
def tensor(data, dtype=None):
return th.tensor(data, dtype=dtype)
def as_scalar(data):
return data.item()
def get_preferred_sparse_format():
"""Get the preferred sparse matrix format supported by the backend.
    Different backends have their preferred sparse format. This info is useful when
constructing a sparse matrix.
"""
return "coo"
def sparse_matrix(data, index, shape, force_format=False):
fmt = index[0]
if fmt != 'coo':
raise TypeError('Pytorch backend only supports COO format. But got %s.' % fmt)
spmat = th.sparse_coo_tensor(index[1], data, shape)
return spmat, None
def sparse_matrix_indices(spmat):
return ('coo', spmat._indices())
def is_tensor(obj):
return isinstance(obj, th.Tensor)
def shape(input):
return input.shape
def dtype(input):
return input.dtype
def ndim(input):
return input.dim()
def context(input):
return input.device
def device_type(ctx):
return ctx.type
def device_id(ctx):
if ctx.index is None:
return 0
else:
return ctx.index
def astype(input, ty):
return input.type(ty)
def asnumpy(input):
if isinstance(input, th.sparse.FloatTensor):
return input.to_dense().cpu().detach().numpy()
else:
return input.cpu().detach().numpy()
def copy_to(input, ctx):
if ctx.type == 'cpu':
return input.cpu()
elif ctx.type == 'cuda':
if ctx.index is not None:
th.cuda.set_device(ctx.index)
return input.cuda()
else:
raise RuntimeError('Invalid context', ctx)
def sum(input, dim, keepdims=False):
return th.sum(input, dim=dim, keepdim=keepdims)
def reduce_sum(input):
return input.sum()
def mean(input, dim):
return th.mean(input, dim=dim)
def reduce_mean(input):
return input.mean()
def max(input, dim):
# NOTE: the second argmax array is not returned
return th.max(input, dim=dim)[0]
def reduce_max(input):
return input.max()
def min(input, dim):
# NOTE: the second argmin array is not returned
return th.min(input, dim=dim)[0]
def reduce_min(input):
return input.min()
def argsort(input, dim, descending):
return th.argsort(input, dim=dim, descending=descending)
def topk(input, k, dim, descending=True):
return th.topk(input, k, dim, largest=descending)[0]
def argtopk(input, k, dim, descending=True):
return th.topk(input, k, dim, largest=descending)[1]
def exp(input):
return th.exp(input)
def softmax(input, dim=-1):
return th.softmax(input, dim=dim)
def cat(seq, dim):
return th.cat(seq, dim=dim)
def stack(seq, dim):
return th.stack(seq, dim=dim)
def split(input, sizes_or_sections, dim):
return th.split(input, sizes_or_sections, dim)
def repeat(input, repeats, dim):
# return th.repeat_interleave(input, repeats, dim) # PyTorch 1.1
if dim < 0:
dim += input.dim()
return th.flatten(th.stack([input] * repeats, dim=dim+1), dim, dim+1)
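# A quick sketch of what the stack+flatten trick above computes (equivalent to
# th.repeat_interleave on newer PyTorch):
#
#   >>> x = th.tensor([[1, 2], [3, 4]])
#   >>> repeat(x, 2, 0)
#   tensor([[1, 2],
#           [1, 2],
#           [3, 4],
#           [3, 4]])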
def gather_row(data, row_index):
return th.index_select(data, 0, row_index)
def slice_axis(data, axis, begin, end):
return th.narrow(data, axis, begin, end - begin)
def take(data, indices, dim):
new_shape = data.shape[:dim] + indices.shape + data.shape[dim+1:]
return th.index_select(data, dim, indices.view(-1)).view(new_shape)
def narrow_row(x, start, stop):
return x[start:stop]
def scatter_row(data, row_index, value):
return data.index_copy(0, row_index, value)
def scatter_row_inplace(data, row_index, value):
data[row_index] = value
def squeeze(input, dim):
return th.squeeze(input, dim)
def unsqueeze(input, dim):
return th.unsqueeze(input, dim)
def reshape(input, shape):
    return th.reshape(input, shape)
def swapaxes(input, axis1, axis2):
return th.transpose(input, axis1, axis2)
def zeros(shape, dtype, ctx):
return th.zeros(shape, dtype=dtype, device=ctx)
def zeros_like(input):
return th.zeros_like(input)
def ones(shape, dtype, ctx):
return th.ones(shape, dtype=dtype, device=ctx)
def uniform(shape, dtype, ctx, low, high):
return th.empty(shape, dtype=dtype, device=ctx).uniform_(low, high)
def pad_packed_tensor(input, lengths, value, l_min=None):
old_shape = input.shape
if isinstance(lengths, th.Tensor):
max_len = as_scalar(lengths.max())
else:
max_len = builtins.max(lengths)
if l_min is not None:
max_len = builtins.max(max_len, l_min)
batch_size = len(lengths)
device = input.device
x = input.new(batch_size * max_len, *old_shape[1:])
x.fill_(value)
index = []
for i, l in enumerate(lengths):
index.extend(range(i * max_len, i * max_len + l))
index = th.tensor(index).to(device)
return scatter_row(x, index, input).view(batch_size, max_len, *old_shape[1:])
def pack_padded_tensor(input, lengths):
batch_size, max_len = input.shape[:2]
device = input.device
index = []
for i, l in enumerate(lengths):
index.extend(range(i * max_len, i * max_len + l))
index = th.tensor(index).to(device)
return gather_row(input.view(batch_size * max_len, -1), index)
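# Round-trip sketch for the two helpers above (toy sizes, illustrative only):
# two sequences of lengths [3, 2] packed into one flat tensor.
#
#   >>> packed = th.arange(5.)
#   >>> padded = pad_packed_tensor(packed, [3, 2], value=0)
#   >>> padded                             # shape (batch_size, max_len)
#   tensor([[0., 1., 2.],
#           [3., 4., 0.]])
#   >>> pack_padded_tensor(padded, [3, 2]).flatten()
#   tensor([0., 1., 2., 3., 4.])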
def unsorted_1d_segment_sum(input, seg_id, n_segs, dim):
y = th.zeros(n_segs, *input.shape[1:]).to(input)
seg_id = seg_id.view((-1,) + (1,) * (input.dim() - 1)).expand_as(input)
y = y.scatter_add_(dim, seg_id, input)
return y
def unsorted_1d_segment_mean(input, seg_id, n_segs, dim):
w = unsorted_1d_segment_sum(th.ones_like(seg_id), seg_id, n_segs, 0).to(input)
w = w.clamp(min=1) # remove 0 entries
y = unsorted_1d_segment_sum(input, seg_id, n_segs, dim)
y = y / w.view((-1,) + (1,) * (y.dim() - 1))
return y
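# Semantics sketch for the two segment reductions above (values illustrative):
#
#   >>> x = th.tensor([1., 2., 3., 4.])
#   >>> seg = th.tensor([0, 0, 1, 1])
#   >>> unsorted_1d_segment_sum(x, seg, 2, 0)
#   tensor([3., 7.])
#   >>> unsorted_1d_segment_mean(x, seg, 2, 0)
#   tensor([1.5000, 3.5000])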
def boolean_mask(input, mask):
return input[mask]
def equal(x, y):
return x == y
def logical_not(input):
return ~input
def clone(input):
return input.clone()
def unique(input):
return th.unique(input)
def full_1d(length, fill_value, dtype, ctx):
return th.full((length,), fill_value, dtype=dtype, device=ctx)
def nonzero_1d(input):
x = th.nonzero(input).squeeze()
return x if x.dim() == 1 else x.view(-1)
def sort_1d(input):
return th.sort(input)
def arange(start, stop):
return th.arange(start, stop, dtype=th.int64)
def rand_shuffle(arr):
idx = th.randperm(len(arr))
return arr[idx]
def zerocopy_to_dlpack(input):
return dlpack.to_dlpack(input.contiguous())
def zerocopy_from_dlpack(dlpack_tensor):
return dlpack.from_dlpack(dlpack_tensor)
def zerocopy_to_numpy(input):
# NOTE: not zerocopy
return asnumpy(input)
def zerocopy_from_numpy(np_array):
return th.as_tensor(np_array)
def zerocopy_to_dgl_ndarray(input):
return nd.from_dlpack(dlpack.to_dlpack(input.contiguous()))
def zerocopy_from_dgl_ndarray(input):
return dlpack.from_dlpack(input.to_dlpack())
class BinaryReduce(th.autograd.Function):
@staticmethod
def forward(ctx, reducer, binary_op, graph, lhs, rhs, lhs_data, rhs_data, out_data,
out_size, lhs_map, rhs_map, out_map):
lhs_data_nd = zerocopy_to_dgl_ndarray(lhs_data)
rhs_data_nd = zerocopy_to_dgl_ndarray(rhs_data)
feat_shape = K.infer_binary_feature_shape(binary_op, lhs_data_nd, rhs_data_nd)
out_shape = feat_shape
if binary_op == 'dot':
out_shape = feat_shape[:-1]
out_data_nd = zerocopy_to_dgl_ndarray(out_data)
K.binary_op_reduce(
reducer if reducer != 'mean' else 'sum',
binary_op, graph, lhs, rhs, lhs_data_nd, rhs_data_nd,
out_data_nd, lhs_map[0], rhs_map[0], out_map[0])
# normalize if mean reducer
# NOTE(zihao): this is a temporary hack and we should have better solution in the future.
if reducer == 'mean':
degs = lhs_data.new_empty((out_data.shape[0],))
degs_nd = zerocopy_to_dgl_ndarray(degs)
if lhs != TargetCode.DST: # src or edge
target = lhs
n = lhs_data.shape[0]
in_map = lhs_map[0]
else: # rhs != TargetCode.DST
target = rhs
n = rhs_data.shape[0]
in_map = rhs_map[0]
in_ones = lhs_data.new_ones((n,))
in_ones_nd = zerocopy_to_dgl_ndarray(in_ones)
K.copy_reduce(
'sum', graph, target, in_ones_nd, degs_nd, in_map, out_map[0])
# reshape
degs = degs.reshape((out_data.shape[0],) + (1,) * (out_data.dim() - 1)).clamp(min=1)
out_data = out_data / degs
else:
degs = None
# save_for_backward can only save variables
ctx.backward_cache = (reducer, binary_op, graph, lhs, rhs, lhs_map,
rhs_map, out_map, feat_shape, degs)
ctx.save_for_backward(lhs_data, rhs_data, out_data)
return out_data
@staticmethod
def backward(ctx, grad_out):
reducer, binary_op, graph, lhs, rhs, lhs_map, rhs_map, out_map, \
feat_shape, degs = ctx.backward_cache
lhs_data, rhs_data, out_data = ctx.saved_tensors
lhs_data_nd = zerocopy_to_dgl_ndarray(lhs_data)
rhs_data_nd = zerocopy_to_dgl_ndarray(rhs_data)
out_data_nd = zerocopy_to_dgl_ndarray(out_data)
grad_lhs = None
grad_rhs = None
if reducer == 'mean':
grad_out = grad_out / degs
grad_out_nd = zerocopy_to_dgl_ndarray(grad_out)
if ctx.needs_input_grad[5]:
grad_lhs = grad_out.new_empty((lhs_data_nd.shape[0],) + feat_shape)
K.backward_lhs_binary_op_reduce(
reducer if reducer != 'mean' else 'sum',
binary_op, graph, lhs, rhs, lhs_data_nd, rhs_data_nd,
out_data_nd, grad_out_nd, zerocopy_to_dgl_ndarray(grad_lhs),
lhs_map[1], rhs_map[1], out_map[1])
grad_lhs = _reduce_grad(grad_lhs, lhs_data_nd.shape)
if ctx.needs_input_grad[6]:
grad_rhs = grad_out.new_empty((rhs_data_nd.shape[0],) + feat_shape)
K.backward_rhs_binary_op_reduce(
reducer if reducer != 'mean' else 'sum',
binary_op, graph, lhs, rhs, lhs_data_nd, rhs_data_nd,
out_data_nd, grad_out_nd, zerocopy_to_dgl_ndarray(grad_rhs),
lhs_map[1], rhs_map[1], out_map[1])
grad_rhs = _reduce_grad(grad_rhs, rhs_data_nd.shape)
return None, None, None, None, None, grad_lhs, grad_rhs, None, None, None, \
None, None
def binary_reduce(reducer, binary_op, graph, lhs, rhs, lhs_data, rhs_data,
out_size, lhs_map=(None, None), rhs_map=(None, None), out_map=(None, None)):
lhs_data_nd = zerocopy_to_dgl_ndarray(lhs_data)
rhs_data_nd = zerocopy_to_dgl_ndarray(rhs_data)
feat_shape = K.infer_binary_feature_shape(binary_op, lhs_data_nd, rhs_data_nd)
out_shape = feat_shape
if binary_op == 'dot':
out_shape = feat_shape[:-1]
out_data = lhs_data.new_empty((out_size,) + out_shape)
return BinaryReduce.apply(
reducer, binary_op, graph, lhs, rhs, lhs_data, rhs_data, out_data,
out_size, lhs_map, rhs_map, out_map)
class CopyReduce(th.autograd.Function):
@staticmethod
def forward(ctx, reducer, graph, target, in_data, out_data, out_size, in_map,
out_map):
in_data_nd = zerocopy_to_dgl_ndarray(in_data)
out_data_nd = zerocopy_to_dgl_ndarray(out_data)
K.copy_reduce(
reducer if reducer != 'mean' else 'sum',
graph, target, in_data_nd, out_data_nd, in_map[0], out_map[0])
# normalize if mean reducer
# NOTE(zihao): this is a temporary hack and we should have better solution in the future.
if reducer == 'mean':
in_ones = in_data.new_ones((in_data.shape[0],))
degs = in_data.new_empty((out_data.shape[0],))
in_ones_nd = zerocopy_to_dgl_ndarray(in_ones)
degs_nd = zerocopy_to_dgl_ndarray(degs)
K.copy_reduce(
'sum', graph, target, in_ones_nd, degs_nd, in_map[0], out_map[0])
# reshape
degs = degs.reshape((out_data.shape[0],) + (1,) * (out_data.dim() - 1)).clamp(min=1)
out_data = out_data / degs
else:
degs = None
# save_for_backward can only save variables
ctx.backward_cache = (reducer, graph, target, in_map, out_map, degs)
ctx.save_for_backward(in_data, out_data)
return out_data
@staticmethod
def backward(ctx, grad_out):
reducer, graph, target, in_map, out_map, degs = ctx.backward_cache
in_data, out_data = ctx.saved_tensors
in_data_nd = zerocopy_to_dgl_ndarray(in_data)
out_data_nd = zerocopy_to_dgl_ndarray(out_data)
grad_in = None
if reducer == 'mean':
grad_out = grad_out / degs
grad_out_nd = zerocopy_to_dgl_ndarray(grad_out)
if ctx.needs_input_grad[3]:
grad_in = grad_out.new_empty(in_data_nd.shape)
K.backward_copy_reduce(
reducer if reducer != 'mean' else 'sum',
graph, target, in_data_nd, out_data_nd, grad_out_nd,
zerocopy_to_dgl_ndarray(grad_in), in_map[1], out_map[1])
return None, None, None, grad_in, None, None, None, None
def copy_reduce(reducer, graph, target, in_data, out_size, in_map=(None, None),
out_map=(None, None)):
out_data = in_data.new_empty((out_size,) + in_data.shape[1:])
return CopyReduce.apply(reducer, graph, target, in_data, out_data, out_size, in_map, out_map)
def _reduce_grad(grad, shape):
"""Reduce gradient on the broadcast dimension
If there is broadcast in forward pass, gradients need to be reduced on
broadcast dimension. This function checks the input tensor shape and
gradient shape and perform the reduction.
Parameters
----------
grad: Tensor
Gradient tensor
shape: tuple
Shape of input tensor
Returns
-------
Tensor
"""
grad_shape = grad.shape[1:]
in_shape = shape[1:]
if in_shape == grad_shape:
# no need to reduce
return grad
num_to_squeeze = len(grad_shape) - len(in_shape)
# pad inshape
in_shape = (1,) * num_to_squeeze + in_shape
reduce_idx = th.nonzero(th.tensor(grad_shape) - th.tensor(in_shape))
reduce_idx += 1 # skip batch dim
grad = grad.sum(dim=tuple(reduce_idx), keepdim=True)
return grad.view(shape)
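# Illustration: a feature of shape (5, 1) broadcast against (5, 3) in the
# forward pass receives a (5, 3) gradient, which is summed back down over the
# broadcast dimension (toy shapes, illustrative only):
#
#   >>> grad = th.ones(5, 3)
#   >>> _reduce_grad(grad, (5, 1)).shape
#   torch.Size([5, 1])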
def sync():
# Pytorch performs computation synchronously, so no need for synchronization.
pass
| 32.753165 | 97 | 0.647923 |
f73f2e392b1048abb55918b907c099ee620e895d | 4,317 | py | Python | hint/01-theo-BTC/02_DQN-5-cations/02_closest-gold/Miner-Testing-CodeSample-Approach1/build/MINER_STATE.py | phunc20/rlcomp2020 | c37f8f05cc86d55fca2648bf5491d6a2218c2cad | [
"MIT"
] | null | null | null | hint/01-theo-BTC/02_DQN-5-cations/02_closest-gold/Miner-Testing-CodeSample-Approach1/build/MINER_STATE.py | phunc20/rlcomp2020 | c37f8f05cc86d55fca2648bf5491d6a2218c2cad | [
"MIT"
] | 1 | 2022-02-10T02:27:10.000Z | 2022-02-10T02:27:10.000Z | hint/01/Miner-Testing-CodeSample-Approach1/build/MINER_STATE.py | phunc20/rlcomp2020 | c37f8f05cc86d55fca2648bf5491d6a2218c2cad | [
"MIT"
] | null | null | null | import json
def str_2_json(json_str):
    # Parse a JSON message string from the game server; the parameter is named
    # json_str to avoid shadowing the builtin str, and the removed `encoding`
    # kwarg of json.loads is dropped.
    return json.loads(json_str)
class MapInfo:
def __init__(self):
self.max_x = 0
self.max_y = 0
self.golds = []
self.obstacles = []
self.numberOfPlayers = 0
self.maxStep = 0
def init_map(self, gameInfo):
self.max_x = gameInfo["width"] - 1
self.max_y = gameInfo["height"] - 1
self.golds = gameInfo["golds"]
self.obstacles = gameInfo["obstacles"]
self.maxStep = gameInfo["steps"]
self.numberOfPlayers = gameInfo["numberOfPlayers"]
def update(self, golds, changedObstacles):
self.golds = golds
for cob in changedObstacles:
newOb = True
for ob in self.obstacles:
if cob["posx"] == ob["posx"] and cob["posy"] == ob["posy"]:
newOb = False
#print("cell(", cob["posx"], ",", cob["posy"], ") change type from: ", ob["type"], " -> ",
# cob["type"], " / value: ", ob["value"], " -> ", cob["value"])
ob["type"] = cob["type"]
ob["value"] = cob["value"]
break
if newOb:
self.obstacles.append(cob)
#print("new obstacle: ", cob["posx"], ",", cob["posy"], ", type = ", cob["type"], ", value = ",
# cob["value"])
def get_min_x(self):
return min([cell["posx"] for cell in self.golds])
def get_max_x(self):
return max([cell["posx"] for cell in self.golds])
def get_min_y(self):
return min([cell["posy"] for cell in self.golds])
def get_max_y(self):
return max([cell["posy"] for cell in self.golds])
def is_row_has_gold(self, y):
return y in [cell["posy"] for cell in self.golds]
def is_column_has_gold(self, x):
return x in [cell["posx"] for cell in self.golds]
def gold_amount(self, x, y):
for cell in self.golds:
if x == cell["posx"] and y == cell["posy"]:
return cell["amount"]
return 0
def get_obstacle(self, x, y): # Getting the kind of the obstacle at cell(x,y)
for cell in self.obstacles:
if x == cell["posx"] and y == cell["posy"]:
return cell["type"]
return -1 # No obstacle at the cell (x,y)
class State:
STATUS_PLAYING = 0
STATUS_ELIMINATED_WENT_OUT_MAP = 1
STATUS_ELIMINATED_OUT_OF_ENERGY = 2
STATUS_ELIMINATED_INVALID_ACTION = 3
STATUS_STOP_EMPTY_GOLD = 4
STATUS_STOP_END_STEP = 5
def __init__(self):
self.end = False
self.score = 0
self.lastAction = None
self.id = 0
self.x = 0
self.y = 0
self.energy = 0
self.mapInfo = MapInfo()
self.players = []
self.stepCount = 0
self.status = State.STATUS_PLAYING
def init_state(self, data): #parse data from server into object
game_info = str_2_json(data)
self.end = False
self.score = 0
self.lastAction = None
self.id = game_info["playerId"]
self.x = game_info["posx"]
self.y = game_info["posy"]
self.energy = game_info["energy"]
self.mapInfo.init_map(game_info["gameinfo"])
self.stepCount = 0
self.status = State.STATUS_PLAYING
self.players = [{"playerId": 2, "posx": self.x, "posy": self.y},
{"playerId": 3, "posx": self.x, "posy": self.y},
{"playerId": 4, "posx": self.x, "posy": self.y}]
def update_state(self, data):
new_state = str_2_json(data)
for player in new_state["players"]:
if player["playerId"] == self.id:
self.x = player["posx"]
self.y = player["posy"]
self.energy = player["energy"]
self.score = player["score"]
self.lastAction = player["lastAction"]
self.status = player["status"]
self.mapInfo.update(new_state["golds"], new_state["changedObstacles"])
self.players = new_state["players"]
for i in range(len(self.players) + 1, 5, 1):
self.players.append({"playerId": i, "posx": self.x, "posy": self.y})
self.stepCount = self.stepCount + 1
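# Minimal usage sketch (JSON payloads are illustrative; the real strings come
# from the game server):
#
#   state = State()
#   state.init_state('{"playerId": 1, "posx": 0, "posy": 0, "energy": 50,'
#                    ' "gameinfo": {"width": 21, "height": 9, "golds": [],'
#                    ' "obstacles": [], "steps": 100, "numberOfPlayers": 4}}')
#   state.update_state(server_message)  # once per step, with the server JSON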
# File: src/transformersX/models/cutoffbert/__init__.py (repo: stevezheng23/fewshot_nlp_pt, license: Apache-2.0)
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_cutoffbert": ["CUTOFFBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CutoffBertConfig"],
"tokenization_cutoffbert": ["CutoffBertTokenizer"],
}
if is_torch_available():
_import_structure["modeling_cutoffbert"] = [
"CUTOFFBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"CutoffBertForSequenceClassification",
"CutoffBertModel",
"CutoffBertPreTrainedModel",
"load_tf_weights_in_cutoffbert",
]
if TYPE_CHECKING:
from .configuration_cutoffbert import CUTOFFBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CutoffBertConfig
from .tokenization_cutoffbert import CutoffBertTokenizer
if is_torch_available():
from .modeling_cutoffbert import (
CUTOFFBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
CutoffBertForSequenceClassification,
CutoffBertModel,
CutoffBertPreTrainedModel,
load_tf_weights_in_cutoffbert,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
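# The _LazyModule indirection above keeps the package import cheap: the
# torch-backed submodule is only imported when one of its attributes is first
# touched, e.g. (illustrative import path):
#
#   from transformersX.models.cutoffbert import CutoffBertModel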
# File: nxsdk_modules_ncl/dnn/composable/composable_dnn.py (repo: event-driven-robotics/models, license: BSD-3-Clause)
#
# Copyright © 2020 Intel Corporation.
#
# This software and the related documents are Intel copyrighted
# materials, and your use of them is governed by the express
# license under which they were provided to you (License). Unless
# the License provides otherwise, you may not use, modify, copy,
# publish, distribute, disclose or transmit this software or the
# related documents without Intel's prior written permission.
#
# This software and the related documents are provided as is, with
# no express or implied warranties, other than those that are
# expressly stated in the License.
"""A wrapper around NxModel to make it a composable"""
import os
from typing import List
import numpy as np
from jinja2 import Environment, FileSystemLoader
import atexit
from nxsdk import get_logger
from nxsdk.composable.abstract_composable import AbstractComposable
from nxsdk.composable.collections import Processes
from nxsdk.composable.interfaces.composable_enums import ResourceMapType
from nxsdk.composable.interfaces.process import Process
from nxsdk.composable.interfaces.process_aggregator_interface import AbstractProcessAggregator
from nxsdk.composable.port_impl import StateInputPort
from nxsdk.composable.resource_map import ResourceMapFactory
from nxsdk.graph.graph import Graph
from nxsdk.graph.monitor.probes import SpikeProbeCondition
from nxsdk.graph.processes.phase_enums import Phase
from nxsdk_modules_ncl.dnn.src.dnn_layers import ProbableStates, InputModes
from nxsdk_modules_ncl.dnn.tests.test_softreset import printLayerMappings, \
printLayers
class ComposableDNN(AbstractComposable):
"""A DNN that is composable. See nxsdk_modules_ncl.dnn.src.dnn_layers.NxModel which is the underlying DNN Model"""
def __init__(self, model: 'NxModel', num_steps_per_img: int, enable_reset: bool = True):
"""
Wraps a DNNModel and makes it composable
:param model (nxsdk_modules_ncl.dnn.src.dnn_layers.NxModel): The underlying DNN Model created from NxTF Layers
:param num_steps_per_img: Number of steps to run for each image
:param enable_reset: Whether to reset states after ``num_steps_per_img``.
"""
super().__init__()
self._logger = get_logger("NET.DNN")
self._build(model=model, num_steps_per_img=num_steps_per_img, enableReset=enable_reset)
def _build(self, *args, **kwargs):
"""Builds the ports, probes and snips for the composable. This method is called from base class constructor"""
# Stores a reference to the underlying model
self._dnn = kwargs["model"]
self._addPorts()
self._addProcesses()
self._num_steps_per_img = kwargs["num_steps_per_img"]
self._enableReset = kwargs['enableReset']
def _addPorts(self):
"""Adds ports to the composable"""
# Create and add input port. This will be delegated to the input layer
self.addPort(StateInputPort(name="input"))
def _addProcesses(self):
"""Adds processes/snips associated with DNN Composable"""
snipDir = os.path.join(os.path.dirname(__file__), '..', 'snips', 'reset_model_states')
# Init snip to populate number of cores and reset interval
init = Process(
name='init',
cFilePath=snipDir + "/snip_init.c",
includeDir=snipDir,
funcName='init_1',
phase=Phase.EMBEDDED_INIT,
lmtId=0)
self.addProcess(init)
# Todo : Profile and measure to see if spreading readout and/or reset across lmts helps.
# Reset SNIP
reset_snip = Process(
name='reset',
cFilePath=snipDir + "/snip_reset.c",
includeDir=snipDir,
guardName='do_reset',
funcName='reset',
phase=Phase.EMBEDDED_MGMT,
lmtId=0)
self.addProcess(reset_snip)
readout_spike_activity_snip_dir = os.path.join(os.path.dirname(__file__),
'..', 'snips', 'readout_spike_activity')
# This is an example of lazily creating a process. The C file does not exist yet and will
# only be generated post map phase when output layer has been mapped to neurocores.
# Class Readout SNIP
readout_snip = Process(
name='readout',
cFilePath=readout_spike_activity_snip_dir + "/snip_class_readout.c",
includeDir=readout_spike_activity_snip_dir,
guardName='do_readout',
funcName='readout',
phase=Phase.EMBEDDED_MGMT,
lmtId=0)
self.addProcess(readout_snip)
def partition(self, board: Graph) -> AbstractComposable:
"""Partition the dnn model. We ignore this step and delegate it to map which invokes compileModel"""
return self
def map(self, board: Graph) -> AbstractComposable:
"""Invoke partition and mapping of the dnn model"""
mapper = self._dnn.compileModel(board)
printLayerInfo = False
if printLayerInfo:
printLayerMappings(self._dnn.layers, mapper, synapses=True, inputAxons=True)
printLayers(self._dnn.layers)
self._createSnips(board)
self._createReadoutSnip()
return self
def updatePorts(self, board: Graph) -> AbstractComposable:
"""Updates resourceMap to input and output ports"""
inputLayer = self._dnn.layers[0]
if inputLayer.inputMode == InputModes.AEDAT:
self.ports.input.resourceMap = ResourceMapFactory.createExplicit(
ResourceMapType.INPUT_AXON, inputLayer.inputAxonResourceMap)
else:
# Return input compartments for multi-compartment neurons
neuronSize = 2 if inputLayer.resetMode == 'soft' else 1
cxResourceMap = inputLayer.cxResourceMap[::neuronSize]
self.ports.input.resourceMap = ResourceMapFactory.createExplicit(
ResourceMapType.COMPARTMENT, cxResourceMap)
# self.ports.output.resourceMap = CompartmentResourceMap(self._dnn.layers[-1].cxResourceMap)
return self
def completeConnectivity(self, board: Graph, processAggregator: AbstractProcessAggregator) -> AbstractComposable:
"""Create channel to communicate data to init snip"""
# Should pipe to resourceMap indices for output layer
self._createInitializationChannel(board, processAggregator)
self._createReadoutChannel(board, processAggregator)
return self
def _createSnips(self, board: Graph):
"""Create clones of reset and init snips based on number of chips used by input layer."""
processes = Processes()
for chip_id in range(board.numChips):
# init snip
initProcess = self.processes.init
initProcessWithChipId = initProcess.clone(name=initProcess.name + str(chip_id),
params={'chipId': chip_id})
processes.add(initProcessWithChipId)
# reset snip
resetProcess = self.processes.reset
resetProcessWithChipId = resetProcess.clone(name=resetProcess.name + str(chip_id),
params={'chipId': chip_id})
processes.add(resetProcessWithChipId)
# Todo : Enable readout for output layers distributed across multiple chips.
# readout
chip_id = self._dnn.layers[-1].cxResourceMap[0, 0]
assert len(np.unique(self._dnn.layers[-1].cxResourceMap[:, 0])) == 1
readoutProcess = self.processes.readout
readoutProcessWithChipId = readoutProcess.clone(name=readoutProcess.name,
params={'chipId': chip_id})
processes.add(readoutProcessWithChipId)
self.processes = processes
def _createReadoutSnip(self):
"""Create readout snip for compartment of the output layer.
        The voltage is read out when using an output layer with a softmax
        activation; otherwise, spikes are read out by creating spike counters
at the lakemonts.
"""
probeDt = 1
probeStart = 100000000
# Get the output layer from the spiking model
output_layer = self._dnn.layers[-1]
NUM_CLASSES = int(np.prod(output_layer.output_shape[1:]))
# Return output compartments for multi-compartment neurons.
neuronSize = 2 if output_layer.resetMode == 'soft' else 1
offset = 1 if output_layer.resetMode == 'soft' else 0
# Determine whether to read spikes or voltages based on activation.
readSpikes = True
if hasattr(output_layer, 'activation') and \
output_layer.activation.__name__ == 'softmax':
offset = 0
readSpikes = False
lmt_spike_counters = []
if readSpikes:
for i in range(NUM_CLASSES):
spike_probe = output_layer[i * neuronSize + offset].probe(
state=ProbableStates.SPIKE,
probeCondition=SpikeProbeCondition(dt=probeDt, tStart=probeStart))
lmt_spike_counters.append(spike_probe.counterId)
cores = cxIds = np.zeros_like(lmt_spike_counters).tolist()
else:
rm = output_layer.cxResourceMap
cores = rm[offset::neuronSize, 1].tolist()
cxIds = rm[offset::neuronSize, 2].tolist()
lmt_spike_counters = np.zeros_like(cxIds).tolist()
# Now that lmt_spike_counters are known, generate the snip_class_readout.c
self._generateReadOutSnipCFileFromJinjaTemplate(readSpikes=readSpikes,
num_classes=NUM_CLASSES,
lmt_output_spike_counter_ids=lmt_spike_counters,
cores=cores,
cxIds=cxIds)
@staticmethod
def _cleanup():
readout_spike_activity_snip_dir = os.path.join(os.path.dirname(__file__),
'..', 'snips', 'readout_spike_activity')
cFilePath = os.path.join(readout_spike_activity_snip_dir, "snip_class_readout.c")
if os.path.exists(cFilePath):
os.remove(cFilePath)
def _generateReadOutSnipCFileFromJinjaTemplate(self,
readSpikes: bool,
num_classes: int,
lmt_output_spike_counter_ids: List[int],
cores: List[int],
cxIds: List[int]):
atexit.register(ComposableDNN._cleanup)
readout_spike_activity_snip_dir = os.path.join(os.path.dirname(__file__),
'..', 'snips', 'readout_spike_activity')
context = {
"READ_SPIKES": int(readSpikes),
"NUM_CLASSES": num_classes,
"NUM_STEPS_PER_IMG": self._num_steps_per_img,
"LMT_OUTPUT_SPIKE_COUNTER_IDS": "{" + str(lmt_output_spike_counter_ids)[1:-1] + "}",
"CORE_IDS": "{" + str(cores)[1:-1] + "}",
"CX_IDS": "{" + str(cxIds)[1:-1] + "}"
}
env = Environment(loader=FileSystemLoader(os.path.join(readout_spike_activity_snip_dir, "templates")),
trim_blocks=True)
c_template = env.get_template("snip_class_readout.c.template")
c_contents = c_template.render(context)
with open(os.path.join(readout_spike_activity_snip_dir, "snip_class_readout.c"), 'w') as cFile:
cFile.write(c_contents)
def _createInitializationChannel(self, board: Graph, processAggregator: AbstractProcessAggregator):
"""Creates a channel and connects it to init snip"""
for chip_id in range(board.numChips):
init_process = self.processes['init' + str(chip_id)]
processKey = init_process.getProcessKey()
snip_init_1 = processAggregator.getEmbeddedSnipForProcessKey(processKey)
name = 'channel_init_ch{}_lmt0'.format(chip_id)
setattr(self,
name,
board.createChannel(bytes(name, 'utf-8'), "int", 3))
getattr(self, name).connect(None, snip_init_1)
def _createReadoutChannel(self, board: Graph, processAggregator: AbstractProcessAggregator):
"""Create a readout channel to read the classification values from spike counters"""
readout_process = self.processes.readout
processKey = readout_process.getProcessKey()
snip_readout = processAggregator.getEmbeddedSnipForProcessKey(processKey)
self.readout_channel = board.createChannel(bytes('readout', 'utf-8'), "int", numElements=100000)
self.readout_channel.connect(snip_readout, None)
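        # Host-side code can then poll this channel for results; a rough
        # sketch, assuming the readout snip writes one int per image:
        #
        #   predicted_class = composable.readout_channel.read(1)[0]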
@staticmethod
def load(path: str, board: Graph = None) -> 'AbstractComposable':
"""Not Implemented"""
raise NotImplementedError
def save(self, path: str):
"""Not Implemented"""
raise NotImplementedError
def start(self, board: Graph, *args, **kwargs):
"""Writes initial configuration settings (num_cores_per_chip, num_steps_per_img, enableReset) to init channel"""
num_cores_per_chip = [board.n2Chips[i].numCores for i in range(board.numChips)]
for chip_id in range(board.numChips):
name = 'channel_init_ch{}_lmt0'.format(chip_id)
getattr(self, name).write(3, [num_cores_per_chip[chip_id], self._num_steps_per_img, self._enableReset])
| 45.787375 | 120 | 0.640473 |
f73f60dc33ae48a27d08b16b6d6d0fc00917447b | 6,397 | py | Python | augly/video/helpers/ffmpeg.py | Adib234/AugLy | 35a6a5de07e64f465b8979e3257218551929e57a | [
"MIT"
] | null | null | null | augly/video/helpers/ffmpeg.py | Adib234/AugLy | 35a6a5de07e64f465b8979e3257218551929e57a | [
"MIT"
] | null | null | null | augly/video/helpers/ffmpeg.py | Adib234/AugLy | 35a6a5de07e64f465b8979e3257218551929e57a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import io
import math
import os
import shutil
from typing import Any, Dict, Optional, Union
import augly.audio.utils as audutils
import ffmpeg
import numpy as np
from augly.utils import pathmgr, SILENT_AUDIO_PATH
from augly.utils.ffmpeg import FFMPEG_PATH, FFPROBE_PATH
from ffmpeg.nodes import FilterableStream
def combine_frames_and_audio_to_file(
raw_frames: str,
audio: Optional[Union[str, io.BytesIO]],
output_path: str,
framerate: float,
) -> None:
frame_dir = os.path.dirname(raw_frames)
if not os.path.isdir(frame_dir):
raise RuntimeError(
f"Got raw frames glob path of {raw_frames}, but {frame_dir} is not "
"a directory"
)
video_stream = ffmpeg.input(raw_frames, pattern_type="glob", framerate=framerate)
video_stream = video_stream.filter(
"pad", **{"width": "ceil(iw/2)*2", "height": "ceil(ih/2)*2"}
)
merge_video_and_audio(video_stream, audio, output_path)
def extract_audio_to_file(video_path: str, output_audio_path: str) -> None:
audio_info = get_audio_info(video_path)
sample_rate = str(audio_info["sample_rate"])
codec = audio_info["codec_name"]
if os.path.splitext(output_audio_path)[-1] == ".aac":
(
ffmpeg.input(video_path, loglevel="quiet")
.output(output_audio_path, acodec=codec, ac=1)
.overwrite_output()
.run(cmd=FFMPEG_PATH)
)
else:
out, err = (
ffmpeg.input(video_path, loglevel="quiet")
.output("-", format="f32le", acodec="pcm_f32le", ac=1, ar=sample_rate)
.run(cmd=FFMPEG_PATH, capture_stdout=True, capture_stderr=True)
)
audio = np.frombuffer(out, np.float32)
audutils.ret_and_save_audio(audio, output_audio_path, int(sample_rate))
def extract_frames_to_dir(
video_path: str,
output_dir: str,
output_pattern: str = "raw_frame%08d.jpg",
quality: int = 0,
scale: float = 1,
) -> None:
video_info = get_video_info(video_path)
(
ffmpeg.input(video_path, ss=0, loglevel="quiet")
.filter("scale", f"iw*{scale}", f"ih*{scale}")
.output(
os.path.join(output_dir, output_pattern),
vframes=video_info["nb_frames"],
**{"qscale:v": quality},
)
.overwrite_output()
.run(cmd=FFMPEG_PATH)
)
def get_audio_info(media_path: str) -> Dict[str, Any]:
"""
Returns whatever ffprobe returns. Of particular use are things such as the
encoder ("codec_name") used for audio encoding, the sample rate ("sample_rate"),
and length in seconds ("duration")
Accepts as input either an audio or video path.
"""
try:
local_media_path = pathmgr.get_local_path(media_path)
except RuntimeError:
raise FileNotFoundError(f"Provided media path {media_path} does not exist")
probe = ffmpeg.probe(local_media_path, cmd=FFPROBE_PATH)
audio_info = next(
(stream for stream in probe["streams"] if stream["codec_type"] == "audio"),
None,
)
assert (
audio_info is not None
), "Error retrieving audio metadata, please verify that an audio stream exists"
return audio_info
def get_video_fps(video_path: str) -> Optional[float]:
video_info = get_video_info(video_path)
try:
frame_rate = video_info["avg_frame_rate"]
# ffmpeg often returns fractional framerates, e.g. 225480/7523
if "/" in frame_rate:
num, denom = (float(f) for f in frame_rate.split("/"))
return num / denom
else:
return float(frame_rate)
except Exception:
return None
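# Fractional rates are common: an NTSC stream typically reports "30000/1001",
# which the helper above resolves to ~29.97 (file name illustrative):
#
#   >>> get_video_fps("clip.mp4")
#   29.97002997002997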
def get_video_info(video_path: str) -> Dict[str, Any]:
"""
Returns whatever ffprobe returns. Of particular use are things such as the FPS
("avg_frame_rate"), number of raw frames ("nb_frames"), height and width of each
frame ("height", "width") and length in seconds ("duration")
"""
try:
local_video_path = pathmgr.get_local_path(video_path)
except RuntimeError:
raise FileNotFoundError(f"Provided video path {video_path} does not exist")
probe = ffmpeg.probe(local_video_path, cmd=FFPROBE_PATH)
video_info = next(
(stream for stream in probe["streams"] if stream["codec_type"] == "video"),
None,
)
assert (
video_info is not None
), "Error retrieving video metadata, please verify that the video file exists"
return video_info
def has_audio_stream(video_path: str) -> bool:
streams = ffmpeg.probe(video_path, cmd=FFPROBE_PATH)["streams"]
for stream in streams:
if stream["codec_type"] == "audio":
return True
return False
def add_silent_audio(
video_path: str,
output_path: Optional[str] = None,
duration: Optional[float] = None,
) -> None:
local_video_path = pathmgr.get_local_path(video_path)
if local_video_path != video_path:
assert (
output_path is not None
), "If remote video_path is provided, an output_path must be provided"
video_path = local_video_path
output_path = output_path or video_path
if has_audio_stream(video_path):
if video_path != output_path:
shutil.copy(video_path, output_path)
return
duration = duration or float(get_video_info(video_path)["duration"])
video = ffmpeg.input(video_path).video
silent_audio_path = pathmgr.get_local_path(SILENT_AUDIO_PATH)
audio = ffmpeg.input(silent_audio_path, stream_loop=math.ceil(duration)).audio
output = ffmpeg.output(video, audio, output_path, pix_fmt="yuv420p", t=duration)
output.overwrite_output().run(cmd=FFMPEG_PATH)
def merge_video_and_audio(
video_stream: FilterableStream,
audio: Optional[Union[str, io.BytesIO]],
output_path: str,
) -> None:
kwargs = {"c:v": "libx264", "c:a": "copy", "bsf:a": "aac_adtstoasc"}
if audio:
audio_stream = ffmpeg.input(audio, loglevel="quiet")
output = ffmpeg.output(
video_stream, audio_stream, output_path, pix_fmt="yuv420p", **kwargs
).overwrite_output()
else:
output = ffmpeg.output(
video_stream, output_path, pix_fmt="yuv420p", **kwargs
).overwrite_output()
output.run(cmd=FFMPEG_PATH)
| 32.308081 | 85 | 0.659997 |
f73f6d3bf17cca6dc52756b61d166c3225f1b7e1 | 1,272 | py | Python | import_faces.py | vignettist/image-import | 7fd8860e3f5cd1011d98d5a2137327e304cbd884 | [
"MIT"
] | null | null | null | import_faces.py | vignettist/image-import | 7fd8860e3f5cd1011d98d5a2137327e304cbd884 | [
"MIT"
] | null | null | null | import_faces.py | vignettist/image-import | 7fd8860e3f5cd1011d98d5a2137327e304cbd884 | [
"MIT"
] | null | null | null | import openface
import cv2
class FaceFinder:
def __init__(self):
self.align = openface.AlignDlib("models/dlib/shape_predictor_68_face_landmarks.dat")
self.net = openface.TorchNeuralNet("models/openface/nn4.small2.v1.t7", 96)
def getFaces(self, imgPath):
bgrImg = cv2.imread(imgPath)
if bgrImg is None:
raise Exception("Unable to load image: {}".format(imgPath))
rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)
faces = []
bbs = self.align.getAllFaceBoundingBoxes(rgbImg)
for bb in bbs:
face = {}
if bb is None:
raise Exception("Unable to find a face: {}".format(imgPath))
alignedFace = self.align.align(96, rgbImg, bb,
landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
if alignedFace is None:
raise Exception("Unable to align image: {}".format(imgPath))
rep = self.net.forward(alignedFace)
face['rect'] = [bb.left(), bb.top(), bb.width(), bb.height()]
face['rep'] = rep.tolist()
face['size'] = bb.area()
faces.append(face)
        return faces
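# Example usage (the image path is illustrative; the model paths above are
# resolved relative to the working directory):
#
#   finder = FaceFinder()
#   faces = finder.getFaces("photo.jpg")
#   # each entry: {'rect': [x, y, w, h], 'rep': 128-d embedding, 'size': area}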
# File: plugins/opencv/src/opencv/__init__.py (repo: erikolofsson/scrypted, license: MIT)
from __future__ import annotations
from time import sleep
from detect import DetectionSession, DetectPlugin
from typing import Any, List
import numpy as np
import cv2
import imutils
from gi.repository import GLib, Gst
from scrypted_sdk.types import ObjectDetectionModel, ObjectDetectionResult, ObjectsDetected
class OpenCVDetectionSession(DetectionSession):
cap: cv2.VideoCapture
previous_frame: Any
def __init__(self) -> None:
super().__init__()
self.previous_frame = None
self.cap = None
defaultThreshold = 25
defaultArea = 2000
defaultInterval = 250
class OpenCVPlugin(DetectPlugin):
def __init__(self, nativeId: str | None = None):
super().__init__(nativeId=nativeId)
self.color2Gray = None
self.pixelFormat = "I420"
self.pixelFormatChannelCount = 1
        if True:  # hard-coded toggle: the I420/grayscale path is active; the BGRA branch below is kept for reference
self.retainAspectRatio = False
self.color2Gray = None
self.pixelFormat = "I420"
self.pixelFormatChannelCount = 1
else:
self.retainAspectRatio = True
self.color2Gray = cv2.COLOR_BGRA2GRAY
self.pixelFormat = "BGRA"
self.pixelFormatChannelCount = 4
async def getDetectionModel(self) -> ObjectDetectionModel:
d: ObjectDetectionModel = {
'name': '@scrypted/opencv',
'classes': ['motion'],
}
settings = [
{
'title': "Motion Area",
'description': "The area size required to trigger motion. Higher values (larger areas) are less sensitive. Setting this to 0 will output all matches into the console.",
'value': defaultArea,
'key': 'area',
'placeholder': defaultArea,
'type': 'number',
},
{
'title': "Motion Threshold",
'description': "The threshold required to consider a pixel changed. Higher values (larger changes) are less sensitive.",
'value': defaultThreshold,
'key': 'threshold',
'placeholder': defaultThreshold,
'type': 'number',
},
{
'title': "Frame Analysis Interval",
'description': "The number of milliseconds to wait between motion analysis.",
'value': defaultInterval,
'key': 'interval',
'placeholder': defaultInterval,
'type': 'number',
},
]
d['settings'] = settings
return d
def get_pixel_format(self):
return self.pixelFormat
def parse_settings(self, settings: Any):
area = defaultArea
threshold = defaultThreshold
interval = defaultInterval
if settings:
area = float(settings.get('area', area))
threshold = int(settings.get('threshold', threshold))
interval = float(settings.get('interval', interval))
return area, threshold, interval
def detect(self, detection_session: OpenCVDetectionSession, frame, settings: Any, src_size, convert_to_src_size) -> ObjectsDetected:
area, threshold, interval = self.parse_settings(settings)
# see get_detection_input_size on undocumented size requirements for GRAY8
if self.color2Gray != None:
gray = cv2.cvtColor(frame, self.color2Gray)
else:
gray = frame
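        # Blur before differencing so single-pixel sensor noise does not
        # register as motion.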
        curFrame = cv2.GaussianBlur(gray, (21, 21), 0)
if detection_session.previous_frame is None:
detection_session.previous_frame = curFrame
return
frameDelta = cv2.absdiff(detection_session.previous_frame, curFrame)
detection_session.previous_frame = curFrame
_, thresh = cv2.threshold(frameDelta, threshold, 255, cv2.THRESH_BINARY)
dilated = cv2.dilate(thresh, None, iterations=2)
fcontours = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = imutils.grab_contours(fcontours)
detections: List[ObjectDetectionResult] = []
detection_result: ObjectsDetected = {}
detection_result['detections'] = detections
detection_result['inputDimensions'] = src_size
for c in contours:
x, y, w, h = cv2.boundingRect(c)
# if w * h != contour_area:
# print("mismatch w/h", contour_area - w * h)
x2, y2, _ = convert_to_src_size((x + w, y + h))
x, y, _ = convert_to_src_size((x, y))
w = x2 - x + 1
h = y2 - y + 1
contour_area = w * h
if not area or contour_area > area:
detection: ObjectDetectionResult = {}
detection['boundingBox'] = (x, y, w, h)
detection['className'] = 'motion'
detection['score'] = 1 if area else contour_area
detections.append(detection)
return detection_result
def run_detection_jpeg(self, detection_session: DetectionSession, image_bytes: bytes, min_score: float) -> ObjectsDetected:
raise Exception('can not run motion detection on jpeg')
def get_detection_input_size(self, src_size):
# The initial implementation of this plugin used BGRA
# because it seemed impossible to pull the Y frame out of I420 without corruption.
# This is because while 318x174 is aspect ratio correct,
# it seems to cause strange issues with stride and the image is skewed.
# By using 300x300, this seems to avoid some undocumented minimum size
        # requirement in gst-videoscale or opencv. Unclear which.
# This is the same input size as tensorflow-lite. Allows for better pipelining.
if not self.retainAspectRatio:
return (300, 300)
width, height = src_size
if (width > height):
if (width > 318):
height = height / width * 318
width = 318
else:
if (height > 318):
width = width / height * 318
height = 318
width = int(np.floor(width / 6) * 6)
height = int(np.floor(height / 6) * 6)
return width, height
def end_session(self, detection_session: OpenCVDetectionSession):
if detection_session and detection_session.cap:
detection_session.cap.release()
detection_session.cap = None
return super().end_session(detection_session)
def run_detection_gstsample(self, detection_session: OpenCVDetectionSession, gst_sample, settings: Any, src_size, convert_to_src_size)-> ObjectsDetected:
buf = gst_sample.get_buffer()
caps = gst_sample.get_caps()
# can't trust the width value, compute the stride
height = caps.get_structure(0).get_value('height')
width = caps.get_structure(0).get_value('width')
result, info = buf.map(Gst.MapFlags.READ)
if not result:
return
try:
mat = np.ndarray(
(height,
width,
self.pixelFormatChannelCount),
buffer=info.data,
                dtype=np.uint8)
return self.detect(detection_session, mat, settings, src_size, convert_to_src_size)
finally:
buf.unmap(info)
def create_detection_session(self):
return OpenCVDetectionSession()
def detection_event_notified(self, settings: Any):
area, threshold, interval = self.parse_settings(settings)
# it is safe to block here because gstreamer creates a queue thread
sleep(interval / 1000)
return super().detection_event_notified(settings)
| 38.472637 | 184 | 0.608431 |
f73fae25872282f55556165e302753b5a8c0c263 | 1,032 | py | Python | tests/tf/examples/test_01_getting_started.py | bschifferer/models-1 | b6042dbd1b98150cc50fd7d2cb6c07033f42fd35 | [
"Apache-2.0"
] | null | null | null | tests/tf/examples/test_01_getting_started.py | bschifferer/models-1 | b6042dbd1b98150cc50fd7d2cb6c07033f42fd35 | [
"Apache-2.0"
] | null | null | null | tests/tf/examples/test_01_getting_started.py | bschifferer/models-1 | b6042dbd1b98150cc50fd7d2cb6c07033f42fd35 | [
"Apache-2.0"
] | null | null | null | from testbook import testbook
from tests.conftest import REPO_ROOT
@testbook(REPO_ROOT / "examples/01-Getting-started.ipynb", execute=False)
def test_func(tb):
tb.inject(
"""
from unittest.mock import patch
from merlin.datasets.synthetic import generate_data
mock_train, mock_valid = generate_data(
input="movielens-1m",
num_rows=1000,
set_sizes=(0.8, 0.2)
)
p1 = patch(
"merlin.datasets.entertainment.get_movielens",
return_value=[mock_train, mock_valid]
)
p1.start()
"""
)
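    # With the patch applied inside the notebook kernel, execution trains on
    # the 1000-row synthetic split instead of downloading the real MovieLens data.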
tb.execute()
metrics = tb.ref("metrics")
assert sorted(list(metrics.keys())) == [
"loss",
"rating_binary/binary_classification_task/auc",
"rating_binary/binary_classification_task/binary_accuracy",
"rating_binary/binary_classification_task/precision",
"rating_binary/binary_classification_task/recall",
"regularization_loss",
"total_loss",
]
| 29.485714 | 73 | 0.627907 |
f73fb2a8a307bf6009f2b5f26b6e89cf305daae6 | 3,771 | py | Python | pysimt/utils/filterchain.py | welvin21/pysimt | 6250b33dc518b3195da4fc9cc8d32ba7ada958c0 | [
"MIT"
] | 34 | 2020-09-21T10:49:57.000Z | 2022-01-08T04:50:42.000Z | pysimt/utils/filterchain.py | welvin21/pysimt | 6250b33dc518b3195da4fc9cc8d32ba7ada958c0 | [
"MIT"
] | 2 | 2021-01-08T03:52:51.000Z | 2021-09-10T07:45:05.000Z | pysimt/utils/filterchain.py | welvin21/pysimt | 6250b33dc518b3195da4fc9cc8d32ba7ada958c0 | [
"MIT"
] | 5 | 2021-04-23T09:30:51.000Z | 2022-01-09T08:40:45.000Z | import re
import pathlib
from typing import List, Union
from .resource_mgr import res_mgr
from .io import fopen
class FilterChain:
"""A sequential filter chain to post-process list of tokens. The **available
filters are:**
`c2w`: Stitches back space delimited characters to words.
Necessary for word-level BLEU, etc. when using char-level NMT.
`lower`: Lowercase the input(s).
`upper`: Uppercase the input(s).
`de-bpe`: Stitches back `@@ ` and ` ##` BPE units.
`de-spm`: Stitches back `▁` Sentence Piece (SPM).
`de-segment`: Converts `<tag:morpheme>` to normal form
`de-compound`: Stitches back German compound splittings (zmorph)
`de-hyphen`: De-hyphenate `foo @-@ bar` constructs of Moses tokenizer.
Args:
filters: A list of strings or a comma-separated string
representing filters to apply.
"""
_FILTERS = {
'de-bpe': lambda s: s.replace("@@ ", "").replace("@@", "").replace(" ##", ""),
'de-tag': lambda s: re.sub('<[a-zA-Z][a-zA-Z]>', '', s),
# Decoder for Google sentenpiece
# only for default params of spm_encode
'de-spm': lambda s: s.replace(" ", "").replace("\u2581", " ").strip(),
# Converts segmentations of <tag:morpheme> to normal form
'de-segment': lambda s: re.sub(' *<.*?:(.*?)>', '\\1', s),
# Space delim character sequence to non-tokenized normal word form
'c2w': lambda s: s.replace(' ', '').replace('<s>', ' ').strip(),
# Filters out fillers from compound splitted sentences
'de-compound': lambda s: (s.replace(" @@ ", "").replace(" @@", "")
.replace(" @", "").replace("@ ", "")),
# de-hyphenate when -a given to Moses tokenizer
'de-hyphen': lambda s: re.sub(r'\s*@-@\s*', '-', s),
'lower': lambda s: s.lower(),
'upper': lambda s: s.upper(),
}
def __init__(self, filters: Union[str, List[str]]):
if isinstance(filters, str):
filters = filters.split(',')
assert not set(filters).difference(set(self._FILTERS.keys())), \
"Unknown evaluation filter given."
self.filters = filters
self._funcs = [self._FILTERS[k] for k in self.filters]
def _apply(self, list_of_strs: List[str]) -> List[str]:
"""Applies filters consecutively on a list of sentences."""
for func in self._funcs:
list_of_strs = [func(s) for s in list_of_strs]
return list_of_strs
def apply(self, _input: Union[List[str], pathlib.Path]) -> List[str]:
"""Applies the filterchain on a given input.
Args:
_input: If `pathlib.Path` (it can also be a glob expression),
temporary file(s) with filters applied are returned.
If a list of sentences is given, a list of post-processed
sentences is returned.
"""
if isinstance(_input, pathlib.Path):
# Need to create copies of reference files with filters applied
# and return their paths instead
fnames = _input.parent.glob(_input.name)
new_fnames = []
for fname in fnames:
lines = []
f = fopen(fname)
for line in f:
lines.append(line.strip())
f.close()
f = res_mgr.get_temp_file()
for line in self._apply(lines):
f.write(line + '\n')
f.close()
new_fnames.append(f.name)
return new_fnames
elif isinstance(_input, list):
return self._apply(_input)
def __repr__(self):
return "FilterChain({})".format(" -> ".join(self.filters))
| 37.336634 | 86 | 0.563511 |
f73fc1c99f41a3f9d8453faeb6830db314e02382 | 13,650 | py | Python | sdk/synapse/azure-synapse-artifacts/azure/synapse/artifacts/aio/operations/_metastore_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | 1 | 2022-02-01T18:50:12.000Z | 2022-02-01T18:50:12.000Z | sdk/synapse/azure-synapse-artifacts/azure/synapse/artifacts/aio/operations/_metastore_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | null | null | null | sdk/synapse/azure-synapse-artifacts/azure/synapse/artifacts/aio/operations/_metastore_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | null | null | null | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Optional, TypeVar
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._metastore_operations import build_delete_request, build_get_database_operations_request, build_register_request, build_update_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class MetastoreOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.synapse.artifacts.aio.ArtifactsClient`'s
:attr:`metastore` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace_async
async def register(
self,
id: str,
input_folder: str,
**kwargs: Any
) -> _models.MetastoreRegistrationResponse:
"""Register files in Syms.
:param id: The name of the database to be created. The name can contain only alphanumeric
characters and should not exceed 24 characters.
:type id: str
:param input_folder: The input folder containing CDM files.
:type input_folder: str
:keyword api_version: Api Version. Default value is "2021-07-01-preview". Note that overriding
this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MetastoreRegistrationResponse, or the result of cls(response)
:rtype: ~azure.synapse.artifacts.models.MetastoreRegistrationResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-07-01-preview")) # type: str
content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json")) # type: Optional[str]
cls = kwargs.pop('cls', None) # type: ClsType[_models.MetastoreRegistrationResponse]
_register_body = _models.MetastoreRegisterObject(input_folder=input_folder)
_json = self._serialize.body(_register_body, 'MetastoreRegisterObject')
request = build_register_request(
id=id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.register.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorContract, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('MetastoreRegistrationResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
register.metadata = {'url': "/metastore/create-database-operations/{id}"} # type: ignore
@distributed_trace_async
async def get_database_operations(
self,
id: str,
**kwargs: Any
) -> _models.MetastoreRequestSuccessResponse:
"""Gets status of the database.
:param id:
:type id: str
:keyword api_version: Api Version. Default value is "2021-07-01-preview". Note that overriding
this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MetastoreRequestSuccessResponse, or the result of cls(response)
:rtype: ~azure.synapse.artifacts.models.MetastoreRequestSuccessResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-07-01-preview")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[_models.MetastoreRequestSuccessResponse]
request = build_get_database_operations_request(
id=id,
api_version=api_version,
template_url=self.get_database_operations.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorContract, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('MetastoreRequestSuccessResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_database_operations.metadata = {'url': "/metastore/create-database-operations/{id}"} # type: ignore
@distributed_trace_async
async def update(
self,
id: str,
input_folder: str,
**kwargs: Any
) -> _models.MetastoreUpdationResponse:
"""Update files in Syms.
:param id: The name of the database to be updated.
:type id: str
:param input_folder: The input folder containing CDM files.
:type input_folder: str
:keyword api_version: Api Version. Default value is "2021-07-01-preview". Note that overriding
this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MetastoreUpdationResponse, or the result of cls(response)
:rtype: ~azure.synapse.artifacts.models.MetastoreUpdationResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-07-01-preview")) # type: str
content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json")) # type: Optional[str]
cls = kwargs.pop('cls', None) # type: ClsType[_models.MetastoreUpdationResponse]
_update_body = _models.MetastoreUpdateObject(input_folder=input_folder)
_json = self._serialize.body(_update_body, 'MetastoreUpdateObject')
request = build_update_request(
id=id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.update.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorContract, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('MetastoreUpdationResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': "/metastore/update-database-operations/{id}"} # type: ignore
@distributed_trace_async
async def delete( # pylint: disable=inconsistent-return-statements
self,
id: str,
**kwargs: Any
) -> None:
"""Remove files in Syms.
:param id:
:type id: str
:keyword api_version: Api Version. Default value is "2021-07-01-preview". Note that overriding
this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-07-01-preview")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[None]
request = build_delete_request(
id=id,
api_version=api_version,
template_url=self.delete.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorContract, pipeline_response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': "/metastore/databases/{id}"} # type: ignore
| 43.196203 | 153 | 0.659487 |
f73fe7eed2cb84dff0a06a8c9956db29b0f9bb02 | 6,481 | py | Python | apps/dot_ext/forms.py | TransparentHealth/bluebutton-web-server | 69b6e649b2e9caf7859a75ea3867bf74c83ca340 | [
"Apache-2.0"
] | 1 | 2020-01-26T16:02:27.000Z | 2020-01-26T16:02:27.000Z | apps/dot_ext/forms.py | TransparentHealth/bluebutton-web-server | 69b6e649b2e9caf7859a75ea3867bf74c83ca340 | [
"Apache-2.0"
] | null | null | null | apps/dot_ext/forms.py | TransparentHealth/bluebutton-web-server | 69b6e649b2e9caf7859a75ea3867bf74c83ca340 | [
"Apache-2.0"
] | 2 | 2018-09-29T18:53:08.000Z | 2020-01-26T16:02:31.000Z | from django.utils.safestring import mark_safe
from django import forms
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from oauth2_provider.forms import AllowForm as DotAllowForm
from oauth2_provider.models import get_application_model
from oauth2_provider.scopes import get_scopes_backend
from oauth2_provider.settings import oauth2_settings
from oauth2_provider.validators import urlsplit
class CustomRegisterApplicationForm(forms.ModelForm):
def __init__(self, user, *args, **kwargs):
agree_label = u'Yes I have read and agree to the <a target="_blank" href="%s">API Terms of Service Agreement</a>*' % (
settings.TOS_URI)
super(CustomRegisterApplicationForm, self).__init__(*args, **kwargs)
self.fields['authorization_grant_type'].choices = settings.GRANT_TYPES
self.fields['client_type'].initial = 'confidential'
self.fields['agree'].label = mark_safe(agree_label)
self.fields['name'].label = "Name*"
self.fields['name'].required = True
self.fields['client_type'].label = "Client Type*"
self.fields[
'authorization_grant_type'].label = "Authorization Grant Type*"
self.fields['redirect_uris'].label = "Redirect URIs*"
class Meta:
model = get_application_model()
fields = ('name',
'client_type',
'authorization_grant_type', 'redirect_uris',
'logo_uri', 'policy_uri', 'tos_uri', 'contacts',
'agree')
required_css_class = 'required'
def clean(self):
client_type = self.cleaned_data.get('client_type')
authorization_grant_type = self.cleaned_data.get(
'authorization_grant_type')
redirect_uris = self.cleaned_data.get('redirect_uris')
msg = ""
validate_error = False
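        # Collect every violation into one message so the user sees all
        # problems at once rather than one per submit.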
# Public clients don't use authorization-code flow
if client_type == 'public' and authorization_grant_type == 'authorization-code':
validate_error = True
msg += 'A public client may not request ' \
'an authorization-code grant type.'
# Confidential clients cannot use implicit authorization_grant_type
if client_type == 'confidential' and authorization_grant_type == 'implicit':
validate_error = True
msg += 'A confidential client may not ' \
'request an implicit grant type.'
        # Native mobile applications using RFC 8252 must supply an allowed
        # redirect URI scheme (https or an app-specific custom scheme).
        for uri in redirect_uris.split():
            scheme, netloc, path, query, fragment = urlsplit(uri)
            valid_schemes = get_allowed_schemes()
            if scheme not in valid_schemes:
                validate_error = True
                msg += '%s is an invalid scheme. Redirect URIs must use %s ' \
                       % (scheme, ' or '.join(valid_schemes))
        if validate_error:
            raise forms.ValidationError(_(msg))
        return self.cleaned_data
def clean_client_type(self):
client_type = self.cleaned_data.get('client_type')
authorization_grant_type = self.cleaned_data.get(
'authorization_grant_type')
if client_type == 'public' and authorization_grant_type == 'authorization-code':
msg = _(
'A public client may not request an '
'authorization-code grant type.')
raise forms.ValidationError(msg)
return client_type
def clean_agree(self):
agree = self.cleaned_data.get('agree')
if not agree:
msg = _('You must agree to the API Terms of Service Agreement')
raise forms.ValidationError(msg)
return agree
def clean_redirect_uris(self):
redirect_uris = self.cleaned_data.get('redirect_uris')
if getattr(settings, 'BLOCK_HTTP_REDIRECT_URIS', True):
if redirect_uris:
for u in redirect_uris.split():
if u.startswith("http://"):
msg = _('Redirect URIs must not use http.')
raise forms.ValidationError(msg)
return redirect_uris
class SimpleAllowForm(DotAllowForm):
pass
class AllowForm(DotAllowForm):
scope = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple)
expires_in = forms.TypedChoiceField(choices=settings.DOT_EXPIRES_IN, coerce=int,
empty_value=None,
label="Access to this application expires in")
def __init__(self, *args, **kwargs):
application = kwargs.pop('application', None)
if application is None:
super(AllowForm, self).__init__(*args, **kwargs)
else:
# we use the application instance to get the list of available scopes
# because it is needed to create the choices list for the `scope`
# field.
available_scopes = get_scopes_backend().get_available_scopes(application)
# set the available_scopes as the initial value so that
# all checkboxes are checked
kwargs['initial']['scope'] = available_scopes
# init the form to create self.fields
super(AllowForm, self).__init__(*args, **kwargs)
# get the list of all the scopes available in the system
# to get the description of each available scope.
all_scopes = get_scopes_backend().get_all_scopes()
choices = [(scope, all_scopes[scope])
for scope in available_scopes]
self.fields['scope'].choices = choices
def get_allowed_schemes():
"""
get allowed_schemes set in OAUTH2_PROVIDER.ALLOWED_REDIRECT_URI_SCHEMES
:return: list
"""
if oauth2_settings.ALLOWED_REDIRECT_URI_SCHEMES:
valid_list = oauth2_settings.ALLOWED_REDIRECT_URI_SCHEMES
else:
valid_list = ['https', ]
return valid_list
| 39.518293 | 126 | 0.629841 |
f74060c1de71240f9718b12324bf4e301a79653a | 8,680 | py | Python | python/infinoted.py | speedyleion/vobby | 7369afb52184358130dd4143a2a19e4c1dbb7ecf | [
"Unlicense"
] | null | null | null | python/infinoted.py | speedyleion/vobby | 7369afb52184358130dd4143a2a19e4c1dbb7ecf | [
"Unlicense"
] | null | null | null | python/infinoted.py | speedyleion/vobby | 7369afb52184358130dd4143a2a19e4c1dbb7ecf | [
"Unlicense"
] | null | null | null | """
This handles the communication to an infinoted server
"""
from twisted.internet import reactor
from twisted.internet.defer import Deferred
from twisted.names.srvconnect import SRVConnector
from twisted.words.xish import domish
from twisted.words.protocols.jabber import xmlstream, client
from twisted.words.protocols.jabber.jid import JID
from twisted.words.protocols.jabber.sasl import SASLInitiatingInitializer
from twisted.python import log
class InfinotedProtocol(object):
"""
TODO this needs to be examined, probably should be an actual protocol/factory setup
"""
def __init__(self, service):
jid = JID('127.0.0.1')
f = client.XMPPClientFactory(jid, '')
f.addBootstrap(xmlstream.STREAM_CONNECTED_EVENT, self.connected)
f.addBootstrap(xmlstream.STREAM_AUTHD_EVENT, self.authenticated)
connector = SRVConnector(
reactor, 'xmpp-client', jid.host, f, defaultPort=6523)
connector.connect()
self.finished = Deferred()
self.files = {}
self.service = service
def rawDataIn(self, buf):
log.msg("RECV: %s" % unicode(buf, 'utf-8').encode('ascii', 'replace'))
def rawDataOut(self, buf):
log.msg("SEND: %s" % unicode(buf, 'utf-8').encode('ascii', 'replace'))
def connected(self, xs):
log.msg('Connected.')
self.xmlstream = xs
# Log all traffic
xs.rawDataInFn = self.rawDataIn
xs.rawDataOutFn = self.rawDataOut
# Need to inject our on challenge before twisted words sasl version.
xs.addObserver('/challenge', self.challenge, 100)
xs.addObserver('/group/welcome', self.welcome)
xs.addObserver('/group/explore-begin', self.explore)
xs.addObserver('/group/explore-end', self.explore_end)
xs.addObserver('/group/subscribe-chat', self.subscribe)
xs.addObserver('/group/subscribe-session', self.subscribe_session)
xs.addObserver('/group/sync-begin', self.sync_begin)
xs.addObserver('/group/sync-end', self.sync_end)
xs.addObserver('/group/user-rejoin', self.user_rejoin)
xs.addObserver('/group/add-node', self.add_node)
xs.addObserver('/group/sync-segment', self.sync_segment)
xs.addObserver('/group/sync-user', self.sync_user)
xs.addObserver('/group/request/insert-caret', self.insert)
xs.addObserver('/group/request/delete-caret', self.delete)
def delete(self, element):
delete_node = element.firstChildElement().firstChildElement()
offset = int(delete_node['pos'])
length = int(delete_node['len'])
self.service.delete_vim(offset, length,
self.files.keys()[0].encode('ascii', 'ignore'))
def insert(self, element):
"""
This will send an insert text command to teh associated Vim instance.
"""
insert_node = element.firstChildElement().firstChildElement()
offset = insert_node['pos']
self.service.insert_vim(str(insert_node).encode('ascii', 'ignore'), int(offset),
self.files.keys()[0].encode('ascii', 'ignore'))
def subscribe(self, element):
# TODO this needs to be more robust and really ack
self.xmlstream.send(u'<group publisher="you" name="InfDirectory">'
'<subscribe-ack/></group>')
def delete_text(self, offset, length, buffer_name):
"""TODO: Docstring for delete_text.
Args:
offset (TODO): TODO
length (TODO): TODO
buffer_name (TODO): TODO
Returns: TODO
"""
self.xmlstream.send(u'<group publisher="you" name="' + self.session + '">'
'<request user="' + self.user_id + '" time="">'
'<delete-caret pos="' + str(offset) + '" len="' + str(length)
+ '"/></request></group>')
def insert_text(self, text, position, buffer_name):
"""
This will insert text into the subscribed buffer.
TODO might need to pass the buffer/sequence id in here
Args:
position (int): The caret position in the buffer to insert into, 0 based.
text (string): the text to insert, often one character.
Example: xml from gobby packet sniffing
<group publisher="you" name="InfSession_3">
<request user="1" time="">
<insert-caret pos="0">T</insert-caret>
</request>
</group>
"""
self.xmlstream.send(u'<group publisher="you" name="' + self.session + '">'
'<request user="' + self.user_id + '" time="">'
'<insert-caret pos="' + str(position) + '">' + text +
'</insert-caret>'
'</request>'
'</group>')
def explore_end(self, element):
"""
        Once we get the explore-end, we can send another message.
"""
self.service.new_buffer(self.files.keys()[0].encode('ascii', 'ignore'))
file_id = self.files.values()[0]
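        # Only the first discovered document is opened/subscribed for now;
        # multi-document sessions are not handled.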
self.xmlstream.send(u'<group publisher="you" name="InfDirectory">'
'<subscribe-session seq="2" id="' + str(file_id) + '"/>'
'</group>')
def subscribe_session(self, element):
"""
This will send back an ack if we get the expected subscription confirmation
"""
node = element.firstChildElement()
self.session = node['group']
self.xmlstream.send(u'<group publisher="you" name="InfDirectory">'
'<subscribe-ack id="' + node['id'] + '"/>'
'</group>')
def sync_begin(self, element):
"""
Handle the syncing of the file.
"""
# TODO this should be updating the Vim buffer
pass
def sync_user(self, element):
"""
This should be storing/creating data about a user but forget it for now
"""
pass
def sync_segment(self, element):
"""
This is the message to update the buffer with what gobby has.
"""
node = element.firstChildElement()
self.service.sync_vim(str(node).encode('ascii', 'ignore'),
self.files.keys()[0].encode('ascii', 'ignore'))
def sync_end(self, element):
"""
Done with syncing send back an ack
"""
self.xmlstream.send(u'<group publisher="you" name="' + element['name'] + '">'
'<sync-ack/></group>')
self.user_join(self.session)
def user_join(self, name):
"""
This will join to a file or a chat group
"""
self.xmlstream.send(u'<group publisher="you" name="' + name + '">'
'<user-join seq="0" name="Bob" status="active" '
'time="" caret="0" hue="0.28028500000000001"/>'
'</group>')
def user_rejoin(self, element):
"""
Save off the id given from infinoted
"""
node = element.firstChildElement()
self.user_id = node['id']
def explore(self, element):
"""
NOt sure if I really need to do anything here...???
"""
pass
def add_node(self, element):
"""
This will add a file to the list of available files.
"""
        # There may be a better way to hook this, but I keep getting the root,
        # not the element I care about.
node = element.firstChildElement()
self.files[node['name']] = node['id']
def welcome(self, element):
self.xmlstream.send(u'<group publisher="you" name="InfDirectory"><explore-node '
'seq="0" id="0" /></group>')
def challenge(self, element):
        # Super hack: not sure of the exact problem, but per RFC 2245 anonymous
        # SASL should still work with a challenge/response exchange.
event, observers = self.xmlstream._getEventAndObservers('/challenge')
# Grab priority 0 observer callbacks
for observer in observers[0].values():
for callback in observer.callbacks:
if callback.im_class == SASLInitiatingInitializer:
if not getattr(callback.im_self.mechanism, 'getResponse', None):
callback.im_self.mechanism.getResponse = lambda s: s + 'hello'
def authenticated(self, xs):
log.msg('Authenticated.')
presence = domish.Element((None, 'presence'))
xs.send(presence)
| 38.070175 | 90 | 0.574539 |
f74063724c532a90b8fe454df280bd34fbbc81f0 | 8,355 | py | Python | third_party/WebKit/Source/devtools/PRESUBMIT.py | Wzzzx/chromium-crosswalk | 768dde8efa71169f1c1113ca6ef322f1e8c9e7de | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2 | 2019-01-28T08:09:58.000Z | 2021-11-15T15:32:10.000Z | third_party/WebKit/Source/devtools/PRESUBMIT.py | Wzzzx/chromium-crosswalk | 768dde8efa71169f1c1113ca6ef322f1e8c9e7de | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | third_party/WebKit/Source/devtools/PRESUBMIT.py | Wzzzx/chromium-crosswalk | 768dde8efa71169f1c1113ca6ef322f1e8c9e7de | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 6 | 2020-09-23T08:56:12.000Z | 2021-11-18T03:40:49.000Z | # Copyright (C) 2014 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""DevTools JSDoc validator presubmit script
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
import sys
compile_note = "Be sure to run your patch by the compile_frontend.py script prior to committing!"
def _CheckDevtoolsStyle(input_api, output_api):
local_paths = [f.AbsoluteLocalPath() for f in input_api.AffectedFiles() if f.Action() != "D"]
devtools_root = input_api.PresubmitLocalPath()
devtools_front_end = input_api.os_path.join(devtools_root, "front_end")
affected_front_end_files = [file_name for file_name in local_paths if devtools_front_end in file_name and file_name.endswith(".js")]
affected_front_end_files = [input_api.os_path.relpath(file_name, devtools_root) for file_name in affected_front_end_files]
if len(affected_front_end_files) > 0:
lint_path = input_api.os_path.join(input_api.PresubmitLocalPath(),
"scripts", "lint_javascript.py")
process = input_api.subprocess.Popen(
[input_api.python_executable, lint_path] + affected_front_end_files,
stdout=input_api.subprocess.PIPE,
stderr=input_api.subprocess.STDOUT)
out, _ = process.communicate()
if process.returncode != 0:
return [output_api.PresubmitError(out)]
if "NOTE" in out:
return [output_api.PresubmitNotifyResult(out)]
return []
def _CompileDevtoolsFrontend(input_api, output_api):
local_paths = [f.LocalPath() for f in input_api.AffectedFiles()]
# FIXME: The compilation does not actually run if injected script-related files
# have changed, as they reside in core/inspector, which is not affected
# by this presubmit.
# Once this is fixed, injected_script_externs.js
# should be added to the list of triggers.
devtools_front_end = input_api.os_path.join("devtools", "front_end")
if (any(devtools_front_end in path for path in local_paths) or
any("protocol.json" in path for path in local_paths) or
any("compile_frontend.py" in path for path in local_paths) or
any("InjectedScriptSource.js" in path for path in local_paths) or
any("DebuggerScript.js" in path for path in local_paths)):
lint_path = input_api.os_path.join(input_api.PresubmitLocalPath(),
"scripts", "compile_frontend.py")
out, _ = input_api.subprocess.Popen(
[input_api.python_executable, lint_path],
stdout=input_api.subprocess.PIPE,
stderr=input_api.subprocess.STDOUT).communicate()
if "ERROR" in out or "WARNING" in out:
return [output_api.PresubmitError(out)]
if "NOTE" in out:
return [output_api.PresubmitPromptWarning(out + compile_note)]
return []
def _CheckConvertSVGToPNGHashes(input_api, output_api):
if not input_api.platform.startswith('linux'):
return []
original_sys_path = sys.path
try:
sys.path = sys.path + [input_api.os_path.join(input_api.PresubmitLocalPath(), 'scripts')]
import devtools_file_hashes
finally:
sys.path = original_sys_path
absolute_local_paths = [af.AbsoluteLocalPath() for af in input_api.AffectedFiles(include_deletes=False)]
images_src_path = input_api.os_path.join("devtools", "front_end", "Images", "src")
image_source_file_paths = [path for path in absolute_local_paths if images_src_path in path and path.endswith(".svg")]
image_sources_path = input_api.os_path.join(input_api.PresubmitLocalPath(), "front_end", "Images", "src")
hashes_file_name = "svg2png.hashes"
hashes_file_path = input_api.os_path.join(image_sources_path, hashes_file_name)
invalid_hash_file_paths = devtools_file_hashes.files_with_invalid_hashes(hashes_file_path, image_source_file_paths)
if len(invalid_hash_file_paths) == 0:
return []
invalid_hash_file_names = [input_api.os_path.basename(file_path) for file_path in invalid_hash_file_paths]
file_paths_str = ", ".join(invalid_hash_file_names)
error_message = "The following SVG files should be converted to PNG using convert_svg_images_png.py script before uploading: \n - %s" % file_paths_str
return [output_api.PresubmitError(error_message)]
def _CheckOptimizePNGHashes(input_api, output_api):
if not input_api.platform.startswith('linux'):
return []
original_sys_path = sys.path
try:
sys.path = sys.path + [input_api.os_path.join(input_api.PresubmitLocalPath(), 'scripts')]
import devtools_file_hashes
finally:
sys.path = original_sys_path
absolute_local_paths = [af.AbsoluteLocalPath() for af in input_api.AffectedFiles(include_deletes=False)]
images_src_path = input_api.os_path.join("devtools", "front_end", "Images", "src")
image_source_file_paths = [path for path in absolute_local_paths if images_src_path in path and path.endswith(".svg")]
image_sources_path = input_api.os_path.join(input_api.PresubmitLocalPath(), "front_end", "Images", "src")
hashes_file_name = "optimize_png.hashes"
hashes_file_path = input_api.os_path.join(image_sources_path, hashes_file_name)
invalid_hash_file_paths = devtools_file_hashes.files_with_invalid_hashes(hashes_file_path, image_source_file_paths)
if len(invalid_hash_file_paths) == 0:
return []
invalid_hash_file_names = [input_api.os_path.basename(file_path) for file_path in invalid_hash_file_paths]
file_paths_str = ", ".join(invalid_hash_file_names)
error_message = "The following PNG files should be optimized using optimize_png_images.py script before uploading: \n - %s" % file_paths_str
return [output_api.PresubmitError(error_message)]
def _CheckCSSViolations(input_api, output_api):
results = []
for f in input_api.AffectedFiles(include_deletes=False):
if not f.LocalPath().endswith(".css"):
continue
for line_number, line in f.ChangedContents():
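            # ChangedContents() yields (line_number, text) pairs for the lines
            # touched by this change, so pre-existing violations are not flagged.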
if "/deep/" in line:
results.append(output_api.PresubmitError(("%s:%d uses /deep/ selector") % (f.LocalPath(), line_number)))
if "::shadow" in line:
results.append(output_api.PresubmitError(("%s:%d uses ::shadow selector") % (f.LocalPath(), line_number)))
return results
def CheckChangeOnUpload(input_api, output_api):
results = []
results.extend(_CheckDevtoolsStyle(input_api, output_api))
results.extend(_CompileDevtoolsFrontend(input_api, output_api))
results.extend(_CheckConvertSVGToPNGHashes(input_api, output_api))
results.extend(_CheckOptimizePNGHashes(input_api, output_api))
results.extend(_CheckCSSViolations(input_api, output_api))
return results
def CheckChangeOnCommit(input_api, output_api):
return []
| 50.331325 | 155 | 0.735727 |
f7409aa4a047d52ccbf948a6164682ae33e8686a | 1,025 | py | Python | test_project/test_project/urls.py | mpasternak/django-nginx-http-push-stream | 9689071ca53fe958c9652dafe87316ed097a1ec8 | [
"MIT"
] | 4 | 2019-01-19T15:56:52.000Z | 2020-01-16T08:13:50.000Z | test_project/test_project/urls.py | mpasternak/django-nginx-http-push-stream | 9689071ca53fe958c9652dafe87316ed097a1ec8 | [
"MIT"
] | 3 | 2019-03-01T18:13:12.000Z | 2021-06-10T21:08:35.000Z | test_project/test_project/urls.py | mpasternak/django-nginx-http-push-stream | 9689071ca53fe958c9652dafe87316ed097a1ec8 | [
"MIT"
] | null | null | null | """test_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from nginx_push_stream.auth import auth_request
from test_app.views import TestAppView, message_received
urlpatterns = [
path('admin/', admin.site.urls),
path('', TestAppView.as_view()),
path('message_received', message_received, name="message_received"),
path('websocket-auth/', auth_request, name="auth")
]
| 36.607143 | 77 | 0.725854 |
f740b71e6b91cf892d29dabb0041c1e46760b9cc | 5,955 | py | Python | leetcode_python/Two_Pointers/linked-list-cycle-ii.py | yennanliu/Python_basics | 6a597442d39468295946cefbfb11d08f61424dc3 | [
"Unlicense"
] | null | null | null | leetcode_python/Two_Pointers/linked-list-cycle-ii.py | yennanliu/Python_basics | 6a597442d39468295946cefbfb11d08f61424dc3 | [
"Unlicense"
] | null | null | null | leetcode_python/Two_Pointers/linked-list-cycle-ii.py | yennanliu/Python_basics | 6a597442d39468295946cefbfb11d08f61424dc3 | [
"Unlicense"
] | null | null | null | """
142. Linked List Cycle II
Medium
Given the head of a linked list, return the node where the cycle begins. If there is no cycle, return null.
There is a cycle in a linked list if there is some node in the list that can be reached again by continuously following the next pointer. Internally, pos is used to denote the index of the node that tail's next pointer is connected to (0-indexed). It is -1 if there is no cycle. Note that pos is not passed as a parameter.
Do not modify the linked list.
Example 1:
Input: head = [3,2,0,-4], pos = 1
Output: tail connects to node index 1
Explanation: There is a cycle in the linked list, where tail connects to the second node.
Example 2:
Input: head = [1,2], pos = 0
Output: tail connects to node index 0
Explanation: There is a cycle in the linked list, where tail connects to the first node.
Example 3:
Input: head = [1], pos = -1
Output: no cycle
Explanation: There is no cycle in the linked list.
Constraints:
The number of the nodes in the list is in the range [0, 10^4].
-10^5 <= Node.val <= 10^5
pos is -1 or a valid index in the linked-list.
Follow up: Can you solve it using O(1) (i.e. constant) memory?
"""
# V0
# IDEA : 2 pointers + linked list basics
# https://github.com/yennanliu/CS_basics/blob/master/doc/cheatsheet/2_pointers.md
class Solution:
def detectCycle(self, head):
if not head or not head.next:
return
slow = fast = head
while fast and fast.next:
fast = fast.next.next
slow = slow.next
if fast == slow:
break
#print ("slow = " + str(slow) + " fast = " + str(fast))
### NOTE : via below condition check if is a cycle linked list
if not fast or not fast.next:
return
"""
### NOTE : re-init slow or fast as head (from starting point)
-> can init slow or head
"""
slow = head
#fast = head
"""
### NOTE : check while slow != fast
### NOTE : use the same speed
"""
while slow != fast:
fast = fast.next
slow = slow.next
return slow
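# Quick illustrative check of the two-pointer approach above (not part of the
# original write-up): a throwaway node type stands in for LeetCode's ListNode.
class _Node(object):
    def __init__(self, val):
        self.val = val
        self.next = None

if __name__ == "__main__":
    _nodes = [_Node(v) for v in (3, 2, 0, -4)]
    for _a, _b in zip(_nodes, _nodes[1:]):
        _a.next = _b
    _nodes[-1].next = _nodes[1]  # tail connects back to node index 1
    assert Solution().detectCycle(_nodes[0]) is _nodes[1]
    print("cycle starts at value", _nodes[1].val)  # -> 2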
# V0'
# IDEA : SET
class Solution(object):
def detectCycle(self, head):
if not head or not head.next:
return
s = set()
while head:
s.add(head)
head = head.next
if head in s:
return head
return
# V0'
# IDEA : SET
class Solution(object):
def detectCycle(self, head):
if not head: return None
visited = set()
while head:
if head in visited:
return head
visited.add(head)
head = head.next
return None
# V1
# https://blog.csdn.net/fuxuemingzhu/article/details/79530638
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def detectCycle(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
slow, fast = head, head
while fast and fast.next:
fast = fast.next.next
slow = slow.next
if fast == slow:
break
if not fast or not fast.next:
return None
slow = head
while slow != fast:
slow = slow.next
fast = fast.next
return fast
# V1'
# https://blog.csdn.net/fuxuemingzhu/article/details/79530638
# IDEA : SET
class Solution(object):
def detectCycle(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if not head: return None
visited = set()
while head:
if head in visited:
return head
visited.add(head)
head = head.next
return None
# V1''
# http://bookshadow.com/weblog/2015/07/10/leetcode-linked-list-cycle-ii/
class Solution:
# @param head, a ListNode
# @return a list node
def detectCycle(self, head):
if head is None or head.next is None:
return None
slow, fast = head.next, head.next.next
while fast and fast.next and slow != fast:
fast = fast.next.next
slow = slow.next
if fast is None or fast.next is None:
return None
slow = head
while slow != fast:
slow, fast = slow.next, fast.next
return slow
# V2
# https://www.cnblogs.com/zuoyuan/p/3701877.html
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
# @param head, a ListNode
# @return a list node
def detectCycle(self, head):
        if head is None or head.next is None:
            return None
        slow = fast = head
        while fast and fast.next:
            slow = slow.next
            fast = fast.next.next
            if fast == slow:
                break
        if slow == fast:
            slow = head
            while slow != fast:
                slow = slow.next
                fast = fast.next
            return slow
        return None
# V3
# Time: O(n)
# Space: O(1)
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
def __str__(self):
if self:
return "{}".format(self.val)
else:
return None
class Solution(object):
# @param head, a ListNode
# @return a list node
def detectCycle(self, head):
        fast, slow = head, head
        while fast and fast.next:
            fast, slow = fast.next.next, slow.next
            if fast is slow:
                fast = head
                while fast is not slow:
                    fast, slow = fast.next, slow.next
                return fast
        return None
| 27.068182 | 322 | 0.560705 |
f7410499d1122699b82ce6a7c68b6aaced6ae347 | 578 | py | Python | lab/refactoring/replace_nested_conditional_with_guard_clauses.py | sprajjwal/spd2.3-Testing_and_Architecture | 333e4e191d51eae3f8e84b2aca2f2f63731a22aa | [
"MIT"
] | null | null | null | lab/refactoring/replace_nested_conditional_with_guard_clauses.py | sprajjwal/spd2.3-Testing_and_Architecture | 333e4e191d51eae3f8e84b2aca2f2f63731a22aa | [
"MIT"
] | null | null | null | lab/refactoring/replace_nested_conditional_with_guard_clauses.py | sprajjwal/spd2.3-Testing_and_Architecture | 333e4e191d51eae3f8e84b2aca2f2f63731a22aa | [
"MIT"
] | null | null | null | # by Kami Bigdely
# Replace nested conditional with guard clauses
def extract_position(line):
if line is not None and "x:" in line:
start_index = line.find("x:") + 2
pos = line[start_index:] # from start_index to the end.
else:
pos = None
return pos
if __name__ == "__main__":
result1 = extract_position("|error| numerical calculations could not converge.")
print(result1)
result2 = extract_position(
"|update| the positron location in the particle accelerator is x:21.432"
)
print(result2) | 28.9 | 85 | 0.640138 |
f74105373cdd2d7cc2a1c4cfafa7abdd7842398a | 4,119 | py | Python | sunpy/tests/setup_command.py | Naman9639/sunpy | 24c0cfbd9b03d7f9554bc86036fac2b78a5fcc56 | [
"BSD-2-Clause"
] | null | null | null | sunpy/tests/setup_command.py | Naman9639/sunpy | 24c0cfbd9b03d7f9554bc86036fac2b78a5fcc56 | [
"BSD-2-Clause"
] | null | null | null | sunpy/tests/setup_command.py | Naman9639/sunpy | 24c0cfbd9b03d7f9554bc86036fac2b78a5fcc56 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
This file is designed to be imported and ran only via setup.py, hence it's
dependency on astropy_helpers which will be available in that context.
"""
import os
import copy
from astropy_helpers.commands.test import AstropyTest
class SunPyTest(AstropyTest):
description = 'Run the tests for this package'
user_options = copy.copy(AstropyTest.user_options)
user_options.remove(('remote-data=', 'R',
'Run tests that download remote data. Should be '
'one of none/astropy/any (defaults to none).'))
user_options += [('online', 'R',
'Also run tests that do require a internet connection.'),
('online-only', None,
'Only run test that do require a internet connection.'),
('cov-report=', None,
'How to display the coverage report, should be either "html" or "term"'),
('figure', None,
'Run the figure tests.'),
# Run only tests that check figure generation
('figure-only', None,
'Only run tests that compare figures against stored hashes.')]
package_name = ''
def initialize_options(self):
super().initialize_options()
self.online = False
self.online_only = False
self.figure = False
self.figure_only = False
self.cov_report = True
def _generate_coverage_commands(self):
cmd_pre = '' # Commands to run before the test function
# patch the .coverage file so the paths are correct to the directory
# setup.py was run in rather than the temporary directory.
cwd = os.path.abspath(".")
cmd_post = ('from sunpy.tests.helpers import _patch_coverage; '
'import os; '
'test_dir = os.path.abspath("."); '
f'_patch_coverage(test_dir, "{cwd}"); ')
# Make html report the default and make pytest-cov save it to the
# source directory not the temporary directory.
if self.cov_report and (isinstance(self.cov_report, bool) or "html" in self.cov_report):
html_cov = os.path.join(os.path.abspath("."), "htmlcov")
self.cov_report = f'html:{html_cov}'
else:
self.cov_report = self.cov_report
return cmd_pre, cmd_post
def generate_testing_command(self):
"""
Build a Python script to run the tests.
"""
cmd_pre = '' # Commands to run before the test function
cmd_post = '' # Commands to run after the test function
if self.coverage:
pre, post = self._generate_coverage_commands()
cmd_pre += pre
cmd_post += post
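        # The assembled command string is executed by astropy_helpers in a
        # subprocess running from a temporary directory; the repr-interpolated
        # option values below survive that round trip intact.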
cmd = ('{cmd_pre}{0}; import {1.package_name}, sys; result = ('
'{1.package_name}.self_test('
'package={1.package!r}, '
'test_path={1.test_path!r}, '
'args={1.args!r}, '
'coverage={1.coverage!r}, '
'cov_report={1.cov_report!r}, '
'plugins={1.plugins!r}, '
'verbose={1.verbose_results!r}, '
'pastebin={1.pastebin!r}, '
'online={1.online!r}, '
'online_only={1.online_only!r}, '
'figure={1.figure!r}, '
'figure_only={1.figure_only!r}, '
'figure_dir="{figure_dir}", '
'pep8={1.pep8!r}, '
'pdb={1.pdb!r}, '
'open_files={1.open_files!r}, '
'parallel={1.parallel!r}, '
'docs_path={1.docs_path!r}, '
'skip_docs={1.skip_docs!r}, '
'repeat={1.repeat!r})); '
'{cmd_post}'
'sys.exit(result)')
return cmd.format('pass',
self,
figure_dir=os.path.join(os.path.abspath('.'), "figure_test_images"),
cmd_pre=cmd_pre,
cmd_post=cmd_post)
| 38.858491 | 96 | 0.53144 |