code (stringlengths 22–1.05M) | apis (sequencelengths 1–3.31k) | extract_api (stringlengths 75–3.25M)
---|---|---|
import matplotlib.pyplot as plt
import numpy as np
cnames = [
'#F0F8FF',
'#FAEBD7',
'#00FFFF',
'#7FFFD4',
'#F0FFFF',
'#F5F5DC',
'#FFE4C4',
'#000000',
'#FFEBCD',
'#0000FF',
'#8A2BE2',
'#A52A2A',
'#DEB887',
'#5F9EA0',
'#7FFF00',
'#D2691E',
'#FF7F50',
'#6495ED',
'#FFF8DC',
'#DC143C',
'#00FFFF',
'#00008B',
'#008B8B',
'#B8860B',
'#A9A9A9',
'#006400',
'#BDB76B',
'#8B008B',
'#556B2F',
'#FF8C00',
'#9932CC',
'#8B0000',
'#E9967A',
'#8FBC8F',
'#483D8B',
'#2F4F4F',
'#00CED1',
'#9400D3',
'#FF1493',
'#00BFFF',
'#696969',
'#1E90FF',
'#B22222',
'#FFFAF0',
'#228B22',
'#FF00FF',
'#DCDCDC',
'#F8F8FF',
'#FFD700',
'#DAA520',
'#808080',
'#008000',
'#ADFF2F',
'#F0FFF0',
'#FF69B4',
'#CD5C5C',
'#4B0082',
'#FFFFF0',
'#F0E68C',
'#E6E6FA',
'#FFF0F5',
'#7CFC00',
'#FFFACD',
'#ADD8E6',
'#F08080',
'#E0FFFF',
'#FAFAD2',
'#90EE90',
'#D3D3D3',
'#FFB6C1',
'#FFA07A',
'#20B2AA',
'#87CEFA',
'#778899',
'#B0C4DE',
'#FFFFE0',
'#00FF00',
'#32CD32',
'#FAF0E6',
'#FF00FF',
'#800000',
'#66CDAA',
'#0000CD',
'#BA55D3',
'#9370DB',
'#3CB371',
'#7B68EE',
'#00FA9A',
'#48D1CC',
'#C71585',
'#191970',
'#F5FFFA',
'#FFE4E1',
'#FFE4B5',
'#FFDEAD',
'#000080',
'#FDF5E6',
'#808000',
'#6B8E23',
'#FFA500',
'#FF4500',
'#DA70D6',
'#EEE8AA',
'#98FB98',
'#AFEEEE',
'#DB7093',
'#FFEFD5',
'#FFDAB9',
'#CD853F',
'#FFC0CB',
'#DDA0DD',
'#B0E0E6',
'#800080',
'#FF0000',
'#BC8F8F',
'#4169E1',
'#8B4513',
'#FA8072',
'#FAA460',
'#2E8B57',
'#FFF5EE',
'#A0522D',
'#C0C0C0',
'#87CEEB',
'#6A5ACD',
'#708090',
'#FFFAFA',
'#00FF7F',
'#4682B4',
'#D2B48C',
'#008080',
'#D8BFD8',
'#FF6347',
'#40E0D0',
'#EE82EE',
'#F5DEB3',
'#FFFFFF',
'#F5F5F5',
'#FFFF00',
'#9ACD32']
months = {'Jan': [],
'Feb': [],
'Mar': [],
'Apr': [],
'May': [],
'Jun': [],
'Jul': [],
'Aug': [],
'Sep': [],
'Oct': [],
'Nov': [],
'Dec': []
}
def getOwl(monthTable, ID):
result = []
for f in monthTable:
if f[0] == ID:
result.append(f)
return result
def fillNull(months):
months["Jan"].append(0)
months["Feb"].append(0)
months["Mar"].append(0)
months["Apr"].append(0)
months["May"].append(0)
months["Jun"].append(0)
months["Jul"].append(0)
months["Aug"].append(0)
months["Sep"].append(0)
months["Oct"].append(0)
months["Nov"].append(0)
months["Dec"].append(0)
return months
def fillMonths(monthTable, months):
curOwl = monthTable[0][0]
for feature in monthTable:
tempOwl = feature[0]
month = feature[2]
dist = feature[3]
owl = getOwl(monthTable, "1751")
# get all Data for one owl
# fill all month with distance
# missing data = 0 distance
months = fillNull(months)
if month == "01":
months["Jan"][len(months["Jan"])-1] = dist
if month == "02":
months["Feb"][len(months["Feb"])-1] = dist
if month == "03":
months["Mar"][len(months["Mar"])-1] = dist
if month == "04":
months["Apr"][len(months["Apr"])-1] = dist
if month == "05":
months["May"][len(months["May"])-1] = dist
if month == "06":
months["Jun"][len(months["Jun"])-1] = dist
if month == "07":
months["Jul"][len(months["Jul"])-1] = dist
if month == "08":
months["Aug"][len(months["Aug"])-1] = dist
if month == "09":
months["Sep"][len(months["Sep"])-1] = dist
if month == "10":
months["Oct"][len(months["Oct"])-1] = dist
if month == "11":
months["Nov"][len(months["Nov"])-1] = dist
if month == "12":
months["Dec"][len(months["Dec"])-1] = dist
return months
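# --- Hedged stand-in data ---
# `monthTable` is used below but never defined in this snippet. Judging from the
# indexing (row[0] = owl ID, row[2] = zero-padded month, row[3] = distance), it is
# assumed to be a list of tracking records; the rows here are placeholder values
# only and should be replaced by the real data.
monthTable = [
    ["1751", "2015", "01", 12.4],
    ["1751", "2015", "02", 9.8],
    ["1753", "2015", "01", 5.1],
    ["1753", "2015", "03", 7.6],
]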
months = fillMonths(monthTable, months)
X = np.arange(12)
curOwl = [np.nan] * 12  # one slot per month (Jan..Dec)
counter = 0
tempOwl = "0"
lastOwl="none"
for feature in monthTable:
owl = feature[0]
if owl != tempOwl:
tempOwl = owl
t = getOwl(monthTable, feature[0])
for i in t:
month = i[2]
if month == "01":
curOwl[0] = i[3]
if month == "02":
curOwl[1] = i[3]
if month == "03":
curOwl[2] = i[3]
if month == "04":
curOwl[3] = i[3]
if month == "05":
curOwl[4] = i[3]
if month == "06":
curOwl[5] = i[3]
if month == "07":
curOwl[6] = i[3]
if month == "08":
curOwl[7] = i[3]
if month == "09":
curOwl[8] = i[3]
if month == "10":
curOwl[9] = i[3]
if month == "11":
curOwl[10] = i[3]
if month == "12":
curOwl[11] = i[3]
col = cnames[counter % len(cnames)]  # wrap around so the colour index cannot run past the palette
if lastOwl == "none":
plt.bar(X, curOwl, color = col)
else:
plt.bar(X, curOwl, color = col, bottom = lastOwl)
lastOwl = list(curOwl)  # copy the values; reusing the same list object would let later in-place updates shift these bars
counter = counter + 5
plt.show()
| [
"matplotlib.pyplot.bar",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((5007, 5020), 'numpy.arange', 'np.arange', (['(12)'], {}), '(12)\n', (5016, 5020), True, 'import numpy as np\n'), ((6430, 6440), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6438, 6440), True, 'import matplotlib.pyplot as plt\n'), ((6249, 6278), 'matplotlib.pyplot.bar', 'plt.bar', (['X', 'curOwl'], {'color': 'col'}), '(X, curOwl, color=col)\n', (6256, 6278), True, 'import matplotlib.pyplot as plt\n'), ((6307, 6352), 'matplotlib.pyplot.bar', 'plt.bar', (['X', 'curOwl'], {'color': 'col', 'bottom': 'lastOwl'}), '(X, curOwl, color=col, bottom=lastOwl)\n', (6314, 6352), True, 'import matplotlib.pyplot as plt\n')] |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains utilities for downloading and converting datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import pickle
import os
def save_obj(obj, save_dir, name):
with open(os.path.join(save_dir, name + '.pkl'), 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(name, file_dir):
with open(os.path.join(file_dir, name + '.pkl'), 'rb') as f:
return pickle.load(f)
def int64_feature(values):
"""Returns a TF-Feature of int64s.
Args:
values: A scalar or list of values.
Returns:
a TF-Feature.
"""
if not isinstance(values, (tuple, list)):
values = [values]
return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
def floats_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def bytes_feature(values):
"""Returns a TF-Feature of bytes.
Args:
values: A string.
Returns:
a TF-Feature.
"""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values]))
def to_tfexample(image_data, image_format, im_size, bbox, azimuth, elevation, theta):
return tf.train.Example(features=tf.train.Features(feature={
'image/encoded': bytes_feature(image_data),
'image/format': bytes_feature(image_format),
'image/height': int64_feature(im_size[0]),
'image/width': int64_feature(im_size[1]),
'image/bbox': floats_feature(bbox),
'image/viewpoint': floats_feature([azimuth, elevation, theta]),
}))
def image_to_tfexample(image_data, image_format, height, width, class_id):
return tf.train.Example(features=tf.train.Features(feature={
'image/encoded': bytes_feature(image_data),
'image/format': bytes_feature(image_format),
'image/class/label': int64_feature(class_id),
'image/height': int64_feature(height),
'image/width': int64_feature(width),
}))
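# Illustrative only (not part of the original utilities): writing one of the
# Examples built above into a TFRecord file. The path and feature values are
# placeholders; tf.python_io.TFRecordWriter is the TF1-style writer matching the
# tf.Session/tf.placeholder usage in ImageCoder below.
#
# with tf.python_io.TFRecordWriter('/tmp/sample.tfrecord') as writer:
#     example = image_to_tfexample(image_data=encoded_jpeg_bytes,  # placeholder bytes
#                                  image_format=b'jpg',
#                                  height=224, width=224, class_id=3)
#     writer.write(example.SerializeToString())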
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that converts PNG to JPEG data.
self._png_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_png(self._png_data, channels=3)
self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that converts CMYK JPEG data to RGB JPEG data.
self._cmyk_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_jpeg(self._cmyk_data, channels=0)
self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
# Initializes function that encodes RGB JPEG data.
self._encode_image_data = tf.placeholder(dtype=tf.uint8)
self._encode_jpeg = tf.image.encode_jpeg(self._encode_image_data)
def png_to_jpeg(self, image_data):
return self._sess.run(self._png_to_jpeg,
feed_dict={self._png_data: image_data})
def cmyk_to_rgb(self, image_data):
return self._sess.run(self._cmyk_to_rgb,
feed_dict={self._cmyk_data: image_data})
def decode_jpeg(self, image_data):
image = self._sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def encode_jpeg(self, image_data):
image_data = image_data.astype(dtype=np.uint8)
image = self._sess.run(self._encode_jpeg,
feed_dict={self._encode_image_data: image_data})
return image
| [
"tensorflow.train.BytesList",
"pickle.dump",
"tensorflow.train.Int64List",
"tensorflow.image.encode_jpeg",
"tensorflow.Session",
"tensorflow.image.decode_png",
"tensorflow.placeholder",
"pickle.load",
"tensorflow.train.FloatList",
"tensorflow.image.decode_jpeg",
"os.path.join"
] | [((1042, 1086), 'pickle.dump', 'pickle.dump', (['obj', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(obj, f, pickle.HIGHEST_PROTOCOL)\n', (1053, 1086), False, 'import pickle\n'), ((1199, 1213), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1210, 1213), False, 'import pickle\n'), ((2934, 2946), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2944, 2946), True, 'import tensorflow as tf\n'), ((3036, 3067), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.string'}), '(dtype=tf.string)\n', (3050, 3067), True, 'import tensorflow as tf\n'), ((3084, 3131), 'tensorflow.image.decode_png', 'tf.image.decode_png', (['self._png_data'], {'channels': '(3)'}), '(self._png_data, channels=3)\n', (3103, 3131), True, 'import tensorflow as tf\n'), ((3160, 3214), 'tensorflow.image.encode_jpeg', 'tf.image.encode_jpeg', (['image'], {'format': '"""rgb"""', 'quality': '(100)'}), "(image, format='rgb', quality=100)\n", (3180, 3214), True, 'import tensorflow as tf\n'), ((3320, 3351), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.string'}), '(dtype=tf.string)\n', (3334, 3351), True, 'import tensorflow as tf\n'), ((3368, 3417), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['self._cmyk_data'], {'channels': '(0)'}), '(self._cmyk_data, channels=0)\n', (3388, 3417), True, 'import tensorflow as tf\n'), ((3446, 3500), 'tensorflow.image.encode_jpeg', 'tf.image.encode_jpeg', (['image'], {'format': '"""rgb"""', 'quality': '(100)'}), "(image, format='rgb', quality=100)\n", (3466, 3500), True, 'import tensorflow as tf\n'), ((3594, 3625), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.string'}), '(dtype=tf.string)\n', (3608, 3625), True, 'import tensorflow as tf\n'), ((3654, 3710), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['self._decode_jpeg_data'], {'channels': '(3)'}), '(self._decode_jpeg_data, channels=3)\n', (3674, 3710), True, 'import tensorflow as tf\n'), ((3805, 3835), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.uint8'}), '(dtype=tf.uint8)\n', (3819, 3835), True, 'import tensorflow as tf\n'), ((3864, 3909), 'tensorflow.image.encode_jpeg', 'tf.image.encode_jpeg', (['self._encode_image_data'], {}), '(self._encode_image_data)\n', (3884, 3909), True, 'import tensorflow as tf\n'), ((983, 1020), 'os.path.join', 'os.path.join', (['save_dir', "(name + '.pkl')"], {}), "(save_dir, name + '.pkl')\n", (995, 1020), False, 'import os\n'), ((1133, 1170), 'os.path.join', 'os.path.join', (['file_dir', "(name + '.pkl')"], {}), "(file_dir, name + '.pkl')\n", (1145, 1170), False, 'import os\n'), ((1487, 1519), 'tensorflow.train.Int64List', 'tf.train.Int64List', ([], {'value': 'values'}), '(value=values)\n', (1505, 1519), True, 'import tensorflow as tf\n'), ((1589, 1620), 'tensorflow.train.FloatList', 'tf.train.FloatList', ([], {'value': 'value'}), '(value=value)\n', (1607, 1620), True, 'import tensorflow as tf\n'), ((1803, 1837), 'tensorflow.train.BytesList', 'tf.train.BytesList', ([], {'value': '[values]'}), '(value=[values])\n', (1821, 1837), True, 'import tensorflow as tf\n')] |
"""Graphical User Interface (GUI) utility module.
This module contains various tools and utilities used to instantiate annotators and GUI elements.
"""
import logging
import thelper.utils
logger = logging.getLogger(__name__)
def create_key_listener(callback):
"""Returns a key press listener based on pynput.keyboard (used for mocking)."""
import pynput.keyboard
return pynput.keyboard.Listener(on_press=callback)
def create_annotator(session_name, save_dir, config, datasets):
"""Instantiates a GUI annotation tool based on the type contained in the config dictionary.
The tool type is expected to be in the configuration dictionary's `annotator` field, under the `type` key. For more
information on the configuration, refer to :class:`thelper.gui.annotators.Annotator`. The instantiated type must be
compatible with the constructor signature of :class:`thelper.gui.annotators.Annotator`. The object's constructor will
be given the full config dictionary.
Args:
session_name: name of the annotation session used for printing and to create output directories.
save_dir: path to the session directory where annotations and other outputs will be saved.
config: full configuration dictionary that will be parsed for annotator parameters.
datasets: map of named dataset parsers that will provide the data to annotate.
Returns:
The fully-constructed annotator object, ready to begin annotation via its ``run()`` function.
.. seealso::
| :class:`thelper.gui.annotators.Annotator`
"""
if "annotator" not in config or not config["annotator"]:
raise AssertionError("config missing 'annotator' field")
annotator_config = config["annotator"]
if "type" not in annotator_config or not annotator_config["type"]:
raise AssertionError("annotator config missing 'type' field")
annotator_type = thelper.utils.import_class(annotator_config["type"])
return annotator_type(session_name, config, save_dir, datasets)
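# Illustrative config sketch only -- the annotator type string below is
# hypothetical and must point at a class that is importable in your setup and
# compatible with thelper.gui.annotators.Annotator's constructor:
#
# config = {
#     "annotator": {
#         "type": "thelper.gui.annotators.ImageSegmentAnnotator",  # hypothetical name
#         # ...additional parameters consumed by that annotator...
#     },
#     # ...datasets/loaders sections expected by the rest of the framework...
# }
# datasets = {...}  # map of named dataset parsers providing the data to annotate
# annotator = create_annotator("session_001", "./annotations", config, datasets)
# annotator.run()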
| [
"logging.getLogger"
] | [((200, 227), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (217, 227), False, 'import logging\n')] |
from model.adacos import AdaCos
from model.blocks import NetBlock
from tensorflow.keras.layers import Input, Reshape, Conv2D, Activation, Flatten, Dropout, add, GlobalAveragePooling2D
from tensorflow.keras.models import Model
import tensorflow.keras.backend as K
class Net:
def __init__(self, config):
self.model_name = config.model_name
self.start_fine_tune_layer_id = config.start_fine_tune_layer_id
self.end_fine_tune_layer_id = config.end_fine_tune_layer_id
self.embedding_dim = config.embedding_dim
self.embedding_layer_name = config.embedding_layer_name
self.dropout = config.dropout
self.net_blocks = NetBlock(config)
def build_mpsnet_backbone(self, input_shape):
c = [32, 32, 64, 64, 128]
t = [1, 2, 2, 3, 2]
s = [2, 2, 2, 2, 1]
n = [1, 2, 2, 3, 2]
activation='relu'
I = Input(shape = input_shape)
M0 = self.net_blocks.conv_block(I, c[0], 3, s[0], activation=activation)
M1 = self.net_blocks.inverted_residual_block(M0, c=c[1], ks=3, t=t[1], s=s[1], n=n[1], activation=activation)
M0 = self.net_blocks.separable_conv_block(M0, c[1], 3, s[1], activation=None)
A1 = add([M0, M1])
M2 = self.net_blocks.inverted_residual_block(A1, c=c[2], ks=3, t=t[2], s=s[2], n=n[2], activation=activation)
A1 = self.net_blocks.separable_conv_block(A1, c[2], 3, s[2], activation=None)
A2 = add([A1, M2])
M3 = self.net_blocks.inverted_residual_block(A2, c=c[3], ks=3, t=t[3], s=s[3], n=n[3], activation=activation)
A2 = self.net_blocks.separable_conv_block(A2, c[3], 3, s[3], activation=None)
A3 = add([A2, M3])
M4 = self.net_blocks.inverted_residual_block(A3, c=c[4], ks=3, t=t[4], s=s[4], n=n[4], activation=activation)
A3 = self.net_blocks.separable_conv_block(A3, c[4], 3, s[4], activation=None)
A4 = add([A3, M4])
M = self.net_blocks.spp_block(A4, pool_list=[1, 2, 4])
self.backbone = Model(inputs=I, outputs=M, name=self.model_name)
def build_mobilenet_v1_backbone(self, input_shape, alpha=1.0):
I = Input(shape = input_shape)
activation = 'relu'
c = int(32 * alpha)
x = self.net_blocks.conv_block(I, 32, 3, 2, activation=activation)
x = self.net_blocks.bottleneck_v1(x, 64 , 3, s=1, alpha=alpha, activation=activation)
x = self.net_blocks.bottleneck_v1(x, 128, 3, s=2, alpha=alpha, activation=activation)
x = self.net_blocks.bottleneck_v1(x, 128, 3, s=1, alpha=alpha, activation=activation)
x = self.net_blocks.bottleneck_v1(x, 256, 3, s=2, alpha=alpha, activation=activation)
x = self.net_blocks.bottleneck_v1(x, 256, 3, s=1, alpha=alpha, activation=activation)
x = self.net_blocks.bottleneck_v1(x, 512, 3, s=2, alpha=alpha, activation=activation)
x = self.net_blocks.bottleneck_v1(x, 512, 3, s=1, alpha=alpha, activation=activation)
x = self.net_blocks.bottleneck_v1(x, 512, 3, s=1, alpha=alpha, activation=activation)
x = self.net_blocks.bottleneck_v1(x, 512, 3, s=1, alpha=alpha, activation=activation)
x = self.net_blocks.bottleneck_v1(x, 512, 3, s=1, alpha=alpha, activation=activation)
x = self.net_blocks.bottleneck_v1(x, 512, 3, s=1, alpha=alpha, activation=activation)
x = self.net_blocks.bottleneck_v1(x, 1024, 3, s=2, alpha=alpha, activation=activation)
x = self.net_blocks.bottleneck_v1(x, 1024, 3, s=1, alpha=alpha, activation=activation)
x = GlobalAveragePooling2D()(x)
self.backbone = Model(inputs=I, outputs=x, name=self.model_name)
def build_mobilenet_v2_backbone(self, input_shape, alpha=1.0):
c = [32, 16, 24, 32, 64, 96, 160, 320, 1280]
t = [1, 1, 6, 6, 6, 6, 6, 6, 1]
s = [2, 1, 2, 2, 2, 1, 2, 1, 1]
n = [1, 1, 2, 3, 4, 3, 3, 1, 1]
activation = 'relu6'
I = Input(shape = input_shape)
n_filters = self.net_blocks.make_divisible(c[0] * alpha, 8)
x = self.net_blocks.conv_block(I, n_filters, 3, s[0], activation=activation) # (64, 64, 32)
x = self.net_blocks.inverted_residual_block(x, c=c[1], ks=3, t=t[1], s=s[1], n=n[1], alpha=alpha, activation=activation)
x = self.net_blocks.inverted_residual_block(x, c=c[2], ks=3, t=t[2], s=s[2], n=n[2], alpha=alpha, activation=activation)
x = self.net_blocks.inverted_residual_block(x, c=c[3], ks=3, t=t[3], s=s[3], n=n[3], alpha=alpha, activation=activation)
x = self.net_blocks.inverted_residual_block(x, c=c[4], ks=3, t=t[4], s=s[4], n=n[4], alpha=alpha, activation=activation)
x = self.net_blocks.inverted_residual_block(x, c=c[5], ks=3, t=t[5], s=s[5], n=n[5], alpha=alpha, activation=activation)
x = self.net_blocks.inverted_residual_block(x, c=c[6], ks=3, t=t[6], s=s[6], n=n[6], alpha=alpha, activation=activation)
x = self.net_blocks.inverted_residual_block(x, c=c[7], ks=3, t=t[7], s=s[7], n=n[7], alpha=alpha, activation=activation)
if alpha > 1.0:
last_filters = self.net_blocks.make_divisible(c[8] * alpha, 8)
else:
last_filters = c[8]
x = self.net_blocks.conv_block(x, last_filters, 1, 1, activation=activation)
x = GlobalAveragePooling2D()(x)
self.backbone = Model(inputs=I, outputs=x, name=self.model_name)
def build_mobilenet_v3_backbone(self, input_shape, alpha=1.0):
I = Input(shape = input_shape)
x = self.net_blocks.conv_block(I, 16, 3 , 2, activation='hard_swish')
x = self.net_blocks.bottleneck_v3(x, 16 , 3, e=16 , s=1, alpha=alpha, squeeze=False, activation='relu6')
x = self.net_blocks.bottleneck_v3(x, 24 , 3, e=64 , s=2, alpha=alpha, squeeze=False, activation='relu6')
x = self.net_blocks.bottleneck_v3(x, 24 , 3, e=72 , s=1, alpha=alpha, squeeze=False, activation='relu6')
x = self.net_blocks.bottleneck_v3(x, 40 , 5, e=72 , s=2, alpha=alpha, squeeze=True, activation='relu6')
x = self.net_blocks.bottleneck_v3(x, 40 , 5, e=120, s=1, alpha=alpha, squeeze=True, activation='relu6')
x = self.net_blocks.bottleneck_v3(x, 40 , 5, e=120, s=1, alpha=alpha, squeeze=True, activation='relu6')
x = self.net_blocks.bottleneck_v3(x, 80 , 3, e=240, s=2, alpha=alpha, squeeze=False, activation='hard_swish')
x = self.net_blocks.bottleneck_v3(x, 80 , 3, e=200, s=1, alpha=alpha, squeeze=False, activation='hard_swish')
x = self.net_blocks.bottleneck_v3(x, 80 , 3, e=184, s=1, alpha=alpha, squeeze=False, activation='hard_swish')
x = self.net_blocks.bottleneck_v3(x, 80 , 3, e=184, s=1, alpha=alpha, squeeze=False, activation='hard_swish')
x = self.net_blocks.bottleneck_v3(x, 112, 3, e=480, s=1, alpha=alpha, squeeze=True, activation='hard_swish')
x = self.net_blocks.bottleneck_v3(x, 112, 3, e=672, s=1, alpha=alpha, squeeze=True, activation='hard_swish')
x = self.net_blocks.bottleneck_v3(x, 160, 5, e=672, s=2, alpha=alpha, squeeze=True, activation='hard_swish')
x = self.net_blocks.bottleneck_v3(x, 160, 5, e=960, s=1, alpha=alpha, squeeze=True, activation='hard_swish')
x = self.net_blocks.bottleneck_v3(x, 160, 5, e=960, s=1, alpha=alpha, squeeze=True, activation='hard_swish')
x = self.net_blocks.conv_block(x, 960, 1, 1, activation='hard_swish')
x = GlobalAveragePooling2D()(x)
self.backbone = Model(inputs=I, outputs=x, name=self.model_name)
def build_mobilefacenet_backbone(self, input_shape, alpha=1.0):
c = [64, 64, 64, 128, 128, 128, 128]
t = [1, 1, 2, 4, 2, 4, 2]
s = [2, 1, 2, 2, 1, 2, 1]
n = [1, 1, 5, 1, 6, 1, 2]
activation='prelu'
I = Input(shape = input_shape)
x = self.net_blocks.conv_block(I, c[0], 3, s[0], activation=activation)
x = self.net_blocks.separable_conv_block(x, c[1], 3, s[1], activation=activation)  # was passed the undefined name `M`
x = self.net_blocks.inverted_residual_block(x, c=c[2], ks=3, t=t[2], s=s[2], n=n[2], activation=activation)
x = self.net_blocks.inverted_residual_block(x, c=c[3], ks=3, t=t[3], s=s[3], n=n[3], activation=activation)
x = self.net_blocks.inverted_residual_block(x, c=c[4], ks=3, t=t[4], s=s[4], n=n[4], activation=activation)
x = self.net_blocks.inverted_residual_block(x, c=c[5], ks=3, t=t[5], s=s[5], n=n[5], activation=activation)
x = self.net_blocks.inverted_residual_block(x, c=c[6], ks=3, t=t[6], s=s[6], n=n[6], activation=activation)
x = self.net_blocks.conv_block(x, 512, 1, 1, 'valid', activation=activation)
ks = K.int_shape(x)[2]
x = self.net_blocks.depthwise_conv_block(x, ks, 1, padding='valid', activation=None)
self.backbone = Model(inputs=I, outputs=x, name=self.model_name)
def build_softmax_model(self, n_classes):
I=self.backbone.inputs
x=self.backbone.outputs[0]
if(len(x.shape)==2):
c = K.int_shape(x)[self.net_blocks.channel_axis]
x = Reshape((1, 1, c))(x)
x = self.net_blocks.conv_block(x, self.embedding_dim, 1, 1, 'valid', activation=None)
if(self.dropout>0):
x = Dropout(rate=self.dropout)(x)
x = self.net_blocks.conv_block(x, n_classes, 1, 1, activation='softmax', norm=None)
x = Reshape((n_classes,))(x)
self.softmax_model = Model(inputs=I, outputs=x, name=self.model_name)
def build_adacos_model(self):
label = Input(shape=(1,), name='label_input')
softmax = self.softmax_model.outputs[0]
n_classes = K.int_shape(softmax)[-1]
inputs = self.softmax_model.inputs[0]
x = self.softmax_model.layers[self.end_fine_tune_layer_id].output
if(self.dropout>0):
x = Dropout(rate=self.dropout)(x)
x = Flatten(name=self.embedding_layer_name)(x)
break_point = len(self.softmax_model.layers) + self.start_fine_tune_layer_id
for layer in self.softmax_model.layers[:break_point]:
layer.trainable=False
outputs = AdaCos(n_classes, initializer=self.net_blocks.kernel_initializer, regularizer=self.net_blocks.kernel_regularizer, name='adacos')([x, label])
self.adacos_model = Model(inputs = (inputs, label), outputs = outputs, name=self.model_name)
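# Illustrative usage sketch (not part of the original file). `Cfg` is a
# hypothetical stand-in for the config object whose attributes are read in
# __init__; adjust the values to your own training setup.
#
# class Cfg:
#     model_name = "mpsnet"
#     start_fine_tune_layer_id = -4
#     end_fine_tune_layer_id = -2
#     embedding_dim = 128
#     embedding_layer_name = "embedding"
#     dropout = 0.0
#
# net = Net(Cfg())
# net.build_mpsnet_backbone(input_shape=(112, 112, 3))
# net.build_softmax_model(n_classes=1000)
# net.build_adacos_model()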
| [
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.layers.Dropout",
"model.adacos.AdaCos",
"tensorflow.keras.layers.add",
"model.blocks.NetBlock",
"tensorflow.keras.models.Model",
"tensorflow.keras.backend.int_shape",
"tensorflow.keras.layers.Input",
"tensorflow.keras.layers.Flatten"
] | [((650, 666), 'model.blocks.NetBlock', 'NetBlock', (['config'], {}), '(config)\n', (658, 666), False, 'from model.blocks import NetBlock\n'), ((892, 916), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (897, 916), False, 'from tensorflow.keras.layers import Input, Reshape, Conv2D, Activation, Flatten, Dropout, add\n'), ((1226, 1239), 'tensorflow.keras.layers.add', 'add', (['[M0, M1]'], {}), '([M0, M1])\n', (1229, 1239), False, 'from tensorflow.keras.layers import Input, Reshape, Conv2D, Activation, Flatten, Dropout, add\n'), ((1466, 1479), 'tensorflow.keras.layers.add', 'add', (['[A1, M2]'], {}), '([A1, M2])\n', (1469, 1479), False, 'from tensorflow.keras.layers import Input, Reshape, Conv2D, Activation, Flatten, Dropout, add\n'), ((1706, 1719), 'tensorflow.keras.layers.add', 'add', (['[A2, M3]'], {}), '([A2, M3])\n', (1709, 1719), False, 'from tensorflow.keras.layers import Input, Reshape, Conv2D, Activation, Flatten, Dropout, add\n'), ((1946, 1959), 'tensorflow.keras.layers.add', 'add', (['[A3, M4]'], {}), '([A3, M4])\n', (1949, 1959), False, 'from tensorflow.keras.layers import Input, Reshape, Conv2D, Activation, Flatten, Dropout, add\n'), ((2065, 2113), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'I', 'outputs': 'M', 'name': 'self.model_name'}), '(inputs=I, outputs=M, name=self.model_name)\n', (2070, 2113), False, 'from tensorflow.keras.models import Model\n'), ((2207, 2231), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (2212, 2231), False, 'from tensorflow.keras.layers import Input, Reshape, Conv2D, Activation, Flatten, Dropout, add\n'), ((3658, 3706), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'I', 'outputs': 'x', 'name': 'self.model_name'}), '(inputs=I, outputs=x, name=self.model_name)\n', (3663, 3706), False, 'from tensorflow.keras.models import Model\n'), ((4003, 4027), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (4008, 4027), False, 'from tensorflow.keras.layers import Input, Reshape, Conv2D, Activation, Flatten, Dropout, add\n'), ((5401, 5449), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'I', 'outputs': 'x', 'name': 'self.model_name'}), '(inputs=I, outputs=x, name=self.model_name)\n', (5406, 5449), False, 'from tensorflow.keras.models import Model\n'), ((5543, 5567), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (5548, 5567), False, 'from tensorflow.keras.layers import Input, Reshape, Conv2D, Activation, Flatten, Dropout, add\n'), ((7552, 7600), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'I', 'outputs': 'x', 'name': 'self.model_name'}), '(inputs=I, outputs=x, name=self.model_name)\n', (7557, 7600), False, 'from tensorflow.keras.models import Model\n'), ((7869, 7893), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (7874, 7893), False, 'from tensorflow.keras.layers import Input, Reshape, Conv2D, Activation, Flatten, Dropout, add\n'), ((8892, 8940), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'I', 'outputs': 'x', 'name': 'self.model_name'}), '(inputs=I, outputs=x, name=self.model_name)\n', (8897, 8940), False, 'from tensorflow.keras.models import Model\n'), ((9553, 9601), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'I', 'outputs': 'x', 'name': 'self.model_name'}), '(inputs=I, outputs=x, name=self.model_name)\n', (9558, 9601), False, 
'from tensorflow.keras.models import Model\n'), ((9663, 9700), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(1,)', 'name': '"""label_input"""'}), "(shape=(1,), name='label_input')\n", (9668, 9700), False, 'from tensorflow.keras.layers import Input, Reshape, Conv2D, Activation, Flatten, Dropout, add\n'), ((10472, 10540), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '(inputs, label)', 'outputs': 'outputs', 'name': 'self.model_name'}), '(inputs=(inputs, label), outputs=outputs, name=self.model_name)\n', (10477, 10540), False, 'from tensorflow.keras.models import Model\n'), ((8748, 8762), 'tensorflow.keras.backend.int_shape', 'K.int_shape', (['x'], {}), '(x)\n', (8759, 8762), True, 'import tensorflow.keras.backend as K\n'), ((9490, 9511), 'tensorflow.keras.layers.Reshape', 'Reshape', (['(n_classes,)'], {}), '((n_classes,))\n', (9497, 9511), False, 'from tensorflow.keras.layers import Input, Reshape, Conv2D, Activation, Flatten, Dropout, add\n'), ((9769, 9789), 'tensorflow.keras.backend.int_shape', 'K.int_shape', (['softmax'], {}), '(softmax)\n', (9780, 9789), True, 'import tensorflow.keras.backend as K\n'), ((10021, 10060), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {'name': 'self.embedding_layer_name'}), '(name=self.embedding_layer_name)\n', (10028, 10060), False, 'from tensorflow.keras.layers import Input, Reshape, Conv2D, Activation, Flatten, Dropout, add\n'), ((10294, 10426), 'model.adacos.AdaCos', 'AdaCos', (['n_classes'], {'initializer': 'self.net_blocks.kernel_initializer', 'regularizer': 'self.net_blocks.kernel_regularizer', 'name': '"""adacos"""'}), "(n_classes, initializer=self.net_blocks.kernel_initializer,\n regularizer=self.net_blocks.kernel_regularizer, name='adacos')\n", (10300, 10426), False, 'from model.adacos import AdaCos\n'), ((9125, 9139), 'tensorflow.keras.backend.int_shape', 'K.int_shape', (['x'], {}), '(x)\n', (9136, 9139), True, 'import tensorflow.keras.backend as K\n'), ((9186, 9204), 'tensorflow.keras.layers.Reshape', 'Reshape', (['(1, 1, c)'], {}), '((1, 1, c))\n', (9193, 9204), False, 'from tensorflow.keras.layers import Input, Reshape, Conv2D, Activation, Flatten, Dropout, add\n'), ((9356, 9377), 'tensorflow.keras.layers.Dropout', 'Dropout', ([], {'rate': 'dropout'}), '(rate=dropout)\n', (9363, 9377), False, 'from tensorflow.keras.layers import Input, Reshape, Conv2D, Activation, Flatten, Dropout, add\n'), ((9967, 9988), 'tensorflow.keras.layers.Dropout', 'Dropout', ([], {'rate': 'dropout'}), '(rate=dropout)\n', (9974, 9988), False, 'from tensorflow.keras.layers import Input, Reshape, Conv2D, Activation, Flatten, Dropout, add\n')] |
"""A wrapper for the colorama module."""
"""
Copyright 2019 - 2020 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from colorama import init, Fore, Back, Style
import os
def printcol(text, fore_col=None, back_col=None, shade=None, end=None):
"""A function which prints the text in the specified colour on the specified background.
Arguments:
text - The text to print to the screen in the required format.
fore_col - The colour of the text to print the text in. Default: white, can be either of: red, light red, magenta,
light Magenta, yellow, light yellow, green, light green, blue, light blue, cyan, light cyan, black or white
back_col - The colour to print the text onto. Default: black, can be either of: red, light red, magenta, light
magenta, yellow, light yellow, green, light green, blue, light blue, cyan, light cyan, black or white
shade - The shade of the colour to use. Default: normal, can be either of: dim, normal, bright
end - What character to end the print line with. By default this is the newline character. This can be set to an
empty string to change the colour of the text being printed out.
"""
# Handle the keyword arguments so that they still work correctly when the terminal is used, this allows any not
# defined to be set to the default. E.G. It is possible to run printcol("Some text") and still get some output,
# This will be white text using the normal shade on a white background, this is normal print for cmd, but may be
# different for other terminals.
if fore_col is None:
fore_col = "white"
if back_col is None:
back_col = "black"
if shade is None:
shade = "normal"
if end is None:
end = "\n"
# Convert the inputs into lowercase names to be checked
fore_col = fore_col.lower()
back_col = back_col.lower()
shade = shade.lower()
# Check if running from pycharm
is_running_pycharm = "PYCHARM_HOSTED" in os.environ
if is_running_pycharm:
convert = False
strip = False
else:
convert = None
strip = None
init(autoreset=True, convert=convert, strip=strip) # Make sure the next print statement runs correctly
# Define values for each style and colour
shades = {"dim": Style.DIM, "bright": Style.BRIGHT, "normal": Style.NORMAL} # When underline is available add Style.UNDERLINED
fore_cols = {"red": Fore.RED, "light red": Fore.LIGHTRED_EX, "magenta": Fore.MAGENTA, "light magenta": Fore.LIGHTMAGENTA_EX, "yellow": Fore.YELLOW, "light yellow": Fore.LIGHTYELLOW_EX, "green": Fore.GREEN, "light green": Fore.LIGHTGREEN_EX, "blue": Fore.BLUE, "light blue": Fore.LIGHTBLUE_EX, "cyan": Fore.CYAN, "light cyan": Fore.LIGHTCYAN_EX, "black": Fore.BLACK}
back_cols = {"red": Back.RED, "light red": Back.LIGHTRED_EX, "magenta": Back.MAGENTA, "light magenta": Back.LIGHTMAGENTA_EX, "yellow": Back.YELLOW, "light yellow": Back.LIGHTYELLOW_EX, "green": Back.GREEN, "light green": Back.LIGHTGREEN_EX, "blue": Back.BLUE, "light blue": Back.LIGHTBLUE_EX, "cyan": Back.CYAN, "light cyan": Back.LIGHTCYAN_EX, "white": Back.WHITE}
# Check the shade of colour to use
if shade in shades:
shade = shades[shade]
else:
shade = Style.NORMAL
# Check the foreground colour to use
if fore_col in fore_cols:
fore_col = fore_cols[fore_col]
else:
fore_col = Fore.WHITE
# Check the background colour to use
if back_col in back_cols:
back_col = back_cols[back_col]
else:
back_col = Back.BLACK
# Then print the text to the screen
print(shade + fore_col + back_col + text, end=end)
def printcollist(list_to_print, fore_col=None, back_col=None, shade=None, end=None):
"""A Function which takes a list and iterates through the list and prints it out in coloured text. The
colours and shade to use can be provided as a list or as a sting.
Arguments:
list_to_print - A iterable list of strings or numbers to print out.
fore_col - A list of strings or a single string to use as the text colour for the strings being printed.
Default White, colours same as printcol
back_col - A list of strings or a single string to use as the background text colour for the strings being
printed. Default Black, colours same as printcol
shade - A list of strings or a single string to use as the shade of the text colour for the string.
Default Normal, options same as printcol
end - A list of strings or a single string to use as the separator between the strings being printed.
Default Newline, this list must be passed for the system to work correctly
"""
# Check the keyword arguments are None and then set the defaults.
if fore_col is None:
fore_col = "white"
if back_col is None:
back_col = "black"
if shade is None:
shade = "normal"
if end is None:
end = "\n"
# Check the lists are of the correct length before attempting the iteration
if len(list_to_print) == len(fore_col) == len(back_col) == len(shade) == len(end):
# Then print out each item as required in its colour
for item, foreground, background, shade, ending in zip(list_to_print, fore_col, back_col, shade, end):
# Print the item
printcol(item, fore_col=foreground, back_col=background, shade=shade, end=ending)
else:
# The lists are not of all equal length so print an error message in red.
printcol("Please use lists of equal length.")
def inputcolour(text, prompt_fore_col=None, prompt_back_col=None, prompt_shade=None, input_fore_col=None, input_back_col=None, input_shade=None):
"""Returns input from a coloured input prompt.
Arguments:
text - The text to prompt the user for the desired input.
prompt_fore_col - The colour of the text to print the prompt text in. Default: white, can be either of: red, light
red, magenta, light Magenta, yellow, light yellow, green, light green, blue, light blue, cyan, light cyan, black or
white
prompt_back_col - The colour to print the prompt text onto. Default: black, can be either of: red, light red,
magenta, light magenta, yellow, light yellow, green, light green, blue, light blue, cyan, light cyan, black or white
prompt_shade - The shade of the colour to use for the input prompt. Default: normal, can be either of: dim, normal,
bright
input_fore_col - The colour of the text to print the user input in. Default: white, can be either of: red, light
red, magenta, light Magenta, yellow, light yellow, green, light green, blue, light blue, cyan, light cyan, black or
white
input_back_col - The colour to print the user input onto. Default: black, can be either of: red, light red,
magenta, light magenta, yellow, light yellow, green, light green, blue, light blue, cyan, light cyan, black or white
input_shade - The shade of the colour to use for the text entered by the user. Default: normal, can be either of:
dim, normal, bright"""
# Handle None keywords
if prompt_fore_col is None:
prompt_fore_col = "white"
if prompt_back_col is None:
prompt_back_col = "black"
if prompt_shade is None:
prompt_shade = "normal"
if input_fore_col is None:
input_fore_col = "white"
if input_back_col is None:
input_back_col = "black"
if input_shade is None:
input_shade = "normal"
# Convert the inputs into lowercase names to be checked
prompt_fore_col = prompt_fore_col.lower()
prompt_back_col = prompt_back_col.lower()
prompt_shade = prompt_shade.lower()
input_fore_col = input_fore_col.lower()
input_back_col = input_back_col.lower()
input_shade = input_shade.lower()
# Check if running from pycharm
is_running_pycharm = "PYCHARM_HOSTED" in os.environ
if is_running_pycharm:
convert = False
strip = False
else:
convert = None
strip = None
init(autoreset=False, convert=convert, strip=strip) # Disable autoreset to colour the prompt correctly
# Define values for each style and colour
shades = {"dim": Style.DIM, "bright": Style.BRIGHT, "normal": Style.NORMAL} # When underline is available add Style.UNDERLINED
fore_cols = {"red": Fore.RED, "light red": Fore.LIGHTRED_EX, "magenta": Fore.MAGENTA, "light magenta": Fore.LIGHTMAGENTA_EX, "yellow": Fore.YELLOW, "light yellow": Fore.LIGHTYELLOW_EX, "green": Fore.GREEN, "light green": Fore.LIGHTGREEN_EX, "blue": Fore.BLUE, "light blue": Fore.LIGHTBLUE_EX, "cyan": Fore.CYAN, "light cyan": Fore.LIGHTCYAN_EX, "black": Fore.BLACK}
back_cols = {"red": Back.RED, "light red": Back.LIGHTRED_EX, "magenta": Back.MAGENTA, "light magenta": Back.LIGHTMAGENTA_EX, "yellow": Back.YELLOW, "light yellow": Back.LIGHTYELLOW_EX, "green": Back.GREEN, "light green": Back.LIGHTGREEN_EX, "blue": Back.BLUE, "light blue": Back.LIGHTBLUE_EX, "cyan": Back.CYAN, "light cyan": Back.LIGHTCYAN_EX, "white": Back.WHITE}
# Check which shade of colour to use for the input prompt and the user input.
if prompt_shade in shades:
prompt_shade = shades[prompt_shade]
else:
prompt_shade = Style.NORMAL
if input_shade in shades:
input_shade = shades[input_shade]
else:
input_shade = Style.NORMAL
# Check each foreground colour to use
if prompt_fore_col in fore_cols:
prompt_fore_col = fore_cols[prompt_fore_col]
else:
prompt_fore_col = Fore.WHITE
if input_fore_col in fore_cols:
input_fore_col = fore_cols[input_fore_col]
else:
input_fore_col = Fore.WHITE
# Check each background colour to use
if prompt_back_col in back_cols:
prompt_back_col = back_cols[prompt_back_col]
else:
prompt_back_col = Back.BLACK
if input_back_col in back_cols:
input_back_col = back_cols[input_back_col]
else:
input_back_col = Back.BLACK
print(prompt_shade + prompt_fore_col + prompt_back_col, end='')
show_text = str(text) + " " + Style.RESET_ALL # Force the text to string and add a space for styling
show_text = show_text + input_shade + input_fore_col + input_back_col  # append the input styling so the user's typed text is coloured
return_text = input(show_text) # Show the text
print(Style.RESET_ALL) # Reset for normal
return return_text
def testcolour(use_string=None):
"""A function which is used to test the colour printing of the shell by printing a string in different colours onto
different backgrounds.
Arguments:
use_string - The string to use for testing the console prints text correctly in all colours. Default:
'Hello World'."""
if use_string is None:
use_string = "Hello World"
printcol(use_string, "red", "black", "dim")
printcol(use_string, "red", "black", "normal")
printcol(use_string, "red", "black", "bright")
printcol(use_string, "magenta", "black", "dim")
printcol(use_string, "magenta", "black", "normal")
printcol(use_string, "magenta", "black", "bright")
printcol(use_string, "yellow", "black", "dim")
printcol(use_string, "yellow", "black", "normal")
printcol(use_string, "yellow", "black", "bright")
printcol(use_string, "green", "black", "dim")
printcol(use_string, "green", "black", "normal")
printcol(use_string, "green", "black", "bright")
printcol(use_string, "cyan", "black", "dim")
printcol(use_string, "cyan", "black", "normal")
printcol(use_string, "cyan", "black", "bright")
printcol(use_string, "blue", "black", "dim")
printcol(use_string, "blue", "black", "normal")
printcol(use_string, "blue", "black", "bright")
printcol(use_string, "white", "black", "dim")
printcol(use_string, "white", "black", "normal")
printcol(use_string, "white", "black", "bright")
printcol(use_string, "black", "white", "dim")
printcol(use_string, "black", "white", "normal")
printcol(use_string, "black", "white", "bright")
| [
"colorama.init"
] | [((2665, 2715), 'colorama.init', 'init', ([], {'autoreset': '(True)', 'convert': 'convert', 'strip': 'strip'}), '(autoreset=True, convert=convert, strip=strip)\n', (2669, 2715), False, 'from colorama import init, Fore, Back, Style\n'), ((8690, 8741), 'colorama.init', 'init', ([], {'autoreset': '(False)', 'convert': 'convert', 'strip': 'strip'}), '(autoreset=False, convert=convert, strip=strip)\n', (8694, 8741), False, 'from colorama import init, Fore, Back, Style\n')] |
import os
from typing import Dict
from typing import Optional
from typing import Union
import pandas as pd
from cata import constants
from cata.plotters import base_plotter
class UnifiedPlotter(base_plotter.BasePlotter):
"""Class for plotting generalisation errors, overlaps etc.
For case when logging is done in 'unified' fashion i.e. all into one dataframe.
"""
def __init__(
self,
save_folder: str,
num_steps: int,
log_overlaps: bool,
ode_log_path: str,
network_log_path: str,
):
"""
Class constructor.
Args:
save_folder: path to folder for saving plots.
num_steps: total number of steps in the training run (used for scaling axes).
log_overlaps: whether or not to plot overlaps (or just errors).
log_ode: whether ot not to plot ode data.
log_network: whether ot not to plot network data.
"""
self._ode_logger_path = ode_log_path
self._network_logger_path = network_log_path
super().__init__(
save_folder=save_folder, num_steps=num_steps, log_overlaps=log_overlaps
)
def _setup_data(self):
"""Setup data from relevant dataframes.
Here, in the unified case, full dataset is loaded into memory.
"""
if self._ode_logger_path is not None:
self._ode_logger = pd.read_csv(self._ode_logger_path, index_col=0)
if self._network_logger_path is not None:
self._network_logger = pd.read_csv(self._network_logger_path)
def make_plots(self) -> None:
"""Orchestration method for plotting ode logs, network logs, or both."""
if self._ode_logger_path is not None:
self._make_plot(
data={constants.ODE: self._ode_logger},
save_path=os.path.join(self._save_folder, constants.ODE_PDF),
)
if self._network_logger_path is not None:
self._make_plot(
data={constants.SIM: self._network_logger},
save_path=os.path.join(self._save_folder, constants.NETWORK_PDF),
)
if self._ode_logger_path is not None and self._network_logger_path is not None:
self._make_plot(
data={
constants.ODE: self._ode_logger,
constants.SIM: self._network_logger,
},
save_path=os.path.join(self._save_folder, constants.OVERLAY_PDF),
)
def _make_plot(
self,
data: Dict[str, Union[pd.DataFrame, Dict[str, pd.DataFrame]]],
save_path: str,
) -> None:
"""Make plots for a set of results (e.g. ode or network or both).
Args:
data: mapping from type of results (ode, network etc.)
to dataframes with results.
save_path: path to save the plot.
"""
# can use arbitrary dataframe since columns will be the same.
tag_groups = self._collate_tags(tags=list(list(data.values())[0].keys()))
# e.g. [error, overlap, ...]
group_names = list(tag_groups.keys())
# e.g. [[error_1, error_2, ...], [overlap_1, overlap_2, ...], ...]
group_key_names = list(tag_groups.values())
num_graphs = len(tag_groups)
num_rows = self.GRAPH_LAYOUT[0]
num_columns = self.GRAPH_LAYOUT[1]
fig, spec = self._get_figure_skeleton(
height=4, width=5, num_columns=num_columns, num_rows=num_rows
)
for row in range(num_rows):
for col in range(num_columns):
graph_index = (row) * num_columns + col
if graph_index < num_graphs:
print("Plotting graph {}/{}".format(graph_index + 1, num_graphs))
group_name = group_names[graph_index]
keys = group_key_names[graph_index]
data_collection = {
data_type: {key: data[data_type][key].dropna() for key in keys}
for data_type in data.keys()
}
fig = self._plot_scalar(
fig=fig,
spec=spec,
row=row,
col=col,
tag_group_name=group_name,
data_collection=data_collection,
)
fig.savefig(save_path, dpi=100)
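# Illustrative usage sketch (not from the original module); the paths and step
# count below are placeholders.
#
# plotter = UnifiedPlotter(
#     save_folder="results/plots",
#     num_steps=500000,
#     log_overlaps=True,
#     ode_log_path="results/ode_logs.csv",
#     network_log_path="results/network_logs.csv",
# )
# plotter.make_plots()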
| [
"pandas.read_csv",
"os.path.join"
] | [((1418, 1465), 'pandas.read_csv', 'pd.read_csv', (['self._ode_logger_path'], {'index_col': '(0)'}), '(self._ode_logger_path, index_col=0)\n', (1429, 1465), True, 'import pandas as pd\n'), ((1551, 1589), 'pandas.read_csv', 'pd.read_csv', (['self._network_logger_path'], {}), '(self._network_logger_path)\n', (1562, 1589), True, 'import pandas as pd\n'), ((1863, 1913), 'os.path.join', 'os.path.join', (['self._save_folder', 'constants.ODE_PDF'], {}), '(self._save_folder, constants.ODE_PDF)\n', (1875, 1913), False, 'import os\n'), ((2094, 2148), 'os.path.join', 'os.path.join', (['self._save_folder', 'constants.NETWORK_PDF'], {}), '(self._save_folder, constants.NETWORK_PDF)\n', (2106, 2148), False, 'import os\n'), ((2459, 2513), 'os.path.join', 'os.path.join', (['self._save_folder', 'constants.OVERLAY_PDF'], {}), '(self._save_folder, constants.OVERLAY_PDF)\n', (2471, 2513), False, 'import os\n')] |
import os
from typing import Any
import requests
import yaml
if os.path.exists("gh_token.py"):
from gh_token import GH_TOKEN
else:
GH_TOKEN = os.environ["GH_TOKEN"]
headers = {"Authorization": f"token {GH_TOKEN}"}
def query_gh_gpl_api(query: str) -> dict:
"""Query the GitHub GraphQL API.
Args:
query (str): Multi-line query string. Use triple-quotes. Minimal example:
'''
{
viewer {
login
}
}
'''
Raises:
Exception: If the query returned an error message.
Returns:
dict: The data returned by the API.
"""
response = requests.post(
"https://api.github.com/graphql", json={"query": query}, headers=headers
).json()
if "errors" in response:
err = response["errors"][0]["message"]
raise Exception(f"Request failed with error '{err}'.")
else:
return response["data"]
def pretty_print(dic: dict) -> None:
"""Pretty print a dictionary in YAML format.
Useful for development and debugging.
"""
print(yaml.dump(dic))
def get_gql_query(settings: str, affil: str = "OWNER") -> str:
"""Construct GraphQL query from settings list.
Args:
settings (str): Names of repo settings according to the GraphQL API,
separated by new lines. Use '\n'.join(settings_list).
affil (str, optional): Comma-separated string of author affiliations to their
repos. One or several of OWNER, COLLABORATOR, ORGANIZATION_MEMBER.
Defaults to "OWNER".
Returns:
str: GraphQL query.
"""
return """{
viewer {
repositories(first: 100, affiliations: [{affil}]) {
nodes {
name
nameWithOwner
isArchived
isFork
{settings}
}
}
organizations(first: 100) {
nodes {
login
repositories(first: 100) {
nodes {
name
nameWithOwner
isArchived
isFork
{settings}
}
}
}
}
}
}""".replace(
"{settings}", settings
).replace(
"{affil}", affil
)
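# Illustrative only: pulling a couple of boolean repo settings. The GraphQL
# field names below are assumed to be valid `Repository` fields; adapt the
# selection to whatever settings you actually manage.
#
# settings_list = ["hasIssuesEnabled", "hasWikiEnabled"]
# data = query_gh_gpl_api(get_gql_query("\n".join(settings_list)))
# for repo in data["viewer"]["repositories"]["nodes"]:
#     print(repo["nameWithOwner"], repo["hasIssuesEnabled"], repo["hasWikiEnabled"])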
def load_config(config_path: str = None) -> tuple[dict[str, Any], list[str], bool]:
"""Load .repo-config.(yml|yaml).
Returns:
tuple[dict[str, Any], list[str], bool]:
- Dictionary of GitHub settings to apply to all your repos
- list of additional logins of your GitHub organizations to query for repos
- boolean whether or not apply settings to repos you forked as well
"""
config = {}
if config_path and not os.path.exists(config_path):
raise FileNotFoundError(
f"Path to config file was set as '{config_path}' but no such file exists."
)
elif config_path:
with open(config_path) as file:
config = yaml.safe_load(file.read())
for path in (".repo-config.yml", ".repo-config.yaml"):
if os.path.exists(path):
with open(path) as file:
config = yaml.safe_load(file.read())
if config == {}:
raise ValueError(
"No config file could be found. See https://git.io/JWa5o for an example "
"config file. All fields except 'settings' are optional."
)
settings = config["settings"]
orgs = config["orgs"] or []
skipForks = config["skipForks"] or True
return settings, orgs, skipForks
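# Illustrative .repo-config.yml sketch (field names and values are placeholders;
# see the example config linked in the docstring for the authoritative format):
#
#   settings:
#     hasIssuesEnabled: true
#     hasWikiEnabled: false
#   orgs:
#     - my-org          # hypothetical organization login
#   skipForks: true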
| [
"requests.post",
"yaml.dump",
"os.path.exists"
] | [((67, 96), 'os.path.exists', 'os.path.exists', (['"""gh_token.py"""'], {}), "('gh_token.py')\n", (81, 96), False, 'import os\n'), ((1114, 1128), 'yaml.dump', 'yaml.dump', (['dic'], {}), '(dic)\n', (1123, 1128), False, 'import yaml\n'), ((3120, 3140), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (3134, 3140), False, 'import os\n'), ((675, 766), 'requests.post', 'requests.post', (['"""https://api.github.com/graphql"""'], {'json': "{'query': query}", 'headers': 'headers'}), "('https://api.github.com/graphql', json={'query': query},\n headers=headers)\n", (688, 766), False, 'import requests\n'), ((2779, 2806), 'os.path.exists', 'os.path.exists', (['config_path'], {}), '(config_path)\n', (2793, 2806), False, 'import os\n')] |
# ------------------------------------------------------------------------------------------------ #
def ImportEssentialityData(fileName):
# Not yet ready for prime time
# Import a defined format essentiality data file
# Assumes that data is in the format: locus tag, gene name, essentiality
from .utils import ParseCSVLine
fileHandle = open(fileName, 'r')
data = fileHandle.readlines()
dataDict = {}
i = 0
while i < len(data):
# Ignore comment lines
if data[i][0] != '#':
dataLine = ParseCSVLine(data[i])
dataDict[dataLine[0]] = [dataLine[1], dataLine[2]]
i += 1
return dataDict
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def BuildEssentialityDictThatIsKeyedByLocusTag(dataArray):
# Not yet ready for prime time
# Build essentiality data dict that is keyed by locus tag
essentialityDict = {}
locusTags = []
headersWithoutSysName = []
# `headers` is not defined in this snippet; assume each record in dataArray is a
# dict keyed by column name and recover the header list from the first record.
headers = list(dataArray[0].keys())
i = 0
while i < len(headers):
if headers[i] != 'sysName':
headersWithoutSysName.append(headers[i])
i += 1
dataDict = {}
for line in dataArray:
dataDict[line['sysName']] = {}
for header in headersWithoutSysName:
dataDict[line['sysName']][header] = line[header]
return dataDict
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def BuildCDSDictThatIsKeyedByLocusTag(cdsFeatures):
# Not yet ready for prime time
i = 0
cdsDict = {}
while i < len(cdsFeatures):
locusTag = cdsFeatures[i].tagDict['locus_tag'][0]
cdsDict[locusTag] = cdsFeatures[i]
i += 1
return cdsDict
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def SimulatePicking(hittableFeatures, notHittableFeatures, hittableTransposonCoords, \
transposonCoordToFeatureDict, maxMutants):
from numpy.random import choice
import pdb
nonEssentialGeneCount = len(hittableFeatures)
featureHitCountDict = {}
for feature in hittableFeatures:
featureHitCountDict[feature] = 0
featuresHitAtLeastOnce = 0
featuresHitAtLeastOnceVersusMutant = []
i = 1
while i <= maxMutants:
randomCoord = int(choice(hittableTransposonCoords))
featuresToBeHit = transposonCoordToFeatureDict[randomCoord]
isAnyFeatureIncludingThisCoordNotHittable = False
for featureToBeHit in featuresToBeHit:
if featureToBeHit in notHittableFeatures:
isAnyFeatureIncludingThisCoordNotHittable = True
if isAnyFeatureIncludingThisCoordNotHittable == False:
for featureToBeHit in featuresToBeHit:
try:
featureHitCountDict[featureToBeHit] += 1
except:
pdb.set_trace()
if featureHitCountDict[featureToBeHit] == 1:
featuresHitAtLeastOnce += 1
featuresHitAtLeastOnceVersusMutant.append(featuresHitAtLeastOnce)
i += 1
return featuresHitAtLeastOnceVersusMutant
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def SimulateMultiplePickings(transposonCoordToFeatureDictFile, numberOfTrials, maxMutants):
from numpy import unique, intersect1d  # these helpers live in numpy; newer scipy versions no longer re-export them
from numpy import mean, std, arange
import xml.etree.ElementTree as ET
import pdb
transposonCoordToFeatureDictFileHandle = open(transposonCoordToFeatureDictFile, 'r')
transposonCoordToFeatureDict = {}
hittableFeatures = []
hittableTransposonCoords = []
notHittableTransposonCoords = []
notHittableFeatures = []
otherFeatures = []
tree = ET.parse(transposonCoordToFeatureDictFile)
root = tree.getroot()
importedCoordsList = root.findall('coord')
for coord in importedCoordsList:
coordinate = int(coord.attrib['coord'])
loci = coord.findall('locus')
importedCoordsKeys = transposonCoordToFeatureDict.keys()
if coordinate not in importedCoordsKeys:
transposonCoordToFeatureDict[coordinate] = []
for locus in loci:
locusName = locus.attrib['locus']
essentiality = locus.attrib['essentiality']
transposonCoordToFeatureDict[coordinate].append(locusName)
if essentiality == 'Dispensable':
hittableTransposonCoords.append(coordinate)
hittableFeatures.append(locusName)
elif essentiality == 'Essential':
notHittableFeatures.append(locusName)
notHittableTransposonCoords.append(coordinate)
else:
otherFeatures.append(locusName)
print(locusName)
hittableFeatures = unique(hittableFeatures)
hittableTransposonCoords = unique(hittableTransposonCoords)
notHittableFeatures = unique(notHittableFeatures)
otherFeatures = unique(otherFeatures)
intersection = intersect1d(hittableFeatures, notHittableFeatures)
# Simulate a number of picking runs
featuresHitAtLeastOnceTrialsArray = []
i = 0
while i < numberOfTrials:
featuresHitAtLeastOnceVersusMutant = \
SimulatePicking(hittableFeatures, notHittableFeatures, hittableTransposonCoords, \
transposonCoordToFeatureDict, maxMutants)
featuresHitAtLeastOnceTrialsArray.append(featuresHitAtLeastOnceVersusMutant)
i += 1
# Collect together then data from the picking runs for calculation of mean and standard
# deviation of number of hits picked
i = 0
collectedFeatureHitCountArray = []
while i < len(featuresHitAtLeastOnceTrialsArray[0]):
collectedFeatureHitCountArray.append([])
i += 1
i = 0
while i < len(collectedFeatureHitCountArray):
j = 0
while j < len(featuresHitAtLeastOnceTrialsArray):
collectedFeatureHitCountArray[i].append(featuresHitAtLeastOnceTrialsArray[j][i])
j += 1
i += 1
averageFeatureHitCount = []
sdFeatureHitCount = []
featureHitCountUpperBound = []
featureHitCountLowerBound = []
# Calculate the mean and standard deviation of the number of unique features hit at each pick
# from the trials
i = 0
while i < len(collectedFeatureHitCountArray):
averageFeatureHitCount.append(mean(collectedFeatureHitCountArray[i]))
sdFeatureHitCount.append(std(collectedFeatureHitCountArray[i]))
featureHitCountUpperBound.append(averageFeatureHitCount[i] + sdFeatureHitCount[i])
featureHitCountLowerBound.append(averageFeatureHitCount[i] - sdFeatureHitCount[i])
i += 1
# Prepare an x axis (the number of mutants picked) for the output
iAxis = arange(1, maxMutants+1, 1)
noUniqHittableFeatures = len(hittableFeatures)
return [iAxis, averageFeatureHitCount, sdFeatureHitCount, featureHitCountUpperBound, \
featureHitCountLowerBound, noUniqHittableFeatures ]
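# Illustrative shape of the transposon-coordinate XML consumed above (inferred
# from the parsing code; element and attribute values are placeholders):
#
# <root>
#   <coord coord="1024">
#     <locus locus="ABC_0001" essentiality="Dispensable"/>
#   </coord>
#   <coord coord="2048">
#     <locus locus="ABC_0002" essentiality="Essential"/>
#   </coord>
# </root>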
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def PoissonEstimateOfGenesHit(iAxis, noUniqHittableFeatures):
from numpy import exp, array  # the builtin float is used below; numpy.float has been removed in recent numpy versions
uniqueGenesHit = []
i = 0
while i < len(iAxis):
ans = noUniqHittableFeatures*(1-exp(-iAxis[i]/noUniqHittableFeatures))
uniqueGenesHit.append(ans)
i += 1
uniqueGenesHit = array(uniqueGenesHit, float)
return uniqueGenesHit
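# Quick illustrative check of the expectation formula N * (1 - exp(-n/N)) used
# above: with N = 3000 hittable genes and n = 3000 picked mutants,
#   PoissonEstimateOfGenesHit([3000], 3000)  ->  array([~1896.4])
# i.e. roughly 63% of the hittable genes are expected to be hit after one
# "genome equivalent" of random picks.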
# ------------------------------------------------------------------------------------------------ #
# ------------------------------------------------------------------------------------------------ #
def FindATandTAPositions2(genomeFile, format='genbank'):
	# Does the same thing as FindATandTAPositions but can work with a GenBank or a Fasta file,
	# so you only need one file format
import re
from pdb import set_trace
if format == 'genbank':
sequence = ImportGenBankSequence(genomeFile)
elif format == 'fasta':
sequence = ImportFastaSequence(genomeFile)
ATandTAPositions = []
atRegex = re.compile('(at|ta)', re.IGNORECASE)
# set_trace()
i = 0
while i < len(sequence) - 1:
atMatch = atRegex.match(sequence[i:i+2])
if atMatch != None:
ATandTAPositions.append(i+1)
i += 1
return [ATandTAPositions, sequence]
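# A small illustrative trace of the scan above (hypothetical input): for
# sequence = "GATTA" the 2-character windows are "GA", "AT", "TT", "TA",
# so ATandTAPositions would be [2, 4] (positions are 1-based, and overlapping
# dinucleotides are counted because i always advances by 1).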
# ------------------------------------------------------------------------------------------------ #
| [
"xml.etree.ElementTree.parse",
"numpy.std",
"scipy.intersect1d",
"numpy.mean",
"numpy.array",
"numpy.arange",
"numpy.exp",
"numpy.random.choice",
"pdb.set_trace",
"scipy.unique",
"re.compile"
] | [((3844, 3886), 'xml.etree.ElementTree.parse', 'ET.parse', (['transposonCoordToFeatureDictFile'], {}), '(transposonCoordToFeatureDictFile)\n', (3852, 3886), True, 'import xml.etree.ElementTree as ET\n'), ((4733, 4757), 'scipy.unique', 'unique', (['hittableFeatures'], {}), '(hittableFeatures)\n', (4739, 4757), False, 'from scipy import unique, intersect1d\n'), ((4786, 4818), 'scipy.unique', 'unique', (['hittableTransposonCoords'], {}), '(hittableTransposonCoords)\n', (4792, 4818), False, 'from scipy import unique, intersect1d\n'), ((4842, 4869), 'scipy.unique', 'unique', (['notHittableFeatures'], {}), '(notHittableFeatures)\n', (4848, 4869), False, 'from scipy import unique, intersect1d\n'), ((4887, 4908), 'scipy.unique', 'unique', (['otherFeatures'], {}), '(otherFeatures)\n', (4893, 4908), False, 'from scipy import unique, intersect1d\n'), ((4927, 4977), 'scipy.intersect1d', 'intersect1d', (['hittableFeatures', 'notHittableFeatures'], {}), '(hittableFeatures, notHittableFeatures)\n', (4938, 4977), False, 'from scipy import unique, intersect1d\n'), ((6541, 6569), 'numpy.arange', 'arange', (['(1)', '(maxMutants + 1)', '(1)'], {}), '(1, maxMutants + 1, 1)\n', (6547, 6569), False, 'from numpy import mean, std, arange\n'), ((7252, 7280), 'numpy.array', 'array', (['uniqueGenesHit', 'float'], {}), '(uniqueGenesHit, float)\n', (7257, 7280), False, 'from numpy import exp, array, float\n'), ((7924, 7960), 're.compile', 're.compile', (['"""(at|ta)"""', 're.IGNORECASE'], {}), "('(at|ta)', re.IGNORECASE)\n", (7934, 7960), False, 'import re\n'), ((2451, 2483), 'numpy.random.choice', 'choice', (['hittableTransposonCoords'], {}), '(hittableTransposonCoords)\n', (2457, 2483), False, 'from numpy.random import choice\n'), ((6174, 6212), 'numpy.mean', 'mean', (['collectedFeatureHitCountArray[i]'], {}), '(collectedFeatureHitCountArray[i])\n', (6178, 6212), False, 'from numpy import mean, std, arange\n'), ((6241, 6278), 'numpy.std', 'std', (['collectedFeatureHitCountArray[i]'], {}), '(collectedFeatureHitCountArray[i])\n', (6244, 6278), False, 'from numpy import mean, std, arange\n'), ((7154, 7193), 'numpy.exp', 'exp', (['(-iAxis[i] / noUniqHittableFeatures)'], {}), '(-iAxis[i] / noUniqHittableFeatures)\n', (7157, 7193), False, 'from numpy import exp, array, float\n'), ((2923, 2938), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (2936, 2938), False, 'import pdb\n')] |
import pytest
from onegram.exceptions import NotSupportedError
from onegram import follow, unfollow
from onegram import like, unlike
from onegram import comment, uncomment
from onegram import save, unsave
def test_follow(logged, user, cassette):
if logged:
response = follow(user)
assert response == {'result': 'following',
'status': 'ok',
'user_id': user['id']}
response = unfollow(user)
assert response == {'status': 'ok', 'user_id': user['id']}
else:
with pytest.raises(NotSupportedError):
follow(user)
with pytest.raises(NotSupportedError):
unfollow(user)
def test_like(logged, post, cassette):
if logged:
response = like(post)
assert response == {'status': 'ok', 'post_id': post['id']}
response = unlike(post)
assert response == {'status': 'ok', 'post_id': post['id']}
else:
with pytest.raises(NotSupportedError):
like(post)
with pytest.raises(NotSupportedError):
unlike(post)
def test_comment(logged, post, cassette):
text = 'awesome!'
if logged:
commentary = comment(text, post)
assert commentary['id']
assert commentary['text'] == text
assert commentary['status'] == 'ok'
assert commentary['post_id'] == post['id']
response = uncomment(commentary)
assert response == {'status': 'ok', 'post_id': post['id']}
else:
with pytest.raises(NotSupportedError):
comment(text, post)
with pytest.raises(NotSupportedError):
fake_comment = {'id': '1', 'post_id': '2'}
uncomment(fake_comment)
def test_save(logged, post, cassette):
if logged:
response = save(post)
assert response == {'status': 'ok', 'post_id': post['id']}
response = unsave(post)
assert response == {'status': 'ok', 'post_id': post['id']}
else:
with pytest.raises(NotSupportedError):
save(post)
with pytest.raises(NotSupportedError):
unsave(post)
| [
"onegram.unsave",
"onegram.save",
"onegram.unfollow",
"onegram.uncomment",
"onegram.like",
"onegram.unlike",
"pytest.raises",
"onegram.follow",
"onegram.comment"
] | [((284, 296), 'onegram.follow', 'follow', (['user'], {}), '(user)\n', (290, 296), False, 'from onegram import follow, unfollow\n'), ((463, 477), 'onegram.unfollow', 'unfollow', (['user'], {}), '(user)\n', (471, 477), False, 'from onegram import follow, unfollow\n'), ((776, 786), 'onegram.like', 'like', (['post'], {}), '(post)\n', (780, 786), False, 'from onegram import like, unlike\n'), ((874, 886), 'onegram.unlike', 'unlike', (['post'], {}), '(post)\n', (880, 886), False, 'from onegram import like, unlike\n'), ((1208, 1227), 'onegram.comment', 'comment', (['text', 'post'], {}), '(text, post)\n', (1215, 1227), False, 'from onegram import comment, uncomment\n'), ((1417, 1438), 'onegram.uncomment', 'uncomment', (['commentary'], {}), '(commentary)\n', (1426, 1438), False, 'from onegram import comment, uncomment\n'), ((1809, 1819), 'onegram.save', 'save', (['post'], {}), '(post)\n', (1813, 1819), False, 'from onegram import save, unsave\n'), ((1907, 1919), 'onegram.unsave', 'unsave', (['post'], {}), '(post)\n', (1913, 1919), False, 'from onegram import save, unsave\n'), ((568, 600), 'pytest.raises', 'pytest.raises', (['NotSupportedError'], {}), '(NotSupportedError)\n', (581, 600), False, 'import pytest\n'), ((614, 626), 'onegram.follow', 'follow', (['user'], {}), '(user)\n', (620, 626), False, 'from onegram import follow, unfollow\n'), ((640, 672), 'pytest.raises', 'pytest.raises', (['NotSupportedError'], {}), '(NotSupportedError)\n', (653, 672), False, 'import pytest\n'), ((686, 700), 'onegram.unfollow', 'unfollow', (['user'], {}), '(user)\n', (694, 700), False, 'from onegram import follow, unfollow\n'), ((977, 1009), 'pytest.raises', 'pytest.raises', (['NotSupportedError'], {}), '(NotSupportedError)\n', (990, 1009), False, 'import pytest\n'), ((1023, 1033), 'onegram.like', 'like', (['post'], {}), '(post)\n', (1027, 1033), False, 'from onegram import like, unlike\n'), ((1047, 1079), 'pytest.raises', 'pytest.raises', (['NotSupportedError'], {}), '(NotSupportedError)\n', (1060, 1079), False, 'import pytest\n'), ((1093, 1105), 'onegram.unlike', 'unlike', (['post'], {}), '(post)\n', (1099, 1105), False, 'from onegram import like, unlike\n'), ((1529, 1561), 'pytest.raises', 'pytest.raises', (['NotSupportedError'], {}), '(NotSupportedError)\n', (1542, 1561), False, 'import pytest\n'), ((1575, 1594), 'onegram.comment', 'comment', (['text', 'post'], {}), '(text, post)\n', (1582, 1594), False, 'from onegram import comment, uncomment\n'), ((1608, 1640), 'pytest.raises', 'pytest.raises', (['NotSupportedError'], {}), '(NotSupportedError)\n', (1621, 1640), False, 'import pytest\n'), ((1709, 1732), 'onegram.uncomment', 'uncomment', (['fake_comment'], {}), '(fake_comment)\n', (1718, 1732), False, 'from onegram import comment, uncomment\n'), ((2010, 2042), 'pytest.raises', 'pytest.raises', (['NotSupportedError'], {}), '(NotSupportedError)\n', (2023, 2042), False, 'import pytest\n'), ((2056, 2066), 'onegram.save', 'save', (['post'], {}), '(post)\n', (2060, 2066), False, 'from onegram import save, unsave\n'), ((2080, 2112), 'pytest.raises', 'pytest.raises', (['NotSupportedError'], {}), '(NotSupportedError)\n', (2093, 2112), False, 'import pytest\n'), ((2126, 2138), 'onegram.unsave', 'unsave', (['post'], {}), '(post)\n', (2132, 2138), False, 'from onegram import save, unsave\n')] |
import logging
import math
import os
import time
import boto3
from pyspark.sql import SparkSession
def get_asg_inservice_instance_count(asg_name: str, region_name: str) -> int:
client = boto3.client('autoscaling', region_name=region_name)
asg = client.describe_auto_scaling_groups(AutoScalingGroupNames=[asg_name])['AutoScalingGroups'][0]
return sum([1 for inst in asg['Instances'] if inst['LifecycleState'] == 'InService'])
def adjust_ec2_asg(asg_name: str, region_name: str, desired_capacity: int,
check_interval_sec: int = 15,
ten_inst_timeout_sec: int = 30):
aws_client = boto3.client('autoscaling', region_name=region_name)
aws_client.update_auto_scaling_group(AutoScalingGroupName=asg_name,
DesiredCapacity=desired_capacity,
MinSize=0,
MaxSize=desired_capacity)
current_capacity = get_asg_inservice_instance_count(asg_name, region_name)
adjust_capacity = abs(desired_capacity - current_capacity)
timeout_sec = ten_inst_timeout_sec * math.ceil(abs(adjust_capacity) / 10)
max_trial = int(math.ceil(timeout_sec / check_interval_sec))
for trial in range(0, max_trial + 1):
inservice_instance_count = get_asg_inservice_instance_count(asg_name, region_name)
if inservice_instance_count != desired_capacity:
time.sleep(check_interval_sec)
else:
return
logging.warning('Failed to adjust the capacity of asg "%(g)s" from %(f)d to %(t)d in %(s)d seconds'
% {'g': asg_name, 'f': current_capacity, 't': desired_capacity, 's': timeout_sec})
def wait_on_nodes_to_join_spark_cluster(spark, desired_cluster_size: int,
timeout_sec: int, check_interval_sec: int = 3):
max_trials = int(math.ceil(timeout_sec / check_interval_sec))
for trial in range(0, max_trials + 1):
current_size = get_spark_worker_node_count(spark)
if current_size != desired_cluster_size:
time.sleep(check_interval_sec)
else:
return
logging.warning('Failed to bring %(d)d nodes to Spark cluster in %(s)d seconds, current cluster size: %(c)d'
% {'d': desired_cluster_size, 's': timeout_sec, 'c': get_spark_worker_node_count(spark)})
def get_spark_worker_node_count(spark):
# noinspection PyProtectedMember
return spark.sparkContext._jsc.sc().getExecutorMemoryStatus().size() - 1
def setup_spark_session() -> SparkSession:
from envconfig.env import env
asg_name = env.aws_asg_name
region_name = env.aws_region_name
cluster_size = env.aws_cluster_size
adjust_ec2_asg(asg_name, region_name, cluster_size)
os.environ['PYSPARK_PYTHON'] = env.spark_pyspark_python
os.environ['PYTHONPATH'] = env.spark_pythonpath
spark = SparkSession.builder \
.master(env.spark_master) \
.appName('Trinity %(e)s' % {'e': env.env}) \
.config('spark.executor.uri', env.spark_executor_uri) \
.config('spark.sql.shuffle.partitions', env.spark_sql_shuffle_partitions) \
.config('spark.driver.memory', env.spark_driver_memory) \
.config('spark.executor.memory', env.spark_executor_memory) \
.getOrCreate()
wait_on_nodes_to_join_spark_cluster(spark, env.aws_cluster_size, env.aws_cluster_size * 10)
return spark
def prep_notebook(spark, aws_cluster_size: int = None, aws_asg_name: str = None, aws_region_name: str = None,
setup_spark_cluster_timeout_sec: int = None):
from envconfig.env import env
if aws_cluster_size is None:
aws_cluster_size = env.aws_cluster_size
if aws_asg_name is None:
aws_asg_name = env.aws_asg_name
if aws_region_name is None:
aws_region_name = env.aws_region_name
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
logging.info('Adjusting AWS autoscaling group "%(g)s" capacity to %(c)d ...'
% {'g': aws_asg_name, 'c': aws_cluster_size})
adjust_ec2_asg(aws_asg_name, aws_region_name, aws_cluster_size)
logging.info('Waiting for workers to join Spark cluster ...')
if setup_spark_cluster_timeout_sec is None:
setup_spark_cluster_timeout_sec = aws_cluster_size * 20
wait_on_nodes_to_join_spark_cluster(spark, aws_cluster_size, setup_spark_cluster_timeout_sec)
logging.info('Notebook and Spark cluster are standing by')
def shutdown_notebook(aws_asg_name: str = None, aws_region_name: str = None):
from envconfig.env import env
if aws_asg_name is None:
aws_asg_name = env.aws_asg_name
if aws_region_name is None:
aws_region_name = env.aws_region_name
logging.info('Shutting down AWS autoscaling group "%(g)s" by adjusting capacity to 0'
% {'g': aws_asg_name})
adjust_ec2_asg(aws_asg_name, aws_region_name, 0)
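# A minimal usage sketch (assumes the envconfig settings used above are
# importable and AWS credentials are configured; shown as comments only):
#
#     spark = setup_spark_session()   # scales the ASG and waits for workers
#     # ... run Spark jobs ...
#     shutdown_notebook()             # scales the ASG back down to 0
#
# prep_notebook(spark) can be used instead to (re)scale the cluster for an
# already existing SparkSession.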
| [
"logging.basicConfig",
"boto3.client",
"logging.warning",
"math.ceil",
"time.sleep",
"pyspark.sql.SparkSession.builder.master",
"logging.info"
] | [((193, 245), 'boto3.client', 'boto3.client', (['"""autoscaling"""'], {'region_name': 'region_name'}), "('autoscaling', region_name=region_name)\n", (205, 245), False, 'import boto3\n'), ((635, 687), 'boto3.client', 'boto3.client', (['"""autoscaling"""'], {'region_name': 'region_name'}), "('autoscaling', region_name=region_name)\n", (647, 687), False, 'import boto3\n'), ((1509, 1705), 'logging.warning', 'logging.warning', (['(\'Failed to adjust the capacity of asg "%(g)s" from %(f)d to %(t)d in %(s)d seconds\'\n % {\'g\': asg_name, \'f\': current_capacity, \'t\': desired_capacity, \'s\':\n timeout_sec})'], {}), '(\n \'Failed to adjust the capacity of asg "%(g)s" from %(f)d to %(t)d in %(s)d seconds\'\n % {\'g\': asg_name, \'f\': current_capacity, \'t\': desired_capacity, \'s\':\n timeout_sec})\n', (1524, 1705), False, 'import logging\n'), ((3897, 4018), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s %(levelname)s %(message)s"""', 'datefmt': '"""%Y-%m-%d %H:%M:%S"""'}), "(level=logging.INFO, format=\n '%(asctime)s %(levelname)s %(message)s', datefmt='%Y-%m-%d %H:%M:%S')\n", (3916, 4018), False, 'import logging\n'), ((4042, 4173), 'logging.info', 'logging.info', (['(\'Adjusting AWS autoscaling group "%(g)s" capacity to %(c)d ...\' % {\'g\':\n aws_asg_name, \'c\': aws_cluster_size})'], {}), '(\n \'Adjusting AWS autoscaling group "%(g)s" capacity to %(c)d ...\' % {\'g\':\n aws_asg_name, \'c\': aws_cluster_size})\n', (4054, 4173), False, 'import logging\n'), ((4255, 4316), 'logging.info', 'logging.info', (['"""Waiting for workers to join Spark cluster ..."""'], {}), "('Waiting for workers to join Spark cluster ...')\n", (4267, 4316), False, 'import logging\n'), ((4532, 4590), 'logging.info', 'logging.info', (['"""Notebook and Spark cluster are standing by"""'], {}), "('Notebook and Spark cluster are standing by')\n", (4544, 4590), False, 'import logging\n'), ((4858, 4975), 'logging.info', 'logging.info', (['(\'Shutting down AWS autoscaling group "%(g)s" by adjusting capacity to 0\' %\n {\'g\': aws_asg_name})'], {}), '(\n \'Shutting down AWS autoscaling group "%(g)s" by adjusting capacity to 0\' %\n {\'g\': aws_asg_name})\n', (4870, 4975), False, 'import logging\n'), ((1194, 1237), 'math.ceil', 'math.ceil', (['(timeout_sec / check_interval_sec)'], {}), '(timeout_sec / check_interval_sec)\n', (1203, 1237), False, 'import math\n'), ((1897, 1940), 'math.ceil', 'math.ceil', (['(timeout_sec / check_interval_sec)'], {}), '(timeout_sec / check_interval_sec)\n', (1906, 1940), False, 'import math\n'), ((1441, 1471), 'time.sleep', 'time.sleep', (['check_interval_sec'], {}), '(check_interval_sec)\n', (1451, 1471), False, 'import time\n'), ((2104, 2134), 'time.sleep', 'time.sleep', (['check_interval_sec'], {}), '(check_interval_sec)\n', (2114, 2134), False, 'import time\n'), ((2919, 2964), 'pyspark.sql.SparkSession.builder.master', 'SparkSession.builder.master', (['env.spark_master'], {}), '(env.spark_master)\n', (2946, 2964), False, 'from pyspark.sql import SparkSession\n')] |
from django import forms
from .models import User
from news.models import Category
from django.utils.translation import gettext, gettext_lazy as _
from django.contrib.auth import authenticate, forms as auth_forms
from django.db.models import Q
class LoginForm(forms.Form):
username = forms.CharField(
label=_('Username'),
)
password = forms.CharField(
label=_("<PASSWORD>"),
strip=False,
widget=forms.PasswordInput(attrs={'autocomplete': 'current-password'}),
)
error_messages = {
'invalid_login': _("Username hoặc mật khẩu không đúng."),
}
def __init__(self, *args, **kwargs):
self.user_cache = None
return super().__init__(*args, **kwargs)
def get_initial_for_field(self, field, field_name):
return ''
def clean(self):
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
if username is not None and password is not None:
self.user_cache = authenticate(
username=username, password=password)
if self.user_cache is None:
raise forms.ValidationError(
self.error_messages['invalid_login'],
code='invalid_login',
)
return self.cleaned_data
def get_user(self):
return self.user_cache
class RegisterForm(forms.ModelForm):
error_messages = {
'password_mismatch': _('Mật khẩu không khớp.'),
}
re_password = forms.CharField(
label='<PASSWORD>',
widget=forms.PasswordInput,
)
class Meta:
model = User
fields = ('username', 'first_name', 'last_name', 'email', 'password',)
labels = {
'email': 'Email',
'password': '<PASSWORD>',
'first_name': 'Tên',
'last_name': 'Họ',
}
def get_initial_for_field(self, field, field_name):
return ''
def clean_re_password(self):
password = self.cleaned_data.get('password')
re_password = self.cleaned_data.get('re_password')
if password and re_password and password != re_password:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch'
)
return re_password
def save(self, commit=True):
user = super().save(commit=False)
user.set_password(self.cleaned_data["password"])
if commit:
user.save()
return user
class SetPasswordForm(auth_forms.SetPasswordForm):
error_messages = {
'password_mismatch': _('Mật khẩu không khớp.'),
}
new_password1 = forms.CharField(
label=_("Mật khẩu"),
widget=forms.PasswordInput(attrs={'autocomplete': 'new-password'}),
strip=False,
)
new_password2 = forms.CharField(
label=_("Nhập lại mật khẩu"),
strip=False,
widget=forms.PasswordInput(attrs={'autocomplete': 'new-password'}),
)
class PasswordChangeForm(SetPasswordForm):
error_messages = {
**SetPasswordForm.error_messages,
'password_incorrect': _("Mật khẩu cũ bạn vừa nhập không đúng."),
}
old_password = forms.CharField(
label=_("M<PASSWORD>"),
strip=False,
widget=forms.PasswordInput(
attrs={'autocomplete': 'current-password', 'autofocus': True}),
)
field_order = ['old_password', 'new_password1', 'new_<PASSWORD>']
def get_initial_for_field(self, field, field_name):
return ''
def clean_old_password(self):
old_password = self.cleaned_data.get('old_password')
if not self.user.check_password(old_password):
raise forms.ValidationError(
self.error_messages['password_incorrect'],
code='password_incorrect',
)
return old_password
def clean(self):
new_password = self.cleaned_data.get('<PASSWORD>_<PASSWORD>')
self.user.set_password(new_password)
self.user.save()
return self.cleaned_data
class TopicOrganizeForm(forms.ModelForm):
def __init__(self, user, *args, **kwargs):
super().__init__(*args, **kwargs)
self.following_categories = user.following_categories.all()
self.fields['following_categories'].queryset = self.following_categories
class Meta:
model = User
fields = ('following_categories', )
widgets = {
'following_categories': forms.CheckboxSelectMultiple(),
}
class TopicAddForm(TopicOrganizeForm):
adding_categories = forms.ModelMultipleChoiceField(queryset=None)
def __init__(self, user, *args, **kwargs):
super().__init__(user, *args, **kwargs)
following_query = Q()
for category in self.following_categories:
following_query |= Q(pk=category.pk)
self.fields['adding_categories'].queryset = Category.objects.exclude(following_query)
def clean(self):
cleaned_data = self.cleaned_data
cleaned_data['following_categories'] = self.following_categories.union(cleaned_data.get('adding_categories'))
return cleaned_data
class Meta(TopicOrganizeForm.Meta):
fields = ('following_categories', 'adding_categories', )
widgets = {
'adding_categories': forms.CheckboxSelectMultiple()
}
class UserUpdateForm(forms.ModelForm):
def __init__(self, user, *args, **kwargs):
self.user = user
return super().__init__(*args, **kwargs)
class Meta:
model = User
fields = ('last_name', 'first_name', 'avatar')
labels = {
'last_name': 'Họ',
'first_name': 'Tên',
'avatar': 'Avatar',
}
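# A minimal usage sketch (hypothetical Django view code, not part of this
# module; `request` is assumed to be an incoming POST request):
#
#     form = RegisterForm(request.POST)
#     if form.is_valid():
#         user = form.save()    # password is hashed via set_password() above
#     else:
#         errors = form.errors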
| [
"django.forms.CheckboxSelectMultiple",
"django.utils.translation.gettext_lazy",
"django.forms.PasswordInput",
"django.db.models.Q",
"django.forms.ValidationError",
"django.forms.ModelMultipleChoiceField",
"django.contrib.auth.authenticate",
"django.forms.CharField",
"news.models.Category.objects.exclude"
] | [((1525, 1588), 'django.forms.CharField', 'forms.CharField', ([], {'label': '"""<PASSWORD>"""', 'widget': 'forms.PasswordInput'}), "(label='<PASSWORD>', widget=forms.PasswordInput)\n", (1540, 1588), False, 'from django import forms\n'), ((4640, 4685), 'django.forms.ModelMultipleChoiceField', 'forms.ModelMultipleChoiceField', ([], {'queryset': 'None'}), '(queryset=None)\n', (4670, 4685), False, 'from django import forms\n'), ((561, 600), 'django.utils.translation.gettext_lazy', '_', (['"""Username hoặc mật khẩu không đúng."""'], {}), "('Username hoặc mật khẩu không đúng.')\n", (562, 600), True, 'from django.utils.translation import gettext, gettext_lazy as _\n'), ((1473, 1498), 'django.utils.translation.gettext_lazy', '_', (['"""Mật khẩu không khớp."""'], {}), "('Mật khẩu không khớp.')\n", (1474, 1498), True, 'from django.utils.translation import gettext, gettext_lazy as _\n'), ((2659, 2684), 'django.utils.translation.gettext_lazy', '_', (['"""Mật khẩu không khớp."""'], {}), "('Mật khẩu không khớp.')\n", (2660, 2684), True, 'from django.utils.translation import gettext, gettext_lazy as _\n'), ((3179, 3220), 'django.utils.translation.gettext_lazy', '_', (['"""Mật khẩu cũ bạn vừa nhập không đúng."""'], {}), "('Mật khẩu cũ bạn vừa nhập không đúng.')\n", (3180, 3220), True, 'from django.utils.translation import gettext, gettext_lazy as _\n'), ((4808, 4811), 'django.db.models.Q', 'Q', ([], {}), '()\n', (4809, 4811), False, 'from django.db.models import Q\n'), ((4966, 5007), 'news.models.Category.objects.exclude', 'Category.objects.exclude', (['following_query'], {}), '(following_query)\n', (4990, 5007), False, 'from news.models import Category\n'), ((321, 334), 'django.utils.translation.gettext_lazy', '_', (['"""Username"""'], {}), "('Username')\n", (322, 334), True, 'from django.utils.translation import gettext, gettext_lazy as _\n'), ((388, 403), 'django.utils.translation.gettext_lazy', '_', (['"""<PASSWORD>"""'], {}), "('<PASSWORD>')\n", (389, 403), True, 'from django.utils.translation import gettext, gettext_lazy as _\n'), ((441, 504), 'django.forms.PasswordInput', 'forms.PasswordInput', ([], {'attrs': "{'autocomplete': 'current-password'}"}), "(attrs={'autocomplete': 'current-password'})\n", (460, 504), False, 'from django import forms\n'), ((1021, 1071), 'django.contrib.auth.authenticate', 'authenticate', ([], {'username': 'username', 'password': 'password'}), '(username=username, password=password)\n', (1033, 1071), False, 'from django.contrib.auth import authenticate, forms as auth_forms\n'), ((2194, 2288), 'django.forms.ValidationError', 'forms.ValidationError', (["self.error_messages['password_mismatch']"], {'code': '"""password_mismatch"""'}), "(self.error_messages['password_mismatch'], code=\n 'password_mismatch')\n", (2215, 2288), False, 'from django import forms\n'), ((2743, 2756), 'django.utils.translation.gettext_lazy', '_', (['"""Mật khẩu"""'], {}), "('Mật khẩu')\n", (2744, 2756), True, 'from django.utils.translation import gettext, gettext_lazy as _\n'), ((2773, 2832), 'django.forms.PasswordInput', 'forms.PasswordInput', ([], {'attrs': "{'autocomplete': 'new-password'}"}), "(attrs={'autocomplete': 'new-password'})\n", (2792, 2832), False, 'from django import forms\n'), ((2912, 2934), 'django.utils.translation.gettext_lazy', '_', (['"""Nhập lại mật khẩu"""'], {}), "('Nhập lại mật khẩu')\n", (2913, 2934), True, 'from django.utils.translation import gettext, gettext_lazy as _\n'), ((2972, 3031), 'django.forms.PasswordInput', 'forms.PasswordInput', ([], {'attrs': 
"{'autocomplete': 'new-password'}"}), "(attrs={'autocomplete': 'new-password'})\n", (2991, 3031), False, 'from django import forms\n'), ((3279, 3295), 'django.utils.translation.gettext_lazy', '_', (['"""M<PASSWORD>"""'], {}), "('M<PASSWORD>')\n", (3280, 3295), True, 'from django.utils.translation import gettext, gettext_lazy as _\n'), ((3333, 3419), 'django.forms.PasswordInput', 'forms.PasswordInput', ([], {'attrs': "{'autocomplete': 'current-password', 'autofocus': True}"}), "(attrs={'autocomplete': 'current-password', 'autofocus':\n True})\n", (3352, 3419), False, 'from django import forms\n'), ((3751, 3847), 'django.forms.ValidationError', 'forms.ValidationError', (["self.error_messages['password_incorrect']"], {'code': '"""password_incorrect"""'}), "(self.error_messages['password_incorrect'], code=\n 'password_incorrect')\n", (3772, 3847), False, 'from django import forms\n'), ((4533, 4563), 'django.forms.CheckboxSelectMultiple', 'forms.CheckboxSelectMultiple', ([], {}), '()\n', (4561, 4563), False, 'from django import forms\n'), ((4895, 4912), 'django.db.models.Q', 'Q', ([], {'pk': 'category.pk'}), '(pk=category.pk)\n', (4896, 4912), False, 'from django.db.models import Q\n'), ((5376, 5406), 'django.forms.CheckboxSelectMultiple', 'forms.CheckboxSelectMultiple', ([], {}), '()\n', (5404, 5406), False, 'from django import forms\n'), ((1151, 1237), 'django.forms.ValidationError', 'forms.ValidationError', (["self.error_messages['invalid_login']"], {'code': '"""invalid_login"""'}), "(self.error_messages['invalid_login'], code=\n 'invalid_login')\n", (1172, 1237), False, 'from django import forms\n')] |
from kivy.properties import (
NumericProperty, ReferenceListProperty, BooleanProperty)
from kivy.vector import Vector
from parabox.base_object import BaseObject
class Movable(BaseObject):
"""Mixins for movable classes"""
velocity_x = NumericProperty(0)
velocity_y = NumericProperty(0)
velocity = ReferenceListProperty(velocity_x, velocity_y)
acceleration_x = NumericProperty(0)
acceleration_y = NumericProperty(0)
acceleration = ReferenceListProperty(acceleration_x, acceleration_y)
in_move = BooleanProperty(False)
def __init__(self, *args, velocity=(0, 0), speed_limit=10., **kwargs):
"""Movable constructor
:param velocity: velocity vector
:type velocity: kivy.vector.Vector
:param speed_limit: speed limit for object. 10 by default
:type speed_limit: float
"""
super(Movable, self).__init__(*args, **kwargs)
self.speed_limit = speed_limit
self.velocity = velocity
self.add_to_collections(["movable"])
self.register_event_type('on_move')
self.register_event_type('on_move_x')
self.register_event_type('on_move_y')
self.register_event_type('on_stop')
self.register_event_type('on_stop_x')
self.register_event_type('on_stop_y')
self.bind(on_update=self.move)
def _update_velocity(self):
"""Change velocity because of acceleration"""
self.velocity = Vector(*self.velocity) + Vector(*self.acceleration)
velocity_vector = Vector(self.velocity)
if velocity_vector.length() > self.speed_limit:
self.velocity = (velocity_vector * self.speed_limit /
velocity_vector.length())
def move(self, instance):
"""Move object
:param instance: self analog
:type instance: kivy.uix.widget.Widget
"""
self._update_velocity()
self._change_position()
self._reset_acceleration()
def _change_position(self):
"""Change objects position"""
self.x += self.velocity_x
if self.velocity_x:
self.dispatch("on_move_x")
self.y += self.velocity_y
if self.velocity_y:
self.dispatch("on_move_y")
if self.velocity_y or self.velocity_x:
self.dispatch("on_move")
def _reset_acceleration(self):
"""Set acceleration to zero"""
self.acceleration_x = self.acceleration_y = 0
def move_stop_x(self):
"""Stop in x direction"""
self.velocity_x = 0
def move_stop_y(self):
"""Stop in y direction"""
self.velocity_y = 0
def move_stop(self):
"""Stop object"""
self.move_stop_x()
self.move_stop_y()
def on_velocity_x(self, instance, value):
"""Dispatch event on x move"""
if not value and self.in_move:
self.dispatch("on_stop_x")
if not self.velocity_y:
self.dispatch("on_stop")
def on_velocity_y(self, instance, value):
"""Dispatch event on y move"""
if not value and self.in_move:
self.dispatch("on_stop_y")
if not self.velocity_x:
self.dispatch("on_stop")
def on_move(self):
"""On move event"""
self.in_move = True
def on_move_x(self):
"""On move x event"""
pass
def on_move_y(self):
"""On move y event"""
pass
def on_stop(self):
"""On stop event"""
self.in_move = False
def on_stop_x(self):
"""On stop x event"""
pass
def on_stop_y(self):
"""On stop y event"""
pass
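# A minimal usage sketch (assumes a running Kivy app and whatever BaseObject
# itself requires; values are illustrative, shown as comments only):
#
#     player = Movable(velocity=(2, 0), speed_limit=5.)
#     player.bind(on_stop=lambda instance: print("stopped"))
#     player.acceleration_x = 1   # applied and reset on the next on_update tick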
| [
"kivy.properties.NumericProperty",
"kivy.properties.BooleanProperty",
"kivy.properties.ReferenceListProperty",
"kivy.vector.Vector"
] | [((249, 267), 'kivy.properties.NumericProperty', 'NumericProperty', (['(0)'], {}), '(0)\n', (264, 267), False, 'from kivy.properties import NumericProperty, ReferenceListProperty, BooleanProperty\n'), ((285, 303), 'kivy.properties.NumericProperty', 'NumericProperty', (['(0)'], {}), '(0)\n', (300, 303), False, 'from kivy.properties import NumericProperty, ReferenceListProperty, BooleanProperty\n'), ((319, 364), 'kivy.properties.ReferenceListProperty', 'ReferenceListProperty', (['velocity_x', 'velocity_y'], {}), '(velocity_x, velocity_y)\n', (340, 364), False, 'from kivy.properties import NumericProperty, ReferenceListProperty, BooleanProperty\n'), ((386, 404), 'kivy.properties.NumericProperty', 'NumericProperty', (['(0)'], {}), '(0)\n', (401, 404), False, 'from kivy.properties import NumericProperty, ReferenceListProperty, BooleanProperty\n'), ((426, 444), 'kivy.properties.NumericProperty', 'NumericProperty', (['(0)'], {}), '(0)\n', (441, 444), False, 'from kivy.properties import NumericProperty, ReferenceListProperty, BooleanProperty\n'), ((464, 517), 'kivy.properties.ReferenceListProperty', 'ReferenceListProperty', (['acceleration_x', 'acceleration_y'], {}), '(acceleration_x, acceleration_y)\n', (485, 517), False, 'from kivy.properties import NumericProperty, ReferenceListProperty, BooleanProperty\n'), ((532, 554), 'kivy.properties.BooleanProperty', 'BooleanProperty', (['(False)'], {}), '(False)\n', (547, 554), False, 'from kivy.properties import NumericProperty, ReferenceListProperty, BooleanProperty\n'), ((1530, 1551), 'kivy.vector.Vector', 'Vector', (['self.velocity'], {}), '(self.velocity)\n', (1536, 1551), False, 'from kivy.vector import Vector\n'), ((1452, 1474), 'kivy.vector.Vector', 'Vector', (['*self.velocity'], {}), '(*self.velocity)\n', (1458, 1474), False, 'from kivy.vector import Vector\n'), ((1477, 1503), 'kivy.vector.Vector', 'Vector', (['*self.acceleration'], {}), '(*self.acceleration)\n', (1483, 1503), False, 'from kivy.vector import Vector\n')] |
from functools import lru_cache
from pydantic import BaseSettings
class Settings(BaseSettings):
"""Settings model."""
secret_key: str = ""
mongo_url: str = ""
testing: bool = False
@lru_cache(typed=False)
def get_settings() -> Settings:
"""Initialize settings."""
return Settings()
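# A minimal usage sketch (assumes the matching environment variables, e.g.
# SECRET_KEY / MONGO_URL / TESTING, are set before the first call):
#
#     settings = get_settings()          # built once, then cached by lru_cache
#     assert get_settings() is settings  # later calls return the same object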
| [
"functools.lru_cache"
] | [((204, 226), 'functools.lru_cache', 'lru_cache', ([], {'typed': '(False)'}), '(typed=False)\n', (213, 226), False, 'from functools import lru_cache\n')] |
from __future__ import print_function
import minpy.numpy as mp
import numpy as np
import minpy.dispatch.policy as policy
from minpy.core import convert_args, return_numpy, grad_and_loss, grad, minpy_to_numpy as mn, numpy_to_minpy as nm
import time
# mp.set_policy(policy.OnlyNumPyPolicy())
def test_autograd():
@convert_args
def minpy_rnn_step_forward(x, prev_h, Wx, Wh, b):
next_h = mp.tanh(x.dot(Wx) + prev_h.dot(Wh) + b)
return next_h
def rel_error(x, y):
""" returns relative error """
return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
def rnn_step_forward(x, prev_h, Wx, Wh, b):
next_h = np.tanh(prev_h.dot(Wh) + x.dot(Wx) + b)
cache = next_h, prev_h, x, Wx, Wh
return next_h, cache
def rnn_step_backward(dnext_h, cache):
dx, dprev_h, dWx, dWh, db = None, None, None, None, None
# Load values from rnn_step_forward
next_h, prev_h, x, Wx, Wh = cache
# Gradients of loss wrt tanh
dtanh = dnext_h * (1 - next_h * next_h) # (N, H)
# Gradients of loss wrt x
dx = dtanh.dot(Wx.T)
# Gradients of loss wrt prev_h
dprev_h = dtanh.dot(Wh.T)
# Gradients of loss wrt Wx
dWx = x.T.dot(dtanh) # (D, H)
# Gradients of loss wrt Wh
dWh = prev_h.T.dot(dtanh)
        # Gradients of loss wrt b. Note we broadcast b in practice, so the
        # result of the matrix op is just a sum over the columns
db = dtanh.sum(axis=0) # == np.ones([N, 1]).T.dot(dtanh)[0, :]
return dx, dprev_h, dWx, dWh, db
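    # The backward pass above relies on tanh'(z) = 1 - tanh(z)**2, so with
    # next_h = tanh(z) the upstream gradient dnext_h is scaled by
    # (1 - next_h**2) before being routed to x, prev_h, Wx, Wh and b via the
    # chain rule.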
# preparation
N, D, H = 4, 5, 6
x = np.random.randn(N, D)
h = np.random.randn(N, H)
Wx = np.random.randn(D, H)
Wh = np.random.randn(H, H)
b = np.random.randn(H)
out, cache = rnn_step_forward(x, h, Wx, Wh, b)
dnext_h = np.random.randn(*out.shape)
# test MinPy
start = time.time()
rnn_step_forward_loss = lambda x, h, Wx, Wh, b, dnext_h: minpy_rnn_step_forward(x, h, Wx, Wh, b) * nm(dnext_h)
grad_loss_function = return_numpy(grad_and_loss(rnn_step_forward_loss, range(5)))
grad_arrays = grad_loss_function(x, h, Wx, Wh, b, dnext_h)[0]
end = time.time()
print("MinPy total time elapsed:", end - start)
# test NumPy
start = time.time()
out, cache = rnn_step_forward(x, h, Wx, Wh, b)
dx, dprev_h, dWx, dWh, db = rnn_step_backward(dnext_h, cache)
out *= dnext_h # to agree with MinPy calculation
end = time.time()
print("NumPy total time elapsed:", end - start)
print()
print("Result Check:")
print('dx error: ', rel_error(dx, grad_arrays[0]))
print('dprev_h error: ', rel_error(dprev_h, grad_arrays[1]))
print('dWx error: ', rel_error(dWx, grad_arrays[2]))
print('dWh error: ', rel_error(dWh, grad_arrays[3]))
print('db error: ', rel_error(db, grad_arrays[4]))
def test_zero_input_grad():
def foo1(x):
return 1
bar1 = grad(foo1)
assert bar1(0) == 0.0
def test_reduction():
def test_sum():
x_np = np.array([[1, 2], [3, 4], [5, 6]])
x_grad = np.array([[1, 1], [1, 1], [1, 1]])
def red1(x):
return mp.sum(x)
def red2(x):
return mp.sum(x, axis=0)
def red3(x):
return mp.sum(x, axis=0, keepdims=True)
grad1 = grad(red1)
assert np.all(grad1(x_np).asnumpy() == x_grad)
grad2 = grad(red2)
assert np.all(grad2(x_np).asnumpy() == x_grad)
grad3 = grad(red3)
assert np.all(grad3(x_np).asnumpy() == x_grad)
def test_max():
x_np = np.array([[1, 2], [2, 1], [0, 0]])
x_grad1 = np.array([[0, 1], [1, 0], [0, 0]])
x_grad2 = np.array([[0, 1], [1, 0], [1, 1]])
x_grad3 = np.array([[0, 1], [1, 0], [0, 0]])
def red1(x):
return mp.max(x)
def red2(x):
return mp.max(x, axis=1)
def red3(x):
return mp.max(x, axis=1, keepdims=True)
def red4(x):
return mp.max(x, axis=0)
def red5(x):
return mp.max(x, axis=0, keepdims=True)
grad1 = grad(red1)
assert np.all(grad1(x_np).asnumpy() == x_grad1)
grad2 = grad(red2)
assert np.all(grad2(x_np).asnumpy() == x_grad2)
grad3 = grad(red3)
assert np.all(grad3(x_np).asnumpy() == x_grad2)
grad4 = grad(red4)
assert np.all(grad4(x_np).asnumpy() == x_grad3)
grad5 = grad(red5)
assert np.all(grad5(x_np).asnumpy() == x_grad3)
def test_min():
x_np = np.array([[1, 2], [2, 1], [0, 0]])
x_grad1 = np.array([[0, 0], [0, 0], [1, 1]])
x_grad2 = np.array([[1, 0], [0, 1], [1, 1]])
x_grad3 = np.array([[0, 0], [0, 0], [1, 1]])
def red1(x):
return mp.min(x)
def red2(x):
return mp.min(x, axis=1)
def red3(x):
return mp.min(x, axis=1, keepdims=True)
def red4(x):
return mp.min(x, axis=0)
def red5(x):
return mp.min(x, axis=0, keepdims=True)
grad1 = grad(red1)
assert np.all(grad1(x_np).asnumpy() == x_grad1)
grad2 = grad(red2)
assert np.all(grad2(x_np).asnumpy() == x_grad2)
grad3 = grad(red3)
assert np.all(grad3(x_np).asnumpy() == x_grad2)
grad4 = grad(red4)
assert np.all(grad4(x_np).asnumpy() == x_grad3)
grad5 = grad(red5)
assert np.all(grad5(x_np).asnumpy() == x_grad3)
test_sum()
test_max()
test_min()
if __name__ == "__main__":
test_autograd()
test_zero_input_grad()
test_reduction()
| [
"numpy.abs",
"minpy.numpy.sum",
"numpy.random.randn",
"minpy.numpy.max",
"time.time",
"minpy.core.numpy_to_minpy",
"numpy.array",
"minpy.core.grad",
"minpy.numpy.min"
] | [((1681, 1702), 'numpy.random.randn', 'np.random.randn', (['N', 'D'], {}), '(N, D)\n', (1696, 1702), True, 'import numpy as np\n'), ((1711, 1732), 'numpy.random.randn', 'np.random.randn', (['N', 'H'], {}), '(N, H)\n', (1726, 1732), True, 'import numpy as np\n'), ((1742, 1763), 'numpy.random.randn', 'np.random.randn', (['D', 'H'], {}), '(D, H)\n', (1757, 1763), True, 'import numpy as np\n'), ((1773, 1794), 'numpy.random.randn', 'np.random.randn', (['H', 'H'], {}), '(H, H)\n', (1788, 1794), True, 'import numpy as np\n'), ((1803, 1821), 'numpy.random.randn', 'np.random.randn', (['H'], {}), '(H)\n', (1818, 1821), True, 'import numpy as np\n'), ((1887, 1914), 'numpy.random.randn', 'np.random.randn', (['*out.shape'], {}), '(*out.shape)\n', (1902, 1914), True, 'import numpy as np\n'), ((1949, 1960), 'time.time', 'time.time', ([], {}), '()\n', (1958, 1960), False, 'import time\n'), ((2238, 2249), 'time.time', 'time.time', ([], {}), '()\n', (2247, 2249), False, 'import time\n'), ((2336, 2347), 'time.time', 'time.time', ([], {}), '()\n', (2345, 2347), False, 'import time\n'), ((2528, 2539), 'time.time', 'time.time', ([], {}), '()\n', (2537, 2539), False, 'import time\n'), ((2999, 3009), 'minpy.core.grad', 'grad', (['foo1'], {}), '(foo1)\n', (3003, 3009), False, 'from minpy.core import convert_args, return_numpy, grad_and_loss, grad, minpy_to_numpy as mn, numpy_to_minpy as nm\n'), ((3094, 3128), 'numpy.array', 'np.array', (['[[1, 2], [3, 4], [5, 6]]'], {}), '([[1, 2], [3, 4], [5, 6]])\n', (3102, 3128), True, 'import numpy as np\n'), ((3146, 3180), 'numpy.array', 'np.array', (['[[1, 1], [1, 1], [1, 1]]'], {}), '([[1, 1], [1, 1], [1, 1]])\n', (3154, 3180), True, 'import numpy as np\n'), ((3378, 3388), 'minpy.core.grad', 'grad', (['red1'], {}), '(red1)\n', (3382, 3388), False, 'from minpy.core import convert_args, return_numpy, grad_and_loss, grad, minpy_to_numpy as mn, numpy_to_minpy as nm\n'), ((3460, 3470), 'minpy.core.grad', 'grad', (['red2'], {}), '(red2)\n', (3464, 3470), False, 'from minpy.core import convert_args, return_numpy, grad_and_loss, grad, minpy_to_numpy as mn, numpy_to_minpy as nm\n'), ((3542, 3552), 'minpy.core.grad', 'grad', (['red3'], {}), '(red3)\n', (3546, 3552), False, 'from minpy.core import convert_args, return_numpy, grad_and_loss, grad, minpy_to_numpy as mn, numpy_to_minpy as nm\n'), ((3644, 3678), 'numpy.array', 'np.array', (['[[1, 2], [2, 1], [0, 0]]'], {}), '([[1, 2], [2, 1], [0, 0]])\n', (3652, 3678), True, 'import numpy as np\n'), ((3697, 3731), 'numpy.array', 'np.array', (['[[0, 1], [1, 0], [0, 0]]'], {}), '([[0, 1], [1, 0], [0, 0]])\n', (3705, 3731), True, 'import numpy as np\n'), ((3750, 3784), 'numpy.array', 'np.array', (['[[0, 1], [1, 0], [1, 1]]'], {}), '([[0, 1], [1, 0], [1, 1]])\n', (3758, 3784), True, 'import numpy as np\n'), ((3803, 3837), 'numpy.array', 'np.array', (['[[0, 1], [1, 0], [0, 0]]'], {}), '([[0, 1], [1, 0], [0, 0]])\n', (3811, 3837), True, 'import numpy as np\n'), ((4166, 4176), 'minpy.core.grad', 'grad', (['red1'], {}), '(red1)\n', (4170, 4176), False, 'from minpy.core import convert_args, return_numpy, grad_and_loss, grad, minpy_to_numpy as mn, numpy_to_minpy as nm\n'), ((4249, 4259), 'minpy.core.grad', 'grad', (['red2'], {}), '(red2)\n', (4253, 4259), False, 'from minpy.core import convert_args, return_numpy, grad_and_loss, grad, minpy_to_numpy as mn, numpy_to_minpy as nm\n'), ((4332, 4342), 'minpy.core.grad', 'grad', (['red3'], {}), '(red3)\n', (4336, 4342), False, 'from minpy.core import convert_args, return_numpy, grad_and_loss, grad, 
minpy_to_numpy as mn, numpy_to_minpy as nm\n'), ((4415, 4425), 'minpy.core.grad', 'grad', (['red4'], {}), '(red4)\n', (4419, 4425), False, 'from minpy.core import convert_args, return_numpy, grad_and_loss, grad, minpy_to_numpy as mn, numpy_to_minpy as nm\n'), ((4498, 4508), 'minpy.core.grad', 'grad', (['red5'], {}), '(red5)\n', (4502, 4508), False, 'from minpy.core import convert_args, return_numpy, grad_and_loss, grad, minpy_to_numpy as mn, numpy_to_minpy as nm\n'), ((4601, 4635), 'numpy.array', 'np.array', (['[[1, 2], [2, 1], [0, 0]]'], {}), '([[1, 2], [2, 1], [0, 0]])\n', (4609, 4635), True, 'import numpy as np\n'), ((4654, 4688), 'numpy.array', 'np.array', (['[[0, 0], [0, 0], [1, 1]]'], {}), '([[0, 0], [0, 0], [1, 1]])\n', (4662, 4688), True, 'import numpy as np\n'), ((4707, 4741), 'numpy.array', 'np.array', (['[[1, 0], [0, 1], [1, 1]]'], {}), '([[1, 0], [0, 1], [1, 1]])\n', (4715, 4741), True, 'import numpy as np\n'), ((4760, 4794), 'numpy.array', 'np.array', (['[[0, 0], [0, 0], [1, 1]]'], {}), '([[0, 0], [0, 0], [1, 1]])\n', (4768, 4794), True, 'import numpy as np\n'), ((5123, 5133), 'minpy.core.grad', 'grad', (['red1'], {}), '(red1)\n', (5127, 5133), False, 'from minpy.core import convert_args, return_numpy, grad_and_loss, grad, minpy_to_numpy as mn, numpy_to_minpy as nm\n'), ((5206, 5216), 'minpy.core.grad', 'grad', (['red2'], {}), '(red2)\n', (5210, 5216), False, 'from minpy.core import convert_args, return_numpy, grad_and_loss, grad, minpy_to_numpy as mn, numpy_to_minpy as nm\n'), ((5289, 5299), 'minpy.core.grad', 'grad', (['red3'], {}), '(red3)\n', (5293, 5299), False, 'from minpy.core import convert_args, return_numpy, grad_and_loss, grad, minpy_to_numpy as mn, numpy_to_minpy as nm\n'), ((5372, 5382), 'minpy.core.grad', 'grad', (['red4'], {}), '(red4)\n', (5376, 5382), False, 'from minpy.core import convert_args, return_numpy, grad_and_loss, grad, minpy_to_numpy as mn, numpy_to_minpy as nm\n'), ((5455, 5465), 'minpy.core.grad', 'grad', (['red5'], {}), '(red5)\n', (5459, 5465), False, 'from minpy.core import convert_args, return_numpy, grad_and_loss, grad, minpy_to_numpy as mn, numpy_to_minpy as nm\n'), ((2064, 2075), 'minpy.core.numpy_to_minpy', 'nm', (['dnext_h'], {}), '(dnext_h)\n', (2066, 2075), True, 'from minpy.core import convert_args, return_numpy, grad_and_loss, grad, minpy_to_numpy as mn, numpy_to_minpy as nm\n'), ((3221, 3230), 'minpy.numpy.sum', 'mp.sum', (['x'], {}), '(x)\n', (3227, 3230), True, 'import minpy.numpy as mp\n'), ((3271, 3288), 'minpy.numpy.sum', 'mp.sum', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (3277, 3288), True, 'import minpy.numpy as mp\n'), ((3329, 3361), 'minpy.numpy.sum', 'mp.sum', (['x'], {'axis': '(0)', 'keepdims': '(True)'}), '(x, axis=0, keepdims=True)\n', (3335, 3361), True, 'import minpy.numpy as mp\n'), ((3878, 3887), 'minpy.numpy.max', 'mp.max', (['x'], {}), '(x)\n', (3884, 3887), True, 'import minpy.numpy as mp\n'), ((3928, 3945), 'minpy.numpy.max', 'mp.max', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (3934, 3945), True, 'import minpy.numpy as mp\n'), ((3986, 4018), 'minpy.numpy.max', 'mp.max', (['x'], {'axis': '(1)', 'keepdims': '(True)'}), '(x, axis=1, keepdims=True)\n', (3992, 4018), True, 'import minpy.numpy as mp\n'), ((4059, 4076), 'minpy.numpy.max', 'mp.max', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (4065, 4076), True, 'import minpy.numpy as mp\n'), ((4117, 4149), 'minpy.numpy.max', 'mp.max', (['x'], {'axis': '(0)', 'keepdims': '(True)'}), '(x, axis=0, keepdims=True)\n', (4123, 4149), True, 'import minpy.numpy as mp\n'), 
((4835, 4844), 'minpy.numpy.min', 'mp.min', (['x'], {}), '(x)\n', (4841, 4844), True, 'import minpy.numpy as mp\n'), ((4885, 4902), 'minpy.numpy.min', 'mp.min', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (4891, 4902), True, 'import minpy.numpy as mp\n'), ((4943, 4975), 'minpy.numpy.min', 'mp.min', (['x'], {'axis': '(1)', 'keepdims': '(True)'}), '(x, axis=1, keepdims=True)\n', (4949, 4975), True, 'import minpy.numpy as mp\n'), ((5016, 5033), 'minpy.numpy.min', 'mp.min', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (5022, 5033), True, 'import minpy.numpy as mp\n'), ((5074, 5106), 'minpy.numpy.min', 'mp.min', (['x'], {'axis': '(0)', 'keepdims': '(True)'}), '(x, axis=0, keepdims=True)\n', (5080, 5106), True, 'import minpy.numpy as mp\n'), ((557, 570), 'numpy.abs', 'np.abs', (['(x - y)'], {}), '(x - y)\n', (563, 570), True, 'import numpy as np\n'), ((591, 600), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (597, 600), True, 'import numpy as np\n'), ((603, 612), 'numpy.abs', 'np.abs', (['y'], {}), '(y)\n', (609, 612), True, 'import numpy as np\n')] |
from point import Point
__author__ = 'pzqa'
l1 = list(map(lambda i: Point(i, i*i), range(-5, 6)))
l2 = list(filter(lambda el: el.x % 2 == 0, l1))
print(l1)
print(l2)
| [
"point.Point"
] | [((70, 85), 'point.Point', 'Point', (['i', '(i * i)'], {}), '(i, i * i)\n', (75, 85), False, 'from point import Point\n')] |
from abc import ABC, abstractmethod
import os
import spacy_udpipe
from .utils import load_pickled_file
from .settings import PATH_TO_RUS_UDPIPE_MODEL
import spacy
def processTag(tag_representation):
res = {}
if len(tag_representation.split('|')) > 0:
for one_subtag in tag_representation.split('|'):
if len(one_subtag.split('=')) > 1:
key = one_subtag.split('=')[0]
value = one_subtag.split('=')[1]
res[key] = value
return res
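# A small illustrative call (hypothetical UD-style tag string):
#
#     processTag("Case=Nom|Number=Sing")  ->  {'Case': 'Nom', 'Number': 'Sing'}
#
# A tag with no 'key=value' pairs (e.g. "_") simply yields an empty dict.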
class SyntaxVectorizer(ABC):
def setup_rules(self):
pass
def text_structures_initializer(self):
pass
def calculate_morpho_tags(self, current_token):
pass
def normalize_morpho_tags(self):
pass
if __name__ == "__main__":
# Just checking
    # Please note that this class is imported
t = load_pickled_file(os.path.join('ProcessedData', 'Андреев_Ангелочек.pkl'))
print('Pickle loaded')
current_nlp_module = spacy_udpipe.load_from_path('ru-syntagrus', PATH_TO_RUS_UDPIPE_MODEL)
print('Model loaded')
hj = SyntaxVectorizerRU(current_nlp_module)
hj.convert_to_attributes(t['Trees'])
resAttribs = hj.get_res_attributes()
    print("That's all")
"os.path.join",
"spacy_udpipe.load_from_path"
] | [((991, 1060), 'spacy_udpipe.load_from_path', 'spacy_udpipe.load_from_path', (['"""ru-syntagrus"""', 'PATH_TO_RUS_UDPIPE_MODEL'], {}), "('ru-syntagrus', PATH_TO_RUS_UDPIPE_MODEL)\n", (1018, 1060), False, 'import spacy_udpipe\n'), ((883, 937), 'os.path.join', 'os.path.join', (['"""ProcessedData"""', '"""Андреев_Ангелочек.pkl"""'], {}), "('ProcessedData', 'Андреев_Ангелочек.pkl')\n", (895, 937), False, 'import os\n')] |
import numpy as np
from krikos.nn.layer import BatchNorm, BatchNorm2d, Dropout
class Network(object):
def __init__(self):
super(Network, self).__init__()
self.diff = (BatchNorm, BatchNorm2d, Dropout)
def train(self, input, target):
raise NotImplementedError
def eval(self, input):
raise NotImplementedError
class Sequential(Network):
def __init__(self, layers, loss, lr, regularization=None):
super(Sequential, self).__init__()
self.layers = layers
self.loss = loss
self.lr = lr
self.regularization = regularization
def train(self, input, target):
layers = self.layers
loss = self.loss
regularization = self.regularization
l = 0
for layer in layers:
if isinstance(layer, self.diff):
layer.mode = "train"
input = layer.forward(input)
if regularization is not None:
for _, param in layer.params.items():
l += regularization.forward(param)
l += loss.forward(input, target)
dout = loss.backward()
for layer in reversed(layers):
dout = layer.backward(dout)
for param, grad in layer.grads.items():
if regularization is not None:
grad += regularization.backward(layer.params[param])
layer.params[param] -= self.lr * grad
return np.argmax(input, axis=1), l
def eval(self, input):
layers = self.layers
for layer in layers:
if isinstance(layer, self.diff):
layer.mode = "test"
input = layer.forward(input)
return np.argmax(input, axis=1) | [
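# A minimal usage sketch (the layer and loss classes are hypothetical
# placeholders from krikos.nn; shapes and hyperparameters are illustrative):
#
#     net = Sequential([Linear(784, 10)], SoftmaxLoss(), lr=1e-3)
#     preds, loss_value = net.train(x_batch, y_batch)   # one SGD step
#     test_preds = net.eval(x_test)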
"numpy.argmax"
] | [((1717, 1741), 'numpy.argmax', 'np.argmax', (['input'], {'axis': '(1)'}), '(input, axis=1)\n', (1726, 1741), True, 'import numpy as np\n'), ((1463, 1487), 'numpy.argmax', 'np.argmax', (['input'], {'axis': '(1)'}), '(input, axis=1)\n', (1472, 1487), True, 'import numpy as np\n')] |
import getpass
from pymongo import MongoClient
def main():
hostname = input("MongoDB Hostname (Default: localhost): ")
if not hostname:
hostname = "localhost"
port = input("MongoDB Port (Default: 27017): ")
if not port:
port = "27017"
username = input("MongoDB Username: ")
password = getpass.getpass("MongoDB Password: ")
database_name = input("MongoDB Database Name: ")
url = f"mongodb://{username}:{password}@{hostname}:{port}"
client = MongoClient(url)
db = client[database_name]
option = input("1: Create Indexes\n"
"2: Drop TTL Indexes\n"
"3: Drop Common Indexes\n"
"4: Drop Database\n"
"Option: ")
if option == "1":
db['download_cache'].create_index([("illust_id", 1)], unique=True)
db['illust_detail_cache'].create_index([("illust.id", 1)], unique=True)
db['illust_ranking_cache'].create_index([("mode", 1)], unique=True)
db['search_illust_cache'].create_index([("word", 1)], unique=True)
db['search_user_cache'].create_index([("word", 1)], unique=True)
db['user_illusts_cache'].create_index([("user_id", 1)], unique=True)
db['other_cache'].create_index([("type", 1)], unique=True)
create_ttl_indexes = input("Create TTL Indexes? [y/N] ")
if create_ttl_indexes == 'y' or create_ttl_indexes == 'Y':
download_cache_expires_in = int(input("Download cache expires in (sec): "))
db['download_cache'].create_index([("update_time", 1)], expireAfterSeconds=download_cache_expires_in)
illust_detail_cache_expires_in = int(input("Illust detail cache expires in (sec): "))
db['illust_detail_cache'].create_index([("update_time", 1)],
expireAfterSeconds=illust_detail_cache_expires_in)
illust_ranking_cache_expires_in = int(input("Illust ranking cache expires in (sec): "))
db['illust_ranking_cache'].create_index([("update_time", 1)],
expireAfterSeconds=illust_ranking_cache_expires_in)
search_illust_cache_expires_in = int(input("Search illust cache expires in (sec): "))
db['search_illust_cache'].create_index([("update_time", 1)],
expireAfterSeconds=search_illust_cache_expires_in)
search_user_cache_expires_in = int(input("Search user cache expires in (sec): "))
db['search_user_cache'].create_index([("update_time", 1)], expireAfterSeconds=search_user_cache_expires_in)
user_illusts_cache_expires_in = int(input("User illusts cache expires in (sec): "))
db['user_illusts_cache'].create_index([("update_time", 1)],
expireAfterSeconds=user_illusts_cache_expires_in)
other_cache_expires_in = int(input("User bookmarks and recommended illusts cache expire in (sec): "))
db['other_cache'].create_index([("update_time", 1)], expireAfterSeconds=other_cache_expires_in)
elif option == "2":
db['download_cache'].drop_index([("update_time", 1)])
db['illust_detail_cache'].drop_index([("update_time", 1)])
db['illust_ranking_cache'].drop_index([("update_time", 1)])
db['search_illust_cache'].drop_index([("update_time", 1)])
db['search_user_cache'].drop_index([("update_time", 1)])
db['user_illusts_cache'].drop_index([("update_time", 1)])
db['other_cache'].drop_index([("update_time", 1)])
elif option == "3":
db['download_cache'].drop_index([("illust_id", 1)])
        db['illust_detail_cache'].drop_index([("illust.id", 1)])
db['illust_ranking_cache'].drop_index([("mode", 1)])
db['search_illust_cache'].drop_index([("word", 1)])
db['search_user_cache'].drop_index([("word", 1)])
db['user_illusts_cache'].drop_index([("user_id", 1)])
db['other_cache'].drop_index([("type", 1)])
elif option == "4":
        confirm = input("Sure? [y/N] ")
        if confirm == 'y' or confirm == 'Y':
client.drop_database(database_name)
else:
print("Invalid Option.")
if __name__ == '__main__':
main()
| [
"pymongo.MongoClient",
"getpass.getpass"
] | [((330, 367), 'getpass.getpass', 'getpass.getpass', (['"""MongoDB Password: """'], {}), "('MongoDB Password: ')\n", (345, 367), False, 'import getpass\n'), ((498, 514), 'pymongo.MongoClient', 'MongoClient', (['url'], {}), '(url)\n', (509, 514), False, 'from pymongo import MongoClient\n')] |
import logging
import json
import time
from random import random, randint
import asyncio
import aiohttp
import aiosqlite
import aiofiles
import regex
from aiohttp.client_exceptions import ClientConnectorError
from piggy import utils
# Logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
fh = logging.FileHandler("./piggy.log")
ch.setLevel(logging.INFO)
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(message)s")
ch.setFormatter(formatter)
formatter = logging.Formatter(
"[%(asctime)s] %(levelname)s %(funcName)s: %(message)s"
)
fh.setFormatter(formatter)
logger.addHandler(ch)
logger.addHandler(fh)
class Piggy:
def __init__(self, loop):
self.loop = loop
async def http_request(
self, method, url,
headers=None, params=None, data=None, response_type="text"
):
await asyncio.sleep(self.settings['connection']["wait_time"])
try:
if method == "GET":
r = await self.session.get(
url,
headers=headers,
params=params
)
logger.debug(f"[GET] {r.url}")
elif method == "POST":
r = await self.session.post(
url,
headers=headers,
data=data
)
logger.debug(f"[POST] {r.url}")
else:
raise ValueError(f"Invalid HTTP method: {method}")
except ClientConnectorError:
logger.error("Could not reach the server. Retrying in 30 seconds.")
await asyncio.sleep(30)
return await self.http_request(
method,
url,
headers=headers,
params=params,
data=data,
response_type=response_type
)
else:
logger.debug(f"Status code: {r.status} {r.reason}")
if r.status == 200:
                # Successful request: decrease retry time
if self.settings['connection']["wait_time"] > 0:
self.settings['connection']["wait_time"] -= 1
if response_type == "text":
res = await r.text()
logger.debug(res)
return res
elif response_type == "json":
res = await r.json()
logger.debug(res)
return res
else:
raise ValueError(f"Invalid response type: {response_type}")
elif r.status == 429:
                # Unsuccessful request: increase retry time
self.settings['connection']["wait_time"] += 1
logger.warning(
f"""Too many requests! Retrying in {self.settings['connection']['wait_time']} seconds."""
)
return await self.http_request(
method,
url,
headers=headers,
params=params,
data=data,
response_type=response_type
)
else:
logger.error(f"Response status: {r.status}")
logger.error(f"Response headers: {r.headers}")
logger.error(await r.text())
raise ValueError(f"Response error: {r.status}")
async def setup(self, settings_path="settings.json"):
logger.info("Loading settings...")
# Load settings
with open(settings_path) as f:
self.settings = json.loads(
regex.sub(r"#.+$", "", f.read(), flags=regex.MULTILINE)
)
# Load comments list for photos
with open("comments/pic_comments.txt") as f:
comments = f.readlines()
self.pic_comments_list = [x.strip() for x in comments]
# Load comments list for videos
with open("comments/video_comments.txt") as f:
comments = f.readlines()
self.video_comments_list = [x.strip() for x in comments]
# Initialize the asynchronous http session
headers = {
"DNT": "1",
"Host": "www.instagram.com",
"Upgrade-Insecure-Requests": "1",
"User-Agent": self.settings["connection"]["user_agent"]
}
timeout = aiohttp.ClientTimeout(
total=self.settings["connection"]["timeout"]
)
self.session = aiohttp.ClientSession(headers=headers, timeout=timeout)
logger.info("Session initialized.")
# Get the csrf token. It is needed to log in
self.csrf_token = await self._getCsrfTokenFromForm()
async def _getCsrfTokenFromForm(self):
# Get login page and find the csrf token
res = await self.http_request(
"GET",
"https://www.instagram.com/accounts/login/"
)
return regex.findall(
r"\"csrf_token\":\"(.*?)\"",
res,
flags=regex.MULTILINE
)[0]
async def login(self):
payload = {
"username": self.settings["user"]["username"],
"password": self.settings["user"]["password"]
}
headers = {
"User-Agent": self.settings["connection"]["user_agent"],
"X-CSRFToken": self.csrf_token
}
res = await self.http_request(
"POST",
"https://www.instagram.com/accounts/login/ajax/",
headers=headers,
data=payload,
response_type="json"
)
if res["authenticated"]:
logger.info("Logged in!")
self.id = res["userId"]
elif res["message"] == "checkpoint_required":
logger.info("Checkpoint required.")
res = await self.http_request(
"POST",
f"https://www.instagram.com{res['checkpoint_url']}",
headers=headers,
data=payload
)
logger.error(res)
else:
logger.error("Couldn't log in.")
cookies = utils.cookies_dict(self.session.cookie_jar)
self.csrf_token = cookies["csrftoken"]
# Initialize the database
await self._init_database()
async def _init_database(self):
logger.info("Checking database...")
# Connect to the local database and look for the table names
async with aiosqlite.connect("./piggy.db") as db:
logger.debug("Checking table: pics")
await db.execute(
"""
CREATE TABLE IF NOT EXISTS pics (
id INT,
height INT,
width INT,
url TEXT,
tags TEXT
)
"""
)
logger.debug("Checking table: users")
await db.execute(
"""
CREATE TABLE IF NOT EXISTS users (
id TEXT,
username TEXT,
ts_follower INTEGER,
ts_following INTEGER,
follower BOOL,
following BOOL
)
"""
)
logger.debug("Checking table: likes")
await db.execute(
"""
CREATE TABLE IF NOT EXISTS likes (
id INTEGER,
ts INTEGER
)
"""
)
logger.debug("Checking table: comments")
await db.execute(
"""
CREATE TABLE IF NOT EXISTS comments (
id INTEGER,
ts INTEGER,
comment TEXT
)
"""
)
logger.info("Updating followers and following lists.")
await db.execute("UPDATE users SET follower=0, following=1")
for username in await self.followers():
await db.execute(
"UPDATE users SET follower=0 WHERE username=?",
(username,)
)
for username in await self.following():
await db.execute(
"UPDATE users SET following=1 WHERE username=?",
(username,)
)
await db.commit()
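    # Note: followers() and following() page through Instagram's web GraphQL
    # endpoint with hard-coded query_hash values, pulling 50 usernames per page
    # and following end_cursor until has_next_page is False.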
async def followers(self, username=None):
followers = []
if username is None:
id = self.id
else:
user = await self.get_user_by_username(username)
id = user["graphql"]["user"]["id"]
params = {
"query_hash": "37479f2b8209594dde7facb0d904896a",
"variables": json.dumps({"id": str(id), "first": 50})
}
has_next_page = True
while has_next_page:
res = await self.http_request(
"GET",
"https://www.instagram.com/graphql/query/",
params=params,
response_type="json"
)
has_next_page = res["data"]["user"]["edge_followed_by"]["page_info"]["has_next_page"]
end_cursor = res["data"]["user"]["edge_followed_by"]["page_info"]["end_cursor"]
params["variables"] = json.dumps(
{"id": str(id), "first": 50, "after": end_cursor}
)
for user in res["data"]["user"]["edge_followed_by"]["edges"]:
followers.append(user["node"]["username"])
return followers
async def following(self, username=None):
following = []
if username is None:
id = self.id
else:
user = await self.get_user_by_username(username)
id = user["graphql"]["user"]["id"]
params = {
"query_hash": "58712303d941c6855d4e888c5f0cd22f",
"variables": json.dumps({"id": str(id), "first": 50})
}
has_next_page = True
while has_next_page:
res = await self.http_request(
"GET",
"https://www.instagram.com/graphql/query/",
params=params,
response_type="json"
)
has_next_page = res["data"]["user"]["edge_follow"]["page_info"]["has_next_page"]
end_cursor = res["data"]["user"]["edge_follow"]["page_info"]["end_cursor"]
params["variables"] = json.dumps(
{"id": str(id), "first": 50, "after": end_cursor}
)
for user in res["data"]["user"]["edge_follow"]["edges"]:
following.append(user["node"]["username"])
return following
async def feed(self, explore=True, users=[], hashtags=[], locations=[]):
"""
Generates a feed based on the passed parameters. Multiple parameters
can be passed at the same time.
Args:
            explore: [Bool] If True the explore page will be added to the
                feed.
            users: [List of usernames] Their media will be pulled and added to
                the feed.
            hashtags: [List of hashtags] Media with those hashtags will be added
                to the feed.
            locations: [List of location ids] Media with those locations will
                be added to the feed.
        Returns:
            Yields a media from the generated feed.
"""
# Initialize asynchronous queue where the feed elements will be
        # temporarily stored
q = asyncio.Queue()
if explore:
# Add the "explore" feed to the queue
asyncio.ensure_future(self._explore_feed(q))
if len(users):
# Add all the media from the given users to the queue
for user in users:
asyncio.ensure_future(self._user_feed(q, user))
if len(hashtags):
# Add all the media from the given hashtags to the queue
for hashtag in hashtags:
asyncio.ensure_future(self._hashtag_feed(q, hashtag))
if len(locations):
# Add all the media from the given locations to the queue
for location in locations:
asyncio.ensure_future(self._location_feed(q, location))
# Keep on yielding media while more is loaded
while 1:
while not q.empty():
yield await q.get()
await asyncio.sleep(1e-12)
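    # The _*_feed helpers below share one pattern: query the GraphQL endpoint,
    # push every media node onto the shared asyncio queue consumed by feed(),
    # and paginate by re-encoding "variables" with the returned end_cursor.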
async def _explore_feed(self, q):
params = {
"query_hash": "ecd67af449fb6edab7c69a205413bfa7",
"variables": json.dumps({"first": 24})
}
has_next_page = True
while has_next_page:
res = await self.http_request(
"GET",
"https://www.instagram.com/graphql/query/",
params=params,
response_type="json"
)
has_next_page = res["data"]["user"]["edge_web_discover_media"]["page_info"]["has_next_page"]
end_cursor = res["data"]["user"]["edge_web_discover_media"]["page_info"]["end_cursor"]
params["variables"] = json.dumps(
{"first": 50, "after": end_cursor}
)
for media in res["data"]["user"]["edge_web_discover_media"]["edges"]:
await q.put(media["node"])
async def _user_feed(self, q, user):
        user = await self.get_user_by_username(user)
id = user["id"]
params = {
"query_hash": "a5164aed103f24b03e7b7747a2d94e3c",
"variables": json.dumps({"id": id, "first": 24})
}
has_next_page = True
while has_next_page:
res = await self.http_request(
"GET",
"https://www.instagram.com/graphql/query/",
params=params,
response_type="json"
)
has_next_page = res["data"]["user"]["edge_owner_to_timeline_media"]["page_info"]["has_next_page"]
end_cursor = res["data"]["user"]["edge_owner_to_timeline_media"]["page_info"]["end_cursor"]
params["variables"] = json.dumps(
{"id": id, "first": 50, "after": end_cursor}
)
for media in res["data"]["user"]["edge_web_discover_media"]["edges"]:
await q.put(media["node"])
async def _hashtag_feed(self, q, hashtag):
count = 0
params = {
"query_hash": "1780c1b186e2c37de9f7da95ce41bb67",
"variables": json.dumps({"tag_name": hashtag, "first": count})
}
has_next_page = True
while has_next_page:
res = await self.http_request(
"GET",
"https://www.instagram.com/graphql/query/",
params=params,
response_type="json"
)
has_next_page = res["data"]["hashtag"]["edge_hashtag_to_media"]["page_info"]["has_next_page"]
end_cursor = res["data"]["hashtag"]["edge_hashtag_to_media"]["page_info"]["end_cursor"]
count += 1
params["variables"] = json.dumps(
{"tag_name": hashtag, "first": count, "after": end_cursor}
)
for media in res["data"]["hashtag"]["edge_hashtag_to_media"]["edges"]:
await q.put(media["node"])
async def _location_feed(self, q, location_id):
count = 0
params = {
"query_hash": "1b84447a4d8b6d6d0426fefb34514485",
"variables": json.dumps({"id": str(location_id), "first": 50})
}
has_next_page = True
while has_next_page:
res = await self.http_request(
"GET",
"https://www.instagram.com/graphql/query/",
params=params,
response_type="json"
)
has_next_page = res["data"]["location"]["edge_location_to_media"]["page_info"]["has_next_page"]
end_cursor = res["data"]["location"]["edge_location_to_media"]["page_info"]["end_cursor"]
count += 1
params["variables"] = json.dumps(
{
"id": str(location_id),
"first": 50,
"after": str(end_cursor)
}
)
for media in res["data"]["location"]["edge_location_to_media"]["edges"]:
await q.put(media["node"])
async def print(self, media):
"""
Gives a visual representation of a media.
Args:
media: The media to be printed.
Returns:
None
"""
logger.info("#--------"*3+"#")
try:
mediatype = media["__typename"]
except KeyError:
is_video = media["is_video"]
if is_video:
mediatype = "GraphVideo"
else:
mediatype = "GraphImage"
pass
likes = media["edge_liked_by"]["count"]
comments = media["edge_media_to_comment"]["count"]
shortcode = media["shortcode"]
res = await self.http_request(
"GET",
f"https://www.instagram.com/p/{shortcode}/",
params="__a=1",
response_type="json"
)
username = res["graphql"]["shortcode_media"]["owner"]["username"]
logger.info(
f"{utils.translate_ig_media_type_to_custom(mediatype).capitalize()} by {username}\n❤️ {likes}, 💬 {comments}"
)
try:
caption = media["edge_media_to_caption"]["edges"][0]["node"]["text"]
except IndexError:
pass
else:
if len(caption) > 100:
logger.info(f"{caption:.100}...")
else:
logger.info(f"{caption}")
async def like(self, media):
"""
        Check if the media satisfies the prerequisites and, if so, send a
        like.
        Args:
            media: The media to like.
        Returns:
None
"""
# Check if the media has already been liked
async with aiosqlite.connect("./piggy.db") as db:
row = await db.execute(
"SELECT * FROM likes WHERE id=?",
(media["id"],)
)
if await row.fetchone():
logger.info("Already liked!")
return
try:
mediatype = media["__typename"]
except KeyError:
is_video = media["is_video"]
if is_video:
mediatype = "GraphVideo"
else:
mediatype = "GraphImage"
pass
else:
if not mediatype in utils.translate_custom_media_type_to_ig(self.settings["like"]["media_type"]):
logger.info("Wrong media type. Not liked!")
return
likes = media["edge_liked_by"]["count"]
if likes < self.settings["like"]["num_of_likes"]["min"] or likes >= self.settings["like"]["num_of_likes"]["max"]:
logger.info("Too many or too few likes. Not liked!")
return
comments = media["edge_media_to_comment"]["count"]
if comments < self.settings["like"]["num_of_comments"]["min"] or comments >= self.settings["like"]["num_of_comments"]["max"]:
logger.info("Too many or too few comments. Not liked!")
return
if self.settings["like"]["rate"] / 100 > random():
await self._like(media["id"])
else:
logger.info("Not liked!")
async def _like(self, id):
headers = {
"DNT": "1",
"Host": "www.instagram.com",
"User-Agent": self.settings["connection"]["user_agent"],
"X-CSRFToken": self.csrf_token
}
await self.http_request(
"POST",
f"https://www.instagram.com/web/likes/{id}/like/",
headers=headers
)
async with aiosqlite.connect("./piggy.db") as db:
await db.execute(
"INSERT INTO likes VALUES(?,?)",
(id, int(time.time()))
)
await db.commit()
logger.info("Liked!")
async def _unlike(self, id):
headers = {
"DNT": "1",
"Host": "www.instagram.com",
"User-Agent": self.settings["connection"]["user_agent"],
"X-CSRFToken": self.csrf_token
}
await self.http_request(
"POST",
f"https://www.instagram.com/web/likes/{id}/unlike/",
headers=headers
)
async with aiosqlite.connect("./piggy.db") as db:
await db.execute("INSERT INTO likes WHERE id=?", (id,))
await db.commit()
logger.info("Unliked!")
async def comment(self, media):
"""
        Check if the media satisfies the prerequisites and, if so, post a
        comment.
        Args:
            media: The media to comment.
        Returns:
None
"""
if media["comments_disabled"]:
logger.info("Comments disabled.")
return
if self.settings["comment"]["only_once"]:
async with aiosqlite.connect("./piggy.db") as db:
row = await db.execute(
"SELECT * FROM comments WHERE id=?",
(media["id"],)
)
            if await row.fetchone() is not None:
logger.info("Already commented.")
return
try:
mediatype = media["__typename"]
except KeyError:
is_video = media["is_video"]
if is_video:
mediatype = "GraphVideo"
else:
mediatype = "GraphImage"
pass
else:
            if mediatype not in utils.translate_custom_media_type_to_ig(self.settings["comment"]["media_type"]):
return
likes = media["edge_liked_by"]["count"]
if likes < self.settings["comment"]["num_of_likes"]["min"] or likes >= self.settings["comment"]["num_of_likes"]["max"]:
return
comments = media["edge_media_to_comment"]["count"]
if comments < self.settings["comment"]["num_of_comments"]["min"] or comments >= self.settings["comment"]["num_of_comments"]["max"]:
return
if self.settings["comment"]["rate"] / 100 <= random():
if mediatype == "GraphImage" or mediatype == "GraphSidecar":
comment = self.pic_comments_list[
randint(0, len(self.pic_comments_list)-1)
]
else:
comment = self.video_comments_list[
randint(0, len(self.video_comments_list)-1)
]
await self._comment(media["id"], comment)
else:
logger.info("Not commented!")
async def _comment(self, id, comment, reply_to_id=None):
headers = {
"DNT": "1",
"Host": "www.instagram.com",
"User-Agent": self.settings["connection"]["user_agent"],
"X-CSRFToken": self.csrf_token
}
payload = {
"comment_text": comment
}
await self.http_request(
"POST",
f"https://www.instagram.com/web/comments/{id}/add/",
headers=headers,
data=payload
)
async with aiosqlite.connect("./piggy.db") as db:
await db.execute(
"INSERT INTO comments VALUES(?,?,?)",
(id, int(time.time()), comment)
)
await db.commit()
logger.info("Comment posted!")
async def follow(self, media):
"""
        Check if the media satisfies the prerequisites and, if so, send a
        follow request.
        Args:
            media: The media of the user to be followed.
        Returns:
None
"""
if self.settings["follow"]["rate"] / 100 > random():
await self._follow(media["owner"]["id"])
else:
logger.info("Not followed!")
async def _follow(self, id):
headers = {
"DNT": "1",
"Host": "www.instagram.com",
"User-Agent": self.settings["connection"]["user_agent"],
"X-CSRFToken": self.csrf_token
}
await self.http_request(
"POST",
f"https://www.instagram.com/web/friendships/{id}/follow/",
headers=headers
)
async with aiosqlite.connect("./piggy.db") as db:
c = await db.execute("SELECT * FROM users WHERE id=?", (id,))
            if await c.fetchone():
await db.execute(
"""
UPDATE users SET
ts_following=?, following=?
WHERE id=?
""",
(int(time.time()), True, id)
)
else:
await db.execute(
"INSERT INTO users VALUES(?,?,?,?,?)",
(id, None, int(time.time()), False, True)
)
await db.commit()
logger.info("Follow request sent!")
async def unfollow(self, id):
return
async def _unfollow(self, id):
headers = {
"DNT": "1",
"Host": "www.instagram.com",
"User-Agent": self.settings["connection"]["user_agent"],
"X-CSRFToken": self.csrf_token
}
await self.http_request(
"POST",
f"https://www.instagram.com/web/friendships/{id}/unfollow/",
headers=headers
)
async with aiosqlite.connect("./piggy.db") as db:
await db.execute(
"UPDATE users SET following=false WHERE id=?",
(id,)
)
await db.commit()
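    # Periodically export the tables enabled in settings["backup"] to CSV or
    # JSON, then sleep for the configured "every" interval.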
async def backup(self):
while 1:
logger.info("Backing up database...")
for table_name in ["users", "likes", "comments"]:
if self.settings["backup"][table_name]:
async with aiosqlite.connect("./piggy.db") as db:
rows = await db.execute(
f"SELECT * FROM '{table_name}'"
)
header = [i[0] for i in rows.description]
rows = await rows.fetchall()
if self.settings["backup"]["format"] == "csv":
await utils.to_csv(table_name, header, rows)
elif self.settings["backup"]["format"] == "json":
await utils.to_json(table_name, header, rows)
else:
logger.warning(
f"""Unsupported file format: {self.settings['backup']['format']}."""
)
await asyncio.sleep(
utils.interval_in_seconds(self.settings["backup"]["every"])
)
async def close(self):
logger.info("\nClosing session...")
# Close the http session
await self.session.close()
async def get_user_by_username(self, username):
res = await self.http_request(
"GET",
f"https://www.instagram.com/{username}/",
params="__a:1"
)
return json.loads(
regex.findall(
r"<script[^>]*>window._sharedData = (.*?)</script>",
regex.findall(
r"<body[^>]*>(.*)</body>",
res,
flags=regex.DOTALL
)[0],
flags=regex.DOTALL
)[0][:-1])["entry_data"]["ProfilePage"][0]["graphql"]["user"]
# -----------------------------------------------------------------------------
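    # Download helpers: save GraphImage media to ./images/ and index their
    # dimensions, URL and caption hashtags in the local pics table.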
async def download(self, media):
id = media["id"]
url = media["display_url"]
format = regex.findall(r".([a-zA-Z]+)$", url)[0]
if media["__typename"] != "GraphImage" or await self.pic_already_saved(id):
return
height = media["dimensions"]["height"]
width = media["dimensions"]["width"]
try:
caption = media["edge_media_to_caption"]["edges"][0]["node"]["text"]
except IndexError:
tags = []
pass
else:
if await self.download_pic(url, id, format):
logger.info(f"Caption: {caption}")
tags = regex.findall(r"#([\p{L}0-9_]+)", caption)
logger.info(f"Tags: {tags}")
else:
return
await self.save_to_database(id, type, height, width, url, tags)
async def download_pic(self, url, id, format):
logger.info(f"Downloading {id}")
async with aiohttp.ClientSession() as session:
try:
async with session.get(url) as r:
if r.status == 200:
f = await aiofiles.open(
f"./images/{id}.{format}",
mode="wb"
)
await f.write(await r.read())
await f.close()
return True
else:
return False
except TimeoutError:
return False
async def pic_already_saved(self, id):
logger.debug("Checking database.")
async with aiosqlite.connect("./piggy.db") as db:
row = await db.execute(
"SELECT * FROM pics WHERE id=?",
(id,)
)
if await row.fetchone() is None:
return False
else:
return True
async def save_to_database(self, id, type, height, width, url, tags):
tags = json.dumps(tags)
async with aiosqlite.connect("./piggy.db") as db:
await db.execute(
"INSERT INTO pics VALUES(?,?,?,?,?)",
(id, height, width, url, tags)
)
await db.commit()
| [
"piggy.utils.to_json",
"piggy.utils.interval_in_seconds",
"json.dumps",
"piggy.utils.translate_custom_media_type_to_ig",
"logging.Formatter",
"aiohttp.ClientSession",
"piggy.utils.to_csv",
"logging.FileHandler",
"piggy.utils.translate_ig_media_type_to_custom",
"aiosqlite.connect",
"asyncio.sleep",
"logging.StreamHandler",
"random.random",
"piggy.utils.cookies_dict",
"aiohttp.ClientTimeout",
"aiofiles.open",
"regex.findall",
"time.time",
"asyncio.Queue",
"logging.getLogger"
] | [((258, 285), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (275, 285), False, 'import logging\n'), ((323, 346), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (344, 346), False, 'import logging\n'), ((352, 386), 'logging.FileHandler', 'logging.FileHandler', (['"""./piggy.log"""'], {}), "('./piggy.log')\n", (371, 386), False, 'import logging\n'), ((453, 485), 'logging.Formatter', 'logging.Formatter', (['"""%(message)s"""'], {}), "('%(message)s')\n", (470, 485), False, 'import logging\n'), ((525, 599), 'logging.Formatter', 'logging.Formatter', (['"""[%(asctime)s] %(levelname)s %(funcName)s: %(message)s"""'], {}), "('[%(asctime)s] %(levelname)s %(funcName)s: %(message)s')\n", (542, 599), False, 'import logging\n'), ((4429, 4496), 'aiohttp.ClientTimeout', 'aiohttp.ClientTimeout', ([], {'total': "self.settings['connection']['timeout']"}), "(total=self.settings['connection']['timeout'])\n", (4450, 4496), False, 'import aiohttp\n'), ((4542, 4597), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {'headers': 'headers', 'timeout': 'timeout'}), '(headers=headers, timeout=timeout)\n', (4563, 4597), False, 'import aiohttp\n'), ((6179, 6222), 'piggy.utils.cookies_dict', 'utils.cookies_dict', (['self.session.cookie_jar'], {}), '(self.session.cookie_jar)\n', (6197, 6222), False, 'from piggy import utils\n'), ((11590, 11605), 'asyncio.Queue', 'asyncio.Queue', ([], {}), '()\n', (11603, 11605), False, 'import asyncio\n'), ((29914, 29930), 'json.dumps', 'json.dumps', (['tags'], {}), '(tags)\n', (29924, 29930), False, 'import json\n'), ((891, 946), 'asyncio.sleep', 'asyncio.sleep', (["self.settings['connection']['wait_time']"], {}), "(self.settings['connection']['wait_time'])\n", (904, 946), False, 'import asyncio\n'), ((4990, 5063), 'regex.findall', 'regex.findall', (['"""\\\\"csrf_token\\\\":\\\\"(.*?)\\\\\\""""', 'res'], {'flags': 'regex.MULTILINE'}), '(\'\\\\"csrf_token\\\\":\\\\"(.*?)\\\\"\', res, flags=regex.MULTILINE)\n', (5003, 5063), False, 'import regex\n'), ((6510, 6541), 'aiosqlite.connect', 'aiosqlite.connect', (['"""./piggy.db"""'], {}), "('./piggy.db')\n", (6527, 6541), False, 'import aiosqlite\n'), ((12653, 12678), 'json.dumps', 'json.dumps', (["{'first': 24}"], {}), "({'first': 24})\n", (12663, 12678), False, 'import json\n'), ((13194, 13240), 'json.dumps', 'json.dumps', (["{'first': 50, 'after': end_cursor}"], {}), "({'first': 50, 'after': end_cursor})\n", (13204, 13240), False, 'import json\n'), ((13631, 13666), 'json.dumps', 'json.dumps', (["{'id': id, 'first': 24}"], {}), "({'id': id, 'first': 24})\n", (13641, 13666), False, 'import json\n'), ((14192, 14248), 'json.dumps', 'json.dumps', (["{'id': id, 'first': 50, 'after': end_cursor}"], {}), "({'id': id, 'first': 50, 'after': end_cursor})\n", (14202, 14248), False, 'import json\n'), ((14577, 14626), 'json.dumps', 'json.dumps', (["{'tag_name': hashtag, 'first': count}"], {}), "({'tag_name': hashtag, 'first': count})\n", (14587, 14626), False, 'import json\n'), ((15167, 15237), 'json.dumps', 'json.dumps', (["{'tag_name': hashtag, 'first': count, 'after': end_cursor}"], {}), "({'tag_name': hashtag, 'first': count, 'after': end_cursor})\n", (15177, 15237), False, 'import json\n'), ((18162, 18193), 'aiosqlite.connect', 'aiosqlite.connect', (['"""./piggy.db"""'], {}), "('./piggy.db')\n", (18179, 18193), False, 'import aiosqlite\n'), ((19496, 19504), 'random.random', 'random', ([], {}), '()\n', (19502, 19504), False, 'from random import random, randint\n'), ((20013, 20044), 
'aiosqlite.connect', 'aiosqlite.connect', (['"""./piggy.db"""'], {}), "('./piggy.db')\n", (20030, 20044), False, 'import aiosqlite\n'), ((20662, 20693), 'aiosqlite.connect', 'aiosqlite.connect', (['"""./piggy.db"""'], {}), "('./piggy.db')\n", (20679, 20693), False, 'import aiosqlite\n'), ((22466, 22474), 'random.random', 'random', ([], {}), '()\n', (22472, 22474), False, 'from random import random, randint\n'), ((23478, 23509), 'aiosqlite.connect', 'aiosqlite.connect', (['"""./piggy.db"""'], {}), "('./piggy.db')\n", (23495, 23509), False, 'import aiosqlite\n'), ((24051, 24059), 'random.random', 'random', ([], {}), '()\n', (24057, 24059), False, 'from random import random, randint\n'), ((24592, 24623), 'aiosqlite.connect', 'aiosqlite.connect', (['"""./piggy.db"""'], {}), "('./piggy.db')\n", (24609, 24623), False, 'import aiosqlite\n'), ((25742, 25773), 'aiosqlite.connect', 'aiosqlite.connect', (['"""./piggy.db"""'], {}), "('./piggy.db')\n", (25759, 25773), False, 'import aiosqlite\n'), ((28015, 28050), 'regex.findall', 'regex.findall', (['""".([a-zA-Z]+)$"""', 'url'], {}), "('.([a-zA-Z]+)$', url)\n", (28028, 28050), False, 'import regex\n'), ((28871, 28894), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (28892, 28894), False, 'import aiohttp\n'), ((29543, 29574), 'aiosqlite.connect', 'aiosqlite.connect', (['"""./piggy.db"""'], {}), "('./piggy.db')\n", (29560, 29574), False, 'import aiosqlite\n'), ((29950, 29981), 'aiosqlite.connect', 'aiosqlite.connect', (['"""./piggy.db"""'], {}), "('./piggy.db')\n", (29967, 29981), False, 'import aiosqlite\n'), ((12487, 12507), 'asyncio.sleep', 'asyncio.sleep', (['(1e-12)'], {}), '(1e-12)\n', (12500, 12507), False, 'import asyncio\n'), ((21263, 21294), 'aiosqlite.connect', 'aiosqlite.connect', (['"""./piggy.db"""'], {}), "('./piggy.db')\n", (21280, 21294), False, 'import aiosqlite\n'), ((28557, 28599), 'regex.findall', 'regex.findall', (['"""#([\\\\p{L}0-9_]+)"""', 'caption'], {}), "('#([\\\\p{L}0-9_]+)', caption)\n", (28570, 28599), False, 'import regex\n'), ((1657, 1674), 'asyncio.sleep', 'asyncio.sleep', (['(30)'], {}), '(30)\n', (1670, 1674), False, 'import asyncio\n'), ((18750, 18826), 'piggy.utils.translate_custom_media_type_to_ig', 'utils.translate_custom_media_type_to_ig', (["self.settings['like']['media_type']"], {}), "(self.settings['like']['media_type'])\n", (18789, 18826), False, 'from piggy import utils\n'), ((21894, 21973), 'piggy.utils.translate_custom_media_type_to_ig', 'utils.translate_custom_media_type_to_ig', (["self.settings['comment']['media_type']"], {}), "(self.settings['comment']['media_type'])\n", (21933, 21973), False, 'from piggy import utils\n'), ((26185, 26216), 'aiosqlite.connect', 'aiosqlite.connect', (['"""./piggy.db"""'], {}), "('./piggy.db')\n", (26202, 26216), False, 'import aiosqlite\n'), ((27002, 27061), 'piggy.utils.interval_in_seconds', 'utils.interval_in_seconds', (["self.settings['backup']['every']"], {}), "(self.settings['backup']['every'])\n", (27027, 27061), False, 'from piggy import utils\n'), ((17434, 17484), 'piggy.utils.translate_ig_media_type_to_custom', 'utils.translate_ig_media_type_to_custom', (['mediatype'], {}), '(mediatype)\n', (17473, 17484), False, 'from piggy import utils\n'), ((20156, 20167), 'time.time', 'time.time', ([], {}), '()\n', (20165, 20167), False, 'import time\n'), ((23626, 23637), 'time.time', 'time.time', ([], {}), '()\n', (23635, 23637), False, 'import time\n'), ((26576, 26614), 'piggy.utils.to_csv', 'utils.to_csv', (['table_name', 'header', 'rows'], {}), 
'(table_name, header, rows)\n', (26588, 26614), False, 'from piggy import utils\n'), ((29048, 29099), 'aiofiles.open', 'aiofiles.open', (['f"""./images/{id}.{format}"""'], {'mode': '"""wb"""'}), "(f'./images/{id}.{format}', mode='wb')\n", (29061, 29099), False, 'import aiofiles\n'), ((24956, 24967), 'time.time', 'time.time', ([], {}), '()\n', (24965, 24967), False, 'import time\n'), ((25144, 25155), 'time.time', 'time.time', ([], {}), '()\n', (25153, 25155), False, 'import time\n'), ((26715, 26754), 'piggy.utils.to_json', 'utils.to_json', (['table_name', 'header', 'rows'], {}), '(table_name, header, rows)\n', (26728, 26754), False, 'from piggy import utils\n'), ((27563, 27627), 'regex.findall', 'regex.findall', (['"""<body[^>]*>(.*)</body>"""', 'res'], {'flags': 'regex.DOTALL'}), "('<body[^>]*>(.*)</body>', res, flags=regex.DOTALL)\n", (27576, 27627), False, 'import regex\n')] |
import os
import random
import tempfile
import webbrowser
import time
import uuid
import socket
import shutil
import subprocess
import pathlib
from bs4 import BeautifulSoup
from yaplee.errors import UnknownTemplateValue
from yaplee.js.converter import JSFunc
class Server:
def __init__(self, meta) -> None:
self.port = meta['config']['port']
self.templates = meta['templates']
self.tree = meta['tree']
self.opentab = meta['config']['opentab']
self.tempuuid = ''
self.module_path = str(pathlib.Path(__file__).resolve().parent)
self.temp_uuid, self.temp_path = self.__gen_yaplee_temp()
def is_port_open(self):
a_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
local_connection = ('127.0.0.1', self.port)
port_open = a_socket.connect_ex(local_connection)
a_socket.close()
return not (not port_open)
def __gen_yaplee_temp(self):
self.tempuuid = uuid.uuid1().hex[:15]
path = os.path.join(tempfile.gettempdir(), self.tempuuid)
if not os.path.isdir(path):
os.mkdir(path)
return self.tempuuid, path
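    # Copy every registered template (plus locally referenced assets and
    # styles) into the temp directory, inject the requested tags and JS
    # functions with BeautifulSoup, and generate a fallback index.html that
    # lists the available paths when no template provides one.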
def generate_files(self):
generated_files = []
js_functions = {}
for template, meta in self.templates.items():
template = template.split('-_-')[0]
to_copy_path = meta['load_name'] if meta['load_name'] else template
to_copy_path = to_copy_path.split(os.sep)[-1]
template_to_copy = os.path.join(self.temp_path, to_copy_path.replace('\\', os.sep))
shutil.copy(
template,
template_to_copy
)
tag_loc, tags = '', {}
if 'tags' in meta['meta']:
tag_loc, tags = meta['meta']['tags']()
for tag_meta, tag in tags.items():
tag_source = ''
is_tag_has_source = False
tag_name = str(tag_meta.split('-_-')[0])
if tag_name in ['link']:
if 'href' in str(tag):
tag_source = tag.get('href')
is_tag_has_source = True
else:
try:
if 'src' in str(tag):
tag_source = tag.get('src')
is_tag_has_source = True
except:
continue
if is_tag_has_source and ('://' not in tag_source and tag_source):
shutil.copy(
tag_source,
os.path.join(self.temp_path, tag_source)
)
if 'tagvalue' in tag.attrs:
tagvalue = tag.get('tagvalue')
del tag.attrs['tagvalue']
tag.append(tagvalue)
elif 'functions' in meta['meta']:
js_functions = {i.__name__:i for i in meta['meta']['functions']}
elif 'style' in meta['meta']:
if type(meta['meta']['style']) is str:
styles = [meta['meta']['style']]
elif type(meta['meta']['style']) is list:
styles = meta['meta']['style']
else:
raise UnknownTemplateValue(
'template style must be list or string (one style)'
)
tag_loc, tags = 'head', {
str(random.randint(111111, 999999)):BeautifulSoup('', 'html.parser').new_tag(
'link', rel='stylesheet', href=style
) for style in styles
}
for style in styles:
shutil.copy(
style,
os.path.join(self.temp_path, style)
)
with open(template_to_copy, 'r+') as file:
template_data = file.read()
soup = BeautifulSoup(template_data, 'html.parser')
for tagname, tag in tags.items():
soup.find(tag_loc).append(tag)
for funcname, function in js_functions.items():
unique_id = str(uuid.uuid1()).split('-')[0]
soup.html.append(soup.new_tag('script', id=unique_id))
soup.find('script', {'id': unique_id}).append(
'function '+funcname+'(){ '+
str(JSFunc(function))+
' }'
)
file.truncate(0)
file.write(soup.prettify())
del file
generated_files.append(to_copy_path)
if 'index.html' not in generated_files:
with open(os.path.join(self.module_path, 'assets', 'no-index.html.py'), 'r+') as file:
nohtml_base = file.read()
file.close()
del file
nohtml_base = nohtml_base.replace('{% avaliable_paths %}',
'' if not self.templates else
'<h4>Avaliable paths : {}</h4>'.format(
', '.join(['<a style="text-decoration: none;" href="{}" target="_blank">{}</a>'.format(
i.split('-_-')[0] if j['load_name'] == None else j['load_name'],
i.split('-_-')[0] if not j['name'] else j['name'].title()
) for i, j in self.templates.items()])
)
)
with open(os.path.join(self.temp_path, 'index.html'), 'w+') as file:
file.write(nohtml_base)
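    # Serve the generated temp directory with Python's built-in http.server on
    # the configured port, optionally opening a browser tab first.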
def start(self):
self.generate_files()
if self.opentab:
webbrowser.open('http://127.0.0.1:{}/'.format(str(self.port)))
time.sleep(1)
yield self.temp_uuid, self.temp_path
subprocess.run(
('python3' if os.name == 'posix' else 'python')+' -m http.server '+str(self.port)+' --bind 127.0.0.1 --directory "'+self.temp_path+'"',
shell=True
)
def remove_yaplee_dir(self):
if os.path.isdir(os.path.join(tempfile.gettempdir(), self.tempuuid)):
shutil.rmtree(os.path.join(tempfile.gettempdir(), self.tempuuid))
| [
"os.mkdir",
"random.randint",
"os.path.isdir",
"tempfile.gettempdir",
"socket.socket",
"time.sleep",
"yaplee.js.converter.JSFunc",
"uuid.uuid1",
"pathlib.Path",
"bs4.BeautifulSoup",
"os.path.join",
"yaplee.errors.UnknownTemplateValue",
"shutil.copy"
] | [((693, 742), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (706, 742), False, 'import socket\n'), ((1022, 1043), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (1041, 1043), False, 'import tempfile\n'), ((1076, 1095), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (1089, 1095), False, 'import os\n'), ((1109, 1123), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (1117, 1123), False, 'import os\n'), ((1599, 1638), 'shutil.copy', 'shutil.copy', (['template', 'template_to_copy'], {}), '(template, template_to_copy)\n', (1610, 1638), False, 'import shutil\n'), ((5892, 5905), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (5902, 5905), False, 'import time\n'), ((972, 984), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (982, 984), False, 'import uuid\n'), ((4088, 4131), 'bs4.BeautifulSoup', 'BeautifulSoup', (['template_data', '"""html.parser"""'], {}), "(template_data, 'html.parser')\n", (4101, 4131), False, 'from bs4 import BeautifulSoup\n'), ((6238, 6259), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (6257, 6259), False, 'import tempfile\n'), ((4881, 4941), 'os.path.join', 'os.path.join', (['self.module_path', '"""assets"""', '"""no-index.html.py"""'], {}), "(self.module_path, 'assets', 'no-index.html.py')\n", (4893, 4941), False, 'import os\n'), ((5629, 5671), 'os.path.join', 'os.path.join', (['self.temp_path', '"""index.html"""'], {}), "(self.temp_path, 'index.html')\n", (5641, 5671), False, 'import os\n'), ((6317, 6338), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (6336, 6338), False, 'import tempfile\n'), ((538, 560), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (550, 560), False, 'import pathlib\n'), ((2696, 2736), 'os.path.join', 'os.path.join', (['self.temp_path', 'tag_source'], {}), '(self.temp_path, tag_source)\n', (2708, 2736), False, 'import os\n'), ((3399, 3472), 'yaplee.errors.UnknownTemplateValue', 'UnknownTemplateValue', (['"""template style must be list or string (one style)"""'], {}), "('template style must be list or string (one style)')\n", (3419, 3472), False, 'from yaplee.errors import UnknownTemplateValue\n'), ((3907, 3942), 'os.path.join', 'os.path.join', (['self.temp_path', 'style'], {}), '(self.temp_path, style)\n', (3919, 3942), False, 'import os\n'), ((3586, 3616), 'random.randint', 'random.randint', (['(111111)', '(999999)'], {}), '(111111, 999999)\n', (3600, 3616), False, 'import random\n'), ((4335, 4347), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (4345, 4347), False, 'import uuid\n'), ((4586, 4602), 'yaplee.js.converter.JSFunc', 'JSFunc', (['function'], {}), '(function)\n', (4592, 4602), False, 'from yaplee.js.converter import JSFunc\n'), ((3618, 3650), 'bs4.BeautifulSoup', 'BeautifulSoup', (['""""""', '"""html.parser"""'], {}), "('', 'html.parser')\n", (3631, 3650), False, 'from bs4 import BeautifulSoup\n')] |
# import module and read xyz file
from ase.io import read, write
file=read('last3.xyz', index=":")
# create list of tags
tags = []
for structure in file:
if structure.info['config_type'] not in tags:
tags.append(structure.info['config_type'])
# extract unique tags and energy sigma
sigmas_by_tag = {}
for i in tags:
    sigmas_by_tag.setdefault(i, [])
for key in tags:
    for structure in file:
        if structure.info['config_type'] == key and structure.info['energy_sigma'] not in sigmas_by_tag.get(key):
            sigmas_by_tag[key].append(structure.info['energy_sigma'])
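# At this point each config_type tag maps to the unique energy_sigma values
# found for it, e.g. {'bulk': [0.01, 0.05], 'surface': [0.02]} (example values
# for illustration only).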
| [
"ase.io.read"
] | [((72, 100), 'ase.io.read', 'read', (['"""last3.xyz"""'], {'index': '""":"""'}), "('last3.xyz', index=':')\n", (76, 100), False, 'from ase.io import read, write\n')] |
import cv2
import numpy as np
from matplotlib import pyplot as plt
l: list = []
img = None
img_cp = None
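# Mouse callback: each left click marks one corner on a working copy of the
# image. Once four points are collected they become the source quadrilateral
# for a perspective transform to a 300x300 top-down view, after which the
# working copy and the point list are reset for the next selection.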
def draw_circle(event, x, y, flags, param):
global l
global img
global img_cp
if event == cv2.EVENT_LBUTTONDOWN:
cv2.circle(img_cp, (x, y), 5, (255, 0, 0), -1)
l.append([x, y])
cv2.imshow('image', img_cp)
if len(l) == 4:
print(l)
pts1 = np.float32(l)
pts2 = np.float32([[0, 0], [300, 0], [0, 300], [300, 300]])
M = cv2.getPerspectiveTransform(pts1, pts2)
dst = cv2.warpPerspective(img, M, (300, 300))
cv2.imshow('Original image', img_cp)
cv2.imshow('Final', dst)
img_cp = img.copy()
l.clear()
def road_straight():
global img
global img_cp
img = cv2.imread('road.jpg')
img = cv2.resize(img, dsize=(1000, 1000))
img = cv2.resize(img, (0, 0), fx=0.75, fy=0.75, interpolation=cv2.INTER_NEAREST)
img_cp = img.copy()
cv2.namedWindow('image')
cv2.imshow('image', img)
cv2.setMouseCallback('image', draw_circle)
cv2.waitKey()
cv2.destroyAllWindows()
return
road_straight()
| [
"cv2.resize",
"cv2.warpPerspective",
"cv2.circle",
"cv2.waitKey",
"cv2.destroyAllWindows",
"numpy.float32",
"cv2.getPerspectiveTransform",
"cv2.imread",
"cv2.setMouseCallback",
"cv2.imshow",
"cv2.namedWindow"
] | [((787, 809), 'cv2.imread', 'cv2.imread', (['"""road.jpg"""'], {}), "('road.jpg')\n", (797, 809), False, 'import cv2\n'), ((820, 855), 'cv2.resize', 'cv2.resize', (['img'], {'dsize': '(1000, 1000)'}), '(img, dsize=(1000, 1000))\n', (830, 855), False, 'import cv2\n'), ((866, 940), 'cv2.resize', 'cv2.resize', (['img', '(0, 0)'], {'fx': '(0.75)', 'fy': '(0.75)', 'interpolation': 'cv2.INTER_NEAREST'}), '(img, (0, 0), fx=0.75, fy=0.75, interpolation=cv2.INTER_NEAREST)\n', (876, 940), False, 'import cv2\n'), ((969, 993), 'cv2.namedWindow', 'cv2.namedWindow', (['"""image"""'], {}), "('image')\n", (984, 993), False, 'import cv2\n'), ((998, 1022), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'img'], {}), "('image', img)\n", (1008, 1022), False, 'import cv2\n'), ((1027, 1069), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['"""image"""', 'draw_circle'], {}), "('image', draw_circle)\n", (1047, 1069), False, 'import cv2\n'), ((1075, 1088), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (1086, 1088), False, 'import cv2\n'), ((1093, 1116), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1114, 1116), False, 'import cv2\n'), ((245, 291), 'cv2.circle', 'cv2.circle', (['img_cp', '(x, y)', '(5)', '(255, 0, 0)', '(-1)'], {}), '(img_cp, (x, y), 5, (255, 0, 0), -1)\n', (255, 291), False, 'import cv2\n'), ((325, 352), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'img_cp'], {}), "('image', img_cp)\n", (335, 352), False, 'import cv2\n'), ((407, 420), 'numpy.float32', 'np.float32', (['l'], {}), '(l)\n', (417, 420), True, 'import numpy as np\n'), ((436, 488), 'numpy.float32', 'np.float32', (['[[0, 0], [300, 0], [0, 300], [300, 300]]'], {}), '([[0, 0], [300, 0], [0, 300], [300, 300]])\n', (446, 488), True, 'import numpy as np\n'), ((502, 541), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['pts1', 'pts2'], {}), '(pts1, pts2)\n', (529, 541), False, 'import cv2\n'), ((556, 595), 'cv2.warpPerspective', 'cv2.warpPerspective', (['img', 'M', '(300, 300)'], {}), '(img, M, (300, 300))\n', (575, 595), False, 'import cv2\n'), ((605, 641), 'cv2.imshow', 'cv2.imshow', (['"""Original image"""', 'img_cp'], {}), "('Original image', img_cp)\n", (615, 641), False, 'import cv2\n'), ((650, 674), 'cv2.imshow', 'cv2.imshow', (['"""Final"""', 'dst'], {}), "('Final', dst)\n", (660, 674), False, 'import cv2\n')] |
from __future__ import unicode_literals
from .error import error
from io import open
import arrow
import os
import oyaml as yaml
import pandas as pd
import re
import sys
# Load dataset file specifications
spec_file_name = 'dataset_file_def.yaml'
spec_file_path = os.path.join(os.path.dirname(__file__), spec_file_name)
with open(spec_file_path, encoding='utf-8') as fh:
    spec = yaml.safe_load(fh)
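# validate() reads every worksheet as strings (so empty cells and numbers are
# handled uniformly) and returns a flat list of error dicts collected from the
# filename, metadata, vars and data checks.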
def validate(input_path):
if (sys.version_info > (3, 0)):
wb = pd.read_excel(input_path, sheet_name=None, na_values=[],
keep_default_na=False, dtype=str)
else:
wb = pd.read_excel(input_path, sheet_name=None, na_values=[],
keep_default_na=False, dtype=unicode)
errors = []
errors.extend(validate_filename(input_path, spec))
errors.extend(validate_sheet_metadata(wb, spec))
errors.extend(validate_sheet_vars(wb, spec))
errors.extend(validate_sheet_data(wb, spec))
return errors
def validate_column_datetimes(series, colspec, sheet):
errors = []
empty_errors, series = validate_column_generic(series, colspec, sheet)
errors.extend(empty_errors)
# Now look for format errors in non-empty rows
present = series[series.str.len() > 0]
for idx, val in present.iteritems():
try:
dt = arrow.get(val, colspec['format'])
except ValueError as e:
errors.append(error({
'message': 'error in datetime string: %s' % e,
'value': val,
'row': idx,
'column': series.name,
'sheet': sheet
}))
except arrow.parser.ParserError as e:
errors.append(error({
'message': 'invalid datetime string - should match %s' % colspec['format'],
'value': val,
'row': idx,
'column': series.name,
'sheet': sheet
}))
return errors
def validate_column_floats(series, colspec, sheet):
errors = []
empty_errors, series = validate_column_generic(series, colspec, sheet)
errors.extend(empty_errors)
# Convert to floats
converted = pd.to_numeric(series, errors='coerce')
# Non-numeric strings are now NaN
# Flag NaN as errors
nonnumeric_errors = series[pd.isna(converted)]
for idx, val in nonnumeric_errors.iteritems():
errors.append(error({
'message': 'invalid value',
'value': val,
'row': idx,
'column': series.name,
'sheet': sheet
}))
# Check range
min_errors = None
max_errors = None
if colspec.get('min', None) is not None:
min_errors = series[converted < colspec['min']]
for idx, val in min_errors.iteritems():
errors.append(error({
'message': 'value less than minimum of {}'.format(colspec['min']),
'value': val,
'row': idx,
'column': series.name,
'sheet': sheet
}))
if colspec.get('max', None) is not None:
max_errors = series[converted > colspec['max']]
for idx, val in max_errors.iteritems():
errors.append(error({
'message': 'value greater than maximum of {}'.format(colspec['max']),
'value': val,
'row': idx,
'column': series.name,
'sheet': sheet
}))
return errors
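# Shared pre-check used by the type-specific validators: it applies the
# required/na rules from the column spec, reports missing required cells, and
# returns the series with empty and NA cells stripped for further checks.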
def validate_column_generic(series, colspec, sheet):
errors = []
required = colspec.get('required', None)
na = colspec.get('na', None)
if not required:
# Empty cell is a valid value. Remove empty cells before further checks
series = series[series.str.len() > 0]
elif str(na) == '':
# Empty cell is a valid value. Remove empty cells before further checks
series = series[series.str.len() > 0]
else:
# NA is None or is not the empty string, therefore empty cells are not
# valid values. Flag as errors.
empty_errors = series[series.str.len() == 0]
for idx, val in empty_errors.iteritems():
errors.append(error({
'message': 'missing required field',
'row': idx,
'column': series.name,
'sheet': sheet
}))
# Now remove empty cells
series = series[series.str.len() > 0]
if na is not None:
# Remove NA values before further checks
series = series[series != na]
return (errors, series)
def validate_column_strings(series, colspec, sheet):
errors = []
empty_errors, series = validate_column_generic(series, colspec, sheet)
errors.extend(empty_errors)
if colspec.get('max', None) is not None:
maxlen_errors = series[series.str.len() > colspec['max']]
for idx, val in maxlen_errors.iteritems():
errors.append(error({
'message': 'string length > %d' % colspec['max'],
'value': val,
'row': idx,
'column': series.name,
'sheet': sheet
}))
return errors
def validate_filename(input_path, spec):
fn = os.path.basename(input_path)
errors = []
filename_re = re.compile(r'^(?P<shortname>.+)_(?P<date>[^_]+)_(?P<version>[^_]+)\.xlsx$')
m = filename_re.match(fn)
if not m:
errors.append(error({
'message': 'filename does not match format <dataset_short_name>_<dataset_release_date>_v<dataset_version>.xlsx',
'value': fn
}))
else:
try:
dt = arrow.get(m.group('date'), spec['file_date'])
except ValueError as e:
errors.append(error({
'message': 'error in filename datetime string: %s' % e,
'value': m.group('date')
}))
except arrow.parser.ParserError as e:
errors.append(error({
'message': 'date in filename must be in %s format' % spec['file_date'],
'value': m.group('date')
}))
if not re.match(r'^v.+$', m.group('version')):
errors.append(error({
'message': 'version string in filename must start with "v"',
'value': fn
}))
return errors
def validate_sheet_data(wb, spec):
errors = []
if not 'data' in wb:
errors.append(error({
'message': '"%s" worksheet is missing' % 'data',
'sheet': 'data'
}))
return errors
df = wb['data']
errors.extend(validate_sheet_generic(df, 'data', spec))
# Next check columns in 'data' that were defined in 'vars_meta_data'
# First make sure that 'vars_meta_data' doesn't have any errors, if it does
# don't bother with any more checks here
if len(validate_sheet_vars(wb, spec)) > 0:
return errors
# Now check custom data columns
required_columns = list(spec['columns']['data'].keys())
df_data = df.drop(required_columns, axis='columns')
# Collect variable short names from vars_meta_data sheet and check that
# data columns in 'data' sheet match data columns defined in 'vars' sheet.
vars_defined = wb['vars_meta_data']['var_short_name'].tolist()
vars_found = df_data.columns.tolist()
extra_defined = set(vars_defined).difference(set(vars_found))
extra_found = set(vars_found).difference(set(vars_defined))
if extra_defined:
errors.append(error({
'message': 'some data variables were defined in the "%s" worksheet but were not found in the "%s" worksheet' % ('vars_meta_data', 'data'),
'value': ', '.join(extra_defined)
}))
if extra_found:
errors.append(error({
'message': 'some data variables were found in the "%s" worksheet but were not defined in the "%s" worksheet' % ('data', 'vars_meta_data'),
'value': ', '.join(extra_found)
}))
# Now validate the actual data only on the condition of
# proper missing values.
# TODO: Is there any type-checking expected in custom vars?
vars_missing_value = wb['vars_meta_data']['var_missing_value'].tolist()
for var, na in zip(vars_defined, vars_missing_value):
if var not in extra_defined:
sheet = 'vars_meta_data'
colspec = { 'required': True, 'na': na }
empty_errors, _ = validate_column_generic(df_data[var], colspec, 'data')
errors.extend(empty_errors)
return errors
def validate_sheet_generic(df, sheet, spec):
errors = []
required_columns = list(spec['columns'][sheet].keys())
if df.columns.tolist()[:len(required_columns)] != required_columns:
errors.append(error({
'message': 'the first %d columns of the "%s" worksheet should be %s' % (len(required_columns), sheet, required_columns),
'value': str(df.columns.tolist()),
'sheet': sheet
}))
return errors
# Validate cells
for colname, colspec in spec['columns'][sheet].items():
v = validator_lookup[colspec['type']]
errors.extend(v(df[colname], colspec, sheet))
return errors
def validate_sheet_metadata(wb, spec):
errors = []
if not 'dataset_meta_data' in wb:
errors.append(error({
'message': '"%s" worksheet is missing' % 'dataset_meta_data',
'sheet': 'dataset_meta_data'
}))
return errors
df = wb['dataset_meta_data']
errors.extend(validate_sheet_generic(df, 'dataset_meta_data', spec))
return errors
def validate_sheet_vars(wb, spec=spec):
errors = []
if not 'vars_meta_data' in wb:
errors.append(error({
'message': '"%s" worksheet is missing' % 'vars_meta_data',
'sheet': 'vars_meta_data'
}))
return errors
df = wb['vars_meta_data']
errors.extend(validate_sheet_generic(df, 'vars_meta_data', spec))
return errors
# Register column validators in lookup
validator_lookup = {
'float': validate_column_floats,
'string': validate_column_strings,
'datetime': validate_column_datetimes,
'generic': validate_column_generic
}
| [
"arrow.get",
"os.path.basename",
"os.path.dirname",
"pandas.read_excel",
"oyaml.load",
"io.open",
"pandas.isna",
"pandas.to_numeric",
"re.compile"
] | [((277, 302), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (292, 302), False, 'import os\n'), ((325, 363), 'io.open', 'open', (['spec_file_path'], {'encoding': '"""utf-8"""'}), "(spec_file_path, encoding='utf-8')\n", (329, 363), False, 'from io import open\n'), ((382, 395), 'oyaml.load', 'yaml.load', (['fh'], {}), '(fh)\n', (391, 395), True, 'import oyaml as yaml\n'), ((2177, 2215), 'pandas.to_numeric', 'pd.to_numeric', (['series'], {'errors': '"""coerce"""'}), "(series, errors='coerce')\n", (2190, 2215), True, 'import pandas as pd\n'), ((5248, 5276), 'os.path.basename', 'os.path.basename', (['input_path'], {}), '(input_path)\n', (5264, 5276), False, 'import os\n'), ((5311, 5386), 're.compile', 're.compile', (['"""^(?P<shortname>.+)_(?P<date>[^_]+)_(?P<version>[^_]+)\\\\.xlsx$"""'], {}), "('^(?P<shortname>.+)_(?P<date>[^_]+)_(?P<version>[^_]+)\\\\.xlsx$')\n", (5321, 5386), False, 'import re\n'), ((473, 568), 'pandas.read_excel', 'pd.read_excel', (['input_path'], {'sheet_name': 'None', 'na_values': '[]', 'keep_default_na': '(False)', 'dtype': 'str'}), '(input_path, sheet_name=None, na_values=[], keep_default_na=\n False, dtype=str)\n', (486, 568), True, 'import pandas as pd\n'), ((610, 709), 'pandas.read_excel', 'pd.read_excel', (['input_path'], {'sheet_name': 'None', 'na_values': '[]', 'keep_default_na': '(False)', 'dtype': 'unicode'}), '(input_path, sheet_name=None, na_values=[], keep_default_na=\n False, dtype=unicode)\n', (623, 709), True, 'import pandas as pd\n'), ((2311, 2329), 'pandas.isna', 'pd.isna', (['converted'], {}), '(converted)\n', (2318, 2329), True, 'import pandas as pd\n'), ((1316, 1349), 'arrow.get', 'arrow.get', (['val', "colspec['format']"], {}), "(val, colspec['format'])\n", (1325, 1349), False, 'import arrow\n')] |
import torch.nn as nn
import torch.nn.functional as F
import torch
SCORE_THRESH = 0.3
STRIDE_SCALE = 8
IOU_THRESH = 0.6
class Integral(nn.Module):
"""A fixed layer for calculating integral result from distribution.
This layer calculates the target location by :math: `sum{P(y_i) * y_i}`,
P(y_i) denotes the softmax vector that represents the discrete distribution
y_i denotes the discrete set, usually {0, 1, 2, ..., reg_max}
Args:
reg_max (int): The maximal value of the discrete set. Default: 16. You
may want to reset it according to your new dataset or related
settings.
"""
def __init__(self, reg_max=16):
super(Integral, self).__init__()
self.reg_max = reg_max
self.register_buffer('project',
torch.linspace(0, self.reg_max, self.reg_max + 1))
def forward(self, x):
"""Forward feature from the regression head to get integral result of
bounding box location.
Args:
x (Tensor): Features of the regression head, shape (N, 4*(n+1)),
n is self.reg_max.
Returns:
x (Tensor): Integral result of box locations, i.e., distance
offsets from the box center in four directions, shape (N, 4).
"""
x = F.softmax(x.reshape(-1, self.reg_max + 1), dim=1)
x = F.linear(x, self.project.type_as(x)).reshape(-1, 4)
return x
def IouCal(Box1, Box2):
inner_x1 = torch.max(Box1[0], Box2[0])
inner_y1 = torch.max(Box1[1], Box2[1])
inner_x2 = torch.min(Box1[2], Box2[2])
inner_y2 = torch.min(Box1[3], Box2[3])
    # Clamp negative widths/heights so disjoint boxes yield zero intersection
    area_inner = torch.clamp(inner_x2 - inner_x1, min=0) * torch.clamp(inner_y2 - inner_y1, min=0)
area = (Box2[2] - Box2[0]) * (Box2[3] - Box2[1]) + \
(Box1[2] - Box1[0]) * (Box1[3] - Box1[1]) - \
area_inner
return torch.max(torch.tensor(0.), area_inner / area)
def nms(Bboxes):
Bboxes = sorted(Bboxes, key=lambda x:x[4], reverse=True)
record_dict = set()
res = []
for i in range(len(Bboxes)):
if i not in record_dict:
record_dict.add(i)
res.append(Bboxes[i])
else:
continue
for j in range(i + 1, len(Bboxes)):
Iou = IouCal(Bboxes[i], Bboxes[j])
if Iou > IOU_THRESH:
record_dict.add(j)
continue
return res
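# Decode a GFL-style head: per level, scores go through a sigmoid, the
# regression distributions are reduced to distances by Integral and scaled by
# the level stride, cells above SCORE_THRESH become corner boxes, boxes are
# mapped back to the original image by dividing by scale_factor, and the
# surviving detections are filtered with NMS.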
def gfl_post_process(output, extra_info):
integral = Integral(16)
ml_scores, ml_bboxes = output
scale_factor = extra_info["scale_factor"]
levels = 5
total_bboxes = []
for level in range(levels):
stride = 2**(level)*8
        # Default output order: small stride -> large stride
feat_h, feat_w = ml_scores[level].shape[2:]
scores = ml_scores[level].permute(0, 2, 3, 1).view(feat_h*feat_w, 1).sigmoid()
bboxes = integral(ml_bboxes[level].permute(0, 2, 3, 1))*stride
for i in range(len(scores)):
if scores[i] > SCORE_THRESH:
x = i % int(feat_w) * stride
y = i // int(feat_w) * stride
x1 = x - bboxes[i][0]
y1 = y - bboxes[i][1]
x2 = x + bboxes[i][2]
y2 = y + bboxes[i][3]
score_loc = scores[i]
box = torch.stack([x1, y1, x2, y2], dim=0)/torch.tensor(scale_factor)
total_bboxes.append(torch.cat([box, score_loc], dim=0))
nmsBoxes = nms(total_bboxes)
return nmsBoxes
| [
"torch.stack",
"torch.cat",
"torch.max",
"torch.linspace",
"torch.min",
"torch.tensor"
] | [((1497, 1524), 'torch.max', 'torch.max', (['Box1[0]', 'Box2[0]'], {}), '(Box1[0], Box2[0])\n', (1506, 1524), False, 'import torch\n'), ((1540, 1567), 'torch.max', 'torch.max', (['Box1[1]', 'Box2[1]'], {}), '(Box1[1], Box2[1])\n', (1549, 1567), False, 'import torch\n'), ((1583, 1610), 'torch.min', 'torch.min', (['Box1[2]', 'Box2[2]'], {}), '(Box1[2], Box2[2])\n', (1592, 1610), False, 'import torch\n'), ((1626, 1653), 'torch.min', 'torch.min', (['Box1[3]', 'Box2[3]'], {}), '(Box1[3], Box2[3])\n', (1635, 1653), False, 'import torch\n'), ((1874, 1891), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (1886, 1891), False, 'import torch\n'), ((817, 866), 'torch.linspace', 'torch.linspace', (['(0)', 'self.reg_max', '(self.reg_max + 1)'], {}), '(0, self.reg_max, self.reg_max + 1)\n', (831, 866), False, 'import torch\n'), ((3275, 3311), 'torch.stack', 'torch.stack', (['[x1, y1, x2, y2]'], {'dim': '(0)'}), '([x1, y1, x2, y2], dim=0)\n', (3286, 3311), False, 'import torch\n'), ((3312, 3338), 'torch.tensor', 'torch.tensor', (['scale_factor'], {}), '(scale_factor)\n', (3324, 3338), False, 'import torch\n'), ((3375, 3409), 'torch.cat', 'torch.cat', (['[box, score_loc]'], {'dim': '(0)'}), '([box, score_loc], dim=0)\n', (3384, 3409), False, 'import torch\n')] |
import sys,os
arr = []
files = os.listdir(sys.path[0] + '/stickersraw')
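# Build a YAGPDB custom-command template: $b holds the sorted sticker names,
# "stickers"/"gifs" (optionally with a prefix, or a regex for "gifss") list
# them, and passing a single name replies with its raw GitHub GIF URL.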
st = "{{$a := index .CmdArgs 0 }} \n"
st += "{{$b := cslice "
names = []
for fileName in files:
names.append(fileName.split('.')[0])
names = sorted(names)
n = ""
for name in names:
n += "\"" + name + "\" "
st += n
st += """ }}
{{if or (eq $a "stickers") (eq $a "gifs") (eq $a "gif") (eq $a "gifss") }}
{{deleteTrigger 0 }}
{{if eq (len .Args) 1}}
{{$r := joinStr " " $b.StringSlice}}
{{$r}}
{{else if eq (len .Args) 2}}
{{$c := index .CmdArgs 1}}
{{$s := cslice " " }}
{{range $index,$value := $b}}
{{- if or (hasPrefix $value $c) ( and (eq $a "gifss" ) ( reFind $c $value ) ) -}}
{{$s = $s.Append $value}}
{{- end -}}
{{- end}}
{{$r := joinStr " " $s.StringSlice}}
{{$r := str $r}}
{{$r}}
{{ deleteResponse 30 }}
{{end}}
{{end}}
{{range $b}}
{{- if eq . $a -}}
{{- $link := joinStr "" "https://github.com/42ip/animatedStickersDB/blob/main/stickersraw/" $a ".gif?raw=true" -}}
{{- $link -}}
{{- end -}}
{{- end}}"""
with open(sys.path[0] + "/output.yag", "w") as text_file:
text_file.write(st) | [
"os.listdir"
] | [((31, 71), 'os.listdir', 'os.listdir', (["(sys.path[0] + '/stickersraw')"], {}), "(sys.path[0] + '/stickersraw')\n", (41, 71), False, 'import sys, os\n')] |
import cv2
import sys
import matplotlib.pyplot as plt
def blur_display(infile, nogui=False):
# The first argument is the image
image = cv2.imread(infile)
    # convert to grayscale
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#blur it
blurred_image = cv2.GaussianBlur(image, (7,7), 0)
if nogui:
cv2.imwrite('test_blurred.png', blurred_image)
else:
        # Save all 3 images to disk
cv2.imwrite("Original_Image.png", image)
cv2.imwrite("Gray_Image.png", gray_image)
cv2.imwrite("Blurred_Image.png", blurred_image)
cv2.waitKey(0)
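# Expected CLI usage (illustrative): python <this_script>.py path/to/image.jpg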
if __name__ == "__main__":
blur_display(sys.argv[1])
plt.savefig('output/Original_Image.png')
plt.savefig('output/Gray_Image.png')
plt.savefig('output/Blurred_Image.png') | [
"cv2.GaussianBlur",
"cv2.cvtColor",
"cv2.imwrite",
"cv2.waitKey",
"cv2.imread",
"matplotlib.pyplot.savefig"
] | [((145, 163), 'cv2.imread', 'cv2.imread', (['infile'], {}), '(infile)\n', (155, 163), False, 'import cv2\n'), ((207, 246), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (219, 246), False, 'import cv2\n'), ((281, 315), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['image', '(7, 7)', '(0)'], {}), '(image, (7, 7), 0)\n', (297, 315), False, 'import cv2\n'), ((665, 705), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""output/Original_Image.png"""'], {}), "('output/Original_Image.png')\n", (676, 705), True, 'import matplotlib.pyplot as plt\n'), ((710, 746), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""output/Gray_Image.png"""'], {}), "('output/Gray_Image.png')\n", (721, 746), True, 'import matplotlib.pyplot as plt\n'), ((751, 790), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""output/Blurred_Image.png"""'], {}), "('output/Blurred_Image.png')\n", (762, 790), True, 'import matplotlib.pyplot as plt\n'), ((338, 384), 'cv2.imwrite', 'cv2.imwrite', (['"""test_blurred.png"""', 'blurred_image'], {}), "('test_blurred.png', blurred_image)\n", (349, 384), False, 'import cv2\n'), ((432, 472), 'cv2.imwrite', 'cv2.imwrite', (['"""Original_Image.png"""', 'image'], {}), "('Original_Image.png', image)\n", (443, 472), False, 'import cv2\n'), ((481, 522), 'cv2.imwrite', 'cv2.imwrite', (['"""Gray_Image.png"""', 'gray_image'], {}), "('Gray_Image.png', gray_image)\n", (492, 522), False, 'import cv2\n'), ((531, 578), 'cv2.imwrite', 'cv2.imwrite', (['"""Blurred_Image.png"""', 'blurred_image'], {}), "('Blurred_Image.png', blurred_image)\n", (542, 578), False, 'import cv2\n'), ((588, 602), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (599, 602), False, 'import cv2\n')] |
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.patches import FancyBboxPatch
from matplotlib.colors import LinearSegmentedColormap
from mpl_toolkits.basemap import Basemap
import numpy as np
# Suppress matplotlib warnings
np.warnings.filterwarnings('ignore')
import xarray as xr
import cmocean
from pathlib import Path
import _pickle as pickle
import os
import ship_mapper as sm
import urllib.request
import netCDF4
def map_density(info, file_in=None, cmap='Default', sidebar=False,
to_screen=True, save=True,
filename_out='auto',filedir_out='auto'):
'''
Plots a map using a gridded (or merged) file
Arguments:
info (info): ``info`` object containing metadata
Keyword Arguments:
file_in (str): Gridded or merged file to map. If ``None`` it looks for
``merged_grid.nc`` in the `\merged` directory
cmap (str): Colormap to use
sidebar (bool): If ``True``, includes side panel with metadata
to_screen (bool): If ``True``, a plot is printed to screen
save (bool): If ``True`` a ``.png`` figure is saved to hardrive
filename_out (str): Name of produced figure.
If ``auto`` then name is ``info.run_name + '__' + file_in + '.png'``
filedir_out (str): Directory where figure is saved.
If ``auto`` then output directory is ``info.dirs.pngs``
Returns:
Basemap object
'''
print('map_density ------------------------------------------------------')
# Load data
if file_in == None:
file_in = os.path.join(str(info.dirs.merged_grid),'merged_grid.nc')
print(file_in)
d = xr.open_dataset(file_in)
# Define boundaries
if info.grid.minlat == None or info.grid.maxlat == None or info.grid.minlon == None or info.grid.maxlon == None:
minlat = d['lat'].values.min()
maxlat = d['lat'].values.max()
minlon = d['lon'].values.min()
maxlon = d['lon'].values.max()
else:
minlat = d.attrs['minlat']
maxlat = d.attrs['maxlat']
minlon = d.attrs['minlon']
maxlon = d.attrs['maxlon']
basemap_file = info.dirs.basemap
print('Basemap file: ' + basemap_file)
    # Check for basemap.p and, if it doesn't exist, make it
if not os.path.exists(basemap_file):
m = sm.make_basemap(info,info.dirs.project_path,[minlat,maxlat,minlon,maxlon])
else:
print('Found basemap...')
m = pickle.load(open(basemap_file,'rb'))
# Create grid for mapping
lons_grid, lats_grid = np.meshgrid(d['lon'].values,d['lat'].values)
xx,yy = m(lons_grid, lats_grid)
H = d['ship_density'].values
# Rotate and flip H... ----------------------------------------------------------------------------
H = np.rot90(H)
H = np.flipud(H)
# Mask zeros
d.attrs['mask_below'] = info.maps.mask_below
Hmasked = np.ma.masked_where(H<=d.attrs['mask_below'],H)
    # Set vmax and vmin
print('Min: ' + str(np.min(Hmasked)))
print('Max: ' + str(np.max(Hmasked)))
print('Mean: ' + str(np.nanmean(Hmasked)))
print('Std: ' + str(Hmasked.std()))
if info.maps.cbarmax == 'auto':
# vmax = (np.median(Hmasked)) + (4*Hmasked.std())
vmax = (np.max(Hmasked)) - (2*Hmasked.std())
elif info.maps.cbarmax != None:
vmax = info.maps.cbarmax
else:
vmax = None
if info.maps.cbarmin == 'auto':
# vmin = (np.median(Hmasked)) - (4*Hmasked.std())
alat = (d.attrs['maxlat'] - d.attrs['minlat'])/2
cellsize = sm.degrees_to_meters(d.attrs['bin_size'], alat)
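        # cellsize (metres) divided by max_speed (m/min) gives the minutes a vessel
        # at max_speed needs to cross one grid cell; that value becomes the
        # colour-scale floor (vmin) below.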
# max_speed = 616.66 # m/min ...roughly 20 knots
        max_speed = 316.66 # m/min ...roughly 10 knots
vmin = cellsize / max_speed
elif info.maps.cbarmin != None:
vmin = info.maps.cbarmin
else:
vmin = None
# Log H for better display
Hmasked = np.log10(Hmasked)
if vmin != None:
vmin = np.log10(vmin)
if vmax != None:
vmax = np.log10(vmax)
# Make colormap
fig = plt.gcf()
ax = plt.gca()
if cmap == 'Default':
cmapcolor = load_my_cmap('my_cmap_amber2red')
elif cmap == 'red2black':
cmapcolor = load_my_cmap('my_cmap_red2black')
else:
cmapcolor =plt.get_cmap(cmap)
cs = m.pcolor(xx,yy,Hmasked, cmap=cmapcolor, zorder=10, vmin=vmin, vmax=vmax)
#scalebar
sblon = minlon + ((maxlon-minlon)/10)
sblat = minlat + ((maxlat-minlat)/20)
m.drawmapscale(sblon, sblat,
minlon, minlat,
info.maps.scalebar_km, barstyle='fancy',
units='km', fontsize=8,
fontcolor='#808080',
fillcolor1 = '#cccccc',
fillcolor2 = '#a6a6a6',
yoffset = (0.01*(m.ymax-m.ymin)),
labelstyle='simple',zorder=60)
if not sidebar:
cbaxes2 = fig.add_axes([0.70, 0.18, 0.2, 0.03],zorder=60)
cbar = plt.colorbar(extend='both', cax = cbaxes2, orientation='horizontal')
# Change colorbar labels for easier interpreting
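        # Note: _tick_data_values is a private matplotlib attribute; the tick values
        # are mapped back from log10 space to linear units before labelling.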
label_values = cbar._tick_data_values
log_label_values = np.round(10 ** label_values,decimals=0)
labels = []
for log_label_value in log_label_values:
labels.append(str(int(log_label_value)))
        cbar.ax.set_xticklabels(labels)  # horizontal colorbar: tick labels sit on the x axis
cbar.ax.set_xlabel(d.attrs['units'])
if sidebar:
text1, text2, text3, text4 = make_legend_text(info,d.attrs)
ax2 = plt.subplot2grid((1,24),(0,0),colspan=4)
# Turn off tick labels
ax2.get_xaxis().set_visible(False)
ax2.get_yaxis().set_visible(False)
ax2.add_patch(FancyBboxPatch((0,0),
width=1, height=1, clip_on=False,
boxstyle="square,pad=0", zorder=3,
facecolor='#e6e6e6', alpha=1.0,
edgecolor='#a6a6a6',
transform=plt.gca().transAxes))
plt.text(0.15, 0.99, text1,
verticalalignment='top',
horizontalalignment='left',
weight='bold',
size=10,
color= '#737373',
transform=plt.gca().transAxes)
plt.text(0.02, 0.83, text2,
horizontalalignment='left',
verticalalignment='top',
size=9,
color= '#808080',
transform=plt.gca().transAxes)
plt.text(0.02, 0.145, text3,
horizontalalignment='left',
verticalalignment='top',
size=7,
color= '#808080',
transform=plt.gca().transAxes)
plt.text(0.02, 0.25, text4,
style='italic',
horizontalalignment='left',
verticalalignment='top',
size=8,
color= '#808080',
transform=plt.gca().transAxes)
cbaxes2 = fig.add_axes([0.019, 0.9, 0.15, 0.02],zorder=60)
cbar = plt.colorbar(extend='both', cax = cbaxes2, orientation='horizontal')
cbar.ax.tick_params(labelsize=8, labelcolor='#808080')
# Change colorbar labels for easier interpreting
label_values = cbar._tick_data_values
# print("values")
# print(label_values)
log_label_values = np.round(10 ** label_values,decimals=0)
# print(log_label_values)
labels = []
for log_label_value in log_label_values:
labels.append(str(int(log_label_value)))
cbar.ax.set_xticklabels(labels)
cbar.ax.set_xlabel(d.attrs['units'], size=9, color='#808080')
# TODO: maybe delete this?
# mng = plt.get_current_fig_manager()
# mng.frame.Maximize(True)
#
# fig.tight_layout()
plt.show()
# Save map as png
if save:
if filedir_out == 'auto':
filedir = str(info.dirs.pngs)
else:
filedir = filedir_out
if filename_out == 'auto':
filename = info.run_name + '__' + sm.get_filename_from_fullpath(file_in) + '.png'
else:
filename = filename_out
sm.checkDir(filedir)
plt.savefig(os.path.join(filedir,filename), dpi=300)
# Close netCDF file
d.close()
if to_screen == False:
plt.close()
    return m
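# Example usage (hypothetical; assumes an ``info`` object with dirs/grid/maps
# metadata has already been built elsewhere with ship_mapper):
#   import ship_mapper as sm
#   m = sm.map_density(info, cmap='red2black', sidebar=True,
#                      filename_out='density.png', filedir_out='output')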
def make_legend_text(info,md):
'''
Makes text for legend in left block of map
:param info info: ``info`` object containing metadata
:return: text for legend
'''
import datetime
alat = (md['maxlat'] - md['minlat'])/2
text1 = 'VESSEL DENSITY HEATMAP'
# print(info)
# --------------------------------------------------------
text2 = ('Unit description: ' + md['unit_description'] + '\n\n' +
'Data source: ' + md['data_source'] + '\n\n' +
'Data source description:\n' + md['data_description'] + '\n\n' +
'Time range: \n' + md['startdate'][0:-3] + ' to ' + md['enddate'][0:-3] + '\n\n' +
'Included speeds: ' + info.sidebar.included_speeds + '\n' +
'Included vessels: ' + info.sidebar.included_vessel_types + '\n\n' +
'Grid size: ' + str(md['bin_size']) + ' degrees (~' + str(int(round(sm.degrees_to_meters(md['bin_size'], alat))))+ ' m)\n' +
             'EPSG code: ' + md['epsg_code'] + '\n' +
'Interpolation: ' + md['interpolation'] + '\n' +
'Interpolation threshold: ' + str(md['interp_threshold']) + ' knots\n' +
'Time bin: ' + str(round(md['time_bin']*1440,1)) + ' minutes\n' +
'Mask below: ' + str(md['mask_below']) + ' vessels per grid'
)
text3 = ('Creation date: ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\n' +
'Creation script: ' + info.run_name + '.py\n' +
'Software: ship mapper v0.1\n\n' +
'Created by:\n' +
'Oceans and Coastal Management Division\n' +
'Ecosystem Management Branch\n' +
'Fisheries and Oceans Canada – Maritimes Region\n' +
'Bedford Institute of Oceanography\n' +
'PO Box 1006, Dartmouth, NS, Canada, B2Y 4A2'
)
text4 = ('---------------------------------------------------------------\n' +
'WARNING: This is a preliminary data product.\n' +
'We cannot guarantee the validity, accuracy, \n' +
'or quality of this product. Data is provided\n' +
'on an "AS IS" basis. USE AT YOUR OWN RISK.\n' +
'---------------------------------------------------------------\n'
)
return text1, text2, text3, text4
def map_dots(info, file_in, sidebar=False, save=True):
'''
Creates a map of "pings" rather than gridded density
Arguments:
info (info): ``info`` object containing metadata
Keyword Arguments:
file_in (str): Gridded or merged file to map. If ``None`` it looks for
``merged_grid.nc`` in the `\merged` directory
sidebar (bool): If ``True``, includes side panel with metadata
        save (bool): If ``True`` a ``.png`` figure is saved to hard drive
'''
print('Mapping...')
# -----------------------------------------------------------------------------
d = xr.open_dataset(file_in)
# Define boundaries
if info.grid.minlat == None or info.grid.maxlat == None or info.grid.minlon == None or info.grid.maxlon == None:
minlat = d['lat'].values.min()
maxlat = d['lat'].values.max()
minlon = d['lon'].values.min()
maxlon = d['lon'].values.max()
else:
minlat = info.grid.minlat
maxlat = info.grid.maxlat
minlon = info.grid.minlon
maxlon = info.grid.maxlon
path_to_basemap = info.dirs.project_path / 'ancillary'
print('-----------------------------------------------------')
print('-----------------------------------------------------')
if sidebar:
basemap_file = str(path_to_basemap / 'basemap_sidebar.p')
else:
basemap_file = str(path_to_basemap / 'basemap.p')
if not os.path.exists(basemap_file):
m = sm.make_basemap(info,[minlat,maxlat,minlon,maxlon])
else:
print('Found basemap...')
m = pickle.load(open(basemap_file,'rb'))
x, y = m(d['longitude'].values,d['latitude'].values)
cs = m.scatter(x,y,s=0.1,marker='o',color='r', zorder=10)
#
plt.show()
# # Save map as png
# if save:
# filedir = str(info.dirs.pngs)
# sm.checkDir(filedir)
# filename = info.project_name + '_' + str(info.grid.bin_number) + '.png'
# plt.savefig(os.path.join(filedir,filename), dpi=300)
return
def map_dots_one_ship(info, file_in, Ship_No, save=True):
'''
Creates a map of "pings" (i.e. not gridded density) of only one ship
Arguments:
info (info): ``info`` object containing metadata
Keyword Arguments:
file_in (str): Gridded or merged file to map. If ``None`` it looks for
``merged_grid.nc`` in the `\merged` directory
Ship_No (str): Unique identifier of the ship to plot
        save (bool): If ``True`` a ``.png`` figure is saved to hard drive
'''
import pandas as pd
print('Mapping...')
# -----------------------------------------------------------------------------
d = xr.open_dataset(file_in)
# Define boundaries
if info.grid.minlat == None or info.grid.maxlat == None or info.grid.minlon == None or info.grid.maxlon == None:
minlat = d['lat'].values.min()
maxlat = d['lat'].values.max()
minlon = d['lon'].values.min()
maxlon = d['lon'].values.max()
else:
minlat = info.grid.minlat
maxlat = info.grid.maxlat
minlon = info.grid.minlon
maxlon = info.grid.maxlon
path_to_basemap = info.dirs.project_path / 'ancillary'
print('-----------------------------------------------------')
print('-----------------------------------------------------')
# basemap_file = str(path_to_basemap / 'basemap_spots.p')
m = sm.make_basemap(info.dirs.project_path,[minlat,maxlat,minlon,maxlon])
# if not os.path.exists(str(path_to_basemap / 'basemap.p')):
# m = sm.make_basemap(info.dirs.project_path,[minlat,maxlat,minlon,maxlon])
# else:
# print('Found basemap...')
# m = pickle.load(open(basemap_file,'rb'))
indx = ((d['longitude']> minlon) &
(d['longitude']<= maxlon) &
(d['latitude']> minlat) &
(d['latitude']<= maxlat))
filtered_data = d.sel(Dindex=indx)
ship_id = info.ship_id
unis = pd.unique(filtered_data[ship_id].values)
ship = unis[Ship_No]
indxship = (filtered_data[ship_id] == ship)
singleship = filtered_data.sel(Dindex=indxship)
print('Ship id:'+ str(ship))
# print(singleship['longitude'].values)
# print(singleship['latitude'].values)
x, y = m(singleship['longitude'].values,singleship['latitude'].values)
# x, y = m(d['longitude'].values,d['latitude'].values)
cs = m.scatter(x,y,2,marker='o',color='r', zorder=30)
# fig = plt.figure()
# plt.plot(filtered_data['longitude'].values,filtered_data['latitude'].values,'.')
#
plt.show()
# # Save map as png
# if save:
# filedir = str(info.dirs.pngs)
# sm.checkDir(filedir)
# filename = info.project_name + '_' + str(info.grid.bin_number) + '.png'
# plt.savefig(os.path.join(filedir,filename), dpi=300)
return
def define_path_to_map(info, path_to_basemap='auto'):
'''
    Figures out where the .basemap and .grid files are
Arguments:
info (info): ``info`` object containing metadata
'''
if path_to_basemap == 'auto':
if info.grid.type == 'one-off':
path_to_map = os.path.join(info.dirs.project_path,info.grid.region,'ancillary')
elif info.grid.type == 'generic':
path_to_map = os.path.abspath(os.path.join(info.dirs.project_path,'ancillary'))
else:
path_to_map = path_to_basemap
return path_to_map
def make_basemap(info,spatial,path_to_basemap='auto', sidebar=False):
'''
Makes a basemap
Arguments:
info (info): ``info`` object containing metadata
spatial (list): List with corners... this will be deprecated soon
Keyword arguments:
path_to_basemap (str): Directory where to save the produced basemap. If ``'auto'``
then path is setup by :func:`~ship_mapper.mapper.define_path_to_map`
sidebar (bool): If ``True`` space for a side panel is added to the basemap
Returns:
        A ``.basemap`` and a ``.grid`` file
'''
print('Making basemap...')
# -----------------------------------------------------------------------------
path_to_map = define_path_to_map(info, path_to_basemap=path_to_basemap)
sm.checkDir(str(path_to_map))
minlat = spatial[0]
maxlat = spatial[1]
minlon = spatial[2]
maxlon = spatial[3]
# Create map
m = Basemap(projection='mill', llcrnrlat=minlat,urcrnrlat=maxlat,
llcrnrlon=minlon, urcrnrlon=maxlon,resolution=info.maps.resolution)
# TOPO
# Read data from: http://coastwatch.pfeg.noaa.gov/erddap/griddap/usgsCeSrtm30v6.html
# using the netCDF output option
# bathymetry_file = str(path_to_map / 'usgsCeSrtm30v6.nc')
bathymetry_file = os.path.join(path_to_map, 'usgsCeSrtm30v6.nc')
if not os.path.isfile(bathymetry_file):
isub = 1
base_url='http://coastwatch.pfeg.noaa.gov/erddap/griddap/usgsCeSrtm30v6.nc?'
query='topo[(%f):%d:(%f)][(%f):%d:(%f)]' % (maxlat,isub,minlat,minlon,isub,maxlon)
url = base_url+query
# store data in NetCDF file
urllib.request.urlretrieve(url, bathymetry_file)
# open NetCDF data in
nc = netCDF4.Dataset(bathymetry_file)
ncv = nc.variables
lon = ncv['longitude'][:]
lat = ncv['latitude'][:]
lons, lats = np.meshgrid(lon,lat)
topo = ncv['topo'][:,:]
#
fig = plt.figure(figsize=(19,9))
# ax = fig.add_axes([0.05,0.05,0.80,1])
# ax = fig.add_axes([0,0,0.80,1])
# ax = fig.add_axes([0.23,0.035,0.85,0.9])
if sidebar:
ax = plt.subplot2grid((1,24),(0,5),colspan=19)
else:
ax = fig.add_axes([0.05,0.05,0.94,0.94])
TOPOmasked = np.ma.masked_where(topo>0,topo)
cs = m.pcolormesh(lons,lats,TOPOmasked,cmap=load_my_cmap('my_cmap_lightblue'),latlon=True,zorder=5)
# m.drawcoastlines(color='#A27D0C',linewidth=0.5,zorder=25)
# m.fillcontinents(color='#E1E1A0',zorder=23)
m.drawcoastlines(color='#a6a6a6',linewidth=0.5,zorder=25)
m.fillcontinents(color='#e6e6e6',zorder=23)
m.drawmapboundary()
def setcolor(x, color):
for m in x:
for t in x[m][1]:
t.set_color(color)
parallels = np.arange(minlat,maxlat,info.maps.parallels)
# labels = [left,right,top,bottom]
par = m.drawparallels(parallels,labels=[True,False,False,False],dashes=[20,20],color='#00a3cc', linewidth=0.2, zorder=25)
setcolor(par,'#00a3cc')
meridians = np.arange(minlon,maxlon,info.maps.meridians)
mers = m.drawmeridians(meridians,labels=[False,False,False,True],dashes=[20,20],color='#00a3cc', linewidth=0.2, zorder=25)
setcolor(mers,'#00a3cc')
ax = plt.gca()
# ax.axhline(linewidth=4, color="#00a3cc")
# ax.axvline(linewidth=4, color="#00a3cc")
#
ax.spines['top'].set_color('#00a3cc')
ax.spines['right'].set_color('#00a3cc')
ax.spines['bottom'].set_color('#00a3cc')
ax.spines['left'].set_color('#00a3cc')
for k, spine in ax.spines.items(): #ax.spines is a dictionary
spine.set_zorder(35)
# ax.spines['top'].set_visible(False)
# ax.spines['right'].set_visible(False)
# ax.spines['bottom'].set_visible(False)
# ax.spines['left'].set_visible(False)
# fig.tight_layout(pad=0.25)
fig.tight_layout(rect=[0.01,0.01,.99,.99])
plt.show()
if sidebar:
basemap_name = 'basemap_sidebar.p'
else:
basemap_name = 'basemap.p'
info = sm.calculate_gridcell_areas(info)
# Save basemap
save_basemap(m,info,path_to_basemap=path_to_map)
# picklename = str(path_to_map / basemap_name)
# pickle.dump(m,open(picklename,'wb'),-1)
# print('!!! Pickle just made: ' + picklename)
#
## pngDir = 'C:\\Users\\IbarraD\\Documents\\VMS\\png\\'
## plt.savefig(datadir[0:-5] + 'png\\' + filename + '- Grid' + str(BinNo) + ' - Filter' +str(downLim) + '-' + str(upLim) + '.png')
# plt.savefig('test.png')
return m
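# Example (hypothetical; minlat/maxlat/minlon/maxlon defined by the caller):
# build and pickle the basemap once so later mapping calls can reuse the cached
# .basemap/.grid files instead of redrawing coastlines and bathymetry:
#   m = make_basemap(info, [minlat, maxlat, minlon, maxlon], sidebar=False)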
def load_my_cmap(name):
'''
Creates and loads custom colormap
'''
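    # Each channel in a cdict maps x in [0, 1] to (x, value_below, value_above);
    # with only two anchor points per channel the colormap is a linear ramp from
    # the colour defined at x=0 to the colour defined at x=1.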
# cdict = {'red': ((0.0, 0.0, 0.0),
# (1.0, 0.7, 0.7)),
# 'green': ((0.0, 0.25, 0.25),
# (1.0, 0.85, 0.85)),
# 'blue': ((0.0, 0.5, 0.5),
# (1.0, 1.0, 1.0))}
# my_cmap = LinearSegmentedColormap('my_colormap',cdict,256)
if name == 'my_cmap_lightblue':
cdict = {'red': ((0.0, 0.0, 0.0), # Dark
(1.0, 0.9, 0.9)), # Light
'green': ((0.0, 0.9, 0.9),
(1.0, 1.0,1.0)),
'blue': ((0.0, 0.9, 0.9),
(1.0, 1.0, 1.0))}
my_cmap = LinearSegmentedColormap('my_colormap',cdict,256)
elif name == 'my_cmap_amber2red':
# cdict = {'red': ((0.0, 1.0, 1.0),
# (1.0, 0.5, 0.5)),
# 'green': ((0.0, 1.0, 1.0),
# (1.0, 0.0, 0.0)),
# 'blue': ((0.0, 0.0, 0.0),
# (1.0, 0.0, 0.0))}
# my_cmap_yellow2red = LinearSegmentedColormap('my_colormap',cdict,256)
cdict = {'red': ((0.0, 1.0, 1.0),
(1.0, 0.5, 0.5)),
'green': ((0.0, 0.85, 0.85),
(1.0, 0.0, 0.0)),
'blue': ((0.0, 0.3, 0.3),
(1.0, 0.0, 0.0))}
my_cmap = LinearSegmentedColormap('my_colormap',cdict,256)
elif name == 'my_cmap_red2black':
# c1 = np.array([252,142,110])/256 #RGB/256
c1 = np.array([250,59,59])/256 #RGB/256
c2 = np.array([103,0,13])/256 #RGB/256
cdict = {'red': ((0.0, c1[0], c1[0]),
(1.0, c2[0], c2[0])),
'green': ((0.0, c1[1], c1[1]),
(1.0, c2[1], c2[1])),
'blue': ((0.0, c1[2], c1[2]),
(1.0, c2[2], c2[2]))}
my_cmap = LinearSegmentedColormap('my_colormap',cdict,256)
    else:
        my_cmap = None  # avoid UnboundLocalError if an unknown cmap name is passed
        print('cmap name does not match any of the available cmaps')
return my_cmap
def save_basemap(m,info,path_to_basemap='auto'):
'''
    Saves basemap (and corresponding info.grid) to a pickle file
Arguments:
m (mpl_toolkits.basemap.Basemap): Basemap object
info (info): ``info`` object containing metadata
Keyword Arguments:
path_to_basemap (str): If ``'auto'`` it looks in ``grids`` directory
Returns:
Pickle file
See also:
:mod:`pickle`
'''
#
# basemap = [grid, m]
# f = open(str(path_to_map / (info.grid.basemap + '.p')),'w')
# pickle.dump(grid, f)
# pickle.dump(m, f)
# f.close()
# picklename = str(path_to_map / (info.grid.basemap + '.p'))
# pickle.dump(basemap, open(picklename, 'wb'), -1)
# print('!!! Pickle just made: ' + picklename)
path_to_map = define_path_to_map(info, path_to_basemap=path_to_basemap)
# basemap_picklename = str(path_to_map / (info.grid.basemap + '.basemap'))
basemap_picklename = os.path.join(path_to_map,info.grid.basemap + '.basemap')
pickle.dump(m, open(basemap_picklename, 'wb'), -1)
# info_picklename = str(path_to_map / (info.grid.basemap + '.grid'))
info_picklename = os.path.join(path_to_map, info.grid.basemap + '.grid')
pickle.dump(info, open(info_picklename, 'wb'), -1)
print('!!! Pickles were just made: ' + basemap_picklename)
return
| [
"matplotlib.colors.LinearSegmentedColormap",
"matplotlib.pyplot.subplot2grid",
"matplotlib.pyplot.figure",
"numpy.rot90",
"numpy.arange",
"os.path.isfile",
"matplotlib.pyplot.gca",
"os.path.join",
"numpy.round",
"numpy.nanmean",
"netCDF4.Dataset",
"numpy.meshgrid",
"ship_mapper.degrees_to_meters",
"matplotlib.pyplot.close",
"os.path.exists",
"matplotlib.pyplot.colorbar",
"numpy.max",
"numpy.log10",
"datetime.datetime.now",
"matplotlib.pyplot.show",
"matplotlib.pyplot.get_cmap",
"numpy.ma.masked_where",
"ship_mapper.checkDir",
"numpy.flipud",
"numpy.min",
"matplotlib.use",
"ship_mapper.make_basemap",
"matplotlib.pyplot.gcf",
"ship_mapper.get_filename_from_fullpath",
"xarray.open_dataset",
"pandas.unique",
"numpy.array",
"ship_mapper.calculate_gridcell_areas",
"numpy.warnings.filterwarnings",
"mpl_toolkits.basemap.Basemap"
] | [((19, 40), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (33, 40), False, 'import matplotlib\n'), ((273, 309), 'numpy.warnings.filterwarnings', 'np.warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (299, 309), True, 'import numpy as np\n'), ((1869, 1893), 'xarray.open_dataset', 'xr.open_dataset', (['file_in'], {}), '(file_in)\n', (1884, 1893), True, 'import xarray as xr\n'), ((2811, 2856), 'numpy.meshgrid', 'np.meshgrid', (["d['lon'].values", "d['lat'].values"], {}), "(d['lon'].values, d['lat'].values)\n", (2822, 2856), True, 'import numpy as np\n'), ((3053, 3064), 'numpy.rot90', 'np.rot90', (['H'], {}), '(H)\n', (3061, 3064), True, 'import numpy as np\n'), ((3074, 3086), 'numpy.flipud', 'np.flipud', (['H'], {}), '(H)\n', (3083, 3086), True, 'import numpy as np\n'), ((3177, 3226), 'numpy.ma.masked_where', 'np.ma.masked_where', (["(H <= d.attrs['mask_below'])", 'H'], {}), "(H <= d.attrs['mask_below'], H)\n", (3195, 3226), True, 'import numpy as np\n'), ((4238, 4255), 'numpy.log10', 'np.log10', (['Hmasked'], {}), '(Hmasked)\n', (4246, 4255), True, 'import numpy as np\n'), ((4402, 4411), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (4409, 4411), True, 'import matplotlib.pyplot as plt\n'), ((4422, 4431), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4429, 4431), True, 'import matplotlib.pyplot as plt\n'), ((8435, 8445), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8443, 8445), True, 'import matplotlib.pyplot as plt\n'), ((12143, 12167), 'xarray.open_dataset', 'xr.open_dataset', (['file_in'], {}), '(file_in)\n', (12158, 12167), True, 'import xarray as xr\n'), ((13355, 13365), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13363, 13365), True, 'import matplotlib.pyplot as plt\n'), ((14338, 14362), 'xarray.open_dataset', 'xr.open_dataset', (['file_in'], {}), '(file_in)\n', (14353, 14362), True, 'import xarray as xr\n'), ((15122, 15195), 'ship_mapper.make_basemap', 'sm.make_basemap', (['info.dirs.project_path', '[minlat, maxlat, minlon, maxlon]'], {}), '(info.dirs.project_path, [minlat, maxlat, minlon, maxlon])\n', (15137, 15195), True, 'import ship_mapper as sm\n'), ((15709, 15749), 'pandas.unique', 'pd.unique', (['filtered_data[ship_id].values'], {}), '(filtered_data[ship_id].values)\n', (15718, 15749), True, 'import pandas as pd\n'), ((16351, 16361), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16359, 16361), True, 'import matplotlib.pyplot as plt\n'), ((18245, 18381), 'mpl_toolkits.basemap.Basemap', 'Basemap', ([], {'projection': '"""mill"""', 'llcrnrlat': 'minlat', 'urcrnrlat': 'maxlat', 'llcrnrlon': 'minlon', 'urcrnrlon': 'maxlon', 'resolution': 'info.maps.resolution'}), "(projection='mill', llcrnrlat=minlat, urcrnrlat=maxlat, llcrnrlon=\n minlon, urcrnrlon=maxlon, resolution=info.maps.resolution)\n", (18252, 18381), False, 'from mpl_toolkits.basemap import Basemap\n'), ((18628, 18674), 'os.path.join', 'os.path.join', (['path_to_map', '"""usgsCeSrtm30v6.nc"""'], {}), "(path_to_map, 'usgsCeSrtm30v6.nc')\n", (18640, 18674), False, 'import os\n'), ((19081, 19113), 'netCDF4.Dataset', 'netCDF4.Dataset', (['bathymetry_file'], {}), '(bathymetry_file)\n', (19096, 19113), False, 'import netCDF4\n'), ((19217, 19238), 'numpy.meshgrid', 'np.meshgrid', (['lon', 'lat'], {}), '(lon, lat)\n', (19228, 19238), True, 'import numpy as np\n'), ((19285, 19312), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(19, 9)'}), '(figsize=(19, 9))\n', (19295, 19312), True, 'import matplotlib.pyplot as plt\n'), 
((19621, 19655), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(topo > 0)', 'topo'], {}), '(topo > 0, topo)\n', (19639, 19655), True, 'import numpy as np\n'), ((20160, 20206), 'numpy.arange', 'np.arange', (['minlat', 'maxlat', 'info.maps.parallels'], {}), '(minlat, maxlat, info.maps.parallels)\n', (20169, 20206), True, 'import numpy as np\n'), ((20440, 20486), 'numpy.arange', 'np.arange', (['minlon', 'maxlon', 'info.maps.meridians'], {}), '(minlon, maxlon, info.maps.meridians)\n', (20449, 20486), True, 'import numpy as np\n'), ((20657, 20666), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (20664, 20666), True, 'import matplotlib.pyplot as plt\n'), ((21338, 21348), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (21346, 21348), True, 'import matplotlib.pyplot as plt\n'), ((21485, 21518), 'ship_mapper.calculate_gridcell_areas', 'sm.calculate_gridcell_areas', (['info'], {}), '(info)\n', (21512, 21518), True, 'import ship_mapper as sm\n'), ((25188, 25245), 'os.path.join', 'os.path.join', (['path_to_map', "(info.grid.basemap + '.basemap')"], {}), "(path_to_map, info.grid.basemap + '.basemap')\n", (25200, 25245), False, 'import os\n'), ((25403, 25457), 'os.path.join', 'os.path.join', (['path_to_map', "(info.grid.basemap + '.grid')"], {}), "(path_to_map, info.grid.basemap + '.grid')\n", (25415, 25457), False, 'import os\n'), ((2530, 2558), 'os.path.exists', 'os.path.exists', (['basemap_file'], {}), '(basemap_file)\n', (2544, 2558), False, 'import os\n'), ((2573, 2652), 'ship_mapper.make_basemap', 'sm.make_basemap', (['info', 'info.dirs.project_path', '[minlat, maxlat, minlon, maxlon]'], {}), '(info, info.dirs.project_path, [minlat, maxlat, minlon, maxlon])\n', (2588, 2652), True, 'import ship_mapper as sm\n'), ((3872, 3919), 'ship_mapper.degrees_to_meters', 'sm.degrees_to_meters', (["d.attrs['bin_size']", 'alat'], {}), "(d.attrs['bin_size'], alat)\n", (3892, 3919), True, 'import ship_mapper as sm\n'), ((4294, 4308), 'numpy.log10', 'np.log10', (['vmin'], {}), '(vmin)\n', (4302, 4308), True, 'import numpy as np\n'), ((4347, 4361), 'numpy.log10', 'np.log10', (['vmax'], {}), '(vmax)\n', (4355, 4361), True, 'import numpy as np\n'), ((5323, 5389), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'extend': '"""both"""', 'cax': 'cbaxes2', 'orientation': '"""horizontal"""'}), "(extend='both', cax=cbaxes2, orientation='horizontal')\n", (5335, 5389), True, 'import matplotlib.pyplot as plt\n'), ((5535, 5575), 'numpy.round', 'np.round', (['(10 ** label_values)'], {'decimals': '(0)'}), '(10 ** label_values, decimals=0)\n', (5543, 5575), True, 'import numpy as np\n'), ((5925, 5969), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(1, 24)', '(0, 0)'], {'colspan': '(4)'}), '((1, 24), (0, 0), colspan=4)\n', (5941, 5969), True, 'import matplotlib.pyplot as plt\n'), ((7597, 7663), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'extend': '"""both"""', 'cax': 'cbaxes2', 'orientation': '"""horizontal"""'}), "(extend='both', cax=cbaxes2, orientation='horizontal')\n", (7609, 7663), True, 'import matplotlib.pyplot as plt\n'), ((7930, 7970), 'numpy.round', 'np.round', (['(10 ** label_values)'], {'decimals': '(0)'}), '(10 ** label_values, decimals=0)\n', (7938, 7970), True, 'import numpy as np\n'), ((8839, 8859), 'ship_mapper.checkDir', 'sm.checkDir', (['filedir'], {}), '(filedir)\n', (8850, 8859), True, 'import ship_mapper as sm\n'), ((9025, 9036), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9034, 9036), True, 'import matplotlib.pyplot as plt\n'), ((13022, 13050), 
'os.path.exists', 'os.path.exists', (['basemap_file'], {}), '(basemap_file)\n', (13036, 13050), False, 'import os\n'), ((13065, 13120), 'ship_mapper.make_basemap', 'sm.make_basemap', (['info', '[minlat, maxlat, minlon, maxlon]'], {}), '(info, [minlat, maxlat, minlon, maxlon])\n', (13080, 13120), True, 'import ship_mapper as sm\n'), ((18689, 18720), 'os.path.isfile', 'os.path.isfile', (['bathymetry_file'], {}), '(bathymetry_file)\n', (18703, 18720), False, 'import os\n'), ((19494, 19539), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(1, 24)', '(0, 5)'], {'colspan': '(19)'}), '((1, 24), (0, 5), colspan=19)\n', (19510, 19539), True, 'import matplotlib.pyplot as plt\n'), ((22736, 22786), 'matplotlib.colors.LinearSegmentedColormap', 'LinearSegmentedColormap', (['"""my_colormap"""', 'cdict', '(256)'], {}), "('my_colormap', cdict, 256)\n", (22759, 22786), False, 'from matplotlib.colors import LinearSegmentedColormap\n'), ((3549, 3564), 'numpy.max', 'np.max', (['Hmasked'], {}), '(Hmasked)\n', (3555, 3564), True, 'import numpy as np\n'), ((4637, 4655), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['cmap'], {}), '(cmap)\n', (4649, 4655), True, 'import matplotlib.pyplot as plt\n'), ((8881, 8912), 'os.path.join', 'os.path.join', (['filedir', 'filename'], {}), '(filedir, filename)\n', (8893, 8912), False, 'import os\n'), ((16957, 17024), 'os.path.join', 'os.path.join', (['info.dirs.project_path', 'info.grid.region', '"""ancillary"""'], {}), "(info.dirs.project_path, info.grid.region, 'ancillary')\n", (16969, 17024), False, 'import os\n'), ((23453, 23503), 'matplotlib.colors.LinearSegmentedColormap', 'LinearSegmentedColormap', (['"""my_colormap"""', 'cdict', '(256)'], {}), "('my_colormap', cdict, 256)\n", (23476, 23503), False, 'from matplotlib.colors import LinearSegmentedColormap\n'), ((3281, 3296), 'numpy.min', 'np.min', (['Hmasked'], {}), '(Hmasked)\n', (3287, 3296), True, 'import numpy as np\n'), ((3324, 3339), 'numpy.max', 'np.max', (['Hmasked'], {}), '(Hmasked)\n', (3330, 3339), True, 'import numpy as np\n'), ((3368, 3387), 'numpy.nanmean', 'np.nanmean', (['Hmasked'], {}), '(Hmasked)\n', (3378, 3387), True, 'import numpy as np\n'), ((24014, 24064), 'matplotlib.colors.LinearSegmentedColormap', 'LinearSegmentedColormap', (['"""my_colormap"""', 'cdict', '(256)'], {}), "('my_colormap', cdict, 256)\n", (24037, 24064), False, 'from matplotlib.colors import LinearSegmentedColormap\n'), ((6712, 6721), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6719, 6721), True, 'import matplotlib.pyplot as plt\n'), ((6954, 6963), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6961, 6963), True, 'import matplotlib.pyplot as plt\n'), ((7197, 7206), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7204, 7206), True, 'import matplotlib.pyplot as plt\n'), ((7472, 7481), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7479, 7481), True, 'import matplotlib.pyplot as plt\n'), ((8716, 8754), 'ship_mapper.get_filename_from_fullpath', 'sm.get_filename_from_fullpath', (['file_in'], {}), '(file_in)\n', (8745, 8754), True, 'import ship_mapper as sm\n'), ((17109, 17158), 'os.path.join', 'os.path.join', (['info.dirs.project_path', '"""ancillary"""'], {}), "(info.dirs.project_path, 'ancillary')\n", (17121, 17158), False, 'import os\n'), ((23617, 23640), 'numpy.array', 'np.array', (['[250, 59, 59]'], {}), '([250, 59, 59])\n', (23625, 23640), True, 'import numpy as np\n'), ((23666, 23688), 'numpy.array', 'np.array', (['[103, 0, 13]'], {}), '([103, 0, 13])\n', (23674, 23688), True, 'import 
numpy as np\n'), ((6446, 6455), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6453, 6455), True, 'import matplotlib.pyplot as plt\n'), ((10476, 10499), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (10497, 10499), False, 'import datetime\n'), ((10002, 10044), 'ship_mapper.degrees_to_meters', 'sm.degrees_to_meters', (["md['bin_size']", 'alat'], {}), "(md['bin_size'], alat)\n", (10022, 10044), True, 'import ship_mapper as sm\n')] |
# Generated by Django 3.0 on 2019-12-15 02:01
import core.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Artifact',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(choices=[(core.models.ArtifactType['ORTHOMOSAIC'], 'Orthomosaic'), (core.models.ArtifactType['SHAPEFILE'], 'Shapefile')], max_length=20)),
],
),
migrations.CreateModel(
name='Flight',
fields=[
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('name', models.CharField(max_length=50)),
('date', models.DateField(auto_now_add=True)),
('camera', models.CharField(choices=[(core.models.Camera['REDEDGE'], 'Micasense RedEdge'), (core.models.Camera['RGB'], 'RGB')], max_length=10)),
('multispectral_processing', models.BooleanField(default=False)),
('annotations', models.TextField()),
('deleted', models.BooleanField(default=False)),
('state', models.CharField(choices=[(core.models.FlightState['WAITING'], 'Waiting for images'), (core.models.FlightState['PROCESSING'], 'Processing'), (core.models.FlightState['COMPLETE'], 'Complete'), (core.models.FlightState['PAUSED'], 'Paused'), (core.models.FlightState['CANCELED'], 'Canceled')], max_length=10)),
],
),
migrations.RemoveField(
model_name='user',
name='bio',
),
migrations.RemoveField(
model_name='user',
name='birth_date',
),
migrations.RemoveField(
model_name='user',
name='location',
),
migrations.AddField(
model_name='user',
name='organization',
field=models.CharField(blank=True, max_length=20),
),
migrations.AddField(
model_name='user',
name='type',
field=models.CharField(choices=[(core.models.UserType['DEMO_USER'], 'DemoUser'), (core.models.UserType['ACTIVE'], 'Active'), (core.models.UserType['DELETED'], 'Deleted'), (core.models.UserType['ADMIN'], 'Admin')], default=core.models.UserType['DEMO_USER'], max_length=20),
),
migrations.CreateModel(
name='UserProject',
fields=[
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('name', models.CharField(max_length=50)),
('description', models.TextField()),
('deleted', models.BooleanField(default=False)),
('artifacts', models.ManyToManyField(related_name='user_projects', to='core.Artifact')),
('flights', models.ManyToManyField(related_name='user_projects', to='core.Flight')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_projects', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='flight',
name='user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.CreateModel(
name='DemoProject',
fields=[
('uuid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('name', models.CharField(max_length=50)),
('description', models.TextField()),
('deleted', models.BooleanField(default=False)),
('artifacts', models.ManyToManyField(related_name='demo_projects', to='core.Artifact')),
('flights', models.ManyToManyField(related_name='demo_projects', to='core.Flight')),
('users', models.ManyToManyField(related_name='demo_projects', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='artifact',
name='flight',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='artifacts', to='core.Flight'),
),
]
| [
"django.db.models.TextField",
"django.db.models.ManyToManyField",
"django.db.migrations.RemoveField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.BooleanField",
"django.db.models.AutoField",
"django.db.models.DateField",
"django.db.models.UUIDField"
] | [((1758, 1811), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""user"""', 'name': '"""bio"""'}), "(model_name='user', name='bio')\n", (1780, 1811), False, 'from django.db import migrations, models\n'), ((1856, 1916), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""user"""', 'name': '"""birth_date"""'}), "(model_name='user', name='birth_date')\n", (1878, 1916), False, 'from django.db import migrations, models\n'), ((1961, 2019), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""user"""', 'name': '"""location"""'}), "(model_name='user', name='location')\n", (1983, 2019), False, 'from django.db import migrations, models\n'), ((2167, 2210), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(20)'}), '(blank=True, max_length=20)\n', (2183, 2210), False, 'from django.db import migrations, models\n'), ((2326, 2604), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[(core.models.UserType['DEMO_USER'], 'DemoUser'), (core.models.UserType[\n 'ACTIVE'], 'Active'), (core.models.UserType['DELETED'], 'Deleted'), (\n core.models.UserType['ADMIN'], 'Admin')]", 'default': "core.models.UserType['DEMO_USER']", 'max_length': '(20)'}), "(choices=[(core.models.UserType['DEMO_USER'], 'DemoUser'),\n (core.models.UserType['ACTIVE'], 'Active'), (core.models.UserType[\n 'DELETED'], 'Deleted'), (core.models.UserType['ADMIN'], 'Admin')],\n default=core.models.UserType['DEMO_USER'], max_length=20)\n", (2342, 2604), False, 'from django.db import migrations, models\n'), ((3539, 3658), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, to=settings.AUTH_USER_MODEL)\n', (3556, 3658), False, 'from django.db import migrations, models\n'), ((4566, 4677), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""artifacts"""', 'to': '"""core.Flight"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='artifacts', to='core.Flight')\n", (4583, 4677), False, 'from django.db import migrations, models\n'), ((413, 506), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (429, 506), False, 'from django.db import migrations, models\n'), ((530, 691), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[(core.models.ArtifactType['ORTHOMOSAIC'], 'Orthomosaic'), (core.models.\n ArtifactType['SHAPEFILE'], 'Shapefile')]", 'max_length': '(20)'}), "(choices=[(core.models.ArtifactType['ORTHOMOSAIC'],\n 'Orthomosaic'), (core.models.ArtifactType['SHAPEFILE'], 'Shapefile')],\n max_length=20)\n", (546, 691), False, 'from django.db import migrations, models\n'), ((817, 908), 'django.db.models.UUIDField', 'models.UUIDField', ([], {'default': 'uuid.uuid4', 'editable': '(False)', 'primary_key': '(True)', 'serialize': '(False)'}), '(default=uuid.uuid4, editable=False, primary_key=True,\n serialize=False)\n', (833, 908), False, 'from django.db import migrations, models\n'), ((932, 963), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (948, 963), 
False, 'from django.db import migrations, models\n'), ((991, 1026), 'django.db.models.DateField', 'models.DateField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1007, 1026), False, 'from django.db import migrations, models\n'), ((1056, 1191), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[(core.models.Camera['REDEDGE'], 'Micasense RedEdge'), (core.models.Camera[\n 'RGB'], 'RGB')]", 'max_length': '(10)'}), "(choices=[(core.models.Camera['REDEDGE'],\n 'Micasense RedEdge'), (core.models.Camera['RGB'], 'RGB')], max_length=10)\n", (1072, 1191), False, 'from django.db import migrations, models\n'), ((1235, 1269), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1254, 1269), False, 'from django.db import migrations, models\n'), ((1304, 1322), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (1320, 1322), False, 'from django.db import migrations, models\n'), ((1353, 1387), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1372, 1387), False, 'from django.db import migrations, models\n'), ((1416, 1739), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[(core.models.FlightState['WAITING'], 'Waiting for images'), (core.models.\n FlightState['PROCESSING'], 'Processing'), (core.models.FlightState[\n 'COMPLETE'], 'Complete'), (core.models.FlightState['PAUSED'], 'Paused'),\n (core.models.FlightState['CANCELED'], 'Canceled')]", 'max_length': '(10)'}), "(choices=[(core.models.FlightState['WAITING'],\n 'Waiting for images'), (core.models.FlightState['PROCESSING'],\n 'Processing'), (core.models.FlightState['COMPLETE'], 'Complete'), (core\n .models.FlightState['PAUSED'], 'Paused'), (core.models.FlightState[\n 'CANCELED'], 'Canceled')], max_length=10)\n", (1432, 1739), False, 'from django.db import migrations, models\n'), ((2714, 2805), 'django.db.models.UUIDField', 'models.UUIDField', ([], {'default': 'uuid.uuid4', 'editable': '(False)', 'primary_key': '(True)', 'serialize': '(False)'}), '(default=uuid.uuid4, editable=False, primary_key=True,\n serialize=False)\n', (2730, 2805), False, 'from django.db import migrations, models\n'), ((2829, 2860), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (2845, 2860), False, 'from django.db import migrations, models\n'), ((2895, 2913), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (2911, 2913), False, 'from django.db import migrations, models\n'), ((2944, 2978), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (2963, 2978), False, 'from django.db import migrations, models\n'), ((3011, 3083), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'related_name': '"""user_projects"""', 'to': '"""core.Artifact"""'}), "(related_name='user_projects', to='core.Artifact')\n", (3033, 3083), False, 'from django.db import migrations, models\n'), ((3114, 3184), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'related_name': '"""user_projects"""', 'to': '"""core.Flight"""'}), "(related_name='user_projects', to='core.Flight')\n", (3136, 3184), False, 'from django.db import migrations, models\n'), ((3212, 3338), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""user_projects"""', 'to': 'settings.AUTH_USER_MODEL'}), 
"(on_delete=django.db.models.deletion.CASCADE, related_name\n ='user_projects', to=settings.AUTH_USER_MODEL)\n", (3229, 3338), False, 'from django.db import migrations, models\n'), ((3776, 3867), 'django.db.models.UUIDField', 'models.UUIDField', ([], {'default': 'uuid.uuid4', 'editable': '(False)', 'primary_key': '(True)', 'serialize': '(False)'}), '(default=uuid.uuid4, editable=False, primary_key=True,\n serialize=False)\n', (3792, 3867), False, 'from django.db import migrations, models\n'), ((3891, 3922), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (3907, 3922), False, 'from django.db import migrations, models\n'), ((3957, 3975), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (3973, 3975), False, 'from django.db import migrations, models\n'), ((4006, 4040), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (4025, 4040), False, 'from django.db import migrations, models\n'), ((4073, 4145), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'related_name': '"""demo_projects"""', 'to': '"""core.Artifact"""'}), "(related_name='demo_projects', to='core.Artifact')\n", (4095, 4145), False, 'from django.db import migrations, models\n'), ((4176, 4246), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'related_name': '"""demo_projects"""', 'to': '"""core.Flight"""'}), "(related_name='demo_projects', to='core.Flight')\n", (4198, 4246), False, 'from django.db import migrations, models\n'), ((4275, 4361), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'related_name': '"""demo_projects"""', 'to': 'settings.AUTH_USER_MODEL'}), "(related_name='demo_projects', to=settings.\n AUTH_USER_MODEL)\n", (4297, 4361), False, 'from django.db import migrations, models\n')] |
import numpy as np
import cv2
cascade = cv2.CascadeClassifier('cascade.xml')
img = cv2.imread('orange.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
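# detectMultiScale arguments: 1.05 is scaleFactor, 15 is minNeighbors, 0 the
# (legacy) flags and (150, 150) the minimum detection size in pixels, so only
# fairly large candidates confirmed by many neighbouring hits are returned.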
oranges = cascade.detectMultiScale(gray, 1.05, 15, 0, (150,150))
for (x,y,w,h) in oranges:
cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
imgs = cv2.resize(img, (img.shape[1] // 5, img.shape[0] // 5))  # integer division: cv2.resize needs an int size
cv2.imshow('img',imgs)
cv2.waitKey(0)
cv2.destroyAllWindows()
# # show image that's being collected
# $ for filename in Positives/*.jpg;
# $ do opencv_createsamples -img ${filename} -bg negatives.txt -num 25 -bgcolor 255 -w 60 -h 60 -vec output${filename}.vec -maxzangle 0.5 -maxyangle 0.3 -maxxangle 0.3;
# $ done
# $ opencv_traincascade -data Classifier -vec allvecs.vec -bg negatives.txt -numNeg 1000 -numPos 3000 -numStages 11 -h 60 -w 60 -minHitRate 0.99 -maxFalseAlarmRate 0.5 -featureType HAAR -precalcValBufSize 2048 -precalcIdxBufSize 2048
| [
"cv2.cvtColor",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.rectangle",
"cv2.imread",
"cv2.CascadeClassifier",
"cv2.imshow",
"cv2.resize"
] | [((42, 78), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""cascade.xml"""'], {}), "('cascade.xml')\n", (63, 78), False, 'import cv2\n'), ((86, 110), 'cv2.imread', 'cv2.imread', (['"""orange.jpg"""'], {}), "('orange.jpg')\n", (96, 110), False, 'import cv2\n'), ((118, 155), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (130, 155), False, 'import cv2\n'), ((305, 358), 'cv2.resize', 'cv2.resize', (['img', '(img.shape[1] / 5, img.shape[0] / 5)'], {}), '(img, (img.shape[1] / 5, img.shape[0] / 5))\n', (315, 358), False, 'import cv2\n'), ((359, 382), 'cv2.imshow', 'cv2.imshow', (['"""img"""', 'imgs'], {}), "('img', imgs)\n", (369, 382), False, 'import cv2\n'), ((382, 396), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (393, 396), False, 'import cv2\n'), ((397, 420), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (418, 420), False, 'import cv2\n'), ((251, 309), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x, y)', '(x + w, y + h)', '(255, 0, 0)', '(2)'], {}), '(img, (x, y), (x + w, y + h), (255, 0, 0), 2)\n', (264, 309), False, 'import cv2\n')] |
from helper import greeting
greeting("What's Up", "Jake")
| [
"helper.greeting"
] | [((29, 58), 'helper.greeting', 'greeting', (['"""What\'s Up"""', '"""Jake"""'], {}), '("What\'s Up", \'Jake\')\n', (37, 58), False, 'from helper import greeting\n')] |
# Applied Database
# Final Project
# Section 4.4 - Python program answers
# Author : Somu
#mySQL modules import
import mysql.connector
from mysql.connector import Error
from mysql.connector import errorcode
import pandas as pd
#Mongo modules import
import pymongo
from pymongo import MongoClient
#Pandas printing module
from tabulate import tabulate
# This function will display a Menu as requested in the project specification
def menu():
print("--------")
print("World DB")
print("--------")
print("Menu")
print("====")
print("1 - View 15 Cities")
print("2 - View Cities by population")
print("3 - Add New City")
print("4 - Find Car by Engine Size")
print("5 - Add New Car")
print("6 - View Countries by name")
print("7 - View Countries by population")
print("x - Exit application")
myclient = None
global dfp, df
dfp =""
df = pd.DataFrame()
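# dfp acts as a simple cache flag: once the country table has been read into the
# global DataFrame df, globalSet() sets dfp to "2" so menu options 6 and 7 reuse
# the cached DataFrame instead of re-querying MySQL.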
def Mongoconnect(csize,choice,id,reg,size):
try:
global myclient
myclient =pymongo.MongoClient(host = "localhost",port=27017)
myclient.admin.command('ismaster')
mydb = myclient['docs']
docs = mydb["docs"]
if choice == "4":
query = {"car.engineSize":float(csize)}
car = docs.find(query)
for p in car:
print ('{0} | {1} | {2} '.format(p["_id"],p["car"],p["addresses"]))
if choice == "5":
query={"_id":int(id), "car": { "reg":reg,"engineSize":float(size)}}
x = docs.insert_one(query)
query = {"_id":int(id)}
car = docs.find(query)
for p in car:
print (p)
except :
print ("******Error Occurred while executing Mongo commands******")
def globalSet ():
global dfp
dfp = "2"
def DBconnection(query,choice,code,param1):
try:
connection = mysql.connector.connect(host='localhost',database='world', user='root', password='<PASSWORD>')
cursor = connection.cursor(prepared=True)
global dfp,df
if (choice == "6" or choice == "7") and dfp != "2" :
df = pd.read_sql_query(query, connection)
globalSet()
if choice == "1" :
cursor.execute(query)
names = list(map(lambda x: x[0], cursor.description))
print("----------------------------------------------------------------------------------")
print("{:5} | {:^20} | {:^12} | {:^20} | {:10}".format(names[0],names[1],names[2],names[3],names[4]))
print("----------------------------------------------------------------------------------")
for (id,name, countrycode, district,population, latitue,longitude) in cursor:
print("{:5} | {:^20} | {:^12} | {:^20} | {:d}".format(id,name, countrycode, district,population))
elif choice == "2" :
cursor.execute(query)
names = list(map(lambda x: x[0], cursor.description))
print("----------------------------------------------------------------------------------")
print("{:5} | {:^20} | {:^12} | {:^20} | {:10}".format(names[0],names[1],names[2],names[3],names[4]))
print("----------------------------------------------------------------------------------")
for (id,name, countrycode, district,population, latitue,longitude) in cursor:
print("{:5} | {:^20} | {:^12} | {:^20} | {:d}".format(id,name, countrycode, district,population))
elif choice == "3":
cursor.execute(query)
            connection.commit()  # call commit() so the INSERT is actually saved
print("**** RESULT ***** The new city record is inserted into the table")
elif choice == "6" :
df1 = df[df["Name"].str.contains(code)].loc[:,["Name","Continent","population","HeadofState"]]
#print tabulate(df1.to_string(index=False))
print(tabulate(df1, headers="keys",tablefmt="orgtbl"))
elif choice == "7":
if param1 == ">":
df1 = df[(df["population"] > int(code)) ].loc[:,["Name","Continent","population","HeadofState"]]
elif param1 == "<":
df1 = df[(df["population"] < int(code)) ].loc[:,["Name","Continent","population","HeadofState"]]
elif param1 == "=":
df1 = df[(df["population"] == int(code)) ].loc[:,["Name","Continent","population","HeadofState"]]
print(tabulate(df1, headers="keys",tablefmt="orgtbl"))
except mysql.connector.Error as error :
if error.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif error.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
elif error.errno == 1452:
print("----------------------------------------------------")
print("***ERROR***: Country Code "+ code + " does not exist")
print("----------------------------------------------------")
else:
print("Failed to connect to the database: {}".format(error))
connection.rollback()
finally:
#closing database connection.
if(connection.is_connected()):
connection.close()
def displaymenu():
print("This is not a valid choice. You can only choose from the above options")
input("\nPress enter to continue...")
def main():
while True:
menu()
choice = input("Choice : --> ")
Code,param1 = "",""
if choice == "x":
print("Bye - Program Terminate now and welcome back anytime!")
return
elif choice == "1":
query= "select * from city limit 15"
DBconnection (query, choice,Code,param1)
elif choice == "2":
print("Cities by Population")
print("--------------------")
while True:
Comparison = input("Enter <, > or = :")
if Comparison == "<" or Comparison == ">" or Comparison == "=":
query = "select * from city where population" + Comparison
break
else:
displaymenu()
while True:
Value= input("Enter Population :")
if Value.isdigit() == True:
query = query + str(Value)
break
else:
displaymenu()
DBconnection (query, choice,Code,param1)
elif choice == "3":
print("Add New City")
print("------------")
City= input("Enter City Name :")
Code= input("Country Code :")
district= input("District :")
pop= input("Population :")
query = "Insert INTO city (name, countrycode,district,population) VALUES ('" + City + "','" + Code + "','" + district + "',"+ str(pop)+")"
DBconnection (query, choice, Code,param1)
elif choice == "6":
print("Countries by Name")
print("-----------------")
Ctyname = input("Enter Country Name :")
query = "select code, Name, Continent,population,HeadofState from country"
Code=Ctyname
DBconnection (query, choice, Code,param1)
elif choice == "7":
print("Countries by Population")
print("-----------------------")
query = "select code, Name, Continent,population,HeadofState from country"
while True:
Comparison = input("Enter <, > or = :")
if Comparison == "<" or Comparison == ">" or Comparison == "=":
param1=Comparison
break
else:
displaymenu()
while True:
Value= input("Enter Population :")
if Value.isdigit() == True:
Code = Value
break
else:
displaymenu()
DBconnection (query, choice, Code,param1)
elif choice == "4":
print("show cars by engine size")
print("------------------------")
while True:
csize = input("Enter Car Engine Size :")
if csize.isdigit() == True:
csize = csize
break
else:
displaymenu()
Mongoconnect(csize,choice,"","","")
elif choice == "5":
print("Add New Car")
print("-----------")
id= input("_ids:")
reg= input("Enter reg :")
size= input("Enter Size :")
Mongoconnect("",choice,id,reg,size)
else:
print("That is not a valid choice. You can only choose from the menu.")
input("\nPress enter to continue...")
if __name__ == "__main__":
main() | [
"pandas.DataFrame",
"tabulate.tabulate",
"pymongo.MongoClient",
"pandas.read_sql_query"
] | [((935, 949), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (947, 949), True, 'import pandas as pd\n'), ((1051, 1100), 'pymongo.MongoClient', 'pymongo.MongoClient', ([], {'host': '"""localhost"""', 'port': '(27017)'}), "(host='localhost', port=27017)\n", (1070, 1100), False, 'import pymongo\n'), ((2185, 2221), 'pandas.read_sql_query', 'pd.read_sql_query', (['query', 'connection'], {}), '(query, connection)\n', (2202, 2221), True, 'import pandas as pd\n'), ((3972, 4020), 'tabulate.tabulate', 'tabulate', (['df1'], {'headers': '"""keys"""', 'tablefmt': '"""orgtbl"""'}), "(df1, headers='keys', tablefmt='orgtbl')\n", (3980, 4020), False, 'from tabulate import tabulate\n'), ((4509, 4557), 'tabulate.tabulate', 'tabulate', (['df1'], {'headers': '"""keys"""', 'tablefmt': '"""orgtbl"""'}), "(df1, headers='keys', tablefmt='orgtbl')\n", (4517, 4557), False, 'from tabulate import tabulate\n')] |
# Generated by Django 3.0.4 on 2020-04-01 13:24
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('reading', '0007_auto_20200331_2133'),
]
operations = [
migrations.RenameModel(
old_name='ReadingListMetadata',
new_name='ReadingMetadata',
),
]
| [
"django.db.migrations.RenameModel"
] | [((227, 314), 'django.db.migrations.RenameModel', 'migrations.RenameModel', ([], {'old_name': '"""ReadingListMetadata"""', 'new_name': '"""ReadingMetadata"""'}), "(old_name='ReadingListMetadata', new_name=\n 'ReadingMetadata')\n", (249, 314), False, 'from django.db import migrations\n')] |
"""This module provides the main functionality of cfbackup
"""
from __future__ import print_function
import sys
import argparse
import json
import CloudFlare
# https://api.cloudflare.com/#dns-records-for-a-zone-list-dns-records
class CF_DNS_Records(object):
"""
commands for zones manipulation
"""
def __init__(self, ctx):
self._ctx = ctx
def run(self):
"""
run - entry point for DNS records manipulations
"""
cmd = self._ctx.command
if cmd == "show":
self.show()
else:
sys.exit("Command " + cmd + " not implemened for zones")
def show(self):
"""Show CF zones"""
# print("Show DSN records")
try:
records = self._all_records()
except CloudFlare.exceptions.CloudFlareAPIError as e:
exit('/zones %d %s - api call failed' % (e, e))
if not self._ctx.pretty:
print(json.dumps(records, indent=4))
return
records_by_type = {}
types = {}
for rec in records:
if not records_by_type.get(rec["type"]):
types[rec["type"]] = 0
records_by_type[rec["type"]] = []
types[rec["type"]] += 1
records_by_type[rec["type"]].append(rec)
for t in sorted(list(types)):
for rec in records_by_type[t]:
# print(json.dumps(rec, indent=4))
print("Type: {}".format(rec["type"]))
print("Name: {}".format(rec["name"]))
print("Content: {}".format(rec["content"]))
print("TTL: {}{}".format(
rec["ttl"],
" (auto)" if str(rec["ttl"]) == "1" else "",
))
print("Proxied: {}".format(rec["proxied"]))
print("Auto: {}".format(rec["meta"]["auto_added"]))
print("")
print("")
print("-------------------")
print("Records stat:")
print("-------------------")
print("{0: <11} {1: >4}".format("<type>", "<count>"))
for t in sorted(list(types)):
print("{0: <11} {1: >4}".format(t, types[t]))
print("-------------------")
print("{0: <11} {1: >4}".format("Total:", len(records)))
def _all_records(self):
cf = CloudFlare.CloudFlare()
zones = cf.zones.get(params={'name': self._ctx.zone_name, 'per_page': 1})
if len(zones) == 0:
exit('No zones found')
zone_id = zones[0]['id']
cf_raw = CloudFlare.CloudFlare(raw=True)
page = 1
records = []
while True:
raw_results = cf_raw.zones.dns_records.get(
zone_id,
params={'per_page':100, 'page':page},
)
total_pages = raw_results['result_info']['total_pages']
result = raw_results['result']
for rec in result:
records.append(rec)
if page == total_pages:
break
page += 1
return records
# https://api.cloudflare.com/#zone-list-zones
class CF_Zones(object):
"""
commands for zones manipulation
"""
def __init__(self, ctx):
self._ctx = ctx
def run(self):
"""
run - entry point for zones manipulations
"""
cmd = self._ctx.command
if cmd == "show":
self.show()
else:
sys.exit("Command " + cmd + " not implemened for zones")
def show(self):
"""Show CF zones"""
# print("Show cf zones")
try:
zones = self._all_zones()
except CloudFlare.exceptions.CloudFlareAPIError as e:
exit('/zones %d %s - api call failed' % (e, e))
if not self._ctx.pretty:
print(json.dumps(zones, indent=4))
return
for z in zones:
print("Zone: {0: <16} NS: {1}".format(
z["name"],
z["name_servers"][0],
))
for ns in z["name_servers"][1:]:
print(" {0: <16} {1}".format("", ns))
def _all_zones(self):
cf = CloudFlare.CloudFlare(raw=True)
if self._ctx.zone_name:
raw_results = cf.zones.get(params={
'name': self._ctx.zone_name,
'per_page': 1,
'page': 1,
})
return raw_results['result']
page = 1
domains = []
while True:
raw_results = cf.zones.get(params={'per_page':5, 'page':page})
total_pages = raw_results['result_info']['total_pages']
zones = raw_results['result']
for z in zones:
domains.append(z)
if page == total_pages:
break
page += 1
return domains
COMMANDS = [
"show",
# "restore"
]
OBJECT_ENTRYPOINT = {
"zones": CF_Zones,
"dns": CF_DNS_Records,
}
def main():
"""Main entry"""
parser = argparse.ArgumentParser(
prog="cfbackup",
description='Simple Cloudflare backup tool.',
)
parser.add_argument(
"command",
choices=[x for x in COMMANDS],
help="command",
)
subparsers = parser.add_subparsers(
help='Object of command',
dest="object"
)
parser_zones = subparsers.add_parser("zones")
parser_zones.add_argument(
"--pretty",
action='store_true',
help="show user friendly output",
)
parser_zones.add_argument(
"-z", "--zone-name",
help="optional zone name",
)
parser_dns = subparsers.add_parser("dns")
parser_dns.add_argument(
"-z", "--zone-name",
required=True,
help="required zone name",
)
parser_dns.add_argument(
"--pretty",
action='store_true',
help="show user friendly output",
)
args = parser.parse_args()
OBJECT_ENTRYPOINT[args.object](args).run()
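# Example invocations (illustrative; assumes the module is installed with a
# "cfbackup" console-script entry point):
#   cfbackup show zones --pretty
#   cfbackup show zones -z example.com
#   cfbackup show dns -z example.com --pretty
# Without --pretty both subcommands print the raw Cloudflare API JSON.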
| [
"CloudFlare.CloudFlare",
"argparse.ArgumentParser",
"sys.exit",
"json.dumps"
] | [((5047, 5138), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""cfbackup"""', 'description': '"""Simple Cloudflare backup tool."""'}), "(prog='cfbackup', description=\n 'Simple Cloudflare backup tool.')\n", (5070, 5138), False, 'import argparse\n'), ((2361, 2384), 'CloudFlare.CloudFlare', 'CloudFlare.CloudFlare', ([], {}), '()\n', (2382, 2384), False, 'import CloudFlare\n'), ((2581, 2612), 'CloudFlare.CloudFlare', 'CloudFlare.CloudFlare', ([], {'raw': '(True)'}), '(raw=True)\n', (2602, 2612), False, 'import CloudFlare\n'), ((4200, 4231), 'CloudFlare.CloudFlare', 'CloudFlare.CloudFlare', ([], {'raw': '(True)'}), '(raw=True)\n', (4221, 4231), False, 'import CloudFlare\n'), ((577, 633), 'sys.exit', 'sys.exit', (["('Command ' + cmd + ' not implemened for zones')"], {}), "('Command ' + cmd + ' not implemened for zones')\n", (585, 633), False, 'import sys\n'), ((3484, 3540), 'sys.exit', 'sys.exit', (["('Command ' + cmd + ' not implemened for zones')"], {}), "('Command ' + cmd + ' not implemened for zones')\n", (3492, 3540), False, 'import sys\n'), ((947, 976), 'json.dumps', 'json.dumps', (['records'], {'indent': '(4)'}), '(records, indent=4)\n', (957, 976), False, 'import json\n'), ((3848, 3875), 'json.dumps', 'json.dumps', (['zones'], {'indent': '(4)'}), '(zones, indent=4)\n', (3858, 3875), False, 'import json\n')] |
import os
from setuptools import setup
VERSION = "1.0.4"
NAMESPACE = "newstore"
NAME = "{}.json_encoder".format(NAMESPACE)
def local_text_file(file_name):
path = os.path.join(os.path.dirname(__file__), file_name)
with open(path, "rt") as fp:
file_data = fp.read()
return file_data
setup(
name=NAME,
version=VERSION,
description="JSONEncoder",
long_description=local_text_file("README.md"),
long_description_content_type="text/markdown",
author="NewStore Inc.",
author_email="<EMAIL>",
url="https://github.com/NewStore-oss/json-encoder",
zip_safe=True,
packages=[NAME],
namespace_packages=[NAMESPACE],
python_requires=">=3.6,<3.9",
package_data={NAME: []},
install_requires=["setuptools"],
)
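# Packaging sketch (commands are illustrative):
#   pip install .
#   python -c "from newstore import json_encoder"
# The distribution ships the single namespace package "newstore" with the
# "newstore.json_encoder" sub-package, supports Python 3.6-3.8 and needs
# nothing beyond setuptools at runtime.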
| [
"os.path.dirname"
] | [((183, 208), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (198, 208), False, 'import os\n')] |
from generallibrary import getBaseClassNames, SigInfo, dict_insert, wrapper_transfer
def set_parent_hook(self, parent, _draw=True):
""" :param generalgui.MethodGrouper self:
:param generalgui.MethodGrouper parent: """
if _draw:
for part in self.get_children(depth=-1, include_self=True, gen=True):
part.draw_create()
assert "Contain" in getBaseClassNames(parent) or parent is None
class PartBaseClass:
def draw_create_hook(self, kwargs):
""" Used to decouple properties, called by draw_create which is called by init and set_parent. """
def draw_create_post_hook(self):
""" Called after widget is packed. """
def _deco_draw_queue(func):
""" Append one order to dict for this func call.
Creates a key with id of Part and func's name.
If key exists as an old order then it's removed.
Returns key unless draw_now is True. """
def _wrapper(*args, **kwargs):
sigInfo = SigInfo(func, *args, **kwargs)
methodGrouper = sigInfo["self"]
orders = methodGrouper.orders
key = methodGrouper.get_order_key(func)
if sigInfo["draw_now"]:
orders.pop(key, None) # This allows us to manually call draw_create(draw_now=True) after instantiating a Page instead of passing draw_now to Page.
sigInfo.call()
else:
orders[key] = sigInfo
return key
# Could possibly do something like this to skip queue instead of drawing instantly
# if sigInfo["draw_now"]:
# dict_insert(orders, **{key: sigInfo})
# else:
# orders[key] = sigInfo
return wrapper_transfer(func, _wrapper)
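# Minimal usage sketch (the class and method below are hypothetical, shown only
# to illustrate how the decorator is meant to be applied):
#
#   class Part(MethodGrouper):
#       @_deco_draw_queue
#       def draw_create(self, draw_now=False):
#           ...
#
# A normal call stores the SigInfo in self.orders under a key derived from the
# part and the method and returns that key; calling with draw_now=True instead
# drops any queued order for that key and executes the call immediately.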
| [
"generallibrary.getBaseClassNames",
"generallibrary.wrapper_transfer",
"generallibrary.SigInfo"
] | [((1667, 1699), 'generallibrary.wrapper_transfer', 'wrapper_transfer', (['func', '_wrapper'], {}), '(func, _wrapper)\n', (1683, 1699), False, 'from generallibrary import getBaseClassNames, SigInfo, dict_insert, wrapper_transfer\n'), ((977, 1007), 'generallibrary.SigInfo', 'SigInfo', (['func', '*args'], {}), '(func, *args, **kwargs)\n', (984, 1007), False, 'from generallibrary import getBaseClassNames, SigInfo, dict_insert, wrapper_transfer\n'), ((379, 404), 'generallibrary.getBaseClassNames', 'getBaseClassNames', (['parent'], {}), '(parent)\n', (396, 404), False, 'from generallibrary import getBaseClassNames, SigInfo, dict_insert, wrapper_transfer\n')] |
import time
def check_repository(document):
rows = document.findAll("tr", attrs=testing.expect.with_class("repository"))
testing.expect.check(1, len(rows))
def check_cell(row, class_name, expected_string, inline_element_type=None):
cells = row.findAll("td", attrs=testing.expect.with_class(class_name))
testing.expect.check(1, len(cells))
if inline_element_type:
testing.expect.check(1, len(cells[0].findAll(inline_element_type)))
string = cells[0].findAll("i")[0].string
else:
string = cells[0].string
if string is None:
string = ""
testing.expect.check(expected_string, string)
check_cell(rows[0], "name", "critic")
check_cell(rows[0], "location", "http://%s/critic.git" % instance.hostname)
check_cell(rows[0], "upstream", " ")
rows = document.findAll("tr", attrs=testing.expect.with_class("details"))
testing.expect.check(1, len(rows))
tables = rows[0].findAll("table", attrs=testing.expect.with_class("trackedbranches"))
testing.expect.check(1, len(tables))
# Would like to use 'tables[0].findAll()' here, but BeautifulSoup apparently
# doesn't parse nested tables correctly, so these rows aren't actually part
# of the 'trackedbranches' table according to it.
rows = document.findAll("tr", attrs=testing.expect.with_class("branch"))
testing.expect.check(2, len(rows))
check_cell(rows[0], "localname", "Tags", inline_element_type="i")
check_cell(rows[0], "remote", repository.url)
check_cell(rows[0], "remotename", "N/A", inline_element_type="i")
check_cell(rows[0], "enabled", "Yes")
check_cell(rows[0], "users", "")
check_cell(rows[1], "localname", "master")
check_cell(rows[1], "remote", repository.url)
check_cell(rows[1], "remotename", "master")
check_cell(rows[1], "enabled", "Yes")
check_cell(rows[1], "users", "")
with frontend.signin():
# Check that this URL isn't handled already. We're using it later to detect
# that the repository has been created and the tracked branch fetched, and
# if it's already handled for some reason, that check won't be reliable.
frontend.page("critic/master", expected_http_status=404)
frontend.operation("addrepository",
data={ "name": "critic",
"path": "critic",
"remote": { "url": repository.url,
"branch": "master" }})
# If it hasn't happened after 30 seconds, something must be wrong.
deadline = time.time() + 30
finished = False
while not finished and time.time() < deadline:
# The frontend.page() function returns None if the HTTP status was
# 404, and a BeautifulSoup object if it was 200.
if frontend.page("critic/master", expected_http_status=[200, 404]) is None:
time.sleep(0.5)
while True:
mail = mailbox.pop(accept=testing.mailbox.with_subject("^branchtracker.log: "))
if not mail:
break
logger.error("Administrator message: %s\n > %s"
% (mail.header("Subject"), "\n > ".join(mail.lines)))
raise testing.TestFailure
else:
finished = True
if not finished:
logger.error("Repository main branch ('refs/heads/master') not fetched after 30 seconds.")
raise testing.TestFailure
# Check that /repositories still loads correctly now that there's a
# repository in the system.
frontend.page(
"repositories",
expect={ "document_title": testing.expect.document_title(u"Repositories"),
"content_title": testing.expect.paleyellow_title(0, u"Repositories"),
"repository": check_repository })
frontend.operation("addrepository",
data={ "name": "a" * 65,
"path": "validpath2" },
expect={ "status": "failure",
"code": "invalidshortname" })
frontend.operation("addrepository",
data={ "name": "",
"path": "validpath1" },
expect={ "status": "failure",
"code": "invalidshortname" })
| [
"time.sleep",
"time.time"
] | [((2613, 2624), 'time.time', 'time.time', ([], {}), '()\n', (2622, 2624), False, 'import time\n'), ((2679, 2690), 'time.time', 'time.time', ([], {}), '()\n', (2688, 2690), False, 'import time\n'), ((2931, 2946), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (2941, 2946), False, 'import time\n')] |
#!/usr/bin/env python3
"""
Generate GSE64913
"""
__author__ = "<NAME>"
__version__ = "0.1.0"
__license__ = "MIT"
import logging
import GEOparse
import argparse
import pandas as pd
from funcs import utils
from os.path import join
import numpy as np
#def append_postfix(filename,postfix):
# return "{0}_{2}.{1}".format(*filename.rsplit('.', 1) + postfix)
def main(args):
logging.basicConfig(level=logging.INFO,
format='%(module)s:%(levelname)s:%(asctime)s:%(message)s',
handlers=[logging.FileHandler("../logs/report.log")])
logging.info(args)
utils.create_dir_if_not_exist(args.out_expr_dir)
utils.create_dir_if_not_exist(join(args.out_expr_dir,'raw'))
utils.create_dir_if_not_exist(join(args.out_expr_dir,'processed'))
gse = GEOparse.get_GEO(geo='GSE64913', destdir=join(args.out_expr_dir,'raw'))
annotated = gse.pivot_and_annotate('VALUE', gse.gpls['GPL570'], 'ENTREZ_GENE_ID')
annotated2 = annotated[~pd.isnull(annotated.ENTREZ_GENE_ID)]
annotated2 = annotated2.loc[~annotated2.isnull().values.all(axis=1)]
annotated2['ENTREZ_GENE_ID'] = annotated2.ENTREZ_GENE_ID.str.split('///').str[0].astype(int)
annotated2 = annotated2.set_index('ENTREZ_GENE_ID')
classes = {}
classes['healthy_cae'] = ['diagnosis: Healthy','cell type: Central airway epithelium']
classes['healthy_pae'] = ['diagnosis: Healthy', 'cell type: Peripheral airway epithelium']
classes['asthma_cae'] = ['diagnosis: Severe Asthmatic', 'cell type: Central airway epithelium']
classes['asthma_pae'] = ['diagnosis: Severe Asthmatic', 'cell type: Peripheral airway epithelium']
logging.info(classes)
gsms = {cls: [gsm for gsm in gse.gsms if gse.gsms[gsm].metadata['characteristics_ch1'][1] == classes[cls][0] and
gse.gsms[gsm].metadata['characteristics_ch1'][5] == classes[cls][1]] for cls in classes}
logging.info(' '.join(['{} GSM:{}'.format(cls, len(gsms[cls])) for cls in classes]))
utils.create_dir_if_not_exist(args.out_expr_dir)
utils.write_expr(join(args.out_expr_dir, 'processed', 'expr.tsv'), annotated2)
for cls in classes:
utils.write_text(join(args.out_expr_dir, 'processed', '{}_gsms.txt'.format(cls)), gsms[cls])
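# Example invocation (script name and output path are illustrative):
#   python gse64913.py /data/GSE64913
# This downloads the GEO series into <out_expr_dir>/raw and writes
# processed/expr.tsv plus one processed/<group>_gsms.txt file per sample group.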
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Process GSE64913')
parser.add_argument('out_expr_dir', type=str, help='Output directory for expression data file and GSM lists')
args = parser.parse_args()
    main(args)
| [
"argparse.ArgumentParser",
"logging.FileHandler",
"funcs.utils.create_dir_if_not_exist",
"pandas.isnull",
"logging.info",
"os.path.join"
] | [((590, 608), 'logging.info', 'logging.info', (['args'], {}), '(args)\n', (602, 608), False, 'import logging\n'), ((614, 662), 'funcs.utils.create_dir_if_not_exist', 'utils.create_dir_if_not_exist', (['args.out_expr_dir'], {}), '(args.out_expr_dir)\n', (643, 662), False, 'from funcs import utils\n'), ((1671, 1692), 'logging.info', 'logging.info', (['classes'], {}), '(classes)\n', (1683, 1692), False, 'import logging\n'), ((2011, 2059), 'funcs.utils.create_dir_if_not_exist', 'utils.create_dir_if_not_exist', (['args.out_expr_dir'], {}), '(args.out_expr_dir)\n', (2040, 2059), False, 'from funcs import utils\n'), ((2309, 2364), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Process GSE64913"""'}), "(description='Process GSE64913')\n", (2332, 2364), False, 'import argparse\n'), ((697, 727), 'os.path.join', 'join', (['args.out_expr_dir', '"""raw"""'], {}), "(args.out_expr_dir, 'raw')\n", (701, 727), False, 'from os.path import join\n'), ((762, 798), 'os.path.join', 'join', (['args.out_expr_dir', '"""processed"""'], {}), "(args.out_expr_dir, 'processed')\n", (766, 798), False, 'from os.path import join\n'), ((2081, 2129), 'os.path.join', 'join', (['args.out_expr_dir', '"""processed"""', '"""expr.tsv"""'], {}), "(args.out_expr_dir, 'processed', 'expr.tsv')\n", (2085, 2129), False, 'from os.path import join\n'), ((851, 881), 'os.path.join', 'join', (['args.out_expr_dir', '"""raw"""'], {}), "(args.out_expr_dir, 'raw')\n", (855, 881), False, 'from os.path import join\n'), ((996, 1031), 'pandas.isnull', 'pd.isnull', (['annotated.ENTREZ_GENE_ID'], {}), '(annotated.ENTREZ_GENE_ID)\n', (1005, 1031), True, 'import pandas as pd\n'), ((542, 583), 'logging.FileHandler', 'logging.FileHandler', (['"""../logs/report.log"""'], {}), "('../logs/report.log')\n", (561, 583), False, 'import logging\n')] |
#
# Code by <NAME> and under the MIT license
#
from mineturtle import *
import lsystem
t = Turtle()
t.pendelay(0)
t.turtle(None)
t.penblock(block.BRICK_BLOCK)
# ensure angles are always integral multiples of 90 degrees
t.gridalign()
rules = {'X':'X+YF+', 'Y':'-FX-Y'}
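# These are the classic dragon-curve rules. Starting from the axiom 'FX' the
# first two rewrites are:
#   FX -> FX+YF+
#   FX+YF+ -> FX+YF++-FX-YF+
# After the 14 rewriting steps requested below, every 'F' triggers go()
# (a wall segment with a doorway) and '+'/'-' turn the turtle by 90 degrees.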
def go():
# draw a wall segment with a door
t.pendown()
t.penblock(block.BRICK_BLOCK)
t.startface()
for i in range(4):
t.go(4)
t.pitch(90)
t.endface()
t.penup()
t.go(2)
t.pendown()
t.penblock(block.AIR)
t.pitch(90)
t.go(1)
t.penup()
t.pitch(180)
t.go(1)
t.pitch(90)
t.go(2)
dictionary = { '+': lambda: t.yaw(90),
'-': lambda: t.yaw(-90),
'F': lambda: go() }
lsystem.lsystem('FX', rules, dictionary, 14)
| [
"lsystem.lsystem"
] | [((781, 825), 'lsystem.lsystem', 'lsystem.lsystem', (['"""FX"""', 'rules', 'dictionary', '(14)'], {}), "('FX', rules, dictionary, 14)\n", (796, 825), False, 'import lsystem\n')] |
import pytest
from fastapi.testclient import TestClient
@pytest.mark.integration
@pytest.mark.usefixtures("test_db_session")
class TestSignupEndpoint:
def test_signup_returns_200(self, client: TestClient):
response = client.post(
"/users/signup",
json={
"email": "<EMAIL>",
"password": "<PASSWORD>"
}
)
assert response.status_code == 201
def test_signup_existing_user_returns_422(self, client: TestClient):
response = client.post(
"/users/signup",
json={
"email": "<EMAIL>",
"password": "<PASSWORD>"
}
)
assert response.status_code == 201
response_2 = client.post(
"/users/signup",
json={
"email": "<EMAIL>",
"password": "<PASSWORD>"
}
)
assert response_2.status_code == 422
| [
"pytest.mark.usefixtures"
] | [((84, 126), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""test_db_session"""'], {}), "('test_db_session')\n", (107, 126), False, 'import pytest\n')] |
""" Oracle Query Module
"""
import json
from clamfig import Serializable
from web3 import Web3
from telliot_core.dtypes.value_type import ValueType
class OracleQuery(Serializable):
"""Oracle Query
An OracleQuery specifies how to pose a question to the
Tellor Oracle and how to format/interpret the response.
The OracleQuery class serves
as the base class for all Queries, and implements default behaviors.
Each subclass corresponds to a unique Query Type supported
by the TellorX network.
All public attributes of an OracleQuery represent an input that can
be used to customize the query.
The base class provides:
- Calculation of the contents of the `data` field to include with the
`TellorX.Oracle.tipQuery()` contract call.
- Calculation of the `id` field field to include with the
`TellorX.Oracle.tipQuery()` and `TellorX.Oracle.submitValue()`
contract calls.
"""
@property
def descriptor(self) -> str:
"""Get the query descriptor string.
The Query descriptor is a unique string representation of the query.
The descriptor is required for users to specify the query to TellorX
through the ``TellorX.Oracle.tipQuery()`` contract call.
"""
state = self.get_state()
jstr = json.dumps(state, separators=(",", ":"))
return jstr
@property
def value_type(self) -> ValueType:
"""Returns the ValueType expected by the current Query configuration
The value type defines required data type/structure of the
``value`` submitted to the contract through
``TellorX.Oracle.submitValue()``
This method must be overridden by subclasses
"""
pass
@property
def query_data(self) -> bytes:
"""Returns the ``data`` field for use in ``TellorX.Oracle.tipQuery()``
contract call.
"""
return self.descriptor.encode("utf-8")
@property
def query_id(self) -> bytes:
"""Returns the query ``id`` for use with the
``TellorX.Oracle.tipQuery()`` and ``TellorX.Oracle.submitValue()``
contract calls.
"""
return bytes(Web3.keccak(self.query_data))
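# Illustration of how the derived fields relate (the query type below is
# hypothetical; concrete query types are defined elsewhere in telliot_core):
#
#   class ExampleQuery(OracleQuery):
#       def __init__(self, asset: str):
#           self.asset = asset
#
#   q = ExampleQuery("eth-usd")
#   q.descriptor   # compact JSON built from get_state() (exact shape depends on Serializable)
#   q.query_data   # that descriptor encoded as UTF-8 bytes
#   q.query_id     # keccak-256 hash of query_data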
| [
"json.dumps",
"web3.Web3.keccak"
] | [((1324, 1364), 'json.dumps', 'json.dumps', (['state'], {'separators': "(',', ':')"}), "(state, separators=(',', ':'))\n", (1334, 1364), False, 'import json\n'), ((2201, 2229), 'web3.Web3.keccak', 'Web3.keccak', (['self.query_data'], {}), '(self.query_data)\n', (2212, 2229), False, 'from web3 import Web3\n')] |
"""Compute Cloud setup with SaltStack and Apache Libcloud"""
__version__ = '0.1.0-git'
if __name__ == '__main__':
from cardice.commandline import main
main()
| [
"cardice.commandline.main"
] | [((163, 169), 'cardice.commandline.main', 'main', ([], {}), '()\n', (167, 169), False, 'from cardice.commandline import main\n')] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
from builtins import *
from urllib.error import URLError
MASTER_BRANCH = 'https://github.com/genestack/python-client/archive/master.zip'
PYPI_PACKAGE = 'genestack-client'
class GenestackBaseException(Exception):
"""
Base class for Genestack exceptions.
Use it to catch all exceptions raised explicitly by Genestack Python Client.
"""
pass
class GenestackException(GenestackBaseException):
"""
Client-side exception class.
Raise its instances (instead of :py:class:`~exceptions.Exception`)
if anything is wrong on client side.
"""
pass
class GenestackServerException(GenestackException):
"""
Server-side exception class.
Raised when Genestack server returns an error response
(error message generated by Genestack Java code, not an HTTP error).
"""
def __init__(self, message, path, post_data, debug=False, stack_trace=None):
"""
:param message: exception message
:type message: str
:param path: path after server URL of connection.
:type path: str
:param post_data: POST data (file or dict)
:type debug: bool
:param debug: flag if stack trace should be printed
:param stack_trace: server stack trace
:type stack_trace: str
"""
message = (message.decode('utf-8', 'ignore')
if isinstance(message, bytes) else message)
GenestackException.__init__(self, message, path, post_data, debug, stack_trace)
self.message = message
self.debug = debug
self.stack_trace = stack_trace
self.path = path
self.post_data = post_data
def __str__(self):
if isinstance(self.post_data, dict):
message = 'Got error "%s" at call of method "%s" of "%s"' % (
self.message,
self.post_data.get('method', '<unknown>'),
self.path
)
else:
# upload file
message = 'Got error "%s" at call of "%s"' % (
self.message,
self.path
)
if self.stack_trace:
if self.debug:
message += '\nStacktrace from server is:\n%s' % self.stack_trace
else:
message += '\nEnable debug option to retrieve traceback'
return message
class GenestackResponseError(GenestackBaseException, URLError):
"""
Wrapper for HTTP response errors.
Extends :py:class:`urllib2.URLError` for backward compatibility.
"""
def __init__(self, reason):
self.args = reason,
self.reason = reason
def __str__(self):
return '<urlopen error %s>' % self.reason
class GenestackConnectionFailure(GenestackBaseException, URLError):
"""
Wrapper for server connection failures.
Extends :py:class:`urllib2.URLError` for backward compatibility.
"""
def __init__(self, message):
self.message = "<connection failed %s>" % message
def __str__(self):
return self.message
class GenestackAuthenticationException(GenestackException):
"""
Exception thrown on an authentication error response from server.
"""
pass
class GenestackVersionException(GenestackException):
"""
Exception thrown if server requires a newer version on Python Client.
"""
def __init__(self, current_version, required_version=None):
"""
:param current_version: current version
:type current_version: distutils.version.StrictVersion
:param required_version: minimum required version
:type required_version: distutils.version.StrictVersion
"""
if required_version:
package = MASTER_BRANCH if required_version.prerelease else PYPI_PACKAGE
message = (
'Your Genestack Client version "{current_version}" is too old, '
'at least "{required_version}" is required.\n'
).format(current_version=current_version, required_version=required_version)
else:
package = PYPI_PACKAGE
message = 'Cannot get required version from server.\n'
message += (
'You can update client with the following command:\n'
' pip install {package} --upgrade'
).format(package=package)
super(GenestackVersionException, self).__init__(message)
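# Usage sketch illustrating the catch-all role of GenestackBaseException
# (the wrapped call is hypothetical):
#
#   try:
#       some_client_call()
#   except GenestackBaseException as exc:
#       print('Genestack client error:', exc)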
| [
"future.standard_library.install_aliases"
] | [((209, 243), 'future.standard_library.install_aliases', 'standard_library.install_aliases', ([], {}), '()\n', (241, 243), False, 'from future import standard_library\n')] |
"""
"""
from enum import Enum
import logging
from telegram import (ReplyKeyboardMarkup, ReplyKeyboardRemove)
from telegram.ext import ConversationHandler
from telegramcalendarkeyboard import telegramcalendar
from telegramcalendarkeyboard import telegramoptions
from texts import texts as TEXTS
from texts import keyboards as KEYBOARDS
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.DEBUG)
logger = logging.getLogger(__name__)
class BotOptions(Enum):
ADD_QUERY = 1
DEL_QUERY = 2
DO_QUERY = 3
class ConvStates(Enum):
OPTION = 1
STATION = 2
DATE = 3
NUMERIC_OPTION = 4
class RenfeBotConversations:
class Conversation:
def __init__(self, userid):
self._userid = userid
self.reset()
def reset(self):
self._option = 0
self._origin = None
self._dest = None
self._date = None
self._data = None
def __init__(self, renfebot):
self._conversations = {}
self._RB = renfebot
def _start_conv_for_user(self, userid):
if userid not in self._conversations:
self._conversations[userid] = self.Conversation(userid)
self._conversations[userid].reset()
def handler_start(self, bot, update):
ret_code = 0
userid = update.message.from_user.id
username = update.message.from_user.first_name
if update.message.from_user.last_name is not None:
username += " " + update.message.from_user.last_name
auth = self._RB._DB.get_user_auth(userid, username)
if auth == 0: # Not authorized
logger.debug("NOT AUTHORIZED USER")
update.message.reply_text(TEXTS["NOT_AUTH_REPLY"].format(username=username),
reply_markup=ReplyKeyboardRemove())
self._RB.ask_admin_for_access(bot, userid, username)
ret_code = ConversationHandler.END
else: # Authorized
logger.debug("AUTHORIZED USER")
self._start_conv_for_user(userid)
update.message.reply_text(TEXTS["OPTION_SELECTION"],
reply_markup=ReplyKeyboardMarkup(
KEYBOARDS["MAIN_OPTIONS"]),
one_time_keyboard=True)
ret_code = ConvStates.OPTION
return ret_code
def handler_cancel(self, bot, update):
return ConversationHandler.END
def handler_option(self, bot, update):
userid = update.message.from_user.id
ret_code = 0
if update.message.text == TEXTS["MAIN_OP_DO_QUERY"]:
ret_code = self._h_op_do_query(userid, bot, update)
elif update.message.text == TEXTS["MAIN_OP_ADD_QUERY"]:
ret_code = self._h_op_add_query(userid, bot, update)
elif update.message.text == TEXTS["MAIN_OP_DEL_QUERY"]:
ret_code = self._h_op_del_query(userid, bot, update)
elif update.message.text == TEXTS["MAIN_OP_CHECK_QUERY"]:
ret_code = self._h_op_check_queries(userid, bot, update)
else:
update.message.reply_text(TEXTS["MAIN_OP_UNKNOWN"])
ret_code = ConversationHandler.END
return ret_code
def _h_op_do_query(self, userid, bot, update):
self._conversations[userid]._option = BotOptions.DO_QUERY
update.message.reply_text(TEXTS["DO_ONETIME_QUERY"])
update.message.reply_text(TEXTS["SELECT_ORIGIN_STATION"],
reply_markup=ReplyKeyboardMarkup(KEYBOARDS["STATIONS"],
one_time_keyboard=True))
return ConvStates.STATION
def _h_op_add_query(self, userid, bot, update):
self._conversations[userid]._option = BotOptions.ADD_QUERY
update.message.reply_text(TEXTS["ADD_PERIODIC_QUERY"])
update.message.reply_text(TEXTS["SELECT_ORIGIN_STATION"],
reply_markup=ReplyKeyboardMarkup(KEYBOARDS["STATIONS"],
one_time_keyboard=True))
return ConvStates.STATION
def _h_op_del_query(self, userid, bot, update):
self._conversations[userid]._option = BotOptions.DEL_QUERY
user_queries = self._RB._DB.get_user_queries(userid)
ret_code = 0
if len(user_queries) == 0:
update.message.reply_text(TEXTS["NO_QUERIES_FOR_USERID"])
ret_code = ConversationHandler.END
else:
options = []
for q in user_queries:
options.append(TEXTS["QUERY_IN_DB"].
format(origin=q["origin"],
destination=q["destination"],
date=self._RB._DB.timestamp_to_date(q["date"])))
bot.send_message(chat_id=userid,
text=TEXTS["SELECT_TRIP_TO_DETELE"],
reply_markup=telegramoptions.create_options_keyboard(options,TEXTS["CANCEL"]))
self._conversations[userid]._data = user_queries
ret_code = ConvStates.NUMERIC_OPTION
return ret_code
def _h_op_check_queries(self, userid, bot, update):
user_queries = self._RB._DB.get_user_queries(userid)
if len(user_queries) == 0:
update.message.reply_text(TEXTS["NO_QUERIES_FOR_USERID"])
else:
update.message.reply_text(TEXTS["QUERIES_FOR_USERID"])
for q in user_queries:
update.message.reply_text(TEXTS["QUERY_IN_DB"].
format(origin=q["origin"],
destination=q["destination"],
date=self._RB._DB.timestamp_to_date(q["date"])))
update.message.reply_text(TEXTS["END_MESSAGE"],reply_markup=ReplyKeyboardRemove())
return ConversationHandler.END
def handler_numeric_option(self, bot, update):
logger.debug("Processing numeric opion")
userid = update.callback_query.from_user.id
user_queries = self._conversations[userid]._data
selected, query_index = telegramoptions.process_option_selection(bot, update)
if not selected:
logger.debug("Nothing selected")
bot.send_message(chat_id= userid, text=TEXTS["DB_QUERY_NOT_REMOVED"],reply_markup=ReplyKeyboardRemove())
return ConversationHandler.END
else:
logger.debug("Deleting query with index "+str(query_index))
if len(user_queries) > query_index:
query = user_queries[query_index]
if self._RB._DB.remove_periodic_query(query["userid"], query["origin"],
query["destination"], query["date"]):
bot.send_message(chat_id=userid,text=TEXTS["DB_QUERY_REMOVED"],reply_markup=ReplyKeyboardRemove())
else:
bot.send_message(chat_id=userid,text=TEXTS["DB_QUERY_NOT_PRESENT"],reply_markup=ReplyKeyboardRemove())
return ConversationHandler.END
def handler_date(self, bot, update):
logger.debug("Processing date")
selected, date = telegramcalendar.process_calendar_selection(bot, update)
if not selected:
logger.debug("Not selected")
return ConvStates.DATE
else:
logger.debug("selected")
userid = update.callback_query.from_user.id
conv = self._conversations[userid]
conv._date = date.strftime("%d/%m/%Y")
logger.debug("Date is " + conv._date)
bot.send_message(chat_id=userid, text=TEXTS["SELECTED_DATA"].
format(origin=conv._origin, destination=conv._dest, date=conv._date))
if conv._option == BotOptions.ADD_QUERY:
res = self._RB._DB.add_periodic_query(
userid, conv._origin, conv._dest, conv._date)
bot.send_message(chat_id=userid,text=res[1])
elif conv._option == BotOptions.DO_QUERY:
bot.send_message(chat_id=userid,text=TEXTS["WAIT_FOR_TRAINS"])
res = self._RB._RF.check_trip(conv._origin, conv._dest, conv._date)
self._RB.send_query_results_to_user(bot, userid, res,
conv._origin, conv._dest, conv._date)
else:
logger.error("Problem, no other option should lead HERE!")
return ConversationHandler.END
def handler_station(self, bot, update):
logger.debug("Setting Station")
userid = update.message.from_user.id
if self._conversations[userid]._origin is None:
logger.debug("Origin Station")
self._conversations[userid]._origin = update.message.text.upper()
update.message.reply_text(TEXTS["SELECT_DESTINATION_STATION"],
reply_markup=ReplyKeyboardMarkup(KEYBOARDS["STATIONS"], one_time_keyboard=True))
return ConvStates.STATION
else:
logger.debug("Destination Station")
self._conversations[userid]._dest = update.message.text.upper()
bot.send_message(chat_id=userid,
text=TEXTS["SELECTED_TRIP"].format(
origin=self._conversations[userid]._origin,
destination=self._conversations[userid]._dest
),
reply_markup=ReplyKeyboardRemove())
bot.send_message(chat_id=userid, text=TEXTS["SELECT_TRIP_DATE"],
reply_markup=telegramcalendar.create_calendar())
            return ConvStates.DATE
| [
"telegramcalendarkeyboard.telegramcalendar.create_calendar",
"logging.basicConfig",
"telegram.ReplyKeyboardRemove",
"telegramcalendarkeyboard.telegramoptions.create_options_keyboard",
"telegram.ReplyKeyboardMarkup",
"telegramcalendarkeyboard.telegramoptions.process_option_selection",
"telegramcalendarkeyboard.telegramcalendar.process_calendar_selection",
"logging.getLogger"
] | [((341, 454), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""', 'level': 'logging.DEBUG'}), "(format=\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.DEBUG\n )\n", (360, 454), False, 'import logging\n'), ((475, 502), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (492, 502), False, 'import logging\n'), ((6233, 6286), 'telegramcalendarkeyboard.telegramoptions.process_option_selection', 'telegramoptions.process_option_selection', (['bot', 'update'], {}), '(bot, update)\n', (6273, 6286), False, 'from telegramcalendarkeyboard import telegramoptions\n'), ((7274, 7330), 'telegramcalendarkeyboard.telegramcalendar.process_calendar_selection', 'telegramcalendar.process_calendar_selection', (['bot', 'update'], {}), '(bot, update)\n', (7317, 7330), False, 'from telegramcalendarkeyboard import telegramcalendar\n'), ((3618, 3684), 'telegram.ReplyKeyboardMarkup', 'ReplyKeyboardMarkup', (["KEYBOARDS['STATIONS']"], {'one_time_keyboard': '(True)'}), "(KEYBOARDS['STATIONS'], one_time_keyboard=True)\n", (3637, 3684), False, 'from telegram import ReplyKeyboardMarkup, ReplyKeyboardRemove\n'), ((4054, 4120), 'telegram.ReplyKeyboardMarkup', 'ReplyKeyboardMarkup', (["KEYBOARDS['STATIONS']"], {'one_time_keyboard': '(True)'}), "(KEYBOARDS['STATIONS'], one_time_keyboard=True)\n", (4073, 4120), False, 'from telegram import ReplyKeyboardMarkup, ReplyKeyboardRemove\n'), ((5929, 5950), 'telegram.ReplyKeyboardRemove', 'ReplyKeyboardRemove', ([], {}), '()\n', (5948, 5950), False, 'from telegram import ReplyKeyboardMarkup, ReplyKeyboardRemove\n'), ((1877, 1898), 'telegram.ReplyKeyboardRemove', 'ReplyKeyboardRemove', ([], {}), '()\n', (1896, 1898), False, 'from telegram import ReplyKeyboardMarkup, ReplyKeyboardRemove\n'), ((2246, 2292), 'telegram.ReplyKeyboardMarkup', 'ReplyKeyboardMarkup', (["KEYBOARDS['MAIN_OPTIONS']"], {}), "(KEYBOARDS['MAIN_OPTIONS'])\n", (2265, 2292), False, 'from telegram import ReplyKeyboardMarkup, ReplyKeyboardRemove\n'), ((5052, 5117), 'telegramcalendarkeyboard.telegramoptions.create_options_keyboard', 'telegramoptions.create_options_keyboard', (['options', "TEXTS['CANCEL']"], {}), "(options, TEXTS['CANCEL'])\n", (5091, 5117), False, 'from telegramcalendarkeyboard import telegramoptions\n'), ((6451, 6472), 'telegram.ReplyKeyboardRemove', 'ReplyKeyboardRemove', ([], {}), '()\n', (6470, 6472), False, 'from telegram import ReplyKeyboardMarkup, ReplyKeyboardRemove\n'), ((9037, 9103), 'telegram.ReplyKeyboardMarkup', 'ReplyKeyboardMarkup', (["KEYBOARDS['STATIONS']"], {'one_time_keyboard': '(True)'}), "(KEYBOARDS['STATIONS'], one_time_keyboard=True)\n", (9056, 9103), False, 'from telegram import ReplyKeyboardMarkup, ReplyKeyboardRemove\n'), ((9621, 9642), 'telegram.ReplyKeyboardRemove', 'ReplyKeyboardRemove', ([], {}), '()\n', (9640, 9642), False, 'from telegram import ReplyKeyboardMarkup, ReplyKeyboardRemove\n'), ((9763, 9797), 'telegramcalendarkeyboard.telegramcalendar.create_calendar', 'telegramcalendar.create_calendar', ([], {}), '()\n', (9795, 9797), False, 'from telegramcalendarkeyboard import telegramcalendar\n'), ((6960, 6981), 'telegram.ReplyKeyboardRemove', 'ReplyKeyboardRemove', ([], {}), '()\n', (6979, 6981), False, 'from telegram import ReplyKeyboardMarkup, ReplyKeyboardRemove\n'), ((7105, 7126), 'telegram.ReplyKeyboardRemove', 'ReplyKeyboardRemove', ([], {}), '()\n', (7124, 7126), False, 'from telegram import ReplyKeyboardMarkup, ReplyKeyboardRemove\n')] |
import torch
import logging
from quadboost import QuadBoostMHCR
from quadboost.label_encoder import LabelEncoder, OneHotEncoder, AllPairsEncoder
from quadboost.weak_learner import *
from quadboost.callbacks import *
from quadboost.datasets import MNISTDataset
from quadboost.utils import parse, timed
from quadboost.data_preprocessing.data_augmentation import extend_mnist
from quadboost.weak_learner.random_convolution import plot_images
@timed
@parse
def main(m=60_000, val=10_000, da=0, dataset='mnist', center=True, reduce=True, encodings='onehot', wl='rccridge', max_round=1000, patience=1000, resume=0, n_jobs=1, max_n_leaves=4, n_filters=10, fs=11, fsh=0, locality=4, init_filters='from_bank', bank_ratio=.05, fn='c', seed=42, nl='maxpool', maxpool=3, device='cpu', degrees=.0, scale=.0, shear=.0, margin=2, nt=1):
if seed:
torch.manual_seed(seed)
np.random.seed(seed)
### Data loading
mnist = MNISTDataset.load(dataset+'.pkl')
(Xtr, Ytr), (X_val, Y_val), (Xts, Yts) = mnist.get_train_valid_test(valid=val, center=False, reduce=False, shuffle=seed)
Xtr, Ytr = Xtr[:m], Ytr[:m]
if da:
logging.info(f'Adding {da} examples with data augmentation.')
Xtr, Ytr = extend_mnist(Xtr, Ytr, N=da, degrees=degrees, scale=(1-scale, 1/(1-scale)), shear=shear)
mnist.fit_scaler(Xtr, center=center, reduce=reduce)
Xtr, Ytr = mnist.transform_data(Xtr.reshape(Xtr.shape[0],-1), Ytr)
X_val, Y_val = mnist.transform_data(X_val.reshape(X_val.shape[0],-1), Y_val)
Xts, Yts = mnist.transform_data(Xts.reshape(Xts.shape[0],-1), Yts)
logging.info(f'Loaded dataset: {dataset} (center: {center}, reduce: {reduce})')
logging.info(f'Number of examples - train: {len(Xtr)}, valid: {len(X_val)}, test: {len(Xts)}')
### Choice of encoder
if encodings == 'onehot':
encoder = OneHotEncoder(Ytr)
elif encodings == 'allpairs':
encoder = AllPairsEncoder(Ytr)
else:
encoder = LabelEncoder.load_encodings(encodings)
if all(label.isdigit() for label in encoder.labels_encoding):
encoder = LabelEncoder({int(label):encoding for label, encoding in encoder.labels_encoding.items()})
logging.info(f'Encoding: {encodings}')
filename = f'd={dataset}-e={encodings}-wl={wl}'
### Choice of weak learner
kwargs = {}
if wl in ['ds', 'decision-stump']:
weak_learner = MulticlassDecisionStump()
kwargs = dict(zip(('sorted_X', 'sorted_X_idx'), weak_learner.sort_data(Xtr)))
kwargs['n_jobs'] = n_jobs
elif wl in ['dt', 'decision-tree']:
weak_learner = MulticlassDecisionTree(max_n_leaves=max_n_leaves)
kwargs = dict(zip(('sorted_X', 'sorted_X_idx'), weak_learner.sort_data(Xtr)))
kwargs['n_jobs'] = n_jobs
filename += f'{max_n_leaves}'
elif wl == 'ridge':
weak_learner = WLThresholdedRidge(threshold=.5)
elif wl.startswith('rcc') or wl.startswith('rlc'):
if device.startswith('cuda'):
Xtr = RandomConvolution.format_data(Xtr).to(device=device)
X_val = RandomConvolution.format_data(X_val).to(device=device)
Xts = RandomConvolution.format_data(Xts).to(device=device)
filename += f'-nf={n_filters}-fs={fs}'
if fsh: filename += f'_to_{fsh}'
if wl.startswith('rlc'): filename += f'-loc={locality}'
activation = None
if 'maxpool' in nl:
filename += f'-maxpool{maxpool}'
if 'relu' in nl:
filename += f'-relu'
activation = torch.nn.functional.relu
elif 'sigmoid' in nl:
filename += f'-sigmoid'
activation = torch.sigmoid
filename += f'-{init_filters}'
if degrees:
filename += f'-deg={degrees}'
if scale:
filename += f'-scale={scale}'
scale = (1-scale, 1/(1-scale))
else:
scale = None
if shear:
filename += f'-shear={shear}'
else:
shear = None
filter_bank = None
if init_filters == 'from_bank':
if 0 < bank_ratio < 1:
bank_size = int(m*bank_ratio)
filter_bank = Xtr[:bank_size]
Xtr, Ytr = Xtr[bank_size:], Ytr[bank_size:]
logging.info(f'Bank size: {bank_size}')
else:
raise ValueError(f'Invalid bank_size {bank_size}.')
filename += f'_br={bank_ratio}'
elif init_filters == 'from_data':
filter_bank = Xtr
if fn:
filename += f'_{fn}'
f_proc = []
if 'c' in fn:
f_proc.append(center_weight)
if 'n' in fn:
f_proc.append(normalize_weight)
if 'r' in fn:
f_proc.append(reduce_weight)
w_gen = WeightFromBankGenerator(filter_bank=filter_bank,
filters_shape=(fs, fs),
filters_shape_high=(fsh, fsh) if fsh else None,
filter_processing=f_proc,
margin=margin,
degrees=degrees,
scale=scale,
shear=shear,
)
if wl.startswith('rcc'):
filters = Filters(n_filters=n_filters,
weights_generator=w_gen,
activation=activation,
maxpool_shape=(nt, maxpool, maxpool))
elif wl.startswith('rlc'):
filters = LocalFilters(n_filters=n_filters,
weights_generator=w_gen,
locality=locality,
maxpool_shape=(nt, maxpool, maxpool))
if nt > 1:
filename += f'-nt={nt}'
if wl.endswith('ridge'):
weak_learner = RandomConvolution(filters=filters, weak_learner=Ridge)
if wl.endswith('ds'):
weak_learner = RandomConvolution(filters=filters, weak_learner=MulticlassDecisionStump)
kwargs['n_jobs'] = n_jobs
else:
raise ValueError(f'Invalid weak learner name: "{wl}".')
logging.info(f'Weak learner: {type(weak_learner).__name__}')
### Callbacks
ckpt = ModelCheckpoint(filename=filename+'-{round}.ckpt', dirname='./results', save_last=True)
logger = CSVLogger(filename=filename+'-log.csv', dirname='./results/log')
zero_risk = BreakOnZeroRiskCallback()
callbacks = [ckpt,
logger,
zero_risk,
]
logging.info(f'Filename: {filename}')
### Fitting the model
if not resume:
logging.info(f'Beginning fit with max_round_number={max_round} and patience={patience}.')
qb = QuadBoostMHCR(weak_learner, encoder=encoder)
qb.fit(Xtr, Ytr, max_round_number=max_round, patience=patience,
X_val=X_val, Y_val=Y_val,
callbacks=callbacks,
**kwargs)
### Or resume fitting a model
else:
logging.info(f'Resuming fit with max_round_number={max_round}.')
qb = QuadBoostMHCR.load(f'results/{filename}-{resume}.ckpt')
qb.resume_fit(Xtr, Ytr,
X_val=X_val, Y_val=Y_val,
max_round_number=max_round,
**kwargs)
print(f'Best round recap:\nBoosting round {qb.best_round.step_number+1:03d} | Train acc: {qb.best_round.train_acc:.3%} | Valid acc: {qb.best_round.valid_acc:.3%} | Risk: {qb.best_round.risk:.3f}')
print(f'Test accuracy on best model: {qb.evaluate(Xts, Yts):.3%}')
print(f'Test accuracy on last model: {qb.evaluate(Xts, Yts, mode="last"):.3%}')
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO, style='{', format='[{levelname}] {message}')
main()
| [
"quadboost.label_encoder.LabelEncoder.load_encodings",
"quadboost.label_encoder.AllPairsEncoder",
"logging.basicConfig",
"quadboost.datasets.MNISTDataset.load",
"quadboost.QuadBoostMHCR.load",
"torch.manual_seed",
"quadboost.QuadBoostMHCR",
"logging.info",
"quadboost.data_preprocessing.data_augmentation.extend_mnist",
"quadboost.label_encoder.OneHotEncoder"
] | [((933, 968), 'quadboost.datasets.MNISTDataset.load', 'MNISTDataset.load', (["(dataset + '.pkl')"], {}), "(dataset + '.pkl')\n", (950, 968), False, 'from quadboost.datasets import MNISTDataset\n'), ((1598, 1677), 'logging.info', 'logging.info', (['f"""Loaded dataset: {dataset} (center: {center}, reduce: {reduce})"""'], {}), "(f'Loaded dataset: {dataset} (center: {center}, reduce: {reduce})')\n", (1610, 1677), False, 'import logging\n'), ((2198, 2236), 'logging.info', 'logging.info', (['f"""Encoding: {encodings}"""'], {}), "(f'Encoding: {encodings}')\n", (2210, 2236), False, 'import logging\n'), ((6698, 6735), 'logging.info', 'logging.info', (['f"""Filename: {filename}"""'], {}), "(f'Filename: {filename}')\n", (6710, 6735), False, 'import logging\n'), ((7849, 7938), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'style': '"""{"""', 'format': '"""[{levelname}] {message}"""'}), "(level=logging.INFO, style='{', format=\n '[{levelname}] {message}')\n", (7868, 7938), False, 'import logging\n'), ((846, 869), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (863, 869), False, 'import torch\n'), ((1143, 1204), 'logging.info', 'logging.info', (['f"""Adding {da} examples with data augmentation."""'], {}), "(f'Adding {da} examples with data augmentation.')\n", (1155, 1204), False, 'import logging\n'), ((1224, 1322), 'quadboost.data_preprocessing.data_augmentation.extend_mnist', 'extend_mnist', (['Xtr', 'Ytr'], {'N': 'da', 'degrees': 'degrees', 'scale': '(1 - scale, 1 / (1 - scale))', 'shear': 'shear'}), '(Xtr, Ytr, N=da, degrees=degrees, scale=(1 - scale, 1 / (1 -\n scale)), shear=shear)\n', (1236, 1322), False, 'from quadboost.data_preprocessing.data_augmentation import extend_mnist\n'), ((1852, 1870), 'quadboost.label_encoder.OneHotEncoder', 'OneHotEncoder', (['Ytr'], {}), '(Ytr)\n', (1865, 1870), False, 'from quadboost.label_encoder import LabelEncoder, OneHotEncoder, AllPairsEncoder\n'), ((6790, 6889), 'logging.info', 'logging.info', (['f"""Beginning fit with max_round_number={max_round} and patience={patience}."""'], {}), "(\n f'Beginning fit with max_round_number={max_round} and patience={patience}.'\n )\n", (6802, 6889), False, 'import logging\n'), ((6893, 6937), 'quadboost.QuadBoostMHCR', 'QuadBoostMHCR', (['weak_learner'], {'encoder': 'encoder'}), '(weak_learner, encoder=encoder)\n', (6906, 6937), False, 'from quadboost import QuadBoostMHCR\n'), ((7164, 7228), 'logging.info', 'logging.info', (['f"""Resuming fit with max_round_number={max_round}."""'], {}), "(f'Resuming fit with max_round_number={max_round}.')\n", (7176, 7228), False, 'import logging\n'), ((7242, 7297), 'quadboost.QuadBoostMHCR.load', 'QuadBoostMHCR.load', (['f"""results/{filename}-{resume}.ckpt"""'], {}), "(f'results/{filename}-{resume}.ckpt')\n", (7260, 7297), False, 'from quadboost import QuadBoostMHCR\n'), ((1923, 1943), 'quadboost.label_encoder.AllPairsEncoder', 'AllPairsEncoder', (['Ytr'], {}), '(Ytr)\n', (1938, 1943), False, 'from quadboost.label_encoder import LabelEncoder, OneHotEncoder, AllPairsEncoder\n'), ((1972, 2010), 'quadboost.label_encoder.LabelEncoder.load_encodings', 'LabelEncoder.load_encodings', (['encodings'], {}), '(encodings)\n', (1999, 2010), False, 'from quadboost.label_encoder import LabelEncoder, OneHotEncoder, AllPairsEncoder\n'), ((4290, 4329), 'logging.info', 'logging.info', (['f"""Bank size: {bank_size}"""'], {}), "(f'Bank size: {bank_size}')\n", (4302, 4329), False, 'import logging\n')] |
from concurrent import futures
import grpc
import relationExtractService_pb2
import relationExtractService_pb2_grpc
import tools
class relationExtractService(relationExtractService_pb2_grpc.relationExtractServiceServicer):
def ExtractTriple(self,request,context):
sentence = request.sentence
triples = tools.extract_items(sentence)
response = relationExtractService_pb2.relationExtractResponse()
for triple in triples:
data = response.triples.add()
data.sub=triple[0]
data.pred=triple[1]
data.obj=triple[2]
return response
def serve():
server = grpc.server(futures.ThreadPoolExecutor(max_workers=8))
relationExtractService_pb2_grpc.add_relationExtractServiceServicer_to_server(relationExtractService(),server)
server.add_insecure_port("[::]:4232")
server.start()
server.wait_for_termination()
if __name__ == '__main__':
serve()
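# Client-side sketch (names are assumptions based on standard protoc naming:
# the generated stub should be relationExtractServiceStub and the request
# message is expected to expose a `sentence` field):
#
#   channel = grpc.insecure_channel("localhost:4232")
#   stub = relationExtractService_pb2_grpc.relationExtractServiceStub(channel)
#   reply = stub.ExtractTriple(
#       relationExtractService_pb2.relationExtractRequest(sentence="..."))
#   for triple in reply.triples:
#       print(triple.sub, triple.pred, triple.obj)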
| [
"concurrent.futures.ThreadPoolExecutor",
"relationExtractService_pb2.relationExtractResponse",
"tools.extract_items"
] | [((322, 351), 'tools.extract_items', 'tools.extract_items', (['sentence'], {}), '(sentence)\n', (341, 351), False, 'import tools\n'), ((371, 423), 'relationExtractService_pb2.relationExtractResponse', 'relationExtractService_pb2.relationExtractResponse', ([], {}), '()\n', (421, 423), False, 'import relationExtractService_pb2\n'), ((654, 695), 'concurrent.futures.ThreadPoolExecutor', 'futures.ThreadPoolExecutor', ([], {'max_workers': '(8)'}), '(max_workers=8)\n', (680, 695), False, 'from concurrent import futures\n')] |
import itertools
from collections import OrderedDict
import numpy as np
import cv2
import torch
import torch.nn as nn
from torch.nn import Sequential as Seq, Linear, ReLU
import torch.nn.functional as F
from torch_geometric.data import Data, Batch
from . import base_networks
from . import graph_construction as gc
from . import constants
from .util import utilities as util_
class DeleteNet(nn.Module):
def __init__(self, config):
super(DeleteNet, self).__init__()
self.node_encoder = base_networks.NodeEncoder(config['node_encoder_config'])
self.bg_fusion_module = base_networks.LinearEncoder(config['bg_fusion_module_config'])
def forward(self, graph):
"""DeleteNet forward pass.
Note: Assume that the graph contains the background node as the first node.
Args:
graph: a torch_geometric.Data instance with attributes:
- rgb: a [N, 256, h, w] torch.FloatTensor of ResnNet50+FPN rgb image features
- depth: a [N, 3, h, w] torch.FloatTensor. XYZ image
- mask: a [N, h, w] torch.FloatTensor of values in {0, 1}
- orig_masks: a [N, H, W] torch.FloatTensor of values in {0, 1}. Original image size.
- crop_indices: a [N, 4] torch.LongTensor. xmin, ymin, xmax, ymax.
Returns:
a [N] torch.FloatTensor of delete score logits. The first logit (background) is always low,
so BG is never deleted.
"""
encodings = self.node_encoder(graph) # dictionary
concat_features = torch.cat([encodings[key] for key in encodings], dim=1) # [N, \sum_i d_i]
bg_feature = concat_features[0:1] # [1, \sum_i d_i]
node_features = concat_features[1:] # [N-1, \sum_i d_i]
node_minus_bg_features = node_features - bg_feature # [N-1, \sum_i d_i]
node_delete_logits = self.bg_fusion_module(node_minus_bg_features) # [N-1, 1]
delete_logits = torch.cat([torch.ones((1, 1), device=constants.DEVICE) * -100,
node_delete_logits], dim=0)
return delete_logits[:,0]
class DeleteNetWrapper(base_networks.NetworkWrapper):
def setup(self):
if 'deletenet_model' in self.config:
self.model = self.config['deletenet_model']
else:
self.model = DeleteNet(self.config)
self.model.to(self.device)
def get_new_potential_masks(self, masks, fg_mask):
"""Compute new potential masks.
See if any connected components of fg_mask _setminus_ mask can be
considered as a new mask. Concatenate them to masks.
Args:
masks: a [N, H, W] torch.Tensor with values in {0, 1}.
fg_mask: a [H, W] torch.Tensor with values in {0, 1}.
Returns:
a [N + delta, H, W] np.ndarray of new masks. delta = #new_masks.
"""
occupied_mask = masks.sum(dim=0) > 0.5
fg_mask = fg_mask.cpu().numpy().astype(np.uint8)
fg_mask[occupied_mask.cpu().numpy()] = 0
fg_mask = cv2.erode(fg_mask, np.ones((3,3)), iterations=1)
nc, components = cv2.connectedComponents(fg_mask, connectivity=8)
components = torch.from_numpy(components).float().to(constants.DEVICE)
for j in range(1, nc):
mask = components == j
component_size = mask.sum().float()
if component_size > self.config['min_pixels_thresh']:
masks = torch.cat([masks, mask[None].float()], dim=0)
return masks
def delete_scores(self, graph):
"""Compute delete scores for each node in the graph.
Args:
graph: a torch_geometric.Data instance
Returns:
a [N] torch.Tensor with values in [0, 1]
"""
return torch.sigmoid(self.model(graph))
| [
"torch.ones",
"numpy.ones",
"torch.cat",
"cv2.connectedComponents",
"torch.from_numpy"
] | [((1587, 1642), 'torch.cat', 'torch.cat', (['[encodings[key] for key in encodings]'], {'dim': '(1)'}), '([encodings[key] for key in encodings], dim=1)\n', (1596, 1642), False, 'import torch\n'), ((3169, 3217), 'cv2.connectedComponents', 'cv2.connectedComponents', (['fg_mask'], {'connectivity': '(8)'}), '(fg_mask, connectivity=8)\n', (3192, 3217), False, 'import cv2\n'), ((3105, 3120), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (3112, 3120), True, 'import numpy as np\n'), ((1993, 2036), 'torch.ones', 'torch.ones', (['(1, 1)'], {'device': 'constants.DEVICE'}), '((1, 1), device=constants.DEVICE)\n', (2003, 2036), False, 'import torch\n'), ((3239, 3267), 'torch.from_numpy', 'torch.from_numpy', (['components'], {}), '(components)\n', (3255, 3267), False, 'import torch\n')] |
from resources import get_subreddits, update_subreddits
"""
subreddits:
{
'<subreddit name>': {
'phrases': [
'<phrases>'
],
'flairs': [
'<flairs>'
],
'include': <boolean>,
'unflaired': <boolean>
},
...
}
"""
subreddits = get_subreddits()
def list():
return subreddits.keys()
def add(name):
subreddits[name] = {
'phrases': [],
'flairs': [],
'include': False,
'unflaired': True
}
update_subreddits(subreddits)
def remove(name):
del subreddits[name]
update_subreddits(subreddits)
def clear():
subreddits.clear()
    update_subreddits(subreddits)
| [
"resources.get_subreddits",
"resources.update_subreddits"
] | [((307, 323), 'resources.get_subreddits', 'get_subreddits', ([], {}), '()\n', (321, 323), False, 'from resources import get_subreddits, update_subreddits\n'), ((516, 545), 'resources.update_subreddits', 'update_subreddits', (['subreddits'], {}), '(subreddits)\n', (533, 545), False, 'from resources import get_subreddits, update_subreddits\n'), ((595, 624), 'resources.update_subreddits', 'update_subreddits', (['subreddits'], {}), '(subreddits)\n', (612, 624), False, 'from resources import get_subreddits, update_subreddits\n'), ((667, 696), 'resources.update_subreddits', 'update_subreddits', (['subreddits'], {}), '(subreddits)\n', (684, 696), False, 'from resources import get_subreddits, update_subreddits\n')] |
# -*- coding: utf-8 -*-
"""Console script to generate goals for real_robots"""
import click
import numpy as np
from real_robots.envs import Goal
import gym
import math
basePosition = None
slow = False
render = False
def pairwise_distances(a):
b = a.reshape(a.shape[0], 1, a.shape[1])
return np.sqrt(np.einsum('ijk, ijk->ij', a-b, a-b))
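# Worked example: pairwise_distances(np.array([[0., 0., 0.], [3., 4., 0.]]))
# returns [[0., 5.], [5., 0.]] -- the symmetric matrix of Euclidean distances
# between the rows of `a`.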
def runEnv(env, max_t=1000):
reward = 0
done = False
render = slow
action = {'joint_command': np.zeros(9), 'render': render}
objects = env.robot.used_objects[1:]
positions = np.vstack([env.get_obj_pose(obj) for obj in objects])
still = False
stable = 0
for t in range(max_t):
old_positions = positions
observation, reward, done, _ = env.step(action)
positions = np.vstack([env.get_obj_pose(obj) for obj in objects])
maxPosDiff = 0
maxOrientDiff = 0
for i, obj in enumerate(objects):
posDiff = np.linalg.norm(old_positions[i][:3] - positions[i][:3])
q1 = old_positions[i][3:]
q2 = positions[i][3:]
orientDiff = min(np.linalg.norm(q1 - q2), np.linalg.norm(q1+q2))
maxPosDiff = max(maxPosDiff, posDiff)
maxOrientDiff = max(maxOrientDiff, orientDiff)
if maxPosDiff < 0.0001 and maxOrientDiff < 0.001 and t > 10:
stable += 1
else:
stable = 0
action['render'] = slow
if stable > 19:
action['render'] = True
if stable > 20:
still = True
break
pos_dict = {}
for obj in objects:
pos_dict[obj] = env.get_obj_pose(obj)
print("Exiting environment after {} timesteps..".format(t))
if not still:
print("Failed because maxPosDiff:{:.6f},"
"maxOrientDiff:{:.6f}".format(maxPosDiff, maxOrientDiff))
return observation['retina'], pos_dict, not still, t, observation['mask']
class Position:
def __init__(self, start_state=None, fixed_state=None, retina=None, mask=None):
self.start_state = start_state
self.fixed_state = fixed_state
self.retina = retina
self.mask = mask
def generatePosition(env, obj, fixed=False, tablePlane=None):
if tablePlane is None:
min_x = -.25
max_x = .25
elif tablePlane:
min_x = -.25
max_x = .05
else:
min_x = .10
max_x = .25
min_y = -.45
max_y = .45
x = np.random.rand()*(max_x-min_x)+min_x
y = np.random.rand()*(max_y-min_y)+min_y
if x <= 0.05:
z = 0.40
else:
z = 0.50
if fixed:
orientation = basePosition[obj][3:]
else:
orientation = (np.random.rand(3)*math.pi*2).tolist()
orientation = env._p.getQuaternionFromEuler(orientation)
pose = [x, y, z] + np.array(orientation).tolist()
return pose
def generateRealPosition(env, startPositions):
env.reset()
runEnv(env)
# Generate Images
for obj in startPositions:
pos = startPositions[obj]
env.robot.object_bodies[obj].reset_pose(pos[:3], pos[3:])
actual_image, actual_position, failed, it, mask = runEnv(env)
return actual_image, actual_position, failed, it, mask
def checkMinSeparation(state):
positions = np.vstack([state[obj][:3] for obj in state])
if len(positions) > 1:
distances = pairwise_distances(positions)
clearance = distances[distances > 0].min()
else:
clearance = np.inf
return clearance
def drawPosition(env, fixedOrientation=False, fixedObjects=[],
fixedPositions=None, minSeparation=0, objOnTable=None):
failed = True
while failed:
# skip 1st object, i.e the table
objects = env.robot.used_objects[1:]
position = Position()
startPositions = {}
for obj in fixedObjects:
startPositions[obj] = fixedPositions[obj]
for obj in np.random.permutation(objects):
if obj in fixedObjects:
continue
while True:
table = None
if objOnTable is not None:
if obj in objOnTable:
table = objOnTable[obj]
startPose = generatePosition(env, obj,
fixedOrientation,
tablePlane=table)
startPositions[obj] = startPose
if len(startPositions) == 1:
break
clearance = checkMinSeparation(startPositions)
if clearance >= minSeparation:
break
print("Failed minimum separation ({}), draw again {}.."
.format(clearance, obj))
(a, p, f, it, m) = generateRealPosition(env, startPositions)
actual_image = a
actual_mask = m
actual_position = p
failed = f
if failed:
print("Failed image generation...")
continue
clearance = checkMinSeparation(actual_position)
if clearance < minSeparation:
failed = True
print("Failed minimum separation ({}) after real generation, "
"draw again everything..".format(clearance))
continue
if fixedOrientation:
for obj in objects:
q1 = startPositions[obj][3:]
q2 = actual_position[obj][3:]
orientDiff = min(np.linalg.norm(q1 - q2),
np.linalg.norm(q1+q2))
# TODO CHECK This - we had to rise it many times
failed = failed or orientDiff > 0.041
if failed:
print("{} changed orientation by {}"
.format(obj, orientDiff))
break
else:
print("{} kept orientation.".format(obj))
if failed:
print("Failed to keep orientation...")
continue
for obj in fixedObjects:
posDiff = np.linalg.norm(startPositions[obj][:3] -
actual_position[obj][:3])
q1 = startPositions[obj][3:]
q2 = actual_position[obj][3:]
orientDiff = min(np.linalg.norm(q1 - q2), np.linalg.norm(q1+q2))
failed = failed or posDiff > 0.002 or orientDiff > 0.041
if failed:
print("{} changed pos by {} and orientation by {}"
.format(obj, posDiff, orientDiff))
print(startPositions[obj])
print(actual_position[obj])
break
if failed:
print("Failed to keep objects fixed...")
continue
position.start_state = startPositions
position.fixed_state = actual_position
position.retina = actual_image
position.mask = actual_mask
return position
def checkRepeatability(env, goals):
maxDiffPos = 0
maxDiffOr = 0
for goal in goals:
_, pos, failed, _, _ = generateRealPosition(env, goal.initial_state)
objects = [o for o in goal.initial_state]
p0 = np.vstack([goal.initial_state[o] for o in objects])
p1 = np.vstack([pos[o] for o in objects])
diffPos = np.linalg.norm(p1[:, :3]-p0[:, :3])
diffOr = min(np.linalg.norm(p1[:, 3:]-p0[:, 3:]),
np.linalg.norm(p1[:, 3:]+p0[:, 3:]))
maxDiffPos = max(maxDiffPos, diffPos)
maxDiffOr = max(maxDiffPos, diffOr)
print("Replicated diffPos:{} diffOr:{}".format(diffPos, diffOr))
if failed:
print("*****************FAILED************!!!!")
return 1000000
return maxDiffPos, maxDiffOr
def isOnShelf(obj, state):
z = state[obj][2]
if obj == 'cube' and z > 0.55 - 0.15:
return True
if obj == 'orange' and z > 0.55 - 0.15:
return True
if obj == 'tomato' and z > 0.55 - 0.15:
return True
if obj == 'mustard' and z > 0.545 - 0.15:
return True
return False
def isOnTable(obj, state):
z = state[obj][2]
if obj == 'cube' and z < 0.48 - 0.15:
return True
if obj == 'orange' and z < 0.48 - 0.15:
return True
if obj == 'tomato' and z < 0.49 - 0.15:
return True
if obj == 'mustard' and z < 0.48 - 0.15:
return True
return False
def generateGoalREAL2020(env, n_obj, goal_type, on_shelf=False, min_start_goal_dist=0.1, min_objects_dist=0.05, max_objects_dist=2):
print("Generating GOAL..")
objOnTable = None
if not on_shelf:
objects = env.robot.used_objects[1:]
objOnTable = {}
for obj in objects:
objOnTable[obj] = True
if goal_type == '3D':
fixedOrientation = False
else:
fixedOrientation = True
found = False
while not(found):
initial = drawPosition(env, fixedOrientation=fixedOrientation, objOnTable=objOnTable, minSeparation=min_objects_dist)
found = True
# checks whether at least two objects are close together as specified in max_objects_dist
if n_obj == 1:
at_least_two_near_objects = True
else:
at_least_two_near_objects = False
for obj1 in initial.fixed_state.keys():
for obj2 in initial.fixed_state.keys():
if obj1 == obj2:
continue
if np.linalg.norm(initial.fixed_state[obj1][:3]-initial.fixed_state[obj2][:3]) <= max_objects_dist or goal_type != '3D' or len(initial.fixed_state.keys()) == 1:
at_least_two_near_objects = True
break
if at_least_two_near_objects:
break
        # checks if at least one object is on the shelf
at_least_one_on_shelf = False
for obj in initial.fixed_state.keys():
if isOnShelf(obj, initial.fixed_state) or goal_type == '2D':
at_least_one_on_shelf = True
break
found = False
while not(found):
found = True
final = drawPosition(env, fixedOrientation=fixedOrientation, objOnTable=objOnTable, minSeparation=min_objects_dist)
        # checks whether at least two objects are close together as specified in max_objects_dist. This is checked only if it was not already true for the initial positions
if not at_least_two_near_objects:
found = False
for obj1 in final.fixed_state.keys():
for obj2 in final.fixed_state.keys():
if obj1 == obj2:
continue
if np.linalg.norm(final.fixed_state[obj1][:3]-final.fixed_state[obj2][:3]) <= max_objects_dist:
found = True
break
if found:
break
        # checks if at least one object is on the shelf. This is checked only if it was not already true for the initial positions
if found and not at_least_one_on_shelf:
found = False
for obj in final.fixed_state.keys():
if isOnShelf(obj, final.fixed_state):
found = True
break
        # checks that the distance between the initial and final position of each object is at least min_start_goal_dist
for obj in final.fixed_state.keys():
if min_start_goal_dist > np.linalg.norm(final.fixed_state[obj][:2]-initial.fixed_state[obj][:2]):
found = False
break
goal = Goal()
goal.challenge = goal_type
goal.subtype = str(n_obj)
goal.initial_state = initial.fixed_state
goal.final_state = final.fixed_state
goal.retina_before = initial.retina
goal.retina = final.retina
goal.mask = final.mask
print("SUCCESSFULL generation of GOAL {}!".format(goal_type))
return goal
def visualizeGoalDistribution(all_goals, images=True):
import matplotlib.pyplot as plt
challenges = np.unique([goal.challenge for goal in all_goals])
fig, axes = plt.subplots(max(2, len(challenges)), 3)
for c, challenge in enumerate(challenges):
goals = [goal for goal in all_goals if goal.challenge == challenge]
if len(goals) > 0:
if images:
# Superimposed images view
tomatos = sum([goal.mask == 2 for goal in goals])
mustards = sum([goal.mask == 3 for goal in goals])
cubes = sum([goal.mask == 4 for goal in goals])
axes[c, 0].imshow(tomatos, cmap='gray')
axes[c, 1].imshow(mustards, cmap='gray')
axes[c, 2].imshow(cubes, cmap='gray')
else:
# Positions scatter view
for i, o in enumerate(goals[0].final_state.keys()):
positions = np.vstack([goal.final_state[o] for goal in goals])
axes[c, i].set_title("{} {}".format(o, challenge))
axes[c, i].hist2d(positions[:, 0], positions[:, 1])
axes[c, i].set_xlim([-0.3, 0.3])
axes[c, i].set_ylim([-0.6, 0.6])
plt.show()
@click.command()
@click.option('--seed', type=int,
help='Generate goals using this SEED for numpy.random')
@click.option('--n_2d_goals', type=int, default=25,
help='# of 2D goals (default 25)')
@click.option('--n_25d_goals', type=int, default=15,
help='# of 2.5D goals (default 15)')
@click.option('--n_3d_goals', type=int, default=10,
help='# of 3D goals (default 10)')
@click.option('--n_obj', type=int, default=3,
help='# of objects (default 3)')
def main(seed=None, n_2d_goals=25, n_25d_goals=15, n_3d_goals=10, n_obj=3):
"""
Generates the specified number of goals
and saves them in a file.\n
The file is called goals-REAL2020-s{}-{}-{}-{}-{}.npy.npz
    where the curly-bracket placeholders are replaced with the
    supplied options (seed, n_2d_goals, n_25d_goals, n_3d_goals, n_obj)
    or their default values.
"""
np.random.seed(seed)
allgoals = []
env = gym.make('REALRobot2020-R1J{}-v0'.format(n_obj))
if render:
env.render('human')
env.reset()
global basePosition
_, basePosition, _, _, _ = runEnv(env)
# In these for loops, we could add some progress bar...
for _ in range(n_2d_goals):
allgoals += [generateGoalREAL2020(env, n_obj, "2D", on_shelf=False, min_start_goal_dist=0.2, min_objects_dist=0.25)]
for _ in range(n_25d_goals):
allgoals += [generateGoalREAL2020(env, n_obj, "2.5D", on_shelf=True, min_start_goal_dist=0.2, min_objects_dist=0.25)]
for _ in range(n_3d_goals):
allgoals += [generateGoalREAL2020(env, n_obj, "3D", on_shelf=True, min_start_goal_dist=0.2, min_objects_dist=0)]
np.savez_compressed('goals-REAL2020-s{}-{}-{}-{}-{}.npy'
.format(seed, n_2d_goals, n_25d_goals, n_3d_goals, n_obj), allgoals)
checkRepeatability(env, allgoals)
visualizeGoalDistribution(allgoals)
if __name__ == "__main__":
main()
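
# Illustrative invocation (the module's file name is not shown here, so the name below is assumed):
#   python generate_goals.py --seed 42 --n_obj 3
# With the default goal counts this writes goals-REAL2020-s42-25-15-10-3.npy.npz
# (np.savez_compressed appends the .npz extension to the given name).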
| [
"matplotlib.pyplot.show",
"numpy.random.seed",
"numpy.random.rand",
"click.option",
"numpy.einsum",
"numpy.zeros",
"click.command",
"numpy.linalg.norm",
"numpy.array",
"numpy.random.permutation",
"numpy.vstack",
"real_robots.envs.Goal",
"numpy.unique"
] | [((13119, 13134), 'click.command', 'click.command', ([], {}), '()\n', (13132, 13134), False, 'import click\n'), ((13136, 13229), 'click.option', 'click.option', (['"""--seed"""'], {'type': 'int', 'help': '"""Generate goals using this SEED for numpy.random"""'}), "('--seed', type=int, help=\n 'Generate goals using this SEED for numpy.random')\n", (13148, 13229), False, 'import click\n'), ((13240, 13330), 'click.option', 'click.option', (['"""--n_2d_goals"""'], {'type': 'int', 'default': '(25)', 'help': '"""# of 2D goals (default 25)"""'}), "('--n_2d_goals', type=int, default=25, help=\n '# of 2D goals (default 25)')\n", (13252, 13330), False, 'import click\n'), ((13341, 13434), 'click.option', 'click.option', (['"""--n_25d_goals"""'], {'type': 'int', 'default': '(15)', 'help': '"""# of 2.5D goals (default 15)"""'}), "('--n_25d_goals', type=int, default=15, help=\n '# of 2.5D goals (default 15)')\n", (13353, 13434), False, 'import click\n'), ((13445, 13535), 'click.option', 'click.option', (['"""--n_3d_goals"""'], {'type': 'int', 'default': '(10)', 'help': '"""# of 3D goals (default 10)"""'}), "('--n_3d_goals', type=int, default=10, help=\n '# of 3D goals (default 10)')\n", (13457, 13535), False, 'import click\n'), ((13546, 13623), 'click.option', 'click.option', (['"""--n_obj"""'], {'type': 'int', 'default': '(3)', 'help': '"""# of objects (default 3)"""'}), "('--n_obj', type=int, default=3, help='# of objects (default 3)')\n", (13558, 13623), False, 'import click\n'), ((3266, 3310), 'numpy.vstack', 'np.vstack', (['[state[obj][:3] for obj in state]'], {}), '([state[obj][:3] for obj in state])\n', (3275, 3310), True, 'import numpy as np\n'), ((11508, 11514), 'real_robots.envs.Goal', 'Goal', ([], {}), '()\n', (11512, 11514), False, 'from real_robots.envs import Goal\n'), ((11954, 12003), 'numpy.unique', 'np.unique', (['[goal.challenge for goal in all_goals]'], {}), '([goal.challenge for goal in all_goals])\n', (11963, 12003), True, 'import numpy as np\n'), ((13105, 13115), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13113, 13115), True, 'import matplotlib.pyplot as plt\n'), ((14044, 14064), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (14058, 14064), True, 'import numpy as np\n'), ((312, 351), 'numpy.einsum', 'np.einsum', (['"""ijk, ijk->ij"""', '(a - b)', '(a - b)'], {}), "('ijk, ijk->ij', a - b, a - b)\n", (321, 351), True, 'import numpy as np\n'), ((461, 472), 'numpy.zeros', 'np.zeros', (['(9)'], {}), '(9)\n', (469, 472), True, 'import numpy as np\n'), ((3925, 3955), 'numpy.random.permutation', 'np.random.permutation', (['objects'], {}), '(objects)\n', (3946, 3955), True, 'import numpy as np\n'), ((7181, 7232), 'numpy.vstack', 'np.vstack', (['[goal.initial_state[o] for o in objects]'], {}), '([goal.initial_state[o] for o in objects])\n', (7190, 7232), True, 'import numpy as np\n'), ((7246, 7282), 'numpy.vstack', 'np.vstack', (['[pos[o] for o in objects]'], {}), '([pos[o] for o in objects])\n', (7255, 7282), True, 'import numpy as np\n'), ((7301, 7338), 'numpy.linalg.norm', 'np.linalg.norm', (['(p1[:, :3] - p0[:, :3])'], {}), '(p1[:, :3] - p0[:, :3])\n', (7315, 7338), True, 'import numpy as np\n'), ((942, 997), 'numpy.linalg.norm', 'np.linalg.norm', (['(old_positions[i][:3] - positions[i][:3])'], {}), '(old_positions[i][:3] - positions[i][:3])\n', (956, 997), True, 'import numpy as np\n'), ((2446, 2462), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2460, 2462), True, 'import numpy as np\n'), ((2491, 2507), 'numpy.random.rand', 
'np.random.rand', ([], {}), '()\n', (2505, 2507), True, 'import numpy as np\n'), ((6086, 6152), 'numpy.linalg.norm', 'np.linalg.norm', (['(startPositions[obj][:3] - actual_position[obj][:3])'], {}), '(startPositions[obj][:3] - actual_position[obj][:3])\n', (6100, 6152), True, 'import numpy as np\n'), ((7358, 7395), 'numpy.linalg.norm', 'np.linalg.norm', (['(p1[:, 3:] - p0[:, 3:])'], {}), '(p1[:, 3:] - p0[:, 3:])\n', (7372, 7395), True, 'import numpy as np\n'), ((7416, 7453), 'numpy.linalg.norm', 'np.linalg.norm', (['(p1[:, 3:] + p0[:, 3:])'], {}), '(p1[:, 3:] + p0[:, 3:])\n', (7430, 7453), True, 'import numpy as np\n'), ((1099, 1122), 'numpy.linalg.norm', 'np.linalg.norm', (['(q1 - q2)'], {}), '(q1 - q2)\n', (1113, 1122), True, 'import numpy as np\n'), ((1124, 1147), 'numpy.linalg.norm', 'np.linalg.norm', (['(q1 + q2)'], {}), '(q1 + q2)\n', (1138, 1147), True, 'import numpy as np\n'), ((2810, 2831), 'numpy.array', 'np.array', (['orientation'], {}), '(orientation)\n', (2818, 2831), True, 'import numpy as np\n'), ((6302, 6325), 'numpy.linalg.norm', 'np.linalg.norm', (['(q1 - q2)'], {}), '(q1 - q2)\n', (6316, 6325), True, 'import numpy as np\n'), ((6327, 6350), 'numpy.linalg.norm', 'np.linalg.norm', (['(q1 + q2)'], {}), '(q1 + q2)\n', (6341, 6350), True, 'import numpy as np\n'), ((11371, 11444), 'numpy.linalg.norm', 'np.linalg.norm', (['(final.fixed_state[obj][:2] - initial.fixed_state[obj][:2])'], {}), '(final.fixed_state[obj][:2] - initial.fixed_state[obj][:2])\n', (11385, 11444), True, 'import numpy as np\n'), ((5480, 5503), 'numpy.linalg.norm', 'np.linalg.norm', (['(q1 - q2)'], {}), '(q1 - q2)\n', (5494, 5503), True, 'import numpy as np\n'), ((5538, 5561), 'numpy.linalg.norm', 'np.linalg.norm', (['(q1 + q2)'], {}), '(q1 + q2)\n', (5552, 5561), True, 'import numpy as np\n'), ((12800, 12850), 'numpy.vstack', 'np.vstack', (['[goal.final_state[o] for goal in goals]'], {}), '([goal.final_state[o] for goal in goals])\n', (12809, 12850), True, 'import numpy as np\n'), ((2683, 2700), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (2697, 2700), True, 'import numpy as np\n'), ((9425, 9502), 'numpy.linalg.norm', 'np.linalg.norm', (['(initial.fixed_state[obj1][:3] - initial.fixed_state[obj2][:3])'], {}), '(initial.fixed_state[obj1][:3] - initial.fixed_state[obj2][:3])\n', (9439, 9502), True, 'import numpy as np\n'), ((10590, 10663), 'numpy.linalg.norm', 'np.linalg.norm', (['(final.fixed_state[obj1][:3] - final.fixed_state[obj2][:3])'], {}), '(final.fixed_state[obj1][:3] - final.fixed_state[obj2][:3])\n', (10604, 10663), True, 'import numpy as np\n')] |
import os
import sys
import time
import argparse
from svgrasterize import *
import numpy as np
np.set_printoptions(formatter={'float': lambda x: "{0:0.3f}".format(x)})
parser = argparse.ArgumentParser()
parser.add_argument("svg", help="input SVG file")
parser.add_argument("output", help="output PNG file")
parser.add_argument("-bg", type=svg_color, help="set default background color")
parser.add_argument("-fg", type=svg_color, help="set default foreground color")
parser.add_argument("-w", "--width", type=int, help="output width")
parser.add_argument("-id", help="render single element with specified `id`")
parser.add_argument(
"-t", "--transform", type=svg_transform, help="apply additional transformation"
)
parser.add_argument("--linear-rgb", action="store_true", help="use linear RGB for rendering")
parser.add_argument("--fonts", nargs="*", help="paths to SVG files containing all fonts")
opts = parser.parse_args()
if not os.path.exists(opts.svg):
sys.stderr.write(f"[error] file does not exsits: {opts.svg}\n")
sys.exit(1)
fonts = FontsDB()
for font in opts.fonts or [DEFAULT_FONTS]:
fonts.register_file(font)
transform = Transform().matrix(0, 1, 0, 1, 0, 0)
if opts.transform:
transform @= opts.transform
if opts.svg.endswith(".path"):
path = Path.from_svg(open(opts.svg).read())
print(path,"\n")
opts.bg = svg_color("white") if opts.bg is None else opts.bg
opts.fg = svg_color("black") if opts.fg is None else opts.fg
scene = Scene.fill(path, opts.fg)
ids, size = {}, None
else:
scene, ids, size = svg_scene_from_filepath(
opts.svg, fg=opts.fg, width=opts.width, fonts=fonts
)
if scene is None:
sys.stderr.write("[error] nothing to render\n")
if opts.id is not None:
size = None
scene = ids.get(opts.id)
if scene is None:
sys.stderr.write(f"[error] no object with id: {opts.id}\n")
sys.exit(1)
start = time.time()
if size is not None:
w, h = size
result = scene.render(
transform, viewport=[0, 0, int(h), int(w)], linear_rgb=opts.linear_rgb
)
else:
result = scene.render(transform, linear_rgb=opts.linear_rgb)
stop = time.time()
sys.stderr.write("[info] rendered in {:.2f}\n".format(stop - start))
sys.stderr.flush()
if result is None:
sys.stderr.write("[error] nothing to render\n")
sys.exit(1)
output, _convex_hull = result
if size is not None:
w, h = size
output = output.convert(pre_alpha=True, linear_rgb=opts.linear_rgb)
base = np.zeros((int(h), int(w), 4), dtype=FLOAT)
image = canvas_merge_at(base, output.image, output.offset)
output = Layer(image, (0, 0), pre_alpha=True, linear_rgb=opts.linear_rgb)
if opts.bg is not None:
output = output.background(opts.bg)
filename = opts.output if opts.output != "-" else 1
closefd = opts.output != "-"
with open(filename, "wb", closefd=closefd) as file:
output.write_png(file)
# path.fill returns the image region and its offset within the larger image.
# The merge function is Layer.compose: it returns an image region and an offset, i.e. the combined (largest) image.
# canvas_compose specifies how a triangle is blended into the image.
# The blend used by canvas_merge_union is the canvas_compose passed as its first parameter; the default is COMPOSE_OVER.
# Path.mask returns the mask of the triangle: 1 inside the triangle, 0 outside,
# and the triangle edges get values between 0 and 1.
# It also returns an offset.
| [
"os.path.exists",
"argparse.ArgumentParser"
] | [((154, 179), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (177, 179), False, 'import argparse\n'), ((915, 939), 'os.path.exists', 'os.path.exists', (['opts.svg'], {}), '(opts.svg)\n', (929, 939), False, 'import os\n')] |
import unittest
import ramda as R
"""
https://github.com/ramda/ramda/blob/master/test/head.js
"""
class TestHead(unittest.TestCase):
def test_returns_the_first_element_of_an_ordered_collection(self):
self.assertEqual(1, R.head([1, 2, 3]))
self.assertEqual(2, R.head([2, 3]))
self.assertEqual(3, R.head([3]))
self.assertEqual(None, R.head([]))
self.assertEqual('a', R.head('abc'))
self.assertEqual('b', R.head('bc'))
self.assertEqual('c', R.head('c'))
self.assertEqual('', R.head(''))
def test_throws_if_applied_to_None(self):
with self.assertRaises(TypeError):
R.head(None)
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"ramda.head"
] | [((656, 671), 'unittest.main', 'unittest.main', ([], {}), '()\n', (669, 671), False, 'import unittest\n'), ((230, 247), 'ramda.head', 'R.head', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (236, 247), True, 'import ramda as R\n'), ((273, 287), 'ramda.head', 'R.head', (['[2, 3]'], {}), '([2, 3])\n', (279, 287), True, 'import ramda as R\n'), ((313, 324), 'ramda.head', 'R.head', (['[3]'], {}), '([3])\n', (319, 324), True, 'import ramda as R\n'), ((353, 363), 'ramda.head', 'R.head', (['[]'], {}), '([])\n', (359, 363), True, 'import ramda as R\n'), ((392, 405), 'ramda.head', 'R.head', (['"""abc"""'], {}), "('abc')\n", (398, 405), True, 'import ramda as R\n'), ((433, 445), 'ramda.head', 'R.head', (['"""bc"""'], {}), "('bc')\n", (439, 445), True, 'import ramda as R\n'), ((473, 484), 'ramda.head', 'R.head', (['"""c"""'], {}), "('c')\n", (479, 484), True, 'import ramda as R\n'), ((511, 521), 'ramda.head', 'R.head', (['""""""'], {}), "('')\n", (517, 521), True, 'import ramda as R\n'), ((613, 625), 'ramda.head', 'R.head', (['None'], {}), '(None)\n', (619, 625), True, 'import ramda as R\n')] |
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.sitemaps.views import sitemap
from django.urls import include, path
from django.views.generic import TemplateView
from markdownx.views import MarkdownifyView
from exhaust.posts.sitemaps import POSTS_SITE_MAPS
urlpatterns = [
path('', include('exhaust.posts.urls', namespace='posts')),
path('exogram/', include('exhaust.exogram.urls', namespace='exogram')),
path('robots.txt', TemplateView.as_view(template_name='robots.txt', content_type='text/plain')),
path('admin/', admin.site.urls),
path('markdownx/markdownify/', MarkdownifyView.as_view(), name='markdownx_markdownify'),
path('sitemap.xml', sitemap, {'sitemaps': POSTS_SITE_MAPS}, name='django.contrib.sitemaps.views.sitemap'),
# Error page styling tests. It's OK to have these outside of DEBUG (if
# someone wants to pretend they're having a 500 they're more than welcome
# to). It means there's one less branch to test in settings.
path('404/', TemplateView.as_view(template_name='404.html')),
path('500/', TemplateView.as_view(template_name='500.html')),
] + static(
settings.MEDIA_URL, document_root=settings.MEDIA_ROOT
) + static(
settings.STATIC_URL, document_root=settings.STATIC_ROOT
)
| [
"django.urls.path",
"markdownx.views.MarkdownifyView.as_view",
"django.views.generic.TemplateView.as_view",
"django.urls.include",
"django.conf.urls.static.static"
] | [((1262, 1325), 'django.conf.urls.static.static', 'static', (['settings.STATIC_URL'], {'document_root': 'settings.STATIC_ROOT'}), '(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n', (1268, 1325), False, 'from django.conf.urls.static import static\n'), ((1192, 1253), 'django.conf.urls.static.static', 'static', (['settings.MEDIA_URL'], {'document_root': 'settings.MEDIA_ROOT'}), '(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n', (1198, 1253), False, 'from django.conf.urls.static import static\n'), ((601, 632), 'django.urls.path', 'path', (['"""admin/"""', 'admin.site.urls'], {}), "('admin/', admin.site.urls)\n", (605, 632), False, 'from django.urls import include, path\n'), ((731, 841), 'django.urls.path', 'path', (['"""sitemap.xml"""', 'sitemap', "{'sitemaps': POSTS_SITE_MAPS}"], {'name': '"""django.contrib.sitemaps.views.sitemap"""'}), "('sitemap.xml', sitemap, {'sitemaps': POSTS_SITE_MAPS}, name=\n 'django.contrib.sitemaps.views.sitemap')\n", (735, 841), False, 'from django.urls import include, path\n'), ((369, 417), 'django.urls.include', 'include', (['"""exhaust.posts.urls"""'], {'namespace': '"""posts"""'}), "('exhaust.posts.urls', namespace='posts')\n", (376, 417), False, 'from django.urls import include, path\n'), ((441, 493), 'django.urls.include', 'include', (['"""exhaust.exogram.urls"""'], {'namespace': '"""exogram"""'}), "('exhaust.exogram.urls', namespace='exogram')\n", (448, 493), False, 'from django.urls import include, path\n'), ((519, 594), 'django.views.generic.TemplateView.as_view', 'TemplateView.as_view', ([], {'template_name': '"""robots.txt"""', 'content_type': '"""text/plain"""'}), "(template_name='robots.txt', content_type='text/plain')\n", (539, 594), False, 'from django.views.generic import TemplateView\n'), ((669, 694), 'markdownx.views.MarkdownifyView.as_view', 'MarkdownifyView.as_view', ([], {}), '()\n', (692, 694), False, 'from markdownx.views import MarkdownifyView\n'), ((1073, 1119), 'django.views.generic.TemplateView.as_view', 'TemplateView.as_view', ([], {'template_name': '"""404.html"""'}), "(template_name='404.html')\n", (1093, 1119), False, 'from django.views.generic import TemplateView\n'), ((1139, 1185), 'django.views.generic.TemplateView.as_view', 'TemplateView.as_view', ([], {'template_name': '"""500.html"""'}), "(template_name='500.html')\n", (1159, 1185), False, 'from django.views.generic import TemplateView\n')] |
import logging
import pymongo
from quakestats.datasource.mongo2 import (
DataStoreMongo,
)
from quakestats.system import (
conf,
)
logger = logging.getLogger(__name__)
class SystemContext:
def __init__(self):
self.config = conf.cnf
self.ds: DataStoreMongo = None
self.ds_client: pymongo.MongoClient = None
self.configure()
def configure(self):
uri = conf.get_conf_val('MONGO_URI')
self.ds_client = pymongo.MongoClient(uri)
parsed_uri = pymongo.uri_parser.parse_uri(uri)
database_name = parsed_uri["database"]
self.ds = DataStoreMongo(self.ds_client.get_database(database_name))
| [
"pymongo.MongoClient",
"quakestats.system.conf.get_conf_val",
"pymongo.uri_parser.parse_uri",
"logging.getLogger"
] | [((151, 178), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (168, 178), False, 'import logging\n'), ((412, 442), 'quakestats.system.conf.get_conf_val', 'conf.get_conf_val', (['"""MONGO_URI"""'], {}), "('MONGO_URI')\n", (429, 442), False, 'from quakestats.system import conf\n'), ((468, 492), 'pymongo.MongoClient', 'pymongo.MongoClient', (['uri'], {}), '(uri)\n', (487, 492), False, 'import pymongo\n'), ((514, 547), 'pymongo.uri_parser.parse_uri', 'pymongo.uri_parser.parse_uri', (['uri'], {}), '(uri)\n', (542, 547), False, 'import pymongo\n')] |
import smtplib
from email.message import EmailMessage
from credent import secret
tb_headers=["id","przedmiot","wykladowca","forma_zaliczenia","rodz_zajec","ocena1","data1","ocena2","data2"]
def sendEmail(subject,eml_from,eml_to,message):
msg = EmailMessage()
msg.set_content(message)
msg['Subject'] = subject
msg['From'] = eml_from
msg['To'] = eml_to
# Send the message via SMTP server.
print("SENDING INFO EMAIL...")
try:
server = smtplib.SMTP(secret["smtp_host"], secret["smtp_port"])
server.ehlo()
server.login(secret["smtp_login"], secret["smtp_password"])
server.send_message(msg)
server.quit()
print("SENDING OK!")
except:
#raise
print("...sending email: somethin went wrong:(")
def preetyGrade(grade):
if grade=="-":
return "brak"
else:
return str(grade)
def compareT(T1,T2): #T1, T2: tuples holding a database row (a table row with the old/new grades)
lenT1 = len(T1)
lenT2 = len(T2)
if lenT1!=9 and lenT2!=9:
return {"private":"Błąd E1. Nieodpowiednia ilość kolumn. Być może zmeniła się struktura strony źródłowej ?!","public":""}
if lenT2 > lenT1 and lenT1==0:
return {"private":"Dopisano nowy przedmiot: "+T2[1],"public":""}
if lenT1 == lenT2 and lenT1 == 9:
zm=""
L = len(T1)
for i in range(0,L):
if(T1[i]!=T2[i]):
zm = zm +"\r\nZmiana "+tb_headers[i]+" z "+preetyGrade(T1[i])+" na "+preetyGrade(T2[i])+", "
if len(zm)>1:
zm = zm[:-2]
return {"private":"Przedmiot: "+T1[1]+" ("+T1[3]+", "+T1[2]+")"+zm, "public":"Możliwa nowe oceny z przedmiotu: "+T1[1]+" ("+T1[3]+", "+T1[2]+") [powiadomienie automatyczne, grupa WZ_INiN3_PG2]"}
return {"private":"Nieokreślony błąd. Być moze załadowane zostały przedmioty z nowego semestru lub zmeniła się struktura strony źródłowej ?!","public":""}
| [
"email.message.EmailMessage",
"smtplib.SMTP"
] | [((256, 270), 'email.message.EmailMessage', 'EmailMessage', ([], {}), '()\n', (268, 270), False, 'from email.message import EmailMessage\n'), ((482, 536), 'smtplib.SMTP', 'smtplib.SMTP', (["secret['smtp_host']", "secret['smtp_port']"], {}), "(secret['smtp_host'], secret['smtp_port'])\n", (494, 536), False, 'import smtplib\n')] |
import tqdm
import pickle
import logging
import functools
from typing import List, Mapping, Optional
from multiprocessing import Pool
from indra.statements import Statement
from indra_world.sources import eidos, hume, sofia
logger = logging.getLogger(__name__)
def _reader_wrapper(fname, reader, dart_ids=None, **kwargs):
if reader == 'eidos':
pr = eidos.process_json_file(fname, **kwargs)
pr.doc.tree = None
elif reader == 'sofia':
pr = sofia.process_json_file(fname, **kwargs)
elif reader == 'hume':
pr = hume.process_jsonld_file(fname, **kwargs)
if dart_ids:
dart_id = dart_ids.get(fname)
for stmt in pr.statements:
for ev in stmt.evidence:
ev.text_refs['DART'] = dart_id
return pr.statements
def process_reader_outputs(fnames: List[str],
reader: str,
dart_ids: Mapping[str, str] = None,
extract_filter: List[str] = None,
grounding_mode: str = 'compositional',
nproc: int = 8,
output_pkl: str = None) -> List[Statement]:
"""Process a set of reader outputs in parallel.
Parameters
----------
fnames :
The list of file paths to the reader outputs to be processed.
reader :
The name of the reader which produced the outputs.
dart_ids :
A dict which maps each fname in the fnames list to a DART document ID.
        These are then set in the evidence of statements extracted from
the output.
extract_filter :
What types of statements to extract.
grounding_mode :
The type of grounding mode to use for processing.
nproc :
The number of workers to use for parallelization.
output_pkl :
The path to an output pickle file in which to dump the statements
extracted from the outputs.
Returns
-------
:
The list of statements extracted from the outputs.
"""
if extract_filter is None:
extract_filter = ['influence']
pool = Pool(nproc)
chunk_size = 10
process_fun = functools.partial(_reader_wrapper,
reader=reader, dart_ids=dart_ids,
extract_filter=extract_filter,
grounding_mode=grounding_mode)
stmts = []
for res in tqdm.tqdm(pool.imap_unordered(process_fun, fnames,
chunksize=chunk_size),
total=len(fnames)):
stmts += res
logger.debug('Closing pool...')
pool.close()
logger.debug('Joining pool...')
pool.join()
logger.info('Pool closed and joined.')
if output_pkl:
logger.info(f'Writing into {output_pkl}')
with open(output_pkl, 'wb') as fh:
pickle.dump(stmts, fh)
return stmts | [
"functools.partial",
"pickle.dump",
"indra_world.sources.eidos.process_json_file",
"indra_world.sources.hume.process_jsonld_file",
"indra_world.sources.sofia.process_json_file",
"multiprocessing.Pool",
"logging.getLogger"
] | [((234, 261), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (251, 261), False, 'import logging\n'), ((2135, 2146), 'multiprocessing.Pool', 'Pool', (['nproc'], {}), '(nproc)\n', (2139, 2146), False, 'from multiprocessing import Pool\n'), ((2185, 2319), 'functools.partial', 'functools.partial', (['_reader_wrapper'], {'reader': 'reader', 'dart_ids': 'dart_ids', 'extract_filter': 'extract_filter', 'grounding_mode': 'grounding_mode'}), '(_reader_wrapper, reader=reader, dart_ids=dart_ids,\n extract_filter=extract_filter, grounding_mode=grounding_mode)\n', (2202, 2319), False, 'import functools\n'), ((364, 404), 'indra_world.sources.eidos.process_json_file', 'eidos.process_json_file', (['fname'], {}), '(fname, **kwargs)\n', (387, 404), False, 'from indra_world.sources import eidos, hume, sofia\n'), ((473, 513), 'indra_world.sources.sofia.process_json_file', 'sofia.process_json_file', (['fname'], {}), '(fname, **kwargs)\n', (496, 513), False, 'from indra_world.sources import eidos, hume, sofia\n'), ((2914, 2936), 'pickle.dump', 'pickle.dump', (['stmts', 'fh'], {}), '(stmts, fh)\n', (2925, 2936), False, 'import pickle\n'), ((554, 595), 'indra_world.sources.hume.process_jsonld_file', 'hume.process_jsonld_file', (['fname'], {}), '(fname, **kwargs)\n', (578, 595), False, 'from indra_world.sources import eidos, hume, sofia\n')] |
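
# Hypothetical usage sketch (file paths and DART ids below are placeholders, not real outputs):
#
#     stmts = process_reader_outputs(
#         fnames=["outputs/doc1.jsonld", "outputs/doc2.jsonld"],
#         reader="hume",
#         dart_ids={"outputs/doc1.jsonld": "doc-0001", "outputs/doc2.jsonld": "doc-0002"},
#         extract_filter=["influence"],
#         grounding_mode="compositional",
#         nproc=2,
#         output_pkl="statements.pkl",
#     )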
# standard modules
import os
import shutil
import argparse
# aliased standard modules
import pandas as pd
# modules of sanity checker
import lib.paths as paths
import lib.utils as utils
import lib.logger_config as logger_config
# standalone imports
from lib.logger_config import log
from lib.test_config import get_config_of_current_test
from lib.color import Style
'''
Module providing the functionality to add an experiment
to the reference pool. It contains:
- add_line_descr_f: Add a new line to the experiment description file
with all information about an experiment
- main: asks user for additional information about experiment, commits
data of new experiment to git-repository
Help: python add_exp_to_ref.py --help
C.Siegenthaler 07.2020 (C2SM)
J.Jucker 01.2021 (C2SM)
'''
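
# Example invocation (the experiment name below is a placeholder):
#   python add_exp_to_ref.py -e my_new_exp -t welch fldcor rmse emi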
def add_line_descr_f(exp,f_exp_descr):
'''
Add line for exp exp in file f_exp_descr
    :param exp: new experiment name
:param f_exp_descr: file in which the new line has to be added
return: None
'''
log.info('Adding line {} in the file {}:'.format(exp,f_exp_descr))
# open file in dataframe
if not os.path.isfile(f_exp_descr):
# create dataframe
cols_exp_descr_f = ['Experiment name',
'Platform',
'OS',
'Compiler (with version)',
'Optimisation level (-OX)',
'-fast-transcendentals (y/n)',
'-no-prec-sqrt (y/n)',
'-no-prec-div (y/n)',
'welch (y/n)',
'fldcor (y/n)',
'rmse (y/n)',
'emi (y/n)',
'Date of experiment (month yyyy)']
df_exp_descr = pd.DataFrame(columns=cols_exp_descr_f)
else:
df_exp_descr = pd.read_csv(f_exp_descr, sep=';')
# collect information from user
    log.banner('Please give the following information '
               'about your experiment')
dict_line = {'Experiment name': exp}
for col_name in df_exp_descr.keys():
if col_name != 'Experiment name':
# ask the user for info
dict_line[col_name] = input('{} : '.format(col_name))
# amend the information if needed
while True:
# new dataframe containing new line for exp
df_exp_descr_new = df_exp_descr.append(dict_line, ignore_index=True)
log.banner('Here is the content of the description '
'file including your new experiment.')
log.info(df_exp_descr_new)
        answ_chg = input('Is the new file right? (y/n/abort).\n'
                         'If you type n, you will be able to change '
                         'column values\n'
                         'If you type abort, the process of adding '
                         'the experiment {} to the reference is stopped.\n'
'(y/n/abort) : '
''.format(exp))
if answ_chg.upper() == 'Y':
# save new file
df_exp_descr_new.to_csv(f_exp_descr,sep=';',index=False)
# get out of the loop
return False
elif answ_chg.upper() == 'N':
            answ_col = input('Which column field do you want to change? ')
if answ_col in df_exp_descr.keys():
dict_line[answ_col] = input('{} : '.format(answ_col))
else:
log.warning('{} not in columns!'.format(answ_col))
log.info('Columns are {}\n'.format(list(df_exp_descr.columns)))
elif answ_chg.upper() == 'ABORT':
exit()
return()
def main(exp,
tests,
p_stages=paths.p_stages,
p_ref_csv_files=paths.p_ref_csv_files,
ltestsuite=False,
lverbose=False):
# initialisation
new_branch_name = 'test_add_{}'.format(exp)
files_to_commit = []
# fill up file 'Exps_description.csv' with additional
# information via user input
f_exp_descr = os.path.join(p_ref_csv_files,'Exps_description.csv')
if not ltestsuite:
add_line_descr_f(exp=exp,f_exp_descr=f_exp_descr)
files_to_commit.append(f_exp_descr)
for test in tests:
test_cfg = get_config_of_current_test(test)
csv_file = utils.clean_path(p_stages,
'test_postproc_{}_{}.csv'
.format(test,exp))
# what is the filename in the reference pool
filename_in_ref_dir = '{}_{}.csv'.format(test_cfg.ref_name,exp)
# what is the location to store that file
place_for_reference = os.path.join(p_ref_csv_files,
test,
filename_in_ref_dir)
log.debug('Copy {} to {}'.format(csv_file,place_for_reference))
if not ltestsuite:
shutil.copy(csv_file,place_for_reference)
files_to_commit.append(place_for_reference)
# copy pdf with bar-plots from Welch's-test
if test == 'welch':
pdf_file = utils.clean_path(p_stages,
'{}_{}.pdf'.format(test_cfg.ref_name,
exp))
# what is the name of the pdf in the reference pool
filename_in_ref_dir = '{}_plots.pdf'.format(test_cfg.ref_name)
# what is the location to store that file
place_for_reference = os.path.join(p_ref_csv_files,
test,
filename_in_ref_dir)
log.debug('Copy {} to {}'.format(csv_file,place_for_reference))
files_to_commit.append(place_for_reference)
if not ltestsuite:
shutil.copy(pdf_file,place_for_reference)
# root is important to not fail during git commands
os.chdir(paths.rootdir)
# checkout new branch
if not ltestsuite:
log.info('Create and checkout new branch {}'.format(new_branch_name))
git_cmd = 'git checkout -B {}'.format(new_branch_name)
utils.shell_cmd(git_cmd,py_routine='add_exp_to_ref.py')
# commit all modified files prior in the function to git
for file in files_to_commit:
git_cmd = 'git add {}'.format(file)
log.debug(git_cmd)
utils.shell_cmd(git_cmd, py_routine=__name__)
log.debug('Commit files {}'.format(files_to_commit))
commit_message = input('Please type your commit message :')
git_cmd = 'git commit -m "{}"'.format(commit_message)
utils.shell_cmd(git_cmd, py_routine=__name__)
# Finish
log.info(Style.GREEN('Files are added in the new branch: '
'{} in your local git repository.'
.format(new_branch_name)))
log.info('To add the file to the official repository, '
'please perform the following steps:')
log.info('1. Push the new branch into the official repo:')
log.info(' git push --set-upstream origin {}'.format(new_branch_name))
    log.info('2. On the GitHub web interface, open a Pull Request.')
log.banner('End add_exp_to_ref for experiment {}'.format(exp))
return()
if __name__ == '__main__':
# parsing arguments
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--exp','-e', dest='exp',
required=True,
help='exp to add')
parser.add_argument('--p_stages', dest='p_stages',
default=paths.p_stages,
help='relative or absolute path of the csv \
files of the testresults')
parser.add_argument('--p_ref_csv_files', dest='p_ref_csv_files',
default=paths.p_ref_csv_files,
help='path to the pool of csv files, \
one per reference experiment')
parser.add_argument('--tests','-t', dest='tests',
default=['welch','fldcor','rmse','emi'],
nargs='+',
help='Tests to add to reference pool')
parser.add_argument('--verbose','-v', dest='lverbose',
action='store_true',
help='Debug output')
parser.add_argument('--testsuite','-ts', dest='ltestsuite',
action='store_true',
help='Run of testsuite')
args = parser.parse_args()
# init logger
logger_config.init_logger(args.lverbose,__file__)
log.banner('Start execute {} as main()'.format(__file__))
# make all paths from user to absolute paths
args.p_stages = utils.abs_path(args.p_stages)
args.p_ref_csv_files = utils.abs_path(args.p_ref_csv_files)
main(exp=args.exp,
tests=args.tests,
p_stages=args.p_stages,
p_ref_csv_files=args.p_ref_csv_files,
ltestsuite=args.ltestsuite,
lverbose=args.lverbose)
log.banner('End execute {} as main()'.format(__file__))
| [
"pandas.DataFrame",
"lib.logger_config.log.info",
"argparse.ArgumentParser",
"lib.logger_config.init_logger",
"pandas.read_csv",
"lib.logger_config.log.banner",
"shutil.copy",
"lib.logger_config.log.debug",
"lib.utils.shell_cmd",
"os.path.isfile",
"lib.utils.abs_path",
"os.path.join",
"os.chdir",
"lib.test_config.get_config_of_current_test"
] | [((2015, 2089), 'lib.logger_config.log.banner', 'log.banner', (['"""Please give the following informations about your experiment"""'], {}), "('Please give the following informations about your experiment')\n", (2025, 2089), False, 'from lib.logger_config import log\n'), ((4120, 4173), 'os.path.join', 'os.path.join', (['p_ref_csv_files', '"""Exps_description.csv"""'], {}), "(p_ref_csv_files, 'Exps_description.csv')\n", (4132, 4173), False, 'import os\n'), ((6029, 6052), 'os.chdir', 'os.chdir', (['paths.rootdir'], {}), '(paths.rootdir)\n', (6037, 6052), False, 'import os\n'), ((6987, 7088), 'lib.logger_config.log.info', 'log.info', (['"""To add the file to the official repository, please perform the following steps:"""'], {}), "(\n 'To add the file to the official repository, please perform the following steps:'\n )\n", (6995, 7088), False, 'from lib.logger_config import log\n'), ((7099, 7157), 'lib.logger_config.log.info', 'log.info', (['"""1. Push the new branch into the official repo:"""'], {}), "('1. Push the new branch into the official repo:')\n", (7107, 7157), False, 'from lib.logger_config import log\n'), ((7239, 7311), 'lib.logger_config.log.info', 'log.info', (['"""2. On the Open Web interface (GitHub) , open a Pull Request."""'], {}), "('2. On the Open Web interface (GitHub) , open a Pull Request.')\n", (7247, 7311), False, 'from lib.logger_config import log\n'), ((7459, 7538), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), '(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n', (7482, 7538), False, 'import argparse\n'), ((8742, 8792), 'lib.logger_config.init_logger', 'logger_config.init_logger', (['args.lverbose', '__file__'], {}), '(args.lverbose, __file__)\n', (8767, 8792), True, 'import lib.logger_config as logger_config\n'), ((8925, 8954), 'lib.utils.abs_path', 'utils.abs_path', (['args.p_stages'], {}), '(args.p_stages)\n', (8939, 8954), True, 'import lib.utils as utils\n'), ((8982, 9018), 'lib.utils.abs_path', 'utils.abs_path', (['args.p_ref_csv_files'], {}), '(args.p_ref_csv_files)\n', (8996, 9018), True, 'import lib.utils as utils\n'), ((1164, 1191), 'os.path.isfile', 'os.path.isfile', (['f_exp_descr'], {}), '(f_exp_descr)\n', (1178, 1191), False, 'import os\n'), ((1868, 1906), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'cols_exp_descr_f'}), '(columns=cols_exp_descr_f)\n', (1880, 1906), True, 'import pandas as pd\n'), ((1940, 1973), 'pandas.read_csv', 'pd.read_csv', (['f_exp_descr'], {'sep': '""";"""'}), "(f_exp_descr, sep=';')\n", (1951, 1973), True, 'import pandas as pd\n'), ((2530, 2628), 'lib.logger_config.log.banner', 'log.banner', (['"""Here is the content of the description file including your new experiment."""'], {}), "(\n 'Here is the content of the description file including your new experiment.'\n )\n", (2540, 2628), False, 'from lib.logger_config import log\n'), ((2649, 2675), 'lib.logger_config.log.info', 'log.info', (['df_exp_descr_new'], {}), '(df_exp_descr_new)\n', (2657, 2675), False, 'from lib.logger_config import log\n'), ((4337, 4369), 'lib.test_config.get_config_of_current_test', 'get_config_of_current_test', (['test'], {}), '(test)\n', (4363, 4369), False, 'from lib.test_config import get_config_of_current_test\n'), ((4740, 4796), 'os.path.join', 'os.path.join', (['p_ref_csv_files', 'test', 'filename_in_ref_dir'], {}), '(p_ref_csv_files, test, filename_in_ref_dir)\n', (4752, 4796), False, 'import os\n'), ((6252, 6308), 'lib.utils.shell_cmd', 
'utils.shell_cmd', (['git_cmd'], {'py_routine': '"""add_exp_to_ref.py"""'}), "(git_cmd, py_routine='add_exp_to_ref.py')\n", (6267, 6308), True, 'import lib.utils as utils\n'), ((6748, 6793), 'lib.utils.shell_cmd', 'utils.shell_cmd', (['git_cmd'], {'py_routine': '__name__'}), '(git_cmd, py_routine=__name__)\n', (6763, 6793), True, 'import lib.utils as utils\n'), ((4996, 5038), 'shutil.copy', 'shutil.copy', (['csv_file', 'place_for_reference'], {}), '(csv_file, place_for_reference)\n', (5007, 5038), False, 'import shutil\n'), ((5594, 5650), 'os.path.join', 'os.path.join', (['p_ref_csv_files', 'test', 'filename_in_ref_dir'], {}), '(p_ref_csv_files, test, filename_in_ref_dir)\n', (5606, 5650), False, 'import os\n'), ((6471, 6489), 'lib.logger_config.log.debug', 'log.debug', (['git_cmd'], {}), '(git_cmd)\n', (6480, 6489), False, 'from lib.logger_config import log\n'), ((6502, 6547), 'lib.utils.shell_cmd', 'utils.shell_cmd', (['git_cmd'], {'py_routine': '__name__'}), '(git_cmd, py_routine=__name__)\n', (6517, 6547), True, 'import lib.utils as utils\n'), ((5926, 5968), 'shutil.copy', 'shutil.copy', (['pdf_file', 'place_for_reference'], {}), '(pdf_file, place_for_reference)\n', (5937, 5968), False, 'import shutil\n')] |
from urllib.parse import urljoin
import requests
from crosswalk_client.exceptions import BadResponse
from crosswalk_client.objects.domain import DomainObject
from crosswalk_client.validators.domain import validate_required_domain_arg
class UpdateDomain(object):
@validate_required_domain_arg
def update_domain(self, domain, update_attrs):
response = requests.patch(
urljoin(self.service_address, f"domains/{domain}/"),
headers=self.headers,
json=update_attrs,
)
if response.status_code != 200:
raise BadResponse(
"The service responded with a {}: {}".format(
response.status_code, response.content
)
)
return DomainObject(response.json(), client=self)
| [
"urllib.parse.urljoin"
] | [((398, 449), 'urllib.parse.urljoin', 'urljoin', (['self.service_address', 'f"""domains/{domain}/"""'], {}), "(self.service_address, f'domains/{domain}/')\n", (405, 449), False, 'from urllib.parse import urljoin\n')] |
# Create your views here.
from mozdns.views import MozdnsDeleteView
from mozdns.views import MozdnsCreateView
from mozdns.views import MozdnsDetailView
from mozdns.views import MozdnsUpdateView
from mozdns.views import MozdnsListView
from mozdns.sshfp.models import SSHFP
from mozdns.sshfp.forms import SSHFPForm
class SSHFPView(object):
model = SSHFP
form_class = SSHFPForm
queryset = SSHFP.objects.all()
class SSHFPDeleteView(SSHFPView, MozdnsDeleteView):
""" """
class SSHFPDetailView(SSHFPView, MozdnsDetailView):
""" """
template_name = 'sshfp/sshfp_detail.html'
class SSHFPCreateView(SSHFPView, MozdnsCreateView):
""" """
class SSHFPUpdateView(SSHFPView, MozdnsUpdateView):
""" """
class SSHFPListView(SSHFPView, MozdnsListView):
""" """
| [
"mozdns.sshfp.models.SSHFP.objects.all"
] | [((400, 419), 'mozdns.sshfp.models.SSHFP.objects.all', 'SSHFP.objects.all', ([], {}), '()\n', (417, 419), False, 'from mozdns.sshfp.models import SSHFP\n')] |
# A minimal setup.py file to make a Python project installable.
import setuptools
import yaml
with open("README.md", "r") as fh:
long_description = fh.read()
with open("environment.yml", "r") as fh:
env = yaml.safe_load(fh)
requirements = [a.split('=', 1)[0].strip() for a in env['dependencies'] ]
setuptools.setup(
name = "mortie",
version = "0.1.0",
author = "<NAME>",
author_email = "<EMAIL>",
description = "Morton numbering for healpix grids",
long_description = long_description,
long_description_content_type = "text/markdown",
packages = setuptools.find_packages(),
classifiers = [
"Programming Language :: Python :: 3",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
],
python_requires = '>= 3.5',
install_requires = requirements,
)
| [
"yaml.safe_load",
"setuptools.find_packages"
] | [((216, 234), 'yaml.safe_load', 'yaml.safe_load', (['fh'], {}), '(fh)\n', (230, 234), False, 'import yaml\n'), ((638, 664), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (662, 664), False, 'import setuptools\n')] |
import os
import sys
import ctypes
this_dir = os.path.dirname(__file__)
parent_dir = os.path.dirname(this_dir)
sys.path.insert(0, parent_dir)
from pcc.evaluater.c_evaluator import CEvaluator
import unittest
class TestMainReturnPtr(unittest.TestCase):
def test_simple(self):
pcc = CEvaluator()
ret = pcc.evaluate('''
int a = 50;
int b = 4;
int* swap(int *x, int *y){
int tmp;
tmp = *x;
*x = *y;
*y = tmp;
return x;
}
int* main(){
swap(&a, &b);
return &a ;
}
''', llvmdump=True)
# ret_value = ret.contents
print("The answer is {} ret type is {} content ".format(ret, type(ret)))
        # after the swap, global a holds b's original value (4)
assert ret.contents.value == 4
if __name__ == "__main__":
unittest.main() | [
"unittest.main",
"pcc.evaluater.c_evaluator.CEvaluator",
"os.path.dirname",
"sys.path.insert"
] | [((47, 72), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (62, 72), False, 'import os\n'), ((86, 111), 'os.path.dirname', 'os.path.dirname', (['this_dir'], {}), '(this_dir)\n', (101, 111), False, 'import os\n'), ((112, 142), 'sys.path.insert', 'sys.path.insert', (['(0)', 'parent_dir'], {}), '(0, parent_dir)\n', (127, 142), False, 'import sys\n'), ((917, 932), 'unittest.main', 'unittest.main', ([], {}), '()\n', (930, 932), False, 'import unittest\n'), ((296, 308), 'pcc.evaluater.c_evaluator.CEvaluator', 'CEvaluator', ([], {}), '()\n', (306, 308), False, 'from pcc.evaluater.c_evaluator import CEvaluator\n')] |
from pathlib import Path
from markdown import markdown
from gilbert import Site
from gilbert.content import Page
from gilbert.types import LoaderResult
from gilbert.utils import oneshot
class MarkdownPage(Page):
"""
Page type that renders its content as Markdown.
Extensions can be configured in ``config.yml`` via:
content_type:MarkdownPage
or using the ``extras`` attribute.
"""
# List of Markdown extensions to enable.
extras: list = []
@oneshot
def content(self) -> str:
extras = self.extras
if not extras:
extras = self.site.config.get("content_type", {}).get("MarkdownPage", [])
return markdown(self.data, output_format="html5", extensions=extras)
def load_md(path: Path) -> LoaderResult:
data = path.read_text(encoding="utf-8")
return data, {"content_type": "MarkdownPage"}
Site.register_loader("md", load_md)
| [
"gilbert.Site.register_loader",
"markdown.markdown"
] | [((880, 915), 'gilbert.Site.register_loader', 'Site.register_loader', (['"""md"""', 'load_md'], {}), "('md', load_md)\n", (900, 915), False, 'from gilbert import Site\n'), ((678, 739), 'markdown.markdown', 'markdown', (['self.data'], {'output_format': '"""html5"""', 'extensions': 'extras'}), "(self.data, output_format='html5', extensions=extras)\n", (686, 739), False, 'from markdown import markdown\n')] |
import os
import sys
EXGDBFILE = os.path.abspath(os.path.expanduser(__file__))
sys.path.insert(0, os.path.dirname(EXGDBFILE) + "/lib/")
import utils
from enert import *
def clearscreen():
"""
Customized clearscreen from https://github.com/longld/peda
"""
print("\x1b[2J\x1b[H")
utils.clearscreen = clearscreen
| [
"os.path.dirname",
"os.path.expanduser"
] | [((50, 78), 'os.path.expanduser', 'os.path.expanduser', (['__file__'], {}), '(__file__)\n', (68, 78), False, 'import os\n'), ((99, 125), 'os.path.dirname', 'os.path.dirname', (['EXGDBFILE'], {}), '(EXGDBFILE)\n', (114, 125), False, 'import os\n')] |
import httplib2
import urllib
import json
import re
import sys
class Transport:
"""
Abstract representation of a transport class. Defines
the supported API methods
"""
endpoint = "platform.clickatell.com"
def __init__(self):
"""
        Construct a new transport instance.
"""
pass
def merge(self, *args):
"""
Merge multiple dictionary objects into one.
:param variadic args: Multiple dictionary items
:return dict
"""
values = []
for entry in args:
values = values + list(entry.items())
return dict(values)
def parseResponse(self, response):
"""
Parse the response from json.
Remapping error code and messages to be a level higher
"""
response['body'] = json.loads(response['body'])
response['messages'] = response['body']['messages']
response['error'] = response['body']['error']
del response['body']
return response
def request(self, action, data={}, headers={}, method='GET'):
"""
Run the HTTP request against the Clickatell API
:param str action: The API action
:param dict data: The request parameters
:param dict headers: The request headers (if any)
:param str method: The HTTP method
:return: The request response
"""
http = httplib2.Http()
body = urllib.urlencode(data) if (sys.version_info[0] < 3) else urllib.parse.urlencode(data)
url = 'https://' + self.endpoint + '/' + action
url = (url + '?' + body) if (method == 'GET') else url
resp, content = http.request(url, method, headers=headers, body=json.dumps(data))
return self.merge(resp, {'body': content})
def sendMessage(self, to, message, extra={}):
"""
Send a message.
:param list to: The number you want to send to (list of strings, or one string)
:param string message: The message you want to send
:param dict extra: Any extra parameters (see Clickatell documentation)
:return dict
:raises NotImplementedError
"""
raise NotImplementedError() | [
"httplib2.Http",
"json.loads",
"urllib.parse.urlencode",
"json.dumps",
"urllib.urlencode"
] | [((915, 943), 'json.loads', 'json.loads', (["response['body']"], {}), "(response['body'])\n", (925, 943), False, 'import json\n'), ((1524, 1539), 'httplib2.Http', 'httplib2.Http', ([], {}), '()\n', (1537, 1539), False, 'import httplib2\n'), ((1555, 1577), 'urllib.urlencode', 'urllib.urlencode', (['data'], {}), '(data)\n', (1571, 1577), False, 'import urllib\n'), ((1612, 1640), 'urllib.parse.urlencode', 'urllib.parse.urlencode', (['data'], {}), '(data)\n', (1634, 1640), False, 'import urllib\n'), ((1832, 1848), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (1842, 1848), False, 'import json\n')] |
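
# Illustrative subclass sketch (not part of the original module): one way a concrete
# transport could implement sendMessage() on top of request(); the 'messages' route and
# payload field names are assumptions, not confirmed API details.
class HttpTransport(Transport):
    def sendMessage(self, to, message, extra={}):
        # Normalize the recipient(s) to a list, merge extras, and POST as JSON
        to = to if isinstance(to, list) else [to]
        data = self.merge({'to': to, 'content': message}, extra)
        headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
        return self.parseResponse(self.request('messages', data, headers, 'POST'))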
import os
import yaml
class YamlConfig:
def __init__(self, file_path: str = "./settings/config.yml"):
self.file_path = file_path
def exists(self) -> bool:
return os.path.exists(self.file_path)
def load(self) -> dict:
"""
:return: Return yaml data as dictionary format
"""
with open(self.file_path, "r", encoding="utf-8") as yf:
return yaml.load(yf, Loader=yaml.FullLoader)
def write(self, data: dict) -> None:
"""
Export yaml
:param data: A dictionary of data that will be output in Yaml format
"""
with open(self.file_path, "w", encoding="utf-8") as yf:
yaml.dump(data, yf, default_flow_style=False)
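
# Minimal usage sketch (illustrative, not part of the original module): read the config
# if it exists, otherwise fall back to an empty dict.
if __name__ == "__main__":
    config = YamlConfig()
    settings = config.load() if config.exists() else {}
    print(settings)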
| [
"yaml.load",
"yaml.dump",
"os.path.exists"
] | [((194, 224), 'os.path.exists', 'os.path.exists', (['self.file_path'], {}), '(self.file_path)\n', (208, 224), False, 'import os\n'), ((420, 457), 'yaml.load', 'yaml.load', (['yf'], {'Loader': 'yaml.FullLoader'}), '(yf, Loader=yaml.FullLoader)\n', (429, 457), False, 'import yaml\n'), ((701, 746), 'yaml.dump', 'yaml.dump', (['data', 'yf'], {'default_flow_style': '(False)'}), '(data, yf, default_flow_style=False)\n', (710, 746), False, 'import yaml\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
*******************************************************************************
Google Earth Engine Sentinel-1 Lake Area
o Purpose: Estimate surface area of lake from Sentinel-1 SAR data, using
Google Earth Engine cloud computing platform
o Inputs:
* ROI: Google Earth Engine geometry object describing the region of
interest
o Outputs:
* Results: List containing 4 elements (GEE objects):
1) List of lake surface areas from ascending passes
2) List of lake surface areas from descending passes
        3) List of time steps associated with ascending pass surface areas
        4) List of time steps associated with descending pass surface areas
Written by: <NAME>, <EMAIL>
Version 0.3
*******************************************************************************
"""
import ee
ee.Initialize()
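# Hypothetical usage (the asset path is a placeholder): the argument is expected to be an
# ee.Feature carrying an 'ID' property, as read via roi.get('ID') / roi.geometry() below.
#   lake = ee.Feature(ee.FeatureCollection('users/<user>/lake_polygons').first())
#   result = GetS1ResTimeSeries(lake)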
def GetS1ResTimeSeries(roi):
ID = roi.get('ID')
ROI = roi.geometry()
ROI_Diff = ROI.difference(roi)
Date_Start = ee.Date('2017-01-01');
Date_End = ee.Date('2020-01-01');
date_interval = ee.Number(1); #month
angle_threshold_1 = ee.Number(45.4);
angle_threshold_2 = ee.Number(31.66)
AreaImg = ee.Image.pixelArea()
#****Get WaterProb Threshold************************************************
waterProb = ee.Image('JRC/GSW1_1/GlobalSurfaceWater').select('occurrence')
wProbThresh = ee.Number(ee.Image.constant(0).blend(waterProb).rename('occurrence').reduceRegion(
reducer = ee.Reducer.max(),
geometry = ROI,
scale = 100,
maxPixels = 6098838800,
tileScale = 16
).get('occurrence'))
waterConfident = waterProb.gte(wProbThresh)
landConfident = (ee.Image.constant(0).blend(waterProb)).Not().rename('occurrence')
waterConfidentArea = ee.Number(waterConfident.multiply(AreaImg).reduceRegion(
reducer = ee.Reducer.sum(),
geometry = ROI,
scale = 100,
maxPixels = 6098838800,
tileScale = 16,
).get('occurrence'))
landConfidentArea = ee.Number(landConfident.multiply(AreaImg).reduceRegion(
reducer = ee.Reducer.sum(),
geometry = ROI,
scale = 100,
maxPixels = 6098838800,
tileScale = 16,
).get('occurrence'))
#****Create list of dates for time series********************************************
n_steps = Date_End.difference(Date_Start,'month').divide(date_interval).round();
dates = ee.List.sequence(0,n_steps,1);
def make_datelist(n):
return(Date_Start.advance(ee.Number(n).multiply(date_interval),'month'))
dates = dates.map(make_datelist);
#****Filter Edge Pixels**************************************************************
def maskByAngle(img):
I = ee.Image(img)
angle = I.select('angle');
mask1 = angle.lt(angle_threshold_1);
mask2 = angle.gt(angle_threshold_2);
I = I.updateMask(mask1)
return(I.updateMask(mask2))
#****Make S1 Image Collection********************************************************
def create_collection(d):
start = ee.Date(d);
end = ee.Date(d).advance(date_interval,'month');
date_range = ee.DateRange(start,end);
S1 = ee.ImageCollection('COPERNICUS/S1_GRD') \
.filterDate(date_range) \
.filterBounds(ROI) \
.filter(ee.Filter.listContains('transmitterReceiverPolarisation', 'VV')) \
.filter(ee.Filter.eq('instrumentMode', 'IW'))
S1 = ee.ImageCollection(ee.Algorithms.If(
condition = S1.size().gt(0),
trueCase = S1.map(maskByAngle),
falseCase = S1
))
S1_median = ee.Image(S1.select('VV').mean()).clip(ROI)
S1_median = S1_median.set('system:time_start',start.millis())
S1_median = S1_median.set('Number_of_images',S1.size())
return(S1_median)
#****Calc ROI Area**********************************************************************
def calcArea(img):
I = ee.Image(img)
area = I.select('VV').lt(99999999).multiply(AreaImg).reduceRegion(
reducer = ee.Reducer.sum(),
geometry = ROI,
scale = 100,
maxPixels = 6098838800,
tileScale = 16,
).get('VV')
return(I.set('ROI_area',area))
#****Apply Filter**********************************************************************
def focal_median(img):
I = ee.Image(img)
#fm = I.select('VV').rename('VV_smooth')
fm = I.select('VV').focal_median(50,'circle','meters').rename('VV_smooth')
return(I.addBands(fm))
#****Make Water Mask****************************************************************
def MakeWaterMask(img):
I = ee.Image(img)
wThresh = ee.Number(I.get('wThresh'))
waterProb = ee.Image('JRC/GSW1_1/GlobalSurfaceWater').select('occurrence')
Mask = I.select('VV_smooth').updateMask(waterProb).lt(wThresh).rename('WaterMask')
Sum = Mask.multiply(AreaImg).reduceRegion(
reducer = ee.Reducer.sum(),
geometry = ROI,
scale = 100,
maxPixels = 6098838800,
tileScale = 16
)
I = I.set('water_pixels',Sum.get('WaterMask'))
I = I.set('Water_Area',ee.Number(Sum.get('WaterMask')))
return I.addBands(Mask)
    #****Make Backscatter Stats********************************************************
def makeBackscatterStats(img):
img = ee.Image(img)
wMask = img.select('WaterMask')
vv = img.select('VV_smooth')
wPixelmean = vv.updateMask(wMask).reduceRegion(
reducer = ee.Reducer.mean(),
geometry = ROI,
scale = 1000,
maxPixels = 6098838800,
tileScale = 16
)
wPixelStd = vv.updateMask(wMask).reduceRegion(
reducer = ee.Reducer.stdDev(),
geometry = ROI,
scale = 1000,
maxPixels = 6098838800,
tileScale = 16
)
lPixelmean = vv.updateMask(wMask.Not()).reduceRegion(
reducer = ee.Reducer.mean(),
geometry = ROI,
scale = 1000,
maxPixels = 6098838800,
tileScale = 16
)
lPixelStd = vv.updateMask(wMask.Not()).reduceRegion(
reducer = ee.Reducer.stdDev(),
geometry = ROI,
scale = 1000,
maxPixels = 6098838800,
tileScale = 16
)
inPixelmean = vv.reduceRegion(
reducer = ee.Reducer.mean(),
geometry = roi,
scale = 1000,
maxPixels = 6098838800,
tileScale = 16
)
inPixelStd = vv.reduceRegion(
reducer = ee.Reducer.stdDev(),
geometry = roi,
scale = 1000,
maxPixels = 6098838800,
tileScale = 16
)
outPixelmean = vv.reduceRegion(
reducer = ee.Reducer.mean(),
geometry = ROI_Diff,
scale = 1000,
maxPixels = 6098838800,
tileScale = 16
)
outPixelStd = vv.updateMask(wMask.Not()).reduceRegion(
reducer = ee.Reducer.stdDev(),
geometry = ROI_Diff,
scale = 300,
maxPixels = 6098838800,
tileScale = 16
)
img = img.set('wPixelmean',wPixelmean.get('VV_smooth'))
img = img.set('wPixelStd',wPixelStd.get('VV_smooth'))
img = img.set('lPixelmean',lPixelmean.get('VV_smooth'))
img = img.set('lPixelStd',lPixelStd.get('VV_smooth'))
img = img.set('inPixelmean',inPixelmean.get('VV_smooth'))
img = img.set('inPixelStd',inPixelStd.get('VV_smooth'))
img = img.set('outPixelmean',outPixelmean.get('VV_smooth'))
img = img.set('outPixelStd',outPixelStd.get('VV_smooth'))
return img
def makeBackScatterFromJRC(img):
img = ee.Image(img)
waterProb = ee.Image('JRC/GSW1_1/GlobalSurfaceWater').select('occurrence')
waterConfident = waterProb.gte(wProbThresh)
landConfident = (ee.Image.constant(0).blend(waterProb)).Not()
vv = img.select('VV_smooth')
wMean = vv.updateMask(waterConfident).reduceRegion(
reducer = ee.Reducer.mean(),
geometry = ROI,
scale = 100,
maxPixels = 6098838800,
tileScale = 16
)
wStd = vv.updateMask(waterConfident).reduceRegion(
reducer = ee.Reducer.stdDev(),
geometry = ROI,
scale = 100,
maxPixels = 6098838800,
tileScale = 16
)
lMean = vv.updateMask(landConfident).reduceRegion(
reducer = ee.Reducer.mean(),
geometry = ROI,
scale = 100,
maxPixels = 6098838800,
tileScale = 16
)
lStd = vv.updateMask(landConfident).reduceRegion(
reducer = ee.Reducer.stdDev(),
geometry = ROI,
scale = 100,
maxPixels = 6098838800,
tileScale = 16
)
img = img.set('wMean',wMean.get('VV_smooth')).set('lMean',lMean.get('VV_smooth'))
img = img.set('wStd',wStd.get('VV_smooth')).set('lStd',lStd.get('VV_smooth'))
return img
#****Round time*********************************************************************
def Roundtime(img):
I = ee.Image(img)
time = ee.Number(I.get('system:time_start')).round()
return(I.set('system:time_start',time))
#****Calculate Threshold**************************************************************
def calcThresh(img):
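    # Place the water/land threshold between the two class means: offset from the water mean by wStd/(wStd+lStd) of the separation between land and water backscatter.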
img = ee.Image(img)
wMean = ee.Number(img.get('wMean'))
wStd = ee.Number(img.get('wStd'))
lMean = ee.Number(img.get('lMean'))
lStd = ee.Number(img.get('lStd'))
x = (lMean.subtract(wMean)).divide(wStd.add(lStd))
wThresh = wMean.add(wStd.multiply(x))
return img.set('wThresh',wThresh)
#****Calculate Errors*************************************************************
def calcError(img):
img = ee.Image(img)
waterProb = ee.Image('JRC/GSW1_1/GlobalSurfaceWater').select('occurrence')
waterConfident = waterProb.gte(wProbThresh)
landConfident = (ee.Image.constant(0).blend(waterProb)).Not()
vv = img.select('VV_smooth')
thresh = ee.Number(img.get('wThresh'))
wError = ee.Number(vv.gt(thresh).updateMask(waterConfident).rename('wError').multiply(AreaImg).reduceRegion(
reducer = ee.Reducer.sum(),
geometry = ROI,
scale = 100,
maxPixels = 6098838800,
tileScale = 16
).get('wError'))
lError = ee.Number(vv.lt(thresh).updateMask(landConfident).rename('lError').multiply(AreaImg).reduceRegion(
reducer = ee.Reducer.sum(),
geometry = ROI,
scale = 100,
maxPixels = 6098838800,
tileScale = 16
).get('lError'))
#wError = wError.divide(waterConfidentArea.subtract(wError))
#lError = lError.divide(landConfidentArea.subtract(lError))
return img.set('wError',wError).set('lError',lError)
def calcError2(img):
img = ee.Image(img)
wError = ee.Number(img.get('wError'))
lError = ee.Number(img.get('lError'))
wError = wError.divide(waterConfidentArea.subtract(wError))
lError = lError.divide(landConfidentArea.subtract(lError))
return img.set('wError2',wError).set('lError2',lError)
#****Run Functions******************************************************************
S1 = ee.ImageCollection(dates.map(create_collection,True))
S1 = S1.set('wProbThresh',wProbThresh)
S1 = S1.filter(ee.Filter.gt('Number_of_images',0))
S1 = S1.map(calcArea)
S1 = S1.filter(ee.Filter.gt('ROI_area',ee.Number(ROI.area().multiply(0.95))))
S1 = S1.map(focal_median)
#S1 = S1.map(Roundtime)
S1 = S1.map(makeBackScatterFromJRC)
S1 = S1.filter(ee.Filter.gt('wMean',-9999))
S1 = S1.filter(ee.Filter.gt('lMean',-9999))
S1 = S1.filter(ee.Filter.gt('wStd',-9999))
S1 = S1.filter(ee.Filter.gt('lStd',-9999))
S1 = S1.map(calcThresh)
S1 = S1.map(calcError)
S1 = S1.filter(ee.Filter.lt('wError',9999999999999))
S1 = S1.filter(ee.Filter.lt('lError',9999999999999))
S1 = S1.map(calcError2)
S1 = S1.filter(ee.Filter.lt('wError2',9999999999999))
S1 = S1.filter(ee.Filter.lt('lError2',9999999999999))
S1 = S1.map(MakeWaterMask)
#S1 = S1.map(makeBackscatterStats)
#****Extract Time Series***************************************************************
def extractTimeSeries(collection):
WaterArea = ee.Array(collection.aggregate_array('Water_Area')).multiply(0.000001) #Conversion to km^2
time = ee.Array(collection.aggregate_array('system:time_start'))
wMean = ee.Array(collection.aggregate_array('wMean'))
wStd = ee.Array(collection.aggregate_array('wStd'))
lMean = ee.Array(collection.aggregate_array('lMean'))
lStd = ee.Array(collection.aggregate_array('lStd'))
wProbThresh = collection.get('wProbThresh')
ROIArea = ee.Array(collection.aggregate_array('ROI_area')).multiply(0.000001)
WThresh = ee.Array(collection.aggregate_array('wThresh'))
WError = WaterArea.multiply(ee.Array(collection.aggregate_array('wError2')))
LError = ee.Array(collection.aggregate_array('lError2')).multiply(ROIArea.subtract(WaterArea))
exportDict = ee.Dictionary({
'Date': time,
'WaterArea': WaterArea,
'WThresh': WThresh,
'LakeID': ID,
'WError': WError,
'LError': LError
})
exportTable = ee.Feature(None, exportDict)
return exportTable
Export = ee.Algorithms.If(
condition = S1.size().gt(0),
trueCase = ee.Feature(extractTimeSeries(S1)),
falseCase = None
)
return Export
#return([WaterAreaA,WaterAreaD,timeA,timeD,wPixelmeanA, wPixelStdA, lPixelmeanA, lPixelStdA, wPixelmeanD, wPixelStdD, lPixelmeanD, lPixelStdD, inPixelmeanA, inPixelStdA, outPixelmeanA, outPixelStdA, inPixelmeanD, inPixelStdD, outPixelmeanD, outPixelStdD ]) | [
"ee.Filter.listContains",
"ee.Image.pixelArea",
"ee.Image.constant",
"ee.Dictionary",
"ee.Date",
"ee.Reducer.max",
"ee.List.sequence",
"ee.Reducer.mean",
"ee.DateRange",
"ee.Reducer.sum",
"ee.Initialize",
"ee.Filter.gt",
"ee.Filter.lt",
"ee.Reducer.stdDev",
"ee.Number",
"ee.ImageCollection",
"ee.Image",
"ee.Filter.eq",
"ee.Feature"
] | [((884, 899), 'ee.Initialize', 'ee.Initialize', ([], {}), '()\n', (897, 899), False, 'import ee\n'), ((1030, 1051), 'ee.Date', 'ee.Date', (['"""2017-01-01"""'], {}), "('2017-01-01')\n", (1037, 1051), False, 'import ee\n'), ((1068, 1089), 'ee.Date', 'ee.Date', (['"""2020-01-01"""'], {}), "('2020-01-01')\n", (1075, 1089), False, 'import ee\n'), ((1111, 1123), 'ee.Number', 'ee.Number', (['(1)'], {}), '(1)\n', (1120, 1123), False, 'import ee\n'), ((1156, 1171), 'ee.Number', 'ee.Number', (['(45.4)'], {}), '(45.4)\n', (1165, 1171), False, 'import ee\n'), ((1197, 1213), 'ee.Number', 'ee.Number', (['(31.66)'], {}), '(31.66)\n', (1206, 1213), False, 'import ee\n'), ((1228, 1248), 'ee.Image.pixelArea', 'ee.Image.pixelArea', ([], {}), '()\n', (1246, 1248), False, 'import ee\n'), ((2578, 2609), 'ee.List.sequence', 'ee.List.sequence', (['(0)', 'n_steps', '(1)'], {}), '(0, n_steps, 1)\n', (2594, 2609), False, 'import ee\n'), ((2885, 2898), 'ee.Image', 'ee.Image', (['img'], {}), '(img)\n', (2893, 2898), False, 'import ee\n'), ((3231, 3241), 'ee.Date', 'ee.Date', (['d'], {}), '(d)\n', (3238, 3241), False, 'import ee\n'), ((3321, 3345), 'ee.DateRange', 'ee.DateRange', (['start', 'end'], {}), '(start, end)\n', (3333, 3345), False, 'import ee\n'), ((4154, 4167), 'ee.Image', 'ee.Image', (['img'], {}), '(img)\n', (4162, 4167), False, 'import ee\n'), ((4595, 4608), 'ee.Image', 'ee.Image', (['img'], {}), '(img)\n', (4603, 4608), False, 'import ee\n'), ((4902, 4915), 'ee.Image', 'ee.Image', (['img'], {}), '(img)\n', (4910, 4915), False, 'import ee\n'), ((5646, 5659), 'ee.Image', 'ee.Image', (['img'], {}), '(img)\n', (5654, 5659), False, 'import ee\n'), ((8132, 8145), 'ee.Image', 'ee.Image', (['img'], {}), '(img)\n', (8140, 8145), False, 'import ee\n'), ((9632, 9645), 'ee.Image', 'ee.Image', (['img'], {}), '(img)\n', (9640, 9645), False, 'import ee\n'), ((9885, 9898), 'ee.Image', 'ee.Image', (['img'], {}), '(img)\n', (9893, 9898), False, 'import ee\n'), ((10343, 10356), 'ee.Image', 'ee.Image', (['img'], {}), '(img)\n', (10351, 10356), False, 'import ee\n'), ((11458, 11471), 'ee.Image', 'ee.Image', (['img'], {}), '(img)\n', (11466, 11471), False, 'import ee\n'), ((11976, 12011), 'ee.Filter.gt', 'ee.Filter.gt', (['"""Number_of_images"""', '(0)'], {}), "('Number_of_images', 0)\n", (11988, 12011), False, 'import ee\n'), ((12237, 12265), 'ee.Filter.gt', 'ee.Filter.gt', (['"""wMean"""', '(-9999)'], {}), "('wMean', -9999)\n", (12249, 12265), False, 'import ee\n'), ((12285, 12313), 'ee.Filter.gt', 'ee.Filter.gt', (['"""lMean"""', '(-9999)'], {}), "('lMean', -9999)\n", (12297, 12313), False, 'import ee\n'), ((12333, 12360), 'ee.Filter.gt', 'ee.Filter.gt', (['"""wStd"""', '(-9999)'], {}), "('wStd', -9999)\n", (12345, 12360), False, 'import ee\n'), ((12380, 12407), 'ee.Filter.gt', 'ee.Filter.gt', (['"""lStd"""', '(-9999)'], {}), "('lStd', -9999)\n", (12392, 12407), False, 'import ee\n'), ((12482, 12519), 'ee.Filter.lt', 'ee.Filter.lt', (['"""wError"""', '(9999999999999)'], {}), "('wError', 9999999999999)\n", (12494, 12519), False, 'import ee\n'), ((12539, 12576), 'ee.Filter.lt', 'ee.Filter.lt', (['"""lError"""', '(9999999999999)'], {}), "('lError', 9999999999999)\n", (12551, 12576), False, 'import ee\n'), ((12624, 12662), 'ee.Filter.lt', 'ee.Filter.lt', (['"""wError2"""', '(9999999999999)'], {}), "('wError2', 9999999999999)\n", (12636, 12662), False, 'import ee\n'), ((12682, 12720), 'ee.Filter.lt', 'ee.Filter.lt', (['"""lError2"""', '(9999999999999)'], {}), "('lError2', 9999999999999)\n", (12694, 12720), False, 'import 
ee\n'), ((13767, 13894), 'ee.Dictionary', 'ee.Dictionary', (["{'Date': time, 'WaterArea': WaterArea, 'WThresh': WThresh, 'LakeID': ID,\n 'WError': WError, 'LError': LError}"], {}), "({'Date': time, 'WaterArea': WaterArea, 'WThresh': WThresh,\n 'LakeID': ID, 'WError': WError, 'LError': LError})\n", (13780, 13894), False, 'import ee\n'), ((14008, 14036), 'ee.Feature', 'ee.Feature', (['None', 'exportDict'], {}), '(None, exportDict)\n', (14018, 14036), False, 'import ee\n'), ((1351, 1392), 'ee.Image', 'ee.Image', (['"""JRC/GSW1_1/GlobalSurfaceWater"""'], {}), "('JRC/GSW1_1/GlobalSurfaceWater')\n", (1359, 1392), False, 'import ee\n'), ((3579, 3615), 'ee.Filter.eq', 'ee.Filter.eq', (['"""instrumentMode"""', '"""IW"""'], {}), "('instrumentMode', 'IW')\n", (3591, 3615), False, 'import ee\n'), ((3257, 3267), 'ee.Date', 'ee.Date', (['d'], {}), '(d)\n', (3264, 3267), False, 'import ee\n'), ((4982, 5023), 'ee.Image', 'ee.Image', (['"""JRC/GSW1_1/GlobalSurfaceWater"""'], {}), "('JRC/GSW1_1/GlobalSurfaceWater')\n", (4990, 5023), False, 'import ee\n'), ((5209, 5225), 'ee.Reducer.sum', 'ee.Reducer.sum', ([], {}), '()\n', (5223, 5225), False, 'import ee\n'), ((5815, 5832), 'ee.Reducer.mean', 'ee.Reducer.mean', ([], {}), '()\n', (5830, 5832), False, 'import ee\n'), ((6042, 6061), 'ee.Reducer.stdDev', 'ee.Reducer.stdDev', ([], {}), '()\n', (6059, 6061), False, 'import ee\n'), ((6278, 6295), 'ee.Reducer.mean', 'ee.Reducer.mean', ([], {}), '()\n', (6293, 6295), False, 'import ee\n'), ((6512, 6531), 'ee.Reducer.stdDev', 'ee.Reducer.stdDev', ([], {}), '()\n', (6529, 6531), False, 'import ee\n'), ((6725, 6742), 'ee.Reducer.mean', 'ee.Reducer.mean', ([], {}), '()\n', (6740, 6742), False, 'import ee\n'), ((6935, 6954), 'ee.Reducer.stdDev', 'ee.Reducer.stdDev', ([], {}), '()\n', (6952, 6954), False, 'import ee\n'), ((7149, 7166), 'ee.Reducer.mean', 'ee.Reducer.mean', ([], {}), '()\n', (7164, 7166), False, 'import ee\n'), ((7390, 7409), 'ee.Reducer.stdDev', 'ee.Reducer.stdDev', ([], {}), '()\n', (7407, 7409), False, 'import ee\n'), ((8166, 8207), 'ee.Image', 'ee.Image', (['"""JRC/GSW1_1/GlobalSurfaceWater"""'], {}), "('JRC/GSW1_1/GlobalSurfaceWater')\n", (8174, 8207), False, 'import ee\n'), ((8470, 8487), 'ee.Reducer.mean', 'ee.Reducer.mean', ([], {}), '()\n', (8485, 8487), False, 'import ee\n'), ((8700, 8719), 'ee.Reducer.stdDev', 'ee.Reducer.stdDev', ([], {}), '()\n', (8717, 8719), False, 'import ee\n'), ((8932, 8949), 'ee.Reducer.mean', 'ee.Reducer.mean', ([], {}), '()\n', (8947, 8949), False, 'import ee\n'), ((9161, 9180), 'ee.Reducer.stdDev', 'ee.Reducer.stdDev', ([], {}), '()\n', (9178, 9180), False, 'import ee\n'), ((10377, 10418), 'ee.Image', 'ee.Image', (['"""JRC/GSW1_1/GlobalSurfaceWater"""'], {}), "('JRC/GSW1_1/GlobalSurfaceWater')\n", (10385, 10418), False, 'import ee\n'), ((2669, 2681), 'ee.Number', 'ee.Number', (['n'], {}), '(n)\n', (2678, 2681), False, 'import ee\n'), ((3492, 3555), 'ee.Filter.listContains', 'ee.Filter.listContains', (['"""transmitterReceiverPolarisation"""', '"""VV"""'], {}), "('transmitterReceiverPolarisation', 'VV')\n", (3514, 3555), False, 'import ee\n'), ((1537, 1553), 'ee.Reducer.max', 'ee.Reducer.max', ([], {}), '()\n', (1551, 1553), False, 'import ee\n'), ((1951, 1967), 'ee.Reducer.sum', 'ee.Reducer.sum', ([], {}), '()\n', (1965, 1967), False, 'import ee\n'), ((2222, 2238), 'ee.Reducer.sum', 'ee.Reducer.sum', ([], {}), '()\n', (2236, 2238), False, 'import ee\n'), ((4265, 4281), 'ee.Reducer.sum', 'ee.Reducer.sum', ([], {}), '()\n', (4279, 4281), False, 'import ee\n'), ((8306, 
8326), 'ee.Image.constant', 'ee.Image.constant', (['(0)'], {}), '(0)\n', (8323, 8326), False, 'import ee\n'), ((10517, 10537), 'ee.Image.constant', 'ee.Image.constant', (['(0)'], {}), '(0)\n', (10534, 10537), False, 'import ee\n'), ((1778, 1798), 'ee.Image.constant', 'ee.Image.constant', (['(0)'], {}), '(0)\n', (1795, 1798), False, 'import ee\n'), ((10783, 10799), 'ee.Reducer.sum', 'ee.Reducer.sum', ([], {}), '()\n', (10797, 10799), False, 'import ee\n'), ((11070, 11086), 'ee.Reducer.sum', 'ee.Reducer.sum', ([], {}), '()\n', (11084, 11086), False, 'import ee\n'), ((1442, 1462), 'ee.Image.constant', 'ee.Image.constant', (['(0)'], {}), '(0)\n', (1459, 1462), False, 'import ee\n'), ((3359, 3398), 'ee.ImageCollection', 'ee.ImageCollection', (['"""COPERNICUS/S1_GRD"""'], {}), "('COPERNICUS/S1_GRD')\n", (3377, 3398), False, 'import ee\n')] |
__source__ = 'https://leetcode.com/problems/kth-largest-element-in-a-stream/'
# Time: O()
# Space: O()
#
# Description: Leetcode # 703. Kth Largest Element in a Stream
#
# Design a class to find the kth largest element in a stream.
# Note that it is the kth largest element in the sorted order, not the kth distinct element.
#
# Your KthLargest class will have a constructor which accepts an integer k and an integer array nums,
# which contains initial elements from the stream. For each call to the method KthLargest.add,
# return the element representing the kth largest element in the stream.
#
# Example:
#
# int k = 3;
# int[] arr = [4,5,8,2];
# KthLargest kthLargest = new KthLargest(3, arr);
# kthLargest.add(3); // returns 4
# kthLargest.add(5); // returns 5
# kthLargest.add(10); // returns 5
# kthLargest.add(9); // returns 8
# kthLargest.add(4); // returns 8
# Note:
# You may assume that nums length >= k-1 and k >= 1.
#
import unittest
class Solution(object):
pass # your function here
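# Illustrative Python sketch of the heap-based approach described above (the
# accepted solutions for this problem are recorded in the Java string below):
# a min-heap holding only the k largest values keeps the kth largest at its root.
import heapq
class KthLargestSketch(object):
    def __init__(self, k, nums):
        self.k = k
        self.heap = list(nums)
        heapq.heapify(self.heap)
        while len(self.heap) > k:
            heapq.heappop(self.heap)
    def add(self, val):
        heapq.heappush(self.heap, val)
        if len(self.heap) > self.k:
            heapq.heappop(self.heap)
        return self.heap[0]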
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought:
# use PQ
# 77ms 83.97%
class KthLargest {
private PriorityQueue<Integer> pq;
private int k;
public KthLargest(int k, int[] nums) {
this.k = k;
this.pq = new PriorityQueue<>();
for (int num : nums) {
pq.offer(num);
if (pq.size() > k)
pq.poll();
}
}
public int add(int val) {
pq.offer(val);
if (pq.size() > k)
pq.poll();
return pq.peek();
}
}
# use BST
# 428ms 5.74%
class KthLargest {
TreeNode root;
private int k;
public KthLargest(int k, int[] nums) {
this.k = k - 1;
for (int n : nums) {
root = insert(root, n);
}
}
public int add(int val) {
root = insert(root, val);
return findKthLargest(k, root);
}
private int findKthLargest(int k, TreeNode root) {
if (root == null) return -1;
if (root.mRightSum == k) return root.mVal;
if (root.mRightSum > k) {
return findKthLargest(k, root.right);
} else {
return findKthLargest(k - root.mRightSum - 1, root.left);
}
}
private TreeNode insert(TreeNode root, int val) {
if (root == null) return new TreeNode(val, 0);
if (val < root.mVal) {
root.left = insert(root.left, val);
} else {
root.mRightSum++;
root.right = insert(root.right, val);
}
return root;
}
private class TreeNode {
int mVal;
int mRightSum;
TreeNode left;
TreeNode right;
TreeNode(int val, int rightSum) {
mVal = val;
mRightSum = rightSum;
}
}
}
/**
* Your KthLargest object will be instantiated and called as such:
* KthLargest obj = new KthLargest(k, nums);
* int param_1 = obj.add(val);
*/
''' | [
"unittest.main"
] | [((1148, 1163), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1161, 1163), False, 'import unittest\n')] |
from django.db import models
from django.utils import timezone
# Create your models here.
class Beverage(models.Model):
    """A beverage type that the machine can prepare, with its allowed fill-quantity range."""
created_by = models.ForeignKey('auth.User', on_delete=models.SET_NULL, null=True)
name = models.CharField(max_length=100)
key = models.CharField(max_length=100)
description = models.CharField(max_length=200)
fill_quantity_min = models.IntegerField()
fill_quantity_max = models.IntegerField()
fill_quantity_steps = models.IntegerField()
def __str__(self):
return str(self.name)
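# Each BeverageHistory row records a single dispense: who ordered it, when, and the chosen bean strength and temperature.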
class BeverageHistory(models.Model):
bean_amount_choices = (
('VeryMild', 'Very mild'),
('Mild', 'Mild'),
('MildPlus', 'Mild +'),
)
temperature_choices = (
('88C', '88 °C'),
('90C', '90 °C'),
('92C', '92 °C'),
)
created_by = models.ForeignKey('auth.User', on_delete=models.SET_NULL, null=True)
created_at = models.DateTimeField(default=timezone.now)
bean_amount = models.CharField(max_length=100, choices=bean_amount_choices, default='Mild')
temperature = models.CharField(max_length=100, choices=temperature_choices, default='90C')
beverage = models.ForeignKey(Beverage, on_delete=models.CASCADE) | [
"django.db.models.ForeignKey",
"django.db.models.IntegerField",
"django.db.models.CharField",
"django.db.models.DateTimeField"
] | [((163, 231), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""auth.User"""'], {'on_delete': 'models.SET_NULL', 'null': '(True)'}), "('auth.User', on_delete=models.SET_NULL, null=True)\n", (180, 231), False, 'from django.db import models\n'), ((243, 275), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (259, 275), False, 'from django.db import models\n'), ((286, 318), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (302, 318), False, 'from django.db import models\n'), ((337, 369), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (353, 369), False, 'from django.db import models\n'), ((394, 415), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (413, 415), False, 'from django.db import models\n'), ((440, 461), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (459, 461), False, 'from django.db import models\n'), ((488, 509), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (507, 509), False, 'from django.db import models\n'), ((858, 926), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""auth.User"""'], {'on_delete': 'models.SET_NULL', 'null': '(True)'}), "('auth.User', on_delete=models.SET_NULL, null=True)\n", (875, 926), False, 'from django.db import models\n'), ((944, 986), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'timezone.now'}), '(default=timezone.now)\n', (964, 986), False, 'from django.db import models\n'), ((1005, 1082), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'choices': 'bean_amount_choices', 'default': '"""Mild"""'}), "(max_length=100, choices=bean_amount_choices, default='Mild')\n", (1021, 1082), False, 'from django.db import models\n'), ((1101, 1177), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'choices': 'temperature_choices', 'default': '"""90C"""'}), "(max_length=100, choices=temperature_choices, default='90C')\n", (1117, 1177), False, 'from django.db import models\n'), ((1193, 1246), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Beverage'], {'on_delete': 'models.CASCADE'}), '(Beverage, on_delete=models.CASCADE)\n', (1210, 1246), False, 'from django.db import models\n')] |
#!/usr/bin/env python
import sys
import django
from django.conf import settings
from django.test.runner import DiscoverRunner as TestRunner
SETTINGS = {
'DATABASES': {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'dev.db',
},
# 'default': {
# 'ENGINE': 'django.db.backends.mysql',
# 'NAME': 'dpaste',
# 'USER': 'root',
# 'PASSWORD': '',
# }
},
'TEMPLATES': [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.template.context_processors.i18n',
'dpaste.context_processors.dpaste_globals',
],
},
},
],
'INSTALLED_APPS': [
'django.contrib.sessions',
'django.contrib.staticfiles',
'dpaste',
],
'MIDDLEWARE_CLASSES': (
'django.contrib.sessions.middleware.SessionMiddleware',
),
'STATIC_ROOT': '/tmp/dpaste_test_static/',
'STATIC_URL': '/static/',
'ROOT_URLCONF': 'dpaste.urls',
'LANGUAGE_CODE': 'en',
'LANGUAGES': (('en', 'English'),),
}
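# Minimal settings so the dpaste test suite can run standalone, outside a full Django project.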
def runtests(*test_args):
# Setup settings
if not settings.configured:
settings.configure(**SETTINGS)
# app registry setup
django.setup()
# test runner
test_runner = TestRunner(verbosity=1)
failures = test_runner.run_tests(['dpaste'])
if failures:
sys.exit(failures)
if __name__ == '__main__':
runtests(*sys.argv[1:])
| [
"django.conf.settings.configure",
"django.test.runner.DiscoverRunner",
"django.setup",
"sys.exit"
] | [((1556, 1570), 'django.setup', 'django.setup', ([], {}), '()\n', (1568, 1570), False, 'import django\n'), ((1608, 1631), 'django.test.runner.DiscoverRunner', 'TestRunner', ([], {'verbosity': '(1)'}), '(verbosity=1)\n', (1618, 1631), True, 'from django.test.runner import DiscoverRunner as TestRunner\n'), ((1495, 1525), 'django.conf.settings.configure', 'settings.configure', ([], {}), '(**SETTINGS)\n', (1513, 1525), False, 'from django.conf import settings\n'), ((1706, 1724), 'sys.exit', 'sys.exit', (['failures'], {}), '(failures)\n', (1714, 1724), False, 'import sys\n')] |
import RPi.GPIO as GPIO
from .light_errors import LedError
class Led:
"""
This is a class used to control LED's directly connected to the GPIO via a pin given.
See the documentation for an example of how to wire the LED.
"""
def __init__(self, pin):
"""
        This initiates the LED on the given pin, setting it to output mode,
making sure it is off, and setting the PWM up so that the LED can be dimmed.
"""
try:
self.pin = int(pin)
GPIO.setmode(GPIO.BOARD)
GPIO.setup(self.pin, GPIO.OUT)
GPIO.output(self.pin, GPIO.LOW)
self.led_dim = GPIO.PWM(self.pin, 500)
except:
raise LedError("Error during the initiation of the LED class.")
def on(self, brightness=100):
"""
        Turns the defined LED on; the brightness defaults to 100%.
"""
try:
self.led_dim.start(brightness)
except:
raise LedError("Error while turning the LED on.")
def off(self):
"""
Turns the defined LED off.
"""
try:
self.led_dim.stop()
except:
raise LedError("Error while turning the LED off.")
def dim(self, brightness):
"""
        Dims the defined LED. Keep in mind that if you don't first turn the
        LED on, this will error out.
"""
if brightness < 0:
brightness = 0
elif brightness > 100:
brightness = 100
else:
pass
try:
self.led_dim.ChangeDutyCycle(brightness)
except:
raise LedError("Error while dimming the LED. Make sure you have turned the LED on.")
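# Hypothetical usage sketch (the pin number below is only an example; wire the
# LED as described in the documentation):
#   led = Led(11)
#   led.on(50)   # switch on at 50% brightness
#   led.dim(10)  # dim to 10%
#   led.off()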
| [
"RPi.GPIO.setup",
"RPi.GPIO.setmode",
"RPi.GPIO.output",
"RPi.GPIO.PWM"
] | [((519, 543), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BOARD'], {}), '(GPIO.BOARD)\n', (531, 543), True, 'import RPi.GPIO as GPIO\n'), ((556, 586), 'RPi.GPIO.setup', 'GPIO.setup', (['self.pin', 'GPIO.OUT'], {}), '(self.pin, GPIO.OUT)\n', (566, 586), True, 'import RPi.GPIO as GPIO\n'), ((599, 630), 'RPi.GPIO.output', 'GPIO.output', (['self.pin', 'GPIO.LOW'], {}), '(self.pin, GPIO.LOW)\n', (610, 630), True, 'import RPi.GPIO as GPIO\n'), ((659, 682), 'RPi.GPIO.PWM', 'GPIO.PWM', (['self.pin', '(500)'], {}), '(self.pin, 500)\n', (667, 682), True, 'import RPi.GPIO as GPIO\n')] |
import os
import pickle
import re
import requests
import tkinter
from tkinter import ttk
from Hero import Hero
from SearchListbox import SearchListbox
from Team import Team
class Window(ttk.Frame):
def __init__(self, root=None):
super().__init__(root)
self.root = root
self.root.title("teamcomp")
self.grid()
self.root.unbind_class("Listbox", "<space>") # how to rebind action to Enter?
self.root.bind("<Key>", lambda event: self.search(event))
# hero list
self.heroes = dict()
self.hero_frm = ttk.Frame(self, borderwidth=0)
self.hero_lst = SearchListbox(self.hero_frm, height=20)
self.hero_scl = ttk.Scrollbar(self.hero_frm)
self.init_hero_list()
# team lists
self.team1 = Team()
self.team2 = Team()
self.team_frm = ttk.Frame(self, borderwidth=0)
self.team1_lst = SearchListbox(self.team_frm, height=5)
self.team2_lst = SearchListbox(self.team_frm, height=5)
self.init_team_lists()
# add/remove buttons
self.add_rem_frm = ttk.Frame(self, borderwidth=0)
self.team1_add_btn = ttk.Button(
self.add_rem_frm,
text="-->",
command=lambda: self.add_hero(self.team1, self.team1_lst),
)
self.team1_rem_btn = ttk.Button(
self.add_rem_frm,
text="<--",
command=lambda: self.remove_hero(self.team1, self.team1_lst),
)
self.team2_add_btn = ttk.Button(
self.add_rem_frm,
text="-->",
command=lambda: self.add_hero(self.team2, self.team2_lst),
)
self.team2_rem_btn = ttk.Button(
self.add_rem_frm,
text="<--",
command=lambda: self.remove_hero(self.team2, self.team2_lst),
)
self.init_add_rem_buttons()
# stats list
self.stats_frm = ttk.Frame(self, borderwidth=0)
self.stats_lbl = ttk.Label(self.stats_frm, text="Counters")
self.stats_lst = SearchListbox(
self.stats_frm,
height=20,
width=26,
font=("Courier", "10"),
)
self.stats_scl = ttk.Scrollbar(self.stats_frm)
self.init_stats_list()
# controls
self.controls_lfrm = ttk.LabelFrame(self, text="Controls")
self.show_rb_var = tkinter.StringVar()
self.show_team1_rb = ttk.Radiobutton(
self.controls_lfrm,
text="Radiant",
variable=self.show_rb_var, value="team1",
)
self.show_team2_rb = ttk.Radiobutton(
self.controls_lfrm,
text="Dire",
variable=self.show_rb_var,
value="team2",
)
self.show_hero_rb = ttk.Radiobutton(
self.controls_lfrm,
text="Hero",
variable=self.show_rb_var,
value="hero",
)
self.show_stats_btn = ttk.Button(
self.controls_lfrm,
text="Show",
command=self.show_stats,
)
self.reset_teams_btn = ttk.Button(
self.controls_lfrm,
text="Clear",
command=self.clear_teams,
)
self.clear_stats_btn = ttk.Button(
self.controls_lfrm,
text="Wipe",
command=self.wipe_stats,
)
self.init_controls()
def init_hero_list(self):
if os.path.isfile("heroes.dat"):
with open("heroes.dat", "rb") as f:
self.heroes = pickle.load(f)
else:
self.init_heroes()
for name in self.heroes.keys():
self.hero_lst.append(name)
self.hero_lst.config(yscrollcommand=self.hero_scl.set)
self.hero_scl.config(command=self.hero_lst.yview)
hero_lbl = ttk.Label(self.hero_frm, text="Heroes")
self.hero_frm.grid(row=0, column=0, rowspan=2, sticky=tkinter.NS)
self.hero_lst.grid(row=1, column=0)
self.hero_scl.grid(row=1, column=1, sticky=tkinter.NS)
hero_lbl.grid(row=0, column=0)
def init_team_lists(self):
team1_lbl = ttk.Label(self.team_frm, text="Radiant")
team2_lbl = ttk.Label(self.team_frm, text="Dire")
self.team_frm.grid(row=0, column=2, sticky=tkinter.N)
team1_lbl.grid(row=0, column=3)
self.team1_lst.grid(row=1, column=3, rowspan=5)
self.team_frm.grid_rowconfigure(6, minsize=20)
team2_lbl.grid(row=7, column=3)
self.team2_lst.grid(row=8, column=3, rowspan=5)
def init_add_rem_buttons(self):
self.add_rem_frm.grid(row=0, column=1, sticky=tkinter.N)
self.add_rem_frm.grid_rowconfigure(0, minsize=40)
self.team1_add_btn.grid(row=1)
self.team1_rem_btn.grid(row=2)
self.team2_add_btn.grid(row=3)
self.team2_rem_btn.grid(row=4)
def init_stats_list(self):
self.stats_lst.config(yscrollcommand=self.stats_scl.set)
self.stats_scl.config(command=self.stats_lst.yview)
self.stats_frm.grid(row=0, column=3, rowspan=2, sticky=tkinter.NS)
self.stats_lst.grid(row=1, column=0)
self.stats_scl.grid(row=1, column=1, sticky=tkinter.NS)
self.stats_lbl.grid(row=0, column=0)
def init_controls(self):
self.controls_lfrm.grid_columnconfigure(0, weight=1)
self.controls_lfrm.grid_columnconfigure(1, weight=1)
self.controls_lfrm.grid_columnconfigure(2, weight=1)
self.controls_lfrm.grid(row=1, column=1, columnspan=2, sticky=tkinter.NSEW)
self.show_team1_rb.grid(row=0, column=0)
self.show_team2_rb.grid(row=0, column=1)
self.show_hero_rb.grid(row=0, column=2)
self.show_stats_btn.grid(row=1, column=0)
self.reset_teams_btn.grid(row=1, column=1)
self.clear_stats_btn.grid(row=1, column=2)
# team 1 selected by default
self.show_team1_rb.invoke()
def clear_teams(self):
self.team1.reset()
self.team2.reset()
self.team1_lst.delete(0, tkinter.END)
self.team2_lst.delete(0, tkinter.END)
# wipe cached stats and fetch fresh stats for heroes on teams
def wipe_stats(self):
for hero in self.heroes.values():
hero.stats = dict()
for hero in self.team1.heroes + self.team2.heroes:
self.heroes[hero.name].fetch_stats()
self.stats_lst.delete(0, tkinter.END)
# initialize hero dict and SearchListbox
def init_heroes(self):
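        # Scrape the hero grid from Dotabuff's heroes page and rebuild the name -> Hero mapping.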
page = requests.get(
"https://www.dotabuff.com/heroes", headers={"user-agent": "Mozilla/5.0"}
)
self.hero_lst.delete(0, tkinter.END)
self.heroes = dict()
for hero_info in re.findall(
r'<a href="/heroes/(.+?)">.+?<div class="name">(.+?)</div>',
re.search(
r'<div class="hero-grid">[\s\S]+</div></footer></section>', page.text
).group(),
):
self.heroes[hero_info[1]] = Hero(hero_info[1], hero_info[0])
self.hero_lst.append(hero_info[1])
# unused, has no button; doable by deleting heroes.dat before run
def refresh_heroes(self):
self.init_heroes()
self.wipe_stats()
# button action
def add_hero(self, team: Team, team_lst):
hero: Hero = self.get_selected_hero(self.hero_lst)
if hero is not None and team.add_hero(hero):
team_lst.append(hero.name)
# button action
def remove_hero(self, team, team_lst):
idx = team_lst.curselection()
if not idx:
return
team.remove_hero(self.heroes[team_lst.get(idx[0])])
team_lst.delete(idx[0])
# get currently selected hero in hero list, fetching stats if necessary
def get_selected_hero(self, lst: SearchListbox) -> Hero:
idx = lst.curselection()
hero: Hero = None # use Optional? do something different?
if idx:
hero = self.heroes[lst.get(idx[0])]
if not hero.stats:
hero.fetch_stats()
return hero
# button action
def show_stats(self):
if self.show_rb_var.get() == "hero":
# can select a hero from full list or teams
for lst in [self.hero_lst, self.team1_lst, self.team2_lst]:
hero: Hero = self.get_selected_hero(lst)
if hero is not None:
self.update_stats_listbox(hero)
break
else:
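            # show_rb_var holds "team1" or "team2", matching the attribute names, so eval() resolves it to the corresponding Team.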
self.update_stats_listbox(eval(f"self.{self.show_rb_var.get()}"))
def update_stats_listbox(self, hero_or_team): # better way to handle hero or team?
self.stats_lst.delete(0, tkinter.END)
for hero, stat in sorted(
hero_or_team.stats.items(),
key=lambda item: item[1],
reverse=True,
):
if isinstance(hero_or_team, Hero) or hero not in hero_or_team.heroes:
self.stats_lst.append_stat(f"{hero:20} {stat:+.2f}")
self.stats_lst.grid(row=1, column=0)
# performed on window close
def write_stats(self):
with open("heroes.dat", "wb") as f:
pickle.dump(self.heroes, f, protocol=pickle.HIGHEST_PROTOCOL)
@staticmethod
def search(event):
if (
event.widget.winfo_class() == "Listbox"
and (event.char.isalpha() or event.char == " ")
):
event.widget.search(event.char)
| [
"tkinter.ttk.Label",
"tkinter.StringVar",
"pickle.dump",
"tkinter.ttk.Scrollbar",
"tkinter.ttk.Radiobutton",
"tkinter.ttk.Frame",
"Team.Team",
"os.path.isfile",
"pickle.load",
"Hero.Hero",
"SearchListbox.SearchListbox",
"tkinter.ttk.Button",
"requests.get",
"re.search",
"tkinter.ttk.LabelFrame"
] | [((575, 605), 'tkinter.ttk.Frame', 'ttk.Frame', (['self'], {'borderwidth': '(0)'}), '(self, borderwidth=0)\n', (584, 605), False, 'from tkinter import ttk\n'), ((630, 669), 'SearchListbox.SearchListbox', 'SearchListbox', (['self.hero_frm'], {'height': '(20)'}), '(self.hero_frm, height=20)\n', (643, 669), False, 'from SearchListbox import SearchListbox\n'), ((694, 722), 'tkinter.ttk.Scrollbar', 'ttk.Scrollbar', (['self.hero_frm'], {}), '(self.hero_frm)\n', (707, 722), False, 'from tkinter import ttk\n'), ((796, 802), 'Team.Team', 'Team', ([], {}), '()\n', (800, 802), False, 'from Team import Team\n'), ((824, 830), 'Team.Team', 'Team', ([], {}), '()\n', (828, 830), False, 'from Team import Team\n'), ((855, 885), 'tkinter.ttk.Frame', 'ttk.Frame', (['self'], {'borderwidth': '(0)'}), '(self, borderwidth=0)\n', (864, 885), False, 'from tkinter import ttk\n'), ((911, 949), 'SearchListbox.SearchListbox', 'SearchListbox', (['self.team_frm'], {'height': '(5)'}), '(self.team_frm, height=5)\n', (924, 949), False, 'from SearchListbox import SearchListbox\n'), ((975, 1013), 'SearchListbox.SearchListbox', 'SearchListbox', (['self.team_frm'], {'height': '(5)'}), '(self.team_frm, height=5)\n', (988, 1013), False, 'from SearchListbox import SearchListbox\n'), ((1102, 1132), 'tkinter.ttk.Frame', 'ttk.Frame', (['self'], {'borderwidth': '(0)'}), '(self, borderwidth=0)\n', (1111, 1132), False, 'from tkinter import ttk\n'), ((1926, 1956), 'tkinter.ttk.Frame', 'ttk.Frame', (['self'], {'borderwidth': '(0)'}), '(self, borderwidth=0)\n', (1935, 1956), False, 'from tkinter import ttk\n'), ((1982, 2024), 'tkinter.ttk.Label', 'ttk.Label', (['self.stats_frm'], {'text': '"""Counters"""'}), "(self.stats_frm, text='Counters')\n", (1991, 2024), False, 'from tkinter import ttk\n'), ((2050, 2124), 'SearchListbox.SearchListbox', 'SearchListbox', (['self.stats_frm'], {'height': '(20)', 'width': '(26)', 'font': "('Courier', '10')"}), "(self.stats_frm, height=20, width=26, font=('Courier', '10'))\n", (2063, 2124), False, 'from SearchListbox import SearchListbox\n'), ((2209, 2238), 'tkinter.ttk.Scrollbar', 'ttk.Scrollbar', (['self.stats_frm'], {}), '(self.stats_frm)\n', (2222, 2238), False, 'from tkinter import ttk\n'), ((2319, 2356), 'tkinter.ttk.LabelFrame', 'ttk.LabelFrame', (['self'], {'text': '"""Controls"""'}), "(self, text='Controls')\n", (2333, 2356), False, 'from tkinter import ttk\n'), ((2384, 2403), 'tkinter.StringVar', 'tkinter.StringVar', ([], {}), '()\n', (2401, 2403), False, 'import tkinter\n'), ((2433, 2531), 'tkinter.ttk.Radiobutton', 'ttk.Radiobutton', (['self.controls_lfrm'], {'text': '"""Radiant"""', 'variable': 'self.show_rb_var', 'value': '"""team1"""'}), "(self.controls_lfrm, text='Radiant', variable=self.\n show_rb_var, value='team1')\n", (2448, 2531), False, 'from tkinter import ttk\n'), ((2603, 2697), 'tkinter.ttk.Radiobutton', 'ttk.Radiobutton', (['self.controls_lfrm'], {'text': '"""Dire"""', 'variable': 'self.show_rb_var', 'value': '"""team2"""'}), "(self.controls_lfrm, text='Dire', variable=self.show_rb_var,\n value='team2')\n", (2618, 2697), False, 'from tkinter import ttk\n'), ((2781, 2874), 'tkinter.ttk.Radiobutton', 'ttk.Radiobutton', (['self.controls_lfrm'], {'text': '"""Hero"""', 'variable': 'self.show_rb_var', 'value': '"""hero"""'}), "(self.controls_lfrm, text='Hero', variable=self.show_rb_var,\n value='hero')\n", (2796, 2874), False, 'from tkinter import ttk\n'), ((2960, 3028), 'tkinter.ttk.Button', 'ttk.Button', (['self.controls_lfrm'], {'text': '"""Show"""', 'command': 
'self.show_stats'}), "(self.controls_lfrm, text='Show', command=self.show_stats)\n", (2970, 3028), False, 'from tkinter import ttk\n'), ((3107, 3177), 'tkinter.ttk.Button', 'ttk.Button', (['self.controls_lfrm'], {'text': '"""Clear"""', 'command': 'self.clear_teams'}), "(self.controls_lfrm, text='Clear', command=self.clear_teams)\n", (3117, 3177), False, 'from tkinter import ttk\n'), ((3256, 3324), 'tkinter.ttk.Button', 'ttk.Button', (['self.controls_lfrm'], {'text': '"""Wipe"""', 'command': 'self.wipe_stats'}), "(self.controls_lfrm, text='Wipe', command=self.wipe_stats)\n", (3266, 3324), False, 'from tkinter import ttk\n'), ((3443, 3471), 'os.path.isfile', 'os.path.isfile', (['"""heroes.dat"""'], {}), "('heroes.dat')\n", (3457, 3471), False, 'import os\n'), ((3832, 3871), 'tkinter.ttk.Label', 'ttk.Label', (['self.hero_frm'], {'text': '"""Heroes"""'}), "(self.hero_frm, text='Heroes')\n", (3841, 3871), False, 'from tkinter import ttk\n'), ((4145, 4185), 'tkinter.ttk.Label', 'ttk.Label', (['self.team_frm'], {'text': '"""Radiant"""'}), "(self.team_frm, text='Radiant')\n", (4154, 4185), False, 'from tkinter import ttk\n'), ((4206, 4243), 'tkinter.ttk.Label', 'ttk.Label', (['self.team_frm'], {'text': '"""Dire"""'}), "(self.team_frm, text='Dire')\n", (4215, 4243), False, 'from tkinter import ttk\n'), ((6512, 6602), 'requests.get', 'requests.get', (['"""https://www.dotabuff.com/heroes"""'], {'headers': "{'user-agent': 'Mozilla/5.0'}"}), "('https://www.dotabuff.com/heroes', headers={'user-agent':\n 'Mozilla/5.0'})\n", (6524, 6602), False, 'import requests\n'), ((7005, 7037), 'Hero.Hero', 'Hero', (['hero_info[1]', 'hero_info[0]'], {}), '(hero_info[1], hero_info[0])\n', (7009, 7037), False, 'from Hero import Hero\n'), ((9176, 9237), 'pickle.dump', 'pickle.dump', (['self.heroes', 'f'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(self.heroes, f, protocol=pickle.HIGHEST_PROTOCOL)\n', (9187, 9237), False, 'import pickle\n'), ((3551, 3565), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3562, 3565), False, 'import pickle\n'), ((6826, 6912), 're.search', 're.search', (['"""<div class="hero-grid">[\\\\s\\\\S]+</div></footer></section>"""', 'page.text'], {}), '(\'<div class="hero-grid">[\\\\s\\\\S]+</div></footer></section>\', page\n .text)\n', (6835, 6912), False, 'import re\n')] |
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
if __name__ == '__main__':
import os
import argparse
from PIL import Image
import torch
import torchvision.transforms as transforms
from torch.autograd import Variable
from beamsearch import SemanticBeamSearch
parser = argparse.ArgumentParser(description="generate sketches")
parser.add_argument('--image_path', type=str, help='path to image file')
parser.add_argument('--distract_dir', type=str, help='directory to distractor image files')
parser.add_argument('--sketch_dir', type=str, help='directory to store sketches')
parser.add_argument('--n_samples', type=int, default=5,
help='number of samples per iteration')
parser.add_argument('--n_iters', type=int, default=20,
help='number of iterations')
parser.add_argument('--stdev', type=float, default=15.0,
help='standard deviation for Gaussian when sampling')
parser.add_argument('--patience', type=int, default=5,
help='once the informativity measure stops improving, wait N epochs before quitting')
parser.add_argument('--beam_width', type=int, default=2,
help='number of particles to preserve at each timestep')
parser.add_argument('--embedding_layer', type=int, default=-1,
help='-1|0|1|...|7|8')
parser.add_argument('--embedding_net', type=str, default='vgg19', help='vgg19|resnet152')
parser.add_argument('--distance_fn', type=str, default='cosine',
help='cosine|l1|l2')
parser.add_argument('--fuzz', type=float, default=1.0,
help='hyperparameter for line rendering')
args = parser.parse_args()
# prep images
natural = Image.open(args.image_path)
distractors = []
for i in os.listdir(args.distract_dir):
distractor_path = os.path.join(args.distract_dir, i)
distractor = Image.open(distractor_path)
distractors.append(distractor)
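    # Standard ImageNet-style preprocessing: resize, 224x224 center crop, tensor conversion, and normalization with the usual ImageNet channel statistics.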
preprocessing = transforms.Compose([
transforms.Scale(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
# grab embeddings for the natural & distractor images
natural = Variable(preprocessing(natural).unsqueeze(0))
distractors = Variable(torch.cat([preprocessing(image).unsqueeze(0)
for image in distractors]))
explorer = SemanticBeamSearch(112, 112, 224, beam_width=args.beam_width,
n_samples=args.n_samples, n_iters=args.n_iters,
stdev=args.stdev, fuzz=1.0,
embedding_net=args.embedding_net,
embedding_layer=args.embedding_layer)
natural_emb = explorer.vgg19(natural)
distractor_embs = explorer.vgg19(distractors)
for i in range(args.n_iters):
sketch = explorer.train(i, natural_emb, distractor_items=distractor_embs)
im = Image.fromarray(sketch)
im.save(os.path.join(args.sketch_dir, 'sketch.png'))
| [
"argparse.ArgumentParser",
"torchvision.transforms.Scale",
"PIL.Image.open",
"beamsearch.SemanticBeamSearch",
"PIL.Image.fromarray",
"torchvision.transforms.CenterCrop",
"torchvision.transforms.Normalize",
"os.path.join",
"os.listdir",
"torchvision.transforms.ToTensor"
] | [((366, 422), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""generate sketches"""'}), "(description='generate sketches')\n", (389, 422), False, 'import argparse\n'), ((1879, 1906), 'PIL.Image.open', 'Image.open', (['args.image_path'], {}), '(args.image_path)\n', (1889, 1906), False, 'from PIL import Image\n'), ((1941, 1970), 'os.listdir', 'os.listdir', (['args.distract_dir'], {}), '(args.distract_dir)\n', (1951, 1970), False, 'import os\n'), ((2640, 2858), 'beamsearch.SemanticBeamSearch', 'SemanticBeamSearch', (['(112)', '(112)', '(224)'], {'beam_width': 'args.beam_width', 'n_samples': 'args.n_samples', 'n_iters': 'args.n_iters', 'stdev': 'args.stdev', 'fuzz': '(1.0)', 'embedding_net': 'args.embedding_net', 'embedding_layer': 'args.embedding_layer'}), '(112, 112, 224, beam_width=args.beam_width, n_samples=\n args.n_samples, n_iters=args.n_iters, stdev=args.stdev, fuzz=1.0,\n embedding_net=args.embedding_net, embedding_layer=args.embedding_layer)\n', (2658, 2858), False, 'from beamsearch import SemanticBeamSearch\n'), ((3206, 3229), 'PIL.Image.fromarray', 'Image.fromarray', (['sketch'], {}), '(sketch)\n', (3221, 3229), False, 'from PIL import Image\n'), ((1998, 2032), 'os.path.join', 'os.path.join', (['args.distract_dir', 'i'], {}), '(args.distract_dir, i)\n', (2010, 2032), False, 'import os\n'), ((2054, 2081), 'PIL.Image.open', 'Image.open', (['distractor_path'], {}), '(distractor_path)\n', (2064, 2081), False, 'from PIL import Image\n'), ((3242, 3285), 'os.path.join', 'os.path.join', (['args.sketch_dir', '"""sketch.png"""'], {}), "(args.sketch_dir, 'sketch.png')\n", (3254, 3285), False, 'import os\n'), ((2171, 2192), 'torchvision.transforms.Scale', 'transforms.Scale', (['(256)'], {}), '(256)\n', (2187, 2192), True, 'import torchvision.transforms as transforms\n'), ((2202, 2228), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (2223, 2228), True, 'import torchvision.transforms as transforms\n'), ((2238, 2259), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2257, 2259), True, 'import torchvision.transforms as transforms\n'), ((2269, 2335), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (2289, 2335), True, 'import torchvision.transforms as transforms\n')] |
import argparse
import csv
import haploqa.mongods as mds
SAMPLE_BATCH_SIZE = 20
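# Samples are imported in batches of this many columns; the matrix files are re-read once per batch.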
def import_samples(platform, geno_matrix_csv, x_matrix_csv, y_matrix_csv, sample_tags, db):
platform_chrs, snp_count_per_chr, snp_chr_indexes = mds.within_chr_snp_indices(platform, db)
curr_sample_start_index = 0
while True:
def get_sample_names(header_row):
slice = header_row[curr_sample_start_index:curr_sample_start_index + SAMPLE_BATCH_SIZE]
return [x.strip() for x in slice]
def get_data(data_row):
# the '+ 1' is because we need to shift right to accommodate the SNP ID column
slice = data_row[curr_sample_start_index + 1:curr_sample_start_index + SAMPLE_BATCH_SIZE + 1]
return [x.strip() for x in slice]
with open(geno_matrix_csv, 'r', newline='') as geno_matrix_handle, \
open(x_matrix_csv, 'r', newline='') as x_matrix_handle, \
open(y_matrix_csv, 'r', newline='') as y_matrix_handle:
# grab the current sample names
geno_matrix_table = csv.reader(geno_matrix_handle)
x_matrix_table = csv.reader(x_matrix_handle)
y_matrix_table = csv.reader(y_matrix_handle)
sample_names = get_sample_names(next(geno_matrix_table))
if not sample_names:
# we've already imported all of the samples
return
x_sample_names = get_sample_names(next(x_matrix_table))
y_sample_names = get_sample_names(next(y_matrix_table))
if sample_names != x_sample_names or sample_names != y_sample_names:
raise Exception('sample IDs do not match in files')
def make_snp_stream():
while True:
next_geno_row = next(geno_matrix_table)
next_x_row = next(x_matrix_table)
next_y_row = next(y_matrix_table)
snp_id = next_geno_row[0].strip()
if snp_id != next_x_row[0].strip() or snp_id != next_y_row[0].strip():
raise Exception('snp IDs do not match in files')
genos = get_data(next_geno_row)
xs = [float(x) for x in get_data(next_x_row)]
ys = [float(y) for y in get_data(next_y_row)]
yield snp_id, genos, xs, ys
samples = []
for sample_name in sample_names:
chr_dict = dict()
for chr in platform_chrs:
curr_snp_count = snp_count_per_chr[chr]
chr_dict[chr] = {
'xs': [float('nan')] * curr_snp_count,
'ys': [float('nan')] * curr_snp_count,
'snps': ['-'] * curr_snp_count,
}
curr_sample = {
'sample_id': mds.gen_unique_id(db),
'other_ids': [sample_name],
'platform_id': platform,
'chromosome_data': chr_dict,
'tags': sample_tags,
'unannotated_snps': [],
}
samples.append(curr_sample)
for snp_id, genos, xs, ys in make_snp_stream():
snp_chr_index = snp_chr_indexes.get(snp_id)
if snp_chr_index is not None:
snp_chr = snp_chr_index['chromosome']
snp_index = snp_chr_index['index']
for i, curr_sample in enumerate(samples):
curr_geno = genos[i].upper()
if curr_geno == 'N':
curr_geno = '-'
curr_x = xs[i]
curr_y = ys[i]
curr_sample_chr = curr_sample['chromosome_data'][snp_chr]
curr_sample_chr['xs'][snp_index] = curr_x
curr_sample_chr['ys'][snp_index] = curr_y
curr_sample_chr['snps'][snp_index] = curr_geno
for curr_sample in samples:
mds.post_proc_sample(curr_sample)
db.samples.insert_one(curr_sample)
print('inserted samples:', ', '.join(sample_names))
curr_sample_start_index += SAMPLE_BATCH_SIZE
def main():
# parse command line arguments
parser = argparse.ArgumentParser(description='import the final report with probe intensities')
parser.add_argument(
'platform',
help='the platform for the data we are importing. eg: MegaMUGA')
parser.add_argument(
'tag',
help='a tag name that should be associated with all imported samples')
parser.add_argument(
'geno_matrix_csv',
help='comma-separated genotype values matrix')
parser.add_argument(
'x_matrix_csv',
help='comma-separated X intensity values matrix')
parser.add_argument(
'y_matrix_csv',
help='comma-separated Y intensity values matrix')
args = parser.parse_args()
import_samples(
args.platform,
args.geno_matrix_csv, args.x_matrix_csv, args.y_matrix_csv,
[args.tag, args.platform],
mds.init_db())
if __name__ == '__main__':
main()
| [
"haploqa.mongods.post_proc_sample",
"csv.reader",
"haploqa.mongods.within_chr_snp_indices",
"argparse.ArgumentParser",
"haploqa.mongods.gen_unique_id",
"haploqa.mongods.init_db"
] | [((233, 273), 'haploqa.mongods.within_chr_snp_indices', 'mds.within_chr_snp_indices', (['platform', 'db'], {}), '(platform, db)\n', (259, 273), True, 'import haploqa.mongods as mds\n'), ((4380, 4470), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""import the final report with probe intensities"""'}), "(description=\n 'import the final report with probe intensities')\n", (4403, 4470), False, 'import argparse\n'), ((5226, 5239), 'haploqa.mongods.init_db', 'mds.init_db', ([], {}), '()\n', (5237, 5239), True, 'import haploqa.mongods as mds\n'), ((1082, 1112), 'csv.reader', 'csv.reader', (['geno_matrix_handle'], {}), '(geno_matrix_handle)\n', (1092, 1112), False, 'import csv\n'), ((1142, 1169), 'csv.reader', 'csv.reader', (['x_matrix_handle'], {}), '(x_matrix_handle)\n', (1152, 1169), False, 'import csv\n'), ((1199, 1226), 'csv.reader', 'csv.reader', (['y_matrix_handle'], {}), '(y_matrix_handle)\n', (1209, 1226), False, 'import csv\n'), ((4115, 4148), 'haploqa.mongods.post_proc_sample', 'mds.post_proc_sample', (['curr_sample'], {}), '(curr_sample)\n', (4135, 4148), True, 'import haploqa.mongods as mds\n'), ((2898, 2919), 'haploqa.mongods.gen_unique_id', 'mds.gen_unique_id', (['db'], {}), '(db)\n', (2915, 2919), True, 'import haploqa.mongods as mds\n')] |
# This program reads in a Google Hangouts JSON file and produces a wordcount
# Author: <NAME>
import json # JSON to handle Google's format
import re # regular expressions
# CHANGE THIS. For linux/mac, use '/home/user/restofpath/'
basepath = 'C:\\Users\\Pinaky\\Desktop\\cesmd\\gmail_hangout\\'
# INPUT: This is the input file path
jsonPath = basepath + 'Hangouts.json'
# OUTPUT: These are the output file paths. dict = sorted alphabetical; freq = sorted by frequency
mainDictPath = basepath + 'hangoutdict.txt'
mainFreqPath = basepath + 'hangoutfreq.txt'
# This is the path to a temporary intermediate file
tempPath = basepath + 'hangouttemp.txt'
# Read in the JSON file
jsonFile = open(jsonPath, 'r', encoding='utf8')
outFile = open(tempPath,'w', encoding='utf8')
# 'p' is the variable that contains all the data
p = json.load(jsonFile)
c = 0 # Count the number of chat messages
# This loops through Google's weird JSON format and picks out the chat text
for n in p['conversation_state']:
for e in n['conversation_state']['event']:
if 'chat_message' in e:
x = e['chat_message']['message_content']
if 'segment' in x:
xtype = x['segment'][0]['type']
xtext = x['segment'][0]['text'] + u" "
if xtype == u'TEXT':
# Write out the chat text to an intermediate file
outFile.write(xtext)
c += 1
print(u'Total number of chats: {0:d}'.format(c))
jsonFile.close()
outFile.close()
# The intermediate file has been written
# Now, run a wordcount
# Read in the intermediate file
inFile = open(tempPath,'r', encoding='utf8')
s = inFile.readlines()
inFile.close()
wordcount={} # The dictionary for wordcount
for l in range(len(s)):
line = s[l].lower().strip() # strip unnecessary white space
line = re.sub(u'[^A-Za-z]+', u' ', line) # keep only alphabets and remove the rest
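    # Note: only A-Z/a-z survive, so digits and non-Latin text are excluded from the word count.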
for word in line.split():
if word not in wordcount:
wordcount[word] = 1
else:
wordcount[word] += 1
# Sort the wordcount like a dictionary and write to file
outFile = open(mainDictPath, 'w', encoding='utf8')
for k,v in sorted(wordcount.items()):
outFile.write(str(k))
outFile.write(u' ')
outFile.write(str(v))
outFile.write(u'\n')
outFile.close()
# Sort the wordcount in descending order of frequency and write to file
outFile = open(mainFreqPath, 'w', encoding='utf8')
for k, v in sorted(wordcount.items(), key=lambda w: w[1], reverse=True):
outFile.write(str(k))
outFile.write(u' ')
outFile.write(str(v))
outFile.write(u'\n')
outFile.close()
| [
"json.load",
"re.sub"
] | [((860, 879), 'json.load', 'json.load', (['jsonFile'], {}), '(jsonFile)\n', (869, 879), False, 'import json\n'), ((1937, 1970), 're.sub', 're.sub', (['u"""[^A-Za-z]+"""', 'u""" """', 'line'], {}), "(u'[^A-Za-z]+', u' ', line)\n", (1943, 1970), False, 'import re\n')] |
"""Learn command"""
import botutils
import discord
import traceback
import json
from discord.ext import commands
from botc import check_if_is_player, check_if_is_night, check_if_dm, RoleCannotUseCommand, \
check_if_player_really_dead, check_if_can_learn, PlayerParser, AbilityForbidden, \
NotAPlayer, BOTCUtils, DeadOnlyCommand, NotDawn, NotDMChannel, check_if_is_dawn
with open('botutils/bot_text.json') as json_file:
language = json.load(json_file)
error_str = language["system"]["error"]
with open('botc/game_text.json') as json_file:
documentation = json.load(json_file)
class Learn(commands.Cog, name = documentation["misc"]["abilities_cog"]):
"""BoTC in-game commands cog
Learn command - used by ravenkeeper
"""
def __init__(self, client):
self.client = client
def cog_check(self, ctx):
"""Check performed on all commands of this cog.
Must be a non-fleaved player to use these commands.
"""
return check_if_is_player(ctx) # Registered non-quit player -> NotAPlayer
# ---------- LEARN COMMAND (Ravenkeeper) ----------------------------------------
@commands.command(
pass_context = True,
name = "learn",
hidden = False,
brief = documentation["doc"]["learn"]["brief"],
help = documentation["doc"]["learn"]["help"],
description = documentation["doc"]["learn"]["description"]
)
    @commands.check(check_if_is_dawn) # Correct phase -> NotDawn
@commands.check(check_if_dm) # Correct channel -> NotDMChannel
@commands.check(check_if_player_really_dead) # Player dead -> DeadOnlyCommand
@commands.check(check_if_can_learn) # Correct character -> RoleCannotUseCommand
async def learn(self, ctx, *, learned: PlayerParser()):
"""Learn command
usage: learn <player> and <player> and...
characters: ravenkeeper
"""
player = BOTCUtils.get_player_from_id(ctx.author.id)
await player.role.ego_self.register_learn(player, learned)
@learn.error
async def learn_error(self, ctx, error):
emoji = documentation["cmd_warnings"]["x_emoji"]
# Incorrect character -> RoleCannotUseCommand
if isinstance(error, RoleCannotUseCommand):
return
# If it passed all the checks but raised an error in the character class
elif isinstance(error, AbilityForbidden):
error = getattr(error, 'original', error)
await ctx.send(error)
# Non-registered or quit player -> NotAPlayer
elif isinstance(error, NotAPlayer):
return
# Incorrect channel -> NotDMChannel
elif isinstance(error, NotDMChannel):
return
# Incorrect argument -> commands.BadArgument
elif isinstance(error, commands.BadArgument):
return
        # Incorrect phase -> NotDawn
elif isinstance(error, NotDawn):
try:
await ctx.author.send(documentation["cmd_warnings"]["dawn_only"].format(ctx.author.mention, emoji))
except discord.Forbidden:
pass
# Player not dead -> DeadOnlyCommand
elif isinstance(error, DeadOnlyCommand):
try:
await ctx.author.send(documentation["cmd_warnings"]["dead_only"].format(ctx.author.mention, emoji))
except discord.Forbidden:
pass
# Missing argument -> commands.MissingRequiredArgument
elif isinstance(error, commands.MissingRequiredArgument):
player = BOTCUtils.get_player_from_id(ctx.author.id)
msg = player.role.ego_self.emoji + " " + player.role.ego_self.instruction + " " + player.role.ego_self.action
try:
await ctx.author.send(msg)
except discord.Forbidden:
pass
else:
try:
raise error
except Exception:
await ctx.send(error_str)
await botutils.log(botutils.Level.error, traceback.format_exc())
def setup(client):
client.add_cog(Learn(client))
| [
"json.load",
"discord.ext.commands.command",
"botc.BOTCUtils.get_player_from_id",
"discord.ext.commands.check",
"botc.check_if_is_player",
"traceback.format_exc",
"botc.PlayerParser"
] | [((445, 465), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (454, 465), False, 'import json\n'), ((576, 596), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (585, 596), False, 'import json\n'), ((1162, 1383), 'discord.ext.commands.command', 'commands.command', ([], {'pass_context': '(True)', 'name': '"""learn"""', 'hidden': '(False)', 'brief': "documentation['doc']['learn']['brief']", 'help': "documentation['doc']['learn']['help']", 'description': "documentation['doc']['learn']['description']"}), "(pass_context=True, name='learn', hidden=False, brief=\n documentation['doc']['learn']['brief'], help=documentation['doc'][\n 'learn']['help'], description=documentation['doc']['learn']['description'])\n", (1178, 1383), False, 'from discord.ext import commands\n'), ((1446, 1478), 'discord.ext.commands.check', 'commands.check', (['check_if_is_dawn'], {}), '(check_if_is_dawn)\n', (1460, 1478), False, 'from discord.ext import commands\n'), ((1513, 1540), 'discord.ext.commands.check', 'commands.check', (['check_if_dm'], {}), '(check_if_dm)\n', (1527, 1540), False, 'from discord.ext import commands\n'), ((1581, 1624), 'discord.ext.commands.check', 'commands.check', (['check_if_player_really_dead'], {}), '(check_if_player_really_dead)\n', (1595, 1624), False, 'from discord.ext import commands\n'), ((1664, 1698), 'discord.ext.commands.check', 'commands.check', (['check_if_can_learn'], {}), '(check_if_can_learn)\n', (1678, 1698), False, 'from discord.ext import commands\n'), ((998, 1021), 'botc.check_if_is_player', 'check_if_is_player', (['ctx'], {}), '(ctx)\n', (1016, 1021), False, 'from botc import check_if_is_player, check_if_is_night, check_if_dm, RoleCannotUseCommand, check_if_player_really_dead, check_if_can_learn, PlayerParser, AbilityForbidden, NotAPlayer, BOTCUtils, DeadOnlyCommand, NotDawn, NotDMChannel, check_if_is_dawn\n'), ((1940, 1983), 'botc.BOTCUtils.get_player_from_id', 'BOTCUtils.get_player_from_id', (['ctx.author.id'], {}), '(ctx.author.id)\n', (1968, 1983), False, 'from botc import check_if_is_player, check_if_is_night, check_if_dm, RoleCannotUseCommand, check_if_player_really_dead, check_if_can_learn, PlayerParser, AbilityForbidden, NotAPlayer, BOTCUtils, DeadOnlyCommand, NotDawn, NotDMChannel, check_if_is_dawn\n'), ((1787, 1801), 'botc.PlayerParser', 'PlayerParser', ([], {}), '()\n', (1799, 1801), False, 'from botc import check_if_is_player, check_if_is_night, check_if_dm, RoleCannotUseCommand, check_if_player_really_dead, check_if_can_learn, PlayerParser, AbilityForbidden, NotAPlayer, BOTCUtils, DeadOnlyCommand, NotDawn, NotDMChannel, check_if_is_dawn\n'), ((3574, 3617), 'botc.BOTCUtils.get_player_from_id', 'BOTCUtils.get_player_from_id', (['ctx.author.id'], {}), '(ctx.author.id)\n', (3602, 3617), False, 'from botc import check_if_is_player, check_if_is_night, check_if_dm, RoleCannotUseCommand, check_if_player_really_dead, check_if_can_learn, PlayerParser, AbilityForbidden, NotAPlayer, BOTCUtils, DeadOnlyCommand, NotDawn, NotDMChannel, check_if_is_dawn\n'), ((4047, 4069), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (4067, 4069), False, 'import traceback\n')] |
from random import randrange
from film import film_embed
from api import api_call
import os
async def random_embed():
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
f = open(os.path.join(__location__, "films.txt"), "r", encoding="utf8", errors="ignore")
film = random_line(f)
f.close()
return await film_embed(film)
def random_line(afile, default=None):
line = default
for i, aline in enumerate(afile, start=1):
if randrange(i) == 0: # random int [0..i)
line = aline
return line
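# A minimal usage sketch (not part of the original module): random_line implements
# reservoir sampling, so every line of the iterable has an equal chance of being
# returned without knowing the total line count up front. The titles are illustrative.
if __name__ == "__main__":
    print(random_line(["Alien", "Blade Runner", "Heat"], default="Heat"))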
| [
"os.getcwd",
"os.path.dirname",
"film.film_embed",
"random.randrange",
"os.path.join"
] | [((222, 261), 'os.path.join', 'os.path.join', (['__location__', '"""films.txt"""'], {}), "(__location__, 'films.txt')\n", (234, 261), False, 'import os\n'), ((360, 376), 'film.film_embed', 'film_embed', (['film'], {}), '(film)\n', (370, 376), False, 'from film import film_embed\n'), ((168, 179), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (177, 179), False, 'import os\n'), ((181, 206), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (196, 206), False, 'import os\n'), ((493, 505), 'random.randrange', 'randrange', (['i'], {}), '(i)\n', (502, 505), False, 'from random import randrange\n')] |
from time import sleep
from kubernetes import client, config
from kubernetes.client.rest import ApiException
from kubetools.constants import MANAGED_BY_ANNOTATION_KEY
from kubetools.exceptions import KubeBuildError
from kubetools.settings import get_settings
def get_object_labels_dict(obj):
return obj.metadata.labels or {}
def get_object_annotations_dict(obj):
return obj.metadata.annotations or {}
def get_object_name(obj):
if isinstance(obj, dict):
return obj['metadata']['name']
return obj.metadata.name
def is_kubetools_object(obj):
if get_object_annotations_dict(obj).get(MANAGED_BY_ANNOTATION_KEY) == 'kubetools':
return True
def _get_api_client(env):
return config.new_client_from_config(context=env)
def _get_k8s_core_api(env):
api_client = _get_api_client(env)
return client.CoreV1Api(api_client=api_client)
def _get_k8s_apps_api(env):
api_client = _get_api_client(env)
return client.AppsV1Api(api_client=api_client)
def _get_k8s_batch_api(env):
api_client = _get_api_client(env)
return client.BatchV1Api(api_client=api_client)
def _object_exists(api, method, namespace, obj):
try:
if namespace:
getattr(api, method)(
namespace=namespace,
name=get_object_name(obj),
)
else:
getattr(api, method)(
name=get_object_name(obj),
)
except ApiException as e:
if e.status == 404:
return False
raise
return True
def _wait_for(function, name='object'):
settings = get_settings()
sleeps = 0
while True:
if function():
return
sleep(settings.WAIT_SLEEP_TIME)
sleeps += 1
if sleeps > settings.WAIT_MAX_SLEEPS:
raise KubeBuildError(f'Timeout waiting for {name} to be ready')
def _wait_for_object(*args):
return _wait_for(lambda: _object_exists(*args) is True)
def _wait_for_no_object(*args):
return _wait_for(lambda: _object_exists(*args) is False)
def namespace_exists(env, namespace_obj):
k8s_core_api = _get_k8s_core_api(env)
return _object_exists(k8s_core_api, 'read_namespace', None, namespace_obj)
def list_namespaces(env):
k8s_core_api = _get_k8s_core_api(env)
return k8s_core_api.list_namespace().items
def create_namespace(env, namespace_obj):
k8s_core_api = _get_k8s_core_api(env)
k8s_namespace = k8s_core_api.create_namespace(
body=namespace_obj,
)
_wait_for_object(k8s_core_api, 'read_namespace', None, namespace_obj)
return k8s_namespace
def update_namespace(env, namespace_obj):
k8s_core_api = _get_k8s_core_api(env)
k8s_namespace = k8s_core_api.patch_namespace(
name=get_object_name(namespace_obj),
body=namespace_obj,
)
return k8s_namespace
def delete_namespace(env, namespace, namespace_obj):
k8s_core_api = _get_k8s_core_api(env)
k8s_core_api.delete_namespace(
name=get_object_name(namespace_obj),
)
_wait_for_no_object(k8s_core_api, 'read_namespace', None, namespace_obj)
def list_pods(env, namespace):
k8s_core_api = _get_k8s_core_api(env)
return k8s_core_api.list_namespaced_pod(namespace=namespace).items
def delete_pod(env, namespace, pod):
k8s_core_api = _get_k8s_core_api(env)
k8s_core_api.delete_namespaced_pod(
name=get_object_name(pod),
namespace=namespace,
)
_wait_for_no_object(k8s_core_api, 'read_namespaced_pod', namespace, pod)
def list_replica_sets(env, namespace):
k8s_apps_api = _get_k8s_apps_api(env)
return k8s_apps_api.list_namespaced_replica_set(namespace=namespace).items
def delete_replica_set(env, namespace, replica_set):
k8s_apps_api = _get_k8s_apps_api(env)
k8s_apps_api.delete_namespaced_replica_set(
name=get_object_name(replica_set),
namespace=namespace,
)
_wait_for_no_object(k8s_apps_api, 'read_namespaced_replica_set', namespace, replica_set)
def list_services(env, namespace):
k8s_core_api = _get_k8s_core_api(env)
return k8s_core_api.list_namespaced_service(namespace=namespace).items
def delete_service(env, namespace, service):
k8s_core_api = _get_k8s_core_api(env)
k8s_core_api.delete_namespaced_service(
name=get_object_name(service),
namespace=namespace,
)
_wait_for_no_object(k8s_core_api, 'read_namespaced_service', namespace, service)
def service_exists(env, namespace, service):
k8s_core_api = _get_k8s_core_api(env)
return _object_exists(k8s_core_api, 'read_namespaced_service', namespace, service)
def create_service(env, namespace, service):
k8s_core_api = _get_k8s_core_api(env)
k8s_service = k8s_core_api.create_namespaced_service(
body=service,
namespace=namespace,
)
_wait_for_object(k8s_core_api, 'read_namespaced_service', namespace, service)
return k8s_service
def update_service(env, namespace, service):
k8s_core_api = _get_k8s_core_api(env)
k8s_service = k8s_core_api.patch_namespaced_service(
name=get_object_name(service),
body=service,
namespace=namespace,
)
return k8s_service
def list_deployments(env, namespace):
k8s_apps_api = _get_k8s_apps_api(env)
return k8s_apps_api.list_namespaced_deployment(namespace=namespace).items
def delete_deployment(env, namespace, deployment):
k8s_apps_api = _get_k8s_apps_api(env)
k8s_apps_api.delete_namespaced_deployment(
name=get_object_name(deployment),
namespace=namespace,
)
_wait_for_no_object(k8s_apps_api, 'read_namespaced_deployment', namespace, deployment)
def deployment_exists(env, namespace, deployment):
k8s_apps_api = _get_k8s_apps_api(env)
return _object_exists(k8s_apps_api, 'read_namespaced_deployment', namespace, deployment)
def create_deployment(env, namespace, deployment):
k8s_apps_api = _get_k8s_apps_api(env)
k8s_deployment = k8s_apps_api.create_namespaced_deployment(
body=deployment,
namespace=namespace,
)
wait_for_deployment(env, namespace, k8s_deployment)
return k8s_deployment
def update_deployment(env, namespace, deployment):
k8s_apps_api = _get_k8s_apps_api(env)
k8s_deployment = k8s_apps_api.patch_namespaced_deployment(
name=get_object_name(deployment),
body=deployment,
namespace=namespace,
)
wait_for_deployment(env, namespace, k8s_deployment)
return k8s_deployment
def wait_for_deployment(env, namespace, deployment):
k8s_apps_api = _get_k8s_apps_api(env)
def check_deployment():
d = k8s_apps_api.read_namespaced_deployment(
name=get_object_name(deployment),
namespace=namespace,
)
if d.status.ready_replicas == d.status.replicas:
return True
_wait_for(check_deployment, get_object_name(deployment))
def list_jobs(env, namespace):
k8s_batch_api = _get_k8s_batch_api(env)
return k8s_batch_api.list_namespaced_job(namespace=namespace).items
def is_running(job):
conditions = job.status.conditions
if conditions is None:
return True
complete = any(condition.type == 'Complete' for condition in job.status.conditions)
return not complete
def list_running_jobs(env, namespace):
jobs = list_jobs(env, namespace)
return [job for job in jobs if is_running(job)]
def list_complete_jobs(env, namespace):
jobs = list_jobs(env, namespace)
return [job for job in jobs if not is_running(job)]
valid_propagation_policies = ["Orphan", "Background", "Foreground"]
def delete_job(env, namespace, job, propagation_policy=None):
if propagation_policy and propagation_policy not in valid_propagation_policies:
raise KubeBuildError(f"Propagation policy must be one of {valid_propagation_policies}")
args = {}
if propagation_policy:
args['propagation_policy'] = propagation_policy
k8s_batch_api = _get_k8s_batch_api(env)
k8s_batch_api.delete_namespaced_job(
name=get_object_name(job),
namespace=namespace,
**args,
)
_wait_for_no_object(k8s_batch_api, 'read_namespaced_job', namespace, job)
def create_job(env, namespace, job, wait_for_completion=True):
k8s_batch_api = _get_k8s_batch_api(env)
k8s_job = k8s_batch_api.create_namespaced_job(
body=job,
namespace=namespace,
)
if wait_for_completion:
wait_for_job(env, namespace, k8s_job)
return k8s_job
def wait_for_job(env, namespace, job):
k8s_batch_api = _get_k8s_batch_api(env)
def check_job():
j = k8s_batch_api.read_namespaced_job(
name=get_object_name(job),
namespace=namespace,
)
if j.status.succeeded == j.spec.completions:
return True
_wait_for(check_job, get_object_name(job))
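if __name__ == "__main__":
    # Minimal sketch of how these helpers compose (illustrative only): the kubeconfig
    # context "dev-cluster" and the namespace "demo" are assumptions, not part of this module.
    env = "dev-cluster"
    for namespace in list_namespaces(env):
        print(get_object_name(namespace))
    for pod in list_pods(env, "demo"):
        print(get_object_name(pod))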
| [
"kubernetes.config.new_client_from_config",
"kubetools.settings.get_settings",
"kubernetes.client.CoreV1Api",
"time.sleep",
"kubernetes.client.AppsV1Api",
"kubetools.exceptions.KubeBuildError",
"kubernetes.client.BatchV1Api"
] | [((719, 761), 'kubernetes.config.new_client_from_config', 'config.new_client_from_config', ([], {'context': 'env'}), '(context=env)\n', (748, 761), False, 'from kubernetes import client, config\n'), ((841, 880), 'kubernetes.client.CoreV1Api', 'client.CoreV1Api', ([], {'api_client': 'api_client'}), '(api_client=api_client)\n', (857, 880), False, 'from kubernetes import client, config\n'), ((960, 999), 'kubernetes.client.AppsV1Api', 'client.AppsV1Api', ([], {'api_client': 'api_client'}), '(api_client=api_client)\n', (976, 999), False, 'from kubernetes import client, config\n'), ((1080, 1120), 'kubernetes.client.BatchV1Api', 'client.BatchV1Api', ([], {'api_client': 'api_client'}), '(api_client=api_client)\n', (1097, 1120), False, 'from kubernetes import client, config\n'), ((1606, 1620), 'kubetools.settings.get_settings', 'get_settings', ([], {}), '()\n', (1618, 1620), False, 'from kubetools.settings import get_settings\n'), ((1704, 1735), 'time.sleep', 'sleep', (['settings.WAIT_SLEEP_TIME'], {}), '(settings.WAIT_SLEEP_TIME)\n', (1709, 1735), False, 'from time import sleep\n'), ((7796, 7882), 'kubetools.exceptions.KubeBuildError', 'KubeBuildError', (['f"""Propagation policy must be one of {valid_propagation_policies}"""'], {}), "(\n f'Propagation policy must be one of {valid_propagation_policies}')\n", (7810, 7882), False, 'from kubetools.exceptions import KubeBuildError\n'), ((1821, 1878), 'kubetools.exceptions.KubeBuildError', 'KubeBuildError', (['f"""Timeout waiting for {name} to be ready"""'], {}), "(f'Timeout waiting for {name} to be ready')\n", (1835, 1878), False, 'from kubetools.exceptions import KubeBuildError\n')] |
import requests
import json
import jwt
import cryptography
#ploads = {'Authorization': 'Bearer '}
#r = requests.get('https://api.music.apple.com/v1/me/library/playlists')
#print(r.headers)
#print(r.text)
#print(r.json())
import applemusicpy
secret_key = ''
key_id = '74G4697BU4'
team_id = 'QTM38LJQ3P'
am = applemusicpy.AppleMusic(secret_key, key_id, team_id)
results = am.search('<NAME>', types=['albums'], limit=5)
for item in results['results']['albums']['data']:
print(item['attributes']['name']) | [
"applemusicpy.AppleMusic"
] | [((310, 362), 'applemusicpy.AppleMusic', 'applemusicpy.AppleMusic', (['secret_key', 'key_id', 'team_id'], {}), '(secret_key, key_id, team_id)\n', (333, 362), False, 'import applemusicpy\n')] |
import numpy as np
class Sersic:
def b(self,n):
return 1.9992*n - 0.3271 + 4*(405*n)**-1
def kappa(self,x, y, n_sersic, r_eff, k_eff, q, center_x=0, center_y=0):
bn = self.b(n_sersic)
r = (x**2+y**2*q**-2)**0.5
return k_eff*np.exp(-bn*((r*r_eff**-1)**(n_sersic**-1)-1))
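# Minimal usage sketch (illustrative only): the parameter values below are assumptions
# chosen for demonstration, not taken from the original module.
if __name__ == "__main__":
    profile = Sersic()
    xx, yy = np.meshgrid(np.linspace(-1.0, 1.0, 3), np.linspace(-1.0, 1.0, 3))
    print(profile.kappa(xx, yy, n_sersic=4.0, r_eff=1.0, k_eff=0.5, q=0.8))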
| [
"numpy.exp"
] | [((270, 325), 'numpy.exp', 'np.exp', (['(-bn * ((r * r_eff ** -1) ** n_sersic ** -1 - 1))'], {}), '(-bn * ((r * r_eff ** -1) ** n_sersic ** -1 - 1))\n', (276, 325), True, 'import numpy as np\n')] |
import os, re, collections
from attrdict import AttrDict
from app_settings import file_search
from app_settings import FileFactory
__all__ = ["Config"]
class Config(object):
def __init__(self, files=None, dir=None, default=None, filter=None, **kwargs):
self._validate(files, dir, default)
self._create_files(files, dir, filter, default, **kwargs)
self._load_files()
def save(self, config_name):
if config_name in self._files:
self._save_config(config_name)
def save_all(self):
for _name in self._files:
self.save(_name)
@property
def files(self):
return list(self._files.keys())
def __getitem__(self, key):
return self._get_config(key)
def _create_files(self, files, dir, filter, default, **kwargs):
self._files = {}
files = self._get_files(files, dir, filter)
for f in files:
_file = FileFactory.create(f, default, **kwargs)
_name = self._transform_invalid_name(_file.name)
self._files[_name] = _file
def _get_files(self, files, dir, filter):
if isinstance(files, str):
return [files]
if isinstance(files, collections.Iterable):
return files
if dir:
return file_search(dir, filter, recursive=True)
return []
def _load_files(self):
for _name, _file in self._files.items():
self._add_config(_name, _file.load())
def _get_config(self, config_name):
return getattr(self, config_name)
def _add_config(self, config_name, config):
setattr(self, config_name, AttrDict(config))
def _save_config(self, name):
config_dict = dict(self._get_config(name))
self._files[name].flush(config_dict)
def _transform_invalid_name(self, filename):
return re.sub(r"[^A-Za-z]", "_", filename)
def _validate(self, files, dir, resolve_type):
if not files and not dir:
raise ValueError("No files or search directory provided.")
if files:
if isinstance(files, collections.Iterable):
for f in files:
assert isinstance(f, str)
else:
assert isinstance(files, str)
if dir:
assert isinstance(dir, str)
assert os.path.isdir(dir)
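if __name__ == "__main__":
    # Minimal sketch (illustrative only): "settings.yaml" is an assumed file name and must
    # exist in a format FileFactory supports; attribute names are derived from the file
    # name with non-letter characters replaced by underscores (see cfg.files).
    cfg = Config(files=["settings.yaml"])
    print(cfg.files)
    cfg.save_all()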
| [
"os.path.isdir",
"app_settings.FileFactory.create",
"attrdict.AttrDict",
"re.sub",
"app_settings.file_search"
] | [((1870, 1904), 're.sub', 're.sub', (['"""[^A-Za-z]"""', '"""_"""', 'filename'], {}), "('[^A-Za-z]', '_', filename)\n", (1876, 1904), False, 'import os, re, collections\n'), ((941, 981), 'app_settings.FileFactory.create', 'FileFactory.create', (['f', 'default'], {}), '(f, default, **kwargs)\n', (959, 981), False, 'from app_settings import FileFactory\n'), ((1303, 1343), 'app_settings.file_search', 'file_search', (['dir', 'filter'], {'recursive': '(True)'}), '(dir, filter, recursive=True)\n', (1314, 1343), False, 'from app_settings import file_search\n'), ((1656, 1672), 'attrdict.AttrDict', 'AttrDict', (['config'], {}), '(config)\n', (1664, 1672), False, 'from attrdict import AttrDict\n'), ((2354, 2372), 'os.path.isdir', 'os.path.isdir', (['dir'], {}), '(dir)\n', (2367, 2372), False, 'import os, re, collections\n')] |
#
# Copyright (c) 2022 Airbyte, Inc., all rights reserved.
#
import pytest
from requests.auth import HTTPBasicAuth
@pytest.fixture(name="config")
def config_fixture():
return {"domain": "test.freshdesk.com", "api_key": "secret_api_key", "requests_per_minute": 50, "start_date": "2002-02-10T22:21:44Z"}
@pytest.fixture(name="authenticator")
def authenticator_fixture(config):
return HTTPBasicAuth(username=config["api_key"], password="<PASSWORD>")
| [
"requests.auth.HTTPBasicAuth",
"pytest.fixture"
] | [((119, 148), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""config"""'}), "(name='config')\n", (133, 148), False, 'import pytest\n'), ((312, 348), 'pytest.fixture', 'pytest.fixture', ([], {'name': '"""authenticator"""'}), "(name='authenticator')\n", (326, 348), False, 'import pytest\n'), ((395, 459), 'requests.auth.HTTPBasicAuth', 'HTTPBasicAuth', ([], {'username': "config['api_key']", 'password': '"""<PASSWORD>"""'}), "(username=config['api_key'], password='<PASSWORD>')\n", (408, 459), False, 'from requests.auth import HTTPBasicAuth\n')] |
import pandas as pd
import pdb
import requests
import numpy as np
import os, sys
import xarray as xr
from datetime import datetime, timedelta
import logging
from scipy.interpolate import PchipInterpolator
import argparse
from collections import OrderedDict, defaultdict
class PchipOceanSlices(object):
def __init__(self, pLevelRange, basin=None, exceptBasin={None}, starttdx=None, appLocal=False):
self.appLocal = appLocal
self.datesSet = self.get_dates_set()
self.exceptBasin = exceptBasin
self.starttdx = starttdx
self.reduceMeas = False #removes excess points from db query
self.qcKeep = set([1,2]) # used to filter bad positions and dates
self.basin = basin # indian ocean only Set to None otherwise
self.presLevels = [ 2.5, 10. , 20. , 30. , 40. , 50. , 60. , 70. , 80. ,
90. , 100. , 110. , 120. , 130. , 140. , 150. , 160. , 170. ,
182.5, 200. , 220. , 240. , 260. , 280. , 300. , 320. , 340. ,
360. , 380. , 400. , 420. , 440. , 462.5, 500. , 550. , 600. ,
650. , 700. , 750. , 800. , 850. , 900. , 950. , 1000. , 1050. ,
1100. , 1150. , 1200. , 1250. , 1300. , 1350. , 1412.5, 1500. , 1600. ,
1700. , 1800. , 1900. , 1975., 2000.]
self.pLevelRange = pLevelRange
self.presRanges = self.make_rg_pres_ranges()
self.reduce_presLevels_and_presRanges()
@staticmethod
def get_dates_set(period=30):
"""
create a set of dates split into n periods.
period is in days.
"""
n_rows = int(np.floor(365/period))
datesSet = []
for year in range(2007, 2019):
yearSet = np.array_split(pd.date_range(str(year)+'-01-01', str(year)+'-12-31'), n_rows)
datesSet = datesSet + yearSet
keepEnds = lambda x: [x[0].strftime(format='%Y-%m-%d'), x[-1].strftime(format='%Y-%m-%d')]
datesSet = list(map(keepEnds, datesSet))
return datesSet
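    # Illustrative example: with the default 30-day period each year is split into twelve
    # windows, and the first entry is ['2007-01-01', '2007-01-31'].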
@staticmethod
def get_ocean_slice(startDate, endDate, presRange, intPres, basin=None, appLocal=None, reduceMeas=False):
'''
query horizontal slice of ocean for a specified time range
        startDate and endDate should be strings formatted like so: 'YYYY-MM-DD'
        presRange should be a string formatted like so: '[lowPres,highPres]'
Try to make the query small enough so as to not pass the 15 MB limit set by the database.
'''
if appLocal:
baseURL = 'http://localhost:3000'
else:
baseURL = 'https://argovis.colorado.edu'
baseURL += '/gridding/presSliceForInterpolation/'
startDateQuery = '?startDate=' + startDate
endDateQuery = '&endDate=' + endDate
presRangeQuery = '&presRange=' + presRange
intPresQuery = '&intPres=' + str(intPres)
url = baseURL + startDateQuery + endDateQuery + presRangeQuery + intPresQuery
if basin:
basinQuery = '&basin=' + basin
url += basinQuery
url += '&reduceMeas=' + str(reduceMeas).lower()
resp = requests.get(url)
# Consider any status other than 2xx an error
if not resp.status_code // 100 == 2:
raise ValueError("Error: Unexpected response {}".format(resp))
profiles = resp.json()
return profiles
def reject_profile(self, profile):
if not profile['position_qc'] in self.qcKeep:
reject = True
elif not profile['date_qc'] in self.qcKeep:
reject = True
elif len(profile['measurements']) < 2: # cannot be interpolated
reject = True
elif profile['BASIN'] in self.exceptBasin: # ignores basins
reject=True
else:
reject = False
return reject
@staticmethod
def make_profile_interpolation_function(x,y):
'''
creates interpolation function
        x and y contain the pressure and measurement values of a single profile; x must be increasing
'''
try:
f = PchipInterpolator(x, y, axis=1, extrapolate=False)
except Exception as err:
pdb.set_trace()
logging.warning(err)
raise Exception
return f
@staticmethod
def make_pres_ranges(presLevels):
"""
Pressure ranges are based off of depths catagory
surface: at 2.5 dbar +- 2.5
shallow: 10 to 182.5 dbar +- 5
medium: 200 to 462.5 dbar +- 15
deep: 500 to 1050 dbar +- 30
abbysal: 1100 to 1975 dbar +- 60
"""
stringifyArray = lambda x: str(x).replace(' ', '')
surfaceRange = [[presLevels[0] - 2.5, presLevels[0]+ 2.5]]
shallowRanges = [ [x - 5, x + 5] for x in presLevels[1:19] ]
mediumRanges = [ [x - 15, x + 15] for x in presLevels[19:33] ]
deepRanges = [ [x - 30, x + 30] for x in presLevels[33:45] ]
abbysalRanges = [ [x - 60, x + 60] for x in presLevels[45:] ]
presRanges = surfaceRange + shallowRanges + mediumRanges + deepRanges + abbysalRanges
presRanges = [stringifyArray(x) for x in presRanges]
return presRanges
@staticmethod
def make_rg_pres_ranges():
'''
uses pressure ranges defined in RG climatology
'''
rgFilename = '/home/tyler/Desktop/RG_ArgoClim_Temp.nc'
rg = xr.open_dataset(rgFilename, decode_times=False)
bnds = rg['PRESSURE_bnds']
presRanges = bnds.values.tolist()
stringifyArray = lambda x: str(x).replace(' ', '')
presRanges = [stringifyArray(x) for x in presRanges]
return presRanges
@staticmethod
def save_iDF(iDf, filename, tdx):
iDf.date = pd.to_datetime(iDf.date)
iDf.date = iDf.date.apply(lambda d: d.strftime("%d-%b-%Y %H:%M:%S"))
if not iDf.empty:
with open(filename, 'a') as f:
if tdx==0:
iDf.to_csv(f, header=True)
else:
iDf.to_csv(f, header=False)
@staticmethod
def record_to_array(measurements, xLab, yLab):
x = []
y = []
for meas in measurements:
x.append(meas[xLab])
y.append(meas[yLab])
return x, y
@staticmethod
def sort_list(x, y):
'''sort x based off of y'''
xy = zip(x, y)
ys = [y for _, y in sorted(xy)]
xs = sorted(x)
return xs, ys
@staticmethod
def unique_idxs(seq):
'''gets unique, non nan and non -999 indexes'''
tally = defaultdict(list)
for idx,item in enumerate(seq):
tally[item].append(idx)
dups = [ (key,locs) for key,locs in tally.items() ]
dups = [ (key, locs) for key, locs in dups if not np.isnan(key) or key not in {-999, None, np.NaN} ]
idxs = []
for dup in sorted(dups):
idxs.append(dup[1][0])
return idxs
def format_xy(self, x, y):
'''prep for interpolation'''
x2, y2 = self.sort_list(x, y)
try:
x_dup_idx = self.unique_idxs(x2)
xu = [x2[idx] for idx in x_dup_idx]
yu = [y2[idx] for idx in x_dup_idx]
# remove none -999 and none
y_nan_idx =[idx for idx,key in enumerate(yu) if not key in {-999, None, np.NaN} ]
except Exception as err:
pdb.set_trace()
print(err)
xu = [xu[idx] for idx in y_nan_idx]
yu = [yu[idx] for idx in y_nan_idx]
return xu, yu
def make_interpolated_profile(self, profile, xintp, xLab, yLab):
meas = profile['measurements']
if len(meas) == 0:
return None
if not yLab in meas[0].keys():
return None
x, y = self.record_to_array(meas, xLab, yLab)
x, y = self.format_xy(x, y)
if len(x) < 2: # pchip needs at least two points
return None
f = self.make_profile_interpolation_function(x, y)
rowDict = profile.copy()
del rowDict['measurements']
rowDict[xLab] = xintp
if len(meas) == 1 and meas[xLab][0] == xintp:
yintp = meas[yLab][0]
else:
yintp = f(xintp)
rowDict[yLab] = yintp
return rowDict
def make_interpolated_df(self, profiles, xintp, xLab='pres', yLab='temp'):
'''
make a dataframe of interpolated values set at xintp for each profile
xLab: the column name for the interpolation input x
yLab: the column to be interpolated
xintp: the values to be interpolated
'''
outArray = []
for profile in profiles:
rowDict = self.make_interpolated_profile(profile, xintp, xLab, yLab)
if rowDict:
outArray.append(rowDict)
outDf = pd.DataFrame(outArray)
outDf = outDf.rename({'_id': 'profile_id'}, axis=1)
outDf = outDf.dropna(subset=[xLab, yLab], how='any', axis=0)
logging.debug('number of rows in df: {}'.format(outDf.shape[0]))
logging.debug('number of profiles interpolated: {}'.format(len(outDf['profile_id'].unique())))
return outDf
def intp_pres(self, xintp, presRange):
if self.basin:
iTempFileName = 'iTempData_pres_{0}_basin_{1}.csv'.format(xintp, self.basin)
iPsalFileName = 'iPsalData_pres_{0}_basin_{1}.csv'.format(xintp, self.basin)
else:
iTempFileName = 'iTempData_pres_{}.csv'.format(xintp)
iPsalFileName = 'iPsalData_pres_{}.csv'.format(xintp)
start = datetime.now()
logging.debug('number of dates:{}'.format(len(self.datesSet)))
for tdx, dates in enumerate(self.datesSet):
if tdx < self.starttdx:
continue
logging.debug('starting interpolation at time index: {}'.format(tdx))
startDate, endDate = dates
try:
sliceProfiles = self.get_ocean_slice(startDate, endDate, presRange, xintp, self.basin, self.appLocal, self.reduceMeas)
except Exception as err:
logging.warning('profiles not recieved: {}'.format(err))
continue
logging.debug('xintp: {0} on tdx: {1}'.format(xintp, tdx))
logging.debug('number of profiles found in interval: {}'.format(len(sliceProfiles)))
try:
iTempDf = self.make_interpolated_df(sliceProfiles, xintp, 'pres', 'temp')
except Exception as err:
logging.warning('error when interpolating temp')
logging.warning(err)
continue
try:
iPsalDf = self.make_interpolated_df(sliceProfiles, xintp, 'pres', 'psal')
except Exception as err:
pdb.set_trace()
logging.warning('error when interpolating psal')
logging.warning(err)
continue
self.save_iDF(iTempDf, iTempFileName, tdx)
self.save_iDF(iPsalDf, iPsalFileName, tdx)
logging.debug('interpolation complete at time index: {}'.format(tdx))
timeTick = datetime.now()
logging.debug(timeTick.strftime(format='%Y-%m-%d %H:%M'))
dt = timeTick-start
logging.debug('completed run for psal {0} running for: {1}'.format(xintp, dt))
def reduce_presLevels_and_presRanges(self):
'''
reduces presLevels and pres ranges to those specified in pLevelRange
'''
self.startIdx = self.presLevels.index(self.pLevelRange[0])
self.endIdx = self.presLevels.index(self.pLevelRange[1])
self.presLevels = self.presLevels[ self.startIdx:self.endIdx ]
self.presRanges = self.presRanges[ self.startIdx:self.endIdx ]
def main(self):
logging.debug('inside main loop')
logging.debug('running pressure level ranges: {}'.format(self.pLevelRange))
for idx, presLevel in enumerate(self.presLevels):
xintp = presLevel
presRange = self.presRanges[idx]
self.intp_pres(xintp, presRange)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("--maxl", help="start on pressure level", type=float, nargs='?', default=2000)
parser.add_argument("--minl", help="end on pressure level", type=float, nargs='?', default=1975)
parser.add_argument("--basin", help="filter this basin", type=str, nargs='?', default=None)
parser.add_argument("--starttdx", help="start time index", type=int, nargs='?', default=0)
parser.add_argument("--logFileName", help="name of log file", type=str, nargs='?', default='pchipOceanSlices.log')
myArgs = parser.parse_args()
pLevelRange = [myArgs.minl, myArgs.maxl]
basin = myArgs.basin
starttdx = myArgs.starttdx
#idxStr = str(myArgs.minl) + ':' + str(myArgs.maxl)
#logFileName = 'pchipOceanSlices{}.log'.format(idxStr)
FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(format=FORMAT,
filename=myArgs.logFileName,
level=logging.DEBUG)
logging.debug('Start of log file')
startTime = datetime.now()
pos = PchipOceanSlices(pLevelRange, basin=basin, exceptBasin={}, starttdx=starttdx, appLocal=True)
pos.main()
endTime = datetime.now()
dt = endTime - startTime
logging.debug('end of log file for pressure level ranges: {}'.format(pLevelRange))
dtStr = 'time to complete: {} seconds'.format(dt.seconds)
print(dtStr)
logging.debug(dtStr) | [
"pandas.DataFrame",
"scipy.interpolate.PchipInterpolator",
"logging.debug",
"argparse.ArgumentParser",
"logging.basicConfig",
"logging.warning",
"numpy.floor",
"xarray.open_dataset",
"numpy.isnan",
"collections.defaultdict",
"pandas.to_datetime",
"pdb.set_trace",
"requests.get",
"datetime.datetime.now"
] | [((12241, 12337), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__', 'formatter_class': 'argparse.RawTextHelpFormatter'}), '(description=__doc__, formatter_class=argparse.\n RawTextHelpFormatter)\n', (12264, 12337), False, 'import argparse\n'), ((13171, 13260), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': 'FORMAT', 'filename': 'myArgs.logFileName', 'level': 'logging.DEBUG'}), '(format=FORMAT, filename=myArgs.logFileName, level=\n logging.DEBUG)\n', (13190, 13260), False, 'import logging\n'), ((13309, 13343), 'logging.debug', 'logging.debug', (['"""Start of log file"""'], {}), "('Start of log file')\n", (13322, 13343), False, 'import logging\n'), ((13360, 13374), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (13372, 13374), False, 'from datetime import datetime, timedelta\n'), ((13507, 13521), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (13519, 13521), False, 'from datetime import datetime, timedelta\n'), ((13721, 13741), 'logging.debug', 'logging.debug', (['dtStr'], {}), '(dtStr)\n', (13734, 13741), False, 'import logging\n'), ((3238, 3255), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (3250, 3255), False, 'import requests\n'), ((5476, 5523), 'xarray.open_dataset', 'xr.open_dataset', (['rgFilename'], {'decode_times': '(False)'}), '(rgFilename, decode_times=False)\n', (5491, 5523), True, 'import xarray as xr\n'), ((5822, 5846), 'pandas.to_datetime', 'pd.to_datetime', (['iDf.date'], {}), '(iDf.date)\n', (5836, 5846), True, 'import pandas as pd\n'), ((6664, 6681), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (6675, 6681), False, 'from collections import OrderedDict, defaultdict\n'), ((8910, 8932), 'pandas.DataFrame', 'pd.DataFrame', (['outArray'], {}), '(outArray)\n', (8922, 8932), True, 'import pandas as pd\n'), ((9666, 9680), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (9678, 9680), False, 'from datetime import datetime, timedelta\n'), ((11251, 11265), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (11263, 11265), False, 'from datetime import datetime, timedelta\n'), ((11904, 11937), 'logging.debug', 'logging.debug', (['"""inside main loop"""'], {}), "('inside main loop')\n", (11917, 11937), False, 'import logging\n'), ((1721, 1743), 'numpy.floor', 'np.floor', (['(365 / period)'], {}), '(365 / period)\n', (1729, 1743), True, 'import numpy as np\n'), ((4158, 4208), 'scipy.interpolate.PchipInterpolator', 'PchipInterpolator', (['x', 'y'], {'axis': '(1)', 'extrapolate': '(False)'}), '(x, y, axis=1, extrapolate=False)\n', (4175, 4208), False, 'from scipy.interpolate import PchipInterpolator\n'), ((4254, 4269), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (4267, 4269), False, 'import pdb\n'), ((4282, 4302), 'logging.warning', 'logging.warning', (['err'], {}), '(err)\n', (4297, 4302), False, 'import logging\n'), ((7473, 7488), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (7486, 7488), False, 'import pdb\n'), ((10612, 10660), 'logging.warning', 'logging.warning', (['"""error when interpolating temp"""'], {}), "('error when interpolating temp')\n", (10627, 10660), False, 'import logging\n'), ((10677, 10697), 'logging.warning', 'logging.warning', (['err'], {}), '(err)\n', (10692, 10697), False, 'import logging\n'), ((10884, 10899), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (10897, 10899), False, 'import pdb\n'), ((10916, 10964), 'logging.warning', 'logging.warning', (['"""error when interpolating psal"""'], {}), "('error when 
interpolating psal')\n", (10931, 10964), False, 'import logging\n'), ((10981, 11001), 'logging.warning', 'logging.warning', (['err'], {}), '(err)\n', (10996, 11001), False, 'import logging\n'), ((6876, 6889), 'numpy.isnan', 'np.isnan', (['key'], {}), '(key)\n', (6884, 6889), True, 'import numpy as np\n')] |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Role-Grant manager module """
import logging
import keystone.backends.api as api
logger = logging.getLogger(__name__) # pylint: disable=C0103
class Manager(object):
def __init__(self):
self.driver = api.ROLE
#
# Role-Grant Methods
#
def rolegrant_get_page(self, user_id, tenant_id, marker, limit):
""" Get one page of role grant list """
return self.driver.rolegrant_get_page(user_id, tenant_id, marker,
limit)
def rolegrant_get_page_markers(self, user_id, tenant_id, marker, limit):
""" Calculate pagination markers for role grants list """
return self.driver.rolegrant_get_page_markers(user_id, tenant_id,
marker, limit)
def list_global_roles_for_user(self, user_id):
return self.driver.list_global_roles_for_user(user_id)
def list_tenant_roles_for_user(self, user_id, tenant_id):
return self.driver.list_tenant_roles_for_user(user_id, tenant_id)
def rolegrant_list_by_role(self, role_id):
return self.driver.rolegrant_list_by_role(role_id)
def rolegrant_get_by_ids(self, user_id, role_id, tenant_id):
return self.driver.rolegrant_get_by_ids(user_id, role_id, tenant_id)
def rolegrant_delete(self, grant_id):
return self.driver.rolegrant_delete(grant_id)
def list_role_grants(self, role_id, user_id, tenant_id):
return self.driver.list_role_grants(role_id, user_id, tenant_id)
| [
"logging.getLogger"
] | [((726, 753), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (743, 753), False, 'import logging\n')] |
#
# OtterTune - target_objective.py
#
# Copyright (c) 2017-18, Carnegie Mellon University Database Group
#
import logging
from website.models import DBMSCatalog, MetricCatalog
from website.types import DBMSType
from ..base.target_objective import (BaseTargetObjective, BaseThroughput, LESS_IS_BETTER,
MORE_IS_BETTER)
LOG = logging.getLogger(__name__)
class CustomDBTime(BaseTargetObjective):
def __init__(self):
super().__init__(name='custom_db_time', pprint='Custom DB Time', unit='seconds',
short_unit='s', improvement=LESS_IS_BETTER)
def compute(self, metrics, observation_time):
total_wait_time = 0.
# dba_hist db_time will be 0 after cleaning if & only if it does not exist before cleaning
has_dba_hist = metrics['global.dba_hist_sys_time_model.db time'] > 0
for name, value in metrics.items():
if has_dba_hist and 'dba_hist_' not in name:
continue
if 'db cpu' in name:
total_wait_time += float(value)
elif 'time_waited_micro_fg' in name:
wait_time = float(value)
elif name.endswith('wait_class'):
# wait_class#:
# 0: Other; 1: Application; 2: Configuration; 3: Administrative; 4: Concurrency;
# 5: Commit; 6: Idle; 7: Network; 8: User I/O; 9: System I/O
if value == 'Idle':
wait_time = 0
total_wait_time += wait_time
return total_wait_time / 1000000.
class NormalizedDBTime(BaseTargetObjective):
def __init__(self):
super().__init__(name='db_time', pprint='Normalized DB Time', unit='seconds',
short_unit='s', improvement=LESS_IS_BETTER)
# This target objective is designed for Oracle v12.2.0.1.0
dbms = DBMSCatalog.objects.get(type=DBMSType.ORACLE, version='172.16.31.10.0')
self.default_values = {}
for metric in MetricCatalog.objects.filter(dbms=dbms):
self.default_values[metric.name] = metric.default
def reload_default_metrics(self):
dbms = DBMSCatalog.objects.get(type=DBMSType.ORACLE, version='172.16.31.10.0')
self.default_values = {}
for metric in MetricCatalog.objects.filter(dbms=dbms):
self.default_values[metric.name] = metric.default
def compute(self, metrics, observation_time):
extra_io_metrics = ["log file sync"]
not_io_metrics = ["read by other session"]
total_wait_time = 0.
has_dba_hist = metrics['global.dba_hist_sys_time_model.db time'] > 0
for name, value in metrics.items():
if has_dba_hist and 'dba_hist_' not in name:
continue
if 'db cpu' in name:
total_wait_time += float(value)
elif 'time_waited_micro_fg' in name:
default_wait_time = float(self.default_values[name])
wait_time = float(value)
elif 'total_waits_fg' in name:
default_total_waits = float(self.default_values[name])
total_waits = float(value)
elif name.endswith('wait_class'):
if value == 'Idle':
wait_time = 0
elif value in ('User I/O', 'System I/O') or \
any(n in name for n in extra_io_metrics):
if not any(n in name for n in not_io_metrics):
if default_total_waits == 0:
average_wait = 0
else:
average_wait = default_wait_time / default_total_waits
wait_time = total_waits * average_wait
total_wait_time += wait_time
return total_wait_time / 1000000.
class RawDBTime(BaseTargetObjective):
def __init__(self):
super().__init__(name='raw_db_time', pprint='Raw DB Time',
unit='seconds', short_unit='s', improvement=LESS_IS_BETTER)
def compute(self, metrics, observation_time):
has_dba_hist = metrics['global.dba_hist_sys_time_model.db time'] > 0
if has_dba_hist:
return metrics['global.dba_hist_sys_time_model.db time'] / 1000000.
return metrics['global.sys_time_model.db time'] / 1000000.
class TransactionCounter(BaseTargetObjective):
def __init__(self):
super().__init__(name='transaction_counter', pprint='Number of commits and rollbacks',
unit='transactions', short_unit='txn', improvement=MORE_IS_BETTER)
def compute(self, metrics, observation_time):
num_txns = sum(metrics[ctr] for ctr in ('global.sysstat.user commits',
'global.sysstat.user rollbacks'))
return num_txns
class ElapsedTime(BaseTargetObjective):
def __init__(self):
super().__init__(name='elapsed_time', pprint='Elapsed Time', unit='seconds',
short_unit='s', improvement=LESS_IS_BETTER)
def compute(self, metrics, observation_time):
return observation_time
target_objective_list = tuple((DBMSType.ORACLE, target_obj) for target_obj in [ # pylint: disable=invalid-name
BaseThroughput(transactions_counter=('global.sysstat.user commits',
'global.sysstat.user rollbacks')),
CustomDBTime(),
NormalizedDBTime(),
RawDBTime(),
TransactionCounter(),
ElapsedTime(),
])
| [
"website.models.DBMSCatalog.objects.get",
"website.models.MetricCatalog.objects.filter",
"logging.getLogger"
] | [((363, 390), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (380, 390), False, 'import logging\n'), ((1887, 1958), 'website.models.DBMSCatalog.objects.get', 'DBMSCatalog.objects.get', ([], {'type': 'DBMSType.ORACLE', 'version': '"""172.16.31.10.0"""'}), "(type=DBMSType.ORACLE, version='172.16.31.10.0')\n", (1910, 1958), False, 'from website.models import DBMSCatalog, MetricCatalog\n'), ((2014, 2053), 'website.models.MetricCatalog.objects.filter', 'MetricCatalog.objects.filter', ([], {'dbms': 'dbms'}), '(dbms=dbms)\n', (2042, 2053), False, 'from website.models import DBMSCatalog, MetricCatalog\n'), ((2171, 2242), 'website.models.DBMSCatalog.objects.get', 'DBMSCatalog.objects.get', ([], {'type': 'DBMSType.ORACLE', 'version': '"""172.16.31.10.0"""'}), "(type=DBMSType.ORACLE, version='172.16.31.10.0')\n", (2194, 2242), False, 'from website.models import DBMSCatalog, MetricCatalog\n'), ((2298, 2337), 'website.models.MetricCatalog.objects.filter', 'MetricCatalog.objects.filter', ([], {'dbms': 'dbms'}), '(dbms=dbms)\n', (2326, 2337), False, 'from website.models import DBMSCatalog, MetricCatalog\n')] |
import itertools
import pandas as pd
import numpy as np
# all permutations are already reverse-deleted
# all sequences are represented in binary
nucleotides = {'A':0,'C':1,'G':2,'T':3}
numtonuc = {0:'A',1:'C',2:'G',3:'T'}
complement = {0:3,3:0,1:2,2:1}
def window(fseq, window_size):
for i in range(len(fseq) - window_size + 1):
yield fseq[i:i+window_size]
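# Illustrative check (not part of the original module): list(window("ACGTA", 3)) yields ['ACG', 'CGT', 'GTA'].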
# return the first or the last number representation
def seqpos(kmer,last):
    return 1 << (1 + 2 * kmer) if last else 1 << 2 * kmer
def seq_permutation(seqlen):
return (range(seqpos(seqlen,False),seqpos(seqlen,True)))
def gen_nonreversed_kmer(k):
nonrevk = list()
for i in range(seqpos(k,False),seqpos(k,True)):
if i <= revcomp(i):
nonrevk.append(i)
return nonrevk
def itoseq(seqint):
if type(seqint) is not int:
return seqint
seq = ""
mask = 3
copy = int(seqint) # prevent changing the original value
while(copy) != 1:
seq = numtonuc[copy&mask] + seq
copy >>= 2
if copy == 0:
print("Could not find the append-left on the input sequence")
return 0
return seq
def seqtoi(seq,gappos=0,gapsize=0):
    # due to varying sequence lengths, this project always appends a sentinel 1 bit to the left
binrep = 1
gaps = range(gappos,gappos+gapsize)
for i in range(0,len(seq)):
if i in gaps:
continue
binrep <<= 2
binrep |= nucleotides[seq[i]]
return binrep
def revcomp(seqbin):
rev = 1
mask = 3
copy = int(seqbin)
while copy != 1:
rev <<= 2
rev |= complement[copy&mask]
copy >>= 2
if copy == 0:
print("Could not find the append-left on the input sequence")
return 0
return rev
def revcompstr(seq):
rev = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}
return "".join([rev[base] for base in reversed(seq)])
def insert_pos(seqint,base,pos): # pos is position from the right
return ((seqint << 2) & ~(2**(2*pos+2)-1)) | ((seqint & 2**(2*pos)-1) | (nucleotides[base] << pos*2))
#return (seqint << 2) | (seqint & 2**pos-1) & ~(3 << (pos*2)) | (nucleotides[base] << pos*2)
# this function already counts without its reverse complement,
# i.e. oligfreq + reverse merge in the original R code
# Input: panda list and kmer length
# Output: oligonucleotide count with reverse removed
def nonr_olig_freq(seqtbl,kmer,nonrev_list,gappos=0,gapsize=0):
    # with the gap model, the effective k-mer length becomes gapsize + kmer
gapmer = kmer+gapsize
# separator, since this is binary, the number is counted from the right
rightseparator = kmer-gappos
leftseparator = rightseparator+gapsize
olig_df = {k: [0] * len(seqtbl) for k in nonrev_list} # use dictionary first to avoid slow indexing from panda data frame
for i in range(0,len(seqtbl)): #22s for 3000
mask = (4**gapmer)-1
cpy = int(seqtbl[i])
while cpy > (4**gapmer)-1:
# gap calculation here
cur = cpy & mask
right = cur & ((4**rightseparator)-1)
left = (cur >> 2*leftseparator) << 2*rightseparator
gappedseqint = left | right
r = (1<<(2*kmer))|gappedseqint # append 1
rc = revcomp(r)
if r > rc:
r = rc
# 392secs with loc,434 secs with the regression. R time, 10secs for allocation, 3.97mins for linreg
# with 'at', only 23secs! -- 254secs total for 6mer
olig_df[r][i] += 1
cpy >>= 2
return pd.DataFrame(olig_df)
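if __name__ == "__main__":
    # Minimal round-trip sketch (illustrative only): encode a sequence as an integer,
    # decode it back, take reverse complements, and count non-reverse 2-mers.
    enc = seqtoi("AAGT")
    print(itoseq(enc), itoseq(revcomp(enc)), revcompstr("AAGT"))  # AAGT ACTT ACTT
    print(nonr_olig_freq([seqtoi("AAGTAAGT")], 2, gen_nonreversed_kmer(2)))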
| [
"pandas.DataFrame"
] | [((3541, 3562), 'pandas.DataFrame', 'pd.DataFrame', (['olig_df'], {}), '(olig_df)\n', (3553, 3562), True, 'import pandas as pd\n')] |
from idact.detail.jupyter_app.format_deployments_info import \
format_deployments_info
def test_format_deployments_info():
formatted = format_deployments_info(cluster_name='cluster1')
assert formatted == (
"\nTo access the allocation and notebook deployments from cluster,"
" you can use the following snippet.\n"
"You may need to change the cluster name if it's different in"
" the target environment.\n"
"----------------\n"
"from idact import show_cluster\n"
"cluster = show_cluster('cluster1')\n"
"deployments = cluster.pull_deployments()\n"
"nodes = deployments.nodes[-1]\n"
"nb = deployments.jupyter_deployments[-1]\n"
"----------------")
| [
"idact.detail.jupyter_app.format_deployments_info.format_deployments_info"
] | [((145, 193), 'idact.detail.jupyter_app.format_deployments_info.format_deployments_info', 'format_deployments_info', ([], {'cluster_name': '"""cluster1"""'}), "(cluster_name='cluster1')\n", (168, 193), False, 'from idact.detail.jupyter_app.format_deployments_info import format_deployments_info\n')] |
# Red color e-paper
import sys
import os
import lib_2inch7_ec_paper
import time
from PIL import Image,ImageDraw,ImageFont
from pms_a003 import Sensor
air_mon = Sensor()
air_mon.connect_hat(port="/dev/ttyS0", baudrate=9600)
while True:
try:
e_paper = lib_2inch7_ec_paper.Ec_Paper()
e_paper.init()
# Drawing on the image
black_image = Image.new('1', (e_paper.width, e_paper.height), 255) # 255: clear the frame
red_image = Image.new('1', (e_paper.width, e_paper.height), 255) #
font28 = ImageFont.truetype(('images/Font.ttc'), 28)
font18 = ImageFont.truetype(('images/Font.ttc'), 18)
# Drawing on the Horizontal image
horizontal_black_image = Image.new('1', (e_paper.height, e_paper.width), 255) # 298*126
horizontal_red_image = Image.new('1', (e_paper.height, e_paper.width), 255) # 298*126
values = air_mon.read()
print("PMS 1 value is {}".format(values.pm10_cf1))
print("PMS 2.5 value is {}".format(values.pm25_cf1))
print("PMS 10 value is {}".format(values.pm100_cf1))
drawblack = ImageDraw.Draw(horizontal_black_image)
drawred = ImageDraw.Draw(horizontal_red_image)
drawred.text((10, 0), 'AIR MONITORING', font = font28, fill = 0)
drawblack.text((10, 40), 'PMS 1 value = ', font = font28, fill = 0)
drawblack.text((10, 80), 'PMS 2.5 value = ', font = font28, fill = 0)
drawblack.text((10, 120), 'PMS 10 value =', font = font28, fill = 0)
drawred.text((210, 40), str(values.pm10_cf1), font = font28, fill = 0)
drawred.text((210, 80), str(values.pm25_cf1), font = font28, fill = 0)
drawred.text((210, 120),str(values.pm100_cf1), font = font28, fill = 0)
e_paper.display(e_paper.buffer(horizontal_black_image),e_paper.buffer(horizontal_red_image))
time.sleep(4)
e_paper.Clear_screen()
#e_paper.exit()
    except KeyboardInterrupt:
        # stop cleanly on Ctrl+C
        exit()
| [
"PIL.Image.new",
"lib_2inch7_ec_paper.Ec_Paper",
"time.sleep",
"PIL.ImageFont.truetype",
"PIL.ImageDraw.Draw",
"pms_a003.Sensor"
] | [((163, 171), 'pms_a003.Sensor', 'Sensor', ([], {}), '()\n', (169, 171), False, 'from pms_a003 import Sensor\n'), ((270, 300), 'lib_2inch7_ec_paper.Ec_Paper', 'lib_2inch7_ec_paper.Ec_Paper', ([], {}), '()\n', (298, 300), False, 'import lib_2inch7_ec_paper\n'), ((386, 438), 'PIL.Image.new', 'Image.new', (['"""1"""', '(e_paper.width, e_paper.height)', '(255)'], {}), "('1', (e_paper.width, e_paper.height), 255)\n", (395, 438), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((483, 535), 'PIL.Image.new', 'Image.new', (['"""1"""', '(e_paper.width, e_paper.height)', '(255)'], {}), "('1', (e_paper.width, e_paper.height), 255)\n", (492, 535), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((565, 606), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""images/Font.ttc"""', '(28)'], {}), "('images/Font.ttc', 28)\n", (583, 606), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((626, 667), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""images/Font.ttc"""', '(18)'], {}), "('images/Font.ttc', 18)\n", (644, 667), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((755, 807), 'PIL.Image.new', 'Image.new', (['"""1"""', '(e_paper.height, e_paper.width)', '(255)'], {}), "('1', (e_paper.height, e_paper.width), 255)\n", (764, 807), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((850, 902), 'PIL.Image.new', 'Image.new', (['"""1"""', '(e_paper.height, e_paper.width)', '(255)'], {}), "('1', (e_paper.height, e_paper.width), 255)\n", (859, 902), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((1165, 1203), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['horizontal_black_image'], {}), '(horizontal_black_image)\n', (1179, 1203), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((1222, 1258), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['horizontal_red_image'], {}), '(horizontal_red_image)\n', (1236, 1258), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((1924, 1937), 'time.sleep', 'time.sleep', (['(4)'], {}), '(4)\n', (1934, 1937), False, 'import time\n')] |
import itertools
class Subject:
__id_generator = itertools.count(0, 1)
@staticmethod
def get_all_subjects():
return ['Eng1', 'Eng2', 'Eng3', 'Eng4', 'Alg1', 'Alg2', 'Geo', 'PreC',
'Phys', 'Chem', 'SciE', 'Bio', 'Civ1', 'Civ2', 'Civ3', 'Civ4']
def __init__(self, name: str):
self.name = name
self.id = next(self.__id_generator)
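if __name__ == "__main__":
    # Minimal sketch: ids come from a counter shared by the whole class, so successive
    # instances are numbered 0, 1, 2, ...
    first, second = Subject("Eng1"), Subject("Alg1")
    print(first.id, second.id)  # -> 0 1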
| [
"itertools.count"
] | [((55, 76), 'itertools.count', 'itertools.count', (['(0)', '(1)'], {}), '(0, 1)\n', (70, 76), False, 'import itertools\n')] |
#!/usr/bin/python3
# More information, as well as the (non)licence can be found at: https://github.com/Katorone/Astrox-Imperium
# This script exports 2 files to csv:
# - MOD/items/items_database.txt -> items_database.csv
# - MOD/items/specs_database.txt -> specs_database.csv
# It will also do some sanity checking, which should be useful for mod & modpack creators:
# - Each file can only contain unique IDs (the exported csv will only contain the first match)
# - Every ID between items and documents needs to be unique (the script will warn)
# - Warns when an item doesn't have a doc for crafting
# - Check if the .png for an item/doc exists
# - Orphaned documents
# Example for windows: c:\path\to\Astrox\MOD\items\
source = '/home/user/.steam/steam/steamapps/common/Astrox Imperium/Astrox Imperium_Data/MOD/items/'
itemfile = 'items_database.txt'
docfile = 'specs_database.txt'
# Delimiter to use in the exported csv
delimiter = ';'
# List of item IDs that don't have a crafting document
ignoreUncraftable = [
"1", "2", "3", "4", "5", "6", "7", "8", "9", "10", # Resources - Raw
"11", "20", "21", "22", "23", "24", "25", "26", "27", "28", # Resources - Loot
"29", "100", "101", "103", "114", "102", "104", "109", "118", "113", # Materials
"105", "106", "107", "108", "110" , "2000", "111", "115", "112", # Materials
"121", "117", "116", "124", "119", "123", "120", "122", # Materials
"150", "151", "152", "153", "164", "155", "156", "157", "158", "168", # Components - Class A
"160", "161", "162", "163", "154", "159", "165", "166", "167", "169", # Components - Class B
"170", "200", "201", "202", "203", "204", "205", "206", "207", "208", # Components - Class C
"209", "210", "211", "212", "213", "214", "215", "216", "217", "218", # Components - Class D
"219", "2001", "2002", "2003", "2004", "2005", "2006", "2007", "2008", "2009", # Components - Class E
"2010", "2011", "2012", "2013", "2014", "2015", "2016", "2017", "2018", "2019", # Components - Class F
"2020", "2021", "2022", "2023", "2024", "2025", "2026", "2027", "2028", "2029", # Components - Class G
"2030", "2031", "2032", "2033", "2034", "2035", "2036", "2037", "2038", "2039", # Components - Class H
"2040", "2041", "2042", "2043", "2044", "2045", "2046", "2047", "2048", "2049", # Components - Class I
"2050", "2051", "2052", "2053", "2054", "2055", "2056", "2057", "2058", "2059", # Components - Class J
"2080", "2081", "2082", "400", "401", "402", # Components - Class M
"302", "300", "301", "351", "353", "350", "352", "330", "332", "331", # Trade Goods
"333", "341", "342", "340", "343", "303", "304", "305", "322", "324", # Trade Goods
"320", "321", "323", "325", "311", "310", "312", "313", "403", "404", # Trade Goods
"405", "406", "407", "408", # Trade Goods
"600", "601", "602", "603", "604", "605", "606", "607", "608", "609", # Life Support - Food
"620", "621", "622", "623", "624", "625", "626", "627", "628", "629", # Life Support - Water
"640", "641", "642", "643", "644", "645", "646", "647", "648", "649", # Life Support - Thermal
"660", "661", "662", "663", "664", "665", "666", "667", "668", "669", # Life Support - Waste
"690", "670", "671", "691", "672", "673", "692", "674", "675", "693", # Consumables
"676", "677", "700", "678", "679", "701", "680", "681", "710", "711", # Consumables
"712", "702", "703", "735", "736", "737", "738", # Consumables
]
## These settings tell the script which column titles it needs to look for when examining data.
header = {}
# You probably won't need to change this, unless Momo changes this in an update.
# Unique sorting key of items (items_database.txt)
header['itemId'] = '1 ITEM ID'
# Unique sorting key of documents (specs_database.txt)
header['docId'] = '1 DOC ID'
# Name of the item's image
header['itemImage'] = '6 icon image'
# Name of the document's image
header['docImage'] = '6 doc image'
# The item ID that a doc would craft:
header['docItemId'] = '9 CRAFTS ID'
### End of configuration ###
### Code starts here ###
import os
# reads data from path
def readFile(path):
fh = open(path, 'r', encoding='utf8', newline='\n')
data = fh.readlines()
fh.close()
return data
# Writes a list of data to path
def writeFile(path, dataList):
fh = open(path, 'w', encoding='utf8', newline='')
for line in dataList:
fh.write(line+'\r\n')
fh.close()
print("✔️ Finished writing: "+path)
# Takes a string and returns a list
def cleanLine(line, strip, delim):
line = line.strip()
if line == "": return line
if line[-1] == delim: line = line[0:-1]
return [x.strip(strip) for x in line.split(delim)]
# Finds the header, which is the last commented line at the start of a file
def getHeader(data):
for idx, line in enumerate(data):
if line[:2] != '//':
return data[idx-1][2:]
# Gets the index of the identifier from the header[list]
def getIdentifierIndex(header, identifier):
if identifier not in header: return -1
return header.index(identifier)
def parseFile(file, identifier):
lines = readFile(os.path.join(source, file))
header = cleanLine(getHeader(lines), '\t ', ';')
identifierIndex = getIdentifierIndex(header, identifier)
if identifierIndex == -1:
print("🛑 couldn't locate '"+identifier+"' in '"+source+"'")
quit()
# Parse the items, stored as item[id]
data = {}
data[delimiter+'header'+delimiter] = header # store the header for future use
doubles = {} # stores the ID that are duplicates
for line in lines:
if line[:2] == '//': continue # Ignore comments
line = cleanLine(line, '\t ', ';')
if line == "": continue # Ignore empty lines
id = line[identifierIndex]
if id in data: # Duplicate checking
doubles[id] = 2 if id not in doubles else doubles[id] + 1
else: # No duplicate, add the line.
data[id] = line
if len(doubles) > 0:
for id in doubles:
print("❌ The unique identifier '"+id+"' matched "+str(doubles[id])+" different lines.")
print("❌ Duplicates were found. The script will only use the first match per duplicate.")
print("------------------------------")
else:
print("✔️ There were no duplicate keys in: "+file)
return data
def composeCsv(data, target):
lines = []
for item in data: # data is a dictionary-type, which is guarantueed to be ordered by insertion.
joiner = '"'+delimiter+'"'
lines.append('"'+joiner.join(data[item])+'"')
writeFile(target, lines)
# Check itemData and docData for duplicate IDs
def findDuplicateEntries(fn1, data1, fn2, data2):
duplicates = {}
for id in data1.keys() & data2.keys():
if id == delimiter+'header'+delimiter: continue
duplicates[id] = 2 if id not in duplicates else duplicates[id] + 1
if len(duplicates) > 0:
for id in duplicates:
print("❌ The unique identifier '"+id+"' matched "+str(duplicates[id])+" times in "+fn1+" and "+fn2+".")
print("❌ Duplicate IDs were found across "+fn1+" and "+fn2+".")
print("------------------------------")
else:
print("✔️ There were no duplicate keys across: "+fn1+" and "+fn2+".")
# Checks that the column header[itemId] has en entry in the column header[docItemId]
def sanityCheck(items, itemHeader, docs, docsHeader):
itemHeaderIdentifier = getIdentifierIndex(items[delimiter+'header'+delimiter], itemHeader)
if itemHeaderIdentifier == -1:
print("🛑 couldn't locate '"+itemHeader+"' in findMissing(), unable to continue sanity check.")
return
docsHeaderIdentifier = getIdentifierIndex(docs[delimiter+'header'+delimiter], docsHeader)
if docsHeaderIdentifier == -1:
print("🛑 couldn't locate '"+docsHeader+"' in findMissing(), unable to continue sanity check.")
return
itemIDs = []
for i in items:
if i == delimiter+'header'+delimiter: continue
itemIDs.append(items[i][itemHeaderIdentifier])
docIDs = []
for i in docs:
if i == delimiter+'header'+delimiter: continue
docIDs.append(docs[i][docsHeaderIdentifier])
# Let's go over all items in docIDs and make sure they're unique
seen = set()
duplicates = [x for x in docIDs if x in seen or seen.add(x)]
if len(duplicates) > 0:
print("❌ The following item ID(s) have more than one crafting document: "+', '.join(duplicates))
print("------------------------------")
else:
print("✔️ All documents point to a unique item.")
# We have 2 lists of IDs, find the IDs from itemIDS that are missing in docIDs
docSet = set(docIDs)
ignoreSet = set(ignoreUncraftable)
missingDocs = [x for x in itemIDs if x not in docSet and x not in ignoreSet]
if len(missingDocs) > 0:
print("❌ The following item ID(s) do not have a crafting document: "+', '.join(missingDocs))
print(" Items that are uncraftable by design can be added to the 'ignoreUncraftable'-list in itemdb_2_csv.py")
print("------------------------------")
else:
print("✔️ All items have a crafting document attached (with "+str(len(ignoreUncraftable))+" ignored uncraftables).")
# For the orphaned check, we find docIDs that are missing in itemIDs
itemSet = set(itemIDs)
missingItems = [x for x in docIDs if x not in itemSet]
if len(missingItems) > 0:
print("❌ The following item ID(s) have a crafting document, but the item does not exist: "+', '.join(missingItems))
print("------------------------------")
else:
print("✔️ All documents have an existing item attached.")
def checkFileLinks(data, header):
headerIdentifier = getIdentifierIndex(data[delimiter+'header'+delimiter], header)
if headerIdentifier == -1:
print("🛑 couldn't locate '"+header+"' in checkFileLinks(), unable to continue sanity check.")
return
haserror = False
for i in data:
if i == delimiter+'header'+delimiter: continue
file = data[i][headerIdentifier]
if not os.path.isfile(os.path.join(source, file)):
haserror = True
print("❌ Item id '"+i+"' links to '"+file+"', which doesn't exists.")
if not haserror:
print("✔️ All files in column '"+header+"' exist.")
if __name__ == "__main__":
itemData = parseFile(itemfile, header["itemId"])
composeCsv(itemData, 'items_database.csv')
docData = parseFile(docfile, header["docId"])
composeCsv(docData, 'specs_database.csv')
# Check itemData and docData for duplicate IDs
findDuplicateEntries(itemfile, itemData, docfile, docData)
# Sanity checks:
# - Check if all items have a document
# - Check if all documents point to an existing item
# - Check if all documents point to a unique item
sanityCheck(itemData, header["itemId"], docData, header["docItemId"])
# Check if the .png for an item/doc exists
checkFileLinks(itemData, header["itemImage"])
checkFileLinks(docData, header["docImage"])
print("")
input("All done. Press enter to exit.")
| [
"os.path.join"
] | [((5372, 5398), 'os.path.join', 'os.path.join', (['source', 'file'], {}), '(source, file)\n', (5384, 5398), False, 'import os\n'), ((10245, 10271), 'os.path.join', 'os.path.join', (['source', 'file'], {}), '(source, file)\n', (10257, 10271), False, 'import os\n')] |
from setuptools import setup, find_packages
import versioneer
setup(
name="metagraph-stellargraph",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description="Stellargraph plugins for Metagraph",
author="<NAME>.",
packages=find_packages(
include=["metagraph_stellargraph", "metagraph_stellargraph.*"]
),
include_package_data=True,
install_requires=["metagraph", "stellargraph"],
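    # Register the plugin finder under the "metagraph.plugins" entry point so that
    # Metagraph can discover this package's plugins.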
entry_points={
"metagraph.plugins": "plugins=metagraph_stellargraph.plugins:find_plugins"
},
)
| [
"versioneer.get_version",
"setuptools.find_packages",
"versioneer.get_cmdclass"
] | [((117, 141), 'versioneer.get_version', 'versioneer.get_version', ([], {}), '()\n', (139, 141), False, 'import versioneer\n'), ((156, 181), 'versioneer.get_cmdclass', 'versioneer.get_cmdclass', ([], {}), '()\n', (179, 181), False, 'import versioneer\n'), ((272, 349), 'setuptools.find_packages', 'find_packages', ([], {'include': "['metagraph_stellargraph', 'metagraph_stellargraph.*']"}), "(include=['metagraph_stellargraph', 'metagraph_stellargraph.*'])\n", (285, 349), False, 'from setuptools import setup, find_packages\n')] |
#!/usr/bin/env python3
# Copyright 2021 <NAME>
"""Extension to math.isclose and cmath.isclose."""
import cmath
import logging
import math
import numbers
LOG = logging.getLogger("isclose")
try:
import version as _version
if not _version.version.is_backwards_compatible_with("1.0.0"):
raise ImportError
except ImportError:
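    # Fall back to a minimal stub whose Version() simply returns the given string when the
    # local `version` module is absent or not backwards compatible with 1.0.0.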
_version = type("_version", (object,), {"Version": lambda self, s: s})()
__all__ = ("version", "isclose", "IsClose")
version = _version.Version("1.1.0")
def isclose(a, b, **kwargs) -> bool:
"""polymorphic, parameterized isclose.
>>> isclose(1.0, 1.0)
True
>>> isclose(0.0, 1.0)
False
>>> isclose(1.0j, 1.0j)
True
>>> isclose(-1.0j, 1.0j)
False
"""
type_a = type(a)
type_b = type(b)
if type_a != type_b and issubclass(type_b, type_a):
x, y = b, a
else:
x, y = a, b
result = NotImplemented
try:
result = x.isclose(y, **kwargs)
except Exception:
pass
if result is NotImplemented:
try:
result = y.isclose(x, **kwargs)
except Exception:
pass
if result is NotImplemented:
rel_tol = kwargs.get("rel_tol", None)
abs_tol = kwargs.get("abs_tol", None)
try:
if isinstance(a, numbers.Real) and isinstance(b, numbers.Real):
result = math.isclose(
float(a),
float(b),
rel_tol=isclose.default_rel_tol
if rel_tol is None
else float(rel_tol),
abs_tol=isclose.default_abs_tol
if abs_tol is None
else float(abs_tol),
)
elif isinstance(a, numbers.Complex) and isinstance(b, numbers.Complex):
result = cmath.isclose(
complex(a),
complex(b),
rel_tol=isclose.default_rel_tol
if rel_tol is None
else float(rel_tol),
abs_tol=isclose.default_abs_tol
if abs_tol is None
else float(abs_tol),
)
elif a is b or a == b:
result = True
else:
difference = abs(a - b)
abs_result = abs_tol is not None and difference <= abs_tol
rel_result = rel_tol is not None and difference <= rel_tol * max(
abs(a), abs(b)
)
result = abs_result or rel_result
except Exception:
pass
if result is NotImplemented and not kwargs.get("return_NotImplemented", None):
raise TypeError(f"cannot compare {a!r} and {b!r}")
return result
isclose.default_rel_tol = 1e-9
isclose.default_abs_tol = 0.0
class IsClose:
"""Allows pre-defined closeness on polymorphic isclose."""
def __init__(self, **kwargs) -> None:
self._kwargs = kwargs
@property
def kwargs(self):
return self._kwargs
def __call__(self, a, b) -> bool:
"""Apply IsClose().
>>> myisclose = IsClose()
>>> myisclose(1.0, 1.0)
True
"""
return isclose(a, b, **self._kwargs)
def close(self):
"""close function.
>>> myisclose = IsClose()
>>> callable(myisclose.close)
True
"""
return self
def notclose(self):
"""not close function.
>>> myisclose = IsClose()
>>> callable(myisclose.notclose)
True
"""
return lambda a, b: not self(a, b)
def much_less_than(self):
"""definitely less function."""
return lambda a, b: a < b and not self(a, b)
def less_than_or_close(self):
"""less or close function."""
return lambda a, b: a < b or self(a, b)
def much_greater_than(self):
"""definitely greater function."""
return lambda a, b: a > b and not self(a, b)
def greater_than_or_close(self):
"""greater or close function."""
return lambda a, b: a > b or self(a, b)
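# Usage sketch (values are illustrative): tolerances can be fixed once and the resulting
# comparator reused, e.g.
#
#     close_enough = IsClose(rel_tol=1e-3)
#     close_enough(100.0, 100.05)          # True: 0.05 <= 1e-3 * 100.05
#     close_enough.notclose()(1.0, 2.0)    # True: 1.0 and 2.0 are not within 0.1%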
if __name__ == "__main__":
import doctest
doctest.testmod()
| [
"version.Version",
"version.version.is_backwards_compatible_with",
"logging.getLogger",
"doctest.testmod"
] | [((162, 190), 'logging.getLogger', 'logging.getLogger', (['"""isclose"""'], {}), "('isclose')\n", (179, 190), False, 'import logging\n'), ((474, 499), 'version.Version', '_version.Version', (['"""1.1.0"""'], {}), "('1.1.0')\n", (490, 499), True, 'import version as _version\n'), ((4193, 4210), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (4208, 4210), False, 'import doctest\n'), ((240, 294), 'version.version.is_backwards_compatible_with', '_version.version.is_backwards_compatible_with', (['"""1.0.0"""'], {}), "('1.0.0')\n", (285, 294), True, 'import version as _version\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc. and Contributors
# Distributed under the Apache License, 2.0 (apache.org/licenses/LICENSE-2.0)
# See accompanying Copyright.txt and LICENSE files for details
###############################################################################
import os
import tempfile
from typing import Dict
from danesfield_server.algorithms.generate_point_cloud import ResultRunDockerCommand
from danesfield_server.workflow import DanesfieldWorkflowException
from docker.types import DeviceRequest
from girder.models.collection import Collection
from girder.models.folder import Folder
from girder.models.user import User
from girder_worker.docker.tasks import docker_run
from girder_worker.docker.transforms.girder import (
GirderUploadVolumePathToFolder,
)
from girder_worker.docker.transforms import BindMountVolume, VolumePath
from danesfield_server.algorithms.common import (
addJobInfo,
createDockerRunArguments,
createGirderClient,
)
from ..constants import DanesfieldStep, DockerImage
from ..workflow_step import DanesfieldWorkflowStep
from ..workflow_utilities import getWorkingSet
from ..models.workingSet import WorkingSet
class RunDanesfieldImageless(DanesfieldWorkflowStep):
"""
    Step that runs the imageless Danesfield workflow on a previously generated point cloud.
Supports the following options:
- aoiBBox (required)
"""
def __init__(self):
super(RunDanesfieldImageless, self).__init__("Imageless")
self.addDependency(DanesfieldStep.GENERATE_POINT_CLOUD)
def run(self, jobInfo, outputFolder):
gc = createGirderClient(jobInfo.requestInfo)
baseWorkingSet: Dict = getWorkingSet(DanesfieldStep.INIT, jobInfo)
# Get point cloud working set
pointCloudWorkingSet: Dict = getWorkingSet(
DanesfieldStep.GENERATE_POINT_CLOUD, jobInfo
)
core3dCollection = Collection().createCollection(
name="core3d",
creator=User().getAdmins().next(),
description="",
public=True,
reuseExisting=True,
)
modelsFolder = Folder().findOne(
{
"parentId": core3dCollection["_id"],
"name": "models",
}
)
if modelsFolder is None:
raise DanesfieldWorkflowException(
"Models folder has not been created and populated"
)
# Download models folder
models_folder = tempfile.mkdtemp()
modelsFolderVolume = BindMountVolume(models_folder, models_folder)
gc.downloadFolderRecursive(modelsFolder["_id"], models_folder)
# Get single file, there will only be one
point_cloud_path = tempfile.mktemp(suffix=".las")
pointCloudFile = self.getFiles(pointCloudWorkingSet)[0]
gc.downloadFile(str(pointCloudFile["_id"]), point_cloud_path)
pointCloudFileVolume = BindMountVolume(point_cloud_path, point_cloud_path)
# Create output dir
outputDir = tempfile.mkdtemp()
outputDirVolume = BindMountVolume(host_path=outputDir, container_path=outputDir)
# Create config file
config_file, config_file_path = tempfile.mkstemp(suffix=".ini")
configFileVolume = BindMountVolume(config_file_path, config_file_path)
with open(config_file, "w") as in_config_file:
# Configure paths
paths_section = (
"[paths]\n"
+ f"p3d_fpath = {point_cloud_path}\n"
+ f"work_dir = {outputDir}\n"
# Supply empty dir so no errors are generated
+ f"rpc_dir = {tempfile.mkdtemp()}\n"
)
in_config_file.write(f"{paths_section}\n")
# Set name prefix for output files
aoi_section = (
"[aoi]\n" + f"name = {baseWorkingSet['name'].replace(' ', '_')}"
)
in_config_file.write(f"{aoi_section}\n")
            # Ground sample distance of output imagery in meters per pixel
# Default is 0.25
params_section = "[params]\n" + "gsd = 0.25\n"
in_config_file.write(f"{params_section}\n")
# Parameters for the roof geon extraction step
roof_section = (
"[roof]\n"
+ f"model_dir = {models_folder}/Columbia Geon Segmentation Model\n"
+ "model_prefix = dayton_geon"
)
in_config_file.write(f"{roof_section}\n")
# Ensure folder exists
existing_folder_id = baseWorkingSet.get("output_folder_id")
if existing_folder_id is None:
output_folder = Folder().createFolder(
parent=core3dCollection,
parentType="collection",
name=f"(Imageless) {baseWorkingSet['name']}",
reuseExisting=True,
)
existing_folder_id = output_folder["_id"]
baseWorkingSet["output_folder_id"] = output_folder["_id"]
WorkingSet().save(baseWorkingSet)
containerArgs = [
"python",
"/danesfield/tools/run_danesfield.py",
config_file_path,
]
resultHooks = [
# - Fix output folder permissions
ResultRunDockerCommand(
DockerImage.DANESFIELD,
command=["chown", "-R", f"{os.getuid()}:{os.getgid()}", outputDir],
volumes=outputDirVolume._repr_json_(),
),
# Upload results
GirderUploadVolumePathToFolder(
VolumePath(".", volume=outputDirVolume),
existing_folder_id,
),
]
asyncResult = docker_run.delay(
device_requests=[DeviceRequest(count=-1, capabilities=[["gpu"]])],
shm_size="8G",
volumes=[
pointCloudFileVolume,
configFileVolume,
outputDirVolume,
modelsFolderVolume,
],
**createDockerRunArguments(
image=f"{DockerImage.DANESFIELD}:latest",
containerArgs=containerArgs,
jobTitle=f"Run imageless workflow on [{baseWorkingSet['name']}]",
jobType=self.name,
user=jobInfo.requestInfo.user,
resultHooks=resultHooks,
),
)
# Add info for job event listeners
job = asyncResult.job
job = addJobInfo(
job,
jobId=jobInfo.jobId,
stepName=self.name,
workingSetId=baseWorkingSet["_id"],
)
return job
| [
"danesfield_server.algorithms.common.createGirderClient",
"girder_worker.docker.transforms.BindMountVolume",
"girder_worker.docker.transforms.VolumePath",
"tempfile.mkstemp",
"girder.models.folder.Folder",
"os.getgid",
"danesfield_server.algorithms.common.addJobInfo",
"danesfield_server.algorithms.common.createDockerRunArguments",
"girder.models.collection.Collection",
"tempfile.mkdtemp",
"os.getuid",
"danesfield_server.workflow.DanesfieldWorkflowException",
"tempfile.mktemp",
"girder.models.user.User",
"docker.types.DeviceRequest"
] | [((1672, 1711), 'danesfield_server.algorithms.common.createGirderClient', 'createGirderClient', (['jobInfo.requestInfo'], {}), '(jobInfo.requestInfo)\n', (1690, 1711), False, 'from danesfield_server.algorithms.common import addJobInfo, createDockerRunArguments, createGirderClient\n'), ((2559, 2577), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (2575, 2577), False, 'import tempfile\n'), ((2607, 2652), 'girder_worker.docker.transforms.BindMountVolume', 'BindMountVolume', (['models_folder', 'models_folder'], {}), '(models_folder, models_folder)\n', (2622, 2652), False, 'from girder_worker.docker.transforms import BindMountVolume, VolumePath\n'), ((2802, 2832), 'tempfile.mktemp', 'tempfile.mktemp', ([], {'suffix': '""".las"""'}), "(suffix='.las')\n", (2817, 2832), False, 'import tempfile\n'), ((2998, 3049), 'girder_worker.docker.transforms.BindMountVolume', 'BindMountVolume', (['point_cloud_path', 'point_cloud_path'], {}), '(point_cloud_path, point_cloud_path)\n', (3013, 3049), False, 'from girder_worker.docker.transforms import BindMountVolume, VolumePath\n'), ((3099, 3117), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (3115, 3117), False, 'import tempfile\n'), ((3144, 3206), 'girder_worker.docker.transforms.BindMountVolume', 'BindMountVolume', ([], {'host_path': 'outputDir', 'container_path': 'outputDir'}), '(host_path=outputDir, container_path=outputDir)\n', (3159, 3206), False, 'from girder_worker.docker.transforms import BindMountVolume, VolumePath\n'), ((3277, 3308), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': '""".ini"""'}), "(suffix='.ini')\n", (3293, 3308), False, 'import tempfile\n'), ((3336, 3387), 'girder_worker.docker.transforms.BindMountVolume', 'BindMountVolume', (['config_file_path', 'config_file_path'], {}), '(config_file_path, config_file_path)\n', (3351, 3387), False, 'from girder_worker.docker.transforms import BindMountVolume, VolumePath\n'), ((6548, 6645), 'danesfield_server.algorithms.common.addJobInfo', 'addJobInfo', (['job'], {'jobId': 'jobInfo.jobId', 'stepName': 'self.name', 'workingSetId': "baseWorkingSet['_id']"}), "(job, jobId=jobInfo.jobId, stepName=self.name, workingSetId=\n baseWorkingSet['_id'])\n", (6558, 6645), False, 'from danesfield_server.algorithms.common import addJobInfo, createDockerRunArguments, createGirderClient\n'), ((2391, 2470), 'danesfield_server.workflow.DanesfieldWorkflowException', 'DanesfieldWorkflowException', (['"""Models folder has not been created and populated"""'], {}), "('Models folder has not been created and populated')\n", (2418, 2470), False, 'from danesfield_server.workflow import DanesfieldWorkflowException\n'), ((1973, 1985), 'girder.models.collection.Collection', 'Collection', ([], {}), '()\n', (1983, 1985), False, 'from girder.models.collection import Collection\n'), ((2197, 2205), 'girder.models.folder.Folder', 'Folder', ([], {}), '()\n', (2203, 2205), False, 'from girder.models.folder import Folder\n'), ((5660, 5699), 'girder_worker.docker.transforms.VolumePath', 'VolumePath', (['"""."""'], {'volume': 'outputDirVolume'}), "('.', volume=outputDirVolume)\n", (5670, 5699), False, 'from girder_worker.docker.transforms import BindMountVolume, VolumePath\n'), ((6101, 6351), 'danesfield_server.algorithms.common.createDockerRunArguments', 'createDockerRunArguments', ([], {'image': 'f"""{DockerImage.DANESFIELD}:latest"""', 'containerArgs': 'containerArgs', 'jobTitle': 'f"""Run imageless workflow on [{baseWorkingSet[\'name\']}]"""', 'jobType': 'self.name', 'user': 'jobInfo.requestInfo.user', 
'resultHooks': 'resultHooks'}), '(image=f\'{DockerImage.DANESFIELD}:latest\',\n containerArgs=containerArgs, jobTitle=\n f"Run imageless workflow on [{baseWorkingSet[\'name\']}]", jobType=self.\n name, user=jobInfo.requestInfo.user, resultHooks=resultHooks)\n', (6125, 6351), False, 'from danesfield_server.algorithms.common import addJobInfo, createDockerRunArguments, createGirderClient\n'), ((4743, 4751), 'girder.models.folder.Folder', 'Folder', ([], {}), '()\n', (4749, 4751), False, 'from girder.models.folder import Folder\n'), ((5832, 5879), 'docker.types.DeviceRequest', 'DeviceRequest', ([], {'count': '(-1)', 'capabilities': "[['gpu']]"}), "(count=-1, capabilities=[['gpu']])\n", (5845, 5879), False, 'from docker.types import DeviceRequest\n'), ((3724, 3742), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (3740, 3742), False, 'import tempfile\n'), ((2051, 2057), 'girder.models.user.User', 'User', ([], {}), '()\n', (2055, 2057), False, 'from girder.models.user import User\n'), ((5460, 5471), 'os.getuid', 'os.getuid', ([], {}), '()\n', (5469, 5471), False, 'import os\n'), ((5474, 5485), 'os.getgid', 'os.getgid', ([], {}), '()\n', (5483, 5485), False, 'import os\n')] |
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
# This is the generated API
from xosapi import *
from core.views.legacyapi import LegacyXMLRPC
from core.views.services import ServiceGridView
#from core.views.analytics import AnalyticsAjaxView
from core.models import *
from rest_framework import generics
from core.dashboard.sites import SitePlus
from django.http import HttpResponseRedirect
#from core.xoslib import XOSLibDataView
admin.site = SitePlus()
admin.autodiscover()
def redirect_to_apache(request):
""" bounce a request back to the apache server that is running on the machine """
apache_url = "http://%s%s" % (request.META['HOSTNAME'], request.path)
return HttpResponseRedirect(apache_url)
urlpatterns = patterns('',
# Examples:
url(r'^stats', 'core.views.stats.Stats', name='stats'),
url(r'^observer', 'core.views.observer.Observer', name='observer'),
url(r'^serviceGrid', ServiceGridView.as_view(), name='serviceGrid'),
url(r'^docs/', include('rest_framework_swagger.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
url(r'^', include(admin.site.urls)),
#url(r'^profile/home', 'core.views.home'),
# url(r'^admin/xoslib/(?P<name>\w+)/$', XOSLibDataView.as_view(), name="xoslib"),
url(r'^xmlrpc/legacyapi/$', 'core.views.legacyapi.LegacyXMLRPC', name='xmlrpc'),
# url(r'^analytics/(?P<name>\w+)/$', AnalyticsAjaxView.as_view(), name="analytics"),
url(r'^files/', redirect_to_apache),
#Adding in rest_framework urls
url(r'^xos/', include('rest_framework.urls', namespace='rest_framework')),
# XOSLib rest methods
url(r'^xoslib/', include('core.xoslib.methods', namespace='xoslib')),
) + get_REST_patterns()
| [
"django.contrib.admin.autodiscover",
"django.conf.urls.include",
"core.views.services.ServiceGridView.as_view",
"django.conf.urls.url",
"core.dashboard.sites.SitePlus",
"django.http.HttpResponseRedirect"
] | [((538, 548), 'core.dashboard.sites.SitePlus', 'SitePlus', ([], {}), '()\n', (546, 548), False, 'from core.dashboard.sites import SitePlus\n'), ((549, 569), 'django.contrib.admin.autodiscover', 'admin.autodiscover', ([], {}), '()\n', (567, 569), False, 'from django.contrib import admin\n'), ((778, 810), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['apache_url'], {}), '(apache_url)\n', (798, 810), False, 'from django.http import HttpResponseRedirect\n'), ((859, 912), 'django.conf.urls.url', 'url', (['"""^stats"""', '"""core.views.stats.Stats"""'], {'name': '"""stats"""'}), "('^stats', 'core.views.stats.Stats', name='stats')\n", (862, 912), False, 'from django.conf.urls import patterns, include, url\n'), ((919, 984), 'django.conf.urls.url', 'url', (['"""^observer"""', '"""core.views.observer.Observer"""'], {'name': '"""observer"""'}), "('^observer', 'core.views.observer.Observer', name='observer')\n", (922, 984), False, 'from django.conf.urls import patterns, include, url\n'), ((1539, 1617), 'django.conf.urls.url', 'url', (['"""^xmlrpc/legacyapi/$"""', '"""core.views.legacyapi.LegacyXMLRPC"""'], {'name': '"""xmlrpc"""'}), "('^xmlrpc/legacyapi/$', 'core.views.legacyapi.LegacyXMLRPC', name='xmlrpc')\n", (1542, 1617), False, 'from django.conf.urls import patterns, include, url\n'), ((1714, 1748), 'django.conf.urls.url', 'url', (['"""^files/"""', 'redirect_to_apache'], {}), "('^files/', redirect_to_apache)\n", (1717, 1748), False, 'from django.conf.urls import patterns, include, url\n'), ((1012, 1037), 'core.views.services.ServiceGridView.as_view', 'ServiceGridView.as_view', ([], {}), '()\n', (1035, 1037), False, 'from core.views.services import ServiceGridView\n'), ((1080, 1118), 'django.conf.urls.include', 'include', (['"""rest_framework_swagger.urls"""'], {}), "('rest_framework_swagger.urls')\n", (1087, 1118), False, 'from django.conf.urls import patterns, include, url\n'), ((1218, 1258), 'django.conf.urls.include', 'include', (['"""django.contrib.admindocs.urls"""'], {}), "('django.contrib.admindocs.urls')\n", (1225, 1258), False, 'from django.conf.urls import patterns, include, url\n'), ((1333, 1357), 'django.conf.urls.include', 'include', (['admin.site.urls'], {}), '(admin.site.urls)\n', (1340, 1357), False, 'from django.conf.urls import patterns, include, url\n'), ((1374, 1398), 'django.conf.urls.include', 'include', (['admin.site.urls'], {}), '(admin.site.urls)\n', (1381, 1398), False, 'from django.conf.urls import patterns, include, url\n'), ((1805, 1863), 'django.conf.urls.include', 'include', (['"""rest_framework.urls"""'], {'namespace': '"""rest_framework"""'}), "('rest_framework.urls', namespace='rest_framework')\n", (1812, 1863), False, 'from django.conf.urls import patterns, include, url\n'), ((1914, 1964), 'django.conf.urls.include', 'include', (['"""core.xoslib.methods"""'], {'namespace': '"""xoslib"""'}), "('core.xoslib.methods', namespace='xoslib')\n", (1921, 1964), False, 'from django.conf.urls import patterns, include, url\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : retrieve_1hop.py
@Author : yyhaker
@Contact : <EMAIL>
@Time : 2020/04/07 16:33:58
'''
"""
Retrieve from the knowledge graph: for a given token, retrieve three parts:
1. sub-graph
   (1) retrieve the triples whose head or tail contains the token, and build sub-graph G from them
2. sub-graph triples
3. core_entity
"""
import sys
sys.path.append(".")
import random
import pickle
import argparse
import os
import nltk
import logging
import string
from tqdm import tqdm
from nltk.corpus import wordnet as wn
from multiprocessing import Pool
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
PROCESSES = 60
def extract_en_triples(conceptnet_path):
"""检索出所有英文的三元组"""
en_triples = []
with open(conceptnet_path, 'r', encoding="utf-8") as f:
for line in f.readlines():
ls = line.split('\t')
if ls[2].startswith('/c/en/') and ls[3].startswith('/c/en/'):
"""
Some preprocessing:
- Remove part-of-speech encoding.
- Split("/")[-1] to trim the "/c/en/" and just get the entity name, convert all to
- Lowercase for uniformity.
"""
rel = ls[1].split("/")[-1].lower()
head = del_pos(ls[2]).split("/")[-1].lower()
tail = del_pos(ls[3]).split("/")[-1].lower()
if not head.replace("_", "").replace("-", "").isalpha():
continue
if not tail.replace("_", "").replace("-", "").isalpha():
continue
triple = (head, rel, tail)
en_triples.append(triple)
return en_triples
def extract_triples(conceptnet_path):
"""检索出conceptnet中的三元组"""
conceptnet_triples = []
with open(conceptnet_path, 'r', encoding="utf-8") as f:
for line in f.readlines():
ls = line.split(",")
head = ls[0].strip()
rel = ls[1].strip()
tail = ls[2].strip()
triple = (head, rel, tail)
conceptnet_triples.append(triple)
return conceptnet_triples
# def build_mapping(triples, entity_path, relation_path):
# """build mapping of entities and triples"""
# entity2id = {}
# relation2id = {}
# for triple in triples:
# head, rel, tail = triple[0], triple[1], triple[2]
# if head not in entity2id.keys():
# entity2id[head] = len(entity2id)
# if tail not in entity2id.keys():
# entity2id[tail] = len(entity2id)
# if rel not in relation2id.keys():
# relation2id[rel] = len(relation2id)
# with open(entity_path, 'w') as f_e:
# for entity, idx in entity2id.items():
# f_e.write(entity + " " + str(idx))
# f_e.write('\n')
# with open(relation_path, 'w') as f_r:
# for relation, idx in relation2id.items():
# f_r.write(relation + " " + str(idx))
# f_r.write('\n')
# id2entity = {v:k for k,v in entity2id.items()}
# id2relation = {v:k for k,v in relation2id.items()}
# return entity2id, id2entity, relation2id, id2relation
def get_concept_mapping(entity_path, relation_path):
"""read entity and relation mapping file"""
entity2id = {}
relation2id = {}
with open(entity_path, 'r', encoding="utf-8") as f:
for line in f.readlines():
ls = line.split(" ")
# pass first line
if len(ls) <= 1:
continue
entity = ls[0].strip()
idx = int(ls[1].strip())
entity2id[entity] = idx
with open(relation_path, 'r', encoding="utf-8") as f:
for line in f.readlines():
ls = line.split(" ")
# pass first line
if len(ls) <= 1:
continue
rel = ls[0].strip()
idx = int(ls[1].strip())
relation2id[rel] = idx
return entity2id, relation2id
def search_triples(token, conceptnet_triples, limit=20):
"""检索出头或者尾部包含该词的三元组"""
triples = []
core_entitys = set()
# search triples
for triple in conceptnet_triples:
head, rel, tail = triple[0], triple[1], triple[2]
if token in head.split("_") or token in tail.split("_"):
triples.append(triple)
# limit retrieved knowledge here
if len(triples) > limit:
break
if token in head.split("_"):
core_entitys.add(head)
if token in tail.split("_"):
core_entitys.add(tail)
# define core entity, choose the shortest
core_entitys = list(core_entitys)
if len(core_entitys) != 0:
min_len = len(core_entitys[0])
min_entity = core_entitys[0]
for entity in core_entitys:
if len(entity) < min_len:
min_len = len(entity)
min_entity = entity
core_entity = min_entity
else:
core_entity = None
return triples, core_entity
def search_triple_neighbor(cur_triple, conceptnet_triples):
"""检索出三元组的相邻的三元组"""
neighbor_triples = []
cur_head, cur_rel, cur_tail = cur_triple[0], cur_triple[1], cur_triple[2]
for triple in conceptnet_triples:
if triple == cur_triple:
continue
head, rel, tail = triple[0], triple[1], triple[2]
if cur_head == head or cur_head == tail or cur_tail == head or cur_tail == tail:
neighbor_triples.append(triple)
return neighbor_triples
def build_graph(triples):
"""连接相同的实体构建子图, 返回子图G"""
# x : [num_nodes, num_node_features]
# edge : [2, num_edges]
# edge_attr : [num_edges, num_edge_features]
nodes = []
edges = []
edges_attr = []
token_triples = []
for triple in triples:
head, rel, tail = triple[0], triple[1], triple[2]
# remove empty entity triple
if head == "" or head == " ":
continue
if tail == "" or tail == " ":
continue
# add nodes
if head not in nodes:
nodes.append(head)
if tail not in nodes:
nodes.append(tail)
# add edge
edges.append([head, tail])
edges.append([tail, head])
edges_attr.append(rel)
edges_attr.append(rel)
token_triples.append(triple)
assert len(edges) == len(edges_attr)
return nodes, edges, edges_attr, token_triples
def build_graph_for_token(token, conceptnet_triples):
"""根据给定的token,构建子图"""
contained_triples, core_entity = search_triples(token, conceptnet_triples)
nodes, edges, edges_attr, token_triples = build_graph(contained_triples)
return nodes, edges, edges_attr, token_triples, core_entity
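# Illustration (restating the helpers above): for a token such as "bank", search_triples()
# collects triples whose head or tail contains "bank" after splitting on "_", stops once more
# than `limit` triples have been gathered, and the shortest matching entity string becomes
# the core entity of the sub-graph built by build_graph().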
def retrieve_tokens_graph(index, token_part, conceptnet_triples, stopwords, args):
"""retrieve tokens graph"""
logger.info("begin run function {} at process {}".format(retrieve_tokens_graph, os.getpid()))
token2datas = {}
for token in tqdm(token_part):
if token in set(string.punctuation):
logger.info('{} is punctuation, skipped!'.format(token))
# punctuation_cnt += 1
continue
if args.no_stopwords and token in stopwords:
logger.info('{} is stopword, skipped!'.format(token))
# stopword_cnt += 1
continue
if args.ignore_length > 0 and len(token) <= args.ignore_length:
logger.info('{} is too short, skipped!'.format(token))
continue
# build graph for token here
nodes, edges, edges_attr, token_triples, core_entity = build_graph_for_token(token, conceptnet_triples)
token2data = {}
token2data["sub_graph"] = (nodes, edges, edges_attr)
token2data["graph_triples"] = token_triples
token2data["core_entity"] = core_entity
token2datas[token] = token2data
with open(os.path.join(args.output_dir, 'retrived_token_graphs_{}.data'.format(index)), 'wb') as fout:
pickle.dump(token2datas, fout)
logger.info('Finished dumping retrieved token graphs {}'.format(index))
def del_pos(s):
"""
Deletes part-of-speech encoding from an entity string, if present.
:param s: Entity string.
:return: Entity string with part-of-speech encoding removed.
"""
if s.endswith("/n") or s.endswith("/a") or s.endswith("/v") or s.endswith("/r"):
s = s[:-2]
return s
def retrieved_entity_rel_emb(token2datas, entity2id, relation2id, entity_emb, relation_emb):
"""retrieve entity and relation embeddings"""
entity2emb = {}
relation2emb = {}
for token, data in token2datas.items():
graph_triples = data["graph_triples"]
for triple in graph_triples:
head, rel, tail = triple[0], triple[1], triple[2]
if head not in entity2emb:
entity2emb[head] = entity_emb[entity2id[head]]
if rel not in relation2emb:
relation2emb[rel] = relation_emb[relation2id[rel]]
if tail not in entity2emb:
entity2emb[tail] = entity_emb[entity2id[tail]]
return entity2emb, relation2emb
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--train_token', type=str, default='EKMRC/data/ReCoRD_tokenization/tokens_self/train.tokenization.cased.data',
help='token file of train set')
parser.add_argument('--eval_token', type=str, default='EKMRC/data/ReCoRD_tokenization/tokens_self/dev.tokenization.cased.data',
help='token file of dev set')
parser.add_argument('--conceptnet_path', type=str, default='EKMRC/data/conceptnet/conceptNet_process.txt', help='conceptnet triple path')
parser.add_argument('--entity_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/entity2id.txt', help="entity2id path")
parser.add_argument('--relation_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/relation2id.txt', help="relation2id path")
parser.add_argument('--entity_emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/entity_emb.pkl', help="entity emb path")
parser.add_argument('--relation_emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/rel_emb.pkl', help="relation emb path")
parser.add_argument('--entity2emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/entity2emb.txt', help="entity2emb path")
parser.add_argument('--relation2emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/relation2emb.txt', help='relation2emb path')
parser.add_argument('--output_dir', type=str, default='EKMRC/build_graph_concepts/retrieve_result/one_hop', help='output directory')
parser.add_argument('--no_stopwords', action='store_true', default=True, help='ignore stopwords')
parser.add_argument('--ignore_length', type=int, default=0, help='ignore words with length <= ignore_length')
args = parser.parse_args()
# load ConceptNet here
logger.info("Begin loading concept triples...")
conceptnet_triples = extract_triples(args.conceptnet_path)
logger.info('Finished loading concept english triples.')
logger.info("sample five triples...")
for i in range(5):
triple = random.choice(conceptnet_triples)
logger.info(triple)
# # build mappings of entities and relations(all ConceptNet)
# entity2id, id2entity, relation2id, id2relation = build_mapping(conceptnet_triples, args.entity_path, args.relation_path)
# logger.info("Finished mapping of relations and entities.")
# get concept mapping
logger.info("get concept mapping...")
entity2id, relation2id = get_concept_mapping(args.entity_path, args.relation_path)
# load pickled samples
logger.info('Begin to load tokenization results...')
train_samples = pickle.load(open(args.train_token, 'rb'))
dev_samples = pickle.load(open(args.eval_token, 'rb'))
logger.info('Finished loading tokenization results.')
# build token set
all_token_set = set()
for sample in train_samples + dev_samples:
for token in sample['query_tokens'] + sample['document_tokens']:
all_token_set.add(token)
logger.info('Finished making tokenization results into token set.')
# load stopwords
stopwords = set(nltk.corpus.stopwords.words('english'))
logger.info('Finished loading stopwords list.')
# mk directory
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
# retrive neighbor triples and build sub-graph
logger.info('Begin to retrieve neighbor triples and build sub-graph...')
# token2graph = dict()
# stopword_cnt = 0
# punctuation_cnt = 0
all_token_set = list(all_token_set)
# split all_token_set to processes parts and deal with multi-processing
all_token_parts = []
part_token_nums = int(len(all_token_set) / PROCESSES)
for i in range(PROCESSES):
if i != PROCESSES - 1:
cur_token_set = all_token_set[i * part_token_nums: (i+1) * part_token_nums]
else:
cur_token_set = all_token_set[i * part_token_nums: ]
all_token_parts.append(cur_token_set)
# multi-processing
logger.info("Begin to deal with {} processes...".format(PROCESSES))
p = Pool(PROCESSES)
for i, part in enumerate(all_token_parts):
p.apply_async(retrieve_tokens_graph, args=(i, part, conceptnet_triples, stopwords, args,))
p.close()
p.join()
logger.info("all processes done!")
# combine all results
logger.info('Finished retrieving token graphs, combine all result...')
token2datas = {}
for i in range(PROCESSES):
with open(os.path.join(args.output_dir, 'retrived_token_graphs_{}.data'.format(i)), 'rb') as fin:
token2data = pickle.load(fin)
token2datas.update(token2data)
logger.info("combine all results done!")
logger.info('{} / {} tokens retrieved at lease 1 graph.'.format(len(token2datas), len(all_token_set)))
with open(os.path.join(args.output_dir, 'retrived_token_graphs_1hop.data'), 'wb') as fout:
pickle.dump(token2datas, fout)
logger.info('Finished dumping retrieved token graphs.')
# with open(os.path.join(args.output_dir, 'retrived_token_graphs_1hop.data'), 'rb') as f_in:
# token2datas = pickle.load(f_in)
logger.info("save retrieved entity and relation embeddings...")
with open(args.entity_emb_path, 'rb') as f1:
entity_emb = pickle.load(f1)
with open(args.relation_emb_path, 'rb') as f2:
relation_emb = pickle.load(f2)
entity2emb, relation2emb = retrieved_entity_rel_emb(token2datas, entity2id, relation2id, entity_emb, relation_emb)
with open(args.entity2emb_path, 'w', encoding='utf-8') as f:
for entity, emb in entity2emb.items():
assert len(emb) == 100
if entity == "" or entity == " ":
logger.info("empty entity: {}".format(entity))
f.write(entity + " " + " ".join(map(str, emb)) + "\n")
with open(args.relation2emb_path, 'w', encoding="utf-8") as f:
for rel, emb in relation2emb.items():
assert len(emb) == 100
f.write(rel + " " + " ".join(map(str, emb)) + "\n")
logger.info("For all KG, {}/{} retrieved entities used, {}/{} retrieved relations used.".format(
len(entity2emb), len(entity_emb), len(relation2emb), len(relation_emb)))
if __name__ == '__main__':
main() | [
"sys.path.append",
"tqdm.tqdm",
"pickle.dump",
"os.getpid",
"argparse.ArgumentParser",
"logging.basicConfig",
"os.makedirs",
"os.path.exists",
"random.choice",
"pickle.load",
"nltk.corpus.stopwords.words",
"multiprocessing.Pool",
"os.path.join",
"logging.getLogger"
] | [((290, 310), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (305, 310), False, 'import sys\n'), ((502, 645), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(name)s - %(message)s"""', 'datefmt': '"""%m/%d/%Y %H:%M:%S"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt=\n '%m/%d/%Y %H:%M:%S', level=logging.INFO)\n", (521, 645), False, 'import logging\n'), ((692, 719), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (709, 719), False, 'import logging\n'), ((7096, 7112), 'tqdm.tqdm', 'tqdm', (['token_part'], {}), '(token_part)\n', (7100, 7112), False, 'from tqdm import tqdm\n'), ((9303, 9328), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (9326, 9328), False, 'import argparse\n'), ((13561, 13576), 'multiprocessing.Pool', 'Pool', (['PROCESSES'], {}), '(PROCESSES)\n', (13565, 13576), False, 'from multiprocessing import Pool\n'), ((8122, 8152), 'pickle.dump', 'pickle.dump', (['token2datas', 'fout'], {}), '(token2datas, fout)\n', (8133, 8152), False, 'import pickle\n'), ((11512, 11545), 'random.choice', 'random.choice', (['conceptnet_triples'], {}), '(conceptnet_triples)\n', (11525, 11545), False, 'import random\n'), ((12580, 12618), 'nltk.corpus.stopwords.words', 'nltk.corpus.stopwords.words', (['"""english"""'], {}), "('english')\n", (12607, 12618), False, 'import nltk\n'), ((12703, 12734), 'os.path.exists', 'os.path.exists', (['args.output_dir'], {}), '(args.output_dir)\n', (12717, 12734), False, 'import os\n'), ((12744, 12772), 'os.makedirs', 'os.makedirs', (['args.output_dir'], {}), '(args.output_dir)\n', (12755, 12772), False, 'import os\n'), ((14394, 14424), 'pickle.dump', 'pickle.dump', (['token2datas', 'fout'], {}), '(token2datas, fout)\n', (14405, 14424), False, 'import pickle\n'), ((14772, 14787), 'pickle.load', 'pickle.load', (['f1'], {}), '(f1)\n', (14783, 14787), False, 'import pickle\n'), ((14862, 14877), 'pickle.load', 'pickle.load', (['f2'], {}), '(f2)\n', (14873, 14877), False, 'import pickle\n'), ((7044, 7055), 'os.getpid', 'os.getpid', ([], {}), '()\n', (7053, 7055), False, 'import os\n'), ((14078, 14094), 'pickle.load', 'pickle.load', (['fin'], {}), '(fin)\n', (14089, 14094), False, 'import pickle\n'), ((14305, 14369), 'os.path.join', 'os.path.join', (['args.output_dir', '"""retrived_token_graphs_1hop.data"""'], {}), "(args.output_dir, 'retrived_token_graphs_1hop.data')\n", (14317, 14369), False, 'import os\n')] |
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from django.utils.translation import ugettext as _
from django.conf import settings
from models import BootstrapButtonPlugin
class BootstrapButtonPlugin(CMSPluginBase):
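    # NOTE: this plugin class shadows the imported model of the same name; `model` below
    # still refers to the model class, because the class body is evaluated before the
    # plugin class name is bound.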
model = BootstrapButtonPlugin
name = _("Button")
text_enabled = True
render_template = "plugins/bootstrap_button.html"
def render(self, context, instance, placeholder):
if instance.mailto:
link = u"mailto:%s" % _(instance.mailto)
elif instance.url:
link = _(instance.url)
elif instance.page_link:
link = instance.page_link.get_absolute_url()
else:
link = ""
context.update({
'link': link,
'size': instance.button_size,
'type': instance.button_type,
'label': instance.label,
'new_window': instance.new_window,
})
return context
def icon_src(self, instance):
return settings.STATIC_URL + u"cms/images/plugins/link.png"
plugin_pool.register_plugin(BootstrapButtonPlugin) | [
"cms.plugin_pool.plugin_pool.register_plugin",
"django.utils.translation.ugettext"
] | [((1070, 1120), 'cms.plugin_pool.plugin_pool.register_plugin', 'plugin_pool.register_plugin', (['BootstrapButtonPlugin'], {}), '(BootstrapButtonPlugin)\n', (1097, 1120), False, 'from cms.plugin_pool import plugin_pool\n'), ((298, 309), 'django.utils.translation.ugettext', '_', (['"""Button"""'], {}), "('Button')\n", (299, 309), True, 'from django.utils.translation import ugettext as _\n'), ((505, 523), 'django.utils.translation.ugettext', '_', (['instance.mailto'], {}), '(instance.mailto)\n', (506, 523), True, 'from django.utils.translation import ugettext as _\n'), ((570, 585), 'django.utils.translation.ugettext', '_', (['instance.url'], {}), '(instance.url)\n', (571, 585), True, 'from django.utils.translation import ugettext as _\n')] |
from django.db import models
# Create your models here.
class Category(models.Model):
"""
Description: Model Description
"""
name = models.CharField(max_length=50)
class Meta:
pass
class Skill(models.Model):
"""
Description: Model Description
"""
name = models.CharField(max_length=50)
category = models.ForeignKey('Category', on_delete=models.CASCADE)
class Meta:
pass | [
"django.db.models.CharField",
"django.db.models.ForeignKey"
] | [((149, 180), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (165, 180), False, 'from django.db import models\n'), ((301, 332), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (317, 332), False, 'from django.db import models\n'), ((348, 403), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Category"""'], {'on_delete': 'models.CASCADE'}), "('Category', on_delete=models.CASCADE)\n", (365, 403), False, 'from django.db import models\n')] |
from os.path import dirname, abspath, join
DIR_PATH = dirname(abspath(__file__))
OTC_TOOLKIT_PATH = abspath(join(DIR_PATH, '..'))
PATH_TO_RECORDINGS = "data"
STATIONS = ['ecdf', 'citylab']
BOARDS = ['nano', 'tx2', 'xavier']
COUNTER_LINE_COORDS = {'ecdf':
# {'ecdf-lindner': {"point1": {"x": 718, "y": 173}, Coords from first run, bad lines
# "point2": {"x": 702, "y": 864}},
# "cross": {"point1": {"x": 515, "y": 494},
# "point2": {"x": 932, "y": 377}}},
{"bundesstrasse": {"point1": {"x": 1046, "y": 132}, "point2": {"x": 1211, "y": 226}},
"lindner": {"point1": {"x": 393, "y": 166}, "point2": {"x": 718, "y": 72}},
"walking_bundesstrasse": {"point1": {"x": 1104, "y": 200}, "point2": {"x": 975, "y": 258}},
"walking_lindner": {"point1": {"x": 568, "y": 150}, "point2": {"x": 642, "y": 235}}},
# 'citylab':
# {"point1": {"x": 34, "y": 740}, "point2": {"x": 1433,
# "y": 103}}
"citylab": {
"platzderluftbruecke": {"point1": {"x": 541, "y": 445}, "point2": {"x": 960, "y": 179}}}
}
# tx2: same line for both directions going across two lanes
CLASSES = ["car", "truck", "bicycle", "bus", "motorbike"]
# CLASSES = ["car", "truck", "person", "bus"] # changed for second ecdf-recording
# COUNTER_LINE_NAMES = {
# "ecdf": {"a4ad8491-c790-4078-9092-94ac1e3e0b46": "ecdf-lindner", "882e3178-408a-4e3e-884f-d8d2290b47f0": "cross"}}
COUNTER_LINE_NAMES = {"ecdf": {
"c9f71c06-6baf-47c3-9ca2-4c26676b7336": "bundesstrasse",
"6c393a8f-a84f-4e31-8670-bfeb9e1cfadc": "lindner",
"240885bb-636e-41f2-8448-bfcdbabd42b5": "walking_bundesstrasse",
"25b11f4a-0d23-4878-9050-5b5a06834adc": "walking_lindner"
},
"citylab": {"a7317e7a-85da-4f08-8efc-4e90a2a2b2b8": "platzderluftbruecke"}
}
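# Lookup sketch (values copied from the dictionaries above): endpoints of a named counter
# line for a station, e.g.
#
#     line = COUNTER_LINE_COORDS["ecdf"]["lindner"]
#     p1 = (line["point1"]["x"], line["point1"]["y"])   # (393, 166)
#     p2 = (line["point2"]["x"], line["point2"]["y"])   # (718, 72)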
| [
"os.path.abspath",
"os.path.join"
] | [((63, 80), 'os.path.abspath', 'abspath', (['__file__'], {}), '(__file__)\n', (70, 80), False, 'from os.path import dirname, abspath, join\n'), ((109, 129), 'os.path.join', 'join', (['DIR_PATH', '""".."""'], {}), "(DIR_PATH, '..')\n", (113, 129), False, 'from os.path import dirname, abspath, join\n')] |
import os
import textwrap
import argparse
import pandas as pd
from pathlib import Path
from utils.action import Action, Actions
from utils.markdown import (
update_markdown_document,
SUMMARY_ID,
MarkdownData,
MarkdownDocument,
)
from utils.files import FileClient
README = Path(
os.path.realpath(os.path.join(os.path.abspath(__file__), os.pardir, "README.md"))
)
CSV = Path(
os.path.realpath(os.path.join(os.path.abspath(__file__), os.pardir, "actions.csv"))
)
def _get_parser():
parser = argparse.ArgumentParser(
description=textwrap.dedent(
"""
This script is used to:
- clean up files under /actions
- export the actions to a csv
- export the actions to the readme
"""
),
epilog=textwrap.dedent(
"""
# Update files in action folder
$ python update.py --files-cleanup
# Update actions.csv based on files
$ python update.py --files-to-csv
# Update README.md based on files
$ python update.py --files-to-readme
"""
),
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument(
"--files-to-csv",
action="store_true",
help="Update data.csv based on the action folder."
)
parser.add_argument(
"--files-to-readme",
action="store_true",
help="Update the table in the README.md based on the action folder."
)
parser.add_argument(
"--files-cleanup",
action="store_true",
help="Update the action folder by cleaning it up and sorting it."
)
parser.add_argument(
"--csv-to-files",
action="store_true",
help="Update the action folder from the actions.csv."
)
args = parser.parse_args()
return args
def update_files_from_csv():
print(f"Updating files in the /actions folder from actions.csv...")
df = pd.read_csv(CSV)
actions = Actions.read_from_df(df)
actions.to_files()
def update_files():
print(f"Updating files in the /actions folder...")
fc = FileClient()
files = fc.get_all_files()
actions = Actions.read_from_files(files)
actions.to_files()
def update_csv_from_files():
print(f"Updating actions.csv from files in the /actions folder...")
fc = FileClient()
files = fc.get_all_files()
actions = Actions.read_from_files(files)
df = actions.to_df()
df.to_csv(CSV)
def update_readme_from_files():
print(f"Updating README.md from files in the /actions folder...")
fc = FileClient()
files = fc.get_all_files()
actions = Actions.read_from_files(files)
actions.sort()
readme = Path(README)
md_document = readme.read_text()
md_document = update_markdown_document(md_document, Actions.action_id, actions)
readme.write_text(md_document)
if __name__ == "__main__":
args = _get_parser()
if args.files_cleanup:
update_files()
if args.files_to_csv:
update_csv_from_files()
if args.files_to_readme:
update_readme_from_files()
if args.csv_to_files:
update_files_from_csv()
| [
"textwrap.dedent",
"os.path.abspath",
"pandas.read_csv",
"pathlib.Path",
"utils.action.Actions.read_from_files",
"utils.markdown.update_markdown_document",
"utils.files.FileClient",
"utils.action.Actions.read_from_df"
] | [((1946, 1962), 'pandas.read_csv', 'pd.read_csv', (['CSV'], {}), '(CSV)\n', (1957, 1962), True, 'import pandas as pd\n'), ((1977, 2001), 'utils.action.Actions.read_from_df', 'Actions.read_from_df', (['df'], {}), '(df)\n', (1997, 2001), False, 'from utils.action import Action, Actions\n'), ((2111, 2123), 'utils.files.FileClient', 'FileClient', ([], {}), '()\n', (2121, 2123), False, 'from utils.files import FileClient\n'), ((2169, 2199), 'utils.action.Actions.read_from_files', 'Actions.read_from_files', (['files'], {}), '(files)\n', (2192, 2199), False, 'from utils.action import Action, Actions\n'), ((2335, 2347), 'utils.files.FileClient', 'FileClient', ([], {}), '()\n', (2345, 2347), False, 'from utils.files import FileClient\n'), ((2393, 2423), 'utils.action.Actions.read_from_files', 'Actions.read_from_files', (['files'], {}), '(files)\n', (2416, 2423), False, 'from utils.action import Action, Actions\n'), ((2581, 2593), 'utils.files.FileClient', 'FileClient', ([], {}), '()\n', (2591, 2593), False, 'from utils.files import FileClient\n'), ((2639, 2669), 'utils.action.Actions.read_from_files', 'Actions.read_from_files', (['files'], {}), '(files)\n', (2662, 2669), False, 'from utils.action import Action, Actions\n'), ((2702, 2714), 'pathlib.Path', 'Path', (['README'], {}), '(README)\n', (2706, 2714), False, 'from pathlib import Path\n'), ((2770, 2835), 'utils.markdown.update_markdown_document', 'update_markdown_document', (['md_document', 'Actions.action_id', 'actions'], {}), '(md_document, Actions.action_id, actions)\n', (2794, 2835), False, 'from utils.markdown import update_markdown_document, SUMMARY_ID, MarkdownData, MarkdownDocument\n'), ((330, 355), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (345, 355), False, 'import os\n'), ((430, 455), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (445, 455), False, 'import os\n'), ((566, 755), 'textwrap.dedent', 'textwrap.dedent', (['"""\n This script is used to:\n - clean up files under /actions\n - export the actions to a csv\n - export the actions to the readme\n\t\t"""'], {}), '(\n """\n This script is used to:\n - clean up files under /actions\n - export the actions to a csv\n - export the actions to the readme\n\t\t"""\n )\n', (581, 755), False, 'import textwrap\n'), ((784, 1084), 'textwrap.dedent', 'textwrap.dedent', (['"""\n # Update files in action folder\n $ python update.py --files-cleanup\n\n # Update actions.csv based on files\n $ python update.py --files-to-csv\n\n # Update README.md based on files\n $ python update.py --files-to-readme\n """'], {}), '(\n """\n # Update files in action folder\n $ python update.py --files-cleanup\n\n # Update actions.csv based on files\n $ python update.py --files-to-csv\n\n # Update README.md based on files\n $ python update.py --files-to-readme\n """\n )\n', (799, 1084), False, 'import textwrap\n')] |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.widgets import Slider
import cv2 as cv
FILE_NAME = 'res/mountain-and-lake.jpg'
# https://matplotlib.org/3.3.1/gallery/widgets/slider_demo.html
# https://sodocumentation.net/matplotlib/topic/6983/animations-and-interactive-plotting
# img:
# image in rbg
#
# satadj:
# 1.0 means no change. Under it converts to greyscale
# and about 1.5 is immensely high
def saturate(img, satadj):
imghsv = cv.cvtColor(img, cv.COLOR_RGB2HSV).astype("float32")
(h, s, v) = cv.split(imghsv)
s = s*satadj
s = np.clip(s,0,255)
imghsv = cv.merge([h,s,v])
imgrgb = cv.cvtColor(imghsv.astype("uint8"), cv.COLOR_HSV2RGB)
# assume: return rgb
return imgrgb
def brightness(img, exp_adj):
imghsv = cv.cvtColor(img, cv.COLOR_RGB2HSV).astype("float32")
(h, s, v) = cv.split(imghsv)
v = v*exp_adj
v = np.clip(v,0,255)
imghsv = cv.merge([h,s,v])
imgrgb = cv.cvtColor(imghsv.astype("uint8"), cv.COLOR_HSV2RGB)
# assume: return rgb
return imgrgb
def plt_hist(ax, img, color):
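    # Channel lookup below assumes OpenCV's BGR ordering (index 0 == blue); note that main()
    # works on RGB frames and computes its histograms inline instead of calling this helper.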
colors = ['b', 'g', 'r']
k = colors.index(color)
histogram = cv.calcHist([img],[k],None,[256],[0,256])
plt_handle, = ax.plot(histogram, color=color)
return plt_handle
def main():
fig, ax = plt.subplots(1, 2,figsize=(27.0,27.0))
ax1 = ax[0] # The histogram
ax2 = ax[1] # The image
ax2.set_xlim(0.0,1280.0)
fig.suptitle('Image toner', fontsize=16)
# Calculate the initial value for the image
img = cv.imread(cv.samples.findFile(FILE_NAME)) # assume: BGR
img = cv.cvtColor(img, cv.COLOR_BGR2RGB) # plt assumes RGB
# Draw the image
# Take the handle for later
imobj = ax2.imshow(img)
# Axes for the saturation and brightness
ax_sat = plt.axes([0.25, .03, 0.50, 0.02])
ax_exp = plt.axes([0.25, 0.01, 0.50, 0.02])
# Slider
sat_slider = Slider(ax_sat, 'Saturation', 0, 20, valinit=1)
exp_slider = Slider(ax_exp, 'Brightness', -10, 10, valinit=1)
# Histogram
colors = ('r', 'g', 'b')
lines = []
for k,color in enumerate(colors):
histogram = cv.calcHist([img],[k],None,[256],[0,256])
line, = ax1.plot(histogram,color=color)
lines.append(line)
def update_sat(val):
newimg = img
# update image
newimg = saturate(newimg, val)
newimg = brightness(newimg, exp_slider.val)
imobj.set_data(newimg)
# update also the histogram
colors = ('r', 'g', 'b')
for k,color in enumerate(colors):
histogram = cv.calcHist([newimg],[k],None,[256],[0,256])
lines[k].set_ydata(histogram)
# redraw canvas while idle
fig.canvas.draw_idle()
def update_exp(val):
newimg = img
newimg = saturate(newimg, sat_slider.val)
newimg = brightness(newimg, val)
imobj.set_data(newimg)
# update also the histogram
        colors = ('r', 'g', 'b')  # match the channel order used when the histogram lines were created
for k,color in enumerate(colors):
histogram = cv.calcHist([newimg],[k],None,[256],[0,256])
lines[k].set_ydata(histogram)
# redraw canvas while idle
fig.canvas.draw_idle()
# call update function on slider value change
sat_slider.on_changed(update_sat)
exp_slider.on_changed(update_exp)
plt.show()
main()
| [
"matplotlib.pyplot.show",
"cv2.cvtColor",
"cv2.calcHist",
"matplotlib.pyplot.axes",
"matplotlib.widgets.Slider",
"numpy.clip",
"cv2.split",
"cv2.samples.findFile",
"cv2.merge",
"matplotlib.pyplot.subplots"
] | [((569, 585), 'cv2.split', 'cv.split', (['imghsv'], {}), '(imghsv)\n', (577, 585), True, 'import cv2 as cv\n'), ((607, 625), 'numpy.clip', 'np.clip', (['s', '(0)', '(255)'], {}), '(s, 0, 255)\n', (614, 625), True, 'import numpy as np\n'), ((634, 653), 'cv2.merge', 'cv.merge', (['[h, s, v]'], {}), '([h, s, v])\n', (642, 653), True, 'import cv2 as cv\n'), ((863, 879), 'cv2.split', 'cv.split', (['imghsv'], {}), '(imghsv)\n', (871, 879), True, 'import cv2 as cv\n'), ((902, 920), 'numpy.clip', 'np.clip', (['v', '(0)', '(255)'], {}), '(v, 0, 255)\n', (909, 920), True, 'import numpy as np\n'), ((929, 948), 'cv2.merge', 'cv.merge', (['[h, s, v]'], {}), '([h, s, v])\n', (937, 948), True, 'import cv2 as cv\n'), ((1145, 1191), 'cv2.calcHist', 'cv.calcHist', (['[img]', '[k]', 'None', '[256]', '[0, 256]'], {}), '([img], [k], None, [256], [0, 256])\n', (1156, 1191), True, 'import cv2 as cv\n'), ((1278, 1318), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(27.0, 27.0)'}), '(1, 2, figsize=(27.0, 27.0))\n', (1290, 1318), True, 'import matplotlib.pyplot as plt\n'), ((1560, 1594), 'cv2.cvtColor', 'cv.cvtColor', (['img', 'cv.COLOR_BGR2RGB'], {}), '(img, cv.COLOR_BGR2RGB)\n', (1571, 1594), True, 'import cv2 as cv\n'), ((1739, 1772), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.25, 0.03, 0.5, 0.02]'], {}), '([0.25, 0.03, 0.5, 0.02])\n', (1747, 1772), True, 'import matplotlib.pyplot as plt\n'), ((1783, 1816), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.25, 0.01, 0.5, 0.02]'], {}), '([0.25, 0.01, 0.5, 0.02])\n', (1791, 1816), True, 'import matplotlib.pyplot as plt\n'), ((1843, 1889), 'matplotlib.widgets.Slider', 'Slider', (['ax_sat', '"""Saturation"""', '(0)', '(20)'], {'valinit': '(1)'}), "(ax_sat, 'Saturation', 0, 20, valinit=1)\n", (1849, 1889), False, 'from matplotlib.widgets import Slider\n'), ((1904, 1952), 'matplotlib.widgets.Slider', 'Slider', (['ax_exp', '"""Brightness"""', '(-10)', '(10)'], {'valinit': '(1)'}), "(ax_exp, 'Brightness', -10, 10, valinit=1)\n", (1910, 1952), False, 'from matplotlib.widgets import Slider\n'), ((3071, 3081), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3079, 3081), True, 'import matplotlib.pyplot as plt\n'), ((1507, 1537), 'cv2.samples.findFile', 'cv.samples.findFile', (['FILE_NAME'], {}), '(FILE_NAME)\n', (1526, 1537), True, 'import cv2 as cv\n'), ((2054, 2100), 'cv2.calcHist', 'cv.calcHist', (['[img]', '[k]', 'None', '[256]', '[0, 256]'], {}), '([img], [k], None, [256], [0, 256])\n', (2065, 2100), True, 'import cv2 as cv\n'), ((503, 537), 'cv2.cvtColor', 'cv.cvtColor', (['img', 'cv.COLOR_RGB2HSV'], {}), '(img, cv.COLOR_RGB2HSV)\n', (514, 537), True, 'import cv2 as cv\n'), ((797, 831), 'cv2.cvtColor', 'cv.cvtColor', (['img', 'cv.COLOR_RGB2HSV'], {}), '(img, cv.COLOR_RGB2HSV)\n', (808, 831), True, 'import cv2 as cv\n'), ((2430, 2479), 'cv2.calcHist', 'cv.calcHist', (['[newimg]', '[k]', 'None', '[256]', '[0, 256]'], {}), '([newimg], [k], None, [256], [0, 256])\n', (2441, 2479), True, 'import cv2 as cv\n'), ((2817, 2866), 'cv2.calcHist', 'cv.calcHist', (['[newimg]', '[k]', 'None', '[256]', '[0, 256]'], {}), '([newimg], [k], None, [256], [0, 256])\n', (2828, 2866), True, 'import cv2 as cv\n')] |
# Generated by Django 2.0.5 on 2019-03-10 19:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('SV', '0005_auto_20190305_0116'),
]
operations = [
migrations.RemoveField(
model_name='cut',
name='user',
),
migrations.AlterField(
model_name='cut',
name='serial',
field=models.IntegerField(default=1),
),
migrations.AlterField(
model_name='ticketproducts',
name='alias',
field=models.CharField(blank=True, max_length=250, null=True),
),
migrations.AlterField(
model_name='ticketproducts',
name='ieps',
field=models.FloatField(default=0),
),
migrations.AlterField(
model_name='ticketproducts',
name='iva',
field=models.FloatField(default=0),
),
migrations.AlterField(
model_name='ticketproducts',
name='price',
field=models.FloatField(default=0),
),
migrations.AlterField(
model_name='ticketproducts',
name='productName',
field=models.CharField(blank=True, max_length=250, null=True),
),
migrations.AlterField(
model_name='ticketproducts',
name='quantity',
field=models.FloatField(default=0),
),
migrations.AlterField(
model_name='ticketproducts',
name='total',
field=models.FloatField(default=0),
),
]
| [
"django.db.migrations.RemoveField",
"django.db.models.IntegerField",
"django.db.models.CharField",
"django.db.models.FloatField"
] | [((230, 283), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""cut"""', 'name': '"""user"""'}), "(model_name='cut', name='user')\n", (252, 283), False, 'from django.db import migrations, models\n'), ((426, 456), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(1)'}), '(default=1)\n', (445, 456), False, 'from django.db import migrations, models\n'), ((585, 640), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(250)', 'null': '(True)'}), '(blank=True, max_length=250, null=True)\n', (601, 640), False, 'from django.db import migrations, models\n'), ((768, 796), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': '(0)'}), '(default=0)\n', (785, 796), False, 'from django.db import migrations, models\n'), ((923, 951), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': '(0)'}), '(default=0)\n', (940, 951), False, 'from django.db import migrations, models\n'), ((1080, 1108), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': '(0)'}), '(default=0)\n', (1097, 1108), False, 'from django.db import migrations, models\n'), ((1243, 1298), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(250)', 'null': '(True)'}), '(blank=True, max_length=250, null=True)\n', (1259, 1298), False, 'from django.db import migrations, models\n'), ((1430, 1458), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': '(0)'}), '(default=0)\n', (1447, 1458), False, 'from django.db import migrations, models\n'), ((1587, 1615), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': '(0)'}), '(default=0)\n', (1604, 1615), False, 'from django.db import migrations, models\n')] |
from twisted.web.resource import Resource
from genshi.template import TemplateLoader
import os
import cStringIO, gzip
class TemplateResource(Resource):
isLeaf = True
def __init__(self, path = None):
self.path = path
        self.loader = TemplateLoader(search_path=[os.path.join(os.path.dirname(__file__), '../web_templates')], auto_reload=True)
def render_GET(self, request):
if self.path is not None:
content = self._render_template( self.path.replace("docs/", "") + ".genshi" )
else:
content = self._render_template( request.path.replace("docs/", "").strip("/") + ".genshi" )
content = content.replace("\t", "")
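        # Gzip-compress the rendered page when the client advertises support for it.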
encoding = request.getHeader("accept-encoding")
if encoding and "gzip" in encoding:
zbuf = cStringIO.StringIO()
zfile = gzip.GzipFile(None, 'wb', 9, zbuf)
if isinstance( content, unicode ):
zfile.write( unicode(content).encode("utf-8") )
elif isinstance( content, str ):
zfile.write( unicode(content, 'utf-8' ).encode("utf-8") )
else:
zfile.write( unicode(content).encode("utf-8") )
zfile.close()
request.setHeader("Content-encoding","gzip")
return zbuf.getvalue()
else:
return content
def _render_template(self, template, data=None):
if data is None:
data = {}
t = self.loader.load( template )
return t.generate( data=data ).render('xhtml', doctype='xhtml')
| [
"cStringIO.StringIO",
"os.path.dirname",
"gzip.GzipFile"
] | [((814, 834), 'cStringIO.StringIO', 'cStringIO.StringIO', ([], {}), '()\n', (832, 834), False, 'import cStringIO, gzip\n'), ((855, 889), 'gzip.GzipFile', 'gzip.GzipFile', (['None', '"""wb"""', '(9)', 'zbuf'], {}), "(None, 'wb', 9, zbuf)\n", (868, 889), False, 'import cStringIO, gzip\n'), ((292, 317), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (307, 317), False, 'import os\n')] |
import numpy as np
from platformx.plat_tensorflow.tools.processor.np_utils import shape_utils, \
anchor_generator_builder, box_list_ops, box_list, box_coder_builder, post_processing_builder, \
visualization_utils as vis_util
from platformx.plat_tensorflow.tools.processor.np_utils import standard_fields as fields
from platformx.plat_tensorflow.tools.processor import model_config
import config
from PIL import Image
import matplotlib
matplotlib.use('Agg')
from platformx.plat_tensorflow.tools.processor.np_utils import label_map_util
from scipy import misc
import os
BOX_ENCODINGS = 'box_encodings'
CLASS_PREDICTIONS_WITH_BACKGROUND = 'class_predictions_with_background'
BASE_BoxEncodingPredictor = "_BoxEncodingPredictor"
BASE_ClassPredictor = "_ClassPredictor"
PPN_BoxPredictor_0 = "WeightSharedConvolutionalBoxPredictor_BoxPredictor"
PPN_ClassPredictor_0 = "WeightSharedConvolutionalBoxPredictor_ClassPredictor"
BASE_PPN_BoxPredictor = "_BoxPredictor"
BASE_PPN_ClassPredictor = "WeightSharedConvolutionalBoxPredictor"
PATH_TO_LABELS = config.cfg.POSTPROCESSOR.PATH_TO_LABELS
def run_ssd_tf_post(preprocessed_inputs, result_middle=None):
boxes_encodings_np = []
classes_predictions_with_background_np = []
feature_maps_np = []
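    # Collect the per-layer box encodings and class predictions from the
    # intermediate tensors by name, handling both the conventional per-layer
    # box predictor and the weight-shared (PPN/FPN) predictor naming schemes.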
for i in range(6):
for key, value in result_middle.items():
if str(i) + BASE_BoxEncodingPredictor in key:
print(str(i) + BASE_BoxEncodingPredictor + ": ", value.shape)
boxes_encodings_np.append(value)
break
if i == 0:
if PPN_BoxPredictor_0 in key:
print("PPN_BoxPredictor_0:", value.shape)
boxes_encodings_np.append(value)
break
else:
if str(i) + BASE_PPN_BoxPredictor in key:
print(str(i) + BASE_PPN_BoxPredictor, value.shape)
boxes_encodings_np.append(value)
break
for key, value in result_middle.items():
if str(i) + BASE_ClassPredictor in key and BASE_PPN_ClassPredictor not in key:
print(str(i) + BASE_ClassPredictor+ ": ", value.shape)
classes_predictions_with_background_np.append(value)
break
if i == 0:
if PPN_ClassPredictor_0 in key:
print(PPN_ClassPredictor_0 + ":", value.shape)
classes_predictions_with_background_np.append(value)
break
else:
if str(i) + BASE_ClassPredictor in key and BASE_PPN_ClassPredictor in key:
print(str(i) + BASE_ClassPredictor + ":", value.shape)
classes_predictions_with_background_np.append(value)
break
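    # Gather the backbone feature maps; plain "FeatureExtractor" outputs are used
    # when present, otherwise the FPN outputs are collected below, sorted from the
    # largest to the smallest spatial resolution.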
for key, value in result_middle.items():
if "FeatureExtractor" in key and "fpn" not in key:
print("key {} value {}".format(key, value.shape))
feature_maps_np.append(value)
if len(feature_maps_np) < 1:
key_dict = {}
for key, value in result_middle.items():
if "FeatureExtractor" in key and "fpn"in key:
key_dict[key] = value.shape[1]
sorted_key_dict = sorted(key_dict.items(), key=lambda x: x[1], reverse=True)
for key, value in sorted_key_dict:
feature_maps_np.append(result_middle[key])
input_shape = preprocessed_inputs.shape
true_image_shapes = np.array([input_shape[1], input_shape[2], input_shape[3]], dtype=np.int32)
true_image_shapes = true_image_shapes.reshape((1, 3))
post_result = post_deal(boxes_encodings_np, classes_predictions_with_background_np, feature_maps_np,
preprocessed_inputs,
true_image_shapes)
show_detection_result(post_result)
return post_result
def show_detection_result(result):
print("PATH_TO_LABELS:", PATH_TO_LABELS)
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
# NUM_CLASSES
NUM_CLASSES = config.cfg.POSTPROCESSOR.NUM_CLASSES
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES,
use_display_name=True)
category_index = label_map_util.create_category_index(categories)
result['detection_classes'] = result[
'detection_classes'][0].astype(np.uint8)
result['detection_boxes'] = result['detection_boxes'][0]
result['detection_scores'] = result['detection_scores'][0]
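    # Visualize the detections on the first image found in the configured image directory.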
img_dir = config.cfg.PREPROCESS.IMG_LIST
file_list = os.listdir(img_dir)
IMG_PATH = os.path.join(img_dir, file_list[0])
print("IMG_PATH:", IMG_PATH)
image = Image.open(IMG_PATH)
image_np = load_image_into_numpy_array(image)
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
result['detection_boxes'],
result['detection_classes'],
result['detection_scores'],
category_index,
instance_masks=result.get('detection_masks'),
use_normalized_coordinates=True,
line_thickness=8)
# IMAGE_SIZE = (12, 8)
# plt.figure(figsize=IMAGE_SIZE)
misc.imsave('detection_result_ssd.png', image_np)
def load_image_into_numpy_array(image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
def post_deal(boxes_encodings, classes_predictions_with_background, feature_maps, preprocessed_inputs=None,
true_image_shapes=None):
"""
    SSD model post-processing entry point.
    :param boxes_encodings: per-layer box encoding tensors
    :param classes_predictions_with_background: per-layer class prediction tensors (background included)
    :param feature_maps: backbone feature map tensors
    :param preprocessed_inputs: preprocessed [batch, height, width, channels] image tensor
    :param true_image_shapes: [batch, 3] array of true (unpadded) image shapes
    :return: dict with detection_boxes, detection_scores, detection_classes and num_detections
"""
prediction_dict, anchors = last_predict_part(boxes_encodings, classes_predictions_with_background, feature_maps,
preprocessed_inputs)
postprocessed_tensors = postprocess(anchors, prediction_dict, true_image_shapes)
return _add_output_tensor_nodes(postprocessed_tensors)
def _add_output_tensor_nodes(postprocessed_tensors):
print("------------------ _add_output_tensor_nodes ------------------")
detection_fields = fields.DetectionResultFields
label_id_offset = 1
boxes = postprocessed_tensors.get(detection_fields.detection_boxes)
scores = postprocessed_tensors.get(detection_fields.detection_scores)
classes = postprocessed_tensors.get(
detection_fields.detection_classes) + label_id_offset
keypoints = postprocessed_tensors.get(detection_fields.detection_keypoints)
masks = postprocessed_tensors.get(detection_fields.detection_masks)
num_detections = postprocessed_tensors.get(detection_fields.num_detections)
if isinstance(num_detections, list):
num_detections = num_detections[0]
elif isinstance(num_detections, float):
num_detections = int(num_detections)
elif isinstance(num_detections, np.ndarray):
num_detections = int(num_detections[0])
print("=============== num_detections :", num_detections)
outputs = {}
print("scores:", scores)
scores = scores.flatten()
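    # Keep only the first num_detections entries and pad back out to a fixed
    # length (100, or the original length if larger): scores and boxes are
    # zero-padded, classes are padded with ones, so the outputs have a constant shape.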
    # TODO: read the padding length from the config file; the pad-with-0 / pad-with-1
    # handling below follows the original code
if scores.shape[0] < 100:
raw_shape = 100
else:
raw_shape = scores.shape[0]
scores_1 = scores[0:num_detections]
print("scores_1:", scores_1)
scores_2 = np.zeros(shape=raw_shape - num_detections)
scores = np.hstack((scores_1, scores_2))
scores = np.reshape(scores, (1, scores.shape[0]))
outputs[detection_fields.detection_scores] = scores
classes = classes.flatten()
classes_1 = classes[0:num_detections]
print("classes_1:", classes_1)
classes_2 = np.ones(shape=raw_shape - num_detections)
classes = np.hstack((classes_1, classes_2))
classes = np.reshape(classes, (1, classes.shape[0]))
outputs[detection_fields.detection_classes] = classes
boxes_1 = boxes[:, 0:num_detections]
print("boxes_1:", boxes_1)
boxes_2 = np.zeros(shape=(1, raw_shape - num_detections, 4))
boxes = np.hstack((boxes_1, boxes_2))
outputs[detection_fields.detection_boxes] = boxes
outputs[detection_fields.num_detections] = num_detections
if keypoints is not None:
outputs[detection_fields.detection_keypoints] = keypoints
if masks is not None:
outputs[detection_fields.detection_masks] = masks
return outputs
def last_predict_part(boxes_encodings, classes_predictions_with_background, feature_maps, preprocessed_inputs=None):
print("------------------ last_predict_part ------------------")
"""Predicts unpostprocessed tensors from input tensor.
This function takes an input batch of images and runs it through the forward
    pass of the network to yield unpostprocessed predictions.
    As a side effect, a box_list.BoxList of anchors is constructed and returned
    alongside the prediction dictionary; these anchors are required by the
    postprocess step.
    Args:
      boxes_encodings: a list of per-layer box encoding tensors taken from the
        network's intermediate outputs.
      classes_predictions_with_background: a list of per-layer class prediction
        tensors (background class included).
      feature_maps: a list of feature map tensors of shape
        [batch, height_i, width_i, channels_i].
      preprocessed_inputs: a [batch, height, width, channels] image tensor.
"""
anchor_generator = anchor_generator_builder.build()
num_predictions_per_location_list = anchor_generator.num_anchors_per_location()
# print("num_predictions_per_location_list:", num_predictions_per_location_list)
prediction_dict = post_processor(boxes_encodings, classes_predictions_with_background,
feature_maps, num_predictions_per_location_list)
image_shape = shape_utils.combined_static_and_dynamic_shape(
preprocessed_inputs)
feature_map_spatial_dims = get_feature_map_spatial_dims(
feature_maps)
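    # Generate anchors for every feature-map resolution and concatenate them
    # into a single BoxList spanning the whole image.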
anchors_list = anchor_generator.generate(
feature_map_spatial_dims,
im_height=image_shape[1],
im_width=image_shape[2])
anchors = box_list_ops.concatenate(anchors_list)
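    # Concatenate the per-layer predictions along the anchor dimension so each
    # output type becomes a single [batch, num_anchors, ...] array.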
box_encodings = np.concatenate(prediction_dict['box_encodings'], axis=1)
if box_encodings.ndim == 4 and box_encodings.shape[2] == 1:
box_encodings = np.squeeze(box_encodings, axis=2)
class_predictions_with_background = np.concatenate(
prediction_dict['class_predictions_with_background'], axis=1)
predictions_dict = {
'preprocessed_inputs': preprocessed_inputs,
'box_encodings': box_encodings,
'class_predictions_with_background':
class_predictions_with_background,
'feature_maps': feature_maps,
'anchors': anchors.get()
}
return predictions_dict, anchors
def get_feature_map_spatial_dims(feature_maps):
"""Return list of spatial dimensions for each feature map in a list.
Args:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i].
Returns:
a list of pairs (height, width) for each feature map in feature_maps
"""
feature_map_shapes = [
shape_utils.combined_static_and_dynamic_shape(
feature_map) for feature_map in feature_maps
]
return [(shape[1], shape[2]) for shape in feature_map_shapes]
def post_processor(boxes_encodings, classes_predictions_with_background, image_features,
num_predictions_per_location_list):
print("------------------ post_processor ------------------")
"""Computes encoded object locations and corresponding confidences.
Args:
image_features: A list of float tensors of shape [batch_size, height_i,
width_i, channels_i] containing features for a batch of images.
num_predictions_per_location_list: A list of integers representing the
number of box predictions to be made per spatial location for each
feature map.
Returns:
box_encodings: A list of float tensors of shape
[batch_size, num_anchors_i, q, code_size] representing the location of
the objects, where q is 1 or the number of classes. Each entry in the
list corresponds to a feature map in the input `image_features` list.
class_predictions_with_background: A list of float tensors of shape
[batch_size, num_anchors_i, num_classes + 1] representing the class
predictions for the proposals. Each entry in the list corresponds to a
feature map in the input `image_features` list.
"""
box_encodings_list = []
class_predictions_list = []
for (image_feature,
num_predictions_per_location,
box_encodings,
class_predictions_with_background) in zip(image_features,
num_predictions_per_location_list,
boxes_encodings,
classes_predictions_with_background):
combined_feature_map_shape = image_feature.shape
box_code_size = config.cfg.POSTPROCESSOR.BOX_CODE_SIZE
new_shape = np.stack([combined_feature_map_shape[0],
combined_feature_map_shape[1] *
combined_feature_map_shape[2] *
num_predictions_per_location,
1, box_code_size])
box_encodings = np.reshape(box_encodings, new_shape)
box_encodings_list.append(box_encodings)
num_classes = config.cfg.POSTPROCESSOR.NUM_CLASSES
num_class_slots = num_classes + 1
class_predictions_with_background = np.reshape(
class_predictions_with_background,
np.stack([combined_feature_map_shape[0],
combined_feature_map_shape[1] *
combined_feature_map_shape[2] *
num_predictions_per_location,
num_class_slots]))
class_predictions_list.append(class_predictions_with_background)
return {BOX_ENCODINGS: box_encodings_list,
CLASS_PREDICTIONS_WITH_BACKGROUND: class_predictions_list}
def postprocess(anchors, prediction_dict, true_image_shapes):
print("------------------ postprocess ------------------")
if ('box_encodings' not in prediction_dict or
'class_predictions_with_background' not in prediction_dict):
raise ValueError('prediction_dict does not contain expected entries.')
preprocessed_images = prediction_dict['preprocessed_inputs']
box_encodings = prediction_dict['box_encodings']
class_predictions = prediction_dict['class_predictions_with_background']
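    # Decode the box encodings against the anchors, convert class logits to scores,
    # drop the background column, then run (batched) non-max suppression.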
detection_boxes, detection_keypoints = _batch_decode(anchors, box_encodings)
detection_boxes = np.expand_dims(detection_boxes, axis=2)
non_max_suppression_fn, score_conversion_fn = post_processing_builder.build(model_config.SSD)
detection_scores_with_background = score_conversion_fn(class_predictions)
detection_scores = detection_scores_with_background[0:, 0:, 1:]
additional_fields = None
if detection_keypoints is not None:
additional_fields = {
fields.BoxListFields.keypoints: detection_keypoints}
(nmsed_boxes, nmsed_scores, nmsed_classes, _, nmsed_additional_fields,
num_detections) = non_max_suppression_fn(
detection_boxes,
detection_scores,
clip_window=_compute_clip_window(
preprocessed_images, true_image_shapes),
additional_fields=additional_fields)
detection_dict = {
fields.DetectionResultFields.detection_boxes: nmsed_boxes,
fields.DetectionResultFields.detection_scores: nmsed_scores,
fields.DetectionResultFields.detection_classes: nmsed_classes,
fields.DetectionResultFields.num_detections:
float(num_detections)
}
if (nmsed_additional_fields is not None and
fields.BoxListFields.keypoints in nmsed_additional_fields):
detection_dict[fields.DetectionResultFields.detection_keypoints] = (
nmsed_additional_fields[fields.BoxListFields.keypoints])
return detection_dict
def _compute_clip_window(preprocessed_images, true_image_shapes):
resized_inputs_shape = shape_utils.combined_static_and_dynamic_shape(
preprocessed_images)
true_heights, true_widths, _ = np.split(true_image_shapes, 3, axis=1)
padded_height = float(resized_inputs_shape[1])
padded_width = float(resized_inputs_shape[2])
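    # The clip window is [ymin, xmin, ymax, xmax] per image, normalized by the
    # padded input size, so boxes are clipped to the region of the true image.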
    clipped_window = np.stack(
        [np.zeros_like(true_heights), np.zeros_like(true_widths),
         true_heights / padded_height, true_widths / padded_width], axis=1)
    clipped_window = clipped_window.reshape(1, -1)
    return clipped_window
def _batch_decode(anchors, box_encodings):
"""Decodes a batch of box encodings with respect to the anchors.
Args:
box_encodings: A float32 tensor of shape
[batch_size, num_anchors, box_code_size] containing box encodings.
Returns:
decoded_boxes: A float32 tensor of shape
[batch_size, num_anchors, 4] containing the decoded boxes.
decoded_keypoints: A float32 tensor of shape
[batch_size, num_anchors, num_keypoints, 2] containing the decoded
keypoints if present in the input `box_encodings`, None otherwise.
"""
combined_shape = shape_utils.combined_static_and_dynamic_shape(
box_encodings)
batch_size = combined_shape[0]
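    # Tile the anchors across the batch and decode every box encoding with the
    # Faster R-CNN box coder; keypoints are decoded as well when present.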
tiled_anchor_boxes = np.tile(
np.expand_dims(anchors.get(), 0), [batch_size, 1, 1])
tiled_anchors_boxlist = box_list.BoxList(
np.reshape(tiled_anchor_boxes, [-1, 4]))
box_coder = box_coder_builder.build("faster_rcnn_box_coder")
decoded_boxes = box_coder.decode(
np.reshape(box_encodings, [-1, box_coder.code_size]),
tiled_anchors_boxlist)
decoded_keypoints = None
if decoded_boxes.has_field(fields.BoxListFields.keypoints):
decoded_keypoints = decoded_boxes.get_field(
fields.BoxListFields.keypoints)
        num_keypoints = decoded_keypoints.shape[1]
decoded_keypoints = np.reshape(
decoded_keypoints,
np.stack([combined_shape[0], combined_shape[1], num_keypoints, 2]))
decoded_boxes = np.reshape(decoded_boxes.get(), np.stack(
[combined_shape[0], combined_shape[1], 4]))
return decoded_boxes, decoded_keypoints
| [
"numpy.ones",
"scipy.misc.imsave",
"os.path.join",
"numpy.zeros_like",
"platformx.plat_tensorflow.tools.processor.np_utils.label_map_util.create_category_index",
"platformx.plat_tensorflow.tools.processor.np_utils.label_map_util.load_labelmap",
"numpy.reshape",
"platformx.plat_tensorflow.tools.processor.np_utils.label_map_util.convert_label_map_to_categories",
"numpy.stack",
"platformx.plat_tensorflow.tools.processor.np_utils.shape_utils.combined_static_and_dynamic_shape",
"numpy.hstack",
"matplotlib.use",
"numpy.squeeze",
"platformx.plat_tensorflow.tools.processor.np_utils.box_coder_builder.build",
"os.listdir",
"numpy.concatenate",
"platformx.plat_tensorflow.tools.processor.np_utils.box_list_ops.concatenate",
"numpy.zeros",
"numpy.expand_dims",
"PIL.Image.open",
"numpy.split",
"platformx.plat_tensorflow.tools.processor.np_utils.anchor_generator_builder.build",
"numpy.array",
"platformx.plat_tensorflow.tools.processor.np_utils.post_processing_builder.build"
] | [((452, 473), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (466, 473), False, 'import matplotlib\n'), ((3544, 3618), 'numpy.array', 'np.array', (['[input_shape[1], input_shape[2], input_shape[3]]'], {'dtype': 'np.int32'}), '([input_shape[1], input_shape[2], input_shape[3]], dtype=np.int32)\n', (3552, 3618), True, 'import numpy as np\n'), ((4053, 4097), 'platformx.plat_tensorflow.tools.processor.np_utils.label_map_util.load_labelmap', 'label_map_util.load_labelmap', (['PATH_TO_LABELS'], {}), '(PATH_TO_LABELS)\n', (4081, 4097), False, 'from platformx.plat_tensorflow.tools.processor.np_utils import label_map_util\n'), ((4191, 4305), 'platformx.plat_tensorflow.tools.processor.np_utils.label_map_util.convert_label_map_to_categories', 'label_map_util.convert_label_map_to_categories', (['label_map'], {'max_num_classes': 'NUM_CLASSES', 'use_display_name': '(True)'}), '(label_map, max_num_classes=\n NUM_CLASSES, use_display_name=True)\n', (4237, 4305), False, 'from platformx.plat_tensorflow.tools.processor.np_utils import label_map_util\n'), ((4388, 4436), 'platformx.plat_tensorflow.tools.processor.np_utils.label_map_util.create_category_index', 'label_map_util.create_category_index', (['categories'], {}), '(categories)\n', (4424, 4436), False, 'from platformx.plat_tensorflow.tools.processor.np_utils import label_map_util\n'), ((4723, 4742), 'os.listdir', 'os.listdir', (['img_dir'], {}), '(img_dir)\n', (4733, 4742), False, 'import os\n'), ((4761, 4796), 'os.path.join', 'os.path.join', (['img_dir', 'file_list[0]'], {}), '(img_dir, file_list[0])\n', (4773, 4796), False, 'import os\n'), ((4844, 4864), 'PIL.Image.open', 'Image.open', (['IMG_PATH'], {}), '(IMG_PATH)\n', (4854, 4864), False, 'from PIL import Image\n'), ((5327, 5376), 'scipy.misc.imsave', 'misc.imsave', (['"""detection_result_ssd.png"""', 'image_np'], {}), "('detection_result_ssd.png', image_np)\n", (5338, 5376), False, 'from scipy import misc\n'), ((7639, 7681), 'numpy.zeros', 'np.zeros', ([], {'shape': '(raw_shape - num_detections)'}), '(shape=raw_shape - num_detections)\n', (7647, 7681), True, 'import numpy as np\n'), ((7696, 7727), 'numpy.hstack', 'np.hstack', (['(scores_1, scores_2)'], {}), '((scores_1, scores_2))\n', (7705, 7727), True, 'import numpy as np\n'), ((7742, 7782), 'numpy.reshape', 'np.reshape', (['scores', '(1, scores.shape[0])'], {}), '(scores, (1, scores.shape[0]))\n', (7752, 7782), True, 'import numpy as np\n'), ((7973, 8014), 'numpy.ones', 'np.ones', ([], {'shape': '(raw_shape - num_detections)'}), '(shape=raw_shape - num_detections)\n', (7980, 8014), True, 'import numpy as np\n'), ((8030, 8063), 'numpy.hstack', 'np.hstack', (['(classes_1, classes_2)'], {}), '((classes_1, classes_2))\n', (8039, 8063), True, 'import numpy as np\n'), ((8079, 8121), 'numpy.reshape', 'np.reshape', (['classes', '(1, classes.shape[0])'], {}), '(classes, (1, classes.shape[0]))\n', (8089, 8121), True, 'import numpy as np\n'), ((8274, 8324), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1, raw_shape - num_detections, 4)'}), '(shape=(1, raw_shape - num_detections, 4))\n', (8282, 8324), True, 'import numpy as np\n'), ((8338, 8367), 'numpy.hstack', 'np.hstack', (['(boxes_1, boxes_2)'], {}), '((boxes_1, boxes_2))\n', (8347, 8367), True, 'import numpy as np\n'), ((9782, 9814), 'platformx.plat_tensorflow.tools.processor.np_utils.anchor_generator_builder.build', 'anchor_generator_builder.build', ([], {}), '()\n', (9812, 9814), False, 'from platformx.plat_tensorflow.tools.processor.np_utils import shape_utils, 
anchor_generator_builder, box_list_ops, box_list, box_coder_builder, post_processing_builder, visualization_utils as vis_util\n'), ((10190, 10256), 'platformx.plat_tensorflow.tools.processor.np_utils.shape_utils.combined_static_and_dynamic_shape', 'shape_utils.combined_static_and_dynamic_shape', (['preprocessed_inputs'], {}), '(preprocessed_inputs)\n', (10235, 10256), False, 'from platformx.plat_tensorflow.tools.processor.np_utils import shape_utils, anchor_generator_builder, box_list_ops, box_list, box_coder_builder, post_processing_builder, visualization_utils as vis_util\n'), ((10522, 10560), 'platformx.plat_tensorflow.tools.processor.np_utils.box_list_ops.concatenate', 'box_list_ops.concatenate', (['anchors_list'], {}), '(anchors_list)\n', (10546, 10560), False, 'from platformx.plat_tensorflow.tools.processor.np_utils import shape_utils, anchor_generator_builder, box_list_ops, box_list, box_coder_builder, post_processing_builder, visualization_utils as vis_util\n'), ((10584, 10640), 'numpy.concatenate', 'np.concatenate', (["prediction_dict['box_encodings']"], {'axis': '(1)'}), "(prediction_dict['box_encodings'], axis=1)\n", (10598, 10640), True, 'import numpy as np\n'), ((10808, 10884), 'numpy.concatenate', 'np.concatenate', (["prediction_dict['class_predictions_with_background']"], {'axis': '(1)'}), "(prediction_dict['class_predictions_with_background'], axis=1)\n", (10822, 10884), True, 'import numpy as np\n'), ((15463, 15502), 'numpy.expand_dims', 'np.expand_dims', (['detection_boxes'], {'axis': '(2)'}), '(detection_boxes, axis=2)\n', (15477, 15502), True, 'import numpy as np\n'), ((15554, 15601), 'platformx.plat_tensorflow.tools.processor.np_utils.post_processing_builder.build', 'post_processing_builder.build', (['model_config.SSD'], {}), '(model_config.SSD)\n', (15583, 15601), False, 'from platformx.plat_tensorflow.tools.processor.np_utils import shape_utils, anchor_generator_builder, box_list_ops, box_list, box_coder_builder, post_processing_builder, visualization_utils as vis_util\n'), ((16972, 17038), 'platformx.plat_tensorflow.tools.processor.np_utils.shape_utils.combined_static_and_dynamic_shape', 'shape_utils.combined_static_and_dynamic_shape', (['preprocessed_images'], {}), '(preprocessed_images)\n', (17017, 17038), False, 'from platformx.plat_tensorflow.tools.processor.np_utils import shape_utils, anchor_generator_builder, box_list_ops, box_list, box_coder_builder, post_processing_builder, visualization_utils as vis_util\n'), ((17085, 17123), 'numpy.split', 'np.split', (['true_image_shapes', '(3)'], {'axis': '(1)'}), '(true_image_shapes, 3, axis=1)\n', (17093, 17123), True, 'import numpy as np\n'), ((18102, 18162), 'platformx.plat_tensorflow.tools.processor.np_utils.shape_utils.combined_static_and_dynamic_shape', 'shape_utils.combined_static_and_dynamic_shape', (['box_encodings'], {}), '(box_encodings)\n', (18147, 18162), False, 'from platformx.plat_tensorflow.tools.processor.np_utils import shape_utils, anchor_generator_builder, box_list_ops, box_list, box_coder_builder, post_processing_builder, visualization_utils as vis_util\n'), ((18425, 18473), 'platformx.plat_tensorflow.tools.processor.np_utils.box_coder_builder.build', 'box_coder_builder.build', (['"""faster_rcnn_box_coder"""'], {}), "('faster_rcnn_box_coder')\n", (18448, 18473), False, 'from platformx.plat_tensorflow.tools.processor.np_utils import shape_utils, anchor_generator_builder, box_list_ops, box_list, box_coder_builder, post_processing_builder, visualization_utils as vis_util\n'), ((10731, 10764), 
'numpy.squeeze', 'np.squeeze', (['box_encodings'], {'axis': '(2)'}), '(box_encodings, axis=2)\n', (10741, 10764), True, 'import numpy as np\n'), ((11623, 11681), 'platformx.plat_tensorflow.tools.processor.np_utils.shape_utils.combined_static_and_dynamic_shape', 'shape_utils.combined_static_and_dynamic_shape', (['feature_map'], {}), '(feature_map)\n', (11668, 11681), False, 'from platformx.plat_tensorflow.tools.processor.np_utils import shape_utils, anchor_generator_builder, box_list_ops, box_list, box_coder_builder, post_processing_builder, visualization_utils as vis_util\n'), ((13661, 13822), 'numpy.stack', 'np.stack', (['[combined_feature_map_shape[0], combined_feature_map_shape[1] *\n combined_feature_map_shape[2] * num_predictions_per_location, 1,\n box_code_size]'], {}), '([combined_feature_map_shape[0], combined_feature_map_shape[1] *\n combined_feature_map_shape[2] * num_predictions_per_location, 1,\n box_code_size])\n', (13669, 13822), True, 'import numpy as np\n'), ((13994, 14030), 'numpy.reshape', 'np.reshape', (['box_encodings', 'new_shape'], {}), '(box_encodings, new_shape)\n', (14004, 14030), True, 'import numpy as np\n'), ((18365, 18404), 'numpy.reshape', 'np.reshape', (['tiled_anchor_boxes', '[-1, 4]'], {}), '(tiled_anchor_boxes, [-1, 4])\n', (18375, 18404), True, 'import numpy as np\n'), ((18522, 18574), 'numpy.reshape', 'np.reshape', (['box_encodings', '[-1, box_coder.code_size]'], {}), '(box_encodings, [-1, box_coder.code_size])\n', (18532, 18574), True, 'import numpy as np\n'), ((19069, 19120), 'numpy.stack', 'np.stack', (['[combined_shape[0], combined_shape[1], 4]'], {}), '([combined_shape[0], combined_shape[1], 4])\n', (19077, 19120), True, 'import numpy as np\n'), ((14304, 14464), 'numpy.stack', 'np.stack', (['[combined_feature_map_shape[0], combined_feature_map_shape[1] *\n combined_feature_map_shape[2] * num_predictions_per_location,\n num_class_slots]'], {}), '([combined_feature_map_shape[0], combined_feature_map_shape[1] *\n combined_feature_map_shape[2] * num_predictions_per_location,\n num_class_slots])\n', (14312, 14464), True, 'import numpy as np\n'), ((17269, 17296), 'numpy.zeros_like', 'np.zeros_like', (['true_heights'], {}), '(true_heights)\n', (17282, 17296), True, 'import numpy as np\n'), ((17298, 17324), 'numpy.zeros_like', 'np.zeros_like', (['true_widths'], {}), '(true_widths)\n', (17311, 17324), True, 'import numpy as np\n'), ((18948, 19014), 'numpy.stack', 'np.stack', (['[combined_shape[0], combined_shape[1], num_keypoints, 2]'], {}), '([combined_shape[0], combined_shape[1], num_keypoints, 2])\n', (18956, 19014), True, 'import numpy as np\n')] |