max_stars_repo_path (stringlengths 7-81) | max_stars_repo_name (stringlengths 12-43) | max_stars_count (int64 0-6.26k) | id (stringlengths 6-8) | content (stringlengths 110-39.1k) | score (float64 0.44-1) | label (stringclasses, 3 values)
---|---|---|---|---|---|---|
tests/list/list05.py | ktok07b6/polyphony | 83 | 11175103 | <reponame>ktok07b6/polyphony
from polyphony import testbench
def list05(x):
a = [1,2,3,4]*100
def f(x:list):
for i in range(0, 4):
x[i] *= 2
def g(x:list):
for i in range(0, 4):
x[i] += 1
f(a)
g(a)
return a[x]
@testbench
def test():
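    # After f (doubles a[0..3]) and g (adds 1), a[i] == original[i] * 2 + 1.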
assert 3 == list05(0)
assert 5 == list05(1)
assert 7 == list05(2)
assert 9 == list05(3)
test()
| 0.78125 | high |
src/005_dict/dict_test_run.py | xupingmao/benchmark | 0 | 1639560 | # -*- coding:utf-8 -*-
# @author xupingmao
# @since 2022/01/26 17:01:06
# @modified 2022/01/26 17:05:37
# @filename dict_test_run.py
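# pyximport compiles Cython .pyx modules on the fly at import time, so the
# dict_test2 import below can transparently pick up a Cython build.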
import pyximport
pyximport.install()
import dict_test2
dict_test2.run()
| 0.980469 | low |
src/lexer/lexer.py | La-Tribu/cool-compiler-2021 | 0 | 3301688 | import ply.lex as lex
states = (
('commentLine', 'exclusive'),
('commentText', 'exclusive'),
('string', 'exclusive'),
)
reserved = {
'class': 'CLASS',
'inherits': 'INHERITS',
'if': 'IF',
'then': 'THEN',
'else': 'ELSE',
'fi': 'FI',
'while': 'WHILE',
'loop': 'LOOP',
'pool': 'POOL',
'let': 'LET',
'in': 'IN',
'case': 'CASE',
'of': 'OF',
'esac': 'ESAC',
'new': 'NEW',
'isvoid': 'ISVOID',
'not' : 'NOT'
}
t_ignore = ' \t'
tokens = [
#Identifiers
'TYPE', 'ID',
#Primitive data types
'INTEGER', 'STRING', 'BOOL',
# Special keywords
'ACTION',
# Operators
'ASSIGN', 'LESS', 'LESSEQUAL', 'EQUAL', 'PLUS', 'MINUS', 'MULT', 'DIV', 'INT_COMPLEMENT',
#Literals
'OPAR', 'CPAR', 'OBRACE', 'CBRACE', 'COLON', 'COMMA', 'DOT', 'SEMICOLON', 'AT',
] + list(reserved.values())
# Special keywords
t_ACTION = r'=>'
# Operators
t_ASSIGN = r'<-'
t_LESS = r'<'
t_LESSEQUAL = r'<='
t_EQUAL = r'='
t_PLUS = r'\+'
t_MINUS = r'-'
t_MULT = r'\*'
t_DIV = r'/'
t_INT_COMPLEMENT = r'~'
# Literals
t_OPAR = r'\('
t_CPAR = r'\)'
t_OBRACE = r'{'
t_CBRACE = r'}'
t_COLON = r':'
t_COMMA = r','
t_DOT = r'\.'
t_SEMICOLON = r';'
t_AT = r'@'
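# Note on ply.lex matching order: token rules defined as functions are tried
# in the order they appear in this file (so t_BOOL must precede t_ID), while
# string rules above are sorted by decreasing regex length (so '<=' beats '<').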
def t_INTEGER(t):
r'\d+'
t.value = int(t.value)
return t
def t_TYPE(t):
r'[A-Z][a-zA-Z_0-9]*'
t.type = reserved.get(t.value.lower(), 'TYPE')
return t
def t_BOOL(t):
r'f[Aa][Ll][Ss][Ee]|t[Rr][Uu][Ee]'
t.value = (t.value.lower() == 'true')
return t
def t_ID(t):
r'[a-z][a-zA-Z_0-9]*'
t.type = reserved.get(t.value.lower(), 'ID')
return t
t_commentLine_ignore = ' \t'
def t_LINECOMMENT(t):
r'--'
t.lexer.begin('commentLine')
def t_TEXTCOMMENT(t):
r'\(\*'
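    # COOL allows nested (* ... *) comments; lexer.level tracks nesting depth.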
t.lexer.comment_start = t.lexer.lexpos
t.lexer.level = 1
t.lexer.begin('commentText')
def t_STRING(t):
r'"'
t.lexer.string_start = t.lexer.lexpos
t.lexer.begin('string')
def t_commentLine_error(t):
t.lexer.skip(1)
def t_commentLine_newline(t):
r'\n+'
t.lexer.begin('INITIAL')
t.lexer.lineno += len(t.value)
t_commentText_ignore = ' \t'
def t_commentText_error(t):
t.lexer.skip(1)
def t_commentText_OPENTEXT(t):
r'\(\*'
t.lexer.level += 1
def t_commentText_CLOSETEXT(t):
r'\*\)'
t.lexer.level -= 1
if t.lexer.level == 0:
t.lexer.begin('INITIAL')
def t_commentText_newline(t):
r'\n+'
t.lexer.lineno += len(t.value)
def t_commentText_eof(t):
append_error_lexing(t.lexer.lineno, find_column1(t), "EOF in comment")
t_string_ignore = ''
def t_string_CLOSESTRING(t):
r'"'
t.value = t.lexer.lexdata[t.lexer.string_start:t.lexer.lexpos - 1]
t.type = 'STRING'
t.lexer.begin('INITIAL')
return t
def t_string_newline(t):
r'\\\n'
t.lexer.lineno += 1
def t_string_body(t):
r'([^\n\"\\]|\\.)+'
if t.value.rfind('\0') != -1:
append_error_lexing(t.lineno, find_column1(t) + t.value.rfind('\0'),
"String contains null character")
def t_string_error(t):
if t.value[0] == '\n':
append_error_lexing(t.lineno, find_column1(t), "Unterminated string constant")
t.lexer.lineno += 1
t.lexer.skip(1)
t.lexer.begin('INITIAL')
def t_string_eof(t):
append_error_lexing(t.lineno, find_column1(t), "Unterminated string constant")
def t_error(t):
append_error_lexing(t.lineno, find_column1(t), f'ERROR \"{t.value[0]}\"')
t.lexer.skip(1)
def t_newline(t):
r'\n+'
t.lexer.lineno += len(t.value)
def find_column(lexdata, lexpos):
line_start = lexdata.rfind('\n', 0, lexpos)
return (lexpos - line_start)
def find_column1(t, i = None):
lexpos = t.lexpos if (i is None) else t.lexpos(i)
line_start = t.lexer.lexdata.rfind('\n', 0, lexpos)
return (lexpos - line_start)
errors_lexing = []
def append_error_lexing(line, column, message):
errors_lexing.append(f'({line}, {column}) - LexicographicError: {message}')
lexer = lex.lex()
| 0.988281 | high |
nicos_ess/utilities/validators.py | ess-dmsc/nicos | 1 | 4963800 | <filename>nicos_ess/utilities/validators.py
# -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2021 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# <NAME> <<EMAIL>>
#
# *****************************************************************************
from nicos.guisupport.qt import QDoubleValidator, QValidator
class DoubleValidator(QDoubleValidator):
def __init__(self, bottom, top, decimal=0):
QDoubleValidator.__init__(self)
# Required to handle manual float validation
self.maximum_value = top
self.minimum_value = bottom
self.decimal_precision = decimal
self.setNotation(QDoubleValidator.StandardNotation)
self.setRange(bottom, top, decimal) # Required for standard validation
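    # QValidator.validate returns one of three states: Acceptable (valid as
    # typed), Intermediate (may become valid with more input), or Invalid
    # (the edit is rejected outright).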
def validate(self, string, pos):
# We need to add this to have a successful float casting.
if string == "":
return QValidator.Intermediate, string, pos
if string == "0.":
return QValidator.Intermediate, string, pos
# We check if the length of the string is equal to 1 so that only `-` is
# an intermediate value.
if string.startswith("-") and len(string) == 1:
return QValidator.Intermediate, string, pos
"""
Here we make sure to use `.` in floats instead of `,` which
QValidator expects. Try-except block prevents string-types after
`.` via float casting. That is,
>> if '.' in string:
return QValidator.Intermediate, string, pos
leads acceptable values like
>> 12.sdaad12r..asf-dsfs1*,
which are of course invalid.
"""
if '.' in string:
try:
if self.maximum_value > float(string) > self.minimum_value:
return QValidator.Acceptable, string, pos
except ValueError:
return QValidator.Invalid, string, pos
finally:
if len(string.split('.')[1]) > self.decimal_precision:
return QValidator.Invalid, string, pos
# Handle ranges in the absence of `.`, separately.
try:
if float(string) > self.maximum_value\
or self.minimum_value > float(string):
return QValidator.Invalid, string, pos
except ValueError:
return QValidator.Invalid, string, pos
return QDoubleValidator.validate(self, string, pos)
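# Minimal (hypothetical) usage sketch with a Qt line edit:
#   edit = QLineEdit()
#   edit.setValidator(DoubleValidator(bottom=0.0, top=100.0, decimal=2))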
| 0.796875 | high |
src/main/CONFIG_READER/read.py | alvinajacquelyn/COMP0016 | 0 | 8025992 | import configparser
def get_details(field, detail):
config = configparser.ConfigParser()
config.read("../config.ini")
return config[field][detail] | 0.644531 | medium |
vumi/transports/netcore/__init__.py | seidu626/vumi | 199 | 6486032 | <reponame>seidu626/vumi<gh_stars>100-1000
from vumi.transports.netcore.netcore import NetcoreTransport
__all__ = ['NetcoreTransport']
| 0.914063 | low |
build/model/video_frame.py | mrKallah/Torcs-Autonomous-Vehicle-using-A3C | 2 | 9749192 | import matplotlib.pyplot as plt
import time
import cv2
import matplotlib
import numpy as np
class plt_video_frame:
def __init__(self, port):
'''
creates the plot window.
'''
matplotlib.use("TkAgg")
self.fig = plt.figure(figsize=(3.25, 2.75))
self.port = port
def refresh_plot(self, img):
'''
Refreshes the plot with an image.
'''
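        # figimage blits raw pixels with no axes or scaling, so the frame is
        # resized to fit the small figure canvas before drawing.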
if img.shape[0] != 240:
img = cv2.resize(img, (320, 240))
self.fig.clf()
self.fig.canvas.flush_events()
self.fig.canvas.set_window_title("{}".format(self.port))
self.fig.figimage(img)
self.fig.suptitle("What the model sees")
self.fig.canvas.draw()
self.fig.show()
self.fig.canvas.flush_events()
if __name__ == "__main__":
img = cv2.imread("test.jpeg")
img2 = cv2.imread("test.png")
img = np.asarray(img, dtype=np.float32)
img2 = np.asarray(img2, dtype=np.float32)
img = cv2.resize(img, (240, 320)) / 255
img2 = cv2.resize(img2, (240, 320)) / 255
v1 = plt_video_frame(0)
v2 = plt_video_frame(1)
print("init done")
time.sleep(2)
v1.refresh_plot(img)
print("plot 1")
time.sleep(2)
v2.refresh_plot(img2)
print("plot 2")
time.sleep(2)
while True:
v1.refresh_plot(img)
v2.refresh_plot(img2)
img = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)
img2 = cv2.rotate(img2, cv2.ROTATE_90_COUNTERCLOCKWISE)
v1.refresh_plot(img)
v2.refresh_plot(img2)
img = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)
img2 = cv2.rotate(img2, cv2.ROTATE_90_COUNTERCLOCKWISE)
v1.refresh_plot(img)
v2.refresh_plot(img2)
img = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)
img2 = cv2.rotate(img2, cv2.ROTATE_90_COUNTERCLOCKWISE)
v1.refresh_plot(img)
v2.refresh_plot(img2)
img = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)
img2 = cv2.rotate(img2, cv2.ROTATE_90_COUNTERCLOCKWISE)
| 0.59375 | high |
setup.py | bellibot/penlm | 0 | 345775 | <filename>setup.py
from setuptools import setup, find_packages
from os import path
# read the contents of your README file
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name = 'penlm',
packages = ['penlm'],
version = 'v1.0.12',
license='MIT',
description = 'Penalized Linear Models for Classification and Regression',
long_description=long_description,
long_description_content_type='text/markdown',
author = '<NAME>',
author_email = '<EMAIL>',
url = 'https://github.com/bellibot/penlm',
download_url = 'https://github.com/bellibot/penlm/archive/refs/tags/v1.0.12.tar.gz',
keywords = ['Classification', 'Regression', 'Linear', 'Penalty'],
python_requires='>=3.5',
install_requires=[
'numpy',
'joblib',
'Pyomo',
'scikit-learn',
],
classifiers=[
'Development Status :: 5 - Production/Stable', # Chose either "3 - Alpha", "4 - Beta" or "5 - Production/Stable"
],
)
| 0.863281 | high |
api/views/utils.py | StateArchivesOfNorthCarolina/ratom_server | 1 | 3407967 | <gh_stars>1-10
from django_elasticsearch_dsl_drf.viewsets import DocumentViewSet
from api.documents.utils import LoggingSearch
class LoggingDocumentViewSet(DocumentViewSet):
"""DocumentViewSet extension that instantiates LoggingSearch to perform optional logging."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.search = LoggingSearch(
using=self.client, index=self.index, doc_type=self.document._doc_type.name
)
| 0.839844 | medium |
src/odm_report_shot_coverage/models/test_wavefront_25d.py | terra-submersa/opensfm-camera-coverage | 0 | 266959 | <filename>src/odm_report_shot_coverage/models/test_wavefront_25d.py
from unittest import TestCase
from odm_report_shot_coverage.models.shot import Boundaries
from odm_report_shot_coverage.models.wavefront_25d import _parse_facet_vertices, _paving_sizes
class Test(TestCase):
def test_parse_facet_vertices(self):
        facet_line = 'f 34922/4/34922 34921/7/34921 35192/2/35192'  # avoid shadowing builtin str
        got = _parse_facet_vertices(facet_line)
self.assertEqual((34921, 34920, 35191), got)
def test_paving_sizes(self):
b = Boundaries(x_min=-50, x_max=150, y_min=25, y_max=125)
got = _paving_sizes(b, 30)
self.assertEqual((8, 4), got)
| 0.519531 | high |
common/ops/image_ops.py | vahidk/TensorflowFramework | 129 | 5131175 | <reponame>vahidk/TensorflowFramework
"""Image ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from common.ops import shape_ops
def center_crop(image, landmark, size):
"""Crop labeled image."""
image_size = image.shape[-2].value
if image_size == size:
return image, landmark
offset = (image_size - size) // 2
image = tf.image.crop_to_bounding_box(
image, offset, offset, size, size)
if landmark is not None:
landmark -= offset
return image, landmark
def scale_image(image, landmark, size):
image_size = image.shape[-2].value
if image_size == size:
return image, landmark
image = tf.image.resize_images(image, [size, size])
if landmark is not None:
scale = tf.cast(size, tf.float32) / image_size
landmark *= scale
return image, landmark
def random_crop_image(image, landmark, margin):
"""Crop labeled image."""
shape = shape_ops.get_shape(image)
max_offset = np.array(margin) * 2
crop_width, crop_height = np.array(shape[1::-1]) - max_offset
offset = tf.random_uniform([2], maxval=max_offset, dtype=tf.int32)
image = tf.image.crop_to_bounding_box(
image, offset[1], offset[0], crop_height, crop_width)
landmark = landmark - tf.cast(tf.expand_dims(offset, 0), tf.float32)
return image, landmark
def transform(image, landmark, translation=[0, 0], rotation=0, scale=1):
"""Apply an affine transformation to the image."""
image = tf.convert_to_tensor(image)
landmark = tf.convert_to_tensor(landmark, dtype=tf.float32)
translation = tf.convert_to_tensor(translation, dtype=tf.float32)
rotation = tf.convert_to_tensor(rotation, dtype=tf.float32)
scale = tf.convert_to_tensor(scale, dtype=tf.float32)
# Generate a transformation matrix
h, w = image.shape.as_list()[-3:-1]
tx, ty = tf.unstack(translation, axis=-1)
sc = tf.cos(rotation) / scale
ss = tf.sin(rotation) / scale
cx = (sc - 1) * w * 0.5 + ss * h * 0.5
cy = -ss * w * 0.5 + (sc - 1) * h * 0.5
ze = tf.zeros_like(scale)
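  # tf.contrib.image.transform expects 8 projective-transform parameters
  # [a0, a1, a2, b0, b1, b2, c0, c1]; with c0 = c1 = 0 each output pixel
  # (x, y) is sampled from input location (a0*x + a1*y + a2, b0*x + b1*y + b2).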
# Apply transformation to image
p = tf.transpose([sc, ss, -cx - tx, -ss, sc, -cy - ty, ze, ze])
image_shape = image.shape
image = tf.contrib.image.transform(image, p, interpolation="BILINEAR")
image.set_shape(image_shape)
# Apply transformation to landmarks
a_r = tf.linalg.inv(tf.transpose([[sc, -ss], [ss, sc]]))
a_t = tf.expand_dims(tf.transpose([cx + tx, cy + ty]), -2)
landmark = tf.matmul(landmark + a_t, a_r, transpose_b=True)
return image, landmark
def random_transform(image, landmark, translation=[0, 0], rotation=0, scale=0):
"""Randomly apply an affine transformation to the image."""
shape = shape_ops.get_shape(image)[:len(image.shape)-3]
t = translation * tf.random_uniform(shape + [2], -1., 1., dtype=tf.float32)
r = rotation * tf.random_uniform(shape, -1., 1., dtype=tf.float32)
s = scale * tf.random_uniform(shape, -1., 1., dtype=tf.float32) + 1.
return transform(image, landmark, t, r, s)
def flip_image(image, landmark, reorder, random):
"""Flip images and landmarks."""
assert(landmark.shape.ndims == 2)
w = image.shape[-2].value
image_t = tf.image.flip_left_right(image)
landmark_r = tf.gather(landmark, reorder)
landmark_t = tf.stack([w - 1 - landmark_r[:, 0], landmark_r[:, 1]], -1)
if random:
flip = tf.random_uniform([]) > 0.5
image_t = tf.cond(flip, lambda: image_t, lambda: image)
landmark_t = tf.cond(flip, lambda: landmark_t, lambda: landmark)
return image_t, landmark_t
def compute_compact_crop(landmarks, input_size):
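  """Compute a per-image square crop centered on the landmarks, shrunk as
  needed so it stays inside the [0, input_size) image bounds."""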
int_labels = tf.cast(landmarks, tf.int32)
minimum = tf.reduce_min(int_labels, axis=1)
maximum = tf.reduce_max(int_labels, axis=1)
centers = (minimum + maximum) // 2
half_sizes = tf.reduce_max(maximum - minimum, axis=1, keepdims=True) // 2
low = tf.maximum(half_sizes - centers, 0)
high = tf.maximum(half_sizes + centers - input_size, 0)
shifts = tf.maximum(low, high)
half_sizes -= tf.reduce_max(shifts, axis=1, keepdims=True)
offsets = centers - half_sizes
offsets = tf.cast(offsets, tf.float32)
sizes = half_sizes * 2
return offsets, sizes
def crop_images(images, landmarks, offsets, sizes, scales, target_size=None):
if target_size is None:
target_size = np.array(images.shape.as_list()[2:0:-1])
def _crop_and_scale_image(input_):
im, off, sz = input_
im = tf.image.crop_to_bounding_box(im, off[1], off[0], sz[0], sz[0])
im = tf.image.resize_images(im, target_size)
return im
int_offsets = tf.cast(offsets, tf.int32)
images = tf.map_fn(
_crop_and_scale_image, (images, int_offsets, sizes), dtype=tf.float32)
scales = tf.cast(np.array([target_size]), tf.float32) / sizes
landmarks -= tf.expand_dims(offsets, 1)
landmarks *= tf.expand_dims(scales, 1)
return images, landmarks
def augment_image(image):
"""Augment the input with its mirrored image."""
flipped = tf.reverse(image, axis=[-2])
image = tf.concat([image, flipped], axis=-2)
return image
def cutout_image(image, min_size, max_size):
"""Cutout part of the image."""
assert(image.shape.ndims == 3)
h, w = image.shape.as_list()[:2]
s = tf.random_uniform([2], min_size, max_size, tf.int32)
y = tf.random_uniform([], 0, h - s[0], tf.int32)
x = tf.random_uniform([], 0, w - s[1], tf.int32)
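  # Pad a zero patch out to full size with ones: the mask is 0 inside the
  # random rectangle and 1 elsewhere, so multiplying cuts the patch out.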
mask = tf.pad(tf.zeros([s[0], s[1], 3]),
[[y, h - s[0] - y], [x, w - s[1] - x], [0, 0]],
constant_values=1.0)
masked_image = image * mask
return masked_image
def image_noise(image, params):
"""Add image noise.
args:
image: Input 3D image tensor.
params: list of triplets (scale, gaussian_std, salt_and_pepper)
returns:
Noisy image.
"""
image = tf.convert_to_tensor(image)
h, w, d = image.shape.as_list()
for scale, std, snp in params:
sh, sw = int(h * scale), int(w * scale)
if std > 0:
noise = tf.random_normal([sh, sw, d], stddev=std)
else:
noise = tf.zeros([sh, sw, d])
if snp > 0:
noise += tf.cast(
tf.random_uniform([sh, sw, 1]) < snp * 0.5, tf.float32) * 2.0
noise -= tf.cast(
tf.random_uniform([sh, sw, 1]) < snp * 0.5, tf.float32) * 2.0
noise = tf.image.resize_images(noise, [h, w])
image += noise
image = tf.clip_by_value(image, 0.0, 1.0)
return image
| 0.996094 | high |
ferry/config/hadoop/hiveconfig.py | jhorey/ferry | 44 | 8394391 | # Copyright 2014 OpenCore LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sh
import sys
from string import Template
class HiveClientInitializer(object):
"""
Create a new initializer
Param user The user login for the git repo
"""
def __init__(self, system):
self.template_dir = None
self.template_repo = None
self.container_data_dir = HiveClientConfig.data_directory
self.container_log_dir = HiveClientConfig.log_directory
"""
Generate a new hostname
"""
def new_host_name(self, instance_id):
return 'hive-client' + str(instance_id)
"""
Start the service on the containers.
"""
def _execute_service(self, containers, entry_point, fabric, cmd):
output = fabric.cmd(containers, '/service/sbin/startnode %s client' % cmd)
"""
Generate a new configuration.
"""
def _generate_config_dir(self, uuid):
return 'hive_client_' + str(uuid)
def get_public_ports(self, num_instances):
"""
Ports to expose to the outside world.
"""
return []
def get_internal_ports(self, num_instances):
"""
Ports needed for communication within the network.
This is usually used for internal IPC.
"""
return []
def get_working_ports(self, num_instances):
"""
Ports necessary to get things working.
"""
        ports = []
        return ports
"""
Generate a new configuration
"""
def generate(self, num):
return HiveClientConfig(num)
"""
Generate the hive site configuration.
"""
def _generate_hive_site(self, config, new_config_dir):
in_file = open(self.template_dir + '/hive-site.xml.template', 'r')
out_file = open(new_config_dir + '/hive-site.xml', 'w+')
changes = { "DB":config.metastore,
"USER": os.environ['USER'] }
for line in in_file:
s = Template(line).substitute(changes)
out_file.write(s)
in_file.close()
out_file.close()
"""
Apply the configuration to the instances
"""
def apply(self, config, containers):
# The "entry point" is the way to contact the storage service.
# For gluster this is the IP address of the "master" and the volume name.
entry_point = { 'type' : 'hive' }
# Create a new configuration directory, and place
# into the template directory.
config_dirs = []
new_config_dir = "/tmp/" + self._generate_config_dir(config.uuid)
try:
sh.mkdir('-p', new_config_dir)
except:
sys.stderr.write('could not create config dir ' + new_config_dir)
self._generate_hive_site(config, new_config_dir)
# Each container needs to point to a new config dir.
for c in containers:
config_files = new_config_dir + '/*'
config_dirs.append([c['container'],
config_files,
config.config_directory])
return config_dirs, entry_point
class HiveClientConfig(object):
data_directory = '/service/data/main'
log_directory = '/service/data/logs'
config_directory = '/service/conf/hive'
def __init__(self, num):
self.num = num
self.config_directory = HiveClientConfig.config_directory
self.hadoop_config_dir = None
self.metastore = None
| 0.992188 | high |
array plus array test.py | heicj/code-katas | 0 | 6653383 | <filename>array plus array test.py
from code_katas import array_plus_array  # hyphenated names like code-katas cannot be imported
import unittest
"""
Test.assert_equals(array_plus_array([1, 2, 3], [4, 5, 6]), 21)
Test.assert_equals(array_plus_array([-1, -2, -3], [-4, -5, -6]), -21)
Test.assert_equals(array_plus_array([0, 0, 0], [4, 5, 6]), 15)
Test.assert_equals(array_plus_array([100, 200, 300], [400, 500, 600]), 2100)
"""
class TestIt(unittest.TestCase):
def test_1(self):
self.assertEqual(array_plus_array([1, 2, 3], [4, 5, 6]), 21)
def test_2(self):
self.assertEqual(array_plus_array([-1, -2, -3], [-4, -5, -6]), -21)
def test_3(self):
self.assertEqual(array_plus_array([0, 0, 0], [4, 5, 6]), 15)
def test_4(self):
self.assertEqual(array_plus_array([100, 200, 300], [400, 500, 600]), 2100)
def test_5(self):
self.assertEqual(array_plus_array([10, 3, 2], [4, 6, 3]), 28)
def test_6(self):
        self.assertEqual(array_plus_array([3, 20, 7], [6, 3, 1]), 40)
def test_7(self):
self.assertEqual(array_plus_array([2,10,8], [2, 4, 5]), 31)
def test_8(self):
        self.assertEqual(array_plus_array([3, 7, 30], [1, 7, 9]), 57)
if __name__ == '__main__':
unittest.main() | 0.960938 | high |
core/src/main/python/synapse/ml/cyber/anomaly/collaborative_filtering.py | imatiach-msft/SynapseML | 869 | 11517599 | __author__ = 'rolevin'
import os
from typing import List, Optional, Tuple
from synapse.ml.cyber.anomaly.complement_access import ComplementAccessTransformer
from synapse.ml.cyber.feature import indexers, scalers
from synapse.ml.cyber.utils import spark_utils
import numpy as np
from pyspark import SQLContext # noqa
from pyspark.ml import Estimator, Transformer
from pyspark.ml.param.shared import Param, Params
from pyspark.ml.recommendation import ALS
from pyspark.sql import DataFrame, functions as f, types as t
"""
Glossary:
the term 'res' in this package is a shorthand for resource
"""
def _make_dot():
"""
create a method that performs a dot product between two vectors (list of doubles)
:return: the method
"""
@f.udf(t.DoubleType())
def dot(v, u):
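        # If one vector is shorter (rank vs. rank + 2 once bias terms are
        # appended), pad it with 1.0 so the extra bias entries of the longer
        # vector pass through the dot product unchanged.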
if (v is not None) and (u is not None):
vv = np.pad(np.array(v), (0, len(u) - len(v)), 'constant', constant_values=1.0) if len(v) < len(
u) else np.array(v)
uu = np.pad(np.array(u), (0, len(v) - len(u)), 'constant', constant_values=1.0) if len(u) < len(
v) else np.array(u)
return float(vv.dot(uu))
else:
return None
return dot
class AccessAnomalyConfig:
"""
Define default values for AccessAnomaly Params
"""
default_tenant_col = 'tenant'
default_user_col = 'user'
default_res_col = 'res'
default_likelihood_col = 'likelihood'
default_output_col = 'anomaly_score'
default_rank = 10
default_max_iter = 25
default_reg_param = 1.0
default_num_blocks = None # |tenants| if separate_tenants is False else 10
default_separate_tenants = False
default_low_value = 5.0
default_high_value = 10.0
default_apply_implicit_cf = True
default_alpha = 1.0
default_complementset_factor = 2
default_neg_score = 1.0
class _UserResourceFeatureVectorMapping:
"""
Private class used to pass the mappings as calculated by the AccessAnomaliesEstimator.
An object representing the user and resource models
(mapping from name to latent vector)
and the relevant column names
"""
def __init__(self,
tenant_col: str,
user_col: str,
user_vec_col: str,
res_col: str,
res_vec_col: str,
history_access_df: Optional[DataFrame],
user2component_mappings_df: Optional[DataFrame],
res2component_mappings_df: Optional[DataFrame],
user_feature_vector_mapping_df: DataFrame,
res_feature_vector_mapping_df: DataFrame):
self.tenant_col = tenant_col
self.user_col = user_col
self.user_vec_col = user_vec_col
self.res_col = res_col
self.res_vec_col = res_vec_col
self.history_access_df = history_access_df
self.user2component_mappings_df = user2component_mappings_df
self.res2component_mappings_df = res2component_mappings_df
self.user_feature_vector_mapping_df = user_feature_vector_mapping_df
self.res_feature_vector_mapping_df = res_feature_vector_mapping_df
assert \
self.history_access_df is None or \
set(self.history_access_df.schema.fieldNames()) == {tenant_col, user_col, res_col}, \
self.history_access_df.schema.fieldNames()
def replace_mappings(
self,
user_feature_vector_mapping_df: Optional[DataFrame] = None,
res_feature_vector_mapping_df: Optional[DataFrame] = None):
"""
create a new model replacing the user and resource models with new ones (optional)
:param user_feature_vector_mapping_df: optional new user model mapping names to latent vectors
:param res_feature_vector_mapping_df: optional new resource model mapping names to latent vectors
:return:
"""
return _UserResourceFeatureVectorMapping(
self.tenant_col,
self.user_col,
self.user_vec_col,
self.res_col,
self.res_vec_col,
self.history_access_df,
self.user2component_mappings_df,
self.res2component_mappings_df,
user_feature_vector_mapping_df
if user_feature_vector_mapping_df is not None else self.user_feature_vector_mapping_df,
res_feature_vector_mapping_df
if res_feature_vector_mapping_df is not None else self.res_feature_vector_mapping_df
)
def check(self):
"""
check the validity of the model
:return: boolean value where True indicating the verification succeeded
"""
return self._check_user_mapping() and self._check_res_mapping()
def _check_user_mapping(self):
field_map = {ff.name: ff for ff in self.user_feature_vector_mapping_df.schema.fields}
assert field_map.get(self.tenant_col) is not None, field_map
assert field_map.get(self.user_col) is not None
return self.user_feature_vector_mapping_df.select(
self.tenant_col, self.user_col
).distinct().count() == self.user_feature_vector_mapping_df.count()
def _check_res_mapping(self):
field_map = {ff.name: ff for ff in self.res_feature_vector_mapping_df.schema.fields}
assert field_map.get(self.tenant_col) is not None, field_map
assert field_map.get(self.res_col) is not None
return self.res_feature_vector_mapping_df.select(
self.tenant_col, self.res_col
).distinct().count() == self.res_feature_vector_mapping_df.count()
# noinspection PyPep8Naming
class AccessAnomalyModel(Transformer):
outputCol = Param(
Params._dummy(),
"outputCol",
"The name of the output column representing the calculated anomaly score. "
"Values will be between (-inf, +inf) with an estimated mean of 0.0 and standard deviation of 1.0. "
)
"""
A pyspark.ml.Transformer model that can predict anomaly scores for user, resource access pairs
"""
def __init__(self, userResourceFeatureVectorMapping: _UserResourceFeatureVectorMapping, outputCol: str):
super().__init__()
self.user_res_feature_vector_mapping = userResourceFeatureVectorMapping
has_user2component_mappings = self.user_res_feature_vector_mapping.user2component_mappings_df is not None
has_res2component_mappings = self.user_res_feature_vector_mapping.res2component_mappings_df is not None
assert has_user2component_mappings == has_res2component_mappings
self.has_components = has_user2component_mappings and has_res2component_mappings
self.preserve_history = True
if self.has_components:
self._user_mapping_df = self.user_res_feature_vector_mapping.user_feature_vector_mapping_df.join(
self.user_res_feature_vector_mapping.user2component_mappings_df, [self.tenant_col, self.user_col]
).select(
self.tenant_col,
self.user_col,
self.user_vec_col,
f.col('component').alias('user_component')
).cache()
self._res_mapping_df = self.user_res_feature_vector_mapping.res_feature_vector_mapping_df.join(
self.user_res_feature_vector_mapping.res2component_mappings_df, [self.tenant_col, self.res_col]
).select(
self.tenant_col,
self.res_col,
self.res_vec_col,
f.col('component').alias('res_component')
).cache()
else:
self._user_mapping_df = self.user_res_feature_vector_mapping.user_feature_vector_mapping_df
self._res_mapping_df = self.user_res_feature_vector_mapping.res_feature_vector_mapping_df
spark_utils.ExplainBuilder.build(self, outputCol=outputCol)
@staticmethod
def _metadata_schema() -> t.StructType:
return t.StructType([
t.StructField('tenant_col', t.StringType(), False),
t.StructField('user_col', t.StringType(), False),
t.StructField('user_vec_col', t.StringType(), False),
t.StructField('res_col', t.StringType(), False),
t.StructField('res_vec_col', t.StringType(), False),
t.StructField('output_col', t.StringType(), False),
t.StructField('has_history_access_df', t.BooleanType(), False),
t.StructField('has_user2component_mappings_df', t.BooleanType(), False),
t.StructField('has_res2component_mappings_df', t.BooleanType(), False),
t.StructField('has_user_feature_vector_mapping_df', t.BooleanType(), False),
t.StructField('has_res_feature_vector_mapping_df', t.BooleanType(), False)
])
def save(self, path: str, path_suffix: str = '', output_format: str = 'parquet'):
dfs = [
self.user_res_feature_vector_mapping.history_access_df,
self.user_res_feature_vector_mapping.user2component_mappings_df,
self.user_res_feature_vector_mapping.res2component_mappings_df,
self.user_res_feature_vector_mapping.user_feature_vector_mapping_df,
self.user_res_feature_vector_mapping.res_feature_vector_mapping_df
]
adf = next(iter([df for df in dfs if df is not None]))
assert adf is not None
spark = spark_utils.DataFrameUtils.get_spark_session(adf)
metadata_df = spark.createDataFrame([
(
self.tenant_col,
self.user_col,
self.user_vec_col,
self.res_col,
self.res_vec_col,
self.output_col,
self.user_res_feature_vector_mapping.history_access_df is not None,
self.user_res_feature_vector_mapping.user2component_mappings_df is not None,
self.user_res_feature_vector_mapping.res2component_mappings_df is not None,
self.user_res_feature_vector_mapping.user_feature_vector_mapping_df is not None,
self.user_res_feature_vector_mapping.res_feature_vector_mapping_df is not None
)
], AccessAnomalyModel._metadata_schema())
metadata_df.write.format(output_format).save(os.path.join(path, 'metadata_df', path_suffix))
if self.user_res_feature_vector_mapping.history_access_df is not None:
self.user_res_feature_vector_mapping.history_access_df.write.format(output_format).save(
os.path.join(path, 'history_access_df', path_suffix)
)
if self.user_res_feature_vector_mapping.user2component_mappings_df is not None:
self.user_res_feature_vector_mapping.user2component_mappings_df.write.format(output_format).save(
os.path.join(path, 'user2component_mappings_df', path_suffix)
)
if self.user_res_feature_vector_mapping.res2component_mappings_df is not None:
self.user_res_feature_vector_mapping.res2component_mappings_df.write.format(output_format).save(
os.path.join(path, 'res2component_mappings_df', path_suffix)
)
if self.user_res_feature_vector_mapping.user_feature_vector_mapping_df is not None:
self.user_res_feature_vector_mapping.user_feature_vector_mapping_df.write.format(output_format).save(
os.path.join(path, 'user_feature_vector_mapping_df', path_suffix)
)
if self.user_res_feature_vector_mapping.res_feature_vector_mapping_df is not None:
self.user_res_feature_vector_mapping.res_feature_vector_mapping_df.write.format(output_format).save(
os.path.join(path, 'res_feature_vector_mapping_df', path_suffix)
)
@staticmethod
def load(spark: SQLContext, path: str, output_format: str = 'parquet') -> 'AccessAnomalyModel':
metadata_df = spark.read.format(output_format).load(os.path.join(path, 'metadata_df'))
assert metadata_df.count() == 1
metadata_row = metadata_df.collect()[0]
tenant_col = metadata_row['tenant_col']
user_col = metadata_row['user_col']
user_vec_col = metadata_row['user_vec_col']
res_col = metadata_row['res_col']
res_vec_col = metadata_row['res_vec_col']
output_col = metadata_row['output_col']
has_history_access_df = metadata_row['has_history_access_df']
has_user2component_mappings_df = metadata_row['has_user2component_mappings_df']
has_res2component_mappings_df = metadata_row['has_res2component_mappings_df']
has_user_feature_vector_mapping_df = metadata_row['has_user_feature_vector_mapping_df']
has_res_feature_vector_mapping_df = metadata_row['has_res_feature_vector_mapping_df']
history_access_df = spark.read.format(output_format).load(
os.path.join(path, 'history_access_df')
) if has_history_access_df else None
user2component_mappings_df = spark.read.format(output_format).load(
os.path.join(path, 'user2component_mappings_df')
) if has_user2component_mappings_df else None
res2component_mappings_df = spark.read.format(output_format).load(
os.path.join(path, 'res2component_mappings_df')
) if has_res2component_mappings_df else None
user_feature_vector_mapping_df = spark.read.format(output_format).load(
os.path.join(path, 'user_feature_vector_mapping_df')
) if has_user_feature_vector_mapping_df else None
res_feature_vector_mapping_df = spark.read.format(output_format).load(
os.path.join(path, 'res_feature_vector_mapping_df')
) if has_res_feature_vector_mapping_df else None
return AccessAnomalyModel(
_UserResourceFeatureVectorMapping(
tenant_col,
user_col,
user_vec_col,
res_col,
res_vec_col,
history_access_df,
user2component_mappings_df,
res2component_mappings_df,
user_feature_vector_mapping_df,
res_feature_vector_mapping_df
),
output_col
)
@property
def tenant_col(self):
return self.user_res_feature_vector_mapping.tenant_col
@property
def user_col(self):
return self.user_res_feature_vector_mapping.user_col
@property
def user_vec_col(self):
return self.user_res_feature_vector_mapping.user_vec_col
@property
def res_col(self):
return self.user_res_feature_vector_mapping.res_col
@property
def res_vec_col(self):
return self.user_res_feature_vector_mapping.res_vec_col
@property
def user_mapping_df(self):
return self._user_mapping_df
@property
def res_mapping_df(self):
return self._res_mapping_df
def _transform(self, df: DataFrame) -> DataFrame:
dot = _make_dot()
tenant_col = self.tenant_col
user_col = self.user_col
user_vec_col = self.user_vec_col
res_col = self.res_col
res_vec_col = self.res_vec_col
output_col = self.output_col
seen_token = '__seen__'
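        # Scoring rules: previously seen (user, res) pairs score 0.0; pairs
        # whose user and resource sit in different connected components score
        # +inf; pairs with an unknown user or resource score null.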
def value_calc():
return f.when(f.col(seen_token).isNull() | ~f.col(seen_token), f.when(
f.col(user_vec_col).isNotNull() & f.col(res_vec_col).isNotNull(),
f.when(
f.col('user_component') == f.col('res_component'),
dot(f.col(user_vec_col), f.col(res_vec_col))
).otherwise(f.lit(float("inf")))
).otherwise(f.lit(None)) if self.has_components else f.when(
f.col(user_vec_col).isNotNull() & f.col(res_vec_col).isNotNull(),
dot(f.col(user_vec_col), f.col(res_vec_col))
).otherwise(f.lit(None))).otherwise(f.lit(0.0))
history_access_df = self.user_res_feature_vector_mapping.history_access_df
the_df = df.join(
history_access_df.withColumn(seen_token, f.lit(True)),
[tenant_col, user_col, res_col],
how='left'
) if self.preserve_history and history_access_df is not None else df.withColumn(seen_token, f.lit(False))
user_mapping_df = self.user_mapping_df
res_mapping_df = self.res_mapping_df
return the_df.join(
user_mapping_df, [tenant_col, user_col], how='left'
).join(
res_mapping_df, [tenant_col, res_col], how='left'
).withColumn(output_col, value_calc()).drop(
user_vec_col,
res_vec_col,
'user_component',
'res_component',
seen_token
)
# noinspection PyPep8Naming
class ConnectedComponents:
def __init__(self, tenantCol: str, userCol: str, res_col: str, componentColName: str = 'component'):
self.tenant_col = tenantCol
self.user_col = userCol
self.res_col = res_col
self.component_col_name = componentColName
def transform(self, df: DataFrame) -> Tuple[DataFrame, DataFrame]:
edges = df.select(self.tenant_col, self.user_col, self.res_col).distinct().orderBy(
self.tenant_col, self.user_col, self.res_col
).cache()
users = df.select(self.tenant_col, self.user_col).distinct().orderBy(self.tenant_col, self.user_col).cache()
user2index = spark_utils.DataFrameUtils.zip_with_index(users, col_name='user_component')
user2components = user2index
res2components = None
chg = True
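        # Alternating label propagation: push the minimum component id from
        # users to resources and back until no label changes (a fixpoint),
        # yielding the connected components of the bipartite access graph.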
while chg:
res2components = edges.join(
user2components, [self.tenant_col, self.user_col]
).groupBy(self.tenant_col, self.res_col).agg(
f.min('user_component').alias('res_component')
)
next_user2components = edges.join(
res2components, [self.tenant_col, self.res_col]
).groupBy(self.tenant_col, self.user_col).agg(
f.min('res_component').alias('user_component')
).cache()
chg = user2components.join(
next_user2components, on=[self.tenant_col, self.user_col, 'user_component']
).count() != user2components.count()
user2components = next_user2components
assert res2components is not None
return (
user2components.select(
self.tenant_col, self.user_col, f.col('user_component').alias(self.component_col_name)
).orderBy(
self.tenant_col,
self.user_col
),
res2components.select(
self.tenant_col, self.res_col, f.col('res_component').alias(self.component_col_name)
).orderBy(
self.tenant_col,
self.res_col
)
)
# noinspection PyPep8Naming
class AccessAnomaly(Estimator):
"""
This is the AccessAnomaly, a pyspark.ml.Estimator which
creates the AccessAnomalyModel which is a pyspark.ml.Transformer
"""
tenantCol = Param(
Params._dummy(),
"tenantCol",
"The name of the tenant column. "
"This is a unique identifier used to partition the dataframe into independent "
"groups where the values in each such group are completely isolated from one another. "
"Note: if this column is irrelevant for your data, "
"then just create a tenant column and give it a single value for all rows."
)
userCol = Param(
Params._dummy(),
"userCol",
"The name of the user column. "
"This is a the name of the user column in the dataframe."
)
resCol = Param(
Params._dummy(),
"resCol",
"The name of the resource column. "
"This is a the name of the resource column in the dataframe."
)
likelihoodCol = Param(
Params._dummy(),
"likelihoodCol",
"The name of the column with the likelihood estimate for user, res access "
"(usually based on access counts per time unit). "
)
outputCol = Param(
Params._dummy(),
"outputCol",
"The name of the output column representing the calculated anomaly score. "
"Values will be between (-inf, +inf) with an estimated mean of 0.0 and standard deviation of 1.0. "
)
rankParam = Param(
Params._dummy(),
"rankParam",
"rankParam is the number of latent factors in the model (defaults to 10)."
)
maxIter = Param(
Params._dummy(),
"maxIter",
"maxIter is the maximum number of iterations to run (defaults to 25)."
)
regParam = Param(
Params._dummy(),
"regParam",
"regParam specifies the regularization parameter in ALS (defaults to 0.1)."
)
numBlocks = Param(
Params._dummy(),
"numBlocks",
"numBlocks is the number of blocks the users and items will be partitioned into "
"in order to parallelize computation "
"(defaults to |tenants| if separate_tenants is False else 10)."
)
separateTenants = Param(
Params._dummy(),
"separateTenants",
"separateTenants applies the algorithm per tenant in isolation. "
"Setting to True may reduce runtime significantly, if number of tenant is large, "
"but will increase accuracy. (defaults to False)."
)
lowValue = Param(
Params._dummy(),
"lowValue",
"lowValue is used to scale the values of likelihood_col to be in the range [lowValue, highValue] "
"(defaults to 5.0)."
)
highValue = Param(
Params._dummy(),
"highValue",
"highValue is used to scale the values of likelihood_col to be in the range [lowValue, highValue] "
"(defaults to 10.0)."
)
applyImplicitCf = Param(
Params._dummy(),
"applyImplicitCf",
"specifies whether to use the implicit/explicit feedback ALS for the data "
"(defaults to True which means using implicit feedback)."
)
alphaParam = Param(
Params._dummy(),
"alphaParam",
"alphaParam is a parameter applicable to the implicit feedback variant "
"of ALS that governs the baseline confidence in preference observations."
"(defaults to 1.0)."
)
complementsetFactor = Param(
Params._dummy(),
"complementsetFactor",
"complementsetFactor is a parameter applicable to the implicit feedback variant "
"of ALS that governs the baseline confidence in preference observations."
"(defaults to 2)."
)
negScore = Param(
Params._dummy(),
"negScore",
"negScore is a parameter applicable to the explicit feedback variant of ALS that governs "
"the value to assign to the values of the complement set."
"(defaults to 1.0)."
)
historyAccessDf = Param(
Params._dummy(),
"historyAccessDf",
"historyAccessDf is an optional spark dataframe which includes the "
"list of seen user resource pairs for which the anomaly score should be zero."
)
def __init__(self,
tenantCol: str = AccessAnomalyConfig.default_tenant_col,
userCol: str = AccessAnomalyConfig.default_user_col,
resCol: str = AccessAnomalyConfig.default_res_col,
likelihoodCol: str = AccessAnomalyConfig.default_likelihood_col,
outputCol: str = AccessAnomalyConfig.default_output_col,
rankParam: int = AccessAnomalyConfig.default_rank,
maxIter: int = AccessAnomalyConfig.default_max_iter,
regParam: float = AccessAnomalyConfig.default_reg_param,
numBlocks: Optional[int] = AccessAnomalyConfig.default_num_blocks,
separateTenants: bool = AccessAnomalyConfig.default_separate_tenants,
lowValue: Optional[float] = AccessAnomalyConfig.default_low_value,
highValue: Optional[float] = AccessAnomalyConfig.default_high_value,
applyImplicitCf: bool = AccessAnomalyConfig.default_apply_implicit_cf,
alphaParam: Optional[float] = None,
complementsetFactor: Optional[int] = None,
negScore: Optional[float] = None,
historyAccessDf: Optional[DataFrame] = None):
super().__init__()
if applyImplicitCf:
alphaParam = alphaParam if alphaParam is not None else AccessAnomalyConfig.default_alpha
assert complementsetFactor is None and negScore is None
else:
assert alphaParam is None
complementsetFactor = \
complementsetFactor if complementsetFactor is not None else AccessAnomalyConfig.default_complementset_factor
negScore = negScore \
if negScore is not None else AccessAnomalyConfig.default_neg_score
# must either both be None or both be not None
assert (lowValue is None) == (highValue is None)
assert lowValue is None or lowValue >= 1.0
assert (lowValue is None or highValue is None) or highValue > lowValue
assert \
(lowValue is None or negScore is None) or \
(lowValue is not None and negScore < lowValue)
spark_utils.ExplainBuilder.build(
self,
tenantCol=tenantCol,
userCol=userCol,
resCol=resCol,
likelihoodCol=likelihoodCol,
outputCol=outputCol,
rankParam=rankParam,
maxIter=maxIter,
regParam=regParam,
numBlocks=numBlocks,
separateTenants=separateTenants,
lowValue=lowValue,
highValue=highValue,
applyImplicitCf=applyImplicitCf,
alphaParam=alphaParam,
complementsetFactor=complementsetFactor,
negScore=negScore,
historyAccessDf=historyAccessDf
)
# --- getters and setters
@property
def indexed_user_col(self):
return self.user_col + '_index'
@property
def user_vec_col(self):
return self.user_col + '_vector'
@property
def indexed_res_col(self):
return self.res_col + '_index'
@property
def res_vec_col(self):
return self.res_col + '_vector'
@property
def scaled_likelihood_col(self):
return self.likelihood_col + '_scaled'
def _get_scaled_df(self, df: DataFrame) -> DataFrame:
return scalers.LinearScalarScaler(
input_col=self.likelihood_col,
partition_key=self.tenant_col,
output_col=self.scaled_likelihood_col,
min_required_value=self.low_value,
max_required_value=self.high_value
).fit(df).transform(df) if self.low_value is not None and self.high_value is not None else df
def _enrich_and_normalize(self, indexed_df: DataFrame) -> DataFrame:
tenant_col = self.tenant_col
indexed_user_col = self.indexed_user_col
indexed_res_col = self.indexed_res_col
scaled_likelihood_col = self.scaled_likelihood_col
if not self.apply_implicit_cf:
complementset_factor = self.complementset_factor
neg_score = self.neg_score
assert complementset_factor is not None and neg_score is not None
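            # Explicit-feedback ALS needs negative examples, so sample unseen
            # (user, res) pairs from the complement set and rate them neg_score.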
comp_df = ComplementAccessTransformer(
tenant_col, [indexed_user_col, indexed_res_col], complementset_factor
).transform(indexed_df).withColumn(
scaled_likelihood_col,
f.lit(neg_score)
)
else:
comp_df = None
scaled_df = self._get_scaled_df(indexed_df).select(
tenant_col, indexed_user_col, indexed_res_col, scaled_likelihood_col
)
return scaled_df.union(comp_df) if comp_df is not None else scaled_df
def _train_cf(self, als: ALS, df: DataFrame) -> Tuple[DataFrame, DataFrame]:
tenant_col = self.tenant_col
indexed_user_col = self.indexed_user_col
user_vec_col = self.user_vec_col
indexed_res_col = self.indexed_res_col
res_vec_col = self.res_vec_col
spark_model = als.fit(df)
user_mapping_df = spark_model.userFactors.select(
f.col('id').alias(indexed_user_col),
f.col('features').alias(user_vec_col)
).join(
df.select(indexed_user_col, tenant_col).distinct(), indexed_user_col
).select(
tenant_col, indexed_user_col, user_vec_col
)
res_mapping_df = spark_model.itemFactors.select(
f.col('id').alias(indexed_res_col),
f.col('features').alias(res_vec_col)
).join(
df.select(indexed_res_col, tenant_col).distinct(), indexed_res_col
).select(
tenant_col, indexed_res_col, res_vec_col
)
return user_mapping_df, res_mapping_df
def create_spark_model_vectors_df(self, df: DataFrame) -> _UserResourceFeatureVectorMapping:
tenant_col = self.tenant_col
indexed_user_col = self.indexed_user_col
user_vec_col = self.user_vec_col
indexed_res_col = self.indexed_res_col
res_vec_col = self.res_vec_col
max_iter = self.max_iter
distinct_tenants = df.select(tenant_col).distinct().cache()
num_tenants = distinct_tenants.count()
separate_tenants = self.separate_tenants
num_blocks = self.num_blocks if self.num_blocks is not None else (num_tenants if not separate_tenants else 10)
als = ALS(
rank=self.rank_param,
maxIter=max_iter,
regParam=self.reg_param,
numUserBlocks=num_blocks,
numItemBlocks=num_blocks,
implicitPrefs=self.apply_implicit_cf,
userCol=self.indexed_user_col,
itemCol=self.indexed_res_col,
ratingCol=self.scaled_likelihood_col,
nonnegative=True,
coldStartStrategy='drop'
)
alpha = self.alpha_param
if alpha is not None:
als.setAlpha(alpha)
if separate_tenants:
tenants = [row[tenant_col] for row in distinct_tenants.orderBy(tenant_col).collect()]
user_mapping_df: Optional[DataFrame] = None
res_mapping_df: Optional[DataFrame] = None
for curr_tenant in tenants:
curr_df = df.filter(f.col(tenant_col) == curr_tenant).cache()
curr_user_mapping_df, curr_res_mapping_df = self._train_cf(als, curr_df)
user_mapping_df = user_mapping_df.union(
curr_user_mapping_df
) if user_mapping_df is not None else curr_user_mapping_df
res_mapping_df = res_mapping_df.union(
curr_res_mapping_df
) if res_mapping_df is not None else curr_res_mapping_df
else:
user_mapping_df, res_mapping_df = self._train_cf(als, df)
assert user_mapping_df is not None and res_mapping_df is not None
return _UserResourceFeatureVectorMapping(
tenant_col,
indexed_user_col,
user_vec_col,
indexed_res_col,
res_vec_col,
None,
None,
None,
user_mapping_df,
res_mapping_df
)
def _fit(self, df: DataFrame) -> AccessAnomalyModel:
# index the user and resource columns to allow running the spark ALS algorithm
the_indexer = indexers.MultiIndexer(
indexers=[
indexers.IdIndexer(
input_col=self.user_col,
partition_key=self.tenant_col,
output_col=self.indexed_user_col,
reset_per_partition=self.separate_tenants
),
indexers.IdIndexer(
input_col=self.res_col,
partition_key=self.tenant_col,
output_col=self.indexed_res_col,
reset_per_partition=self.separate_tenants
)
]
)
the_indexer_model = the_indexer.fit(df)
# indexed_df is the dataframe with the indices for user and resource
indexed_df = the_indexer_model.transform(df)
enriched_df = self._enrich_and_normalize(indexed_df).cache()
user_res_feature_vector_mapping_df = self.create_spark_model_vectors_df(enriched_df)
user_res_norm_cf_df_model = ModelNormalizeTransformer(
enriched_df, self.rank_param
).transform(user_res_feature_vector_mapping_df)
# convert user and resource indices back to names
user_index_model = the_indexer_model.get_model_by_input_col(self.user_col)
res_index_model = the_indexer_model.get_model_by_input_col(self.res_col)
assert user_index_model is not None and res_index_model is not None
norm_user_mapping_df = user_res_norm_cf_df_model.user_feature_vector_mapping_df
norm_res_mapping_df = user_res_norm_cf_df_model.res_feature_vector_mapping_df
indexed_user_col = self.indexed_user_col
indexed_res_col = self.indexed_res_col
# do the actual index to name mapping (using undo_transform)
final_user_mapping_df = user_index_model.undo_transform(norm_user_mapping_df).drop(indexed_user_col)
final_res_mapping_df = res_index_model.undo_transform(norm_res_mapping_df).drop(indexed_res_col)
tenant_col, user_col, res_col = self.tenant_col, self.user_col, self.res_col
history_access_df = self.history_access_df
access_df = \
history_access_df if history_access_df is not None else df.select(tenant_col, user_col, res_col).cache()
user2component_mappings_df, res2component_mappings_df = ConnectedComponents(
tenant_col, user_col, res_col
).transform(access_df)
return AccessAnomalyModel(
_UserResourceFeatureVectorMapping(
tenant_col=self.tenant_col,
user_col=self.user_col,
user_vec_col=self.user_vec_col,
res_col=self.res_col,
res_vec_col=self.res_vec_col,
history_access_df=history_access_df,
user2component_mappings_df=user2component_mappings_df,
res2component_mappings_df=res2component_mappings_df,
user_feature_vector_mapping_df=final_user_mapping_df.cache(),
res_feature_vector_mapping_df=final_res_mapping_df.cache()
),
self.output_col
)
class ModelNormalizeTransformer:
"""
Given a UserResourceCfDataframeModel this class creates and returns
a new normalized UserResourceCfDataframeModel which has an anomaly score
with a mean of 0.0 and standard deviation of 1.0 when applied on the given dataframe
"""
def __init__(self, access_df: DataFrame, rank: int):
self.access_df = access_df
self.rank = rank
def _make_append_bias(self, user_col: str, res_col: str, col_name: str, target_col_name: str, rank: int) -> f.udf:
assert col_name == user_col or col_name == res_col
assert target_col_name == user_col or target_col_name == res_col
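        # Extended layout: user vectors become [v..., user_bias, 1.0] and
        # resource vectors [v..., 1.0, res_bias], so dot(user, res) adds
        # user_bias + res_bias on top of the latent-factor product.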
def value_at(bias: float, value: float, i: int) -> float:
if col_name != target_col_name or i < rank:
res = value
elif col_name == user_col and i == rank:
res = bias + value
elif col_name == res_col and i == (rank + 1):
res = bias + value
else:
res = value
assert res == 1.0 if i == rank and col_name == res_col else True
assert res == 1.0 if i == (rank + 1) and col_name == user_col else True
return res
@f.udf(t.ArrayType(t.DoubleType()))
def append_bias(v: List[float], bias: float, coeff: float = 1.0) -> List[float]:
assert len(v) == rank or len(v) == rank + 2
if len(v) == rank:
# increase vector size to adjust for bias
fix_value = bias if col_name == target_col_name else 0.0
u = [fix_value, 1.0] if col_name == user_col else [1.0, fix_value]
return [float(coeff * value) for value in np.append(np.array(v), np.array(u))]
else:
# fix enhanced vector to adjust for another bias
assert len(v) == rank + 2
return [coeff * value_at(bias, v[i], i) for i in range(len(v))]
return append_bias
def transform(self, user_res_cf_df_model: _UserResourceFeatureVectorMapping) -> _UserResourceFeatureVectorMapping:
likelihood_col_token = '__<PASSWORD>'
dot = _make_dot()
tenant_col = user_res_cf_df_model.tenant_col
user_col = user_res_cf_df_model.user_col
user_vec_col = user_res_cf_df_model.user_vec_col
res_col = user_res_cf_df_model.res_col
res_vec_col = user_res_cf_df_model.res_vec_col
fixed_df = self.access_df.join(
user_res_cf_df_model.user_feature_vector_mapping_df, [tenant_col, user_col]
).join(
user_res_cf_df_model.res_feature_vector_mapping_df, [tenant_col, res_col]
).select(
tenant_col,
user_col,
user_vec_col,
res_col,
res_vec_col,
dot(f.col(user_vec_col), f.col(res_vec_col)).alias(likelihood_col_token)
)
scaler_model = scalers.StandardScalarScaler(
likelihood_col_token, tenant_col, user_vec_col
).fit(fixed_df)
per_group_stats: DataFrame = scaler_model.per_group_stats
assert isinstance(per_group_stats, DataFrame)
append2user_bias = self._make_append_bias(user_col, res_col, user_col, user_col, self.rank)
append2res_bias = self._make_append_bias(user_col, res_col, res_col, user_col, self.rank)
fixed_user_mapping_df = user_res_cf_df_model.user_feature_vector_mapping_df.join(
per_group_stats, tenant_col
).select(
tenant_col,
user_col,
append2user_bias(
f.col(user_vec_col),
f.lit(-1.0) * f.col(scalers.StandardScalarScalerConfig.mean_token),
f.lit(-1.0) / f.when(
f.col(scalers.StandardScalarScalerConfig.std_token) != 0.0,
f.col(scalers.StandardScalarScalerConfig.std_token)
).otherwise(f.lit(1.0))
).alias(user_vec_col)
)
fixed_res_mapping_df = user_res_cf_df_model.res_feature_vector_mapping_df.join(
per_group_stats, tenant_col
).select(
tenant_col,
res_col,
append2res_bias(f.col(res_vec_col), f.lit(0)).alias(res_vec_col)
)
return user_res_cf_df_model.replace_mappings(fixed_user_mapping_df, fixed_res_mapping_df)
| 0.980469 | high |
instapp/models.py | kiptoo-rotich/Instagram | 0 | 2180799 | <filename>instapp/models.py
from django.db import models
from tinymce.models import HTMLField
import datetime as dt
from django.contrib.auth.models import User
class Photos(models.Model):
title = models.CharField(max_length=60)
post = models.TextField(max_length=1000)
pub_date = models.DateTimeField(auto_now_add=True)
image = models.ImageField(upload_to='images/', default="Image")
likes = models.ManyToManyField(User, related_name='post_like')
user = models.ForeignKey(User, blank=True,on_delete=models.CASCADE)
def __str__(self):
return str(self.post)[:10]
def number_of_likes(self):
return self.likes.count()
@classmethod
def search(cls,search_term):
photos=cls.objects.filter(title__icontains=search_term)
return photos
class tags(models.Model):
name = models.CharField(max_length=30)
def __str__(self):
return self.name
class Comment(models.Model):
comment = models.TextField()
image = models.ForeignKey(Photos, on_delete=models.CASCADE)
posted_on = models.DateTimeField(auto_now_add=True, null=True)
def save_comment(self):
self.save()
    def delete_comment(self):
        # naming this method delete() would override Model.delete and recurse forever
        self.delete()
class Profile(models.Model):
profilephoto = models.ImageField('image')
Bio = models.CharField(max_length=30)
user = models.OneToOneField(User, on_delete=models.CASCADE, null=True)
following = models.ManyToManyField(User, blank=True, related_name='follow')
def __str__(self):
return self.Bio
def delete_profile(self):
self.delete()
def save_profile(self):
self.save()
@classmethod
def search_profile(cls, name):
return cls.objects.filter(user__username__icontains=name).all()
| 0.949219 | high |
menu.py | jtomaszk/pibank | 0 | 5242927 | import os
from action import Action
class MainMenuMachine(object):
def __init__(self, screen):
self.state = ShowDevicesSelectedMenuState(screen)
self.screen = screen
def on_event(self, event):
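        # Classic state pattern: each state's on_event returns the next state
        # object, and the machine simply stores whatever comes back.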
self.state = self.state.on_event(event)
def select_callback(self, callback):
self.on_event(Action.SELECT)
def up_callback(self, callback):
self.on_event(Action.UP)
def down_callback(self, calback):
self.on_event(Action.DOWN)
def left_callback(self, calback):
self.on_event(Action.LEFT)
def right_callback(self, calback):
self.on_event(Action.RIGHT)
class MenuState(object):
def __init__(self, screen):
self.screen = screen
print 'Processing current state:', str(self)
self.draw()
def draw(self):
pass
def on_event(self, event):
"""
Handle events that are delegated to this State.
"""
pass
def __repr__(self):
"""
Leverages the __str__ method to describe the State.
"""
return self.__str__()
def __str__(self):
"""
Returns the name of the State.
"""
return self.__class__.__name__
class UpdateState(MenuState):
def draw(self):
os.system("git pull > /tmp/update_status")
update_status = open('/tmp/update_status', 'r').read()
self.screen.draw('update:\n' + update_status)
def on_event(self, event):
if event == Action.LEFT:
return UpdateSelectedMenuState(self.screen)
return self
class UpdateSelectedMenuState(MenuState):
def draw(self):
self.screen.draw_main_menu(self)
def on_event(self, event):
if event == Action.UP:
return MakeBackupSelectedMenuState(self.screen)
elif event == Action.DOWN:
return RebootSelectedMenuState(self.screen)
elif event == Action.SELECT:
return UpdateState(self.screen)
return self
class RebootState(MenuState):
def draw(self):
self.screen.draw('reboot...')
os.system('reboot')
class RebootSelectedMenuState(MenuState):
def draw(self):
self.screen.draw_main_menu(self)
def on_event(self, event):
if event == Action.UP:
return UpdateSelectedMenuState(self.screen)
elif event == Action.DOWN:
return ShutdownSelectedMenuState(self.screen)
elif event == Action.SELECT:
return RebootState(self.screen)
return self
class ShutdownState(MenuState):
def draw(self):
self.screen.draw('power off...')
os.system('poweroff')
class ShutdownSelectedMenuState(MenuState):
def draw(self):
self.screen.draw_main_menu(self)
def on_event(self, event):
if event == Action.UP:
return RebootSelectedMenuState(self.screen)
elif event == Action.SELECT:
return ShutdownState(self.screen)
return self
class MakeBackupSelectedMenuState(MenuState):
def draw(self):
self.screen.draw_main_menu(self)
def on_event(self, event):
if event == Action.UP:
return ShowDevicesSelectedMenuState(self.screen)
if event == Action.DOWN:
return ShutdownSelectedMenuState(self.screen)
return self
class ShowDevicesState(MenuState):
def draw(self):
os.system("df -h | grep media | awk '{print $5 \" \" $6}' > /tmp/media_list")
media_list = open('/tmp/media_list', 'r').read()
self.screen.draw('usage device\n' + media_list)
def on_event(self, event):
if event == Action.LEFT:
return ShowDevicesSelectedMenuState(self.screen)
return self
class ShowDevicesSelectedMenuState(MenuState):
def draw(self):
self.screen.draw_main_menu(self)
def on_event(self, event):
if event == Action.DOWN:
return MakeBackupSelectedMenuState(self.screen)
elif event == Action.SELECT:
return ShowDevicesState(self.screen)
return self
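# Example wiring (assumes a `screen` object exposing draw()/draw_main_menu(),
# hooked up to hardware button callbacks):
#   machine = MainMenuMachine(screen)
#   machine.on_event(Action.DOWN)    # ShowDevices -> MakeBackup selected
#   machine.on_event(Action.SELECT)  # MakeBackup handles no SELECT action here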
| 0.695313 | high |
models/provider_type.py | gnydick/qairon | 0 | 500943 | from sqlalchemy import Column, String, Text
from sqlalchemy.orm import relationship, validates
from db import db
class ProviderType(db.Model):
__tablename__ = "provider_type"
id = Column(String, primary_key=True)
defaults = Column(Text)
providers = relationship("Provider", back_populates="type")
fleet_types = relationship("FleetType", back_populates="provider_type")
def __repr__(self):
return self.id
@validates('id')
def validate_name(self, key, value):
assert value != ''
return value
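# Usage sketch (assumes a configured SQLAlchemy session; values are placeholders):
#   session.add(ProviderType(id="aws", defaults="{}"))
#   session.commit()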
| 0.523438 | high |
SR/trainer/trainer.py | AntonyYX/Super-Resolution | 0 | 8567263 | import os
import PIL
import torch
from tqdm import tqdm
from torchvision.utils import make_grid
from torchvision import transforms
from trainer.base_trainer import BaseTrainer
import matplotlib.pyplot as plt
from utils.util import get_divider_str
import numpy as np
from utils.util import get_lr, load_checkpoint_state, save_checkpoint_state
class Trainer(BaseTrainer):
def __init__(self, model, criterion, train_dataset, optimizer, device, config: dict,
valid_dataset=None,
train_dataloader=None,
valid_dataloader=None,
lr_scheduler=None):
super(Trainer, self).__init__(model, criterion,
optimizer, train_dataset, valid_dataset, config)
self.config = config
self.device = device
self.train_dataloader = train_dataloader
self.valid_dataloader = valid_dataloader
self.lr_scheduler = lr_scheduler
self.do_validation = self.valid_dataloader is not None
# number of batches in dataloader
self.len_epoch = len(self.train_dataloader)
self.log_step = config['log_step']
self.best_valid_loss = float('inf')
def _train_epoch(self, epoch):
"""
Training logic for one epoch
:param epoch: Integer, current epoch.
:return: A log that contains training information of current epoch
"""
self.logger.debug(f'train epoch {epoch}')
self.model.train()
total_loss = 0.0
for batch_idx, (data, target) in enumerate(self.train_dataloader):
data, target = data.to(self.device), target.to(self.device)
output = self.model(data)
loss = self.criterion(output, target)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
total_loss += loss.item()
# update progress bar
self.progress_bar.update(len(data))
                self.progress_bar.set_postfix(train_L=loss.item(),
                                              val_L=(
                                                  self.valid_loss[-1].item() if self.valid_loss and self.valid_loss[-1] is not None else None),
                                              lr=get_lr(self.optimizer))
self.memory_profiler.update()
self.train_loss.append(total_loss / len(self.train_dataloader))
# do validation
        valid_loss = self._valid_epoch(epoch) if self.do_validation else None
        if valid_loss is not None and valid_loss < self.best_valid_loss:
# new best weights
self.logger.debug(
f"Best validation loss so far. validation loss={valid_loss}. Save state at epoch={epoch}")
self.best_valid_loss = valid_loss
super(Trainer, self)._save_checkpoint(epoch, is_best=True)
self.valid_loss.append(valid_loss)
self.learning_rates.append(get_lr(self.optimizer))
if self.lr_scheduler is not None:
self.lr_scheduler.step()
return {'valid_loss': valid_loss}
def _valid_epoch(self, epoch):
"""
Validation after training an epoch
:param epoch: Integer, current epoch
:return: A log that contains validation information of current epoch
"""
self.logger.debug(f'validate epoch {epoch}')
self.model.eval()
total_loss = 0.0
with torch.no_grad():
for batch_idx, (data, target) in enumerate(self.valid_dataloader):
data, target = data.to(self.device), target.to(self.device)
output = self.model(data)
loss = self.criterion(output, target)
total_loss += loss
return total_loss / len(self.valid_dataloader)
def _progress(self, batch_idx):
pass
def _save_checkpoint(self, epoch):
super(Trainer, self)._save_checkpoint(epoch)
self.logger.debug(f'save checkpoint for epoch {epoch}')
# save one batch of validation image
self.model.eval()
with torch.no_grad():
data, target = next(iter(self.valid_dataloader))
data, target = data.to(self.device), target.to(self.device)
output_path = self.valid_results / f'epoch{epoch}.png'
# output_path.mkdir(parents=True, exist_ok=False)
output = self.model(data)
input_images = [transforms.ToPILImage()(img.cpu()) for img in data]
output_images = [transforms.ToPILImage()(img.cpu())
for img in output]
target_images = [transforms.ToPILImage()(img.cpu())
for img in target]
plot_height = self.valid_dataloader.batch_size * 10 * 2 // 3
fig, axes = plt.subplots(
nrows=len(input_images), ncols=3, figsize=(20, plot_height))
for i in range(len(input_images)):
interpolate_img = input_images[i].resize(
target.shape[-2:], resample=PIL.Image.BICUBIC)
axes[i][0].imshow(interpolate_img)
axes[i][1].imshow(output_images[i])
axes[i][2].imshow(target_images[i])
fig.savefig(output_path)
plt.close()
np.savetxt(self.checkpoint_dir / 'valid_loss.txt', self.valid_loss)
np.savetxt(self.checkpoint_dir / 'train_loss.txt', self.train_loss)
np.savetxt(self.checkpoint_dir /
'learning_rates.txt', self.learning_rates)
| 0.953125 | high |
sahara/cli/image_pack/cli.py | ksshanam/sahara | 0 | 6826263 | # Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from oslo_config import cfg
from oslo_log import log
import six
from sahara.cli.image_pack import api
from sahara.i18n import _
LOG = log.getLogger(__name__)
CONF = cfg.CONF
CONF.register_cli_opts([
cfg.StrOpt(
'image',
required=True,
help=_("The path to an image to modify. This image will be modified "
"in-place: be sure to target a copy if you wish to maintain a "
"clean master image.")),
cfg.StrOpt(
'root-filesystem',
dest='root_fs',
required=False,
help=_("The filesystem to mount as the root volume on the image. No"
"value is required if only one filesystem is detected.")),
cfg.BoolOpt(
'test-only',
dest='test_only',
default=False,
help=_("If this flag is set, no changes will be made to the image; "
"instead, the script will fail if discrepancies are found "
"between the image and the intended state."))])
def unregister_extra_cli_opt(name):
try:
for cli in CONF._cli_opts:
if cli['opt'].name == name:
CONF.unregister_opt(cli['opt'])
except Exception:
pass
for extra_opt in ["log-exchange", "host", "port"]:
unregister_extra_cli_opt(extra_opt)
def add_plugin_parsers(subparsers):
api.setup_plugins()
for plugin in CONF.plugins:
args_by_version = api.get_plugin_arguments(plugin)
if all(args is NotImplemented for version, args
in six.iteritems(args_by_version)):
continue
plugin_parser = subparsers.add_parser(
plugin, help=_('Image generation for the {plugin} plugin').format(
plugin=plugin))
version_parsers = plugin_parser.add_subparsers(
title=_("Plugin version"),
dest="version",
help=_("Available versions"))
for version, args in six.iteritems(args_by_version):
if args is NotImplemented:
continue
version_parser = version_parsers.add_parser(
version, help=_('{plugin} version {version}').format(
plugin=plugin, version=version))
for arg in args:
arg_token = ("--%s" % arg.name if len(arg.name) > 1 else
"-%s" % arg.name)
version_parser.add_argument(arg_token,
dest=arg.name,
help=arg.description,
default=arg.default,
required=arg.required,
choices=arg.choices)
version_parser.set_defaults(args={arg.name
for arg in args})
command_opt = cfg.SubCommandOpt('plugin',
title=_('Plugin'),
help=_('Available plugins'),
handler=add_plugin_parsers)
CONF.register_cli_opt(command_opt)
def main():
CONF(project='sahara')
CONF.reload_config_files()
log.setup(CONF, "sahara")
LOG.info("Command: {command}".format(command=' '.join(sys.argv)))
api.set_logger(LOG)
api.set_conf(CONF)
plugin = CONF.plugin.name
version = CONF.plugin.version
args = CONF.plugin.args
image_arguments = {arg: getattr(CONF.plugin, arg) for arg in args}
api.pack_image(CONF.image, plugin, version, image_arguments,
CONF.root_fs, CONF.test_only)
LOG.info("Finished packing image for {plugin} at version "
"{version}".format(plugin=plugin, version=version))
| 0.828125 | high |
DataAnalysis/day1_4/color_4.py | yunjung-lee/class_python_numpy | 0 | 11690495 | from bs4 import BeautifulSoup
import urllib.request as req
fp = open("color.html",encoding='utf-8')
soup = BeautifulSoup(fp,"html.parser")
# #print(soup)
# print(soup.select_one("#gr"))
# # <li id="gr">Gray</li>
# print(soup.select_one("#gr").string)
# # Gray
#
# # used frequently, so wrap it in a helper function
# # using a lambda
# sel = lambda q:print(soup.select_one(q).string)
# # same as writing: def sel(q): ...
#
# sel("#gr")
# # Gray
# sel("li#gr")
# # Gray
# sel("ul > li#gr")
# # Gray
# sel("#mycolor #gr")
# # Gray
# sel("#mycolor > #gr")
# # Gray
# sel("ul#mycolor > li#gr")
# # Gray
# sel("li[id='gr']")
# # Gray
# sel("li:nth-of-type(4)")
# # Gray
#
# print(soup.select("li")[3].string)
# # Gray
# print(soup.find_all("li")[3].string)
# # Gray
url = "https://ko.wikisource.org/wiki/%EC%A0%80%EC%9E%90:%EC%9C%A4%EB%8F%99%EC%A3%BC"
res = req.urlopen(url).read()
#print(res)
soup = BeautifulSoup(res,"html.parser")
# print(soup)
#mw-content-text > div > ul:nth-child(6) > li > ul > li > a => :nth-child(6) caused frequent errors, so it was removed
#mw-content-text > div > ul > li > ul > li > a
alist = soup.select("#mw-content-text > div > ul > li > ul > li > a")
for a in alist :
print(a.string)
myurl = "https://news.sbs.co.kr/news/endPage.do?news_id=N1004883776&plink=TOPHEAD&cooper=SBSNEWSMAIN"
res9=req.urlopen(myurl).read().decode("utf-8")
#decode the fetched page
soup9 = BeautifulSoup(res9,'html.parser')
#parse it into an HTML document
article = soup9.select("#container > div.w_inner > div.w_article > div.w_article_cont > div.w_article_left > div.article_cont_area > div.main_text > div")
#print(article)
#article is a list, not a string, so it must be converted to a string
import re
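# replace every run of non-Hangul characters with a space, keeping Korean text only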
pat = re.compile("[^가-힣]+")
result = pat.sub(" ",res9)
# print(result)
nurl ="https://kin.naver.com/qna/detail.nhn?d1id=1&dirId=10405&docId=307066785&qb=67mF642w7J207YSw&enc=utf8§ion=kin&rank=3&search_sort=0&spq=1"
nres=req.urlopen(nurl).read().decode("utf-8")
soupN = BeautifulSoup(nres,'html.parser')
nlist = soupN.select("#contents_layer_0 > div.end_content._endContents > div")
nlist = str(nlist[0])
pat = re.compile("[^가-힣ㅏ-ㅣ]")
print(pat.sub("",nlist))
| 0.582031 | high |
tempserver/pump.py | jonte/tempserver | 0 | 2353663 | from enum import Enum
class PumpMode(Enum):
OFF = "OFF"
ON = "ON"
class Pump:
    def __init__(self, gpio_pin, name, id_=None, scheduler=None, notify_change=None):
self.id = id_
self.name = name
self.mode = PumpMode.OFF
self.notify_change = notify_change
def set_mode(self, mode):
self.mode = mode
self.publish_state()
def enable(self):
self.set_mode(PumpMode.ON)
def disable(self):
self.set_mode(PumpMode.OFF)
def publish_state(self):
if self.notify_change:
self.notify_change(("pump-mode-" + self.id, self.mode))
| 0.847656 | medium |
consecseries.py | chirag043043/python-program | 0 | 5415791 | # read numbers until 0, then print the longest run of consecutive equal elements
arr1=[]
while True:
    ele=int(input("enter array element (0 to stop)"))
    if ele==0:
        break
    arr1.append(ele)
longest=current=1 if arr1 else 0
for index in range(1,len(arr1)):
    if arr1[index]==arr1[index-1]:
        current=current+1
    else:
        current=1
    longest=max(longest,current)
print(longest)
| 0.746094 | low |
curriculum/migrations/0012_auto_20180808_2200.py | TeachFirstByte/firstbyte | 6 | 3875887 | # Generated by Django 2.0.7 on 2018-08-08 22:00
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('curriculum', '0011_auto_20180707_0436'),
]
operations = [
migrations.RenameField(
model_name='lessonfeedback',
old_name='strengths',
new_name='comments',
),
migrations.RemoveField(
model_name='lessonfeedback',
name='weaknesses',
),
]
| 0.605469 | medium |
bin/main.py | viktorzob/bidding_learning | 4 | 7139055 | import os, sys
path = os.path.dirname(os.path.realpath(__file__))
os.chdir(path)
sys.path.append(os.path.dirname(path))
import numpy as np
import torch
import matplotlib.pyplot as plt
from PyPDF2 import PdfFileMerger
import pickle
import datetime
import time
from src.utils import UniformNoise, OUNoise, GaussianNoise, plot_run_outcome
from src.environment_bid_market import EnvironmentBidMarket
'''
High-Level Interface that calls learning algorithm and Energy-Market Environment
subject to user-specified inputs
Environment Parameters
capacities: np.array 1x(number of Agents)
costs: np.array 1x(number of Agents)
Note: these arrays must match the number of agents.
Demand: np.array 1x2
Chooses demand from a range between [min, max-1]
For a fixed demand, use the preferred [value, value+1] (e.g. Demand = 99 -> [99,100])
Agents: scalar
Number of learning agents
Rewards: scalar
Type of Reward function. Usually 0.
Split: binary
Allow Split Bids
past_action: binary
Allow agents to learn from all agents past actions
lr_actor: float
Learning Rate Actor
lr_critic: float
Learning Rate Critic
Discrete: binary
Enables Discrete Spaces (Not yet functional)
'''
# if plots should be saved
pdfs = []
# save date/time
time_stamp = datetime.datetime.now()
meta_data_time = time_stamp.strftime('%d-%m-%y %H:%M')
# Agent Parameters
POWER_CAPACITIES = [50 / 100, 50 / 100] # 50
PRODUCTION_COSTS = [20 / 100, 20 / 100] # 20
DEMAND = [70 / 100, 70 / 100] # 70
ACTION_LIMITS = [-1, 1] # [-10/100,100/100]#[-100/100,100/100]
NUMBER_OF_AGENTS = 2
PAST_ACTION = 0
FRINGE = 0
# Neural Network Parameters
# rescaling the rewards to avoid hard weight Updates of the Criticer
REWARD_SCALING = 1 # 0.01 #
LEARNING_RATE_ACTOR = 1e-4
LEARNING_RATE_CRITIC = 1e-3
NORMALIZATION_METHOD = 'none' # options are BN = Batch Normalization, LN = Layer Normalization, none
# Noise Parameters
NOISE = 'GaussianNoise' # Options are: 'GaussianNoise',OUNoise','UniformNoise'
DECAY_RATE = 0.001 # 0.0004 strong; 0.0008 medium; 0.001 soft; # if 0: Not used, if:1: only simple Noise without decay used
REGULATION_COEFFICENT = 10 # if 1: Not used, if:0: only simple Noise used
TOTAL_TEST_RUNS = 1 # How many runs should be executed
EPISODES_PER_TEST_RUN = 10000 # How many episodes should one run contain
ROUNDS_PER_EPISODE = 1  # How many rounds are allowed per episode (currently the number of rounds has no impact, since 'done' triggers once step >= rounds; choosing 1 is easiest to interpret)
BATCH_SIZE = 128 # *0.5 # *2
# "Completely reproducible results are not guaranteed across PyTorch releases, individual commits, or different platforms.
# Furthermore, results may not be reproducible between CPU and GPU executions, even when using identical seeds."
# also see: https://pytorch.org/docs/stable/notes/randomness.html
seed = np.random.randint(1000)
# if reproducabilty is desired when training on GPU using CuDNN, the two commands below are needed
# torch.backends.cudnn.deterministic = True
# torch.backends.cudnn.benchmark = False
# Dictionary to save data and Parameter settings
Results = {}
Results['meta-data'] = {
'date_run':meta_data_time,
'power_capacities':POWER_CAPACITIES,
'production_costs':PRODUCTION_COSTS,
'demand':DEMAND,
'noise': NOISE,
'regulation_coef':REGULATION_COEFFICENT,
'decay_rate':DECAY_RATE,
'lr_critic':LEARNING_RATE_CRITIC,
'lr_actor':LEARNING_RATE_ACTOR,
'Normalization': NORMALIZATION_METHOD,
'reward_scaling':REWARD_SCALING,
'total_test_rounds':TOTAL_TEST_RUNS,
'episodes_per_test_run':EPISODES_PER_TEST_RUN,
'batches':BATCH_SIZE,
'rounds':ROUNDS_PER_EPISODE,
'agents':NUMBER_OF_AGENTS,
'action_limits': ACTION_LIMITS,
'past_action': PAST_ACTION,
'fringe:player': FRINGE,
'seed':seed}
for test_run in range(TOTAL_TEST_RUNS):
print('Test Run: {}'.format(test_run))
# save runtime
Results[test_run] = {'runtime':0}
t_0 = time.time()
# set seed
np.random.seed(seed+test_run)
torch.manual_seed(seed+test_run)
# set up environment
env = EnvironmentBidMarket(capacities=POWER_CAPACITIES, costs=PRODUCTION_COSTS, demand=DEMAND, agents=NUMBER_OF_AGENTS,
fringe_player=FRINGE, past_action=PAST_ACTION, lr_actor=LEARNING_RATE_ACTOR, lr_critic=LEARNING_RATE_CRITIC,
normalization=NORMALIZATION_METHOD, reward_scaling=REWARD_SCALING, action_limits=ACTION_LIMITS, rounds_per_episode=ROUNDS_PER_EPISODE)
# set up agents (ddpg)
agents = env.create_agents(env)
# set up noise
if NOISE == 'GaussianNoise':
noise = GaussianNoise(env.action_space, mu=0, sigma=0.1, regulation_coef=REGULATION_COEFFICENT, decay_rate=DECAY_RATE) # Gaussian Noise (only instead of Ornstein Uhlenbeck Noise)
elif NOISE == 'OUNoise':
noise = OUNoise(env.action_space, mu=0.0, theta=0.15, max_sigma=0.3, min_sigma=0.3, decay_period=100000) # Ornstein Uhlenbeck Noise ( only instead of Gaussian Noise)
elif NOISE == 'UniformNoise':
noise = UniformNoise(env.action_space, initial_exploration=0.99, final_exploration=0.05, decay_rate=0.999)
for episode in range(EPISODES_PER_TEST_RUN):
Results[test_run][episode] = {'rewards':[], 'actions':[], 'market_price':[] , 'sold_quantities':[],
'round':[], 'state':[], 'new_state':[]}
# reset noise and state (past_actions resets only at the bginning of a new run)
state = env.reset(episode)
noise.reset() # only important for OUNoise
for step in range(ROUNDS_PER_EPISODE):
actions = []
for n in range(len(agents)):
action_temp = agents[n].get_action(state)
action_temp = noise.get_action(action_temp, episode)
actions.append(action_temp[:])
actions = np.asarray(actions)
# get reward an new state from environment
new_state, reward, done, _ = env.step(actions)
# save data in memory
for n in range(len(agents)):
agents[n].memory.push(state, actions[n], np.array([reward[n]]), new_state, done)
# update
if len(agents[0].memory) > BATCH_SIZE:
for n in range(len(agents)):
agents[n].update(BATCH_SIZE)
# save data in dictionary
Results[test_run][episode]['rewards'].append(reward)
Results[test_run][episode]['actions'].append(actions)
Results[test_run][episode]['state'].append(state)
Results[test_run][episode]['new_state'].append(new_state)
Results[test_run][episode]['sold_quantities'].append(env.sold_quantities)
Results[test_run][episode]['market_price'].append(env.market_price)
# new_state becomes state
state = new_state
if done:
# use some rendering to get insights during running (turned off to save time).
# can be adjusted directly in the environment
# sys.stdout.write("***TestRound: {}, episode: {}, reward: {}, average _reward: {} \n".format(test_run, episode, np.round(episode_reward, decimals=2), np.mean(rewards[-10:])))
# env.render()
break
# save running tim in dictionary
t_end = time.time()
time_total = t_end - t_0
Results[test_run]['runtime'] = time_total
# Plot (only works optimal for "1" TOTAL_TEST_RUNS. If more Test Runs are executed, save plots as pdf, see below)
# takes as input the data from the saved dictionary, the number of agnets, the maximum action limit, a threshold for an Nash Equilibrium (if: 'none', no threshold gets displayed)
# the total number of episodes per run, the actual run, which curves shoul be plotted (options: 'actions', rewards' or 'both'),
# a title for the plot, rescale parameters if needed (usage: rescale[param for actions, param for rewards, param for bid limit])
# and a window "moving_window" for which a moving median gets computed (recommended for presntation reasons)
plot_run_outcome(Results, NUMBER_OF_AGENTS, ACTION_LIMITS[1], 52,
EPISODES_PER_TEST_RUN, test_run, curves='actions',
title='Norm:{} Agents:{} PA: {} Fringe: {}, Run:{}'.format(NORMALIZATION_METHOD, NUMBER_OF_AGENTS, PAST_ACTION, FRINGE, test_run),
rescale=[100, 1000, 100], moving_window=9)
# save plots (uncommment ALL below)
# plt.savefig('temp{}.pdf'.format(test_run))
# plt.close()
# pdfs.append('temp{}.pdf'.format(test_run))
'''
### Merge PDFs and pickle
with open('{}_results_lra{}_lrc{}_Agents{}_PA{}_00.pkl'.format(NORMALIZATION_METHOD,NUMBER_OF_AGENTS,LEARNING_RATE_ACTOR,LEARNING_RATE_CRITIC, PAST_ACTION), 'wb') as pickle_file:
pickle.dump(Results, pickle_file)
merger = PdfFileMerger()
for pdf in pdfs:
merger.append(pdf)
merger.write("{}_plots_lra{}_lrc{}_Agents{}_PA{}_00.pdf".format(NORMALIZATION_METHOD,NUMBER_OF_AGENTS,LEARNING_RATE_ACTOR,LEARNING_RATE_CRITIC, PAST_ACTION))
merger.close()
for pdf in pdfs:
    os.remove(pdf)
'''
| 0.867188 | high |
apps/shared/setting.py | hashblock/sawtooth-uom | 5 | 8600111 | # ------------------------------------------------------------------------------
# Copyright 2018 <NAME> and <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
"""setting - hashblock-setting business logic
This module is referenced when posting hashblock-setting transactions
"""
from modules.config import valid_signer
from protobuf.setting_pb2 import Settings, SettingPayload
from shared.transactions import (create_transaction, compose_builder)
from modules.address import Address
from modules.exceptions import DataException
def __validate_settings(authorizations, threshold):
"""Validates authorization keys and threshold"""
entries = []
threshold = int(threshold)
for entry in authorizations:
key = valid_signer(entry)
entries.append(key)
    if threshold < 1:
        raise DataException('approval thresholds must be positive numbers')
    elif threshold > len(entries):
raise DataException(
'approval thresholds must not be greater than number of '
'authorizing keys')
return entries
def __create_setting(ingest):
"""Creates the setting for a particular family"""
signer, addresser, auth_keys, threshold = ingest
settings = Settings(
auth_list=','.join(auth_keys),
threshold=threshold)
return (
signer,
addresser,
SettingPayload(
action=SettingPayload.CREATE,
dimension=addresser.family,
data=settings.SerializeToString()))
def __create_inputs_outputs(ingest):
"""Creates the input and output addresses for setting transaction"""
signer, addresser, payload = ingest
inputs = [
addresser.setting_address,
addresser.candidate_address]
outputs = [
addresser.setting_address,
addresser.candidate_address]
return (
signer,
Address.setting_addresser(),
# addresser,
{"inputs": inputs, "outputs": outputs},
payload)
def __create_settings(signer, assetauths, assetthresh, unitauths, unitthresh):
"""Creates and returns a batch of setting transactions"""
valid_signer(signer)
_asset_addrs = Address.asset_addresser()
_unit_addrs = Address.unit_addresser()
asset_auth_keys = __validate_settings(assetauths, assetthresh)
unit_auth_keys = __validate_settings(unitauths, unitthresh)
setting_txn_build = compose_builder(
create_transaction,
__create_inputs_outputs,
__create_setting)
asset_setting_txn = setting_txn_build(
(signer, _asset_addrs, asset_auth_keys, assetthresh))[1]
unit_setting_txn = setting_txn_build(
(signer, _unit_addrs, unit_auth_keys, unitthresh))[1]
return [asset_setting_txn, unit_setting_txn]
def create_settings_genesis(
signer, assetauths, assetthresh, unitauths, unitthresh):
"""Creates the setting transactions returns for later submission"""
return __create_settings(
signer, assetauths, assetthresh, unitauths, unitthresh)
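# Usage sketch (keys are placeholders; signer and every auth key must pass valid_signer):
#   txns = create_settings_genesis(signer, ["key_a", "key_b"], 2, ["key_a"], 1)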
| 0.964844 | high |
py3iperf3/error.py | KimJeongChul/py3iperf3 | 3 | 10262255 | """
Runtime exception definition
"""
class IPerf3Exception(Exception):
"""Class for runtime program errors"""
pass
| 0.523438 | low |
src/application/febraban_holiday_application_service.py | gabrielleandro0801/holidays-importer | 0 | 2526527 | import boto3
from typing import List, Any
from src.domain.holiday import Holiday
from src.domain.holiday_service import HolidayService, create_holiday_service
import src.infrastructure.persistence.holiday_dynamo_repository as repository
class FebrabanHolidayApplicationService:
def __init__(self, holiday_service: HolidayService, holiday_repository: repository.HolidayDynamoRepository):
self.holiday_service = holiday_service
self.holiday_repository = holiday_repository
def import_holidays(self, year: int) -> None:
holidays: List[Holiday] = self.holiday_service.list_febraban_holidays(year=year)
self.holiday_repository.save(holidays=holidays)
def create_application_service() -> FebrabanHolidayApplicationService:
session = boto3.session.Session()
holiday_service_factory: Any = create_holiday_service()
repository_factory: Any = repository.create_holiday_dynamo_repository(session=session)
return FebrabanHolidayApplicationService(
holiday_service=holiday_service_factory(),
holiday_repository=repository_factory()
)
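# Sketch of a caller (e.g. a Lambda handler); assumes AWS credentials and the
# DynamoDB table are already configured:
#   service = create_application_service()
#   service.import_holidays(year=2024)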
| 0.863281 | high |
tests/fixtures/cython-import-package/setup.py | jrottenberg/pipenv | 6,263 | 4188655 | import ast
import os
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
# ORDER MATTERS
# Import this after setuptools or it will fail
from Cython.Build import cythonize # noqa: I100
import Cython.Distutils
ROOT = os.path.dirname(__file__)
PACKAGE_NAME = 'cython_import_package'
VERSION = None
with open(os.path.join(ROOT, 'src', PACKAGE_NAME.replace("-", "_"), '__init__.py')) as f:
for line in f:
if line.startswith('__version__ = '):
VERSION = ast.literal_eval(line[len('__version__ = '):].strip())
break
if VERSION is None:
raise EnvironmentError('failed to read version')
# Put everything in setup.cfg, except those that don't actually work?
setup(
# These really don't work.
package_dir={'': 'src'},
packages=find_packages('src'),
# I don't know how to specify an empty key in setup.cfg.
package_data={
'': ['LICENSE*', 'README*'],
},
setup_requires=["setuptools_scm", "cython"],
# I need this to be dynamic.
version=VERSION,
)
| 0.699219 | high |
controllers/token_controller/token_controller.py | sourcebots/competition-simulator | 0 | 846623 | import sys
import struct
from typing import Dict
from pathlib import Path
# Webots specific library
from controller import Emitter, Supervisor
# Root directory of the SR webots simulator (equivalent to the root of the git repo)
REPO_ROOT = Path(__file__).resolve().parent.parent.parent
sys.path.insert(1, str(REPO_ROOT / 'modules'))
from sbot.utils import get_robot_device # isort:skip
from shared_utils import TOKENS, TargetInfo, TargetType # isort:skip
BROADCASTS_PER_SECOND = 10
class TokenController:
_emitters: Dict[TargetInfo, Emitter]
def __init__(self) -> None:
self._robot = Supervisor()
self._emitters = {
code: get_robot_device(
self._robot,
f'{code.owner.name}_{code.id} Emitter',
Emitter,
)
for code in TOKENS
if code.type == TargetType.CONTAINER
}
self._emitters.update({
code: get_robot_device(
self._robot,
f'BEACON_{code.id} Emitter',
Emitter,
)
for code in TOKENS
if code.type == TargetType.BEACON
})
def transmit_pulses(self) -> None:
for token_code, emitter in self._emitters.items():
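            # network byte order ("!bBb"): type as int8, id as uint8, owner as int8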
emitter.send(
struct.pack(
"!bBb",
int(token_code.type),
token_code.id,
int(token_code.owner),
),
)
def main(self) -> None:
broadcast_spacing = int(1000 / BROADCASTS_PER_SECOND)
while True:
self.transmit_pulses()
self._robot.step(broadcast_spacing)
if __name__ == "__main__":
token_controller = TokenController()
token_controller.main()
| 0.777344 | high |
runcom/.bin/fmount.py | bossjones/linux-dotfiles | 1 | 7311919 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# If you often need to mount sshfs filesystems you may be interested in using an sshfs helper
import os
import argparse
import configparser
import subprocess
CONFIG = os.path.expanduser("~/.config/fmount.conf")
DEFAULT_MOUNTPATH = os.path.expanduser("~/mnt")
# we just strip spaces in the mntopts string
def reformat_mntopts(mntopts):
mntopts = mntopts.split(",")
options = []
for opt in mntopts:
options.append("=".join(tk.strip() for tk in opt.split("=")))
return ",".join(set(options))
def mount(name, mountpath, config):
mountpoint = os.path.join(mountpath, name)
host = config.get(name, "host", fallback=name)
path = config.get(name, "path", fallback="")
user = config.get(name, "user", fallback=None)
port = config.get(name, "port", fallback=None)
mntopts = config.get(name, "mntopts", fallback=[])
mntopts = reformat_mntopts(mntopts)
uhd = host + ":" + path
if user:
uhd = user + "@" + uhd
cmd = ["sshfs", uhd, mountpoint]
if mntopts:
cmd += ["-o", mntopts]
if port:
cmd += ["-p", port]
print("Mounting at '{}'...".format(mountpoint))
# the mountpoint might exist after an error or automatic unmount
try:
os.makedirs(mountpoint)
except FileExistsError:
pass
subprocess.run(cmd, check=True)
def umount(mntpoint):
    if os.path.ismount(mntpoint):
cmd = ["fusermount", "-u", mntpoint]
subprocess.run(cmd, check=True)
clean(mntpoint)
def clean(path):
if not os.path.ismount(path) and len(os.listdir(path)) == 0:
print("Removing empty mountpoint '{}'...".format(path))
os.rmdir(path)
def cleanAll(mountpath):
for file in os.listdir(mountpath):
path = os.path.join(mountpath, file)
if os.path.isdir(path):
clean(path)
def writeDefaultConfig():
conf_str = """
# globals live in the DEFAULT section
[DEFAULT]
mountpath = {mountpath}
# mntopts = opt1=val1, opt2=val2, ... # optional
# [remote_name]
# host = ... # optional, equal to remote_name by default
# path = ... # optional, sshfs defaults to remote $HOME
# user = ... # optional, .ssh/config is honoured
# port = ... # optional, .ssh/config is honoured
# mntopts = opt1=val1, opt2=val2, ... # optional
""".format(mountpath=DEFAULT_MOUNTPATH)
# SOURCE: https://www.zengyu.ink/python/2017/03/10/files-and-io/
# Printing to a File
# NOTE: redirects the output of the print() function to a file f.
# Make sure that the file is opened in text mode.
with open(CONFIG, 'wt') as cfile:
print(conf_str, file=cfile)
if __name__ == "__main__":
config = configparser.ConfigParser()
if not os.path.exists(CONFIG):
writeDefaultConfig()
config.read(CONFIG)
parser = argparse.ArgumentParser(
description="wrapper for sshfs with a config file")
parser.add_argument(
"-u",
action="store_true",
dest="umount",
help="unmount given host or path")
parser.add_argument(
"host",
nargs="+",
help="remote name specified in the config file")
args = parser.parse_args()
mountpath = os.path.expanduser(
config.get(
"DEFAULT",
"mountpath",
fallback=DEFAULT_MOUNTPATH))
if args.umount:
for host in args.host:
if os.path.isdir(host):
# not a host, but a path
path = host
else:
path = os.path.join(mountpath, host)
if not os.path.isdir(path):
parser.error("Path '{}' does not exist.".format(path))
if os.path.ismount(path):
umount(path)
else:
parser.error("Path '{}' is not a mount point.".format(path))
else:
for host in args.host:
if config.has_section(host):
if os.path.ismount(os.path.join(mountpath, host)):
parser.error("Host '{}' is already mounted.".format(host))
mount(host, mountpath, config)
else:
parser.error(
"Section '{}' does not exist in the config file.".format(host))
cleanAll(mountpath)
| 0.863281 | high |
tests/unit/plugins/openstack/scenarios/ceilometer/test_utils.py | amit0701/rally | 0 | 10575087 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime as dt
from dateutil import parser
import mock
from rally import exceptions
from rally.plugins.openstack.scenarios.ceilometer import utils
from tests.unit import test
CEILOMETER_UTILS = "rally.plugins.openstack.scenarios.ceilometer.utils"
class CeilometerScenarioTestCase(test.ScenarioTestCase):
def setUp(self):
super(CeilometerScenarioTestCase, self).setUp()
self.scenario = utils.CeilometerScenario(self.context)
def test__make_samples_no_batch_size(self):
self.scenario.generate_random_name = mock.Mock(
return_value="fake_resource")
test_timestamp = dt.datetime(2015, 10, 20, 14, 18, 40)
result = list(self.scenario._make_samples(count=2, interval=60,
timestamp=test_timestamp))
self.assertEqual(1, len(result))
expected = {"counter_name": "cpu_util",
"counter_type": "gauge",
"counter_unit": "%",
"counter_volume": 1,
"resource_id": "fake_resource",
"timestamp": test_timestamp.isoformat()}
self.assertEqual(expected, result[0][0])
samples_int = (parser.parse(result[0][0]["timestamp"]) -
parser.parse(result[0][1]["timestamp"])).seconds
self.assertEqual(60, samples_int)
def test__make_samples_batch_size(self):
self.scenario.generate_random_name = mock.Mock(
return_value="fake_resource")
test_timestamp = dt.datetime(2015, 10, 20, 14, 18, 40)
result = list(self.scenario._make_samples(count=4, interval=60,
batch_size=2,
timestamp=test_timestamp))
self.assertEqual(2, len(result))
expected = {"counter_name": "cpu_util",
"counter_type": "gauge",
"counter_unit": "%",
"counter_volume": 1,
"resource_id": "fake_resource",
"timestamp": test_timestamp.isoformat()}
self.assertEqual(expected, result[0][0])
samples_int = (parser.parse(result[0][-1]["timestamp"]) -
parser.parse(result[1][0]["timestamp"])).seconds
# NOTE(idegtiarov): here we check that interval between last sample in
# first batch and first sample in second batch is equal 60 sec.
self.assertEqual(60, samples_int)
def test__make_timestamp_query(self):
start_time = "2015-09-09T00:00:00"
end_time = "2015-09-10T00:00:00"
expected_start = [
{"field": "timestamp", "value": "2015-09-09T00:00:00",
"op": ">="}]
expected_end = [
{"field": "timestamp", "value": "2015-09-10T00:00:00",
"op": "<="}
]
actual = self.scenario._make_timestamp_query(start_time, end_time)
self.assertEqual(expected_start + expected_end, actual)
self.assertRaises(exceptions.InvalidArgumentsException,
self.scenario._make_timestamp_query,
end_time, start_time)
self.assertEqual(
expected_start,
self.scenario._make_timestamp_query(start_time=start_time))
self.assertEqual(
expected_end,
self.scenario._make_timestamp_query(end_time=end_time))
def test__list_alarms_by_id(self):
self.assertEqual(self.clients("ceilometer").alarms.get.return_value,
self.scenario._list_alarms("alarm-id"))
self.clients("ceilometer").alarms.get.assert_called_once_with(
"alarm-id")
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.list_alarms")
def test__list_alarms(self):
self.assertEqual(self.clients("ceilometer").alarms.list.return_value,
self.scenario._list_alarms())
self.clients("ceilometer").alarms.list.assert_called_once_with()
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.list_alarms")
def test__create_alarm(self):
alarm_dict = {"alarm_id": "fake-alarm-id"}
orig_alarm_dict = copy.copy(alarm_dict)
self.scenario.generate_random_name = mock.Mock()
self.assertEqual(self.scenario._create_alarm("fake-meter-name", 100,
alarm_dict),
self.clients("ceilometer").alarms.create.return_value)
self.clients("ceilometer").alarms.create.assert_called_once_with(
meter_name="fake-meter-name",
threshold=100,
description="Test Alarm",
alarm_id="fake-alarm-id",
name=self.scenario.generate_random_name.return_value)
# ensure that _create_alarm() doesn't modify the alarm dict as
# a side-effect
self.assertDictEqual(alarm_dict, orig_alarm_dict)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.create_alarm")
def test__delete_alarms(self):
self.scenario._delete_alarm("alarm-id")
self.clients("ceilometer").alarms.delete.assert_called_once_with(
"alarm-id")
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.delete_alarm")
def test__update_alarm(self):
alarm_diff = {"description": "Changed Test Description"}
orig_alarm_diff = copy.copy(alarm_diff)
self.scenario._update_alarm("alarm-id", alarm_diff)
self.clients("ceilometer").alarms.update.assert_called_once_with(
"alarm-id", **alarm_diff)
# ensure that _create_alarm() doesn't modify the alarm dict as
# a side-effect
self.assertDictEqual(alarm_diff, orig_alarm_diff)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.update_alarm")
def test__get_alarm_history(self):
self.assertEqual(
self.scenario._get_alarm_history("alarm-id"),
self.clients("ceilometer").alarms.get_history.return_value)
self.clients("ceilometer").alarms.get_history.assert_called_once_with(
"alarm-id")
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.get_alarm_history")
def test__get_alarm_state(self):
self.assertEqual(
self.scenario._get_alarm_state("alarm-id"),
self.clients("ceilometer").alarms.get_state.return_value)
self.clients("ceilometer").alarms.get_state.assert_called_once_with(
"alarm-id")
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.get_alarm_state")
def test__set_alarm_state(self):
alarm = mock.Mock()
self.clients("ceilometer").alarms.create.return_value = alarm
return_alarm = self.scenario._set_alarm_state(alarm, "ok", 100)
self.mock_wait_for.mock.assert_called_once_with(
alarm,
ready_statuses=["ok"],
update_resource=self.mock_get_from_manager.mock.return_value,
timeout=100, check_interval=1)
self.mock_get_from_manager.mock.assert_called_once_with()
self.assertEqual(self.mock_wait_for.mock.return_value, return_alarm)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.set_alarm_state")
def test__list_events(self):
self.assertEqual(
self.scenario._list_events(),
self.admin_clients("ceilometer").events.list.return_value
)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.list_events")
def test__get_events(self):
self.assertEqual(
self.scenario._get_event(event_id="fake_id"),
self.admin_clients("ceilometer").events.get.return_value
)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.get_event")
def test__list_event_types(self):
self.assertEqual(
self.scenario._list_event_types(),
self.admin_clients("ceilometer").event_types.list.return_value
)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.list_event_types")
def test__list_event_traits(self):
self.assertEqual(
self.scenario._list_event_traits(
event_type="fake_event_type", trait_name="fake_trait_name"),
self.admin_clients("ceilometer").traits.list.return_value
)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.list_event_traits")
def test__list_event_trait_descriptions(self):
self.assertEqual(
self.scenario._list_event_trait_descriptions(
event_type="fake_event_type"
),
self.admin_clients("ceilometer").trait_descriptions.list.
return_value
)
self._test_atomic_action_timer(
self.scenario.atomic_actions(),
"ceilometer.list_event_trait_descriptions")
def test__list_meters(self):
self.assertEqual(self.scenario._list_meters(),
self.clients("ceilometer").meters.list.return_value)
self.clients("ceilometer").meters.list.assert_called_once_with(
q=None, limit=None)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.list_meters")
def test__list_resources(self):
self.assertEqual(
self.scenario._list_resources(),
self.clients("ceilometer").resources.list.return_value)
self.clients("ceilometer").resources.list.assert_called_once_with(
q=None, limit=None)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.list_resources")
def test__list_samples(self):
self.assertEqual(
self.scenario._list_samples(),
self.clients("ceilometer").samples.list.return_value)
self.clients("ceilometer").samples.list.assert_called_once_with(
q=None, limit=None)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.list_samples")
def test__list_samples_with_query(self):
self.assertEqual(
self.scenario._list_samples(query=[{"field": "user_id",
"volume": "fake_id"}],
limit=10),
self.clients("ceilometer").samples.list.return_value)
self.clients("ceilometer").samples.list.assert_called_once_with(
q=[{"field": "user_id", "volume": "fake_id"}], limit=10)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.list_samples:limit&user_id")
def test__get_resource(self):
self.assertEqual(self.scenario._get_resource("fake-resource-id"),
self.clients("ceilometer").resources.get.return_value)
self.clients("ceilometer").resources.get.assert_called_once_with(
"fake-resource-id")
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.get_resource")
def test__get_stats(self):
self.assertEqual(
self.scenario._get_stats("fake-meter"),
self.clients("ceilometer").statistics.list.return_value)
self.clients("ceilometer").statistics.list.assert_called_once_with(
"fake-meter", q=None, period=None, groupby=None, aggregates=None)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.get_stats")
def test__create_meter(self):
self.scenario.generate_random_name = mock.Mock()
self.assertEqual(
self.scenario._create_meter(fakearg="fakearg"),
self.clients("ceilometer").samples.create.return_value[0])
self.clients("ceilometer").samples.create.assert_called_once_with(
counter_name=self.scenario.generate_random_name.return_value,
fakearg="fakearg")
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.create_meter")
def test__query_alarms(self):
self.assertEqual(
self.scenario._query_alarms("fake-filter", "fake-orderby", 10),
self.clients("ceilometer").query_alarms.query.return_value)
self.clients("ceilometer").query_alarms.query.assert_called_once_with(
"fake-filter", "fake-orderby", 10)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.query_alarms")
def test__query_alarm_history(self):
self.assertEqual(
self.scenario._query_alarm_history(
"fake-filter", "fake-orderby", 10),
self.clients("ceilometer").query_alarm_history.query.return_value)
self.clients(
"ceilometer").query_alarm_history.query.assert_called_once_with(
"fake-filter", "fake-orderby", 10)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.query_alarm_history")
def test__query_samples(self):
self.assertEqual(
self.scenario._query_samples("fake-filter", "fake-orderby", 10),
self.clients("ceilometer").query_samples.query.return_value)
self.clients("ceilometer").query_samples.query.assert_called_once_with(
"fake-filter", "fake-orderby", 10)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.query_samples")
def test__create_sample_no_resource_id(self):
self.scenario.generate_random_name = mock.Mock()
created_sample = self.scenario._create_sample("test-counter-name",
"test-counter-type",
"test-counter-unit",
"test-counter-volume")
self.assertEqual(
created_sample,
self.clients("ceilometer").samples.create.return_value)
self.clients("ceilometer").samples.create.assert_called_once_with(
counter_name="test-counter-name",
counter_type="test-counter-type",
counter_unit="test-counter-unit",
counter_volume="test-counter-volume",
resource_id=self.scenario.generate_random_name.return_value)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.create_sample")
def test__create_sample(self):
created_sample = self.scenario._create_sample("test-counter-name",
"test-counter-type",
"test-counter-unit",
"test-counter-volume",
"test-resource-id")
self.assertEqual(
created_sample,
self.clients("ceilometer").samples.create.return_value)
self.clients("ceilometer").samples.create.assert_called_once_with(
counter_name="test-counter-name",
counter_type="test-counter-type",
counter_unit="test-counter-unit",
counter_volume="test-counter-volume",
resource_id="test-resource-id")
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"ceilometer.create_sample")
def test__make_general_query(self):
self.scenario.context = {
"user": {"tenant_id": "fake", "id": "fake_id"},
"tenant": {"id": "fake_id", "resources": ["fake_resource"]}}
metadata = {"fake_field": "boo"}
expected = [
{"field": "user_id", "value": "fake_id", "op": "eq"},
{"field": "project_id", "value": "fake_id", "op": "eq"},
{"field": "resource_id", "value": "fake_resource", "op": "eq"},
{"field": "metadata.fake_field", "value": "boo", "op": "eq"},
]
actual = self.scenario._make_general_query(True, True, True, metadata)
self.assertEqual(expected, actual)
def test__make_query_item(self):
expected = {"field": "foo", "op": "eq", "value": "bar"}
self.assertEqual(expected,
self.scenario._make_query_item("foo", value="bar"))
def test__make_profiler_key(self):
query = [
{"field": "test_field1", "op": "eq", "value": "bar"},
{"field": "test_field2", "op": "==", "value": None}
]
limit = 100
method = "fake_method"
actual = self.scenario._make_profiler_key(method, query, limit)
self.assertEqual("fake_method:limit&test_field1&test_field2", actual)
actual = self.scenario._make_profiler_key(method, query, None)
self.assertEqual("fake_method:test_field1&test_field2", actual)
self.assertEqual(method,
self.scenario._make_profiler_key(method, None, None))
| 0.96875 | high |
monitor/plugins/spark_mesos/plugin.py | GabrielSVinha/bigsea-monitor | 1 | 10435119 | # Copyright (c) 2017 UFCG-LSD.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from datetime import datetime
import json
import paramiko
import pytz
import requests
import tzlocal
from monitor.utils.monasca.connector import MonascaConnector
from monitor.plugins.base import Plugin
from monitor import api as api
LOG_FILE = "progress.log"
TIME_PROGRESS_FILE = "time_progress.log"
MONITORING_INTERVAL = 2
class SparkProgressUPV(Plugin):
def __init__(self, app_id, info_plugin, retries=60):
Plugin.__init__(self, app_id, info_plugin,
collect_period=5, retries=retries)
self.monasca = MonascaConnector()
self.submission_url = info_plugin['spark_submisson_url']
self.expected_time = info_plugin['expected_time']
self.remaining_time = int(self.expected_time)
self.job_expected_time = int(self.expected_time)
self.number_of_jobs = int(info_plugin['number_of_jobs'])
self.current_job_id = 0
self.dimensions = {'application_id': self.app_id,
'service': 'spark-sahara'}
self.conn = paramiko.SSHClient()
self.conn.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.conn.connect(hostname=api.mesos_cluster_addr,
username=api.mesos_username,
password=api.mesos_password)
self.spark_id = self._discover_id_from_spark()
def _publish_measurement(self, jobs):
application_progress_error = {}
# Init
jobs.reverse()
if not len(jobs) == 0:
current_job = jobs[self.current_job_id]
if current_job['status'] == 'FAILED':
self.current_job_id = len(jobs) - 1
elif current_job['status'] == 'SUCCEEDED':
elapsed_time = float(self._get_elapsed_time(
current_job['submissionTime']))
self.remaining_time = self.remaining_time - elapsed_time
self.current_job_id = len(jobs) - 1
# Job Time
self.job_expected_time = (self.remaining_time
/ (float(self.number_of_jobs)
- float(self.current_job_id)))
elif current_job['status'] == 'RUNNING':
# Job Progress
job_progress = (current_job['numCompletedTasks']
/ float(current_job['numTasks']))
# Elapsed Time
elapsed_time = float(self._get_elapsed_time(
current_job['submissionTime']))
# Reference Value
ref_value = (elapsed_time / self.job_expected_time)
# Error
error = job_progress - ref_value
application_progress_error['name'] = ('application-progress'
'.error')
application_progress_error['value'] = error
application_progress_error['timestamp'] = time.time() * 1000
application_progress_error['dimensions'] = self.dimensions
                print(application_progress_error['value'])
self.monasca.send_metrics([application_progress_error])
time.sleep(MONITORING_INTERVAL)
def _get_elapsed_time(self, gmt_timestamp):
        try:
            # get_localzone() returns a pytz timezone; keep its name so the
            # pytz.timezone() call below always receives a string
            local_tz = tzlocal.get_localzone().zone
        except Exception:
            local_tz = "America/Recife"
        local_tz = pytz.timezone(local_tz)
date_time = datetime.strptime(gmt_timestamp, '%Y-%m-%dT%H:%M:%S.%fGMT')
date_time = date_time.replace(tzinfo=pytz.utc).astimezone(local_tz)
date_time = date_time.replace(tzinfo=None)
datetime_now = datetime.now()
elapsed_time = datetime_now - date_time
return elapsed_time.seconds
def _discover_id_from_spark(self):
for i in range(30):
i, o, e = self.conn.exec_command('curl %s/api/v1/applications' % self.submission_url)
applications_running = json.loads(o.read())
for app in applications_running:
if app['name'] == self.app_id:
return app['id']
time.sleep(1)
return None
def _get_progress(self, spark_id):
i, o, e = self.conn.exec_command('curl %s/api/v1/applications/%s/jobs'
% (self.submission_url,
spark_id))
return json.loads(o.read())
def monitoring_application(self):
try:
job_request = self._get_progress(self.spark_id)
self._publish_measurement(job_request)
except Exception as ex:
print ("Error: No application found for %s. %s remaining attempts"
% (self.app_id, self.attempts))
print ex.message
raise
| 0.902344 | high |
ampa/cole/migrations/0029_wordtemplate.py | jordiprats/django-ampa | 0 | 1098351 | # Generated by Django 3.1.5 on 2021-05-09 17:33
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('cole', '0028_auto_20210307_1720'),
]
operations = [
migrations.CreateModel(
name='WordTemplate',
fields=[
('fileattachment_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='cole.fileattachment')),
],
bases=('cole.fileattachment',),
),
]
| 0.523438 | medium |
twitoff/hello.py | JamesBarciz/twitoff-ds16-jjb | 0 | 2760495 | #!/usr/bin/env python3
# Package imports
from flask import Flask
app = Flask(__name__)
@app.route('/')
def hello_world():
return 'Hello, World!'
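# To run locally (assuming this module is named hello.py):
#   FLASK_APP=hello flask run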
| 0.527344 | high |
main.py | sdrrv/Fate-Wielding-Bot | 3 | 5822639 | import asyncio
import discord
from discord.ext import commands
from discord.utils import find
import os
from controllers.controller import controller
from keep_alive import keep_alive
import json
intents = discord.Intents.default()
bot = commands.Bot(command_prefix="!fate ", intents=intents)
cont = controller(bot)
admins = cont.getAdmins()
@bot.event # On Ready
async def on_ready():
await bot.change_presence(activity=discord.Game("!fate help"))
print(bot.user)
print([i.name for i in bot.guilds])
@bot.event # On Error
async def on_command_error(ctx, error):
print("error")
cont.debug(ctx)
if isinstance(error, commands.CommandNotFound):
await ctx.channel.send("Command not found.Try `!fate help`")
return
elif isinstance(error, commands.MissingPermissions):
await ctx.channel.send(f"Sorry <@{ctx.message.author.id}>, you do not have permissions to do that!")
return
elif isinstance(error, commands.MemberNotFound):
await ctx.channel.send(f"Member '`{error.argument}`' not found")
return
elif isinstance(error, commands.MissingRequiredArgument):
await ctx.channel.send("You're missing an argument there bud...\nTry `!fate help <command>`")
return
#await ctx.channel.send("**Something went wrong**... We are sorry. If you think this is a `bug` pls **report it**.")
raise error
@bot.event # On new Server Enter
async def on_guild_join(guild):
print(f"{guild.name}, Online")
await cont.debugerLogGreen(f"**{guild.name}**, `Online`")
with open("./models/leaderBoard.json", "r") as f:
leaderBoard = json.load(f)
leaderBoard[str(guild.id)] = {
"games": {},
"randomizers": {
"ban": []
}
}
with open("./models/leaderBoard.json", "w") as f:
json.dump(leaderBoard, f, indent=4)
general = find(lambda x: (x.name == 'general' or x.name ==
"geral"), guild.text_channels)
if general and general.permissions_for(guild.me).send_messages:
await general.send(f"Hello there, `{guild.name}`!\nThank you for adding our bot, we hope you have as mutch fun using it, as we did coding it.\n"
+ cont.help())
@bot.event
async def on_guild_remove(guild):
print(f"{guild.name}, Offline")
await cont.debugerLogRed(f"**{guild.name}**, `Offline`")
with open("./models/leaderBoard.json", "r") as f:
leaderBoard = json.load(f)
leaderBoard.pop(str(guild.id))
with open("./models/leaderBoard.json", "w") as f:
json.dump(leaderBoard, f, indent=4)
#!--------------------------------------------------------------------------------------------------------------------------------------
@bot.command(name="about",
brief="A little about the bot"
)
async def about(ctx):
cont.debug(ctx)
await ctx.channel.send(cont.help())
#!-----------------------------------------------------------------------------------------------------------------------------------------
@bot.command(name="pull", hidden=True)
async def pull(ctx):
if(ctx.author.id not in admins):
return
os.system("git pull https://github.com/sdrrv/Fate-Wielding-Bot.git")
#!-----------------------------------------------------------------------------------------------------------------------------------------
@bot.command(hidden=True)
async def load(ctx, name):
if(ctx.author.id not in admins):
return
bot.load_extension(f"cogs.{name}")
print(f"{name} Loaded!")
@bot.command(hidden=True)
async def unload(ctx, name):
if(ctx.author.id not in admins):
return
bot.unload_extension(f"cogs.{name}")
print(f"{name} UnLoaded!")
@bot.command(hidden=True)
async def reload(ctx, name):
if(ctx.author.id not in admins):
return
bot.unload_extension(f"cogs.{name}")
bot.load_extension(f"cogs.{name}")
print(f"{name} ReLoaded!")
for file in os.listdir("./cogs"): # Will load all COGs
if file.endswith(".py"):
print("Load "+file)
bot.load_extension(f"cogs.{ file[:-3] }")
#!-----------------------------------------------------------------------------------------------------------------------------------------
keep_alive()
bot.run(os.getenv("TOKEN")) # Secret Stuff
| 0.777344 | high |
mailpile/plugins/plugins.py | Sebrowsky/Mailpile | 0 | 9085823 | import os
from gettext import gettext as _
import mailpile.commands
from mailpile.plugins import PluginManager
_plugins = PluginManager(builtin=__file__)
class Plugins(mailpile.commands.Command):
"""List the currently available plugins."""
SYNOPSIS = (None, 'plugins', 'plugins', '[<plugins>]')
ORDER = ('Config', 9)
def command(self):
pm = self.session.config.plugins
wanted = self.args
info = dict((d, {
'loaded': d in pm.LOADED,
'builtin': d not in pm.DISCOVERED
}) for d in pm.available() if (not wanted or d in wanted))
for plugin in info:
if plugin in pm.DISCOVERED:
info[plugin]['manifest'] = pm.DISCOVERED[plugin][1]
return self._success(_('Listed available plugins'), info)
class LoadPlugin(mailpile.commands.Command):
"""Load and enable a given plugin."""
SYNOPSIS = (None, 'plugins/load', 'plugins/load', '<plugin>')
SPLIT_ARGS = False
ORDER = ('Config', 9)
def command(self):
config = self.session.config
plugins = config.plugins
for plugin in self.args:
if plugin in plugins.LOADED:
return self._error(_('Already loaded: %s') % plugin,
info={'loaded': plugin})
for plugin in self.args:
try:
                # FIXME: This fails to update the ConfigManager
plugins.load(plugin, process_manifest=True, config=config)
config.sys.plugins.append(plugin)
except Exception, e:
self._ignore_exception()
return self._error(_('Failed to load plugin: %s') % plugin,
info={'failed': plugin})
self._serialize('Save config', lambda: config.save())
return self._success(_('Loaded plugins: %s') % ', '.join(self.args),
{'loaded': self.args})
class DisablePlugin(mailpile.commands.Command):
"""Disable a plugin."""
SYNOPSIS = (None, 'plugins/disable', 'plugins/disable', '<plugin>')
ORDER = ('Config', 9)
def command(self):
config = self.session.config
plugins = config.plugins
for plugin in self.args:
if plugin in plugins.REQUIRED:
return self._error(_('Required plugins can not be disabled: %s'
) % plugin)
if plugin not in config.sys.plugins:
return self._error(_('Plugin not loaded: %s') % plugin)
for plugin in self.args:
while plugin in config.sys.plugins:
config.sys.plugins.remove(plugin)
self._serialize('Save config', lambda: config.save())
return self._success(_('Disabled plugins: %s (restart required)'
) % ', '.join(self.args),
{'disabled': self.args})
_plugins.register_commands(Plugins, LoadPlugin, DisablePlugin)
| 0.757813 | high |
pyshader/opcodes.py | pygfx/pyshader | 48 | 7545887 | """ The opcodes of our bytecode.
Bytecode describing a stack machine is a pretty nice representation to
generate SpirV code, because the code gets visited in a flow, making
it relatively easy to do type inference.
By defining our own bytecode, we can implement a single generator that
consumes it, and use the bytecode as a target for different source
languages. Also, we can target the bytecode towards SpirV, which helps
keeping the generator relatively simple.
Our bytecode consists of a list of tuples, in which the first element
is a (str) opcode, and the remaining elements its arguments. These
opcodes are to be executed in a stack machine.
The term bytecode is a bit odd, because we never really store it as
bytes. But the meaning of the term "bytecode" most closely represents
this intermediate representation of code.
"""
import json
def bc2str(opcodes):
"""Serialize opcodes to str, one opcode + args per line (hint: it's json)."""
lines = [json.dumps(op)[1:-1] for op in opcodes]
return "\n".join(lines)
def str2bc(s):
"""Get a list of opcodes (+args) from string."""
opcodes = []
for line in s.splitlines():
line = line.strip()
if line:
opcodes.append(tuple(json.loads("[" + line + "]")))
return opcodes
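# A minimal round-trip sketch (the opcodes below are illustrative, not taken
# from a real shader program): bc2str and str2bc should be inverses.
#
#     ops = [("co_load_constant", 3.0),
#            ("co_load_constant", 4.0),
#            ("co_binary_op", "add")]
#     assert str2bc(bc2str(ops)) == ops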
class OpCodeDefinitions:
"""Abstract class that defines the bytecode ops as methods, making
it easy to document them (using docstring and arguments).
Code that produces bytecode can use this as class as a kind of enum
for the opcodes (and for documentation). Code that consumes bytecode
can subclass this class and implement the methods.
"""
def co_src_filename(self, filename):
"""Mark that the following instructions correspond to the given filename."""
raise NotImplementedError()
def co_src_linenr(self, linenr):
"""Mark that the following instructions correspond to the given linenr."""
raise NotImplementedError()
def co_func(self, name):
"""Define a function. WIP"""
raise NotImplementedError()
def co_entrypoint(self, name, shader_type, execution_modes):
"""Define the start of an entry point function.
* name (str): The function name.
* shader_type (str): 'vertex', 'fragment' or 'compute'.
* execution_modes (dict): a dict with execution modes.
"""
raise NotImplementedError()
def co_func_end(self):
"""Define the end of a function (or entry point)."""
raise NotImplementedError()
def co_return(self):
"""Return from a function. When the function is a fragment shader
entrypoint, this means discard.
"""
raise NotImplementedError()
def co_call(self, funcname, nargs):
"""Call a function. The arguments are on the stack. The
funcname should match a shader-specific type (e.g. f32 or Array),
a texture sample/read/write function, a function in the stdlib,
or another defined function (once we implement that).
"""
raise NotImplementedError()
def co_resource(self, name, kind, slot, typename):
"""Define a shader resource, to be available under the given name.
Kind can be 'input', 'output', 'uniform', 'buffer', 'texture' or 'sampler'.
Slot is typically an int defining the location/binding slot,
but can also be a string specifying a builtin (for input and output).
"""
raise NotImplementedError()
def co_pop_top(self):
"""Pop the top of the stack."""
raise NotImplementedError()
def co_dup_top(self):
"""Duplicate the top of the stack."""
raise NotImplementedError()
def co_rotate_stack(self, n):
"""Rotate the top n elements on the stack, moving the top item
to the nth position.
"""
raise NotImplementedError()
def co_reverse_stack(self, n):
"""Reverse the top n elements on the stack."""
raise NotImplementedError()
def co_load_name(self, name):
"""Load a local variable onto the stack."""
raise NotImplementedError()
def co_store_name(self, name):
"""Store the TOS under the given name, so it can be referenced later
using co_load_name.
"""
raise NotImplementedError()
def co_load_index(self):
"""Implements TOS = TOS1[TOS]."""
raise NotImplementedError()
def co_store_index(self):
"""Implements TOS1[TOS] = TOS2."""
raise NotImplementedError()
def co_load_attr(self, name):
"""Implements TOS = TOS.name. Mostly intended for vector swizzling
(e.g. pos.xyzw and color.rgba)
"""
raise NotImplementedError()
def co_load_constant(self, value):
"""Load a constant value onto the stack.
The value can be a float, int, bool. Tuple for vec?
"""
raise NotImplementedError()
def co_load_array(self, nargs):
"""Build an array composed of the nargs last elements on the stack,
and push that on the stack.
"""
raise NotImplementedError()
def co_binary_op(self, op):
"""Implements TOS = TOS1 ?? TOS, where ?? is the given operation,
which can be: add, sub, mul, div, fdiv, idiv, and, or, ...
"""
raise NotImplementedError()
def co_unary_op(self, op):
"""A unary operation: neg, not."""
raise NotImplementedError()
def co_compare(self, cmp):
"""Comparison operation. cmp can be "<", "<=", "==", "!=", ">", ">="."""
raise NotImplementedError()
def co_label(self, label):
"""Start a block with the given label.
The label must be a unique int or string.
"""
raise NotImplementedError()
def co_branch(self, label):
"""Branch to the target label."""
raise NotImplementedError()
def co_branch_conditional(self, true_label, false_label):
"""Branch to true_label if TOS is True, else branch to false_label.
The control flow of the bytecode must be such that for each
pair of branches there is a unique label where they merge and
where no other branches pass through. The exception is that a
branch-pair, and their sub-branches (and theirs, etc.) can merge
at the same label (the compiler can resolve this).
"""
raise NotImplementedError()
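    # Illustrative control flow for an if/else (the integer labels are
    # arbitrary and hypothetical): both arms of the conditional branch merge
    # at a single label, as required by the constraint described above.
    #
    #     ("co_branch_conditional", 1, 2),
    #     ("co_label", 1), ..., ("co_branch", 3),
    #     ("co_label", 2), ..., ("co_branch", 3),
    #     ("co_label", 3),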
def co_branch_loop(self, iter_label, continue_label, merge_label):
"""Indicate the beginning of a loop (in a new block)."""
raise NotImplementedError()
def co_select(self):
"""Select between two values based on a bool.
If TOS3, select TOS2, otherwise select TOS1. Push
the selected object on the stack.
"""
raise NotImplementedError()
| 0.996094 | high |
utility.py | lambdamirror/Binance-Trading-Modules | 2 | 10608047 | # -*- coding: utf-8 -*-
"""
Created on Sat Mar 24 00:16:07 2020
@author: tranl
"""
import pandas as pd
import sys
### Utility functions
def barstr(text, symbol='#', length=100, space_size=5):
'''
    Returns a banner line of the form '#### <text> ####'
'''
bar_size = int((length-len(text))/2)
bar = ''.join([symbol]*(bar_size-space_size))
space = ''.join([' ']*space_size)
return '{:<}{}{}{}{:>}'.format(bar, space, text, space, bar)
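# For example (defaults as above), barstr('INIT', length=20, space_size=5)
# should yield '###     INIT     ###': 3 bar symbols and 5 spaces per side.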
def print_(s, file):
'''
Prints a string s into file
'''
with open(file, "a+") as f:
f.write('\n' + str(s))
f.close()
print(s)
def orderstr(order):
'''
Returns string representation of the order response from Binance
'''
try:
s = 'Market response: '
s += 'Id:' + str(order['orderId']) + ' status:' + str(order['status']) + ' side:' + str(order['side']) + ' type:' + str(order['type']) + ' quantity:' + str(order['origQty'])
if order['type']=='LIMIT':
s += ' price:' + str(order['price']) + ' TIF:' + str(order['timeInForce'])
elif order['type']=='TRAILING_STOP_MARKET':
s += ' price:' + str(order['activatePrice']) + ' cbRate:' + str(order['priceRate'])
s += ' time:' + pd.to_datetime(order['updateTime'], unit='ms').strftime("%y-%m-%d %H:%M:%S")
return s
except Exception:
s = "Invalid order response from the Market"
return s
def timestr(dateTime: int, end='f'):
'''
    Returns a string representation of an integer timestamp in milliseconds
'''
if end=='m': s = pd.to_datetime(dateTime, unit='ms').strftime("%y-%m-%d %H:%M")
elif end=='s': s = pd.to_datetime(dateTime, unit='ms').strftime("%y-%m-%d %H:%M:%S")
elif end=='f': s = pd.to_datetime(dateTime, unit='ms').strftime("%y-%m-%d %H:%M:%S:%f")[:-3]
return s | 0.925781 | high |
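# Usage sketch (the timestamp is illustrative): pd.to_datetime treats the
# integer as UTC milliseconds, so e.g.
#
#     timestr(1585000000000, end='s')   # -> '20-03-23 21:46:40'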
my_allennlp_package/modules/adapters.py | guopeiming/crosslingual-DepPar | 2 | 1271215 | import torch
import torch.nn as nn
from torch.nn.functional import linear
import torch.nn.init as init
from allennlp.nn.activations import Activation
from ..utils.utils import batched_linear
from typing import Union
from transformers.models.bert.modeling_bert import BertOutput, BertSelfOutput
class Adapter(nn.Module):
def __init__(self, in_feats: int, adapter_size: int = 64, bias: bool = True,
activation: str = 'gelu', train_layer_norm: bool = True,
dynamic_weights: bool = False):
super(Adapter, self).__init__()
self.in_feats = in_feats
self.adapter_size = adapter_size
self.bias = bias
self.weight_down = None
self.weight_up = None
self.bias_down = None
self.bias_up = None
self.act_fn = Activation.by_name(activation)()
self.train_layer_norm = train_layer_norm
self.dynamic_weights = dynamic_weights
if not dynamic_weights:
self.weight_down = nn.Parameter(torch.Tensor(adapter_size, in_feats))
self.weight_up = nn.Parameter(torch.Tensor(in_feats, adapter_size))
if bias:
self.bias_down = nn.Parameter(torch.zeros(adapter_size))
self.bias_up = nn.Parameter(torch.zeros(in_feats))
self.reset_parameters()
def forward(self, hidden_states: torch.Tensor):
linear_func = batched_linear if self.weight_down.dim() == 3 else linear
x = linear_func(hidden_states, self.weight_down, self.bias_down)
x = self.act_fn(x)
x = linear_func(x, self.weight_up, self.bias_up)
x = x + hidden_states
return x
def reset_parameters(self) -> None:
init.normal_(self.weight_down, std=1e-3)
init.normal_(self.weight_up, std=1e-3)
def update_weight(
self,
weight_name,
weight: torch.Tensor,
) -> None:
object.__setattr__(self, weight_name, weight)
class AdapterBertLayer(nn.Module):
"""
    Replaces BertOutput and BertSelfOutput.
"""
def __init__(self, base: Union[BertOutput, BertSelfOutput], adapter: Adapter):
super().__init__()
self.base = base
self.adapter = adapter
for param in base.LayerNorm.parameters():
param.requires_grad = adapter.train_layer_norm
def forward(self, hidden_states, input_tensor):
hidden_states = self.base.dense(hidden_states)
hidden_states = self.base.dropout(hidden_states)
hidden_states = self.adapter(hidden_states)
hidden_states = self.base.LayerNorm(hidden_states + input_tensor)
return hidden_states
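# Minimal usage sketch (shapes are illustrative): the adapter is a residual
# bottleneck, so its output shape matches its input shape.
#
#     adapter = Adapter(in_feats=768, adapter_size=64)
#     x = torch.randn(2, 10, 768)          # (batch, seq_len, hidden)
#     assert adapter(x).shape == x.shape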
| 0.980469 | high |
setup.py | florian-reck/ExaDatabase | 0 | 12747823 | #!/usr/bin/env python
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name='ExasolDatabaseConnector',
version="0.1.7",
license="MIT",
maintainer="<NAME>",
maintainer_email="<EMAIL>",
description="Exasol database connector class written in python",
long_description="Exasol database connector classes using ODBC or WebSockets",
url='https://github.com/florian-reck/ExaDatabase',
packages=[
'ExasolDatabaseConnector',
'ExasolDatabaseConnector.ExaDatabaseAbstract',
'ExasolDatabaseConnector.ExaWebSockets',
'ExasolDatabaseConnector.ExaOdbcDriver'
],
install_requires=[
'websocket_client',
'rsa',
'EXASOL-DB-API',
'pyodbc'
]
)
| 0.628906 | high |
healthysnake/levels.py | jimah/healthysnake | 0 | 4595311 | HARD = 2
SOFT = 1
def level_as_string(level):
if level == HARD:
return "HARD"
return "SOFT"
| 0.523438 | high |
source/sagemaker/sagemaker_graph_entity_resolution/dgl_entity_resolution/graph.py | awslabs/sagemaker-graph-entity-resolution | 8 | 4455407 | import os
import dgl
from data import *
def construct_graph(training_dir, training_edges, transient_nodes, transient_edges, website_nodes, website_edges):
def _full_path(f):
return os.path.join(training_dir, f)
edgelists, id_to_node = {}, {}
# parse and add training edges
training_edgelist, id_to_node = parse_edgelist(_full_path(training_edges), id_to_node,
source_type='user', sink_type='user')
print("Read user -> user training edgelist from {}".format(_full_path(training_edges)))
edgelists[('user', 'same_entity', 'user')] = training_edgelist
edgelists[('user', 'same_entity_reversed', 'user')] = [(b, a) for a, b in training_edgelist]
# parse and add transient edges
transient_edgelist, id_to_node = parse_edgelist(_full_path(transient_edges), id_to_node,
source_type='user', sink_type='website')
print("Read user -> website edgelist from {}".format(_full_path(transient_edges)))
edgelists[('user', 'visits', 'website')] = transient_edgelist
edgelists[('website', 'visited_by', 'user')] = [(b, a) for a, b in transient_edgelist]
# parse and add website edges
website_edgelist, id_to_node = parse_edgelist(_full_path(website_edges), id_to_node,
source_type='website', sink_type='domain')
print("Read website -> domain edgelist from {}".format(_full_path(website_edges)))
edgelists[('website', 'owned_by', 'domain')] = website_edgelist
edgelists[('domain', 'owns', 'website')] = [(b, a) for a, b in website_edgelist]
# get user features
user_features, new_nodes = get_features(id_to_node['user'], _full_path(transient_nodes))
print("Got user features from {}".format(_full_path(transient_nodes)))
# add self relation to user nodes
edgelists[('user', 'self_relation', 'user')] = [(u, u) for u in id_to_node['user'].values()]
# get website features
website_features = get_website_features(id_to_node['website'], _full_path(website_nodes))
print("Got website features from {}".format(_full_path(website_nodes)))
g = dgl.heterograph(edgelists)
print("Constructed heterograph with the following metagraph structure: Node types {}, Edge types{}".format(
g.ntypes, g.canonical_etypes))
print("Number of user nodes : {}".format(g.number_of_nodes('user')))
reverse_etypes = {'same_entity': 'same_entity_reversed',
'same_entity_reversed': 'same_entity',
'visits': 'visited_by',
'visited_by': 'visits',
'owned_by': 'owns',
'owns': 'owned_by'
}
print(g)
return g, (user_features, website_features), id_to_node, reverse_etypes
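# Hypothetical invocation (the file names are placeholders, not part of the
# repo), matching the parameter order of construct_graph above:
#
#     g, (user_feats, site_feats), id_to_node, rev_etypes = construct_graph(
#         "data/", "train_edges.csv", "user_features.csv",
#         "user_website_edges.csv", "website_features.csv",
#         "website_domain_edges.csv")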
| 0.957031 | high |
addons/website/models/website_visitor.py | SHIVJITH/Odoo_Machine_Test | 0 | 7718575 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import datetime, timedelta
import uuid
import pytz
from odoo import fields, models, api, _
from odoo.addons.base.models.res_partner import _tz_get
from odoo.exceptions import UserError
from odoo.tools.misc import _format_time_ago
from odoo.http import request
from odoo.osv import expression
class WebsiteTrack(models.Model):
_name = 'website.track'
_description = 'Visited Pages'
_order = 'visit_datetime DESC'
_log_access = False
visitor_id = fields.Many2one('website.visitor', ondelete="cascade", index=True, required=True, readonly=True)
page_id = fields.Many2one('website.page', index=True, ondelete='cascade', readonly=True)
url = fields.Text('Url', index=True)
visit_datetime = fields.Datetime('Visit Date', default=fields.Datetime.now, required=True, readonly=True)
class WebsiteVisitor(models.Model):
_name = 'website.visitor'
_description = 'Website Visitor'
_order = 'last_connection_datetime DESC'
name = fields.Char('Name')
access_token = fields.Char(required=True, default=lambda x: uuid.uuid4().hex, index=False, copy=False, groups='base.group_website_publisher')
active = fields.Boolean('Active', default=True)
website_id = fields.Many2one('website', "Website", readonly=True)
partner_id = fields.Many2one('res.partner', string="Linked Partner", help="Partner of the last logged in user.")
partner_image = fields.Binary(related='partner_id.image_1920')
# localisation and info
country_id = fields.Many2one('res.country', 'Country', readonly=True)
country_flag = fields.Char(related="country_id.image_url", string="Country Flag")
lang_id = fields.Many2one('res.lang', string='Language', help="Language from the website when visitor has been created")
timezone = fields.Selection(_tz_get, string='Timezone')
email = fields.Char(string='Email', compute='_compute_email_phone')
mobile = fields.Char(string='Mobile Phone', compute='_compute_email_phone')
# Visit fields
visit_count = fields.Integer('Number of visits', default=1, readonly=True, help="A new visit is considered if last connection was more than 8 hours ago.")
website_track_ids = fields.One2many('website.track', 'visitor_id', string='Visited Pages History', readonly=True)
visitor_page_count = fields.Integer('Page Views', compute="_compute_page_statistics", help="Total number of visits on tracked pages")
page_ids = fields.Many2many('website.page', string="Visited Pages", compute="_compute_page_statistics", groups="website.group_website_designer")
page_count = fields.Integer('# Visited Pages', compute="_compute_page_statistics", help="Total number of tracked page visited")
last_visited_page_id = fields.Many2one('website.page', string="Last Visited Page", compute="_compute_last_visited_page_id")
# Time fields
create_date = fields.Datetime('First connection date', readonly=True)
last_connection_datetime = fields.Datetime('Last Connection', default=fields.Datetime.now, help="Last page view date", readonly=True)
time_since_last_action = fields.Char('Last action', compute="_compute_time_statistics", help='Time since last page view. E.g.: 2 minutes ago')
is_connected = fields.Boolean('Is connected ?', compute='_compute_time_statistics', help='A visitor is considered as connected if his last page view was within the last 5 minutes.')
_sql_constraints = [
('access_token_unique', 'unique(access_token)', 'Access token should be unique.'),
('partner_uniq', 'unique(partner_id)', 'A partner is linked to only one visitor.'),
]
@api.depends('name')
def name_get(self):
res = []
for record in self:
res.append((
record.id,
record.name or _('Website Visitor #%s', record.id)
))
return res
@api.depends('partner_id.email_normalized', 'partner_id.mobile', 'partner_id.phone')
def _compute_email_phone(self):
results = self.env['res.partner'].search_read(
[('id', 'in', self.partner_id.ids)],
['id', 'email_normalized', 'mobile', 'phone'],
)
mapped_data = {
result['id']: {
'email_normalized': result['email_normalized'],
'mobile': result['mobile'] if result['mobile'] else result['phone']
} for result in results
}
for visitor in self:
visitor.email = mapped_data.get(visitor.partner_id.id, {}).get('email_normalized')
visitor.mobile = mapped_data.get(visitor.partner_id.id, {}).get('mobile')
@api.depends('website_track_ids')
def _compute_page_statistics(self):
results = self.env['website.track'].read_group(
[('visitor_id', 'in', self.ids), ('url', '!=', False)], ['visitor_id', 'page_id', 'url'], ['visitor_id', 'page_id', 'url'], lazy=False)
mapped_data = {}
for result in results:
visitor_info = mapped_data.get(result['visitor_id'][0], {'page_count': 0, 'visitor_page_count': 0, 'page_ids': set()})
visitor_info['visitor_page_count'] += result['__count']
visitor_info['page_count'] += 1
if result['page_id']:
visitor_info['page_ids'].add(result['page_id'][0])
mapped_data[result['visitor_id'][0]] = visitor_info
for visitor in self:
visitor_info = mapped_data.get(visitor.id, {'page_count': 0, 'visitor_page_count': 0, 'page_ids': set()})
visitor.page_ids = [(6, 0, visitor_info['page_ids'])]
visitor.visitor_page_count = visitor_info['visitor_page_count']
visitor.page_count = visitor_info['page_count']
@api.depends('website_track_ids.page_id')
def _compute_last_visited_page_id(self):
results = self.env['website.track'].read_group([('visitor_id', 'in', self.ids)],
['visitor_id', 'page_id', 'visit_datetime:max'],
['visitor_id', 'page_id'], lazy=False)
mapped_data = {result['visitor_id'][0]: result['page_id'][0] for result in results if result['page_id']}
for visitor in self:
visitor.last_visited_page_id = mapped_data.get(visitor.id, False)
@api.depends('last_connection_datetime')
def _compute_time_statistics(self):
for visitor in self:
visitor.time_since_last_action = _format_time_ago(self.env, (datetime.now() - visitor.last_connection_datetime))
visitor.is_connected = (datetime.now() - visitor.last_connection_datetime) < timedelta(minutes=5)
def _check_for_message_composer(self):
""" Purpose of this method is to actualize visitor model prior to contacting
him. Used notably for inheritance purpose, when dealing with leads that
could update the visitor model. """
return bool(self.partner_id and self.partner_id.email)
def _prepare_message_composer_context(self):
return {
'default_model': 'res.partner',
'default_res_id': self.partner_id.id,
'default_partner_ids': [self.partner_id.id],
}
def action_send_mail(self):
self.ensure_one()
if not self._check_for_message_composer():
raise UserError(_("There are no contact and/or no email linked to this visitor."))
visitor_composer_ctx = self._prepare_message_composer_context()
compose_form = self.env.ref('mail.email_compose_message_wizard_form', False)
compose_ctx = dict(
default_use_template=False,
default_composition_mode='comment',
)
compose_ctx.update(**visitor_composer_ctx)
return {
'name': _('Contact Visitor'),
'type': 'ir.actions.act_window',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'views': [(compose_form.id, 'form')],
'view_id': compose_form.id,
'target': 'new',
'context': compose_ctx,
}
def _get_visitor_from_request(self, force_create=False):
""" Return the visitor as sudo from the request if there is a visitor_uuid cookie.
It is possible that the partner has changed or has disconnected.
        In that case the cookie is still referencing the old visitor and needs to be replaced
        with the one of the returned visitor. """
        # This function can be called from JSON routes, e.g. by the mobile app.
        # When called by the mobile app, no uid is set on the jsonRequest env.
# In case of multi db, _env is None on request, and request.env unbound.
if not request:
return None
Visitor = self.env['website.visitor'].sudo()
visitor = Visitor
access_token = request.httprequest.cookies.get('visitor_uuid')
if access_token:
visitor = Visitor.with_context(active_test=False).search([('access_token', '=', access_token)])
# Prefetch access_token and other fields. Since access_token has a restricted group and we access
# a non restricted field (partner_id) first it is not fetched and will require an additional query to be retrieved.
visitor.access_token
if not self.env.user._is_public():
partner_id = self.env.user.partner_id
if not visitor or visitor.partner_id and visitor.partner_id != partner_id:
# Partner and no cookie or wrong cookie
visitor = Visitor.with_context(active_test=False).search([('partner_id', '=', partner_id.id)])
elif visitor and visitor.partner_id:
# Cookie associated to a Partner
visitor = Visitor
if visitor and not visitor.timezone:
tz = self._get_visitor_timezone()
if tz:
visitor._update_visitor_timezone(tz)
if not visitor and force_create:
visitor = self._create_visitor()
return visitor
def _handle_webpage_dispatch(self, response, website_page):
# get visitor. Done here to avoid having to do it multiple times in case of override.
visitor_sudo = self._get_visitor_from_request(force_create=True)
if request.httprequest.cookies.get('visitor_uuid', '') != visitor_sudo.access_token:
expiration_date = datetime.now() + timedelta(days=365)
response.set_cookie('visitor_uuid', visitor_sudo.access_token, expires=expiration_date)
self._handle_website_page_visit(website_page, visitor_sudo)
def _handle_website_page_visit(self, website_page, visitor_sudo):
""" Called on dispatch. This will create a website.visitor if the http request object
is a tracked website page or a tracked view. Only on tracked elements to avoid having
too much operations done on every page or other http requests.
Note: The side effect is that the last_connection_datetime is updated ONLY on tracked elements."""
url = request.httprequest.url
website_track_values = {
'url': url,
'visit_datetime': datetime.now(),
}
if website_page:
website_track_values['page_id'] = website_page.id
domain = [('page_id', '=', website_page.id)]
else:
domain = [('url', '=', url)]
visitor_sudo._add_tracking(domain, website_track_values)
if visitor_sudo.lang_id.id != request.lang.id:
visitor_sudo.write({'lang_id': request.lang.id})
def _add_tracking(self, domain, website_track_values):
""" Add the track and update the visitor"""
domain = expression.AND([domain, [('visitor_id', '=', self.id)]])
last_view = self.env['website.track'].sudo().search(domain, limit=1)
if not last_view or last_view.visit_datetime < datetime.now() - timedelta(minutes=30):
website_track_values['visitor_id'] = self.id
self.env['website.track'].create(website_track_values)
self._update_visitor_last_visit()
def _create_visitor(self):
""" Create a visitor. Tracking is added after the visitor has been created."""
country_code = request.session.get('geoip', {}).get('country_code', False)
country_id = request.env['res.country'].sudo().search([('code', '=', country_code)], limit=1).id if country_code else False
vals = {
'lang_id': request.lang.id,
'country_id': country_id,
'website_id': request.website.id,
}
tz = self._get_visitor_timezone()
if tz:
vals['timezone'] = tz
if not self.env.user._is_public():
vals['partner_id'] = self.env.user.partner_id.id
vals['name'] = self.env.user.partner_id.name
return self.sudo().create(vals)
def _link_to_partner(self, partner, update_values=None):
""" Link visitors to a partner. This method is meant to be overridden in
order to propagate, if necessary, partner information to sub records.
:param partner: partner used to link sub records;
:param update_values: optional values to update visitors to link;
"""
vals = {'name': partner.name}
if update_values:
vals.update(update_values)
self.write(vals)
def _link_to_visitor(self, target, keep_unique=True):
""" Link visitors to target visitors, because they are linked to the
same identity. Purpose is mainly to propagate partner identity to sub
records to ease database update and decide what to do with "duplicated".
        This method is meant to be overridden in order to implement some specific
behavior linked to sub records of duplicate management.
:param target: main visitor, target of link process;
:param keep_unique: if True, find a way to make target unique;
"""
# Link sub records of self to target partner
if target.partner_id:
self._link_to_partner(target.partner_id)
# Link sub records of self to target visitor
self.website_track_ids.write({'visitor_id': target.id})
if keep_unique:
self.unlink()
return target
def _cron_archive_visitors(self):
delay_days = int(self.env['ir.config_parameter'].sudo().get_param('website.visitor.live.days', 30))
deadline = datetime.now() - timedelta(days=delay_days)
visitors_to_archive = self.env['website.visitor'].sudo().search([('last_connection_datetime', '<', deadline)])
visitors_to_archive.write({'active': False})
def _update_visitor_timezone(self, timezone):
""" We need to do this part here to avoid concurrent updates error. """
try:
with self.env.cr.savepoint():
query_lock = "SELECT * FROM website_visitor where id = %s FOR NO KEY UPDATE NOWAIT"
self.env.cr.execute(query_lock, (self.id,), log_exceptions=False)
query = "UPDATE website_visitor SET timezone = %s WHERE id = %s"
self.env.cr.execute(query, (timezone, self.id), log_exceptions=False)
except Exception:
pass
def _update_visitor_last_visit(self):
""" We need to do this part here to avoid concurrent updates error. """
try:
with self.env.cr.savepoint():
query_lock = "SELECT * FROM website_visitor where id = %s FOR NO KEY UPDATE NOWAIT"
self.env.cr.execute(query_lock, (self.id,), log_exceptions=False)
date_now = datetime.now()
query = "UPDATE website_visitor SET "
if self.last_connection_datetime < (date_now - timedelta(hours=8)):
query += "visit_count = visit_count + 1,"
query += """
active = True,
last_connection_datetime = %s
WHERE id = %s
"""
self.env.cr.execute(query, (date_now, self.id), log_exceptions=False)
except Exception:
pass
def _get_visitor_timezone(self):
tz = request.httprequest.cookies.get('tz') if request else None
if tz in pytz.all_timezones:
return tz
elif not self.env.user._is_public():
return self.env.user.tz
else:
return None
| 0.992188 | high |
actionviews/exceptions.py | lig/django-actionviews | 2 | 10981767 | from django.http.response import HttpResponseBase
from django.core.exceptions import ImproperlyConfigured
class ActionResponse(Exception):
def __init__(self, response):
if not isinstance(response, HttpResponseBase):
raise ImproperlyConfigured(
'ActionResponse argument must be HttpResponseBase instance')
self.response = response
| 0.519531 | medium |
exopy/utils/widgets/qt_clipboard.py | jerjohste/exopy | 16 | 9240799 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2015-2018 by Exopy Authors, see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Implements a wrapper around the PyQt clipboard that handles Python objects
using pickle.
This has been ported from Enthought TraitsUI.
"""
import warnings
from pickle import dumps, load, loads, PickleError
from io import BytesIO
from enaml.qt import QtCore, QtWidgets
from atom.api import Atom, Property
class PyMimeData(QtCore.QMimeData):
""" The PyMimeData wraps a Python instance as MIME data.
Parameters
----------
data :
Object to copy to the clipboard.
pickle :
Whether or not to pickle the data.
"""
# The MIME type for instances.
MIME_TYPE = 'application/exopy-qt4-instance'
    NOPICKLE_MIME_TYPE = 'application/exopy-qt4-instance-nopickle'
def __init__(self, data=None, pickle=False):
QtCore.QMimeData.__init__(self)
# Keep a local reference to be returned if possible.
self._local_instance = data
if pickle:
if data is not None:
# We may not be able to pickle the data.
try:
pdata = dumps(data, -1)
# This format (as opposed to using a single sequence)
# allows the type to be extracted without unpickling
# the data.
self.setData(self.MIME_TYPE, dumps(data.__class__) + pdata)
except (PickleError, TypeError):
# if pickle fails, still try to create a draggable
warnings.warn(("Could not pickle dragged object %s, " +
"using %s mimetype instead") % (repr(data),
self.NOPICKLE_MIME_TYPE), RuntimeWarning)
self.setData(self.NOPICKLE_MIME_TYPE,
str(id(data)).encode('utf8'))
else:
self.setData(self.NOPICKLE_MIME_TYPE, str(id(data)).encode('utf8'))
@classmethod
def coerce(cls, md):
""" Wrap a QMimeData or a python object to a PyMimeData.
"""
# See if the data is already of the right type. If it is then we know
# we are in the same process.
if isinstance(md, cls):
return md
if isinstance(md, PyMimeData):
# If it is a PyMimeData, migrate all its data, subclasses should
# override this method if it doesn't do things correctly for them
data = md.instance()
nmd = cls()
nmd._local_instance = data
for format in md.formats():
nmd.setData(format, md.data(format))
elif isinstance(md, QtCore.QMimeData):
# If it is a QMimeData, migrate all its data
nmd = cls()
for format in md.formats():
nmd.setData(format, md.data(format))
else:
# By default, don't try to pickle the coerced object
pickle = False
# See if the data is a list, if so check for any items which are
# themselves of the right type. If so, extract the instance and
# track whether we should pickle.
# HINT lists should suffice for now, but may want other containers
if isinstance(md, list):
md = [item.instance() if isinstance(item, PyMimeData) else item
for item in md]
# Arbitrary python object, wrap it into PyMimeData
nmd = cls(md, pickle)
return nmd
def instance(self):
""" Return the instance.
"""
if self._local_instance is not None:
return self._local_instance
if not self.hasFormat(self.MIME_TYPE):
# We have no pickled python data defined.
return None
        # pickle needs a binary stream, so wrap the raw clipboard bytes
        io = BytesIO(bytes(self.data(self.MIME_TYPE)))
try:
# Skip the type.
load(io)
# Recreate the instance.
return load(io)
except PickleError:
pass
return None
def instance_type(self):
""" Return the type of the instance.
"""
if self._local_instance is not None:
return self._local_instance.__class__
try:
if self.hasFormat(self.MIME_TYPE):
return loads(bytes(self.data(self.MIME_TYPE)))
except PickleError:
pass
return None
def local_paths(self):
""" The list of local paths from url list, if any.
"""
ret = []
for url in self.urls():
if url.scheme() == 'file':
ret.append(url.toLocalFile())
return ret
class _Clipboard(Atom):
""" The _Clipboard class provides a wrapper around the PyQt clipboard.
"""
# --- Members definitions -------------------------------------------------
#: The instance on the clipboard (if any).
instance = Property()
#: Set if the clipboard contains an instance.
has_instance = Property()
#: The type of the instance on the clipboard (if any).
instance_type = Property()
# --- Instance property methods -------------------------------------------
@instance.getter
def _instance_getter(self):
""" The instance getter.
"""
md = PyMimeData.coerce(QtWidgets.QApplication.clipboard().mimeData())
if md is None:
return None
return md.instance()
@instance.setter
def _instance_setter(self, data):
""" The instance setter.
"""
QtWidgets.QApplication.clipboard().setMimeData(PyMimeData(data))
@has_instance.getter
def _has_instance_getter(self):
""" The has_instance getter.
"""
clipboard = QtWidgets.QApplication.clipboard()
return clipboard.mimeData().hasFormat(PyMimeData.MIME_TYPE)
@instance_type.getter
def _instance_type_getter(self):
""" The instance_type getter.
"""
md = PyMimeData.coerce(QtWidgets.QApplication.clipboard().mimeData())
if md is None:
return None
return md.instance_type()
#: The singleton clipboard instance.
CLIPBOARD = _Clipboard()
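# Minimal usage sketch (assumes a QApplication is running, since the Qt
# clipboard only exists within one):
#
#     CLIPBOARD.instance = {'answer': 42}   # copy a Python object
#     data = CLIPBOARD.instance             # read it back in-process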
| 0.992188 | high |
src/datasets/__init__.py | abheesht17/super-pixels | 10 | 1505007 | <reponame>abheesht17/super-pixels<filename>src/datasets/__init__.py
# from src.datasets.hf_image_classification import HFImageClassification
# from src.datasets.tv_mnist import TvMnist
# from src.datasets.tg_mnist_slic import TgMnistSlic
from src.datasets.cifar import Cifar
from src.datasets.cifar_img_slic import CifarImgSlic
from src.datasets.cifar_slic import CifarSlic
from src.datasets.covid import Covid
from src.datasets.covid_img_slic import CovidImgSlic
from src.datasets.covid_slic import CovidSlic
from src.datasets.lfw import LFW
from src.datasets.lfw_img_slic import LFWImgSlic
from src.datasets.lfw_slic import LFWSlic
from src.datasets.mnist import Mnist
from src.datasets.mnist_img_slic import MnistImgSlic
from src.datasets.mnist_slic import MnistSlic
from src.datasets.socofing import Socofing
from src.datasets.socofing_img_slic import SocofingImgSlic
from src.datasets.socofing_slic import SocofingSlic
| 0.443359 | high |