id (stringlengths 1-8) | text (stringlengths 6-1.05M) | dataset_id (stringclasses: 1 value)
---|---|---|
6679629 | <gh_stars>0
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, BooleanField
from wtforms.validators import DataRequired, Length, Email, EqualTo, ValidationError
import email_validator
from app.models import User
import pickle
class RegistrationForm(FlaskForm):
username = StringField('UserName',
validators=[DataRequired(), Length(min=2, max=20)])
email = StringField('Email',
validators=[DataRequired(), Email()])
phone = StringField('Phone', validators=[DataRequired(), Length(10)])
password = PasswordField('Password',
validators=[DataRequired()])
confirmPassword = PasswordField('Confirm Password',
validators=[DataRequired(), EqualTo('password')])
submit = SubmitField('Sign Up')
def validate_username(self, username):
user = User.query.filter_by(username=username.data).first()
if user:
raise ValidationError(
'That username is taken. Please choose another username')
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user:
raise ValidationError(
'That email is taken. Please choose another email')
class LoginForm(FlaskForm):
email = StringField('Email',
validators=[DataRequired(), Email()])
password = PasswordField('Password',
validators=[DataRequired()])
remember = BooleanField('Remember Me')
submit = SubmitField('Login')
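# --- Hedged usage sketch (not part of the original file) ---
# A minimal illustration of how these forms might be wired into a Flask view.
# The app object, secret key, route name and template are assumptions, not code
# from this repository, so the snippet is kept behind a __main__ guard.
if __name__ == '__main__':
    from flask import Flask, redirect, render_template, url_for

    app = Flask(__name__)
    app.config['SECRET_KEY'] = 'example-secret-key'

    @app.route('/register', methods=['GET', 'POST'])
    def register():
        form = RegistrationForm()
        if form.validate_on_submit():  # also runs the validate_* hooks above
            return redirect(url_for('register'))
        return render_template('register.html', form=form)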
| StarcoderdataPython |
75674 | import os
from nxpy.nxfile import NXFile
def test_nxnode_resolve():
node = NXFile(os.path.join(os.path.dirname(__file__), 'map.nx')).get_root_node().resolve(
"Tile/grassySoil.img/bsc/0")
assert node.width == 90
node2 = NXFile(os.path.join(os.path.dirname(__file__), 'map.nx')).get_root_node().get_child('Tile').resolve(
"grassySoil.img/bsc/0")
assert node2.width == 90
node3 = NXFile(os.path.join(os.path.dirname(__file__), 'map.nx')).get_root_node().get_child('Tile').resolve(
"grassySoil.img/bsc").get_child('0')
assert node3.width == 90
| StarcoderdataPython |
150318 | <filename>colossus/apps/notifications/admin.py
from django.contrib import admin
from colossus.apps.notifications import models as m
admin.site.register(m.Notification)
| StarcoderdataPython |
12805451 | # This code adapted from https://github.com/python-pillow/Pillow/issues/4644 to resolve an issue
# described in https://github.com/python-pillow/Pillow/issues/4640
#
# There is a long-standing issue with the Pillow library that messes up GIF transparency by replacing the
# transparent pixels with black pixels (among other issues) when the GIF is saved using PIL.Image.save().
# This code works around the issue and allows us to properly generate transparent GIFs.
from typing import Tuple, List, Union
from collections import defaultdict
from random import randrange
from itertools import chain
from PIL.Image import Image
class TransparentAnimatedGifConverter(object):
_PALETTE_SLOTSET = set(range(256))
def __init__(self, img_rgba: Image, alpha_threshold: int = 0):
self._img_rgba = img_rgba
self._alpha_threshold = alpha_threshold
def _process_pixels(self):
"""Set the transparent pixels to the color 0."""
self._transparent_pixels = set(
idx
for idx, alpha in enumerate(
self._img_rgba.getchannel(channel="A").getdata()
)
if alpha <= self._alpha_threshold
)
def _set_parsed_palette(self):
"""Parse the RGB palette color `tuple`s from the palette."""
palette = self._img_p.getpalette()
self._img_p_used_palette_idxs = set(
idx
for pal_idx, idx in enumerate(self._img_p_data)
if pal_idx not in self._transparent_pixels
)
self._img_p_parsedpalette = dict(
(idx, tuple(palette[idx * 3 : idx * 3 + 3]))
for idx in self._img_p_used_palette_idxs
)
def _get_similar_color_idx(self):
"""Return a palette index with the closest similar color."""
old_color = self._img_p_parsedpalette[0]
dict_distance = defaultdict(list)
for idx in range(1, 256):
color_item = self._img_p_parsedpalette[idx]
if color_item == old_color:
return idx
distance = sum(
    (
        abs(old_color[0] - color_item[0]),  # Red
        abs(old_color[1] - color_item[1]),  # Green
        abs(old_color[2] - color_item[2]),  # Blue
    )
)
dict_distance[distance].append(idx)
return dict_distance[sorted(dict_distance)[0]][0]
def _remap_palette_idx_zero(self):
"""Since the first color is used in the palette, remap it."""
free_slots = self._PALETTE_SLOTSET - self._img_p_used_palette_idxs
new_idx = free_slots.pop() if free_slots else self._get_similar_color_idx()
self._img_p_used_palette_idxs.add(new_idx)
self._palette_replaces["idx_from"].append(0)
self._palette_replaces["idx_to"].append(new_idx)
self._img_p_parsedpalette[new_idx] = self._img_p_parsedpalette[0]
del self._img_p_parsedpalette[0]
def _get_unused_color(self) -> tuple:
"""Return a color for the palette that does not collide with any other already in the palette."""
used_colors = set(self._img_p_parsedpalette.values())
while True:
new_color = (randrange(256), randrange(256), randrange(256))
if new_color not in used_colors:
return new_color
def _process_palette(self):
"""Adjust palette to have the zeroth color set as transparent. Basically, get another palette
index for the zeroth color."""
self._set_parsed_palette()
if 0 in self._img_p_used_palette_idxs:
self._remap_palette_idx_zero()
self._img_p_parsedpalette[0] = self._get_unused_color()
def _adjust_pixels(self):
"""Convert the pixels into their new values."""
if self._palette_replaces["idx_from"]:
trans_table = bytearray.maketrans(
bytes(self._palette_replaces["idx_from"]),
bytes(self._palette_replaces["idx_to"]),
)
self._img_p_data = self._img_p_data.translate(trans_table)
for idx_pixel in self._transparent_pixels:
self._img_p_data[idx_pixel] = 0
self._img_p.frombytes(data=bytes(self._img_p_data))
def _adjust_palette(self):
"""Modify the palette in the new `Image`."""
unused_color = self._get_unused_color()
final_palette = chain.from_iterable(
self._img_p_parsedpalette.get(x, unused_color) for x in range(256)
)
self._img_p.putpalette(data=final_palette)
def process(self) -> Image:
"""Return the processed mode `P` `Image`."""
self._img_p = self._img_rgba.convert(mode="P")
self._img_p_data = bytearray(self._img_p.tobytes())
self._palette_replaces = dict(idx_from=list(), idx_to=list())
self._process_pixels()
self._process_palette()
self._adjust_pixels()
self._adjust_palette()
self._img_p.info["transparency"] = 0
self._img_p.info["background"] = 0
return self._img_p
def _create_animated_gif(
images: List[Image], durations: Union[int, List[int]]
) -> Tuple[Image, dict]:
"""If the image is a GIF, create an its thumbnail here."""
save_kwargs = dict()
new_images: List[Image] = []
for frame in images:
thumbnail = frame.copy() # type: Image
thumbnail_rgba = thumbnail.convert(mode="RGBA")
thumbnail_rgba.thumbnail(size=frame.size, reducing_gap=3.0)
converter = TransparentAnimatedGifConverter(img_rgba=thumbnail_rgba)
thumbnail_p = converter.process() # type: Image
new_images.append(thumbnail_p)
output_image = new_images[0]
save_kwargs.update(
format="GIF",
save_all=True,
optimize=False,
append_images=new_images[1:],
duration=durations,
disposal=2, # Other disposals don't work
loop=0,
)
return output_image, save_kwargs
def save_transparent_gif(
images: List[Image], durations: Union[int, List[int]], save_file
):
"""Creates a transparent GIF, adjusting to avoid transparency issues that are present in the PIL library
Note that this does NOT work for partial alpha. The partial alpha gets discarded and replaced by solid colors.
Parameters:
images: a list of PIL Image objects that compose the GIF frames
durations: an int or List[int] that describes the animation durations for the frames of this GIF
save_file: A filename (string), pathlib.Path object or file object. (This parameter corresponds
to and is passed to the PIL.Image.save() method.)
Returns:
Image - The PIL Image object (after first saving the image to the specified target)
"""
root_frame, save_args = _create_animated_gif(images, durations)
root_frame.save(save_file, **save_args)
return root_frame
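# --- Hedged usage sketch (not in the original source) ---
# Demonstrates how save_transparent_gif() might be called; the two generated
# frames and the output filename below are illustrative assumptions only.
if __name__ == "__main__":
    from PIL import Image as PILImage

    frames = []
    for color in ((255, 0, 0, 255), (0, 0, 255, 255)):
        # Fully transparent background with an opaque colored square in the middle.
        frame = PILImage.new("RGBA", (64, 64), (0, 0, 0, 0))
        for x in range(16, 48):
            for y in range(16, 48):
                frame.putpixel((x, y), color)
        frames.append(frame)

    save_transparent_gif(frames, durations=200, save_file="example_transparent.gif")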
| StarcoderdataPython |
6554654 | import tensorflow as tf
class XTensorBoardCallback(tf.keras.callbacks.TensorBoard):
"""
TensorBoard logging with a learning rate added.
"""
def __init__(self, log_dir, **kwargs):
super().__init__(log_dir=log_dir, **kwargs)
def on_epoch_end(self, epoch, logs=None):
    logs = logs or {}  # guard against logs being None
    logs.update({"lr": tf.keras.backend.get_value(self.model.optimizer.lr)})
    super().on_epoch_end(epoch, logs)
def on_batch_end(self, batch, logs=None):
    logs = logs or {}  # guard against logs being None
    logs.update({"lr": tf.keras.backend.get_value(self.model.optimizer.lr)})
    super().on_batch_end(batch, logs)
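# --- Hedged usage sketch (not in the original source) ---
# Shows how the callback might be attached to a Keras model; the tiny model,
# the random data and the "./logs" directory are illustrative assumptions.
if __name__ == "__main__":
    import numpy as np

    model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
    model.compile(optimizer=tf.keras.optimizers.Adam(1e-3), loss="mse")
    x = np.random.rand(32, 4).astype("float32")
    y = np.random.rand(32, 1).astype("float32")
    model.fit(x, y, epochs=2, callbacks=[XTensorBoardCallback(log_dir="./logs")])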
| StarcoderdataPython |
3503032 | <filename>setup.py
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
with open("requirements.txt") as f:
install_requires = f.read().strip().split("\n")
# get version from __version__ variable in phytex_pharma_custom/__init__.py
from phytex_pharma_custom import __version__ as version
setup(
name="phytex_pharma_custom",
version=version,
description="Phytex Pharma Customizations",
author="MostafaFekry",
author_email="<EMAIL>",
packages=find_packages(),
zip_safe=False,
include_package_data=True,
install_requires=install_requires
)
| StarcoderdataPython |
5136987 | import sys
import configparser
import os
import subprocess
SAMBA_CONFIG_PARSER = configparser.ConfigParser()
SAMBA_FILE_PATH = '../smb.conf'
SAMBA_CONFIG_PARSER.read(SAMBA_FILE_PATH)
BLOCKED_SECTIONS = ["global", "homes", "printers", "print$"]
SECTION_NAME = sys.argv[2]
OPTION_NAME = sys.argv[3]
def section_exist(section_name):
""" Takes section_name(str) argument and returns True if the given name exist """
return SAMBA_CONFIG_PARSER.has_section(section_name)
def option_exist(section_name, option_name):
""" Checks section_name(str) and option_name(str) and returns True if option exist """
return SAMBA_CONFIG_PARSER.has_option(section_name, option_name)
def delete_option(section_name, option_name):
## Use % formatting instead of .format because the {:s} braces cannot be handled properly inside the sed expression
sed_script = "sed -i '/^\[%s\]/,/^\[.*$/{/%s.*/d}' %s" % (SECTION_NAME, OPTION_NAME, SAMBA_FILE_PATH)
subprocess.Popen(sed_script, shell=True)
def make_bash_call(stage_name):
bash = ['python3.7', __file__, stage_name, sys.argv[2], sys.argv[3]]
output = subprocess.Popen(bash, stdout=subprocess.PIPE).stdout
return output.read().decode('utf-8')
def automate():
before_output = make_bash_call('before')
if before_output != "ok\n":
print(before_output)
exit()
print('before ok')
make_bash_call('run')
after_output = make_bash_call('after')
if after_output != 'ok\n':
print(after_output)
exit()
print('after ok')
def before():
if not section_exist(SECTION_NAME):
print('Section : {:s} does not exist'.format(SECTION_NAME))
exit()
if not option_exist(SECTION_NAME, OPTION_NAME):
print('Option : {:s} does not exist'.format(OPTION_NAME))
exit()
if OPTION_NAME == 'path':
print('Option : {:s} cannot be deleted'.format(OPTION_NAME))
exit()
print('ok')
def run():
delete_option(SECTION_NAME, OPTION_NAME)
def after():
if option_exist(SECTION_NAME, OPTION_NAME):
print("Option : {:s} is not exist".format(OPTION_NAME))
exit()
print('ok')
if __name__ == "__main__":
globals()[sys.argv[1]]()
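# --- Hedged usage note (not in the original source) ---
# The script expects a stage name, a section and an option on the command line,
# e.g. (the share name and option below are only illustrative):
#   python3.7 <this_script>.py automate media "read only"
# "automate" chains the before/run/after stages defined above; each stage can
# also be invoked on its own for debugging.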
| StarcoderdataPython |
3420528 | def main():
print_header()
name = get_user_name()
print("Hello {}".format(name))
def print_header():
print("--------------------------------")
print(" THE MAIN APP ")
print("--------------------------------")
def get_user_name():
return input("What is your name? ")
if __name__ == '__main__':
main()
| StarcoderdataPython |
68096 | from gensim.models import FastText
from gensim.models import word2vec
import logging
import argparse
def fasttext_train(tool):
assert tool == 'fasttext' or tool == 'word2vec', 'you can choose: [word2vec, fasttext]'
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
sentences = word2vec.LineSentence(u'../corpus/scicite/all.txt')
if tool == 'fasttext':
_model = FastText(sentences, size=200, iter=30, min_count=2, word_ngrams=3)
else:
_model = word2vec.Word2Vec(sentences, size=100, iter=10, min_count=2)
_model.save('../reference/wc_model/output')
if __name__ == '__main__':
parse = argparse.ArgumentParser(description="choose the way to train word vectors")
parse.add_argument("--model", required=True, type=str, help="[word2vec, fasttext]")
args = parse.parse_args()
fasttext_train(args.model)
import pickle
if args.model == 'fasttext':
model = FastText.load("../reference/wc_model/output")
else:
model = word2vec.Word2Vec.load("../reference/wc_model/output")
dic = {}
for i in model.wv.vocab:
dic[i] = model.wv[i].tolist()
pickle.dump(dic, open("../reference/output/wv.pkl", 'wb')) | StarcoderdataPython |
332852 | <filename>infer.py
import tensorflow as tf
import numpy as np
import PIL.Image
import PIL.ImageEnhance
import glob
import os
import argparse
def get_args():
my_parser = argparse.ArgumentParser()
my_parser.add_argument('-p','--folder_path',type=str,help='Path to folder of frames',required=True)
my_parser.add_argument('-m','--model_path',type=str,help='Path to weights file',required=True)
my_parser.add_argument('-o','--output_path',type=str,help='Path to output folder',required=True)
return my_parser
def ssim(y_true,y_pred):
return tf.image.ssim(y_true,y_pred,max_val=1.0)
def psnr(y_true,y_pred):
return tf.image.psnr(y_true,y_pred,max_val=1.0)
def process_image_SR(impath):
im = PIL.Image.open(impath)
im = im.convert('YCbCr') # For single channel inference
im = np.asanyarray(im)
y = np.expand_dims(im[:,:,0],-1)/255 # Normalizing input
uv = np.asanyarray(im)[:,:,1:]
#print("uv:",uv.shape,"| y:",y.shape)
return (y,uv)
if __name__ == "__main__":
physical_devices = tf.config.experimental.list_physical_devices("GPU")
for i in physical_devices:
tf.config.experimental.set_memory_growth(i, True)
opt = get_args().parse_args()
try:
os.mkdir(opt.output_path)
except:
pass
ARCNN = tf.keras.models.load_model(opt.model_path,custom_objects={"ssim":ssim,"psnr":psnr})
print("Looking at folder",os.path.join(opt.folder_path,"*"))
flist = np.asarray(glob.glob(os.path.join(opt.folder_path,"*")))
count = 0
total = len(flist)
print("Processing",total,"files")
prog = tf.keras.utils.Progbar(total,unit_name='frames')
div = len(flist)/8
rem = (len(flist)%8) *-1
print("rem:",rem)
if(rem==0):
rem_files = []
flist = flist.reshape(int(len(flist)/8),8)
else:
rem_files = flist[rem:]
flist = flist[:rem].reshape(int(len(flist)/8),8)
print("Batched Files:",len(flist)*4,"| rem =",len(rem_files))
for i in flist:
im_y = []
im_uv = []
for j in range(8):
y,uv = process_image_SR(i[j])
im_y.append(y)
im_uv.append(uv)
im_y = np.stack(im_y,axis=0)
#print(im_y.shape)
outs = ARCNN.predict(im_y)
for y,uv,j in zip(outs,im_uv,range(8)):
count += 1
out = y.reshape(im_y.shape[1], im_y.shape[2])
y_pred = np.stack([out*255,uv[:,:,0],uv[:,:,1]],axis=-1)
y_pred= np.clip(y_pred,0,255).astype('uint8')
y_pred = PIL.Image.fromarray(y_pred,mode='YCbCr').convert('RGB')
fname = "out"+ i[j].split("/")[-1]
converter = PIL.ImageEnhance.Color(y_pred)
y_pred = converter.enhance(1.4)
y_pred.save(opt.output_path+fname)
prog.update(count)
#print("=",end="")
print(count,"Files done")
for i in rem_files:
im_y,im_uv = process_image_SR(i)
#print(im_y.shape)
im_y = np.expand_dims(im_y,0)
outs = ARCNN.predict(im_y)
count += 1
out = outs.reshape(im_y.shape[1], im_y.shape[2]) #Removing batch dimensions
y_pred = np.stack([out*255,im_uv[:,:,0],im_uv[:,:,1]],axis=-1)
y_pred= np.clip(y_pred,0,255).astype('uint8')
y_pred = PIL.Image.fromarray(y_pred,mode='YCbCr').convert('RGB')
fname = "out"+ i.split("/")[-1]
converter = PIL.ImageEnhance.Color(y_pred)
y_pred = converter.enhance(1.4)
y_pred.save(opt.output_path+fname)
prog.update(count)
print("\nDone")
| StarcoderdataPython |
8092869 | <reponame>slaclab/central_node_ioc<filename>CentralNodeApp/srcDisplay/fault_panel.py
from os import path
from pydm import Display
from fault_list_item import FaultListItem
import argparse
class FaultPanel(Display):
def __init__(self, fault_list=[], parent=None, args=[]):
super(FaultPanel, self).__init__(parent=parent)
parsed_args = self.parse_args(args)
if parsed_args.fault_list:
fault_list = self.fault_list_from_file(parsed_args.fault_list)
for fault in fault_list:
if not fault[0].startswith('#'):
list_item = FaultListItem(fault_description=fault[1], fault_pv=fault[0], parent=self)
self.faultList.layout().addWidget(list_item)
self.faultList.layout().addStretch(0)
self.scrollArea.setMinimumWidth(self.faultList.minimumSizeHint().width())
def parse_args(self, args):
parser = argparse.ArgumentParser()
parser.add_argument('--list', dest='fault_list', help='File containing a list of fault PV names to use.')
parsed_args, _unknown_args = parser.parse_known_args(args)
print(parsed_args)
return parsed_args
def fault_list_from_file(self, filename):
if not path.isabs(filename):
filename = path.join(path.dirname(__file__), filename)
lines = []
with open(filename) as f:
lines = f.readlines()
return [l.strip().split('|') for l in lines]
def ui_filename(self):
return 'fault-panel.ui'
def ui_filepath(self):
return path.join(path.dirname(path.realpath(__file__)), self.ui_filename())
intelclass = FaultPanel
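# --- Hedged usage note (not in the original source) ---
# fault_list_from_file() expects one pipe-separated "<fault PV>|<description>"
# entry per line; lines starting with '#' are skipped when the panel is built.
# An illustrative --list file (the PV names are made up) could look like:
#   MPS:FAULT:BPM01|BPM orbit interlock
#   MPS:FAULT:TEMP02|Magnet temperature fault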
| StarcoderdataPython |
1772172 | <reponame>neeravjain24/shogun
#!/usr/bin/env python
import shogun as sg
traindat = '../data/fm_train_real.dat'
testdat = '../data/fm_test_real.dat'
parameter_list=[[traindat,testdat, 1.0],[traindat,testdat, 5.0]]
def kernel_exponential (train_fname=traindat,test_fname=testdat, tau_coef=1.0):
from shogun import kernel, distance, CSVFile
feats_train=sg.features(CSVFile(train_fname))
feats_test=sg.features(CSVFile(test_fname))
distance = sg.distance('EuclideanDistance')
kernel = sg.kernel('ExponentialKernel', width=tau_coef, distance=distance, cache_size=10)
kernel.init(feats_train, feats_train)
km_train=kernel.get_kernel_matrix()
kernel.init(feats_train, feats_test)
km_test=kernel.get_kernel_matrix()
return km_train,km_test,kernel
if __name__=='__main__':
print('Exponential')
kernel_exponential(*parameter_list[0])
| StarcoderdataPython |
1763859 | print(detection_predictions[0]['labels'].size()[0], 'objects detected !')
detection_predictions[0] | StarcoderdataPython |
8129026 | <gh_stars>1-10
#!/usr/bin/env python
import os
import sys
from setuptools import setup, find_packages
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
with open('README') as readmeFile:
long_desc = readmeFile.read()
setup(
name='miette',
version='1.5',
description='Miette is a light-weight Microsoft Office documents reader',
long_description=long_desc,
url='https://github.com/rembish/Miette',
author='<NAME>',
author_email='<EMAIL>',
packages=find_packages(),
install_requires=['cfb'],
license='BSD 2-Clause license',
classifiers=(
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Text Processing'
),
)
| StarcoderdataPython |
3505177 | import pygame
pygame.init()
display_width = 800
display_height = 600
gameDisplay = pygame.display.set_mode((display_width,display_height))
pygame.display.set_caption('A bit Racey')
black = (0,0,0)
white = (255,255,255)
clock = pygame.time.Clock()
crashed = False
carImg_right = pygame.image.load('/Users/vladislavdevlikamov/Desktop/game/game/hh1.png')
carImg_left = pygame.image.load('/Users/vladislavdevlikamov/Desktop/game/game/hh2.png')
bg = pygame.image.load('/Users/vladislavdevlikamov/Desktop/game/game/tr.png')
apple_image = pygame.image.load('/Users/vladislavdevlikamov/Desktop/game/game/apple.png')
mushroom_image = pygame.image.load('/Users/vladislavdevlikamov/Desktop/game/game/mushroom.png')
bird_left = pygame.image.load('/Users/vladislavdevlikamov/Desktop/game/game/bird.png')
bird_right = pygame.image.load('/Users/vladislavdevlikamov/Desktop/game/game/bird1.png')
trampoline = pygame.image.load('/Users/vladislavdevlikamov/Desktop/game/game/trampoline.png')
banan_image = pygame.image.load('/Users/vladislavdevlikamov/Desktop/game/game/banana.png')
class person(object):
def __init__(self, x, y, car_speed, isJump, jumpCount, left):
self.x = x
self.y = y
self.car_speed = car_speed
self.isJump = isJump
self.isTramp = False
self.jumpCount = jumpCount
self.jumpTramp = jumpCount
self.left = left
self.hitbox = pygame.Rect(self.x, self.y + 8, 61, 43)
self.visible = True
self.score = 0
def draw(self):
if self.left == True:
if self.visible == True:
gameDisplay.blit(carImg_left, (self.x,self.y))
else:
if self.visible == True:
gameDisplay.blit(carImg_right, (self.x,self.y))
self.hitbox = pygame.Rect(self.x, self.y + 8, 61, 43)
#pygame.draw.rect(gameDisplay, (255, 0, 0), self.hitbox, 2)
class apple(object):
def __init__(self, x, y, visible):
self.x = x
self.y = y
self.visible = visible
self.hitbox = pygame.Rect(self.x, self.y - 1, 61, 63)
def draw(self):
if self.visible == True:
gameDisplay.blit(apple_image, (self.x, self.y))
self.hitbox = pygame.Rect(self.x, self.y - 1, 61, 63)
#pygame.draw.rect(gameDisplay, (255, 0, 0), self.hitbox, 2)
class mushroom(object):
def __init__(self, x, y, visible):
self.x = x
self.y = y
self.visible = visible
self.hitbox = (self.x, self.y + 2, 61, 54)
def draw(self):
if self.visible == True:
gameDisplay.blit(mushroom_image, (self.x, self.y))
self.hitbox = (self.x, self.y + 2, 61, 54)
#pygame.draw.rect(gameDisplay, (255, 0, 0), self.hitbox, 2)
class banana(object):
def __init__(self, x, y, visible):
self.x = x
self.y = y
self.visible = visible
self.hitbox = (self.x, self.y + 2, 61, 54)
def draw(self):
if self.visible == True:
gameDisplay.blit(banan_image, (self.x, self.y))
self.hitbox = (self.x, self.y + 2, 61, 54)
#pygame.draw.rect(gameDisplay, (255, 0, 0), self.hitbox, 2)
class bird(object):
def __init__(self, x, y, speed):
self.x = x
self.y = y
self.neg = 1
self.speed = speed
self.hitbox = pygame.Rect(self.x, self.y + 2, 61, 54)
self.visible = True
def draw(self):
if self.x + self.speed*self.neg >= display_width - 60:
self.neg = -1
if self.x + self.speed*self.neg <= 0:
self.neg = 1
self.x += self.speed*self.neg
if self.neg == 1:
if self.visible == True:
gameDisplay.blit(bird_right, (self.x, self.y))
else:
if self.visible == True:
gameDisplay.blit(bird_left, (self.x, self.y))
self.hitbox = pygame.Rect(self.x, self.y + 2, 61, 54)
#pygame.draw.rect(gameDisplay, (255, 0, 0), self.hitbox, 2)
class tramp(object):
def __init__(self, x, y):
self.x = x
self.y = y
self.visible = True
self.hitbox = pygame.Rect(self.x, self.y + 2, 63, 24)
def draw(self):
gameDisplay.blit(trampoline, (self.x, self.y))
#pygame.draw.rect(gameDisplay, (255, 0, 0), self.hitbox, 2)
def end():
one_apple.visible = False
second_apple.visible = False
one_mushroom.visible = False
second_mushroom.visible = False
yozhyk.visible = False
bird.visible = False
text = font.render("End Game", 1, (0, 0, 0))
gameDisplay.blit(text, (350, 350))
yozhyk = person(10, display_height*0.7, 10, False, 10, False)
one_apple = apple(250, display_height*0.7, True)
second_apple = apple(600, display_height*0.7, True)
one_mushroom = mushroom(100, display_height*0.7, True)
second_mushroom = mushroom(400, display_height*0.7, True)
banan = banana(500, display_height*0.1, True)
tramp = tramp(500, display_height*0.7 + 8)
bird = bird(10, display_height*0.4, 10)
score = 0
font = pygame.font.SysFont("comicsansms", 30, bold=True, italic=False)
while not crashed:
for event in pygame.event.get():
if event.type == pygame.QUIT:
crashed = True
keys = pygame.key.get_pressed()
if keys[pygame.K_LEFT] and yozhyk.x > yozhyk.car_speed:
yozhyk.left = True
yozhyk.x -= yozhyk.car_speed
if keys[pygame.K_RIGHT] and yozhyk.x < display_width - yozhyk.car_speed - 60:
yozhyk.left = False
yozhyk.x += yozhyk.car_speed
if not(yozhyk.isJump):
if keys[pygame.K_SPACE]:
yozhyk.isJump = True
else:
if yozhyk.jumpCount >= -10:
neg = 1
if yozhyk.jumpCount < 0:
neg = -1
yozhyk.y -= (yozhyk.jumpCount ** 2) * 0.5 * neg
yozhyk.jumpCount -= 1
else:
yozhyk.isJump = False
yozhyk.jumpCount = 10
if yozhyk.isTramp:
if yozhyk.jumpTramp >= -10:
neg = 1
if yozhyk.jumpTramp < 0:
neg = -1
yozhyk.y -= (yozhyk.jumpTramp ** 2) * neg
yozhyk.jumpTramp -= 1
else:
yozhyk.isTramp = False
yozhyk.jumpTramp = 10
gameDisplay.fill(white)
gameDisplay.blit(bg, (0, 0))
if yozhyk.hitbox.colliderect(one_apple.hitbox) and one_apple.visible == True:
one_apple.visible = False
yozhyk.score += 1
if yozhyk.hitbox.colliderect(second_apple.hitbox) and second_apple.visible == True:
second_apple.visible = False
yozhyk.score += 1
if yozhyk.hitbox.colliderect(banan.hitbox) and banan.visible == True:
banan.visible = False
yozhyk.score += 1
if yozhyk.hitbox.colliderect(one_mushroom.hitbox) and one_mushroom.visible == True:
one_mushroom.visible = False
yozhyk.score -= 1
if yozhyk.hitbox.colliderect(second_mushroom.hitbox) and second_mushroom.visible == True:
second_mushroom.visible = False
yozhyk.score -= 1
if yozhyk.hitbox.colliderect(bird.hitbox):
yozhyk.score -= 1
if yozhyk.hitbox.colliderect(tramp.hitbox):
yozhyk.isTramp = True
one_apple.draw()
second_apple.draw()
one_mushroom.draw()
second_mushroom.draw()
banan.draw()
yozhyk.draw()
bird.draw()
tramp.draw()
text = font.render("Score: " + str(yozhyk.score), 1, (0, 0, 0))
gameDisplay.blit(text, (650, 10))
pygame.display.update()
clock.tick(60)
pygame.quit()
quit() | StarcoderdataPython |
281415 | <reponame>TPei/jawbone_visualizer<filename>helper/date_parser.py
__author__ = 'TPei'
import datetime
def parse_date(date):
"""
parse a date string of the form
December 06, 2014 at 05:17PM
:param date:
:return: datetime.datetime
"""
# used to get month no
# '' at beginning of list so that month count starts at 1
months = ['', 'January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']
# split by whitespace
date = date.split()
# get hour from hh:mm[AM/PM] substring
hour = int(date[4][0:2])
# convert to 24h format (12:xxAM becomes 0:xx, PM hours other than 12 get +12)
if date[4][5:7] == 'AM' and hour == 12:
    hour = 0
elif date[4][5:7] == 'PM' and hour != 12:
    hour += 12
# create datetime object
date_time = datetime.datetime(int(date[2]), months.index(date[0]), int(date[1][:-1]), hour, int(date[4][3:5]))
return date_time
def parse_hm_time(hm_string):
"""
parse hm string looking like this
"7h 20m" or "12m"
:param hm_string:
:return: datetime.timedelta
"""
hm = hm_string.split()
if len(hm) == 1:
return datetime.timedelta(minutes=int(hm[0][:-1]))
return datetime.timedelta(hours=int(hm[0][:-1]), minutes=int(hm[1][:-1]))
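# Hedged addition (not in the original source): a matching check for
# parse_hm_time, in the same style as the quick test below.
def test_parse_hm_time():
    assert parse_hm_time('7h 20m') == datetime.timedelta(hours=7, minutes=20)
    assert parse_hm_time('12m') == datetime.timedelta(minutes=12)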
# quick and dirty unittest for myself
def test_parse_date():
date = 'December 06, 2014 at 12:17AM'
assert parse_date(date) == datetime.datetime(2014, 12, 6, 00, 17)
date = 'December 06, 2014 at 12:17PM'
assert parse_date(date) == datetime.datetime(2014, 12, 6, 12, 17)
date = 'December 06, 2014 at 01:17PM'
assert parse_date(date) == datetime.datetime(2014, 12, 6, 13, 17) | StarcoderdataPython |
6615342 | <reponame>Cam2337/snap-python
import snap
G = snap.GenPrefAttach(100000, 3)
snap.PlotInDegDistr(G, "pref-attach", "PrefAttach(100000, 3) in Degree")
| StarcoderdataPython |
84502 | <reponame>bryancatanzaro/copperhead
#!/usr/bin/env python
#
# Copyright 2008-2012 NVIDIA Corporation
# Copyright 2009-2010 University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
from distribute_setup import use_setuptools
use_setuptools()
from setuptools import setup
from distutils.errors import CompileError
from distutils.command.build_py import build_py as BuildPyCommand
from distutils.command.build_ext import build_ext as BuildExtCommand
from distutils.command.clean import clean as CleanCommand
from distutils.cmd import Command
import distutils.extension
import subprocess
import os.path
import os
import fnmatch
try:
subprocess.check_call(['scons'], shell=True)
except subprocess.CalledProcessError:
raise CompileError("Error while building Python Extensions")
def remove_head_directories(path, heads=1):
def explode_path(path):
head, tail = os.path.split(path)
return explode_path(head) + [tail] \
if head and head != path else [head or tail]
exploded_path = explode_path(path)
if len(exploded_path) < (heads+1):
return ''
else:
return os.path.join(*exploded_path[heads:])
build_product_patterns = ['*.h', '*.hpp', '*.so', '*.dll', '*.dylib']
build_products = []
build_path = 'stage'
for pattern in build_product_patterns:
for root, dirs, files in os.walk(build_path):
dir_path = remove_head_directories(root, 2)
for filename in fnmatch.filter(files, pattern):
build_products.append(os.path.join(dir_path, filename))
setup(name="copperhead",
version="0.2a2",
description="Data Parallel Python",
long_description="Copperhead is a Data Parallel Python dialect, with runtime code generation and execution for CUDA, OpenMP, and TBB.",
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Programming Language :: Python',
'Topic :: Software Development :: Compilers',
'Topic :: Software Development :: Code Generators',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS :: MacOS X'],
zip_safe=False,
author="<NAME>, <NAME>",
author_email="<EMAIL>, <EMAIL>",
license = "Apache 2.0",
package_dir = {'':'stage'}, # packages are under stage
packages=['copperhead', 'copperhead.runtime', 'copperhead.compiler'],
install_requires=["codepy>=2012.1.2"],
package_data={
'copperhead': build_products,
},
url="http://github.com/copperhead"
)
| StarcoderdataPython |
1667111 | /home/runner/.cache/pip/pool/7d/6d/ab/ac311c5a2b70a57850205b558ae0b62441c3c75a085d742c8fa6067792 | StarcoderdataPython |
4976001 | from phi.flow import *
from functools import partial
# Simulation parameters
k0 = 0.15 # smallest wavenumber in the box
x = 128 # x size
y = 128 # y size
dt = control(0.05) # timestep
scale = 1 / 100
# Physical Parameters
c1 = 0.1 # adiabatic coefficient [0, None]
# Numerical Parameters
arakawa_coeff = 1 # Poisson bracket coefficient
kappa_coeff = 1 # background flow dy coefficient
nu = 0.0005 # coefficient of hyperdiffusion
N = 3 # laplace**(2*N) diffusion
# Derived
L = 2 * np.pi / k0 # Box Size
dx = L / x # Grid Spacing
nu = (-1) ** (N + 1) * nu # Smoothing coefficient & sign
# Packing
PARAMS = dict(c1=c1, nu=nu, N=N, arak=arakawa_coeff, kappa=kappa_coeff)
def get_phi(plasma, guess=None):
"""Fourier Poisson Solve for Phi"""
centered_omega = plasma.omega # - math.mean(plasma.omega)
phi = math.fourier_poisson(centered_omega.values, plasma.dx)
# phi = math.solve_linear(
# math.laplace, plasma.omega.values, guess, math.LinearSolve, callback=None
# )
return CenteredGrid(
phi, bounds=plasma.omega.bounds, extrapolation=plasma.omega.extrapolation,
)
def diffuse(arr, N, dx):
if not isinstance(N, int):
print(f"{N} {type(N)}")
for i in range(int(N)):
arr = field.laplace(arr) # math.fourier_laplace(arr, dx)
return arr
def step_gradient_2d(plasma, phi, N=0, nu=0, c1=0, arak=0, kappa=0, dt=0):
"""time gradient of model"""
# Calculate Gradients
grad_phi = field.spatial_gradient(phi, stack_dim=channel("gradient"))
dx_p, dy_p = grad_phi.values.gradient.unstack_spatial("x,y")
# Get difference
diff = phi - plasma.density
# Step 2.1: New Omega.
o = c1 * diff
if arak:
o += -arak * math._nd._periodic_2d_arakawa_poisson_bracket(
phi.values, plasma.omega.values, plasma.dx # TODO: Fix dx
)
if nu and N:
o += nu * diffuse(plasma.omega, N=N, dx=plasma.dx)
# Step 2.2: New Density.
n = c1 * diff
if arak:
n += -arak * math._nd._periodic_2d_arakawa_poisson_bracket(
phi.values, plasma.density.values, plasma.dx
)
if kappa:
n += -kappa * dy_p
if nu:
n += nu * diffuse(plasma.density, N=N, dx=plasma.dx)
return math.Dict(density=n, omega=o, phi=phi, age=plasma.age + dt, dx=plasma.dx)
def rk4_step(dt, physics_params, gradient_func=step_gradient_2d, **kwargs):
gradient_func = partial(gradient_func, **physics_params)
yn = math.Dict(**kwargs) # given dict to Namespace
in_age = yn.age
# Only in the first iteration recalculate phi
if yn.age == 0:
pn = get_phi(yn, guess=yn.phi)
else:
pn = yn.phi
k1 = dt * gradient_func(yn, pn, dt=0)
p1 = get_phi(yn + k1 * 0.5) # , guess=pn)
k2 = dt * gradient_func(yn + k1 * 0.5, p1, dt=dt / 2)
p2 = get_phi(yn + k2 * 0.5) # , guess=pn+p1*0.5)
k3 = dt * gradient_func(yn + k2 * 0.5, p2, dt=dt / 2)
p3 = get_phi(yn + k3) # , guess=pn+p2*0.5)
k4 = dt * gradient_func(yn + k3, p3, dt=dt)
y1 = yn + (k1 + 2 * k2 + 2 * k3 + k4) / 6
phi = get_phi(y1) # , guess=pn+p3*0.5)
return math.Dict(density=y1.density, omega=y1.omega, phi=phi, age=in_age + dt, dx=yn.dx)
domain = dict(extrapolation=extrapolation.PERIODIC, bounds=Box(x=L, y=L))
density = CenteredGrid(math.random_normal(spatial(x=x, y=y)), **domain) * scale
omega = CenteredGrid(math.random_normal(spatial(x=x, y=y)), **domain) * scale
phi = CenteredGrid(math.random_normal(spatial(x=x, y=y)), **domain) * scale
age = 0
rk4 = partial(rk4_step, physics_params=PARAMS)
print(
"\n".join(
[
f"x,y: {x}x{y}",
f"L: {L}",
f"c1: {c1}",
f"dt: {dt}",
f"N: {N}",
f"nu: {nu}",
f"scale: {scale}",
]
)
)
for _ in view(density, omega, phi, play=False, framerate=10, namespace=globals()).range():
new_state = rk4(dt, density=density, omega=omega, phi=phi, age=age, dx=dx)
density, omega, phi = new_state["density"], new_state["omega"], new_state["phi"]
age += dt
| StarcoderdataPython |
9784794 | # -*- coding: utf-8 -*-
"""Console script for BitcoinExchangeFH."""
import logging
import click
import yaml
from befh import Configuration, Runner
LOGGER = logging.getLogger(__name__)
@click.command()
@click.option(
'--configuration',
help='Configuration file.',
required=True)
@click.option(
'--debug',
default=False,
is_flag=True,
help='Debug mode.')
@click.option(
'--cold',
default=False,
is_flag=True,
help='Cold start mode.')
@click.option(
'--archive',
default=None,
help='Manually archive the tables.',
required=False)
def main(configuration, debug, cold, archive):
"""Console script for BitcoinExchangeFH."""
if debug:
level = logging.DEBUG
else:
level = logging.INFO
logging.basicConfig(
level=level,
format='%(asctime)s %(levelname)s %(message)s')
configuration = open(configuration, 'r')
configuration = yaml.load(configuration, Loader=yaml.FullLoader)
LOGGER.debug('Configuration:\n%s', configuration)
configuration = Configuration(configuration)
runner = Runner(
config=configuration,
is_debug=debug,
is_cold=cold)
runner.load()
if archive is not None:
runner.archive(date=archive)
else:
runner.run()
if __name__ == "__main__":
main()
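# --- Hedged usage note (not in the original source) ---
# Example invocations (the script and configuration file names are illustrative):
#   python <this_script>.py --configuration configuration.yaml --debug
#   python <this_script>.py --configuration configuration.yaml --archive <date>
# The second form only archives the tables for the given date instead of
# running the feed handler.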
| StarcoderdataPython |
200684 | <filename>queue-based-ingestion/python-sam/src/api/authorizer.py
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
# Authorizer code based on https://github.com/awslabs/aws-apigateway-lambda-authorizer-blueprints/blob/master/blueprints/python/api-gateway-authorizer-python.py
# Token validation code based on https://github.com/awslabs/aws-support-tools/blob/master/Cognito/decode-verify-jwt/decode-verify-jwt.py
import os
import re
import json
import time
import urllib.request
from jose import jwk, jwt
from jose.utils import base64url_decode
is_cold_start = True
keys = {}
user_pool_id = os.getenv('USER_POOL_ID', None)
app_client_id = os.getenv('APPLICATION_CLIENT_ID', None)
admin_group_name = os.getenv('ADMIN_GROUP_NAME', None)
def validate_token(token, region):
global keys, is_cold_start, user_pool_id, app_client_id
if is_cold_start:
keys_url = f'https://cognito-idp.{region}.amazonaws.com/{user_pool_id}/.well-known/jwks.json'
with urllib.request.urlopen(keys_url) as f:
response = f.read()
keys = json.loads(response.decode('utf-8'))['keys']
is_cold_start = False
# get the kid from the headers prior to verification
headers = jwt.get_unverified_headers(token)
kid = headers['kid']
# search for the kid in the downloaded public keys
key_index = -1
for i in range(len(keys)):
if kid == keys[i]['kid']:
key_index = i
break
if key_index == -1:
print('Public key not found in jwks.json')
return False
# construct the public key
public_key = jwk.construct(keys[key_index])
# get the last two sections of the token,
# message and signature (encoded in base64)
message, encoded_signature = str(token).rsplit('.', 1)
# decode the signature
decoded_signature = base64url_decode(encoded_signature.encode('utf-8'))
# verify the signature
if not public_key.verify(message.encode("utf8"), decoded_signature):
print('Signature verification failed')
return False
print('Signature successfully verified')
# since we passed the verification, we can now safely
# use the unverified claims
claims = jwt.get_unverified_claims(token)
# additionally we can verify the token expiration
if time.time() > claims['exp']:
print('Token is expired')
return False
# and the Audience (use claims['client_id'] if verifying an access token)
if claims['aud'] != app_client_id:
print('Token was not issued for this audience')
return False
decoded_jwt = jwt.decode(token, key=keys[key_index], audience=app_client_id)
return decoded_jwt
def lambda_handler(event, context):
global admin_group_name
print(event)
# print("Client token: " + event['authorizationToken'])
# print("Method ARN: " + event['methodArn'])
tmp = event['methodArn'].split(':')
api_gateway_arn_tmp = tmp[5].split('/')
region = tmp[3]
aws_account_id = tmp[4]
# validate the incoming token
validated_decoded_token = validate_token(event['authorizationToken'], region)
if not validated_decoded_token:
raise Exception('Unauthorized')
principal_id = validated_decoded_token['sub']
# initialize the policy
policy = AuthPolicy(principal_id, aws_account_id)
policy.restApiId = api_gateway_arn_tmp[0]
policy.region = region
policy.stage = api_gateway_arn_tmp[1]
# allow all public resources/methods explicitly
policy.allow_method(HttpVerb.POST, "submit-job-request")
policy.allow_method(HttpVerb.POST, "submit-job-request/*")
policy.allow_method(HttpVerb.GET, "job-status/*")
# Check the Cognito group entry for Admin.
# Assuming here that the Admin group has always higher /precedence
# if 'cognito:groups' in validated_decoded_token and validated_decoded_token['cognito:groups'][0] == admin_group_name:
# add administrative privileges
# policy.allow_method(HttpVerb.DELETE, "locations")
# policy.allow_method(HttpVerb.DELETE, "locations/*")
# policy.allow_method(HttpVerb.PUT, "locations")
# policy.allow_method(HttpVerb.PUT, "locations/*")
# Finally, build the policy
auth_response = policy.build()
return auth_response
class HttpVerb:
GET = "GET"
POST = "POST"
PUT = "PUT"
PATCH = "PATCH"
HEAD = "HEAD"
DELETE = "DELETE"
OPTIONS = "OPTIONS"
ALL = "*"
class AuthPolicy(object):
awsAccountId = ""
"""The AWS account id the policy will be generated for. This is used to create the method ARNs."""
principalId = ""
"""The principal used for the policy, this should be a unique identifier for the end user."""
version = "2012-10-17"
"""The policy version used for the evaluation. This should always be '2012-10-17'"""
pathRegex = "^[/.a-zA-Z0-9-\*]+$"
"""The regular expression used to validate resource paths for the policy"""
"""these are the internal lists of allowed and denied methods. These are lists
of objects and each object has 2 properties: A resource ARN and a nullable
conditions statement.
the build method processes these lists and generates the appropriate
statements for the final policy"""
allowMethods = []
denyMethods = []
restApiId = "<<restApiId>>"
""" Replace the placeholder value with a default API Gateway API id to be used in the policy.
Beware of using '*' since it will not simply mean any API Gateway API id, because stars will greedily expand over '/' or other separators.
See https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_resource.html for more details. """
region = "<<region>>"
""" Replace the placeholder value with a default region to be used in the policy.
Beware of using '*' since it will not simply mean any region, because stars will greedily expand over '/' or other separators.
See https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_resource.html for more details. """
stage = "<<stage>>"
""" Replace the placeholder value with a default stage to be used in the policy.
Beware of using '*' since it will not simply mean any stage, because stars will greedily expand over '/' or other separators.
See https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_resource.html for more details. """
def __init__(self, principal, aws_account_id):
self.awsAccountId = aws_account_id
self.principalId = principal
self.allowMethods = []
self.denyMethods = []
def _add_method(self, effect, verb, resource, conditions):
"""Adds a method to the internal lists of allowed or denied methods. Each object in
the internal list contains a resource ARN and a condition statement. The condition
statement can be null."""
if verb != "*" and not hasattr(HttpVerb, verb):
raise NameError("Invalid HTTP verb " + verb + ". Allowed verbs in HttpVerb class")
resource_pattern = re.compile(self.pathRegex)
if not resource_pattern.match(resource):
raise NameError("Invalid resource path: " + resource + ". Path should match " + self.pathRegex)
if resource[:1] == "/":
resource = resource[1:]
resource_arn = ("arn:aws:execute-api:" +
self.region + ":" +
self.awsAccountId + ":" +
self.restApiId + "/" +
self.stage + "/" +
verb + "/" +
resource)
if effect.lower() == "allow":
self.allowMethods.append({
'resourceArn': resource_arn,
'conditions': conditions
})
elif effect.lower() == "deny":
self.denyMethods.append({
'resourceArn': resource_arn,
'conditions': conditions
})
def _get_empty_statement(self, effect):
"""Returns an empty statement object prepopulated with the correct action and the
desired effect."""
statement = {
'Action': 'execute-api:Invoke',
'Effect': effect[:1].upper() + effect[1:].lower(),
'Resource': []
}
return statement
def _get_statement_for_effect(self, effect, methods):
"""This function loops over an array of objects containing a resourceArn and
conditions statement and generates the array of statements for the policy."""
statements = []
if len(methods) > 0:
statement = self._get_empty_statement(effect)
for curMethod in methods:
if curMethod['conditions'] is None or len(curMethod['conditions']) == 0:
statement['Resource'].append(curMethod['resourceArn'])
else:
conditional_statement = self._get_empty_statement(effect)
conditional_statement['Resource'].append(curMethod['resourceArn'])
conditional_statement['Condition'] = curMethod['conditions']
statements.append(conditional_statement)
statements.append(statement)
return statements
def allow_all_methods(self):
"""Adds a '*' allow to the policy to authorize access to all methods of an API"""
self._add_method("Allow", HttpVerb.ALL, "*", [])
def deny_all_methods(self):
"""Adds a '*' allow to the policy to deny access to all methods of an API"""
self._add_method("Deny", HttpVerb.ALL, "*", [])
def allow_method(self, verb, resource):
"""Adds an API Gateway method (Http verb + Resource path) to the list of allowed
methods for the policy"""
self._add_method("Allow", verb, resource, [])
def deny_method(self, verb, resource):
"""Adds an API Gateway method (Http verb + Resource path) to the list of denied
methods for the policy"""
self._add_method("Deny", verb, resource, [])
def allow_method_with_conditions(self, verb, resource, conditions):
"""Adds an API Gateway method (Http verb + Resource path) to the list of allowed
methods and includes a condition for the policy statement. More on AWS policy
conditions here: http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements.html#Condition"""
self._add_method("Allow", verb, resource, conditions)
def deny_method_with_conditions(self, verb, resource, conditions):
"""Adds an API Gateway method (Http verb + Resource path) to the list of denied
methods and includes a condition for the policy statement. More on AWS policy
conditions here: http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements.html#Condition"""
self._add_method("Deny", verb, resource, conditions)
def build(self):
"""Generates the policy document based on the internal lists of allowed and denied
conditions. This will generate a policy with two main statements for the effect:
one statement for Allow and one statement for Deny.
Methods that includes conditions will have their own statement in the policy."""
if ((self.allowMethods is None or len(self.allowMethods) == 0) and
(self.denyMethods is None or len(self.denyMethods) == 0)):
raise NameError("No statements defined for the policy")
policy = {
'principalId': self.principalId,
'policyDocument': {
'Version': self.version,
'Statement': []
}
}
policy['policyDocument']['Statement'].extend(self._get_statement_for_effect("Allow", self.allowMethods))
policy['policyDocument']['Statement'].extend(self._get_statement_for_effect("Deny", self.denyMethods))
return policy
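# --- Hedged usage sketch (not in the original source) ---
# Builds a policy document outside of Lambda; the principal, account id,
# API id, region and stage values are made-up placeholders.
if __name__ == "__main__":
    demo_policy = AuthPolicy("example-principal", "123456789012")
    demo_policy.restApiId = "abc123defg"
    demo_policy.region = "us-east-1"
    demo_policy.stage = "dev"
    demo_policy.allow_method(HttpVerb.GET, "job-status/*")
    print(json.dumps(demo_policy.build(), indent=2))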
| StarcoderdataPython |
5160494 | <gh_stars>1-10
#!/usr/bin/env python3
import sys
import re
import mpmath as mp
mp.dps=250
mp.mp.dps = 250
if len(sys.argv) != 2:
print("Usage: format_CIAAW.py ciaawfile")
quit(1)
path = sys.argv[1]
atomre = re.compile(r'^(\d+) +(\w\w*) +(\w+) +\[?(\d+)\]?\*? +(.*) *$')
isore = re.compile(r'^(\d+)\*? +(\[?\d.*.*\]?) *$')
brange = re.compile(r'^\[([\d\.]+),([\d\.]+)\].*$')
buncertain = re.compile(r'^([\d\.]+)\((\d+)\)[a-z]*$')
bnum = re.compile(r'^([\d\d]+)$')
atommassline = re.compile(r'^(\d+) +(\w\w*) +(\w+) +(.*) *$')
def NumberStr(n):
# Replace spaces
s = n.replace(' ', '')
# remove "exactly" for the carbon mass
s = s.replace('(exactly)', '')
# if only a number, put it three times
m = bnum.match(s)
if m:
s = "{:<25} {:<25} {:<25}".format(m.group(1), m.group(1), m.group(1))
# if parentheses uncertainty...
m = buncertain.match(s)
if m:
# tricky. duplicate the first part as a string
s2 = m.group(1)
# but replace with all zero
s2 = re.sub(r'\d', '0', s2)
# now replace last characters
l = len(m.group(2))
s2 = s2[:len(s2)-l] + m.group(2)
# convert to a float
serr = mp.mpf(s2)
scenter = mp.mpf(m.group(1))
s = "{:<25} {:<25} {:<25}".format(mp.nstr(scenter, 18), mp.nstr(scenter-serr, 18), mp.nstr(scenter+serr, 18))
# Replace bracketed ranges with parentheses
m = brange.match(s)
if m:
slow = mp.mpf(m.group(1))
shigh = mp.mpf(m.group(2))
smid = (shigh + slow)/mp.mpf("2.0")
s = "{:<25} {:<25} {:<25}".format(mp.nstr(smid, 18), mp.nstr(slow, 18), mp.nstr(shigh, 18))
# just a dash?
if s == "-":
s = "{:<25} {:<25} {:<25}".format(0, 0, 0)
return s
# First 5 lines are comments
filelines = [ x.strip() for x in open(path).readlines() ]
curatom = None
for line in filelines:
matomre = atomre.match(line)
misore = isore.match(line)
matommass = atommassline.match(line)
if matomre:
curatom = "{:<5} {:<5}".format(matomre.group(1), matomre.group(2))
print("{} {:<6} {:<25}".format(curatom, matomre.group(4), NumberStr(matomre.group(5))))
elif misore:
print("{} {:<6} {:<25}".format(curatom, misore.group(1), NumberStr(misore.group(2))))
elif matommass:
curatom = "{:<5} {:<5}".format(matommass.group(1), matommass.group(2))
print("{} {:<25}".format(curatom, NumberStr(matommass.group(4))))
else:
print(line) # comment lines, etc
| StarcoderdataPython |
6667681 | <gh_stars>0
from classes.enemies.Enemy import Enemy
import random
from classes.enemies.BasicEnemy import BasicEnemy
from classes.enemies.GrungeEnemy import GrungeEnemy
class EnemyFactory:
__weak_enemies = [BasicEnemy, GrungeEnemy]
__regular_enemies = []
__strong_enemies = []
def __init__(self, window) -> None:
self.window = window
def __get_random_enemy_for(self, enemy_list: list[Enemy]):
random_index = random.randrange(0, len(enemy_list))
Random_enemy = enemy_list[random_index]
return Random_enemy(self.window)
def get_random_weak_enemy(self) -> Enemy:
random_weak_enemy = self.__get_random_enemy_for(self.__weak_enemies)
return random_weak_enemy | StarcoderdataPython |
9605572 | from abc import ABC
from logging import Logger
from typing import Dict, List, Optional, Union, Any, Iterable
from dacite import from_dict
from data import TimeUtil, LoggingUtil
from data.entity import SigningPolicyEntity, Entity, IndexEntity, PanelEntity, SeasonEntity, EpisodeEntity, \
SeriesEntity, MovieEntity
from data.model import Model, SigningPolicyModel, IndexModel, PanelModel, PanelSchema, SeasonSchema, SeasonModel, \
EpisodeModel, EpisodeSchema, SeriesModel, SeriesSchema, MovieModel, MovieSchema
class CoreMapper(ABC):
_response_key: Optional[str]
_logger: Logger
def __init__(
self,
response_key: Optional[str],
logging_client: LoggingUtil
) -> None:
"""
:param response_key: Optional key used to unpack a response
:param logging_client: Logging utility
"""
self._response_key = response_key
self._logger = logging_client.get_default_logger(__name__)
@classmethod
def _map_to_dict(cls, model: Model) -> Dict:
pass
@classmethod
def _map_to_entity(cls, model: Model) -> Entity:
pass
def to_model(self, response: Union[Dict, Model]) -> Union[Any, Model, List[Model]]:
"""
Produces a model or list of models
:param response: Network client response
:return: Data layer model/s
"""
if isinstance(response, dict) and self._response_key:
return response[self._response_key]
return response
def to_entity(self, model: Union[Model, List[Model]]) -> Union[Entity, List[Entity]]:
"""
Produces a entity or list of entities
:param model: Model to convert to entity
:return: Data layer entity/s
"""
pass
class SigningPolicyMapper(CoreMapper):
def __init__(self, logging_client: LoggingUtil, time_zone_client: TimeUtil) -> None:
super().__init__('signing_policies', logging_client)
self.time_util = time_zone_client
def _map_to_entity(self, model: SigningPolicyModel) -> SigningPolicyEntity:
expires_date_time = self.time_util.as_local_time(model.expires)
expire_time_stamp = self.time_util.from_date_time_to_time_stamp(expires_date_time)
return SigningPolicyEntity(
name=model.name,
path=model.path,
value=model.value,
expires=expire_time_stamp
)
def to_entity(self, model: List[SigningPolicyModel]) -> List[SigningPolicyEntity]:
entities = map(self._map_to_entity, model)
return list(entities)
class IndexMapper(CoreMapper):
def __init__(self, logging_client: LoggingUtil) -> None:
super().__init__('items', logging_client)
@classmethod
def _map_to_entity(cls, model: IndexModel) -> IndexEntity:
return IndexEntity(
prefix=model.prefix,
offset=model.offset,
count=model.count
)
def to_entity(self, model: List[IndexModel]) -> List[IndexEntity]:
entities = map(self._map_to_entity, model)
return list(entities)
class PanelMapper(CoreMapper):
def __init__(self, logging_client: LoggingUtil) -> None:
super().__init__('items', logging_client)
@classmethod
def _map_to_dict(cls, model: Model) -> Dict:
schema = PanelSchema()
return schema.dump(model)
@classmethod
def _map_to_entity(cls, model: PanelModel) -> PanelEntity:
data: Dict = cls._map_to_dict(model)
return from_dict(PanelEntity, data)
def to_entity(self, model: List[PanelModel]) -> List[PanelEntity]:
entities = map(self._map_to_entity, model)
return list(entities)
class SeasonMapper(CoreMapper):
def __init__(self, logging_client: LoggingUtil) -> None:
super().__init__('items', logging_client)
@classmethod
def _map_to_dict(cls, model: Model) -> Dict:
schema = SeasonSchema()
return schema.dump(model)
@classmethod
def _map_to_entity(cls, model: SeasonModel) -> SeasonEntity:
data = cls._map_to_dict(model)
return from_dict(SeasonEntity, data)
def to_entity(self, model: List[SeasonModel]) -> List[SeasonEntity]:
entities = map(self._map_to_entity, model)
return list(entities)
class EpisodeMapper(CoreMapper):
def __init__(self, logging_client: LoggingUtil) -> None:
super().__init__('items', logging_client)
@classmethod
def _map_to_dict(cls, model: Model) -> Dict:
schema = EpisodeSchema()
return schema.dump(model)
@classmethod
def _map_to_entity(cls, model: EpisodeModel) -> EpisodeEntity:
data = cls._map_to_dict(model)
return from_dict(EpisodeEntity, data)
def to_entity(self, model: List[EpisodeModel]) -> List[EpisodeEntity]:
entities = map(self._map_to_entity, model)
return list(entities)
class SeriesMapper(CoreMapper):
def __init__(self, logging_client: LoggingUtil) -> None:
super().__init__(None, logging_client)
@classmethod
def _map_to_dict(cls, model: Model) -> Dict:
schema = SeriesSchema()
return schema.dump(model)
@classmethod
def _map_to_entity(cls, model: SeriesModel) -> SeriesEntity:
data = cls._map_to_dict(model)
return from_dict(SeriesEntity, data)
def to_entity(self, model: SeriesModel) -> SeriesEntity:
entity = self._map_to_entity(model)
return entity
class MovieMapper(CoreMapper):
def __init__(self, logging_client: LoggingUtil) -> None:
super().__init__(None, logging_client)
@classmethod
def _map_to_dict(cls, model: Model) -> Dict:
schema = MovieSchema()
return schema.dump(model)
@classmethod
def _map_to_entity(cls, model: MovieModel) -> MovieEntity:
data = cls._map_to_dict(model)
return from_dict(MovieEntity, data)
def to_entity(self, model: MovieModel) -> MovieEntity:
entity = self._map_to_entity(model)
return entity
| StarcoderdataPython |
262901 | <reponame>willtwr/iSiam-TF<filename>datasets/vid.py<gh_stars>1-10
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2017 bily Huazhong University of Science and Technology
#
# Distributed under terms of the MIT license.
"""VID Dataset"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pickle
import numpy as np
def downsample(n_in, n_out, max_frame_dist=1):
# Get a list of frame distance between consecutive frames
max_frame_dist = np.minimum(n_in, max_frame_dist)
possible_frame_dist = range(1, max_frame_dist + 1)
frame_dist = np.random.choice(possible_frame_dist, n_out - 1)
end_to_start_frame_dist = np.sum(frame_dist)
# Check frame dist boundary
possible_max_start_idx = n_in - 1 - end_to_start_frame_dist
if possible_max_start_idx < 0:
n_extra = - possible_max_start_idx
while n_extra > 0:
for idx, dist in enumerate(frame_dist):
if dist > 1:
frame_dist[idx] = dist - 1
n_extra -= 1
if n_extra == 0: break
# Get frame dist
end_to_start_frame_dist = np.sum(frame_dist)
possible_max_start_idx = n_in - 1 - end_to_start_frame_dist
start_idx = np.random.choice(possible_max_start_idx + 1, 1)
out_idxs = np.cumsum(np.concatenate((start_idx, frame_dist)))
return out_idxs
def upsample(n_in, n_out):
n_more = n_out - n_in
in_idxs = range(n_in)
more_idxs = np.random.choice(in_idxs, n_more)
out_idxs = sorted(list(in_idxs) + list(more_idxs))
return out_idxs
class VID:
def __init__(self, imdb_path, max_frame_dist, epoch_size=None, time_steps=2):
with open(imdb_path, 'rb') as f:
imdb = pickle.load(f)
self.videos = imdb['videos']
self.time_steps = time_steps
self.max_frame_dist = max_frame_dist
if epoch_size is None:
self.epoch_size = len(self.videos)
else:
self.epoch_size = int(epoch_size)
def __getitem__(self, index):
img_ids = self.videos[index % len(self.videos)]
n_frames = len(img_ids)
if n_frames < self.time_steps:
out_idxs = upsample(n_frames, self.time_steps)
elif n_frames == self.time_steps:
out_idxs = range(n_frames)
else:
out_idxs = downsample(n_frames, self.time_steps, self.max_frame_dist)
video = []
for j, frame_idx in enumerate(out_idxs):
img_path = img_ids[frame_idx]
video.append(img_path.encode('utf-8'))
return video
def __len__(self):
return self.epoch_size
| StarcoderdataPython |
5071582 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#sqltest.py - Fetch and display the MySQL database server version.
# import the MySQLdb and sys modules
#deletes first row, runs after the robot finishes order.
import MySQLdb
import sys
import os
import time
# open a database connection
# be sure to change the host IP address, username, password and database name to match your own
connection = MySQLdb.connect(host = "192.168.127.12", user = "test", passwd = "<PASSWORD>", db = "lunchbot")
# prepare a cursor object using cursor() method
cursor = connection.cursor ()
# print the row[0]
# (Python starts the first row in an array with the number zero – instead of one)
reddit=0
checker="There is no input"
exists=0
while(exists==0):
try:
cursor.execute("SELECT * FROM offices LIMIT 10")
existential=cursor.fetchone()
checkit=existential[0]
exists=1
except TypeError as error:
print "I'm waiting for a command..."
time.sleep(5)
exists=0
connection.commit() #basically updates the database to realtime
connection.commit()
while(reddit==0):
try:
cursor.execute("SELECT office_id FROM offices WHERE ID='1'")
row= cursor.fetchone()
checker=row[0]
reddit=1
connection.commit()
except TypeError as error:
reddit=0
cursor.execute("SET @i=0")
cursor.execute("UPDATE offices SET `ID` = @i:=@i+1;")
connection.commit()
print "Office ID:", checker
cursor.execute("DELETE FROM offices WHERE ID='1'")
cursor.execute("SET @i=0")
cursor.execute("UPDATE offices SET `ID` = @i:=@i+1;")
connection.commit()
# close the cursor object
cursor.close ()
# close the connection
connection.close ()
os.system("python go_to_v1.py "+checker)
#NOW WAITS FOR A BUTTON RESPONSE FROM PERSON
#os.system("python go_to_v1.py --"+checker)
#make the program so that if nothing is in queue, it goes home, or else it heads directly to its next destination on queue
# exit the program
#ps I fixed the connection issue by disabling the firewall.
sys.exit()
| StarcoderdataPython |
11227222 | <filename>linha/#2_legendas_no_grafico.py
# SETUP
# -------------------------------------------
#%matplotlib inline
import matplotlib as mpl
#%mpl.rcParams['figure.dpi'] = 100
import numpy as np
import matplotlib.pyplot as plt
# CREATING DATA
# -------------------------------------------
x = np.linspace(0, 10, 100)
y = np.sin(x)
# TITLES AND LABELS
# -------------------------------------------
plt.title('Função de Onda')
plt.ylabel('f(x)')
plt.xlabel('x')
# PLOTTING
# -------------------------------------------
plt.plot(x, y)
plt.show()
| StarcoderdataPython |
3523014 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# General libraries
import sys
import numpy as np
import math # unused in this script; only sqrt (imported below) is actually used
from math import sqrt
import random
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
import time
# My libraries
from biblioteka import rysowanie, stale
from gpu_code import source as source # Here we have the whole source code for GPU
# Libraries NVIDIA CUDA
import pycuda.autoinit
import pycuda.driver as cuda
import pycuda.cumath as cumath
import pycuda.gpuarray as gpuarray
from pycuda.curandom import rand as curand
from pycuda.compiler import SourceModule
start = time.clock()
# Get GPU function
mod = SourceModule(source)
get_energy = mod.get_function("energy")
polKroku = mod.get_function("polKroku")
fupdate = mod.get_function("fupdate")
leapfrog = mod.get_function("leapfrog")
repopulate= mod.get_function("repopulate")
#sila = mod.get_function("sila")
# Initialize data
t = 0
particles = []
velocities = []
energy = []
celllist={}
# random velocities
px = curand((stale.particleNumber,)).get().astype(np.float32)
py = curand((stale.particleNumber,)).get().astype(np.float32)
# velocity distribution around 0, not 0.5
px = px - 0.5
py = py - 0.5
# Here we have energy, not velocity ([XXX] needs correction)
v = np.zeros((stale.particleNumber,)).astype(np.float32)
rx = np.zeros((stale.particleNumber,)).astype(np.float32)
ry = np.zeros((stale.particleNumber,)).astype(np.float32)
fx = np.zeros((stale.particleNumber,)).astype(np.float32)
fy = np.zeros((stale.particleNumber,)).astype(np.float32)
# Initializing a list of neighbors (structure)
# It reduces complexity from O(N^2) to O(N)
nl = (-1)*np.ones((stale.particleNumber,stale.rn)).astype(np.float32)
# Initializing grid of initial coordinates
for i in range(stale.sqpart):
for j in range(stale.sqpart):
rx[j+stale.sqpart*i] = 2*i+1
ry[j+stale.sqpart*i] = 2*j+1
# Center of mass frame
px = px - px.sum()/stale.particleNumber
py = py - py.sum()/stale.particleNumber
# Loading data to GPU (momenta, positions, forces, energy (v), neighbor list)
px_gpu = cuda.mem_alloc(px.nbytes)
cuda.memcpy_htod(px_gpu,px)
py_gpu = cuda.mem_alloc(py.nbytes)
cuda.memcpy_htod(py_gpu,py)
rx_gpu = cuda.mem_alloc(rx.nbytes)
cuda.memcpy_htod(rx_gpu,rx)
ry_gpu = cuda.mem_alloc(ry.nbytes)
cuda.memcpy_htod(ry_gpu,ry)
fx_gpu = cuda.mem_alloc(fx.nbytes)
cuda.memcpy_htod(fx_gpu,fx)
fy_gpu = cuda.mem_alloc(ry.nbytes)
cuda.memcpy_htod(fy_gpu,fy)
v_gpu = cuda.mem_alloc(v.nbytes)
cuda.memcpy_htod(v_gpu,v)
nl_gpu = cuda.mem_alloc(nl.nbytes)
cuda.memcpy_htod(nl_gpu,nl)
# Initialize neighbor list with first data
repopulate(rx_gpu,ry_gpu,nl_gpu,np.array(stale.rn).astype(np.float32), block=(stale.particleNumber,1,1))
energia = np.zeros((stale.particleNumber,))
energia = energia.astype(np.float32)
energia_gpu = cuda.mem_alloc(energia.nbytes)
cuda.memcpy_htod(energia_gpu,energia)
get_energy(px_gpu,py_gpu,energia_gpu, block=(stale.particleNumber,1,1))
cuda.memcpy_dtoh(energia,energia_gpu)
energija = []
temperatura = []
###########
########### The main loop
###########
for i in range(stale.steps):
if (i%int(stale.steps/1000) == 0): # Shows the progress
procent =(100.0*i/stale.steps)
sys.stdout.write("\r")
sys.stdout.write("Processing: %.1f" % procent)
sys.stdout.flush()
# Update of the forces
fupdate(rx_gpu,ry_gpu,fx_gpu,fy_gpu, block=(stale.particleNumber,1,1))
# Calculate temporary energy for particles
polKroku(v_gpu,px_gpu,py_gpu,fx_gpu,fy_gpu, block=(stale.particleNumber,1,1))
cuda.memcpy_dtoh(v,v_gpu)
# Use energies and calculate tau parameter
tau = v.sum()/stale.particleNumber
eta = np.array(sqrt(stale.temp/tau)).astype(np.float32)
# LEAPFROG step
leapfrog(px_gpu,py_gpu,rx_gpu,ry_gpu,fx_gpu,fy_gpu,eta, block=(stale.particleNumber,1,1))
# Update the neighbor list
if (i%1000 == 0):
cuda.memcpy_dtoh(rx,rx_gpu)
cuda.memcpy_dtoh(ry,ry_gpu)
repopulate(rx_gpu,ry_gpu,nl_gpu,np.array(stale.rn).astype(np.float32),
block=(stale.particleNumber,1,1))
# Every 4000 step get data from GPU memory and plot it
if (i%4000 == 0):
cuda.memcpy_dtoh(rx,rx_gpu)
cuda.memcpy_dtoh(ry,ry_gpu)
rysowanie(i,rx,ry)
plt.plot(temperatura)
elapsed = (time.clock() - start)
print stale.particleNumber,elapsed
plt.show()
| StarcoderdataPython |
271622 | # -*- coding: utf-8 -*-
'''
Neural Network model definition using Tensorflow Keras
'''
__author__ = "<NAME>"
__date__ = "February 2021"
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Dropout, Flatten
# Define Keras model architecture
model = Sequential()
model.add(Dense(5, input_shape=(11,), activation='relu'))
model.add(Dense(1, activation='linear'))
# Save the model architecture in JSON format
filename = 'keras_model_redwine_raw.json'
model_json = model.to_json()
with open(filename, "w") as json_file:
json_file.write(model_json)
print('Model architecture %s saved to disk' %filename)
model.summary()
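# A possible round-trip check (sketch; assumes the JSON file written above exists):
# from tensorflow.keras.models import model_from_json
# with open(filename) as json_file:
#     restored = model_from_json(json_file.read())  # rebuilds the architecture only, no weights
# restored.summary()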
| StarcoderdataPython |
1971681 | <filename>tests/cases/build/builtin_functions.py
from minpiler.std import M
M.print(abs(-10))
M.print("test")
x, y = divmod(10, 3)
M.print(x, y)
M.print(pow(2, 3))
M.print(max(1, 2, 3, 1))
M.print(min(1, 2, 3, 1))
M.print(float(1.5))
M.print(int(1.5))
M.print(bool(1.5))
# > print 10.0
# > print "test"
# > print 3.0
# > print 1.0
# > print 8.0
# > print 3.0
# > print 1.0
# > print 1.5
# > print 1.0
# > print 1.0
| StarcoderdataPython |
1639140 | from setuptools import setup
"""
author: fungaegis
github: https://github.com/fungaegis/pytest-failed-screenshot
"""
with open("./README.rst", "r") as readme:
long_description = readme.read()
setup(
name='pytest_failed_screenshot',
url='https://github.com/fungaegis/pytest-failed-screenshot',
version='1.0.2',
author="fungaegis",
author_email="<EMAIL>",
description='When a test case fails, take a screenshot, save it, and attach it to the Allure report',
long_description=long_description,
classifiers=[
'Framework :: Pytest',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Testing',
'Programming Language :: Python :: 3.7',
],
license='MIT License',
py_modules=['pytest_failed_screenshot'],
keywords=[
'pytest', 'py.test', 'pytest_failed_screenshot', 'allure', 'screenshot', 'selenium', 'appium'
],
install_requires=[
'pytest',
'selenium',
'allure-pytest',
'allure-python-commons',
'helium'
],
entry_points={
'pytest11': [
'failed_screenshot = pytest_failed_screenshot',
]
}
)
| StarcoderdataPython |
11245249 | import pygame
from pygame.locals import *
from random import randrange
trackFiles = []
trackTile = ['empty.png', 'start.png', 'vertStraight.png',
'horiStraight.png', 'turn90.png', 'turn180.png',
'turn270.png', 'turn360.png', 'checkpointOne.png',
'checkpointTwo.png']
empty = 0
start = 1
vertStraight = 2
horiStraight = 3
turn90 = 4
turn180 = 5
turn270 = 6
turn360 = 7
check1 = 8
check2 = 9
roverfield = [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 4, 3, 5, 0, 0, 0, 0, 0],
[0, 0, 2, 0, 2, 0, 4, 3, 5, 0],
[0, 0, 1, 0, 2, 0, 2, 0, 2, 0],
[0, 0, 2, 0, 7, 3, 6, 0, 8, 0],
[0, 0, 9, 0, 0, 0, 0, 0, 2, 0],
[0, 0, 2, 0, 0, 0, 0, 0, 2, 0],
[0, 0, 7, 3, 3, 3, 3, 3, 6, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]
roverfieldRot = [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]
class Track(pygame.sprite.Sprite):
def __init__(self, tile_track, y, x, rot):
pygame.sprite.Sprite.__init__(self)
self.image = trackFiles[tile_track]
self.rect = self.image.get_rect()
if rot != 0:
self.image = pygame.transform.rotate(self.image, rot * 90)
self.x = x
self.y = y
def update(self, cam_x, cam_y):
self.rect.topleft = self.x - cam_x, self.y - cam_y
| StarcoderdataPython |
209480 | from setuptools import find_packages, setup
tests_requirements = [
'pytest',
'pytest-cov',
'pytest-flake8',
'pytest-isort',
]
setup(
name='babyte',
author='Kozea',
packages=find_packages(),
include_package_data=True,
install_requires=[
'flask',
'oauth2client',
'libsass',
],
tests_require=tests_requirements,
extras_require={'test': tests_requirements}
)
| StarcoderdataPython |
11367528 | <gh_stars>100-1000
import math
import os
from joblib import Parallel, delayed
from chazutsu.datasets.framework.dataset import Dataset
from chazutsu.datasets.framework.resource import Resource
from chazutsu.datasets.framework.xtqdm import xtqdm
class IMDB(Dataset):
def __init__(self):
super().__init__(
name="Large Movie Review Dataset(IMDB)",
site_url="http://ai.stanford.edu/~amaas/data/sentiment/",
download_url="http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz", # noqa
description="Movie review dataset of 25,000 reviews " \
"annotated with positive/negative labels"
)
def download(self,
directory="", shuffle=True, test_size=0, sample_count=0,
force=False):
if test_size != 0:
raise Exception("The dataset is already splitted to train & test.")
return super().download(directory, shuffle, 0, sample_count, force)
def prepare(self, dataset_root, extracted_path):
extracted_dir = os.path.join(extracted_path, "aclImdb")
data_dirs = ["train", "test"]
pathes = []
for d in data_dirs:
target_dir = os.path.join(extracted_dir, d)
file_path = os.path.join(dataset_root, "imdb_" + d + ".txt")
self.label_by_dir(
file_path, target_dir, {"pos": 1, "neg": 0}, task_size=1000)
pathes.append(file_path)
if d == "train":
unlabeled = os.path.join(dataset_root, "imdb_unlabeled.txt")
self.label_by_dir(
unlabeled, target_dir, {"unsup": None}, task_size=1000)
pathes.append(unlabeled)
return pathes[0]
def make_resource(self, data_root):
return IMDBResource(data_root)
def label_by_dir(self, file_path, target_dir, dir_and_label, task_size=10):
label_dirs = dir_and_label.keys()
dirs = [d for d in os.listdir(target_dir)
if os.path.isdir(os.path.join(target_dir, d))
and d in label_dirs]
write_flg = True
for d in dirs:
self.logger.info(
"Extracting {} (labeled by {}).".format(d, dir_and_label[d]))
label = dir_and_label[d]
dir_path = os.path.join(target_dir, d)
pathes = [os.path.join(dir_path, f) for f in os.listdir(dir_path)]
pathes = [p for p in pathes if os.path.isfile(p)]
task_length = int(math.ceil(len(pathes) / task_size))
for i in xtqdm(range(task_length)):
index = i * task_size
tasks = pathes[index:(index + task_size)]
lines = Parallel(n_jobs=-1)(
delayed(self._make_pair)(label, t) for t in tasks)
mode = "w" if write_flg else "a"
with open(file_path, mode=mode, encoding="utf-8") as f:
for ln in lines:
f.write(ln)
write_flg = False
@classmethod
def _make_pair(cls, label, path):
features = cls._file_to_features(path)
line = "\t".join([str(label)] + features) + "\n"
return line
@classmethod
def _file_to_features(cls, path):
# override this method if you want to implement a custom process
fs = []
with open(path, encoding="utf-8") as f:
lines = f.readlines()
lines = [ln.replace("\t", " ").strip() for ln in lines]
fs = [" ".join(lines)]
return fs
@classmethod
def _parallel_parser(cls, label, path):
features = cls._file_to_features(path)
if label is not None:
line = "\t".join([str(label)] + features) + "\n"
else:
line = "\t".join(features) + "\n" # unlabeled
return line
@classmethod
def _file_to_features(cls, path):
# NOTE: this definition shadows the earlier _file_to_features above, so
# _make_pair ends up using this rating-aware version at runtime.
# override this method if you want to implement a custom process
file_name = os.path.basename(path)
f, ext = os.path.splitext(file_name)
els = f.split("_")
rating = 0
if len(els) == 2:
rating = els[-1]
review = ""
with open(path, encoding="utf-8") as f:
lines = f.readlines()
lines = [ln.replace("\t", " ").strip() for ln in lines]
review = " ".join(lines)
if rating != "0":
return [rating, review]
else:
return [review]
class IMDBResource(Resource):
def __init__(self,
root,
columns=None, target="",
separator="\t", pattern=()):
super().__init__(
root,
["polarity", "rating", "review"],
"polarity",
separator,
{
"train": "_train",
"test": "_test",
"valid": "_valid",
"unlabeled": "_unlabeled",
"sample": "_samples"
})
@property
def unlabeled_file_path(self):
return self._get_prop("unlabeled")
def unlabeled_data(self, split_target=False):
return self._get_data("unlabeled", split_target)
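# Hypothetical usage sketch (the directory and Resource helper names are assumptions):
# imdb = IMDB()
# resource = imdb.download(directory="data")   # prepares imdb_train/test/unlabeled files
# train_df = resource.train_data()             # assumed chazutsu Resource accessor for the train split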
| StarcoderdataPython |
3429302 | <reponame>BBN-E/ZS4IE
# Copyright 2015 by Raytheon BBN Technologies Corp.
# All Rights Reserved.
"""
Python API for Accessing SerifXML Files.
>>> import serifxml3
>>> document_text = '''
... John talked to his sister Mary.
... The president of Iran, <NAME>, said he wanted to resume talks.
... I saw peacemaker Bob at the mall.'''
>>> serif_doc = serifxml3.send_serifxml_document(document_text)
>>> print(serif_doc)
Document:
docid = u'anonymous'
language = u'English'
source_type = u'UNKNOWN'
...
or, to load a document from a file:
>>> serif_doc = serifxml3.Document(filename)
to save a document to a file:
>>> serif_doc.save(output_filename)
For a list of attributes that any serif theory object takes, use the
'help' method on the theory object (or its class). E.g.:
>>> serif_doc.sentences[0].help()
>>> MentionSet.help()
"""
from serif.theory.document import *
from serif.util.event_event_tree_functions import construct_joint_prop_between_events, prune_bad_patterns_from_joint_prop_tree | StarcoderdataPython |
5021162 | import operator
from pytest import mark, raises
from evaluator import evaluate, global_env
import evaluator
from parser import tokenize, parse
import errors
def test_evaluate_integer():
ast = 2
want = 2
got = evaluate(ast, {})
assert want == got
def test_evaluate_symbol():
ast = '*'
want = evaluator.Operator(2, operator.mul)
got = evaluate(ast, global_env)
assert want.arity == got.arity
assert want.function == got.function
@mark.parametrize("source,want", [
('(* 2 3)', 6),
('(* 2 (* 3 4))', 24),
# (100°F − 32) * 5 / 9 = 37°C
("(/ (* (- 100 32) 5) 9)", 37),
('(mod 8 3)', 2),
])
def test_evaluate_expr(source, want):
ast = parse(tokenize(source))
got = evaluate(ast, global_env)
assert want == got
def test_evaluate_missing_arg():
source = '(* 2)'
ast = parse(tokenize(source))
with raises(errors.MissingArgument):
evaluate(ast, global_env)
def test_evaluate_excess_arg():
source = '(mod 2 3 4)'
ast = parse(tokenize(source))
with raises(errors.TooManyArguments):
evaluate(ast, global_env)
def test_evaluate_excess_arg2():
source = '(abs -2 3)'
ast = parse(tokenize(source))
with raises(errors.TooManyArguments):
evaluate(ast, global_env)
def test_evaluate_multiple_lines():
source = '(+ 2 3)\n(* 2 3)'
want = [5, 6]
tokens = tokenize(source)
while tokens:
ast = parse(tokens)
got = evaluate(ast, global_env)
assert want.pop(0) == got
def test_evaluate_division_by_zero():
source = '(/ 1 0)'
ast = parse(tokenize(source))
with raises(errors.DivisionByZero):
evaluate(ast, global_env)
def test_evaluate_unknown_function():
source = '($ 1 2)'
ast = parse(tokenize(source))
with raises(errors.UnknownSymbol):
evaluate(ast, global_env)
@mark.parametrize("source,want", [
('(if 1 1 2)', 1),
('(if 0 1 2)', 2),
('(if (> 1 0) 1 (/ 1 0))', 1),
])
def test_evaluate_if(source, want):
ast = parse(tokenize(source))
got = evaluate(ast, global_env)
assert want == got
def test_evaluate_set():
source = '(set x 3)\n(* 2 x)'
want = [3, 6]
# make copy of global_env
environment = dict(evaluator.global_env)
tokens = tokenize(source)
while tokens:
ast = parse(tokens)
got = evaluate(ast, environment)
assert want.pop(0) == got
def test_print(capsys):
ast = parse(tokenize('(print 7)'))
got = evaluate(ast, global_env)
assert 7 == got
captured = capsys.readouterr()
assert '7\n' == captured.out
def test_begin(capsys):
source = """
(begin
(print 1)
(print 2)
(print 3)
)
"""
ast = parse(tokenize(source))
got = evaluate(ast, global_env)
assert 3 == got
captured = capsys.readouterr()
assert '1\n2\n3\n' == captured.out
@mark.parametrize("source,out", [
('(while 0 (print 1))', ''),
("""(begin
(set x 2)
(while x (begin
(print x)
(set x (- x 1))
))
)""", '2\n1\n'),
])
def test_while(capsys, source, out):
ast = parse(tokenize(source))
got = evaluate(ast, global_env)
assert 0 == got
captured = capsys.readouterr()
assert out == captured.out
def test_evaluate_define():
source = '(define double (n) (* 2 n))'
tokens = tokenize(source)
ast = parse(tokens)
# make copy of global_env
environment = dict(evaluator.global_env)
name = evaluate(ast, environment)
assert 'double' == name
assert name in environment
func = environment[name]
assert 'double' == func.name
assert 1 == func.arity
assert ['n'] == func.arg_names
assert ['*', 2, 'n'] == func.body
def test_evaluate_user_function():
source = '(define triple (n) (* 3 n))\n(triple 5)'
want = ['triple', 15]
tokens = tokenize(source)
# make copy of global_env
environment = dict(evaluator.global_env)
while tokens:
ast = parse(tokens)
got = evaluate(ast, environment)
assert want.pop(0) == got
| StarcoderdataPython |
9648449 | '''
# https://leetcode.com/problems/permutation-in-string
Approach 1:
0. Create hashmap of s1 with count of each letter
1. Create sliding window of length = len(s1)
2. Slide the window over s1:
2.1. Set hashmap/counter of alphbets in each substring of s1 of length = len(s1)
2.2. Compare the hashmap with that of s2:
2.2.1. If found, return true
2.2.2. Else continue untill all substrings are checked
2.3. return FALSE
TC: O(n)
SC: O(1)
Optimization:
1. Compute hashmap for first substring only(O(n))
2. For later substrings, just update the hashmap(O(1)): h[s2[start]] -= 1 , if end + 1 < len(s2): h[s2[end + 1]] += 1, start += 1, end += 1
Approach 3: (recursive):
'''
import string
class Solution:
def checkInclusion(self, s1: str, s2: str) -> bool:
if len(s2) < len(s1):
return False
hMapS1 = dict.fromkeys(string.ascii_lowercase, 0)
hMapS2 = dict.fromkeys(string.ascii_lowercase, 0)
for letter in s1:
hMapS1[letter] += 1
start = 0
end = len(s1) - 1
# initializing hashmap for s2 substring (within sliding window)
temp = start
while(temp <= end):
hMapS2[s2[temp]] += 1
temp += 1
while(end < len(s2)):
# compare if hashmaps match
if hMapS1 == hMapS2:
return True
else:
hMapS2[s2[start]] -= 1
if end + 1 < len(s2):
hMapS2[s2[end + 1]] += 1
start += 1
end += 1
return False
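# Illustrative check (hypothetical inputs), assuming the class above:
# sol = Solution()
# print(sol.checkInclusion("ab", "eidbaooo"))   # expected True ("ba" appears as a window of s2)
# print(sol.checkInclusion("ab", "eidboaoo"))   # expected False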
| StarcoderdataPython |
1919721 | <reponame>ecobasa/ecobasa
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from haystack.utils import Highlighter
from haystack.views import SearchView
from six import string_types
class FindView(SearchView):
def get_results(self):
"""
Override get_results to add the value of the field where query was found
Also takes care of highlighting the query.
"""
results = super(FindView, self).get_results()
query = self.query.lower()
highlight = Highlighter(query)
for r in results:
for field in r.get_stored_fields():
value = getattr(r, field)
# assume search index field 'text' is document field
if isinstance(value, string_types) and\
query in value.lower() and\
field != 'text':
# assume search index field name == model field name
try:
name = r.object._meta.get_field(field).verbose_name
except:
name = field
r.context = {
'field': name,
'value': highlight.highlight(value)
}
continue
return results
# SearchView is no Django view, so no "find = FindView.as_view()"
| StarcoderdataPython |
3401569 | <gh_stars>0
#!/usr/bin/env python
from distutils.core import setup
setup(name='SpaceScout-Server',
version='1.0',
description='REST Backend for SpaceScout',
install_requires=['Django>=1.4,<1.5','mock','oauth2','PIL','pyproj','pytz','South','simplejson>=2.1','django-oauth-plus'],
)
| StarcoderdataPython |
318391 | from InterlocksWdg import * | StarcoderdataPython |
3497161 | <gh_stars>1-10
from django.contrib.auth import get_user_model
from django.db import models
from django.urls import reverse
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from girder_utils.db import DeferredFieldsManager
from s3_file_field import S3FileField
class Submission(models.Model):
class Meta:
ordering = ['-created']
class Status(models.TextChoices):
QUEUED = 'queued', _('Queued for scoring')
SCORING = 'scoring', _('Scoring')
INTERNAL_FAILURE = 'internal_failure', _('Internal failure')
FAILED = 'failed', _('Failed')
SUCCEEDED = 'succeeded', _('Succeeded')
created = models.DateTimeField(default=timezone.now)
creator = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)
creator_fingerprint_id = models.CharField(max_length=32, null=True, blank=True)
creator_ip = models.GenericIPAddressField(null=True, blank=True)
approach = models.ForeignKey('Approach', on_delete=models.CASCADE)
accepted_terms = models.BooleanField(default=False)
test_prediction_file = S3FileField()
status = models.CharField(max_length=20, default=Status.QUEUED, choices=Status.choices)
score = models.JSONField(blank=True, null=True)
overall_score = models.FloatField(blank=True, null=True)
validation_score = models.FloatField(blank=True, null=True)
fail_reason = models.TextField(blank=True)
objects = DeferredFieldsManager('score')
def __str__(self):
return f'{self.id}'
def get_absolute_url(self):
return reverse('submission-detail', args=[self.id])
def reset_scores(self):
self.score = None
self.overall_score = None
self.validation_score = None
return self
| StarcoderdataPython |
9757847 | class Library:
def __init__(self, location):
self.location = location
self.books = []
def find_book(self, title):
try:
book = [b for b in self.books if b.title == title][0]
return "%s in library %s" % (book.title, self.location)
except IndexError:
return "This book is not in %s" % self.location
def add_book(self, book):
self.books.append(book)
def __iter__(self):
self.n = 0
return self
def __next__(self):
if self.n >= len(self.books):
raise StopIteration
result = self.books[self.n]
self.n += 1
return result
class Book:
def __init__(self, title, author):
self.title = title
self.author = author
self.page = 0
def turn_page(self, page):
self.page = page
l = Library("Velingrad")
b = Book("Name of the book", "Author Name")
l.add_book(b)
for i in l:
print(i.title)
print(l.find_book('Name of the book'))
| StarcoderdataPython |
5073517 | # --------------------------------------------
# File: byke_testapp.py
# Date: 30/09/2019
# Author: <NAME>
# Modified:
# Desc: Test application for testing of byke systems, gps, motion sensor, and pic communication.
# Setup for sql db testing.
# --------------------------------------------
import tkinter as tk
import math
import sqlite3
import board
conn = sqlite3.connect('byke_testApp.db')
print("Opened database successfully")
conn.execute('''CREATE TABLE IF NOT EXISTS TRIP_STATS
(TRIP_ID INTEGER PRIMARY KEY NOT NULL,
TRIP_DATE TEXT,
TRIP_TIME INTEGER,
TRIP_MAXSPEED REAL,
TRIP_AVGSPEED REAL,
TRIP_DISTANCE REAL,
TRIP_UPDISTANCE REAL,
TRIP_DOWNDISTANCE REAL);''')
conn.execute('''CREATE TABLE IF NOT EXISTS GPS_DATA
(ENTRY_ID INT PRIMARY KEY NOT NULL,
TIME TEXT NOT NULL,
SPEED REAL,
LAT REAL,
LNG REAL,
ALT REAL,
CLIMB REAL,
XROT REAL,
YROT REAL,
TRIP_ID INTEGER NOT NULL,
FOREIGN KEY (TRIP_ID)
REFERENCES TRIP_STATS (TRIP_ID) );''')
print("Table created successfully")
conn.close()
# raspberry pi libraries
import smbus # i2c smbus for pic communication
import gpsd # Gps library import
import adafruit_dht # import library for temperature sensor
global leftpressed
leftpressed = 0
global rightpressed
rightpressed = 0
global brakepressed
brakepressed = 0
global headlightpressed
headlightpressed = 0
global i
i = 0
global recordrunning
recordrunning = 0
global list1
list1 = []
global totaldistance
totaldistance = 0
global tripNum
tripNum = 0
# i2c addresses
i2cBus = smbus.SMBus(1) # Setup for i2c communication via smbus
tailEndPicAddress = 0x55 # i2c address of tail end pic
batteryPicAddress = 0x45 # i2c address of battery location pic
motionAddress = 0x68 # address for mpu5060 motion sensor
motionPowerMgmt1 = 0x6b # memory location of power register
motionPowerMgmt2 = 0x6c # memory location of power register
# -----------------------------------------------------
# Function: query_test
# Author:
# Modified: <NAME>
# Date: 01/10/19
# Desc: Test sql query
# Inputs:
# Outputs:
# -----------------------------------------------------
def query_test():
# open a fresh connection here; the module-level one was closed above
conn = sqlite3.connect('byke_testApp.db')
cur = conn.cursor()
cur.execute("SELECT * FROM TRIP_STATS WHERE TRIP_ID=?", (tripNum,))
rows = cur.fetchall()
conn.close()
for row in rows:
print(row)
# -----------------------------------------------------
# Function: temperature_read
# Author:
# Modified: <NAME>
# Date: 01/10/19
# Desc: Read temperature from DHT11 sensor
# Inputs:
# Outputs:
# -----------------------------------------------------
def temperature_read():
try:
sensor = adafruit_dht.DHT11(board.D16) # call library for DHT11 temperature sensor
temperature = sensor.temperature # read in temperature and humidity
temperaturebutton.config(text=str(temperature))
except RuntimeError as e:
print('temp error {}'.format(e.args))
# -----------------------------------------------------
# Function: record
# Author:
# Modified: <NAME>
# Date: 01/10/19
# Desc: Start and Stop recording
# Inputs:
# Outputs:
# -----------------------------------------------------
def record():
global tripNum
global i
global recordrunning
if recordrunning == 1:
recordrunning = 0
recordbutton.config(text="Record")
else:
recordrunning = 1
recordbutton.config(text="Recording")
conn = sqlite3.connect('byke_testApp.db')
cur = conn.cursor()
cur.execute("SELECT ENTRY_ID, TRIP_ID FROM GPS_DATA WHERE ENTRY_ID = (SELECT MAX(ENTRY_ID) FROM GPS_DATA)")
max_entry = cur.fetchone()
conn.close()
try:
i = max_entry[0] + 1
except:
i = 0
try:
tripNum = max_entry[1] + 1
except:
tripNum = 1
# -----------------------------------------------------
# Function: gps
# Author:
# Modified: <NAME>
# Date: 01/10/19
# Desc: Query gps module and save to database
# Inputs:
# Outputs:
# -----------------------------------------------------
def gps():
global i
global recordrunning
global totaldistance
global list1
gpsd.connect()
gpsData = gpsd.get_current()
if gpsData.mode > 1:
gpsTime = gpsData.time
speed = gpsData.hspeed
gpsLat = gpsData.lat
gpsLong = gpsData.lon
gpsAlt = gpsData.alt
gpsClimb = gpsData.climb
speed = speed * 3.6
gpsbutton.config(text='Fix ' + str(i))
if recordrunning == 1:
xrotate, yrotate = motion()
list1.append((i, str(gpsTime), speed, gpsLat, gpsLong, gpsAlt,
gpsClimb, xrotate, yrotate, tripNum))
i += 1
speed = float(speed)
if speed > 0.5:
distance = speed / 3600
totaldistance = totaldistance + distance
print("Speed: {} Distance: {} Total Distance: {}".format(speed, distance, totaldistance))
else:
try:
conn = sqlite3.connect('byke_testApp.db')
c = conn.cursor()
entry = "INSERT INTO GPS_DATA (ENTRY_ID, TIME, SPEED, LAT, LNG, ALT, CLIMB, XROT, YROT, TRIP_ID) \
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
c.executemany(entry, list1)
conn.commit()
c.execute("SELECT MAX(SPEED) FROM GPS_DATA")
maxspeed = c.fetchone()
print(maxspeed)
c.execute("SELECT AVG(SPEED) FROM GPS_DATA")
avgspeed = c.fetchone()
print(avgspeed)
listStats = [(0, 0, maxspeed[0], avgspeed[0], totaldistance, 0, 0, tripNum)]
entry2 = "INSERT INTO TRIP_STATS (TRIP_TIME, TRIP_DATE, TRIP_MAXSPEED, TRIP_AVGSPEED, TRIP_DISTANCE, " \
"TRIP_UPDISTANCE, TRIP_DOWNDISTANCE, TRIP_ID)" \
" VALUES (?, ?, ?, ?, ?, ?, ?, ?)"
c.executemany(entry2, listStats)
conn.commit()
conn.close()
except:
pass
else:
gpsbutton.config(text='NO Fix')
if recordrunning == 1:
gpsbutton.after(1000, gps)
# -----------------------------------------------------
# Function: read_word
# Author:
# Modified: <NAME>
# Date: 01/10/19
# Desc: Combine two register from motion sensor
# Inputs:
# Outputs:
# -----------------------------------------------------
def read_word(adr):
high = i2cBus.read_byte_data(motionAddress, adr)
low = i2cBus.read_byte_data(motionAddress, adr + 1)
val = (high << 8) + low
return val
# -----------------------------------------------------
# Function: read_word_motion
# Author:
# Modified: <NAME>
# Date: 01/10/19
# Desc: Adjust value from motion sensor
# Inputs:
# Outputs:
# -----------------------------------------------------
def read_word_motion(adr):
val = read_word(adr)
if val >= 0x8000:
return -((65535 - val) + 1)
else:
return val
# -----------------------------------------------------
# Function: motion
# Author:
# Modified: <NAME>
# Date: 01/10/19
# Desc: Query motion sensor
# Inputs:
# Outputs:
# -----------------------------------------------------
def motion():
i2cBus.write_byte_data(motionAddress, motionPowerMgmt1, 0)
accel_xout_scaled = read_word_motion(0x3b) / 16384.0
accel_yout_scaled = read_word_motion(0x3d) / 16384.0
accel_zout_scaled = read_word_motion(0x3f) / 16384.0
yRotate = -math.degrees(math.atan2(accel_xout_scaled, (math.sqrt((accel_yout_scaled * accel_yout_scaled) +
(accel_zout_scaled * accel_zout_scaled)))))
xRotate = -math.degrees(math.atan2(accel_yout_scaled, (math.sqrt((accel_xout_scaled * accel_xout_scaled) +
(accel_zout_scaled * accel_zout_scaled)))))
motionbutton.config(text=str(round(yRotate, 2)) + ' ' + str(round(xRotate, 2)))
return xRotate, yRotate
# -----------------------------------------------------
# Function: send_tail
# Author:
# Modified: <NAME>
# Date: 01/10/19
# Desc: Send value to tail end pic
# Inputs:
# Outputs:
# -----------------------------------------------------
def send_tail():
i2cBus.write_byte_data(tailEndPicAddress, int(regspinner.get()), int(regvaluespinner.get()))
# -----------------------------------------------------
# Function: send_motor
# Author:
# Modified: <NAME>
# Date: 01/10/19
# Desc: Send value to motor pic
# Inputs:
# Outputs:
# -----------------------------------------------------
def send_motor():
i2cBus.write_byte_data(batteryPicAddress, int(regspinner.get()), int(regvaluespinner.get()))
# -----------------------------------------------------
# Function: read_motor
# Author:
# Modified: <NAME>
# Date: 01/10/19
# Desc: Read value from motor pic
# Inputs:
# Outputs:
# -----------------------------------------------------
def read_motor():
motorrec = i2cBus.read_byte_data(batteryPicAddress, int(regspinner.get()))
regvaluespinner.delete(0, 'end')
regvaluespinner.insert(0, motorrec)
# -----------------------------------------------------
# Function: read_tail
# Author:
# Modified: <NAME>
# Date: 01/10/19
# Desc: Read value from tail end pic
# Inputs:
# Outputs:
# -----------------------------------------------------
def read_tail():
tailrec = i2cBus.read_byte_data(tailEndPicAddress, int(regspinner.get()))
regvaluespinner.delete(0, 'end')
regvaluespinner.insert(0, tailrec)
mainWindow = tk.Tk()
mainWindow.title('Byke')
mainWindow.geometry('400x250+0+0')
mainWindow.columnconfigure(0, weight=1)
mainWindow.columnconfigure(1, weight=1)
mainWindow.columnconfigure(2, weight=1)
mainWindow.rowconfigure(0, weight=1)
mainWindow.rowconfigure(1, weight=1)
mainWindow.rowconfigure(2, weight=1)
mainWindow.rowconfigure(3, weight=1)
mainWindow.config(bg='white')
gpsbutton = tk.Button(mainWindow, text='gps', borderwidth=2, command=gps)
gpsbutton.grid(row=0, column=0, sticky='nswe')
if recordrunning == 1:
gpsbutton.after(1000, gps)
temperaturebutton = tk.Button(mainWindow, text='temperature', borderwidth=2, command=temperature_read)
temperaturebutton.grid(row=2, column=0, sticky='nswe')
motionbutton = tk.Button(mainWindow, text='motion', borderwidth=2, command=motion)
motionbutton.grid(row=3, column=0, sticky='nswe')
recordbutton = tk.Button(mainWindow, text='record', borderwidth=2, command=record)
recordbutton.grid(row=1, column=0, sticky='nswe')
reglabel = tk.Label(mainWindow, text='Register')
reglabel.grid(row=0, column=1, sticky='nsew')
regspinner = tk.Spinbox(mainWindow, width=3, from_=0, to=12, font=(None, 18))
regspinner.grid(row=1, column=1, sticky='nswe')
tailsendbutton = tk.Button(mainWindow, text='Send to Tail', borderwidth=2, command=send_tail)
tailsendbutton.grid(row=0, column=2, sticky='nswe')
motorsendbutton = tk.Button(mainWindow, text='Send to Motor', borderwidth=2, command=send_motor)
motorsendbutton.grid(row=1, column=2, sticky='nswe')
regvalue = tk.Label(mainWindow, text='REG Value')
regvalue.grid(row=2, column=1, sticky='nsew')
tailreadbutton = tk.Button(mainWindow, text='Read Tail', borderwidth=2, command=read_tail)
tailreadbutton.grid(row=2, column=2, sticky='nswe')
motorreadbutton = tk.Button(mainWindow, text='Read Motor', borderwidth=2, command=read_motor)
motorreadbutton.grid(row=3, column=2, sticky='nswe')
regvaluespinner = tk.Spinbox(mainWindow, width=3, from_=0, to=100, font=(None, 18))
regvaluespinner.grid(row=3, column=1, sticky='nswe')
mainWindow.mainloop()
| StarcoderdataPython |
6406940 | from functools import partial
from typing import Callable, Iterable, Tuple
from rechunker.executors.util import chunk_keys, split_into_direct_copies
from rechunker.types import CopySpec, Executor, ReadableArray, WriteableArray
import pywren_ibm_cloud as pywren
from pywren_ibm_cloud.executor import FunctionExecutor
# PywrenExecutor represents delayed execution tasks as functions that require
# a FunctionExecutor.
Task = Callable[[FunctionExecutor], None]
class PywrenExecutor(Executor[Task]):
"""An execution engine based on Pywren.
Supports zarr arrays as inputs. Outputs must be zarr arrays.
Any Pywren FunctionExecutor can be passed to the constructor. By default
a Pywren `local_executor` will be used
Execution plans for PywrenExecutor are functions that accept no arguments.
"""
def __init__(self, pywren_function_executor: FunctionExecutor = None):
self.pywren_function_executor = pywren_function_executor
def prepare_plan(self, specs: Iterable[CopySpec]) -> Task:
tasks = []
for spec in specs:
# Tasks for a single spec must be executed in series
spec_tasks = []
for direct_spec in split_into_direct_copies(spec):
spec_tasks.append(partial(_direct_array_copy, *direct_spec))
tasks.append(partial(_execute_in_series, spec_tasks))
# TODO: execute tasks for different specs in parallel
return partial(_execute_in_series, tasks)
def execute_plan(self, plan: Task, **kwargs):
if self.pywren_function_executor is None:
# No Pywren function executor specified, so use a local one, and shutdown after use
with pywren_local_function_executor() as pywren_function_executor:
plan(pywren_function_executor)
else:
plan(self.pywren_function_executor)
def pywren_local_function_executor():
return pywren.local_executor(
# Minimal config needed to avoid Pywren error if ~/.pywren_config is missing
config={"pywren": {"storage_bucket": "unused"}}
)
def _direct_array_copy(
source: ReadableArray,
target: WriteableArray,
chunks: Tuple[int, ...],
pywren_function_executor: FunctionExecutor,
) -> None:
"""Direct copy between arrays using Pywren for parallelism"""
iterdata = [(source, target, key) for key in chunk_keys(source.shape, chunks)]
def direct_copy(iterdata):
source, target, key = iterdata
target[key] = source[key]
futures = pywren_function_executor.map(direct_copy, iterdata)
pywren_function_executor.get_result(futures)
def _execute_in_series(
tasks: Iterable[Task], pywren_function_executor: FunctionExecutor
) -> None:
for task in tasks:
task(pywren_function_executor)
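# Minimal usage sketch (the zarr arrays and copy specs below are assumed to exist):
# executor = PywrenExecutor()                  # defaults to a local Pywren function executor
# plan = executor.prepare_plan(specs)          # specs: Iterable[CopySpec] from rechunker
# executor.execute_plan(plan)                  # copies each chunk source -> target in parallel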
| StarcoderdataPython |
8111216 | import random
#from colors import color, red, blue
lower="abcdefghijklmnopqrstuvwxyz"
upper="ABCDEFGHIJKLMNOPQRSTUVWXYZ"
numbers="0123456789"
symbols="~!@#$%^&*()_+}=-{:|<>?/.,';[]'"
all=lower+upper+numbers+symbols
length=int(input("REQUIRED LENGTH : "))
password= "".join(random.sample(all,length))
print ("NEW PASSWORD"+" : "+password)
| StarcoderdataPython |
367613 | [ORG 0x7C00]
| StarcoderdataPython |
6482184 | <gh_stars>0
# %%
#######################################
def pilshow_imagefile_vscode(image_file: str):
"""When used with a VS Code "Interactive Window", displays the referenced image file.
Args:
image_file (str): Reference the path of the image.
"""
from PIL import Image
#
image_object = Image.open(image_file)
return image_object
| StarcoderdataPython |
4998884 | from abc import ABC
from artemis_client.session import ArtemisSession
class ArtemisManager(ABC):
_session: ArtemisSession
def __init__(self, artemis_session: ArtemisSession) -> None:
self._session = artemis_session
| StarcoderdataPython |
1680540 | <filename>common/code/snippets/security/blind_sqli_dyn_field_len.py
#!/usr/bin/env python3
# Reference:
# https://spencerdodd.github.io/2017/06/22/kioptrix-2/
import sys
import requests
chars = "abcdefghijklmnopqrstuvwxyz01234567890.()<>*^%$@!"
target = "192.168.56.101"
url = "http://192.168.56.101/index.php"
port = 80
fail_string = "Remote System"
def injection_assembler(injection):
return url
def brute_force_field_value(field):
result = ""
length_of_field = get_field_length(field)
for x in range(1, length_of_field + 1):
print ("[*] Injecting for character in position {}".format(x))
for char in chars:
# print ("[*] Trying char {}".format(char))
# ' or 1=1 && substring("test",1,1)=char(116) #
injection = """' or 1=1 && substring({},{},1)=char({}) #""".format(field, x, ord(char))
post_data = {
"uname": injection,
"psw": "test"
}
r = requests.request("POST", url=url, data=post_data)
if injection_success(r):
print (" [+] Found character {}: {}".format(x, char))
result += char
print ("[+] Bruted value of {}: {}".format(field, result))
def get_field_length(field):
for x in range(0, 200):
# ' or 1=1 && char_length("test")=4 #
injection = """' or 1=1 && char_length({})={} #""".format(field, x)
post_data = {
"uname": injection,
"psw": "test"
}
r = requests.request("POST", url=url, data=post_data)
if injection_success(r):
print("[+] Length of field is {} characters".format(x))
return x
raise Exception("[-] Couldn't determine field length. Exiting.")
def injection_success(inj_response):
if fail_string in inj_response.text:
return False
else:
return True
def main():
if len(sys.argv) > 1:
field_to_brute = sys.argv[1]
brute_force_field_value(field_to_brute)
else:
print("usage: python blind_sqli.py \"(field_to_brute)\""
"\nexample:\tpython blind_sqli.py \"version()\"")
if __name__ == "__main__":
main()
| StarcoderdataPython |
9797294 | class BLNKController:
def __init__(self):
# stalk signal for less than 550ms means it was tapped
self.tap_duration_frames = 55
self.tap_direction = 0
self.blinker_on_frame_start = 0
self.blinker_on_frame_end = 0
self.prev_turnSignalStalkState = 0
def update_state(self, CS, frame):
#opposite direction so start canceling
if (
self.tap_direction > 0
and CS.turnSignalStalkState > 0
and self.tap_direction != CS.turnSignalStalkState
):
self.tap_direction = 0
self.blinker_on_frame_start = 0
self.blinker_on_frame_end = 0
# turn signal stalk just turned on, capture frame
if (
CS.turnSignalStalkState > 0
and self.prev_turnSignalStalkState == 0
):
self.blinker_on_frame_start = frame
# turn signal stalk just turned off
elif (
CS.turnSignalStalkState == 0
and self.prev_turnSignalStalkState > 0
):
if frame - self.blinker_on_frame_start <= self.tap_duration_frames:
#recognize tap
self.tap_direction = self.prev_turnSignalStalkState
self.blinker_on_frame_end = frame
else:
#too long, no tap
self.tap_direction = 0
self.blinker_on_frame_start = 0
self.blinker_on_frame_end = 0
# check what we need to maintain
if (
self.tap_direction > 0
and frame - self.blinker_on_frame_start > self.tap_duration_frames
and frame - self.blinker_on_frame_end > self.tap_duration_frames
and CS.alca_direction != self.tap_direction
):
self.tap_direction = 0
self.blinker_on_frame_start = 0
self.blinker_on_frame_end = 0
self.prev_turnSignalStalkState = CS.turnSignalStalkState | StarcoderdataPython |
9653866 | import inspect
from state_machine.models import Event, State, InvalidStateTransition
from state_machine.orm import get_adaptor
_temp_callback_cache = None
def get_callback_cache():
global _temp_callback_cache
if _temp_callback_cache is None:
_temp_callback_cache = dict()
return _temp_callback_cache
def get_function_name(frame):
return inspect.getouterframes(frame)[1][3]
def before(before_what):
def wrapper(func):
frame = inspect.currentframe()
calling_class = get_function_name(frame)
calling_class_dict = get_callback_cache().setdefault(calling_class, {'before': {}, 'after': {}})
calling_class_dict['before'].setdefault(before_what, []).append(func)
return func
return wrapper
def after(after_what):
def wrapper(func):
frame = inspect.currentframe()
calling_class = get_function_name(frame)
calling_class_dict = get_callback_cache().setdefault(calling_class, {'before': {}, 'after': {}})
calling_class_dict['after'].setdefault(after_what, []).append(func)
return func
return wrapper
def acts_as_state_machine(original_class):
adaptor = get_adaptor(original_class)
global _temp_callback_cache
modified_class = adaptor.modifed_class(original_class, _temp_callback_cache)
_temp_callback_cache = None
return modified_class
| StarcoderdataPython |
1833782 | <filename>bytesviewapi/constants.py<gh_stars>0
# All the API URLs and languages supported by the API.
BASE_URL = 'https://api.bytesview.com/1/'
# Sentiment URL
SENTIMENT_URL = BASE_URL + 'static/sentiment'
SENTIMENT_LANGUAGES_SUPPORT = {"ar", "en"}
# Emotion URL
EMOTION_URL = BASE_URL + 'static/emotion'
EMOTION_LANGUAGES_SUPPORT = {"en"}
# Keywords URL
KEYWORDS_URL = BASE_URL + 'static/keywords'
KEYWORDS_LANGUAGES_SUPPORT = {"en"}
# Semantic URL
SEMANTIC_URL = BASE_URL + 'static/semantic'
SEMANTIC_LANGUAGES_SUPPORT = {"en"}
# Name-gender URL
NAME_GENDER_URL = BASE_URL + 'static/name-gender'
# NER URL
NER_URL = BASE_URL + 'static/ner'
NER_LANGUAGES_SUPPORT = {"en"}
# Intent URL
INTENT_URL = BASE_URL + 'static/intent'
INTENT_LANGUAGES_SUPPORT = {"en"} | StarcoderdataPython |
8198170 | <filename>binding.gyp
{
"targets": [
{
"target_name": "roaring",
"default_configuration": "Release",
"cflags": ["-O3", "-std=c99"],
"cflags_cc": ["-O3", "-std=c++11"],
"defines": ["DISABLEAVX"],
"sources": [
"src/cpp/roaring.c",
"src/cpp/module.cpp",
"src/cpp/v8utils/v8utils.cpp",
"src/cpp/RoaringBitmap32/RoaringBitmap32.cpp",
"src/cpp/RoaringBitmap32/RoaringBitmap32_operations.cpp",
"src/cpp/RoaringBitmap32BufferedIterator/RoaringBitmap32BufferedIterator.cpp"
]
}
],
"conditions": [
[
"target_arch in \"x64 x86_64\"",
{
"targets": [
{
"target_name": "cpuinfo",
"default_configuration": "Release",
"cflags": ["-O3"],
"cflags_cc": ["-O3", "-std=c++11"],
"sources": ["src/cpuinfo/cpuinfo.cpp"]
},
{
"target_name": "roaring-sse42",
"default_configuration": "Release",
"cflags": ["-O3", "-std=c99", "-msse4.2"],
"cflags_cc": ["-O3", "-std=c++11", "-msse4.2"],
"defines": ["DISABLEAVX", "__POPCNT__", "__SSE4_2__"],
"xcode_settings": {
"GCC_ENABLE_SSE42_EXTENSIONS": "YES",
"OTHER_CFLAGS": ["-msse4.2"]
},
"sources": [
"src/cpp/roaring.c",
"src/cpp/module.cpp",
"src/cpp/v8utils/v8utils.cpp",
"src/cpp/RoaringBitmap32/RoaringBitmap32.cpp",
"src/cpp/RoaringBitmap32/RoaringBitmap32_operations.cpp",
"src/cpp/RoaringBitmap32BufferedIterator/RoaringBitmap32BufferedIterator.cpp"
]
},
{
"target_name": "roaring-avx2",
"default_configuration": "Release",
"cflags": ["-O3", "-std=c99", "-mavx2"],
"cflags_cc": ["-O3", "-std=c++11", "-mavx2"],
"defines": ["USEAVX", "FORCE_AVX=ON", "__POPCNT__", "__SSE4_2__"],
"xcode_settings": {
"GCC_ENABLE_SSE42_EXTENSIONS": "YES",
"CLANG_X86_VECTOR_INSTRUCTIONS": "avx2",
"OTHER_CFLAGS": ["-mavx2"]
},
"msvs_settings": {
"VCCLCompilerTool": {
"AdditionalOptions": ["/arch:AVX2"]
}
},
"sources": [
"src/cpp/roaring.c",
"src/cpp/module.cpp",
"src/cpp/v8utils/v8utils.cpp",
"src/cpp/RoaringBitmap32/RoaringBitmap32.cpp",
"src/cpp/RoaringBitmap32/RoaringBitmap32_operations.cpp",
"src/cpp/RoaringBitmap32BufferedIterator/RoaringBitmap32BufferedIterator.cpp"
]
}
]
}
]
]
}
| StarcoderdataPython |
1733638 | <filename>convert_temp.py
#!/usr/bin/env python
import time
import os
temp_file = '/sys/devices/platform/dht22@0/iio:device0/in_temp_input'
dir_path = '/home/pi/.openauto/temp_conversion'
write_file = '/home/pi/.openauto/temp_conversion/temp.txt'
def check_path():
''' Verify that the paths exist '''
if os.path.isdir(dir_path):
return True
else:
os.mkdir(dir_path)
if os.path.isfile(write_file):
return True
else:
create_file = open(write_file, 'w')
create_file.close()
def convert_temp():
while True:
file = open(temp_file, 'r').read()
writefile = open(write_file, 'w')
converted_temp = float(file) * 1000
writefile.write('31 00 4b 46 ff ff 05 10 1c : crc=1c YES\n31 00 4b 46 ff ff 05 10 1c t=' + str(converted_temp))
writefile.close()
time.sleep(10)
def main():
check_path()
convert_temp()
if __name__ == '__main__':
main()
| StarcoderdataPython |
11213529 | <reponame>mingaleg/yakubovich<filename>clerk/signals/__init__.py
import django.dispatch
new_judged_submission = django.dispatch.Signal(['contest_pk', 'run_id'])
| StarcoderdataPython |
66960 | import collections
import numpy as np
import pandas as pd
import matplotlib.colors
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import plotly
import chart_studio.plotly as py
import plotly.graph_objects as go
from plotly.subplots import make_subplots
class MultiResolutionAnalysis:
"""
Class for multi-resolution single cell analysis
Parameters
----------
exp_df: DataFrame (Pandas)
Cell barcode x gene dataframe
meta_df: DataFrame (Pandas)
Cell barcode x metadata attributes
genes: List[str]
List of strings that are gene names. The gene names must be present in exp_df.
coexpressed_genes: List[List[str]] (optional)
Nested lists of strings of length three that are gene names. The gene names must be present in exp_df.
exp_color: str
String that is a matplotlib colormap continuous color.
modularity: DataFrame (Pandas) (optional)
Cluster resolution value by modularity score
silhouette: DataFrame (Pandas) (optional)
Cluster resolution and community assignment by silhouette score
Attributes
----------
sankey_dict: dict
Dictionary containing the multi-resolution cluster analysis for building a Sankey Network
"""
#TODO: Add node hovertemplate text
#TODO: Cleanup normalize silhouette
#TODO: Add class parameters for silhouette and expression coloring
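# Hypothetical usage sketch (all inputs below are assumed example objects):
#   mra = MultiResolutionAnalysis(exp_df, meta_df, genes=['CD3E'],
#                                 coexpressed_genes=[['CD3E', 'CD8A', 'CD4']],
#                                 exp_color='Purples', modularity=mod_df, silhouette=sil_df)
#   mra.compute()   # fills mra.sankey_dict with per-stream expression values for Sankey plotting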
def __init__(self,
exp_df,
meta_df,
genes,
coexpressed_genes,
exp_color,
modularity,
silhouette): #TODO: add additional args for user metadata
if genes is None:
print('Exit Error: Please add gene(s).')
self._exp_df = exp_df
self._meta_df = meta_df.astype(str)
self._build_sankey_dict(genes, exp_color, silhouette, modularity, coexpressed_genes)
def _build_sankey_dict(self, genes, exp_color, silhouette, modularity, coexpressed_genes):
"""
Build a sankey dictionary for the multi-resolution cluster analysis
Parameters
----------
genes: List[str]
List of strings that are gene names. The gene names must be present in exp_df.
exp_color: str
String that is a matplotlib colormap continuous color.
silhouette: DataFrame (Pandas) (optional)
Cluster resolution and community assignment by silhouette score
modularity: DataFrame (Pandas) (optional)
Cluster resolution value by modularity score
coexpressed_genes: List[List[str]] (optional)
Nested lists of strings of length three that are gene names. The gene names must be present in exp_df.
"""
# Check user defined gene list
check_gene_input = list(set(genes) - set(list(self._exp_df)))
if len(check_gene_input) > 0:
print('Warning: the following gene(s) are missing: ', *check_gene_input, sep = ", ")
self.sankey_dict = {'data' : None,
'genes' : genes,
'exp_dict' : {gene:[] for gene in genes},
'exp_color' : exp_color,
'exp_colorbar' : {},
'coexp_genes' : None,
'resolutions' : [data for data in list(self._meta_df) if data.split('.')[0] == 'res']}
self._wrangle_node_data()
self._check_args(coexpressed_genes, silhouette, modularity)
self._count_flow_by_flow()
self._wrangle_source_to_target()
def _wrangle_node_data(self):
"""Create a list of resolution values and their communities"""
self.sankey_dict['node_labels'] = [res for sublist in
[list(map(( lambda x: res + '_' + str(x)), list(set(self._meta_df[res]))))
for res in self.sankey_dict['resolutions']] for res in sublist]
self.sankey_dict['node_data'] = pd.DataFrame(self.sankey_dict['node_labels'], columns=['node_labels'])
self.sankey_dict['node_data']['res'] = [x.split('_')[0] for x in self.sankey_dict['node_data']['node_labels'].tolist()]
def _check_args(self, coexpressed_genes, silhouette, modularity):
"""
Check additional user arguments and add data to sankey_dict
Parameters
----------
coexpressed_genes: List[List[str]] (optional)
Nested lists of strings of length three that are gene names. The gene names must be present in exp_df.
silhouette: DataFrame (Pandas) (optional)
Cluster resolution and community assignment by silhouette score
modularity: DataFrame (Pandas) (optional)
Cluster resolution value by modularity score
"""
if silhouette is not None:
self.sankey_dict['silhouette'] = {data[0]:data[1] for data in silhouette.values}
self.sankey_dict['node_data']['silhoutte_score'] = [round(self.sankey_dict['silhouette'][x],2) for x in self.sankey_dict['node_data']['node_labels']]
self._create_silhouette_colorbar()
if modularity is not None:
self.sankey_dict['modularity'] = {data[0]:data[1] for data in modularity.values}
self.sankey_dict['node_data']['modularity'] = [round(self.sankey_dict['modularity'][x],2) for x in self.sankey_dict['node_data']['res']]
if coexpressed_genes is not None:
check_coexp_gene_input = list(set([genes for gene_sublist in coexpressed_genes for genes in gene_sublist]) - set(list(self._exp_df)))
if len(check_coexp_gene_input) > 0:
print('Exit Error: The following gene(s) are missing: ', *check_coexp_gene_input, sep = ", ")
exit()
self.sankey_dict['coexp_genes'] = coexpressed_genes
self.sankey_dict['coexp_dict'] = {tuple(coexp_set): [] for coexp_set in coexpressed_genes}
self.sankey_dict['coexp_color'] = {tuple(coexp_set): [] for coexp_set in coexpressed_genes}
def _color_mapper(self, vmin, vmax, values_to_map, cmap):
"""
Map a list of values to a hex color codes
Parameters
----------
vmin: float
minimum value for normalization
vmax: float
max value for normalization
values_to_map: list
list of floats for normalization
Returns
-------
hex_list: list[str]
List of strings that are color hex codes
mapper: class object
Class object from matplotlib.cm ScalarMappable
"""
norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax, clip=True)
mapper = cm.ScalarMappable(norm=norm, cmap=cmap)
rgba_list = [mapper.to_rgba(val) for val in values_to_map]
hex_list = [mcolors.to_hex(color) for color in rgba_list]
return hex_list, mapper
def _normalize_silhouette(self):
"""Normalize silhouette score within each cluster resolution"""
node_list = []
for items in self.sankey_dict['resolutions']:
node_temp = self.sankey_dict['node_data'][self.sankey_dict['node_data']['res'] == items].copy()
node_temp['silhoutte_norm_by_res'] = [abs(float(i)/max(node_temp['silhoutte_score'].tolist()))
for i in node_temp['silhoutte_score'].tolist()]
node_list.append(node_temp)
norm_sil_df = pd.concat(node_list)
self.sankey_dict['node_data']['silhoutte_norm_by_res'] = norm_sil_df['silhoutte_norm_by_res']
def _create_silhouette_colorbar(self):
"""Create Silhouette color bar"""
self._normalize_silhouette()
silhouette_list = [x/10.0 for x in list(range(-10,11,1))] # define silhouette score range
silhouette_hex_list, mapper = self._color_mapper(min(silhouette_list), max(silhouette_list), self.sankey_dict['node_data']['silhoutte_norm_by_res'], 'RdYlBu_r')
self.sankey_dict['node_data']['silhoutte_hex'] = silhouette_hex_list # update to modularity
self.sankey_dict['silhouette_list'] = silhouette_list
self.sankey_dict['silhouette_mapper'] = mapper
self.sankey_dict['silhouette_colorbar'] = [mcolors.to_hex(self.sankey_dict['silhouette_mapper'].to_rgba(x))
for x in self.sankey_dict['silhouette_list']]
def _count_flow_by_flow(self):
""""Count the number of cells flowing from resolution (n) community (i) to resolution (m) community (j)"""
self.sankey_dict['sankey_flow_count'] = dict(collections.Counter(tuple(['_'.join([self.sankey_dict['resolutions'][res], str(dag[0])]), '_'.join([self.sankey_dict['resolutions'][res+1], str(dag[1])])])
for res in range(0, len(self.sankey_dict['resolutions'])-1)
for dag in self._meta_df[[self.sankey_dict['resolutions'][res], self.sankey_dict['resolutions'][res+1]]].values))
def _wrangle_source_to_target(self):
"""Create a source to target mapping for each resolution (n) community (i) to resolution (m) community (j)"""
self.sankey_dict['data'] = pd.DataFrame([[res[0], res[1],
self.sankey_dict['node_labels'].index(res[0]),
self.sankey_dict['node_labels'].index(res[1]), cell_count]
for res, cell_count in self.sankey_dict['sankey_flow_count'].items()],
columns=['source_label', 'target_label', 'source', 'target', 'value'])
self.sankey_dict['data']['source_res'] = np.array([x.split('_') for x in self.sankey_dict['data']['source_label']])[:,0].tolist()
self.sankey_dict['data']['source_cluster'] = np.array([x.split('_') for x in self.sankey_dict['data']['source_label']])[:,1].tolist()
self.sankey_dict['data']['target_res'] = np.array([x.split('_') for x in self.sankey_dict['data']['target_label']])[:,0].tolist()
self.sankey_dict['data']['target_cluster'] = np.array([x.split('_') for x in self.sankey_dict['data']['target_label']])[:,1].tolist()
def _create_expression_colorbar(self):
"""Create a gene expression bar"""
# Create a hex code color range for a gene's expression in the sankey network streams
for gene, expression in self.sankey_dict['exp_dict'].items():
exp_hex_list, mapper = self._color_mapper(min(expression), max(expression), expression, 'Purples')
self.sankey_dict['data'][gene+'_hex'] = exp_hex_list
self._sort_hex_colorbar(gene, expression, exp_hex_list)
def _sort_hex_colorbar(self, gene, expression, hex_list):
"""
Sort the hex color code by gene expression in ascending order
Parameters
----------
gene: str
String that is a gene name
expression: list
List of floats for each cell's gene expression value in the sankey stream
hex_list: list
List of strings that are hex codes
"""
gene_color_bar_df = pd.DataFrame(self.sankey_dict['exp_dict'][gene])
gene_color_bar_df['hex'] = hex_list
gene_color_bar_df.sort_values(0, inplace=True)
self.sankey_dict['exp_colorbar'][gene] = dict(colorscale=gene_color_bar_df['hex'].tolist(), showscale=True, cmin=min(expression), cmax=max(expression))
def _coexpression(self, cell_ids):
"""
Compute the co-expression of user defined genes for each batch of cell ids
Parameters
----------
cell_ids : list
List of strings that are cell barcodes
"""
#TODO: Check code
for genes in self.sankey_dict['coexp_genes']:
# gene_sums = self._exp_df.loc[cell_ids, genes].sum(axis = 1).tolist()
gene_sum = sum(self._exp_df.loc[cell_ids][genes].sum().tolist())
# if sum(gene_sums) > 0:
if gene_sum > 0:
dec_percentile = [x/gene_sum for x in self._exp_df.loc[cell_ids][genes].sum().tolist()]
# dec_percentile = [x/sum(gene_sums) for x in gene_sums]
self.sankey_dict['coexp_dict'][tuple(genes)].append(dec_percentile)
self.sankey_dict['coexp_color'][tuple(genes)].append(matplotlib.colors.to_hex(dec_percentile))
else:
pass
def _avg_expression(self, cell_ids):
"""
Compute the average expression of a gene for a subset of cell ids
Parameters
----------
cell_ids: list
List of strings to be spliced from expression dataframe
"""
for gene in self.sankey_dict['genes']:
self.sankey_dict['exp_dict'][gene].append(self._exp_df.loc[cell_ids][gene].mean())
def compute(self):
"""Compute the average expression of the user defined genes for each flow transition"""
for cluster_data in self.sankey_dict['data'][['source_res', 'source_cluster', 'target_res', 'target_cluster']].values:
cell_ids = self._meta_df[(self._meta_df[cluster_data[0]]==cluster_data[1]) & (self._meta_df[cluster_data[2]]==cluster_data[3])].index.tolist()
self._avg_expression(cell_ids)
if self.sankey_dict['coexp_genes'] is not None:
self._coexpression(cell_ids)
self._create_expression_colorbar() | StarcoderdataPython |
6661294 | <gh_stars>10-100
import logging
from async_v20.client import OandaClient
from async_v20.client import __version__
from async_v20.definitions import *
from async_v20.endpoints.annotations import *
logging.getLogger(__name__).addHandler(logging.NullHandler())
__version__ = __version__
| StarcoderdataPython |
376558 | <filename>ait/core/server/plugins/__init__.py
from .data_archive import *
from .limit_monitor import *
from .openmct import *
| StarcoderdataPython |
9607681 | <filename>To-Do/main.py
#Importing Modules
from textwrap import fill
import tkinter as tr
from tkinter import TOP, Listbox, messagebox
import pickle
from tkinter.tix import Tk
#Title
root = tr.Tk()
root.title("To-Do List")
#Declaration
height1 = 30
width1 = 100
width2 = 20
default_font = "Arial Rounded MT"
font_size = 12
saver = 2
button_color = "white"
#Functions
def add_entry():
global saver
task_data = entry.get()
if task_data == "": #Making sure no blank task
tr.messagebox.showwarning(title="Failed",message="Blank entry can not be added")
else:
        task_data = "❒ " + task_data
        tasks.insert(tr.END, task_data)
        entry.delete(0, tr.END) #Clears task after input
        if saver % 2 != 0: #Auto save enabled, persist the list right away
            data = tasks.get(0, tasks.size())
            pickle.dump(data, open("To-Do.dat", "wb"))
def drop_entry():
try:
select_entry = tasks.curselection()[0]
tasks.delete(select_entry)
except:
tr.messagebox.showwarning(title="Failed",message="Please select a task to delete")
def save_entry():
data = tasks.get(0,tasks.size())
pickle.dump(data, open("To-Do.dat","wb"))
tr.messagebox.showwarning(title="Successful",message="Tasks are saved at To-Do.dat")
def auto_save():
global saver
saver += 1
if saver%2 != 0:
tr.messagebox.showwarning(title="Successful",message="Auto Save Enabled")
else:
tr.messagebox.showwarning(title="Successful",message="Auto Save Disabled")
def delete_all():
tasks.delete(0,tr.END)
#Design Part
framming = tr.Frame(root)
framming.pack()
scrollbar = tr.Scrollbar(framming)
scrollbar.pack(side=tr.RIGHT, fill=tr.Y)
tasks = tr.Listbox(framming, height=height1, width=width1, bg="#9cedc3", font=(default_font,font_size), cursor="dot")
tasks.pack()
tasks.config(yscrollcommand=scrollbar.set)
scrollbar.config(command=tasks.yview)
entry = tr.Entry(root, width=80, bg="#c3e7fd", font=(default_font,font_size))
entry.pack()
#Buttons
button1 = tr.Button(root, command= add_entry, text= "[+] Add", width= width2, bg="#03ac13", fg=button_color, font=(default_font,font_size))
button1.pack(side=tr.RIGHT)
button2 = tr.Button(root, command= drop_entry, text= "[-] Drop", width= width2, bg="#ffa500", fg=button_color, font=(default_font,font_size))
button2.pack(side=tr.RIGHT)
button3 = tr.Button(root, command= save_entry, text= "[✔] Save", width= width2, bg= "#1260cc", fg=button_color, font=(default_font,font_size))
button3.pack(side=tr.RIGHT)
button4 = tr.Button(root, command= auto_save, text= "[✔✔] Auto Save", width= width2, bg= "#006242", fg=button_color, font=(default_font,font_size))
button4.pack(side=tr.RIGHT)
button5 = tr.Button(root, command= delete_all, text= "[🗑] Delete All", width= width2, bg= "#8b0000", fg=button_color, font=(default_font,font_size))
button5.pack(side=tr.RIGHT)
#Runner
def runner():
try: #For auto restoring previous state
data = pickle.load(open("To-Do.dat", "rb"))
tasks.delete(0, tr.END)
for x in data:
tasks.insert(tr.END, x)
except:
pass
root.mainloop()
runner() | StarcoderdataPython |
8077880 | <reponame>tomasoptytek/cf_data_mining
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'daleksovski'
from sklearn import metrics
def build_classifier(classifier, data):
'''Builds a classifier
:param classifier: a Classifier object
:param data: a SciKit dataset structure
'''
# generic, for all DataMining libraries
# -------------------------------------
classifier.build_classifier(data)
def apply_classifier(classifier, data):
'''Applies a classifier on a dataset, and gets predictions
:param classifier: a classifier
:param data: a SciKit dataset
:return: the input data containing a key targetPredicted with the classifier predictions
'''
# generic, for all DataMining libraries
# -------------------------------------
new_data = classifier.apply_classifier(data)
return new_data
def helper_extract_true_values_and_predictions(data):
y_true = data["target"]
y_pred = data["targetPredicted"]
return (y_true, y_pred)
def accuracy_score(data):
    '''Calculates the accuracy of a classification model
:param data: a SciKit dataset, containing key targetPredicted
:return: accuracy, float
'''
y_true, y_pred = helper_extract_true_values_and_predictions(data)
from sklearn.metrics import accuracy_score
result_acc = accuracy_score( y_true, y_pred )
return result_acc
def mse(data):
    '''Calculates the mean squared error (MSE) of a regression model
:param data: a SciKit dataset, containing key targetPredicted
:return: MSE, float
'''
from sklearn.metrics import mean_squared_error
y_true, y_pred = helper_extract_true_values_and_predictions(data)
result_mse = mean_squared_error(y_true, y_pred)
return result_mse
def calculate_classification_statistics(dataset):
# Old format:
# 'Expected true and predicted labels for each fold, but failed.' +
# 'If you wish to provide labels for each fold separately it should look like: ' +
# '[[y_true_1, y_predicted_1], [y_true_2, y_predicted_2], ...]')
labels = [[],[]]
for i in range(0,len(dataset.target)):
labels[0].append(dataset['target'][i])
labels[1].append(dataset['targetPredicted'][i])
# Check if we have true and predicted labels for each fold
if labels and type(labels[0][0]) == list:
try:
# Flatten
y_true, y_pred = [], []
for fold_labels in labels:
y_true.extend(fold_labels[0])
y_pred.extend(fold_labels[1])
labels = [y_true, y_pred]
except:
raise Exception('Expected true and predicted labels for each fold, but failed.' +
'If you wish to provide labels for each fold separately it should look like: ' +
'[[y_true_1, y_predicted_1], [y_true_2, y_predicted_2], ...]')
if len(labels) != 2:
raise Exception('Wrong input structure, this widget accepts labels in the form: [y_true, y_pred]')
y_true, y_pred = labels
classes = set()
classes.update(y_true + y_pred)
classes = sorted(list(classes))
# Assign integers to classes
class_to_int = {}
for i, cls_label in enumerate(classes):
class_to_int[cls_label] = i
y_true = [class_to_int[lbl] for lbl in y_true]
y_pred = [class_to_int[lbl] for lbl in y_pred]
accuracy = metrics.accuracy_score(y_true, y_pred)
precision = metrics.precision_score(y_true, y_pred, average='micro')
recall = metrics.recall_score(y_true, y_pred, average='micro')
f1 = metrics.f1_score(y_true, y_pred, average='micro')
confusion_matrix = metrics.confusion_matrix(y_true, y_pred)
if len(classes) == 2:
        auc = metrics.roc_auc_score(y_true, y_pred)  # auc_score was removed from sklearn; roc_auc_score is the current equivalent
else:
auc = 'AUC for multiclass problems requires class probabilities'
return accuracy, precision, recall, f1, auc, confusion_matrix
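
# Hypothetical usage sketch (assumes a Bunch-like dataset that supports both
# attribute and item access, e.g. sklearn.utils.Bunch, with true labels under
# 'target' and predictions under 'targetPredicted'):
#
#   from sklearn.utils import Bunch
#   ds = Bunch(target=['a', 'b', 'a', 'b'], targetPredicted=['a', 'b', 'b', 'b'])
#   acc, precision, recall, f1, auc, cm = calculate_classification_statistics(ds)
#   # acc == 0.75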
| StarcoderdataPython |
3285864 | <filename>common/bulk_import.py<gh_stars>0
import datetime
import re
from django.contrib.auth.models import User
from common.models import Class, Semester, Subject
from io import StringIO
from lxml.html import parse
class ImportException(Exception):
pass
class BulkImport:
def is_allowed(self, clazz, no_lectures, no_exercises):
t, _ = clazz.split('/')
if t == 'P':
if no_lectures:
return False
elif t == 'C':
if no_exercises:
return False
else:
print(f"Uknown class type: {clazz}")
return True
def parse_subject(self, doc):
h2 = doc.xpath('//h2[@class="nomargin"]')
if not h2:
raise ImportException("Missing h2 element, have you imported correct file?")
subject = re.search(r'\(([^)]+)', h2[0].text)
if not subject:
raise ImportException("Subject missing in h2 element")
return subject.group(1).strip()
def parse_semester(self, doc):
elems = doc.xpath('//h2[@class="nomargin"]/span[@class="outputText"]')
if len(elems) != 2:
raise ImportException("two elements .outputText with semester not found in h2")
year = elems[0].text.split('/')[0]
h = elems[1].text.strip().lower()
if h == 'letní':
return year, False
elif h == 'zimní':
return year, True
raise ImportException("failed to parse semester")
def run(self, content, no_lectures=False, no_exercises=False, class_code=None):
doc = parse(StringIO(content)).getroot()
abbr = self.parse_subject(doc)
try:
subject = Subject.objects.get(abbr=abbr)
except Subject.DoesNotExist:
raise ImportException(f"Subject {abbr} does not exist. Please create it first.")
year, is_winter = self.parse_semester(doc)
semester = Semester.objects.get(year=year, winter=is_winter)
classes = list(map(str.strip, doc.xpath('//tr[@class="rowClass1"]/th/div/span[1]/text()')))
labels = list(doc.xpath('//tr[@class="rowClass1"]/th/div/@title'))
default_classes = []
for code in class_code or []:
try:
                default_classes.append(Class.objects.get(semester__year=year, semester__winter=is_winter, code=code, subject__abbr=abbr))
except Class.DoesNotExist:
raise ImportException(f"Class with code {code} does not exist.")
class_in_db = {}
for c, label in zip(classes, labels):
if not self.is_allowed(c, no_lectures, no_exercises):
continue
try:
class_in_db[c] = Class.objects.get(code=c, semester=semester, subject=subject)
except Class.DoesNotExist:
s = label.split(' ')
class_in_db[c] = Class()
class_in_db[c].code = c
day = s[6].upper()
mapping = {'ÚT': 'UT', 'ČT': 'CT', 'PÁ': 'PA'}
class_in_db[c].day = mapping.get(day, day)
class_in_db[c].hour = s[7]
class_in_db[c].year = datetime.datetime.now().year
class_in_db[c].winter = datetime.datetime.now().month >= 9
class_in_db[c].time = s[7]
class_in_db[c].subject = subject
class_in_db[c].semester = semester
first_name, last_name = label.replace(',', '').replace('Ph.D.', '').replace('Bc', '').replace('DiS', '').strip().split(' ')[-2:]
if first_name and last_name:
teacher = User.objects.filter(first_name=first_name, last_name=last_name)
if not teacher:
raise ImportException(f"Teacher '{first_name}' '{last_name}' not found")
class_in_db[c].teacher = teacher[0]
class_in_db[c].save()
for row in doc.xpath('//table[@class="dataTable"]//tr')[1:]:
def clean_name(s):
for remove in ['Ing', 'Bc', 'BA', 'MBA', 'Mgr', 'MgrA', '.', ',']:
s = s.replace(remove, '')
return ' '.join(s.split()).strip()
if not row.xpath('./td[2]/a'):
raise ImportException("Student login not found in table. Have you imported correct file?")
login = row.xpath('./td[2]/a/text()')[0].strip()
email = row.xpath('./td[2]/a/@href')[0].replace('mailto:', '').strip()
name = clean_name(row.xpath('./td[3]/a/text()')[0])
lastname, firstname = name.strip().split(' ', 1)
member_of = []
created = False
user = None
try:
user = User.objects.get(username=login)
except User.DoesNotExist:
user = User.objects.create_user(login.upper(), email)
user.first_name = firstname
user.last_name = lastname
user.save()
created = True
for i, el in enumerate(row.xpath('.//input')):
clazz = classes[i]
if "checked" in el.attrib:
if not self.is_allowed(clazz, no_lectures, no_exercises):
continue
if user not in class_in_db[clazz].students.all():
member_of.append(clazz)
class_in_db[clazz].students.add(user)
elif clazz in class_in_db:
class_in_db[clazz].students.remove(user)
for clazz in default_classes:
if user not in clazz.students.all():
member_of.append(clazz.code)
clazz.students.add(user)
classess = []
for c in Class.objects.filter(students__username=login, semester__year=year, semester__winter=is_winter, subject_id=subject.id):
classess.append(f"{c.timeslot} {c.teacher.username}")
yield {
'login': login,
'firstname': firstname,
'lastname': lastname,
'created': created,
'classes': classess,
}
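
# Hypothetical usage sketch (assumes a configured Django environment and that the
# Subject/Semester referenced by the exported page already exist):
#   importer = BulkImport()
#   for record in importer.run(html_content):
#       print(record['login'], record['created'], record['classes'])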
| StarcoderdataPython |
8093155 | <reponame>bluePhlavio/eph
"""Defines parsing functions to read Jpl Horizons ephemeris."""
import re
from string import whitespace as ws
from astropy import units as u
from astropy.table import Table, QTable
from .util import parse_table, parse_row, numberify, transpose, yes_or_no
from .exceptions import JplBadReqError, ParserError
from .horizons import get_col_dim
def get_sections(source):
"""
Split a Jpl Horizons ephemeris in header, data and footer.
Args:
source (str): the content of the Jpl Horizons ephemeris data output.
Returns:
:class:`tuple`: a tuple of strings containing header, data and footer sections respectively.
.. note:
Note that whitespaces and \* are stripped out from section contents.
"""
m = re.match(r'(.*?)\$\$SOE(.*?)\$\$EOE(.*?)', source, flags=re.DOTALL)
if m:
to_strip = ws + '*'
return (m.group(i).strip(to_strip) for i in range(1, 4))
else:
problem_report, jplparams = map(lambda x: x.strip(ws),
re.split(r'!\$\$SOF', source))
raise JplBadReqError(problem_report)
def get_subsections(source):
"""
Split a source string in a list of sections separated by one or more \*.
Args:
source (str): the source string to be splitted.
Returns:
:class:`list`: the lists of subsections.
"""
to_strip = ws
return list(map(lambda ss: ss.strip(to_strip), re.split(r'\*{3,}', source)))
def parse_params(source):
m = re.search(r'(?<=!\$\$SOF)[\s\S]*$', source)
if m:
to_strip = ws
cleaned = m.group().strip(to_strip)
return {
m.group(1): m.group(2)
for m in re.finditer(r'(\S*)\s=\s(\S*)', cleaned)
}
return dict()
def check_csv(source):
params = parse_params(source)
csv = params.get('CSV_FORMAT', 'NO')
to_strip = ws + '\'"'
cleaned = csv.strip(to_strip)
return yes_or_no(cleaned)
def parse_meta(header):
meta = {
m.group(1).strip(ws): m.group(2).strip(ws)
for m in re.finditer(r'(.*?\D):\s(.*)', header)
}
meta['Target body name'] = re.match(
r'^\S*', meta['Target body name']).group(0).lower()
meta['Center body name'] = re.match(
r'^\S*', meta['Center body name']).group(0).lower()
return meta
def parse_units(meta):
if 'Output units' in meta.keys():
value = meta['Output units'].split(',')
space_u, time_u = map(lambda unit: u.Unit(unit),
value[0].lower().split('-'))
return dict(
JD=u.Unit('day'),
TIME=time_u,
SPACE=space_u,
VELOCITY=space_u / time_u,
ANGLE=u.Unit('deg'),
ANGULAR_VELOCITY=u.Unit('deg') / time_u,
)
def parse_data(data, **kwargs):
"""
Parses the data section of a Jpl Horizons ephemeris in a *list of lists*
table.
Args:
data (str): the section containing data of a Jpl Horizons ephemeris.
Returns:
:class:`list`: the list of lists representing a data table.
"""
try:
return numberify(parse_table(data, **kwargs))
except:
raise ParserError
def parse_cols(header):
"""
Finds and parses ephemeris column names in a Jpl Horizons ephemeris.
Args:
header (str): the header of a Jpl Horizons ephemeris.
Returns:
:class:`tuple`: a tuple with the names of columns.
"""
cols_subsection = get_subsections(header)[-1]
cols = parse_row(cols_subsection)
return tuple(cols)
def parse(source, target=QTable):
"""
Parses an entire Jpl Horizons ephemeris and build an `astropy`_ table out
of it.
Args:
source (str): the content of the Jpl Horizons data file.
target: the type of table to produce (Table or QTable).
Returns:
table: the table containing data from Jpl Horizons source ephemeris.
.. _`astropy`: http://docs.astropy.org/en/stable/table/
"""
cols_del = ',' if check_csv(source) else r'\s'
header, ephemeris, footer = get_sections(source)
data = transpose(parse_data(ephemeris, cols_del=cols_del))
cols = parse_cols(header)
meta = parse_meta(header)
units = parse_units(meta)
if target in (Table, QTable):
table = target(data, names=cols, meta=meta)
else:
raise TypeError('Available target classes are Table and QTable.')
if units and target is not Table:
for col in cols:
dim = get_col_dim(col)
if dim:
table[col].unit = units[dim]
return table
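
# Hypothetical usage sketch (assumes `horizons_result.txt` holds the raw text of a
# JPL Horizons ephemeris output):
#   with open('horizons_result.txt') as f:
#       table = parse(f.read())
#   print(table.colnames)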
| StarcoderdataPython |
6568393 | <filename>HW1/linprimalsvm.py
# Input: numpy matrix X of features, with n rows (samples), d columns (features)
# X[i,j] is the j-th feature of the i-th sample
# numpy vector y of labels, with n rows (samples), 1 column
# y[i] is the label (+1 or -1) of the i-th sample
# Output: numpy vector theta of d rows, 1 column
import numpy as np
import cvxopt as co
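
# Hypothetical usage sketch (assumes the samples are linearly separable, otherwise
# the hard-margin QP below is infeasible):
#   X = np.array([[2.0, 2.0], [-2.0, -2.0]])
#   y = np.array([[1], [-1]])
#   theta = run(X, y)   # predict new samples with sign(X_new.dot(theta))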
def run(X, y):
n, d = X.shape
H = np.identity(d);
f = np.zeros(d);
A = np.zeros((n, d));
for i in range(n):
for j in range(d):
A[i][j] = -y[i]*X[i][j]
b = np.full(n, -1)
theta = np.array(co.solvers.qp(co.matrix(H,tc='d'), co.matrix(f,tc='d'), co.matrix(A,tc='d'), co.matrix(b,tc='d'))['x'])
return theta | StarcoderdataPython |
1846838 | <filename>model-1/serve/code/serve.py
#!/usr/bin/env python3
import os
from flask import Flask
from flask import request
import pandas as pd
from sklearn import linear_model
import pickle
app = Flask(__name__)
@app.route('/ping')
def index():
return "true"
@app.route('/invocation', methods=['GET'])
def get_prediction():
feature1 = float(request.args.get('f1'))
feature2 = float(request.args.get('f2'))
feature3 = float(request.args.get('f3'))
loaded_model = pickle.load(open('model/model.pkl', 'rb'))
prediction = loaded_model.predict([[feature1, feature2, feature3]])
return str(prediction)
if __name__ == '__main__':
app.run(port=5000,host='0.0.0.0')
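
# Example request (hypothetical feature values; the endpoint expects three numeric
# query parameters and a pickled model at model/model.pkl):
#   curl "http://localhost:5000/invocation?f1=1.0&f2=2.0&f3=3.0"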
| StarcoderdataPython |
3352621 | import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
from networkx.drawing.nx_agraph import graphviz_layout
def plot_graph(G, G2=None, nodelist=None, pos=None, figsize=[40, 20], edge_color='b', edge_color2='r', node_size=2500, node_color='y', font_size=12, label=True, width= None):
if pos is None:
if G2 is not None:
g = nx.compose(G, G2)
pos = graphviz_layout(g, prog='dot')
else:
pos = graphviz_layout(G, prog='dot')
if nodelist is None:
nodelist = list(G.nodes)
if width == 'r':
width = [np.abs(G[u][v]['weight']) for u, v in G.edges * 2]
plt.figure(figsize=figsize)
nx.draw_networkx(G,
pos=pos,
nodelist = nodelist,
width = width,
edge_color=edge_color,
node_size=node_size,
node_color=node_color,
font_size=font_size)
if label is True:
nx.draw_networkx_edge_labels(G,
pos=pos,
edge_labels={i: f"{G.edges[i]['weight']:.2f}" for i in G.edges},
font_size=font_size)
if G2 is not None:
nx.draw_networkx_edges(G2,
pos=pos,
nodelist=nodelist,
# width=[np.abs(G2[u][v]['weight']) for u, v in G2.edges],
node_size=node_size,
edge_color=edge_color2)
if label is True:
nx.draw_networkx_edge_labels(G2,
pos=pos,
edge_labels={i: f"{G2.edges[i]['weight']:.2f}" for i in G2.edges},
font_size=font_size)
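
# Hypothetical usage sketch (assumes pygraphviz is available for graphviz_layout and
# that every edge carries a numeric 'weight' attribute, which the edge labels need):
#   G = nx.DiGraph()
#   G.add_edge('a', 'b', weight=0.5)
#   G.add_edge('b', 'c', weight=-1.2)
#   plot_graph(G, figsize=[8, 6])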
| StarcoderdataPython |
8029757 | import enum
import logging
from typing import Optional, Tuple
from PyQt5 import QtCore
from .component import Component
from .motors import Motor, MotorRole, MotorDirection
from ...devices.device.frontend import DeviceFrontend
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class BeamStop(QtCore.QObject, Component):
class States(enum.Enum):
In = 'in'
Out = 'out'
Undefined = 'undefined'
Moving = 'moving'
Error = 'error'
stateChanged = QtCore.pyqtSignal(str)
movingFinished = QtCore.pyqtSignal(bool)
movingProgress = QtCore.pyqtSignal(str, float, float, float)
_movetarget: Optional[States]
_movephase: Optional[str]
state: States = States.Undefined
motionstoprequested: bool = False
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._movephase = None
self._movetarget = None
logger.debug(str(self.__dict__.keys()))
self.instrument.motors.newMotor.connect(self.onNewMotorConnected)
def onMotorDestroyed(self):
# no need to disconnect signal handlers from the destroyed object: Qt does it automatically
self.state = self.States.Undefined
self.stateChanged.emit(self.state.value)
def _disconnectMotor(self, motor: Motor):
motor.started.disconnect(self.onMotorStarted)
motor.stopped.disconnect(self.onMotorStopped)
motor.moving.disconnect(self.onMotorMoving)
motor.positionChanged.disconnect(self.onMotorPositionChanged)
motor.destroyed.disconnect(self.onMotorDestroyed)
motor.cameOnLine.disconnect(self.onMotorOnLine)
motor.wentOffLine.disconnect(self.onMotorOffLine)
def _connectMotor(self, motor: Motor):
motor.started.connect(self.onMotorStarted)
motor.stopped.connect(self.onMotorStopped)
motor.moving.connect(self.onMotorMoving)
motor.positionChanged.connect(self.onMotorPositionChanged)
motor.destroyed.connect(self.onMotorDestroyed)
motor.cameOnLine.connect(self.onMotorOnLine)
motor.wentOffLine.connect(self.onMotorOffLine)
def onMotorOnLine(self):
self.checkState()
def onMotorOffLine(self):
self.state = self.States.Undefined
self.stateChanged.emit(self.state.value)
def onNewMotorConnected(self, motorname: str):
if self.instrument.motors[motorname].role == MotorRole.BeamStop:
self._connectMotor(self.instrument.motors[motorname])
def moveOut(self):
if self._panicking != self.PanicState.NoPanic:
raise RuntimeError('Cannot move beam-stop: panic!')
self._movetarget = self.States.Out
self.motionstoprequested = False
self.motorx.moveTo(self.config['beamstop']['out'][0])
def moveIn(self):
if self._panicking != self.PanicState.NoPanic:
raise RuntimeError('Cannot move beam-stop: panic!')
self._movetarget = self.States.In
self.motionstoprequested = False
self.motorx.moveTo(self.config['beamstop']['in'][0])
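
    # Note: moveIn()/moveOut() only start the X-axis move; once it finishes
    # successfully, onMotorStopped() below chains the corresponding Y-axis move.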
def calibrateIn(self, posx: float, posy: float):
self.config['beamstop']['in'] = (posx, posy)
logger.info(f'Beamstop IN position changed to {posx:.4f}, {posy:.4f}')
self.checkState()
def calibrateOut(self, posx: float, posy: float):
self.config['beamstop']['out'] = (posx, posy)
logger.info(f'Beamstop OUT position changed to {posx:.4f}, {posy:.4f}')
self.checkState()
def checkState(self) -> States:
oldstate = self.state
if not self.motorsAvailable():
self.state = self.States.Undefined
elif self.motorx.isMoving() or self.motory.isMoving():
self.state = self.States.Moving
else:
xpos = self.motorx.where()
ypos = self.motory.where()
if (abs(xpos - self.config['beamstop']['in'][0]) <= 0.0001) and \
(abs(ypos - self.config['beamstop']['in'][1]) <= 0.0001):
self.state = self.States.In
elif (abs(xpos - self.config['beamstop']['out'][0]) <= 0.0001) and \
(abs(ypos - self.config['beamstop']['out'][1]) <= 0.0001):
self.state = self.States.Out
else:
self.state = self.States.Undefined
if self.state != oldstate:
self.stateChanged.emit(self.state.value)
return self.state
def onMotorMoving(self, current: float, start: float, end: float):
if (self.state == self.States.Moving) and (self._movetarget is not None):
self.movingProgress.emit(
f'Moving beamstop {self._movetarget.value}, moving motor {self.sender().name}', start, end, current)
def onMotorStarted(self, startposition: float):
self.checkState()
def onMotorStopped(self, success: bool, endposition: float):
self.checkState()
motor = self.sender()
assert isinstance(motor, Motor)
if self._movetarget is not None:
if self.motionstoprequested:
self.movingFinished.emit(False)
elif (motor.role == MotorRole.BeamStop) and (motor.direction == MotorDirection.X):
# movement of X motor is done, start with Y
if success:
self.motory.moveTo(self.config['beamstop'][self._movetarget.value][1])
else:
# not successful, break moving
logger.error('Error while moving beam-stop: target not reached.')
self.movingFinished.emit(False)
self._movetarget = None
elif (motor.role == MotorRole.BeamStop) and (motor.direction == MotorDirection.Y):
# moving the Y motor finished
self._movetarget = None
self.movingFinished.emit(True)
if self.stopping and (not self.motorx.isMoving()) and (not self.motory.isMoving()):
self.stopComponent()
if self._panicking == self.PanicState.Panicking:
super().panichandler()
def onMotorPositionChanged(self, actualposition: float):
try:
self.checkState()
except DeviceFrontend.DeviceError:
# can happen at the very beginning
pass
def motorsAvailable(self) -> bool:
try:
return self.instrument.motors.beamstop_x.isOnline() and self.instrument.motors.beamstop_y.isOnline()
except KeyError:
# happens when either of the motors is not present
return False
def stopMoving(self):
if self._movetarget is not None:
self.motory.stop()
self.motorx.stop()
def disconnectMotors(self):
for motorname in [self.xmotorname, self.ymotorname]:
if motorname is None:
continue
try:
motor = self.instrument.motors[motorname]
except KeyError:
                continue  # this motor is not registered; nothing to disconnect
self._disconnectMotor(motor)
def startComponent(self):
for motor in self.instrument.motors:
if motor.role == MotorRole.BeamStop:
self._connectMotor(motor)
super().startComponent()
def stopComponent(self):
try:
self._disconnectMotor(self.motorx)
except KeyError:
pass
try:
self._disconnectMotor(self.motory)
except KeyError:
pass
super().stopComponent()
@property
def motorx(self) -> Motor:
return self.instrument.motors.beamstop_x
@property
def motory(self) -> Motor:
return self.instrument.motors.beamstop_y
def inPosition(self) -> Tuple[float, float]:
return self.config['beamstop']['in']
def outPosition(self) -> Tuple[float, float]:
return self.config['beamstop']['out']
def panichandler(self):
self._panicking = self.PanicState.Panicking
if self._movetarget is not None:
self.stopMoving()
else:
super().panichandler()
| StarcoderdataPython |
6532610 | #!/usr/bin/env python
import sys
from operator import add
import numpy as np
from sklearn.decomposition import PCA
from sklearn import preprocessing
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib
from matplotlib import rcParams
rcParams['font.family'] = 'Arial'
rcParams['legend.numpoints'] = 1
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-c",
dest="cat_file",
help="Categories")
parser.add_option("-o",
dest="output_file",
help="Output file")
parser.add_option("-m",
"--mirror",
action="store_true",
dest="mirror_m",
default=False)
(options, args) = parser.parse_args()
if not options.output_file:
parser.error('Output file not given')
if not options.cat_file:
parser.error('Category file not given')
M = []
for l in sys.stdin:
A = [float(x) for x in l.rstrip().split()]
M.append(A)
X = np.array(M)
X_std = preprocessing.StandardScaler().fit_transform(X)
cor_mat = np.corrcoef(X.T)
eig_val_cor, eig_vec_cor = np.linalg.eig(cor_mat)
eig_pairs_cor = [(np.abs(eig_val_cor[i]), eig_vec_cor[:,i]) for i in range(len(eig_val_cor))]
eig_pairs_cor.sort()
eig_pairs_cor.reverse()
PCA_1 = 0
PCA_2 = 1
print(eig_pairs_cor[PCA_1][0])
print(eig_pairs_cor[PCA_2][0])
#exit(1)
matrix_w_cor = np.hstack((eig_pairs_cor[PCA_1][1].reshape(len(M),1), eig_pairs_cor[PCA_2][1].reshape(len(M),1)))
X_transf = matrix_w_cor.T.dot(X_std.T).T
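# The two-column eigenvector matrix projects the standardized samples onto the two
# selected principal components, so each row of X_transf is a (PC1, PC2) pair.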
#sklearn_pca = PCA(n_components=4)
#X_transf = sklearn_pca.fit_transform(X_std)
#
#if (options.mirror_m):
#X_transf = X_transf * -1
X_transf[:,0] = X_transf[:,0] * -1
#X_transf[:,1] = X_transf[:,1] * -1
#print X_transf[:,1]
f = open(options.cat_file)
C=[]
for l in f:
C.append(l.rstrip())
f.close()
tmp=set(C)
U=[]
for t in tmp:
U.append(t)
colors = cm.rainbow(np.linspace(0, 1, len(U)))
#colors = ['cyan', 'lightgreen', 'lightcoral', 'violet', 'grey']
color_map = {}
for i in range(len(U)):
color_map[U[i]] = colors[i]
print(color_map)
matplotlib.rcParams.update({'font.size': 12})
#fig = plt.figure(figsize=(5,5), dpi=300)
#fig = matplotlib.pyplot.figure(figsize=(5,5),dpi=300,facecolor='black')
fig = matplotlib.pyplot.figure(figsize=(5,5),dpi=300)
ax = fig.add_subplot(1,1,1)
#ax = fig.add_subplot(1,1,1,axisbg='k')
ax.tick_params(labelsize=10)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
#ax.spines['bottom'].set_color('white')
#ax.spines['left'].set_color('white')
#ax.title.set_color('white')
#ax.yaxis.label.set_color('white')
#ax.xaxis.label.set_color('white')
#ax.tick_params(axis='x', colors='white')
#ax.tick_params(axis='y', colors='white')
#ax.set_xlabel("Principal component 1")
#ax.set_ylabel("Principal component 2")
ax.set_xlabel("PC1 (EV = " + str(round(eig_pairs_cor[PCA_1][0],2)) + ")")
ax.set_ylabel("PC2 (EV = " + str(round(eig_pairs_cor[PCA_2][0],2)) + ")")
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
l = []
for cat in U:
    print(cat)
# tmp_X = []
# tmp_Y = []
# for idx in [i for i, x in enumerate(C) if x == cat]:
# tmp_X.append(X_transf[idx,0])
# tmp_Y.append(X_transf[idx,1])
# print tmp_X
# print tmp_Y
# plt.scatter(tmp_X, tmp_Y, X_transf[idx,1], c=color_map[cat], label=cat)
idxs = [i for i, x in enumerate(C) if x == cat]
ax.plot(X_transf[idxs[0],0], \
X_transf[idxs[0],1], \
'o',
c=color_map[cat],
label=cat)
for idx in idxs[1:]:
ax.plot(X_transf[idx,0], \
X_transf[idx,1], \
'o',
c=color_map[cat])
#plt.legend(frameon=False, fontsize=10,labelspacing=0.25,numpoints=1)
#plt.savefig(options.output_file,bbox_inches='tight')
#l1=ax.legend(loc='lower right', \
l1=ax.legend(loc='upper right', \
labelspacing=0.25,\
frameon=False, \
fontsize=12, \
ncol=1)
yticks, yticklabels = matplotlib.pyplot.yticks()
ymin = (3*yticks[0] - yticks[1])/2.
ymax = (3*yticks[-1] - yticks[-2])/2.
matplotlib.pyplot.ylim(ymin, ymax)
matplotlib.pyplot.yticks(yticks)
xticks, xticklabels = matplotlib.pyplot.xticks()
xmin = (3*xticks[0] - xticks[1])/2.
xmax = (3*xticks[-1] - xticks[-2])/2.
matplotlib.pyplot.xlim(xmin, xmax)
matplotlib.pyplot.xticks(xticks)
#for text in l1.get_texts():
#matplotlib.pyplot.setp(text,color='white')
####################################################
matplotlib.pyplot.savefig(options.output_file,\
bbox_inches='tight',
transparent=True)
| StarcoderdataPython |
11229019 | """Tests for the aerial_position module."""
from auvsi_suas.models.aerial_position import AerialPosition
from auvsi_suas.models.gps_position import GpsPosition
from django.test import TestCase
class TestAerialPositionModel(TestCase):
"""Tests the AerialPosition model."""
def assertDistanceEqual(self, pos1, pos2, dist, threshold=10):
"""AerialPosition distances are within threshold (ft)."""
self.assertAlmostEqual(pos1.distance_to(pos2), dist, delta=threshold)
self.assertAlmostEqual(pos2.distance_to(pos1), dist, delta=threshold)
def evaluate_distance_inputs(self, io_list):
"""Evaluates the distance_to calc with the given input list."""
for (lon1, lat1, alt1, lon2, lat2, alt2, dist_actual) in io_list:
gps1 = GpsPosition(latitude=lat1, longitude=lon1)
gps1.save()
gps2 = GpsPosition(latitude=lat2, longitude=lon2)
gps2.save()
pos1 = AerialPosition(gps_position=gps1, altitude_msl=alt1)
pos2 = AerialPosition(gps_position=gps2, altitude_msl=alt2)
self.assertDistanceEqual(pos1, pos2, dist_actual)
def test_distance_zero(self):
"""Tests distance calc for same position."""
self.evaluate_distance_inputs([
# (lon1, lat1, alt1, lon2, lat2, alt2, dist_actual)
(0, 0, 0, 0, 0, 0, 0),
(1, 2, 3, 1, 2, 3, 0),
(-30, 30, 100, -30, 30, 100, 0),
]) # yapf: disable
def test_distance_competition_amounts(self):
"""Tests distance calc for competition amounts."""
self.evaluate_distance_inputs([
# (lon1, lat1, alt1, lon2, lat2, alt2, dist_actual)
(-76.428709, 38.145306, 0, -76.426375, 38.146146, 0, 736.4),
(-76.428537, 38.145399, 0, -76.427818, 38.144686, 100, 344.4),
(-76.434261, 38.142471, 100, -76.418876, 38.147838, 800, 4873.7),
]) # yapf: disable
def test_duplicate_unequal(self):
"""Tests the duplicate function with unequal positions."""
gps1 = GpsPosition(latitude=0, longitude=0)
gps1.save()
gps2 = GpsPosition(latitude=1, longitude=1)
gps2.save()
pos1 = AerialPosition(gps_position=gps1, altitude_msl=0)
pos2 = AerialPosition(gps_position=gps2, altitude_msl=0)
pos3 = AerialPosition(gps_position=gps1, altitude_msl=1)
self.assertFalse(pos1.duplicate(pos2))
self.assertFalse(pos1.duplicate(pos3))
def test_duplicate_equal(self):
"""Tests the duplicate function with unequal positions."""
gps1 = GpsPosition(latitude=0, longitude=0)
gps1.save()
gps2 = GpsPosition(latitude=0, longitude=0)
gps2.save()
pos1 = AerialPosition(gps_position=gps1, altitude_msl=0)
pos2 = AerialPosition(gps_position=gps2, altitude_msl=0)
pos3 = AerialPosition(gps_position=gps1, altitude_msl=0)
self.assertTrue(pos1.duplicate(pos2))
self.assertTrue(pos1.duplicate(pos3))
| StarcoderdataPython |
9602981 | <reponame>CityOfZion/neo3-boa<gh_stars>10-100
from typing import Any
from boa3.builtin import contract, public
from boa3.builtin.type import UInt160
@contract('0xf3349090a6abd4771739da994dd155a4294e6837')
class Nep17:
@staticmethod
def symbol() -> str:
pass
@staticmethod
def decimals() -> int:
pass
@staticmethod
def totalSupply() -> int:
pass
@staticmethod
def balanceOf(account: UInt160) -> int:
pass
@staticmethod
def transfer(from_address: UInt160, to_address: UInt160, amount: int, data: Any) -> bool:
pass
@public
def nep17_symbol() -> str:
return Nep17.symbol()
@public
def nep17_decimals() -> int:
return Nep17.decimals()
@public
def nep17_total_supply() -> int:
return Nep17.totalSupply()
@public
def nep17_balance_of(account: UInt160) -> int:
return Nep17.balanceOf(account)
@public
def nep17_transfer(from_account: UInt160, to_account: UInt160, amount: int, additional_data: Any) -> bool:
return Nep17.transfer(from_account, to_account, amount, additional_data)
| StarcoderdataPython |
156863 | import copy
def parse_lines(input_text):
action = input_text.split(" ")[0]
amount = int(input_text.split(" ")[1])
return [action, amount, 0]
def run_game(input_file):
acc = 0
index = 0
curr = input_file[index]
curr[2] += 1
while curr[2] <= 1 and index < len(input_file):
action = curr[0]
num = curr[1]
if action == "nop":
index += 1
elif action == "acc":
acc += num
index += 1
elif action == "jmp":
index += num
else:
raise Exception("Invalid command")
if index >= len(input_file):
break
curr = input_file[index]
curr[2] += 1
status = "success" if index >= len(input_file) else "failed"
for idx, val in enumerate(input_file):
val[2] = 0
input_file[idx] = val
return acc, status
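
# Hypothetical usage sketch (instruction format as produced by parse_lines above):
#   prog = [parse_lines(s) for s in ["nop +0", "acc +1", "jmp -2"]]
#   run_game(prog)   # -> (1, 'failed'): the jmp revisits an instruction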
def flip_command(input_file):
for idx, val in enumerate(input_file):
if val[0] not in ["jmp", "nop"]:
continue
revised = copy.deepcopy(input_file)
if val[0] == "jmp":
revised[idx][0] = "nop"
elif val[0] == "nop":
revised[idx][0] = "jmp"
check = run_game(revised)
if check[1] == "success":
return check[0]
return -999
filename = "input_day8.txt"
with open(filename, "r") as input:
input_file = input.read().splitlines()
input_file = [parse_lines(i) for i in input_file]
check_acc = run_game(input_file)[0]
print(f"Acc:\t{check_acc}")
flip_acc = flip_command(input_file)
print(f"Flipped acc:\t{flip_acc}")
| StarcoderdataPython |
56616 | <reponame>devTaemin/Anchorvalue-fintech-hackathon
import pandas as pd
from pandas import DataFrame
df_0 = pd.read_csv('2019.csv', delimiter=',', encoding='utf-8-sig')
df_1 = pd.read_csv('2020.csv', delimiter=',', encoding='utf-8-sig')
#df_2 = pd.read_csv('2020_news_summary.csv', delimiter=',')
df_merge = pd.concat([df_0, df_1]) # row bind : axis = 0, default
data = {"Date": [],
"Title": [],
"Summary": [],
"Url": [],
"Label": []}
new_df = DataFrame(data)
new_df["Date"] = df_merge["Date"]
new_df["Title"] = df_merge["Title"]
new_df["Summary"] = df_merge["Summary"]
new_df["Url"] = df_merge["Url"]
new_df["Label"] = df_merge["Label"]
result = new_df.dropna(axis=0)
print(len(result))
new_df.to_csv('news_label.csv', sep=',', encoding='utf-8-sig') | StarcoderdataPython |
11235105 | <gh_stars>0
'''
Assembles plot pages based on the grimsel.plotting.plotting module
'''
import sys
from importlib import reload
import logging
import subprocess
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import pyAndy.core.plotting as lpplt
from pyAndy.auxiliary import aux_sql_func as aql
from pyAndy.core.plotpagedata import PlotPageData as PlotPageData
reload(lpplt)
reload(aql)
class PlotPage:
"""
Sets up the page and the axes layout, making use of gridspec and subplots.
"""
# default parameters for page layout
dim_a4_wide = (11.69, 8.27)
dim_a4_high = (8.27, 11.69)
pg_layout = {'page_dim': dim_a4_high,
'bottom': 0.25, 'top': 0.95, 'left': 0.2, 'right': 0.8,
'wspace': 0.2, 'hspace': 0.2,
'width_ratios': None, 'height_ratios': None,
'axarr': None,
'dpi': 150}
# scaling depending on screen resolution
page_scale = 2#get_config('page_scale')[sys.platform]
def __init__(self, nx, ny, sharex, sharey, **kwargs):
self.pg_layout = PlotPage.pg_layout.copy()
for key, val in self.pg_layout.items():
if key in kwargs.keys():
self.pg_layout.update({key: kwargs.pop(key)})
page_dim = self.pg_layout.pop('page_dim')
self.dpi = self.pg_layout.pop('dpi')
print('dpi', self.dpi)
axarr = self.pg_layout.pop('axarr')
if not isinstance(axarr, np.ndarray):
self.fig, self.axarr = plt.subplots(nrows=ny, ncols=nx,
sharex=sharex,
sharey=sharey,
squeeze=False,
gridspec_kw=self.pg_layout,
dpi=self.dpi
)
self.axarr = self.axarr.T
else:
self.fig, self.axarr = (axarr[0][0].get_figure(), axarr)
if not page_dim is None:
self.fig.set_size_inches([PlotPage.page_scale * idim
for idim in page_dim])
class PlotTiled(PlotPage):
''' Set up a tiled plot page. '''
def __init__(self, pltpgdata, **kwargs):
self.pltpgdata = pltpgdata
self.nx, self.ny = self.pltpgdata.get_plot_nxy()
defaults = {
'kind_def': 'LinePlot',
'kind_dict': {},
'plotkwargsdict': {},
'sharex': False, 'sharey': False,
'val_axy': False,
'caption': True,
'drop_nan_cols': True,
'draw_now': True,
'legend': 'page',
}
for key, val in defaults.items():
setattr(self, key, val)
if key in kwargs.keys():
setattr(self, key, kwargs[key])
kwargs.pop(key)
self.plotdict = {}
self.posdict = {} # dict containing plot locations in the grid
if self.draw_now:
# Produce an actual figure
kws = dict(nx=self.nx, ny=self.ny,
sharex=self.sharex, sharey=self.sharey,
**{kk: vv for kk, vv in kwargs.items()
if kk in PlotPage.pg_layout.keys()})
super(PlotTiled, self).__init__(**kws)
else:
# Generate a dummy axarr, which is the only necessary parameter
self.axarr = np.tile(np.nan, (self.nx, self.ny))
# remove PlotPage kwargs from kwargs
kwargs = {kk: vv for kk, vv in kwargs.items()
if not kk in PlotPage.pg_layout.keys()}
# left-over kwargs are for plots
self.plotkwargs = kwargs
self._init_default_xylabel()
# get full dictionary plot type --> columns
self._expand_kind_dict()
self._expand_plotkwargsdict()
self.ax_loop()
if self.draw_now:
self.draw_plots()
self.finalize_plottiled()
@staticmethod
def save_plot(fig, name):
with PdfPages(name + '.pdf') as pp:
pp.savefig(fig)
fig.savefig(name + '.svg')
fig.savefig(name + '.png')
try:
cmd = 'inkscape --file {f}.svg --export-emf {f}.emf'.format(f=name)
cmd = cmd.split(' ')
subprocess.run(cmd)
except:
pass
@classmethod
def concat(cls, concat_list, concat_dir='y',
sharex=False, sharey=False, draw_now=True, alt_align=False,
**kwargs):
if not concat_dir in ['x', 'y']:
raise ValueError('Parameter concat_dir must be one of ("x", "y")')
self = cls.__new__(cls)
# are we concatenating along x?
concat_x = (concat_dir == 'x')
# get new subplots shape
_dim_sum = 0 if concat_dir == 'x' else 1
_dim_max = 1 if concat_dir == 'x' else 0
len_sum = sum([obj.axarr.shape[_dim_sum] for obj in concat_list])
len_max = max([obj.axarr.shape[_dim_max] for obj in concat_list])
self.nx, self.ny = ((len_sum, len_max) if concat_x else
(len_max, len_sum))
# generate instance new PlotPage
if draw_now:
kws = dict(nx=self.nx, ny=self.ny,
sharex=sharex, sharey=sharey,
**{kk: vv for kk, vv in kwargs.items()
if kk in PlotPage.pg_layout.keys()})
super(PlotTiled, self).__init__(**kws)
else:
# Generate a dummy axarr, which is the only necessary parameter
self.axarr = np.tile(np.nan, (self.nx, self.ny))
# generate new plotdict dict
self.plotdict = {}
self.posdict = {}
# keep track of axes which are being used
list_ax_not_empty = []
cnt_xy = 0 # counter along concatenation dimension
for npo, po_slct in enumerate(concat_list):
print(npo)
for nx, pltx, ny, plty, plot, ax, kind in po_slct.get_plot_ax_list():
''''''
if concat_x:
align_offset = self.ny - po_slct.ny if alt_align else 0
else:
align_offset = self.nx - po_slct.nx if alt_align else 0
gridpos_x = nx + (cnt_xy if concat_x else 0) \
+ (align_offset if not concat_x else 0)
gridpos_y = ny + (cnt_xy if not concat_x else 0) \
+ (align_offset if concat_x else 0)
print(nx, pltx, ny, plty, gridpos_x, gridpos_y)
# raise error if plot indices are already in plotdict keys;
# note that this is not strictly necessary but avoids trouble
# later on (double keys in plotdict):
if (pltx, plty) in self.plotdict.keys():
e = ('Trying to add a second plot {}. Make sure plot ' +
'indices are unique before concatenating TiledPlot ' +
'objects. Consider using the _name attribute as ' +
'an index.').format(str((pltx, plty)))
raise IndexError(e)
# add plot to new plotdict
self.plotdict[pltx, plty, kind] = plot
# assign new axes to the original plot objects
plot.ax = self.axarr[gridpos_x][gridpos_y]
list_ax_not_empty.append(plot.ax)
# define new position for the original plot objects
self.posdict[pltx, plty] = (gridpos_x, gridpos_y)
cnt_xy += nx + 1 if concat_x else ny + 1
# generate list_ind_pltx/y lists
if draw_now:
# delete empty axes
for ax_del in [ax for ax in self.axarr.flatten()
if not ax in list_ax_not_empty]:
self.fig.delaxes(ax_del)
for plt_slct in concat_list:
for plot in plt_slct.plotdict.values():
plot.gen_plot()
plot.finalize_axis()
return self
def finalize_plottiled(self):
if self.caption:
self.add_caption()
def _init_default_xylabel(self):
'''
Modify the plot kwargs x/ylabel param if no value is provided.
'''
for xy in 'xy':
lab = '%slabel'%xy
no_inp = (not lab in self.plotkwargs or
(lab in self.plotkwargs
and self.plotkwargs[lab] in [False, None]))
if no_inp:
# Setting the x/y data name by default, if available, else the
# value name.
axxy = getattr(self.pltpgdata, 'ind_ax%s'%xy)
if axxy is not None:
self.plotkwargs.update({lab: axxy})
elif xy != 'x':
self.plotkwargs.update({lab: self.pltpgdata.values})
def _expand_legend_dict(self):
if not isinstance(self.legend, dict):
_legend = dict()
if self.legend == 'plots':
# plot-level legends on all plots
_legend['plots'] = list(self.plotdict.values())
elif isinstance(self.legend, tuple):
# legend is assumed to specify a plot for legend addition
_legend['plot'] = list({self.plotdict[self.legend]})
elif self.legend == 'page':
# page-level legend on last plot
_legend['page'] = [self.current_plot]
else:
# translate plot indices to plot objects
_legend = {key: [self.plotdict[plot_ind] for plot_ind in vals]
for key, vals in self.legend.items()}
return _legend
def get_legend_handles_labels(self, unique=True):
'''
Collects legend handles and labels from plot objects and
removes duplicates if unique is True
'''
hdls_lbls = []
for plot in self.plotdict.values():
plot_hdl, plot_lbl = plot.get_legend_handles_labels()
hdls_lbls += list(zip(plot_hdl, plot_lbl))
hdls_lbls = list(zip(*hdls_lbls))
if unique:
# dictionary and back
hdls_lbls = list(reversed(list(zip(*list(dict(
zip(*reversed(hdls_lbls))).items())))))
return hdls_lbls
def add_legends(self, **plot_legend_kwargs):
print('plot_legend_kwargs : ', plot_legend_kwargs)
_legend = self._expand_legend_dict()
if 'plots' in _legend:
for plot in _legend['plots']:
plot.add_plot_legend(**plot_legend_kwargs, from_ax=True)
if 'page' in _legend or 'plot' in _legend:
hdls, lbls = self.get_legend_handles_labels(unique=True)
if 'page' in _legend:
self.add_page_legend(slct_plot=_legend['page'][0],
handles=hdls, labels=lbls,
**plot_legend_kwargs)
if 'plot' in _legend:
_legend['plot'][0].add_plot_legend(handles=hdls, labels=lbls,
**plot_legend_kwargs)
logging.debug(_legend)
def add_page_legend(self, slct_plot=None, handles=None, labels=None,
**plot_legend_kwargs):
'''
Add legend in the corner of the figure.
This adds a plot legend with some bbox kwargs to move it to the
figure corner.
'''
legkw = {'bbox_transform': self.fig.transFigure,
'bbox_to_anchor': (1, 1)}
print('add_page_legend', plot_legend_kwargs)
plot_legend_kwargs.update(legkw)
plot_legend_kwargs.update(handles=handles, labels=labels)
logging.debug(plot_legend_kwargs)
slct_plot.add_plot_legend(**plot_legend_kwargs)
def _expand_kind_dict(self):
'''
Expands the dictionary data series -> plot type.
This dictionary is used to slice the data by plot type.
Note: input arg kind_dict is of shape {'series_element': 'kind0', ...}
'''
cols = self.pltpgdata.data.columns
# flatten column names
cols_all_items = set([cc for c in cols for cc in c])
# default map: columns -> default type
kind_dict_cols = {kk: self.kind_def for kk in cols}
# update using the kind_dict entries corresponding to single elements
dct_update = {kk: vv for kk, vv in self.kind_dict.items()
if kk in cols_all_items or kk in cols}
dct_update = {cc: [vv for kk, vv in dct_update.items()
if kk in cc or kk == cc][0] for cc in cols
if any([c in cc or c == cc
for c in dct_update.keys()])}
kind_dict_cols.update(dct_update)
# update using the kind_dict entries corresponding to specific columns
dct_update = {kk: vv for kk, vv in self.kind_dict.items()
if kk in cols}
dct_update = {cc: [vv for kk, vv in dct_update.items() if kk in cc][0]
for cc in cols
if any([c in cc for c in dct_update.keys()])}
kind_dict_cols.update(dct_update)
# invert to plot type -> data series
kind_dict_rev = {k: [v for v in kind_dict_cols.keys()
if kind_dict_cols[v] == k]
for k in list(set(kind_dict_cols.values()))}
self._kind_dict_cols = kind_dict_rev
@staticmethod
def _select_dict_dict(dct, key):
'''
From a dictionary of dictionaries select 'all' and update with key.
Select the 'all' key of the input dictionary---if it exists---and
update the value dictionary. If 'all' doesn't exist, return the
'key' value dictionary, if that exists. Otherwise return an
empty dictionary.
Args:
dct (dict): Dictionary holding the dictionaries to be accessed
key (immutable): Dictionary key for secondary access
Returns:
dictionary: selected dictionary
'''
# get relevant part of plotkwargsdict
if 'all' in dct:
_dct_slct = dct['all'].copy()
# update with more specific entry, if present
if key in dct:
_dct_slct.update(dct[key])
elif key in dct:
_dct_slct = dct[key].copy()
else:
_dct_slct = {}
return _dct_slct
def _expand_plotkwargsdict(self):
# generate expanded default dictionary
_plotkwargsdict = dict()
_, ipx, _, ipy = self.pltpgdata._iter_ind[0]
for _, ipx, _, ipy in self.pltpgdata._iter_ind:
kind = list(self._kind_dict_cols.keys())[0]
for kind in self._kind_dict_cols:
plotkey = ipx, ipy, kind
dct = self.plotkwargsdict.copy()
key = plotkey
_plotkwd_slct = self._select_dict_dict(dct, plotkey)
_plotkwargsdict[plotkey] = dict()
# kind = self._kind_dict_cols[kind][0]
for ser in self._kind_dict_cols[kind]:
# default
_plotkwargsdict[plotkey][ser] = self.plotkwargs.copy()
_srskwd_slct = self._select_dict_dict(_plotkwd_slct, ser)
_plotkwargsdict[plotkey][ser].update(_srskwd_slct)
# make sure all entries are copies
for key, val in _plotkwargsdict[plotkey][ser].items():
if hasattr(val, 'copy'):
_plotkwargsdict[plotkey][ser][key] = val.copy()
# as long as it's not a pandas plot we don't need the
# whole colormap for each series
# cmap = _plotkwargsdict[plotkey][ser]['colormap']
# if not '.' in kind and isinstance(cmap, dict):
#
# ser_color = cmap[ser[-1]]
# cmap.clear()
# cmap[ser[-1]] = ser_color
self._plotkwargsdict = _plotkwargsdict
def draw_plots(self):
for nameplot, plot in self.plotdict.items():
plot.draw_plot()
# loop over plots
# update ax!
# call plot method gen_plot_series
# do legend stuff
def gen_plot(self, data_slct, ipltxy, kind, _plotkwargs):
''' Generate plots of the selected kind. '''
ax = self.axarr[ipltxy[0]][ipltxy[1]]
# main call to lpplt
self.data_slct = data_slct
# only generate plot objects, don't draw them
no_draw = {'draw_now': False}
if kind in ['BoxPlot']: # requires special input kwargs
kwargs = dict(data=data_slct, ax=ax,
x=self.pltpgdata.ind_axx[0], **no_draw)
self.current_plot = lpplt.BoxPlot(**kwargs)
# check whether plotting contains a dedicated class for this
# plot kind; if yes, create an instance. ...
elif hasattr(lpplt, kind):
kwargs = dict(data=data_slct, ax=ax, plotkwargs=_plotkwargs,
**no_draw)
self.current_plot = getattr(lpplt, kind)(**kwargs)
# ... if no, it's a pandas plot, for now
else:
# all pandas series are drawn simultaneously... select only the
# first _plotkwargsdict
pkws_arg = {'all': list(_plotkwargs.items())[0][1]}
kwargs = dict(data=data_slct, ax=ax, pd_method=kind,
plotkwargs=pkws_arg, **no_draw)
self.current_plot = lpplt.PlotPandas(**kwargs)
def ax_loop(self):
'''
Loops over
- axis columns
- axis rows
- plot types as defined in self._kind_dict_cols
Selects data for the corresponding subplot/type and calls gen_plot.
'''
for ipltx, slct_ipltx, iplty, slct_iplty, data_slct_0 in self.pltpgdata.get_data():
pass
data_slct_0 = pd.DataFrame(data_slct_0)
index_slct = self.pltpgdata._merge_plt_indices(slct_ipltx, slct_iplty)
kind, kind_cols = list(self._kind_dict_cols.items())[0]
for kind, kind_cols in self._kind_dict_cols.items():
_plotkwargs = self._plotkwargsdict[slct_ipltx, slct_iplty, kind]
title_dict = {'title': '{}\n{}'.format(str(slct_ipltx),
str(slct_iplty))}
for key in _plotkwargs:
print(key)
# 1. title
if (not 'title' in _plotkwargs[key].keys()) \
or ('title' in _plotkwargs[key].keys()
and _plotkwargs[key]['title'] in [False, None]):
_plotkwargs[key].update(title_dict)
# 2. plotkwargs know where they are located
_plotkwargs[key]['gridpos'] = (ipltx, iplty)
col_subset = [c for c in data_slct_0.columns if c in kind_cols]
if not col_subset:
continue
print('Plotting ', kind, index_slct,
self.pltpgdata.ind_pltx, self.pltpgdata.ind_plty)
data_slct = data_slct_0[col_subset]
_indx_drop = [ii for ii in data_slct.index.names
if not ii in self.pltpgdata._ind_ax_all]
if _indx_drop:
data_slct.reset_index(_indx_drop, drop=True, inplace=True)
ipltxy = [ipltx, iplty]
self.gen_plot(data_slct, ipltxy, kind, _plotkwargs)
self.plotdict[slct_ipltx, slct_iplty, kind] = self.current_plot
self.posdict[slct_ipltx, slct_iplty] = (ipltx, iplty)
def _gen_caption_string(self):
'''
Generate caption string to be added to the bottom of the plot page.
'''
self.caption_str = ('n_min={}, n_max={}, data_threshold={}\n' +
'table: {}\nfilt={}')\
.format(*self.pltpgdata.nsample,
self.pltpgdata.data_threshold,
'.'.join([str(self.pltpgdata.sc),
str(self.pltpgdata.table)]),
self.pltpgdata.filt)
def add_caption(self):
''' Add basic information to the bottom of the page. '''
if not 'caption_str' in self.__dict__.keys():
self._gen_caption_string()
plt.figtext(0.05, 0.05, self.caption_str.replace('_', ' '),
va='top', wrap=True)
def get_plot_ax_list(self):
'''
Return all relevant plot indices and objects.
This is useful to make specific changes to the plot after the object
instantiation. The return list is constructed from the
posdict and plotdict attributes. Note: The keys of the plotdict
are (name_x, name_y, plot_kind), the keys of the posdict are
(name_x, name_y).
Returns:
list of tuples: (index_x, name_x, index_y, name_y,
plot_object, axes, plotkind)
for each plot/plot_type
'''
return [(self.posdict[nxyk[:2]][0], nxyk[0],
self.posdict[nxyk[:2]][1], nxyk[1],
p, p.ax, nxyk[2]) for nxyk, p in self.plotdict.items()]
@staticmethod
def add_shared_label(text, ax1, ax2, axis='x', label_offset=0.1,
twinax=False, rotation=None):
'''
Adds an x or y-label between two axes.
'''
label_pos1 = (0 if axis == 'y' and not twinax else 1,
1 if axis == 'x' and twinax else 0)
label_pos2 = (1 if axis == 'y' and twinax else 0,
0 if axis == 'x' and not twinax else 1)
if not twinax:
label_offset *= -1
label_pos = (0.5 * (ax1.transAxes.transform(label_pos1)
+ ax2.transAxes.transform(label_pos2)))
label_pos = ax1.transAxes.inverted().transform(label_pos)
label_pos[1 if not axis=='y' else 0] += label_offset
if axis == 'x':
ax1.set_xlabel(text)
ax1.xaxis.set_label_coords(*label_pos)
if rotation:
                ax1.xaxis.label.set_rotation(rotation)
elif axis == 'y':
ax1.set_ylabel(text)
ax1.yaxis.set_label_coords(*label_pos)
if rotation:
ax1.yaxis.label.set_rotation(rotation)
# %%
if __name__ == '__main__':
import grimsel.auxiliary.maps as maps
from pyAndy import PlotPageData
logging.basicConfig(stream=sys.stdout, level=logging.NOTSET)
logger = logging.getLogger()
logger.setLevel(0)
sc_out = 'out_levels'
slct_nd = 'DE0'
db = 'storage2'
mps = maps.Maps(sc_out, db)
ind_pltx = ['sta_mod']
ind_plty = ['pwrerg_cat']
ind_axx = ['sy']
ind_axy = []
values = ['value_posneg']
series = ['bool_out', 'fl']
table = sc_out + '.analysis_time_series'
stats_data = {'DE0': '%agora%',
'FR0': '%eco2%',
'CH0': '%entsoe%'}
filt = [
('nd', [slct_nd]),
('swfy_vl', ['yr2015', 'nan'], ' LIKE '),
# ('fl', ['%nuclear%'], ' LIKE '),
# ('swchp_vl', ['chp_off']),
# ('swcadj_vl', ['adjs']),
('run_id', [0, -1]),
('wk_id', [5]),
('sta_mod', ['%model%', stats_data[slct_nd]], ' LIKE '),
# ('sta_mod', ['%model%'], ' LIKE '),
('pwrerg_cat', ['%pwr%'], ' LIKE '),
('fl', ['dmnd', '%coal%', '%nuc%', '%lig%', '%gas', 'load_prediction_d',
'wind_%', '%photo%', '%bio%', 'lost%', 'dmnd_flex'], ' LIKE '),
# ('fl', ['dmnd', 'load_prediction_d'], ' LIKE ')
]
post_filt = [] # from ind_rel
lst_series = aql.read_sql(db, sc_out, 'def_pp_type')['pt'].tolist()
dict_series_order = {'BAL': -100,
'WAS': -50,
'NUC': -75,
'LIG': 10,
'natural_gas': 40,
'reservoir': 2000,
'export': -200,
'import': 1000,
'run_of_river': -10,
'wind_onshore': 4000,
'wind_offshore': 4000,
'photovoltaics': 4001,
}
df_series_order = aql.read_sql(db, sc_out, 'def_plant', keep=['pp', 'pt_id', 'nd_id', 'fl_id'])
df_series_order = df_series_order.join(aql.read_sql(db, sc_out, 'def_pp_type').set_index('pt_id')['pt'], on='pt_id')
df_series_order = df_series_order.join(aql.read_sql(db, sc_out, 'def_fuel').set_index('fl_id')['fl'], on='fl_id')
df_series_order = df_series_order.join(aql.read_sql(db, sc_out, 'def_node').set_index('nd_id')['nd'], on='nd_id')
df_series_order['pp_red'] = df_series_order.pp.apply(lambda x: x.split('_')[1])
df_series_order = df_series_order.loc[df_series_order.nd.isin([f for f in filt if 'nd' == f[0]][0][1])]
df_series_order['rank'] = np.inf
for icol in ['pp_red', 'pp_red', 'pt']:
df_order_dict = pd.DataFrame.from_dict(dict_series_order, columns=['rank_new'], orient='index')
df_order_dict.index.names = [icol]
df_series_order = df_series_order.join(df_order_dict, on=df_order_dict.index.names).fillna(1e10)
df_series_order['rank'] = df_series_order[['rank', 'rank_new']].min(axis=1)
df_series_order = df_series_order.drop('rank_new', axis=1)
series_order = df_series_order.sort_values(['rank', 'pp']).fl.unique().tolist()
data_kw = {'filt': filt, 'post_filt': post_filt, 'data_scale': {'dmnd': -1},
'totals': {'others': ['waste_mix'],
'total_dmnd': ['dmnd']
},
'data_threshold': 1e-9, 'aggfunc': np.sum, 'harmonize': False,
'series_order': series_order}
do = PlotPageData(db, ind_pltx, ind_plty, ind_axx, values, series,
table, **data_kw)
do.data = do.data.fillna(0).applymap(float)
# %
# delete data aggregated in others
do.data = do.data.loc[:, ~do.data.columns.isin(do.totals['others'])]
do.data = do.data.loc[:, ~do.data.columns.isin([c for c in do.data.columns if any(comp in c for comp in do.totals['total_dmnd'])])]
# %%
color=mps.get_color_dict(series[-1])
color.update({'other': '#99aaaa',
'others': '#99aaaa',
'other_negative': '#ffffff',
'other_ren': '#ffffff',
'DE_DMND': 'k',
'CH_DMND': 'k',
'co2_intensity': 'g',
'hydro_total': color['reservoir'],
'dmnd_flex': 'k',
'biomass': color['bio_all'],
'biogas': color['bio_all'],
'natural_gas_cc': color['natural_gas'],
'natural_gas_chp': color['natural_gas'],
'natural_gas_others': color['natural_gas'],
'natural_gas_turbines': color['natural_gas'],
'pumped_hydro_pumping': color['pumped_hydro'],
'load_prediction_d': color['dmnd']},)
color.update({sr[-1]: 'k' for sr in do.data.columns if not sr[-1] in color})
color['extra'] = 'k'
color['total_dmnd'] = color['dmnd']
# color = False
legend = (('stats_agora',), ('pwr',), 'StackedArea')
# plot level series level
plotkwargsdict = {'all': {('value_posneg', 'False', 'natural_gas'): dict(edgewidth=2, edgecolor='b')},
'all': {('', '', 'total_dmnd'): dict(linewidth=3, markersize=10, marker='.')},
}
# plotkwargsdict = {}
layout_kw = {'left': 0.1, 'right': 0.875, 'wspace': 0.2, 'hspace': 0.2, 'bottom': 0.1, 'top': 0.8}
label_kw = {'label_format': ' ,%.2f', 'label_subset':[-1], 'label_threshold':-1e-6,
'label_ha': 'right', 'loc_labels': 1}
plot_kw = dict(kind_def='StackedArea',
kind_dict={'total_dmnd': 'StepPlot'},
plotkwargsdict=plotkwargsdict,
stacked=True, on_values=False, sharex=True, sharey=True,
colormap=color, barwidth=0.1, linewidth=0, edgecolor=None,
edgewidth=0, marker=None,
reset_xticklabels=False,
legend=legend,#(('stats_agora',), ('pwr',), 'LinePlot'),
draw_now=True,
ylabel='Power [MW]', step='post', #ylim=dict(bottom=0),
)
from collections import OrderedDict
#
# new_data = np.abs(do.data.index.get_level_values('sy').values - 800) * 100
# do.data.loc['model', 'extra'] = new_data[:int(len(new_data) / 2)]
#with plt.style.context(('ggplot')):
self = PlotTiled(do, **layout_kw, **label_kw, **plot_kw)
self.add_legends(loc=1, ncol=3, string_replace_dict=OrderedDict({'(': '', '(value_posneg': '', 'False': '', 'True': '', ', ': '', '\'': '', ')': ''}))
# %%
| StarcoderdataPython |
5144881 | <gh_stars>1-10
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
def evaluate(b, dir):
gt = np.zeros([7, 168, 168])
for num in range(7):
gt[num] = np.asarray(Image.open(dir + '/train/' + str(num + 1) + '.bmp'))
ang = np.zeros([7, 2])
lvector = np.zeros([7, 3]) # the direction of light
for line in open(dir + '/train.txt'):
i, ang1, ang2 = line.strip().split(",")
i = int(i)
ang1 = int(ang1)
ang2 = int(ang2)
ang[i - 1] = (ang1, ang2)
lvector[i - 1] = (np.sin(np.pi * ang1 / 180) * np.cos(np.pi * ang2 / 180), np.sin(np.pi * ang2 / 180),
np.cos(np.pi * ang1 / 180) * np.cos(np.pi * ang2 / 180))
lvector = -lvector
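    # Interpretation (not from the original author): ang1/ang2 are degrees mapped to a unit light
    # direction via spherical coordinates (sin a1*cos a2, sin a2, cos a1*cos a2); the sign flip above
    # assumes the file stores light-to-scene angles. np.einsum('ij,jkl->ikl', lvector, b) then renders
    # each of the 7 images as a per-pixel dot product of a light direction with the 3-channel field b.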
img = np.clip(np.einsum('ij,jkl->ikl', lvector, b), 0, 255)
err = np.mean(np.mean(gt - img, axis=-1), axis=-1)
plt.figure()
for num in range(7):
plt.subplot(2, 7, 1 + num)
plt.imshow(gt[num], cmap='gray')
plt.title('Ang1={} Ang2={}\nMean Error={:.3f}'.format(ang[num][0], ang[num][1], err[num]))
plt.subplot(2, 7, 8 + num)
plt.imshow(img[num], cmap='gray')
plt.show()
return img
| StarcoderdataPython |
142802 | #!/usr/bin/env python3
#
# MIT License
#
# Copyright (c) 2020-2021 EntySec
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import re
import base64
import random
import string
##############################################################
# Lempel-Ziv-Stac decompression
# BitReader and RingList classes
#
# Copyright (C) 2011 <NAME> - FiloSottile
# filosottile.wiki gmail.com - www.pytux.it
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################
import collections
class BitReader:
def __init__(self, data_bytes):
self._bits = collections.deque()
for byte in data_bytes:
for n in range(8):
self._bits.append(bool((byte >> (7 - n)) & 1))
def getBit(self):
return self._bits.popleft()
def getBits(self, num):
res = 0
for i in range(num):
res += self.getBit() << num - 1 - i
return res
def getByte(self):
return self.getBits(8)
def __len__(self):
return len(self._bits)
class RingList:
def __init__(self, length):
self.__data__ = collections.deque()
self.__full__ = False
self.__max__ = length
def append(self, x):
if self.__full__:
self.__data__.popleft()
self.__data__.append(x)
if self.size() == self.__max__:
self.__full__ = True
def get(self):
return self.__data__
def size(self):
return len(self.__data__)
def maxsize(self):
return self.__max__
def __getitem__(self, n):
if n >= self.size():
return None
return self.__data__[n]
def LZSDecompress(data, window=RingList(2048)):
reader = BitReader(data)
result = ''
while True:
bit = reader.getBit()
if not bit:
char = reader.getByte()
result += chr(char)
window.append(char)
else:
bit = reader.getBit()
if bit:
offset = reader.getBits(7)
if offset == 0:
# EOF
break
else:
offset = reader.getBits(11)
lenField = reader.getBits(2)
if lenField < 3:
length = lenField + 2
else:
lenField <<= 2
lenField += reader.getBits(2)
if lenField < 15:
length = (lenField & 0x0f) + 5
else:
lenCounter = 0
lenField = reader.getBits(4)
while lenField == 15:
lenField = reader.getBits(4)
lenCounter += 1
length = 15 * lenCounter + 8 + lenField
for i in range(length):
char = window[-offset]
result += chr(char)
window.append(char)
return result, window
class StringTools:
@staticmethod
def extract_strings(binary_data):
strings = re.findall("[^\x00-\x1F\x7F-\xFF]{4,}", binary_data)
return strings
@staticmethod
def xor_string(string):
result = ""
for c in string:
result += chr(ord(c) ^ len(string))
return result
@staticmethod
def base64_string(string):
        string = base64.b64encode(string.encode())
return string.decode()
@staticmethod
def random_string(length=16, alphabet=string.ascii_letters + string.digits):
return "".join(random.choice(alphabet) for _ in range(length))
@staticmethod
def lzs_decompress(data, window=RingList(2048)):
result, window = LZSDecompress(data, window)
return result
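# --- Illustrative usage (added sketch, not part of the original module) ---
# A minimal demonstration of the helpers above, guarded so importing the module stays side-effect free.
if __name__ == "__main__":
    token = StringTools.random_string(8)
    print("random token:", token)
    obfuscated = StringTools.xor_string("secret")
    # XOR with the same length-derived key is its own inverse, so applying it twice restores the input
    print("xor round-trip:", StringTools.xor_string(obfuscated))
    print("base64:", StringTools.base64_string("payload"))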
| StarcoderdataPython |
5174594 | # type: ignore
from .batchrequest import *
from .scraperequest import *
| StarcoderdataPython |
3258104 | """Utility, helps with gen3."""
import os
import urllib3
import requests
import sys
import json
from gen3.auth import Gen3Auth
from gen3.submission import Gen3Submission
from gen3_etl.utils.collections import grouper
import logging
import hashlib
import multiprocessing as mp
from requests.packages.urllib3.exceptions import InsecureRequestWarning
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
os.environ['CURL_CA_BUNDLE'] = ''
logger = logging.getLogger('utils.gen3')
DEFAULT_CREDENTIALS_PATH = os.path.join('config', 'credentials.json')
DEFAULT_HOST = 'localhost'
DEFAULT_ENDPOINT = 'https://{}'.format(DEFAULT_HOST)
def submission_client(endpoint=DEFAULT_ENDPOINT, refresh_file=DEFAULT_CREDENTIALS_PATH):
"""Create authorized client."""
auth = Gen3Auth(endpoint, refresh_file=refresh_file)
assert auth, 'should return an auth client'
submission_client = Gen3Submission(endpoint, auth)
assert submission_client, 'should return a submission client'
assert 'delete_program' in dir(submission_client), 'should have a delete_program method'
assert 'create_program' in dir(submission_client), 'should have a create_program method'
return submission_client
def create_node(submission_client, program_name, project_code, node):
"""Create node(s)."""
try:
nodes = node
if not isinstance(node, (list,)):
nodes = [node]
response_text = None
response = None
response = submission_client.submit_record(program_name, project_code, nodes)
# response = json.loads(response_text)
# logger.info(f"create_node: status_code:{response['code']}")
assert response['code'] == 200, 'could not create {} {}'.format(nodes[0]['type'], response_text)
logger.info('created {} {}(s)'.format(len(response['entities']), response['entities'][0]['type']))
return response
except Exception as e:
logger.error(f"create_node: error {e}")
logger.error(f"create_node: error {response_text} {nodes}")
if response:
for entity in response.get('entities', []):
for error in entity.get('errors', []):
logger.error('{} {} {}'.format(error['type'], entity['type'], entity))
for error in response.get('transactional_errors', []):
logger.error(' transactional_error {}'.format(error))
logger.error(json.dumps(response))
raise e
# if error['type'] == 'INVALID_LINK':
# print('WARNING INVALID_LINK {} {}'.format(entity['type'],entity), file=sys.stderr)
# else:
# print('ERROR {} {} {}'.format(error['type'], entity['type'],entity), file=sys.stderr)
# raise e
def delete_type(submission_client, program, project, batch_size, t):
response = submission_client.export_node(program, project, node_type=t, fileformat='json')
# # pool = mp.Pool(mp.cpu_count())
def collect_result(delete_response):
delete_response = delete_response.json()
assert delete_response['code'] == 200, delete_response
logger.info('deleted {} {}'.format(t, delete_response['message']))
if 'data' not in response or len(response['data']) == 0:
logger.warning(f'No {t} to delete {response}')
else:
for ids in grouper(batch_size, [n['node_id'] for n in response['data']]):
logger.info(f'deleting {len(ids)}')
ids = ','.join(ids)
collect_result(submission_client.delete_record(program, project, ids))
# # pool.apply_async(submission_client.delete_record, args=(program, project, ids), callback=collect_result)
# Close Pool and let all the processes complete
# postpones the execution of next line of code until all processes in the queue are done
# # pool.close()
# # pool.join()
def delete_all(submission_client, program, project, batch_size=200, types=['submitted_methylation', 'aliquot', 'sample', 'demographic', 'case', 'experiment']):
"""Delete all nodes in types hierarchy."""
for t in types:
print('{}-{}.{}'.format(program, project, t))
try:
delete_type(submission_client, program, project, batch_size, t)
except Exception as e:
print(e)
def create_experiment(submission_client, program, project, submitter_id):
"""Create experiment."""
experiment = {
'*projects': {'code': project},
'*submitter_id': submitter_id,
'type': 'experiment'
}
return create_node(submission_client, program, project, experiment)
# https://github.com/uc-cdis/gdcdatamodel/blob/develop/gdcdatamodel/models/__init__.py#L163
def get_class_tablename_from_id(_id):
return 'node_{}'.format(_id.replace('_', ''))
# https://github.com/uc-cdis/gdcdatamodel/blob/develop/gdcdatamodel/models/__init__.py#L370
def generate_edge_tablename(src_label, label, dst_label):
"""Generate a name for the edge table.
Because of the limit on table name length on PostgreSQL, we have
to truncate some of the longer names. To do this we concatenate
the first 2 characters of each word in each of the input arguments
up to 10 characters (per argument). However, this strategy would
very likely lead to collisions in naming. Therefore, we take the
first 8 characters of a hash of the full, un-truncated name
*before* we truncate and prepend this to the truncation. This
gets us a name like ``edge_721d393f_LaLeSeqDaFrLaLeSeBu``. This
is rather an undesirable workaround. - jsm
"""
tablename = 'edge_{}{}{}'.format(
src_label.replace('_', ''),
label.replace('_', ''),
dst_label.replace('_', ''),
)
# If the name is too long, prepend it with the first 8 hex of it's hash
# truncate the each part of the name
if len(tablename) > 40:
oldname = tablename
logger.debug('Edge tablename {} too long, shortening'.format(oldname))
tablename = 'edge_{}_{}'.format(
str(hashlib.md5(tablename.encode('utf-8')).hexdigest())[:8],
"{}{}{}".format(
''.join([a[:2] for a in src_label.split('_')])[:10],
''.join([a[:2] for a in label.split('_')])[:7],
''.join([a[:2] for a in dst_label.split('_')])[:10],
)
)
logger.debug('Shortening {} -> {}'.format(oldname, tablename))
return tablename
| StarcoderdataPython |
5070642 | <reponame>huent189/crnn
from __future__ import print_function
from __future__ import division
import numpy as np
import tensorflow as tf
import codecs
def testCustomOp(feedMat, corpus, chars, wordChars):
"decode using word beam search. Result is tuple, first entry is label string, second entry is char string."
# TF session
sess=tf.Session()
sess.run(tf.global_variables_initializer())
# load custom TF op
word_beam_search_module = tf.load_op_library('../cpp/proj/TFWordBeamSearch.so')
# input with shape TxBxC
mat=tf.placeholder(tf.float32, shape=feedMat.shape)
# decode using the "Words" mode of word beam search with beam width set to 25 and add-k smoothing to 0.0
assert(len(chars)+1==mat.shape[2])
decode=word_beam_search_module.word_beam_search(mat, 25, 'Words', 0.0, corpus.encode('utf8'), chars.encode('utf8'), wordChars.encode('utf8'))
# feed matrix of shape TxBxC and evaluate TF graph
res=sess.run(decode, { mat:feedMat })
# result is string of labels terminated by blank (similar to C-strings) if shorter than T
blank=len(chars)
s=''
for label in res[0]:
if label==blank:
break
s+=chars[label] # map label to char
return (res[0], s)
def loadMat(fn):
"load matrix from csv and apply softmax"
mat=np.genfromtxt(fn, delimiter=';')[:,:-1] #load matrix from file
maxT,_=mat.shape # dim0=t, dim1=c
# apply softmax
res=np.zeros(mat.shape)
for t in range(maxT):
y=mat[t,:]
e=np.exp(y)
s=np.sum(e)
res[t,:]=e/s
# expand to TxBxC
return np.expand_dims(res,1)
def testMiniExample():
"mini example, just to check that everything is working"
corpus='a ba' # two words "a" and "ba", separated by whitespace
chars='ab ' # the first three characters which occur in the matrix (in this ordering)
wordChars='ab' # whitespace not included which serves as word-separating character
mat=np.array([[[0.9, 0.1, 0.0, 0.0]],[[0.0, 0.0, 0.0, 1.0]],[[0.6, 0.4, 0.0, 0.0]]]) # 3 time-steps and 4 characters per time time ("a", "b", " ", blank)
res=testCustomOp(mat, corpus, chars, wordChars)
print('')
print('Mini example:')
print('Label string: ',res[0])
print('Char string:', '"'+res[1]+'"')
def testRealExample():
"real example using a sample from a HTR dataset"
dataPath='../data/bentham/'
corpus=codecs.open(dataPath+'corpus.txt', 'r', 'utf8').read()
chars=codecs.open(dataPath+'chars.txt', 'r', 'utf8').read()
wordChars=codecs.open(dataPath+'wordChars.txt', 'r', 'utf8').read()
mat=loadMat(dataPath+'mat_2.csv')
res=testCustomOp(mat, corpus, chars, wordChars)
print('')
print('Real example:')
print('Label string: ',res[0])
print('Char string:', '"'+res[1]+'"')
if __name__=='__main__':
# test custom op
testMiniExample()
testRealExample()
| StarcoderdataPython |
129670 | import EmailParser.pst_parser
"""
if __name__ == "__main__":
pass
else:
from EmailBoxClass import EmailBox
EmailBox.main = EmailParser.pst_parser.main
"""
| StarcoderdataPython |
313936 | '''
'''
############################################################################
from optparse import OptionParser
import sys
import re
import numpy as np
import os
import sys
import gzip
from subprocess import check_call
def parse_options():
parser = OptionParser()
parser.add_option("-f", "--compressed_bin_file", dest="compressed_bin_file",
help="Compressed bin file", metavar="BFILE")
parser.add_option("-c", "--chromosome", dest="chromosome",
help="Chromosome of compressed bin file", metavar="CHR")
parser.add_option("-s", "--synteny_file", dest="synteny_file",
help="Synteny file", metavar="SFILE")
parser.add_option("-o", "--out_folder", dest="out_folder",
help="Folder output files", metavar="OUTFOLDER")
parser.add_option("-r", "--ref_species", dest="ref_species",
help="Reference species designation within synteny file", metavar="REF")
parser.add_option("-t", "--target_species", dest="target_species",
help="Target species (matches compressed bin data) designation within synteny file",
metavar="TARGET")
parser.add_option("-w", "--width", dest="width",
help="Optional: width in bins of compressed bin data", default=3500,
metavar="TARGET")
parser.add_option("-b", "--bin_size", dest="bin_size",
help="Optional: size of bins in bp", default=500,
metavar="BINSIZE")
(options, args) = parser.parse_args()
return options
def parse_synteny_file(file, ref_sp, target_sp):
target = []
ref = []
with open(file, 'r') as f:
for line in f:
line = line.rstrip()
if (len(line) == 0):
pass
elif (line[0] == '#'):
pass
elif(line[0] == '>'):
pass
else:
(species, remain) = line.split('.')
(remain, strand) = remain.split(' ')
(chr_, remain) = remain.split(':')
(loc1, loc2) = remain.split('-')
chr_ = re.sub('chr', '', chr_)
if (species == ref_sp):
ref.append(chr_ + '-' + loc1 + '-' + loc2)
elif(species == target_sp):
target.append([chr_, loc1, loc2, strand])
return ref, target
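# parse_synteny_file returns two position-matched lists: ref holds '<chr>-<start>-<end>' strings for the
# reference species and target holds [chr, start, end, strand] entries for the target species; the caller
# below indexes both with the same i, so the synteny file is assumed to list the blocks pairwise.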
def read_compressed_bin(file, width):
init_max_size = int(1e6)
bin_counts = np.zeros(init_max_size)
bin_bin_counts = np.zeros((init_max_size, init_max_size))
max_bin = 0
if (file[-2:] == 'gz'):
f = gzip.open(file, 'rt')
else:
f = open(file, 'r')
for line in f:
line = line.rstrip()
if (line[0] == '#'):
chr_, bin_, count = line[1:].split('\t')
bin_ = int(bin_)
count = float(count)
bin_counts[bin_] = count
if (bin_ > max_bin):
max_bin = bin_
else:
chr_, bin1, bin2, count = line.split('\t')
bin1 = int(bin1)
bin2 = int(bin2)
count = float(count)
bin_bin_counts[bin1][bin2] = count
f.close()
    print('Done reading ' + file)
return bin_counts, bin_bin_counts, max_bin
options = parse_options()
compressed_bin_file = options.compressed_bin_file
synteny_file = options.synteny_file
out_folder = options.out_folder
ref_species = options.ref_species
target_species = options.target_species
width = int(options.width)
chromosome = options.chromosome
bin_size = int(options.bin_size)
synteny_ref, synteny_target = parse_synteny_file(synteny_file, ref_species, target_species)
bin_counts, bin_bin_counts, max_bin = read_compressed_bin(compressed_bin_file, width)
for i in range(0, len(synteny_ref)):
ref_name = synteny_ref[i]
chr_, loc1, loc2, strand = synteny_target[i]
if (chr_ == chromosome):
bin1 = int(int(loc1) / bin_size)
bin2 = int(int(loc2) / bin_size)
outfile = os.path.join(out_folder, ref_name + '.txt')
with open(outfile, 'w') as out:
sub_bin_counts = bin_counts[bin1:bin2]
sub_bin_bin_counts = bin_bin_counts[bin1:bin2, bin1:bin2]
# Flip if negative strand
if (strand == '-'):
sub_bin_counts = sub_bin_counts[::-1]
sub_bin_bin_counts = sub_bin_bin_counts[::-1, ::-1]
for j in range(0, len(sub_bin_counts)):
count = sub_bin_counts[j]
if (count > 0):
out.write('#' + chromosome + '\t' + str(j) + '\t' + str(count) + '\n')
for j in range(0, len(sub_bin_counts)):
left = np.max((0, j - width))
right = np.min((j + width, len(sub_bin_counts)))
for k in range(left, right):
count = sub_bin_bin_counts[j,k]
if (count > 0):
out.write(chromosome + '\t' + str(j) + '\t' + str(k) + '\t'+ str(count) + '\n')
check_call(['gzip', outfile])
| StarcoderdataPython |
1863458 | <reponame>EtcAug10/Domaineer
#!/usr/bin/env python3
"""
Copyright (C) 2021 Semi-Auto bot tool
made by c0del1ar a.k.a <NAME> and it is licensed
"""
class Color:
gray = "\033[30;1m"
red = "\033[31;1m"
green = "\033[32;1m"
yellow = "\033[33;1m"
blue = "\033[34;1m"
pink = "\033[35;1m"
cyan = "\033[36;1m"
white = "\033[37;1m"
default = "\033[37;0m"
_version_ = "1.3.6.4"
_author_ = "c0del1ar"
_name_ = "Domaineer"
banner = ["""
{}##
###
## {}##### {}Domaineer
## {}# ### {}by c0del1ar |
## {}# ##### {}Version {}{} |
### {}##{} |
####### {}##{} This FREE tool is |
{}##{} Licensed |
{}############{} ________/_____/___/__/{}
""".format(Color.green,Color.gray,Color.green,Color.gray,Color.green,Color.gray,Color.white,_version_,Color.green,Color.gray,Color.green,Color.gray,Color.green,Color.gray,Color.green,Color.gray,Color.green,Color.default),
"""
{}###########
#############
### ###
### ### {}Domain Engineer
{}### {}##{} ## {}by c0del1ar
## {}### {}## {}#
{}## {}### {}## {}##
{}## {}## {}## {}### {}v{}
{}## ## {}###
{}## {}######### {}#
{}#################### {}##
#########################{}
""".format(Color.gray,Color.green,Color.gray,Color.green,Color.gray,Color.green,Color.gray,Color.green,Color.gray,Color.green,Color.gray,Color.green,Color.gray,Color.green,Color.gray,Color.green,Color.gray,Color.white,_version_,Color.green,Color.gray,Color.green,Color.gray,Color.green,Color.gray,Color.green,Color.default)]
prefix = "\u2699"
about = ["Domain Engineer or called as Domaineer is a tool to extract or dump any datas of domains in hole net lines.\n\nWhen you use this tool, It means you are accepting all of the Terms and Conditions in Ethical Hacker's guide book. Hope you've read it.","This bot helps you in doing penetration testing, learning the ins and outs of domains, analyzing objects, even doing stupid things like hacking your own domain and showing it off in front of your friends.","A wide variety of hacking tools are here, use them with great care. Don't worry, there is no backdoor logger here. The bot will continue to be updated if bugs and changes are found to improve the quality of the bot and update exploits"]
tool_choices = ["Grab Sites","Reverse IP","CMS Scanner","Google SE","DFuzzer","Domain to IP"]
| StarcoderdataPython |
9710299 | from numpy import matrix
from numpy import shape
from numpy import transpose
from laff.matmat.trsm_lnu import trsm_lnu
from laff.matmat.trsm_utn import trsm_utn
from laff.matmat.trsm_ltu import trsm_ltu
from laff.matmat.trsm_unn import trsm_unn
import sys
def trsm(uplo, trans, diag, A, B ):
"""
Solve A X = B or trans( A X ) = trans( B ), overwriting B with X
Parameter uplo indicates whether to use the lower triangular or
upper triangular part of A:
if uplo == 'Lower triangular':
A is lower triangular
    elif uplo == 'Upper triangular':
        A is upper triangular
Parameter trans indicates whether to transpose A:
if trans == 'No transpose':
solve A X = B
elif trans == 'Transpose':
solve trans( A X ) = trans( B )
Parameter diag indicates whether A has an (implicit) unit diagonal:
if diag == 'Unit diagonal':
A has an implicit unit diagonal
elif diag == 'Nonunit diagonal':
Use the entries on the diagonal of A
"""
"""
Check parameters
"""
    assert (uplo == 'Lower triangular' or uplo == 'Upper triangular'), "laff.trsm: illegal value for uplo"
    assert (trans == 'No transpose' or trans == 'Transpose'), "laff.trsm: illegal value for trans"
    assert (diag == 'Nonunit diagonal' or diag == 'Unit diagonal'), "laff.trsm: illegal value for diag"
    assert type(A) is matrix and len(A.shape) == 2, \
        "laff.trsm: matrix A must be a 2D numpy.matrix"
    assert type(B) is matrix and len(B.shape) == 2, \
        "laff.trsm: matrix B must be a 2D numpy.matrix"
"""
Extract sizes
"""
m_A, n_A = A.shape
m_B, n_B = B.shape
if 'Lower triangular' == uplo:
if 'No transpose' == trans:
if 'Unit diagonal' == diag:
trsm_lnu( A, B )
else:
print( "laff.trsm: diag == Nonunit diagonal not yet implemented for Lower triangular" )
sys.exit( 0 )
else:
if 'Unit diagonal' == diag:
trsm_ltu( A, B )
else:
print( "laff.trsm: trans == Transpose not yet implemented for Lower triangular, nonunit diagonal" )
sys.exit( 0 )
else: #'Upper triangular' == uplo
if 'No transpose' == trans:
if 'Unit diagonal' == diag:
print( "laff.trsm: trans == No transpose not yet implemented for Upper triangular, unit diagonal" )
sys.exit( 0 )
else:
trsm_unn( A, B )
else:
if 'Unit diagonal' == diag:
print( "laff.trsm: diag == Unit diagonal not yet implemented for Upper triangular" )
sys.exit( 0 )
else:
trsm_utn( A, B )
| StarcoderdataPython |
3256302 | <gh_stars>0
#!/usr/bin/env python3
#coding=utf-8
class AscertainmentBias(object):
"""Mixin to test for Ascertainment Bias"""
def test_ascertainment_character(self):
sequences = self.xml.findall('./data/sequence')
p = './/distribution[@id="likelihood"]/distribution/data/data'
for part in self.xml.findall(p):
try:
site = int(part.get('filter').split("-")[0])
except:
print("Invalid filter %r for %s" % (part.get('filter'), part.get('id')))
raise
for seq in sequences:
site_zero = seq.get('value')[0]
if site_zero not in ('0', '?', '-'):
raise AssertionError(
"Expected site zero to be 0/?/- for ascertainment"
)
def test_treeLikelihood_corrects_for_ascertainment(self):
p = './/distribution[@id="likelihood"]/distribution/data'
for data in self.xml.findall(p):
assert data.get('ascertained') == 'true'
def test_treeLikelihood_has_exclude_set_correctly(self):
p = './/distribution[@id="likelihood"]/distribution/data'
for data in self.xml.findall(p):
# not inclusive
assert data.get('excludeto') == '1', "%s does not have exclude set to 1" % data.get('id')
| StarcoderdataPython |
154193 | <reponame>8Banana/dotfiles
import os
import pathlib
import shutil
import stat
import sys
import time
import types
from enum import Enum, auto
import importlib.util
import socket
import json
from multicomputering import Packer
class WorkerStates(Enum):
Listening = auto()
Connecting = auto()
PreparingWork = auto()
Working = auto()
class ComputerWorker:
_GUID_SEP = '0x27177c1797dc03ee853922f411bdf83f55e9ed2dcd953a4369f9b1a454e60fa0'.encode('utf-8')
def __init__(self, sock):
self.state = WorkerStates.Listening
self.sock = sock
self.workspace = {}
self._loc = None
self._packages_loc = None
self.results = {}
def ready_filesys(self, loc):
self._loc = os.path.dirname(os.path.abspath(__file__))
self._packages_loc = os.path.join(
self._loc, '..', '.multicomputering_packages_' + str(id(self)))
sys.path.append(loc or self._packages_loc)
pathlib.Path(self._packages_loc).mkdir(parents=True, exist_ok=True)
def clean_up_filesys(self):
shutil.rmtree(self._packages_loc, onerror=self.remove_readonly)
def start(self, loc=None):
self.ready_filesys(loc)
print('doing task :)')
self.recv_init()
self.recv_code()
self.recv_data()
self.wait_for_pulse()
self.run()
print('Done!')
self.clean_up_filesys()
self.disconnect()
def recv_init(self):
pass
def recv_code(self, *args):
data = self.recv()
data = json.loads(data.decode('utf-8'))
for module_name, contents in data.items():
Packer.write_package(self._packages_loc, module_name, contents)
self.workspace[module_name] = contents
self.pulse()
def recv_data(self, *args):
data = self.recv()
data = json.loads(data.decode('utf-8'))
Packer.write_data(
self._packages_loc, '_remote_data', data)
self.pulse()
def reflect(self):
pass
def clear_callables(self):
pass
def run(self):
py_obj = importlib.util.spec_from_file_location(
'main', os.path.join(
self._packages_loc, 'main' + '.py'))
module = importlib.util.module_from_spec(py_obj)
py_obj.loader.exec_module(module)
result = module.main()
self.send(result.encode('utf-8'))
def disconnect(self):
self.sock.close()
raise SystemExit(0)
def send(self, *args, **kwargs):
self.sock.sendall(*args, **kwargs)
def recv(self):
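        # Wire format: the peer sends a 64-byte big-endian length prefix followed by exactly that many
        # payload bytes; recv() strips the prefix and returns only the payload.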
buffer = bytearray()
while True:
buffer += self.sock.recv(4096)
if len(buffer) >= 64:
bytes_target = int.from_bytes(buffer[:64], 'big')
buffer = buffer[64:]
break
while len(buffer) != bytes_target:
buffer += self.sock.recv(4096)
return buffer
def pulse(self):
self.send(b'wololoo')
def wait_for_pulse(self):
p = self.recv()
assert(p == b'wololoo')
@staticmethod
def remove_readonly(func, path, _):
print(path)
"Clear the readonly bit and reattempt the removal"
os.chmod(path, stat.S_IWRITE)
func(path)
def handler(sock, addr):
pc = ComputerWorker(sock)
try:
pc.start()
except KeyboardInterrupt as e:
pc.clean_up_filesys()
raise e
def main():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('localhost', int(sys.argv[1])))
sock.listen(5)
while True:
(clientsock, address) = sock.accept()
print("Got client!", address)
handler(clientsock, address)
if __name__ == '__main__':
main()
| StarcoderdataPython |
8003185 | from pymoo.factory import get_problem, get_reference_directions, get_visualization
from pymoo.util.plotting import plot
x = [0.040971105531507235,0.550373235584878,0.6817311625009819,0.6274478938025135,0.9234111071427142,0.02499901960750534,0.136171616578574,0.9084459589232222,0.21089363254881652,0.08574450529306678,0.20551052286248087,0.43442188671029464]
n_var = len(x)
n_obj = 3
problems = ["dtlz1", "dtlz2", "dtlz3", "dtlz4", "dtlz5", "dtlz6", "dtlz7"]
for problem in problems:
p = get_problem(problem, n_var, n_obj)
r = p.evaluate(x)
print(r)
# PYMOO
# [ 9.86928923 8.06270417 419.74214702]
# [1.35981478 1.59402999 0.13503034]
# [566.92680262 664.57456923 56.29613817]
# [2.09958592e+000 3.83693240e-026 5.83474399e-139]
# [1.41890847 1.54166358 0.13503034]
# [6.42107287 7.40537588 0.63167125]
# [ 0.04097111 0.55037324 17.32742807]
# GOLANG
# [9.869289225575503 8.062704169938133 419.74214702336616]
# [1.3598147826944689 1.5940299863405385 0.13503034348631712]
# [566.9268026207471 664.5745692269643 56.296138168016384]
# [2.0995859197111355 3.8369323955770535e-26 5.834743988703213e-139]
# [1.4189084710787399 1.5416635791534465 0.13503034348631712]
# [6.421072865759349 7.405375882273675 0.6316712455606305]
# [0.040971105531507235 0.550373235584878 17.32742807181844]
| StarcoderdataPython |
9704506 | # Convert a Rogue Python file into a CPSW YAML file
import os
from collections import OrderedDict
import yaml
import pyrogue as pr
from version import CPSW_YAML_SCHEMA_VERSION
class YamlConverter:
"""
Convert a rogue Python device object into CPSW YAML, and write the YAML into a file.
"""
# Default values as required by the CPSW YAML specs
MMIO_DEVICE_CLASS = "MMIODev"
CONFIG_PRIO_VALUE = 1
ROOT_DEVICE_SIZE = 8
SEQUENCE_COMMAND_OFFSET = 0
CHILD_DEVICE_CLASS = "IntField"
SEQUENCE_COMMAND_CLASS = "SequenceCommand"
CHILD_DEVICE_BYTE_ORDER = "BE"
def __init__(self, pyrogue_device):
"""
Initialize the Converter.
Parameters
----------
pyrogue_device : Device
The Rogue device object, from which its CPSW YAML representation is to be formed.
"""
self._pyrogue_device = pyrogue_device
self._serialized_data = OrderedDict()
def convert(self, export_filename, export_dirname="output"):
"""
Perform the conversion, i.e. dumping the Rogue device object's data into a YAML-formatted file.
The output file will be saved into the output/ directory, which is under the working directory.
Parameters
----------
export_filename : str
The name of the output file
export_dirname : str
The name of the output directory
"""
self._serialize_rogue_data()
self._export_to_yaml(export_filename, export_dirname)
def _serialize_rogue_data(self):
"""
Serialize the Rogue device object.
"""
self._serialized_data = OrderedDict()
if hasattr(self._pyrogue_device, "name"):
name = self._pyrogue_device.name
self._serialized_data[name] = ''.join(['&', name])
self._serialized_data["__root__"] = OrderedDict()
if hasattr(self._pyrogue_device, "description"):
self._serialized_data["__root__"]["description"] = self._pyrogue_device.description
self._serialized_data["__root__"]["configPrio"] = YamlConverter.CONFIG_PRIO_VALUE
self._serialized_data["__root__"]["class"] = YamlConverter.MMIO_DEVICE_CLASS
self._serialized_data["__root__"]["size"] = hex(YamlConverter.ROOT_DEVICE_SIZE)
replica_count = 0
if hasattr(self._pyrogue_device, "_numBuffers"):
replica_count = self._pyrogue_device._numBuffers
if replica_count:
self._serialized_data["__root__"]["metadata"] = OrderedDict()
self._serialized_data["__root__"]["metadata"]["numBuffers"] = ' '.join(['&numBuffers', str(replica_count)])
self._serialized_data["__root__"]["children"] = OrderedDict()
self._serialize_children(self._pyrogue_device, replica_count)
def _serialize_children(self, device, replica_count):
"""
Serialize the Rogue device's children, i.e. remote variables and commands. Potentially recursive (if needed)
if a remote variable can contain children.
Parameters
----------
device : pr.Device
A Rogue device whose children are to be serialized
"""
# Serialize Remote Variables
if hasattr(device, "getNodes"):
remote_variables = device.getNodes(pr.RemoteVariable)
self._serialize_remote_variables(remote_variables, replica_count)
# Serialize devices
if hasattr(device, "devices"):
devices = device.devices
self._serialize_devices(devices)
# Serialize Commands
if hasattr(device, "commands"):
commands = device.commands
self._serialize_commands(commands)
def _serialize_remote_variables(self, remote_variables, replica_count):
"""
Serialize just the remote variables.
Parameters
----------
remote_variables : OrderedDict
An ordered dictionary of remote variables for a Rogue device.
"""
if remote_variables and len(remote_variables):
for key, remote_var in remote_variables.items():
remote_var_name = remote_var.name
search_index = remote_var_name.find('[')
if search_index >= 0:
remote_var_name = remote_var_name[0:search_index]
if key[search_index:search_index + 3] != "[0]" and not replica_count:
current_node_count = child_data[remote_var_name].get("nelms", None)
if current_node_count is None:
self._serialized_data["__root__"]["children"][remote_var_name]["at"]["nelms"] = 2
else:
self._serialized_data["__root__"]["children"][remote_var_name]["at"]["nelms"] = \
current_node_count + 1
continue
child_data = OrderedDict()
child_data['#'] = '#' * 20
child_data[remote_var_name] = OrderedDict()
child_data[remote_var_name]["at"] = OrderedDict()
child_data[remote_var_name]["at"]["offset"] = hex(remote_var.offset)
if replica_count:
child_data[remote_var_name]["at"]["nelms"] = "*numBuffers"
child_data[remote_var_name]["at"]["byteOrder"] = YamlConverter.CHILD_DEVICE_BYTE_ORDER
child_data[remote_var_name]["description"] = remote_var.description
child_data[remote_var_name]["class"] = YamlConverter.CHILD_DEVICE_CLASS
child_data[remote_var_name]["sizeBits"] = remote_var.varBytes * 8
# Must adjust so that the value falls within the CPSW range -- [0..7]
ls_bit = remote_var.bitOffset[-1] if remote_var.bitOffset[-1] < 8 else remote_var.bitOffset[-1] % 8
if ls_bit:
# Since the default value is 0, only output if the ls_bit value is larger than 0
child_data[remote_var_name]["lsBit"] = ls_bit
child_data[remote_var_name]["mode"] = remote_var.mode
child_data[remote_var_name]['##'] = '#' * 20
self._serialized_data["__root__"]["children"].update(child_data)
def _serialize_devices(self, devices):
"""
Serialize the child devices.
Parameters
----------
devices : OrderedDict
The child device record.
"""
if devices and len(devices):
for key, v in devices.items():
device_name = key
search_index = device_name.find('[')
if search_index >= 0:
device_name = device_name[0:search_index]
if key[search_index:search_index + 3] != "[0]":
# Do not output duplicate remote var names with different subscripts
current_node_count = child_data[device_name].get("nelms", None)
if current_node_count is None:
self._serialized_data["__root__"]["children"][device_name]["at"]["nelms"] = 2
else:
self._serialized_data["__root__"]["children"][device_name]["at"]["nelms"] = \
current_node_count + 1
continue
child_data = OrderedDict()
child_data['#'] = '#' * 20
child_data[device_name] = OrderedDict()
child_data[device_name]["<<"] = ''.join(['*', device_name])
child_data[device_name]["at"] = OrderedDict()
if hasattr(devices[key], "offset"):
child_data[device_name]["at"]["offset"] = hex(devices[key].offset)
child_data[device_name]['##'] = '#' * 20
self._serialized_data["__root__"]["children"].update(child_data)
def _serialize_commands(self, commands):
"""
Serialize just the commands.
Parameters
----------
commands : OrderedDict
An ordered dictionary of commands for a Rogue device.
"""
if commands and len(commands):
for _, command in commands.items():
command_name = command.name
child_data = OrderedDict()
child_data['#'] = '#' * 20
child_data[command_name] = OrderedDict()
child_data[command_name]["at"] = OrderedDict()
child_data[command_name]["at"]["offset"] = hex(command.offset) if hasattr(command, "offset") else \
hex(YamlConverter.SEQUENCE_COMMAND_OFFSET)
child_data[command_name]["name"] = command_name
child_data[command_name]["description"] = command.description
child_data[command_name]["class"] = YamlConverter.SEQUENCE_COMMAND_CLASS
child_data[command_name]['##'] = '#' * 20
self._serialized_data["__root__"]["children"].update(child_data)
def _export_to_yaml(self, filename, dirname="output"):
"""
Post-process the YAML contents before writing into the final output file.
This is accomplished by first writing into a temporary file, then read the temporary file and process certain
lines, i.e. adding headers, then write to the official output line. Finally, automatically delete the temporary
line.
Parameters
----------
filename : str
The user-provided output data file name.
dirname : str
The name of the output directory
"""
with open(os.path.join(dirname, '.'.join([filename, "tmp"])), 'w') as temp_yaml_file:
contents = YamlConverter.ordered_dump(self._serialized_data, dumper=yaml.SafeDumper,
default_flow_style=False)
contents = contents.replace("__root__:\n", "")
temp_yaml_file.write(contents)
with open(os.path.join(dirname, filename), 'w') as yaml_file:
YamlConverter._insert_heading(yaml_file, filename)
with open(os.path.join(dirname, '.'.join([filename, "tmp"])), 'r') as temp_yaml_file:
lines = temp_yaml_file.readlines()
for line in lines:
line = YamlConverter._post_process_line(line)
yaml_file.write(line)
os.remove(os.path.join(dirname, '.'.join([filename, "tmp"])))
@staticmethod
def _post_process_line(line):
"""
Decorate an output line.
Parameters
----------
line : str
An output line to be decorated.
Returns : str
-------
The decorated output line
"""
# Add headers to separate sections in the YAML file
if any(keyword in line for keyword in ("children:", "\'#\'", "\'##\'")):
space_count = len(line) - len(line.lstrip())
if "children:" in line:
markings = ''.join([' ' * space_count, '#' * 10, '\n'])
line = markings + line + markings
elif "\'#\'" in line:
line = ''.join([' ' * space_count, '#' * 80, '\n'])
elif "\'##'" in line:
space_count -= 2
line = ''.join([' ' * space_count, '#' * 80, '\n'])
elif "\'" in line:
# Remove single quotes surrounding strings as they're not in the CPSW YAML specs
line = line.replace("\'", '')
return line
@staticmethod
def _insert_heading(yaml_file, filename):
yaml_file.write("##############################################################################\n")
yaml_file.write("## This file is part of 'SLAC Firmware Standard Library'.\n")
yaml_file.write("## It is subject to the license terms in the LICENSE.txt file found in the \n")
yaml_file.write("## top-level directory of this distribution and at: \n")
yaml_file.write("## https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html. \n")
yaml_file.write("## No part of 'SLAC Firmware Standard Library', including this file, \n")
yaml_file.write("## may be copied, modified, propagated, or distributed except according to \n")
yaml_file.write("## the terms contained in the LICENSE.txt file. \n")
yaml_file.write("############################################################################## \n")
yaml_file.write(' '.join(["#schemaversion", CPSW_YAML_SCHEMA_VERSION, '\n']))
yaml_file.write(' '.join(["#once", filename, '\n\n\n']))
@staticmethod
def ordered_dump(data, stream=None, dumper=yaml.Dumper, **kwds):
"""
Overriding PyYAML generation to support OrderedDict.
Reference: https://stackoverflow.com/a/21912744
Parameters
----------
data : OrderedDict
The ordered data structure
stream : stream
In this context, the file to dump the YAML contents
dumper : yaml.Dumper
The YAML dumping object
kwds : args
Additional arguments as supported by PyYAML.
Returns : str
-------
A string containing the entire YAML contents.
"""
class OrderedDumper(dumper):
pass
def _dict_representer(dumper, data):
return dumper.represent_mapping(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
data.items())
OrderedDumper.add_representer(OrderedDict, _dict_representer)
return yaml.dump(data, stream, OrderedDumper, **kwds)
| StarcoderdataPython |
312717 | #!/usr/bin/env python
import re
import sys
from EPPs.common import GenerateHamiltonInputEPP, InvalidStepError
class GenerateHamiltonInputSeqQuantPlate(GenerateHamiltonInputEPP):
""""Generate a CSV containing the necessary information for preparing the spectramax picogreen plate. The standards locaiton is not stored in the LIMS and will be hard-coded into the
Hamilton method"""
_use_load_config = False # prevent the loading of the config
# Define the column headers that will be used in the Hamilton input file
csv_column_headers = ['Sample ID', 'Source Plate BC', 'Source Plate Position', 'Sample Volume (ul)',
'Destination Plate BC', 'Destination Plate Position', 'Master Mix Volume (ul)']
# Define the output file
output_file_name = 'SEQ_PLATE_QUANT.csv'
# Define the number of input containers that are permitted
_max_nb_input_containers = 1
# Define the number of output containers that are permitted
_max_nb_output_containers = 1
# the step requires 3 output replicates per input
_nb_resfiles_per_input = 3
def _generate_csv_dict(self):
# build a dictionary of the csv lines with the output well as the key so can be populated into the output file in the best order for straightforward import into the Hamilton method
# in a pattern for most efficient pipetting i.e. columns then rows
csv_dict = {}
# find the corresponding lot number i.e. barcode for the SDNA plate.
sdna_template = "LP[0-9]{7}-SDNA"
sdna_barcode = ""
reagent_lots = list(self.process.step.reagent_lots)
for lot in reagent_lots:
if re.match(sdna_template, lot.lot_number):
sdna_barcode = lot.lot_number
if not sdna_barcode:
raise InvalidStepError(
'SDNA Plate lot not selected. Please select in "Reagent Lot Tracking" at top of step.')
# find all the inputs for the step that are analytes (i.e. samples and not associated files)
for art in self.artifacts:
outputs = self.process.outputs_per_input(art.id, ResultFile=True)
# obtain input and output plate names (barcode) for use in loop below
output_plate_name = outputs[0].location[0].name
# input container and location are not stored in the LIMS for the SDNA standards but can be extrapolated from metadata in
# the reagent record.
if art.name.split(" ")[0] == 'SDNA':
input_plate_name = sdna_barcode
input_location = art.name.split(" ")[2]
else:
input_plate_name = art.location[0].name
# remove colon from input location as this is not compatible with Hamilton Venus software
input_location = art.location[1].replace(':', '')
for output in outputs:
# create the csv line with key based on output location that can be sorted by column then row
csv_dict[output.location[1]] = [output.name, input_plate_name, input_location,
self.process.udf['Sample Volume (ul)'], output_plate_name,
output.location[1].replace(':', ''),
self.process.udf['Master Mix Volume (ul)']]
return csv_dict
if __name__ == '__main__':
sys.exit(GenerateHamiltonInputSeqQuantPlate().run())
| StarcoderdataPython |
8175027 | <gh_stars>0
from rasa_core.agent import Agent
from rasa_core.interpreter import RasaNLUInterpreter
interpreter = RasaNLUInterpreter('models/current/nlu')
messages = ["Hi! you can chat in this window. Type 'stop' to end the conversation."]
agent = Agent.load('models/current/dialogue', interpreter=interpreter)
def respond_to_messages(message):
responses = agent.handle_message(message)
for r in responses:
messages.append(r.get("text"))
return responses
| StarcoderdataPython |
4885749 | # -*- coding: utf-8 -*-
import re
import sys
import unittest
from io import StringIO
from iktomi.cli.sqla import Sqla, drop_everything
from sqlalchemy import (
create_engine, orm, MetaData, Column, Integer, ForeignKey,
)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.dialects.mysql import MEDIUMTEXT
try:
from unittest import mock
except ImportError:
import mock
__all__ = ['SqlaTests']
class SqlaTests(unittest.TestCase):
def test_drop_everything(self):
# Prepare.
# Non-trivial case with circular foreign key constraints.
# SQLite doesn't support dropping constraint by name and creation of
# custom types, so these cases are not covered by the test.
Base = declarative_base()
class A(Base):
__tablename__ = 'A'
id = Column(Integer, primary_key=True)
b_id = Column(ForeignKey('B.id', use_alter=True))
class B(Base):
__tablename__ = 'B'
id = Column(Integer, primary_key=True)
a_id = Column(ForeignKey(A.id))
engine = create_engine('sqlite://')
Base.metadata.create_all(bind=engine)
self.assertTrue(engine.has_table('A'))
self.assertTrue(engine.has_table('B'))
# Actual test
drop_everything(engine)
self.assertFalse(engine.has_table('A'))
self.assertFalse(engine.has_table('B'))
def test_specific_dialect(self):
Base = declarative_base()
class Obj(Base):
__tablename__ = 'Obj'
id = Column(Integer, primary_key=True)
text = Column(MEDIUMTEXT)
engine = create_engine('mysql+pymysql://')
cli = Sqla(orm.sessionmaker(bind=engine), metadata=Base.metadata)
schema = cli._schema(Obj.__table__)
self.assertIn('MEDIUMTEXT', schema)
def test_create_drop_tables_single_meta(self):
Base = declarative_base()
class A(Base):
__tablename__ = 'A'
id = Column(Integer, primary_key=True)
engine = create_engine('sqlite://')
cli = Sqla(orm.sessionmaker(bind=engine), metadata=Base.metadata)
for verbose in [False, True]:
cli.command_create_tables(verbose=verbose)
self.assertTrue(engine.has_table('A'))
with mock.patch.object(sys.stdin, 'readline', return_value='n'):
try:
cli.command_drop_tables()
except SystemExit:
pass
self.assertTrue(engine.has_table('A'))
with mock.patch.object(sys.stdin, 'readline', return_value='y'):
cli.command_drop_tables()
self.assertFalse(engine.has_table('A'))
def test_create_drop_tables_several_meta(self):
Base1 = declarative_base()
class A1(Base1):
__tablename__ = 'A'
id = Column(Integer, primary_key=True)
Base2 = declarative_base()
class A2(Base2):
__tablename__ = 'A'
id = Column(Integer, primary_key=True)
engine1 = create_engine('sqlite://')
engine2 = create_engine('sqlite://')
binds = {
A1.__table__: engine1,
A2.__table__: engine2,
}
meta = {
'm1': Base1.metadata,
'm2': Base2.metadata,
'm3': MetaData(),
}
cli = Sqla(orm.sessionmaker(binds=binds), metadata=meta)
for verbose in [False, True]:
cli.command_create_tables(verbose=verbose)
self.assertTrue(engine1.has_table('A'))
self.assertTrue(engine2.has_table('A'))
with mock.patch.object(sys.stdin, 'readline', return_value='y'):
cli.command_drop_tables('m1')
self.assertFalse(engine1.has_table('A'))
self.assertTrue(engine2.has_table('A'))
with mock.patch.object(sys.stdin, 'readline', return_value='y'):
cli.command_drop_tables()
self.assertFalse(engine1.has_table('A'))
self.assertFalse(engine2.has_table('A'))
cli.command_create_tables('m1', verbose=verbose)
self.assertTrue(engine1.has_table('A'))
self.assertFalse(engine2.has_table('A'))
with mock.patch.object(sys.stdin, 'readline', return_value='y'):
cli.command_drop_tables()
self.assertFalse(engine1.has_table('A'))
self.assertFalse(engine2.has_table('A'))
cli.command_create_tables('m3', verbose=verbose)
self.assertFalse(engine1.has_table('A'))
self.assertFalse(engine2.has_table('A'))
def test_reset(self):
Base = declarative_base()
class A(Base):
__tablename__ = 'A'
id = Column(Integer, primary_key=True)
id_values = [id_value1, id_value2] = [12, 34]
# Each time it uses different value
def initial(db):
db.add(A(id=id_values.pop(0)))
engine = create_engine('sqlite://')
cli = Sqla(orm.sessionmaker(bind=engine), metadata=Base.metadata,
initial=initial)
with mock.patch.object(sys.stdin, 'readline', return_value='y'):
cli.command_reset()
query = cli.session.query(A)
self.assertEqual(query.count(), 1)
a = query.one()
self.assertEqual(a.id, id_value1)
with mock.patch.object(sys.stdin, 'readline', return_value='y'):
cli.command_reset()
query = cli.session.query(A)
self.assertEqual(query.count(), 1)
a = query.one()
self.assertEqual(a.id, id_value2)
_created_tables = re.compile(r'create\s+table\s+\W?(\w+)', re.I).findall
def test_schema_single_meta(self):
Base = declarative_base()
class A(Base):
__tablename__ = 'A'
id = Column(Integer, primary_key=True)
class B(Base):
__tablename__ = 'B'
id = Column(Integer, primary_key=True)
engine = create_engine('sqlite://')
cli = Sqla(orm.sessionmaker(bind=engine), metadata=Base.metadata)
output = StringIO()
with mock.patch.object(sys, 'stdout', output):
cli.command_schema()
created = self._created_tables(output.getvalue())
self.assertEqual(len(created), 2)
self.assertEqual(created.count('A'), 1)
self.assertEqual(created.count('B'), 1)
output = StringIO()
with mock.patch.object(sys, 'stdout', output):
cli.command_schema('A')
created = self._created_tables(output.getvalue())
self.assertEqual(created, ['A'])
output = StringIO()
with mock.patch.object(sys, 'stdout', output):
try:
cli.command_schema('C')
except SystemExit:
pass
created = self._created_tables(output.getvalue())
self.assertEqual(created, [])
def test_schema_several_meta(self):
Base1 = declarative_base()
class A1(Base1):
__tablename__ = 'A'
id = Column(Integer, primary_key=True)
class B1(Base1):
__tablename__ = 'B'
id = Column(Integer, primary_key=True)
Base2 = declarative_base()
class A2(Base2):
__tablename__ = 'A'
id = Column(Integer, primary_key=True)
engine1 = create_engine('sqlite://')
engine2 = create_engine('sqlite://')
binds = {
A1.__table__: engine1,
B1.__table__: engine1,
A2.__table__: engine2,
}
meta = {
'm1': Base1.metadata,
'm2': Base2.metadata,
'm3': MetaData(),
}
cli = Sqla(orm.sessionmaker(binds=binds), metadata=meta)
output = StringIO()
with mock.patch.object(sys, 'stdout', output):
cli.command_schema()
created = self._created_tables(output.getvalue())
self.assertEqual(len(created), 3)
self.assertEqual(created.count('A'), 2)
self.assertEqual(created.count('B'), 1)
output = StringIO()
with mock.patch.object(sys, 'stdout', output):
cli.command_schema('m1')
created = self._created_tables(output.getvalue())
self.assertEqual(len(created), 2)
self.assertEqual(created.count('A'), 1)
self.assertEqual(created.count('B'), 1)
output = StringIO()
with mock.patch.object(sys, 'stdout', output):
cli.command_schema('m1.B')
created = self._created_tables(output.getvalue())
self.assertEqual(created, ['B'])
output = StringIO()
with mock.patch.object(sys, 'stdout', output):
try:
cli.command_schema('m2.B')
except SystemExit:
pass
created = self._created_tables(output.getvalue())
self.assertEqual(created, [])
output = StringIO()
with mock.patch.object(sys, 'stdout', output):
try:
cli.command_schema('m3.A')
except SystemExit:
pass
created = self._created_tables(output.getvalue())
self.assertEqual(created, [])
def test_gen(self):
gen_a = mock.MagicMock()
cli = Sqla(orm.sessionmaker(), MetaData(), generators={'a': gen_a})
try:
cli.command_gen()
except SystemExit:
pass
gen_a.assert_not_called()
gen_a.reset_mock()
cli.command_gen('a')
gen_a.assert_called_once_with(cli.session, 0)
gen_a.reset_mock()
cli.command_gen('a:123')
gen_a.assert_called_once_with(cli.session, 123)
| StarcoderdataPython |
5100058 | from vacore import VACore
import os
modname = os.path.basename(__file__)[:-3] # calculating modname
# функция на старте
def start(core:VACore):
manifest = {
"name": "Акции на Московской бирже",
"version": "1.2",
"require_online": True,
"commands": {
},
"default_options": {
"tickers": {
"": [ # "ирина акции"
["sber", "Сбер"],
["gazp", "Газпром"]
],
"сбер": [ # "ирина акции сбер"
["sber", "Сбер"],
],
},
"portfolios": { # разные портфели акций
"тест": { # команда "ирина портфель тест"
"portfolio": [ # портфель акций
["sberp", 100],
["sber", 100]
],
"start_inv": 40000, # стартовая цена портфеля
}
}
}
}
return manifest
def start_with_options(core:VACore, manifest:dict):
# модифицируем манифест, добавляя команды на основе options, сохраненных в файле
cmds = {}
cmdoptions = manifest["options"]["portfolios"]
for cmd in cmdoptions.keys():
cmds[cmd] = (run_portfolio, cmdoptions[cmd])
manifest["commands"]["портфель"] = cmds
cmds2 = {}
cmdoptions = manifest["options"]["tickers"]
for cmd in cmdoptions.keys():
cmds2[cmd] = (run_stocks, cmdoptions[cmd])
manifest["commands"]["акции|акция"] = cmds2
return manifest
data_stocks = {}
def update_stocks():
try:
import requests
res = requests.get("https://iss.moex.com/iss/engines/stock/markets/shares/boards/TQBR/securities.json?iss.meta=off&iss.only=marketdata&marketdata.columns=SECID,LAST")
data = res.json()
global data_stocks
data_stocks = {}
for st in data["marketdata"]["data"]:
data_stocks[st[0]] = st[1]
return True
except:
import traceback
traceback.print_exc()
return False
def run_stocks(core:VACore, phrase:str, param):
isUpdated = update_stocks()
if isUpdated:
#print(data_stocks["SBERP"])
#options = core.plugin_options(modname)
#from utils.num_to_text_ru import num2text
txt = ""
for t in param:
price = data_stocks[str(t[0]).upper()]
pricetxt = str(price)
if price > 1000: # спецкейс для дорогих акций, иначе тупо
from utils.num_to_text_ru import num2text
pricetxt = num2text(int(price))
txt += t[1]+" "+pricetxt+" . "
core.say(txt)
else:
core.say("Проблемы с соединением с биржей")
def run_portfolio(core:VACore, phrase:str, param: dict):
isUpdated = update_stocks()
if isUpdated:
#print(data_stocks["SBERP"])
from utils.num_to_text_ru import num2text
sum = 0
for t in param["portfolio"]:
cur = data_stocks[str(t[0]).upper()]
value = cur*t[1]
print("{0}: {1} шт по цене {2} - всего {3}".format(str(t[0]).upper(),t[1],cur,value))
sum += value
print("Всего: {0}".format(sum))
# округляем до удобного
sum = int(sum/1000)*1000
txt = "Стоимость портфеля {0} . ".format(num2text(sum))
if sum > param["start_inv"]:
txt += "Текущая прибыль {0} . ".format(num2text(sum-param["start_inv"]))
else:
txt += "Текущий убыток {0} . ".format(num2text(param["start_inv"]-sum))
core.say(txt)
else:
core.say("Проблемы с соединением с биржей")
| StarcoderdataPython |
3501905 | from glob import glob
import json
import os
import re
import yaml
if os.path.exists("./savedata/config.json"):
with open("./savedata/config.json") as json_file:
raw_json = json_file.read()
config = json.loads(raw_json)
else:
with open("./savedata/config.json", "w") as json_file:
json_file.write("{}")
config = {}
with open("./config-types") as f:
config_types = {
re.compile(re.escape(k).replace(r"\*", r"\w+")): v.strip()
for k, v in dict(
map(lambda line: line.strip().split(":"), f.readlines())
).items()
}
for dpb_yml in glob("./packages/*@*/dpb.yml"):
with open(dpb_yml) as f:
custom_config_type = yaml.safe_load(f)
try:
with open(
os.path.dirname(dpb_yml)
+ "/"
+ custom_config_type.get("config-types", "config-types"),
) as custom_config_type_file:
config_types.update(
{
re.compile(re.escape(k).replace(r"\*", r"\w+")): v.strip()
for k, v in map(
lambda line: line.strip().split(":"),
custom_config_type_file.readlines(),
)
}
)
except FileNotFoundError:
pass
def convert_bool(value):
if isinstance(value, bool):
return value
lowered = value.lower()
if lowered in ("yes", "y", "true", "t", "1", "enable", "on"):
return True
elif lowered in ("no", "n", "false", "f", "0", "disable", "off"):
return False
else:
return None
def check_config_type(key):
for config_type in config_types.items():
if config_type[0].match(key):
return config_type[1]
return None
def convert_config(data):
for key, value in data.items():
if isinstance(value, dict):
data[key] = convert_config(value)
elif check_config_type(key):
key_type = check_config_type(key)
if key_type == "bool":
data[key] = convert_bool(value)
elif key_type == "int":
data[key] = int(value)
elif key_type == "float":
data[key] = float(value)
else:
pass
else:
pass
return data
config = convert_config(config)
| StarcoderdataPython |
5043487 | # Filename: notepicker.py
#
# Summary: reads wav files
#
# Author: <NAME>
#
# Last Updated: Oct 07 2015
import sys # exit argv
import time # time
import wave # open getframerate getnchannels getsampwidth getnframes readframes error
import numpy # empty uint8 fromstring shape reshape view
import scipy.signal # fftconvolve
class Picker():
def __init__(self):
self.notes = () # tuple of Note objects
self.signal = numpy.empty(0) # ndarray of amplitudes
self.channels = 0
self.frequency = 0.0
self.no_samples = 0
self.sample_rate = 0
# TODO
# Values must be adjusted for different songs
# THRESHOLD is a value that represents a
# minimum value in volume of the signal to
# start determining if there is an audible
# note.
# NOTE is the minimum number of peaks above
# the threshold to consider the part of the
# signal an actual note.
# BREAK is the minimum number of consecutive
# peaks below the threshold to signify the
# end of a possible note.
def findNotes(self, signal):
''' Estimates the number of notes in the signal. '''
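        # Returns a list of (start, end) peak-index pairs, one per detected note,
        # e.g. [(1200, 1680), (2400, 2910)] (illustrative values).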
THRESHOLD = .45
NOTE = 50
BREAK = 50
# no_greater refers to the number of peaks above
# the threshold.
# no_less refers to the number of consecutive
# peaks below the threshold.
# unique marks a potential note for the storage
# of the indices where it starts and ends.
start = 0
end = 0
no_greater = 0
no_less = 0
unique = True
notes = []
peaks = scipy.signal.argrelextrema(signal, numpy.greater)[0]
# for every peak, check where it lies with
# respect to the threshold.
for peak in peaks:
# if it is above, check if the note is
# unique, increment the no_greater and
# reset no_less.
if (signal[peak] > THRESHOLD):
if (unique):
unique = False
start = peak
else:
end = peak
no_greater = no_greater + 1
no_less = 0
# otherwise, increment the no_less.
else:
no_less = no_less + 1
# if no_less is greater than BREAK,
# check if no_greater is greater than
# NOTE, append the note to the list if
# appropriate, and reset the rest of
# the values.
if (no_less > BREAK):
if (no_greater > NOTE):
note = (start, end)
notes.append(note)
no_greater = 0
no_less = 0
unique = True
if (no_greater > NOTE):
note = (start, end)
notes.append(note)
return notes
'''
# NOTE: the block below is deliberately left disabled inside a module-level string; it targets an older
# API (NotePicker, read, findFrequency, recognize) that this module does not define.
if __name__ == '__main__':
notepicker = NotePicker()
notepicker.read('../Music/c1.wav')
notepicker.getFrequency()
print notepicker.frequency
audio = []
for index in range(1, len(sys.argv)):
audio.append(str(sys.argv[index]))
for filename in audio:
start_time = time.time()
signal, sample_rate = read(filename)
freq = findFrequency(signal, sample_rate)
print '******************************'
print 'Number of Samples: %i' % len(signal)
print 'Sample rate: %i' % sample_rate
print 'Frequency: %.3f' % freq
print 'Note: %s' % recognize(freq)
print 'Time elapsed: %.3f s' % (time.time() - start_time)
'''
| StarcoderdataPython |
11316321 | <filename>superres/src/models/srgan.py<gh_stars>10-100
import logging
from collections import OrderedDict
import torch
import torch.nn as nn
from torch.nn.parallel import DataParallel, DistributedDataParallel
import models.networks as networks
import models.lr_scheduler as lr_scheduler
from .base_model import BaseModel
from models.modules.loss import GANLoss, LMaskLoss
logger = logging.getLogger('base')
class SRGANModel(BaseModel):
def __init__(self, opt):
super(SRGANModel, self).__init__(opt)
if opt['dist']:
self.rank = torch.distributed.get_rank()
else:
self.rank = -1 # non dist training
train_opt = opt['train']
self.train_opt = train_opt
self.opt = opt
self.segmentor = None
# define networks and load pretrained models
self.netG = networks.define_G(opt).to(self.device)
if opt['dist']:
self.netG = DistributedDataParallel(self.netG, device_ids=[torch.cuda.current_device()])
else:
self.netG = DataParallel(self.netG)
if self.is_train:
self.netD = networks.define_D(opt).to(self.device)
if train_opt.get("gan_video_weight", 0) > 0:
self.net_video_D = networks.define_video_D(opt).to(self.device)
if opt['dist']:
self.netD = DistributedDataParallel(self.netD,
device_ids=[torch.cuda.current_device()])
if train_opt.get("gan_video_weight", 0) > 0:
self.net_video_D = DistributedDataParallel(self.net_video_D,
device_ids=[torch.cuda.current_device()])
else:
self.netD = DataParallel(self.netD)
if train_opt.get("gan_video_weight", 0) > 0:
self.net_video_D = DataParallel(self.net_video_D)
self.netG.train()
self.netD.train()
if train_opt.get("gan_video_weight", 0) > 0:
self.net_video_D.train()
# define losses, optimizer and scheduler
if self.is_train:
# G pixel loss
if train_opt['pixel_weight'] > 0:
l_pix_type = train_opt['pixel_criterion']
if l_pix_type == 'l1':
self.cri_pix = nn.L1Loss().to(self.device)
elif l_pix_type == 'l2':
self.cri_pix = nn.MSELoss().to(self.device)
else:
raise NotImplementedError('Loss type [{:s}] not recognized.'.format(l_pix_type))
self.l_pix_w = train_opt['pixel_weight']
else:
logger.info('Remove pixel loss.')
self.cri_pix = None
# Pixel mask loss
if train_opt.get("pixel_mask_weight", 0) > 0:
l_pix_type = train_opt['pixel_mask_criterion']
self.cri_pix_mask = LMaskLoss(l_pix_type=l_pix_type, segm_mask=train_opt['segm_mask']).to(self.device)
self.l_pix_mask_w = train_opt['pixel_mask_weight']
else:
logger.info('Remove pixel mask loss.')
self.cri_pix_mask = None
# G feature loss
if train_opt['feature_weight'] > 0:
l_fea_type = train_opt['feature_criterion']
if l_fea_type == 'l1':
self.cri_fea = nn.L1Loss().to(self.device)
elif l_fea_type == 'l2':
self.cri_fea = nn.MSELoss().to(self.device)
else:
raise NotImplementedError('Loss type [{:s}] not recognized.'.format(l_fea_type))
self.l_fea_w = train_opt['feature_weight']
else:
logger.info('Remove feature loss.')
self.cri_fea = None
if self.cri_fea: # load VGG perceptual loss
self.netF = networks.define_F(opt, use_bn=False).to(self.device)
if opt['dist']:
self.netF = DistributedDataParallel(self.netF,
device_ids=[torch.cuda.current_device()])
else:
self.netF = DataParallel(self.netF)
# GD gan loss
self.cri_gan = GANLoss(train_opt['gan_type'], 1.0, 0.0).to(self.device)
self.l_gan_w = train_opt['gan_weight']
# Video gan weight
if train_opt.get("gan_video_weight", 0) > 0:
self.cri_video_gan = GANLoss(train_opt['gan_video_type'], 1.0, 0.0).to(self.device)
self.l_gan_video_w = train_opt['gan_video_weight']
# can't use optical flow with i and i+1 because we need i+2 lr to calculate i+1 oflow
if 'train' in self.opt['datasets'].keys():
key = "train"
else:
key = 'test_1'
assert self.opt['datasets'][key]['optical_flow_with_ref'] == True, f"Current value = {self.opt['datasets'][key]['optical_flow_with_ref']}"
# D_update_ratio and D_init_iters
self.D_update_ratio = train_opt['D_update_ratio'] if train_opt['D_update_ratio'] else 1
self.D_init_iters = train_opt['D_init_iters'] if train_opt['D_init_iters'] else 0
# optimizers
# G
wd_G = train_opt['weight_decay_G'] if train_opt['weight_decay_G'] else 0
optim_params = []
for k, v in self.netG.named_parameters(): # can optimize for a part of the model
if v.requires_grad:
optim_params.append(v)
else:
if self.rank <= 0:
logger.warning('Params [{:s}] will not optimize.'.format(k))
self.optimizer_G = torch.optim.Adam(optim_params, lr=train_opt['lr_G'],
weight_decay=wd_G,
betas=(train_opt['beta1_G'], train_opt['beta2_G']))
self.optimizers.append(self.optimizer_G)
# D
wd_D = train_opt['weight_decay_D'] if train_opt['weight_decay_D'] else 0
self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=train_opt['lr_D'],
weight_decay=wd_D,
betas=(train_opt['beta1_D'], train_opt['beta2_D']))
self.optimizers.append(self.optimizer_D)
# Video D
if train_opt.get("gan_video_weight", 0) > 0:
self.optimizer_video_D = torch.optim.Adam(self.net_video_D.parameters(), lr=train_opt['lr_D'],
weight_decay=wd_D,
betas=(train_opt['beta1_D'], train_opt['beta2_D']))
self.optimizers.append(self.optimizer_video_D)
# schedulers
if train_opt['lr_scheme'] == 'MultiStepLR':
for optimizer in self.optimizers:
self.schedulers.append(
lr_scheduler.MultiStepLR_Restart(optimizer, train_opt['lr_steps'],
restarts=train_opt['restarts'],
weights=train_opt['restart_weights'],
gamma=train_opt['lr_gamma'],
clear_state=train_opt['clear_state']))
elif train_opt['lr_scheme'] == 'CosineAnnealingLR_Restart':
for optimizer in self.optimizers:
self.schedulers.append(
lr_scheduler.CosineAnnealingLR_Restart(
optimizer, train_opt['T_period'], eta_min=train_opt['eta_min'],
restarts=train_opt['restarts'], weights=train_opt['restart_weights']))
else:
raise NotImplementedError('MultiStepLR learning rate scheme is enough.')
self.log_dict = OrderedDict()
self.print_network() # print network
self.load() # load G and D if needed
def feed_data(self, data, need_GT=True):
self.img_path = data['GT_path']
self.var_L = data['LQ'].to(self.device) # LQ
if need_GT:
self.var_H = data['GT'].to(self.device) # GT
if self.train_opt.get("use_HR_ref"):
self.var_HR_ref = data['img_reference'].to(self.device)
if "LQ_next" in data.keys():
self.var_L_next = data['LQ_next'].to(self.device)
if "GT_next" in data.keys():
self.var_H_next = data['GT_next'].to(self.device)
self.var_video_H = torch.cat([data['GT'].unsqueeze(2), data['GT_next'].unsqueeze(2)], dim=2).to(self.device)
else:
self.var_L_next = None
def optimize_parameters(self, step):
# G
for p in self.netD.parameters():
p.requires_grad = False
self.optimizer_G.zero_grad()
args = [self.var_L]
if self.train_opt.get('use_HR_ref'):
args += [self.var_HR_ref]
if self.var_L_next is not None:
args += [self.var_L_next]
self.fake_H, self.binary_mask = self.netG(*args)
#Video Gan
if self.opt['train'].get("gan_video_weight", 0) > 0:
with torch.no_grad():
args = [self.var_L, self.var_HR_ref, self.var_L_next]
self.fake_H_next, self.binary_mask_next = self.netG(*args)
l_g_total = 0
if step % self.D_update_ratio == 0 and step > self.D_init_iters:
if self.cri_pix: # pixel loss
l_g_pix = self.l_pix_w * self.cri_pix(self.fake_H, self.var_H)
l_g_total += l_g_pix
if self.cri_pix_mask:
l_g_pix_mask = self.l_pix_mask_w * self.cri_pix_mask(self.fake_H, self.var_H, self.var_HR_ref)
l_g_total += l_g_pix_mask
if self.cri_fea: # feature loss
real_fea = self.netF(self.var_H).detach()
fake_fea = self.netF(self.fake_H)
l_g_fea = self.l_fea_w * self.cri_fea(fake_fea, real_fea)
l_g_total += l_g_fea
# Image Gan
if self.opt['network_D'] == "discriminator_vgg_128_mask":
import torch.nn.functional as F
from models.modules import psina_seg
if self.segmentor is None:
self.segmentor = psina_seg.base.SegmentationModule(encode='stationary_probs').to(self.device)
self.segmentor = self.segmentor.eval()
lr = F.interpolate(self.var_H, scale_factor=0.25, mode='nearest')
with torch.no_grad():
binary_mask = (1 - self.segmentor.predict(lr[:, [2,1,0],::]))
binary_mask = F.interpolate(binary_mask, scale_factor=4, mode='nearest')
pred_g_fake = self.netD(self.fake_H, self.fake_H *(1-binary_mask), self.var_HR_ref, binary_mask * self.var_HR_ref)
else:
pred_g_fake = self.netD(self.fake_H)
if self.opt['train']['gan_type'] == 'gan':
l_g_gan = self.l_gan_w * self.cri_gan(pred_g_fake, True)
elif self.opt['train']['gan_type'] == 'ragan':
if self.opt['network_D'] == "discriminator_vgg_128_mask":
                        pred_d_real = self.netD(self.var_H, self.var_H *(1-binary_mask), self.var_HR_ref, binary_mask * self.var_HR_ref)
else:
pred_d_real = self.netD(self.var_H)
pred_d_real = pred_d_real.detach()
l_g_gan = self.l_gan_w * (
self.cri_gan(pred_d_real - torch.mean(pred_g_fake), False) +
self.cri_gan(pred_g_fake - torch.mean(pred_d_real), True)) / 2
l_g_total += l_g_gan
#Video Gan
if self.opt['train'].get("gan_video_weight", 0) > 0:
self.fake_video_H = torch.cat([self.fake_H.unsqueeze(2), self.fake_H_next.unsqueeze(2)], dim=2)
pred_g_video_fake = self.net_video_D(self.fake_video_H)
if self.opt['train']['gan_video_type'] == 'gan':
l_g_video_gan = self.l_gan_video_w * self.cri_video_gan(pred_g_video_fake, True)
elif self.opt['train']['gan_type'] == 'ragan':
pred_d_video_real = self.net_video_D(self.var_video_H)
pred_d_video_real = pred_d_video_real.detach()
l_g_video_gan = self.l_gan_video_w * (
self.cri_video_gan(pred_d_video_real - torch.mean(pred_g_video_fake), False) +
self.cri_video_gan(pred_g_video_fake - torch.mean(pred_d_video_real), True)) / 2
l_g_total += l_g_video_gan
# OFLOW regular
if self.binary_mask is not None:
l_g_total += 1* self.binary_mask.mean()
l_g_total.backward()
self.optimizer_G.step()
# D
for p in self.netD.parameters():
p.requires_grad = True
if self.opt['train'].get("gan_video_weight", 0) > 0:
for p in self.net_video_D.parameters():
p.requires_grad = True
# optimize Image D
self.optimizer_D.zero_grad()
l_d_total = 0
pred_d_real = self.netD(self.var_H)
pred_d_fake = self.netD(self.fake_H.detach()) # detach to avoid BP to G
if self.opt['train']['gan_type'] == 'gan':
l_d_real = self.cri_gan(pred_d_real, True)
l_d_fake = self.cri_gan(pred_d_fake, False)
l_d_total = l_d_real + l_d_fake
elif self.opt['train']['gan_type'] == 'ragan':
l_d_real = self.cri_gan(pred_d_real - torch.mean(pred_d_fake), True)
l_d_fake = self.cri_gan(pred_d_fake - torch.mean(pred_d_real), False)
l_d_total = (l_d_real + l_d_fake) / 2
l_d_total.backward()
self.optimizer_D.step()
# optimize Video D
if self.opt['train'].get("gan_video_weight", 0) > 0:
self.optimizer_video_D.zero_grad()
l_d_video_total = 0
pred_d_video_real = self.net_video_D(self.var_video_H)
pred_d_video_fake = self.net_video_D(self.fake_video_H.detach()) # detach to avoid BP to G
if self.opt['train']['gan_video_type'] == 'gan':
l_d_video_real = self.cri_video_gan(pred_d_video_real, True)
l_d_video_fake = self.cri_video_gan(pred_d_video_fake, False)
l_d_video_total = l_d_video_real + l_d_video_fake
elif self.opt['train']['gan_video_type'] == 'ragan':
l_d_video_real = self.cri_video_gan(pred_d_video_real - torch.mean(pred_d_video_fake), True)
l_d_video_fake = self.cri_video_gan(pred_d_video_fake - torch.mean(pred_d_video_real), False)
l_d_video_total = (l_d_video_real + l_d_video_fake) / 2
l_d_video_total.backward()
self.optimizer_video_D.step()
# set log
if step % self.D_update_ratio == 0 and step > self.D_init_iters:
if self.cri_pix:
self.log_dict['l_g_pix'] = l_g_pix.item()
if self.cri_fea:
self.log_dict['l_g_fea'] = l_g_fea.item()
self.log_dict['l_g_gan'] = l_g_gan.item()
self.log_dict['l_d_real'] = l_d_real.item()
self.log_dict['l_d_fake'] = l_d_fake.item()
self.log_dict['D_real'] = torch.mean(pred_d_real.detach())
self.log_dict['D_fake'] = torch.mean(pred_d_fake.detach())
if self.opt['train'].get("gan_video_weight", 0) > 0:
self.log_dict['D_video_real'] = torch.mean(pred_d_video_real.detach())
self.log_dict['D_video_fake'] = torch.mean(pred_d_video_fake.detach())
def test(self):
self.netG.eval()
with torch.no_grad():
args = [self.var_L]
if self.train_opt.get('use_HR_ref'):
args += [self.var_HR_ref]
if self.var_L_next is not None:
args += [self.var_L_next]
self.fake_H, self.binary_mask = self.netG(*args)
self.netG.train()
def get_current_log(self):
return self.log_dict
def get_current_visuals(self, need_GT=True):
out_dict = OrderedDict()
out_dict['LQ'] = self.var_L.detach()[0].float().cpu()
out_dict['SR'] = self.fake_H.detach()[0].float().cpu()
if self.binary_mask is not None:
out_dict['binary_mask'] = self.binary_mask.detach()[0].float().cpu()
if need_GT:
out_dict['GT'] = self.var_H.detach()[0].float().cpu()
return out_dict
def print_network(self):
# Generator
s, n = self.get_network_description(self.netG)
if isinstance(self.netG, nn.DataParallel) or isinstance(self.netG, DistributedDataParallel):
net_struc_str = '{} - {}'.format(self.netG.__class__.__name__,
self.netG.module.__class__.__name__)
else:
net_struc_str = '{}'.format(self.netG.__class__.__name__)
if self.rank <= 0:
logger.info('Network G structure: {}, with parameters: {:,d}'.format(net_struc_str, n))
logger.info(s)
if self.is_train:
# Discriminator
s, n = self.get_network_description(self.netD)
if isinstance(self.netD, nn.DataParallel) or isinstance(self.netD,
DistributedDataParallel):
net_struc_str = '{} - {}'.format(self.netD.__class__.__name__,
self.netD.module.__class__.__name__)
else:
net_struc_str = '{}'.format(self.netD.__class__.__name__)
if self.rank <= 0:
logger.info('Network D structure: {}, with parameters: {:,d}'.format(
net_struc_str, n))
logger.info(s)
if self.cri_fea: # F, Perceptual Network
s, n = self.get_network_description(self.netF)
if isinstance(self.netF, nn.DataParallel) or isinstance(
self.netF, DistributedDataParallel):
net_struc_str = '{} - {}'.format(self.netF.__class__.__name__,
self.netF.module.__class__.__name__)
else:
net_struc_str = '{}'.format(self.netF.__class__.__name__)
if self.rank <= 0:
logger.info('Network F structure: {}, with parameters: {:,d}'.format(
net_struc_str, n))
logger.info(s)
def load(self):
# G
load_path_G = self.opt['path']['pretrain_model_G']
if load_path_G is not None:
logger.info('Loading model for G [{:s}] ...'.format(load_path_G))
self.load_network(load_path_G, self.netG, self.opt['path']['pretrain_model_G_strict_load'])
if self.opt['network_G'].get("pretrained_net") is not None:
self.netG.module.load_pretrained_net_weights(self.opt['network_G']['pretrained_net'])
# D
load_path_D = self.opt['path']['pretrain_model_D']
if self.opt['is_train'] and load_path_D is not None:
logger.info('Loading model for D [{:s}] ...'.format(load_path_D))
self.load_network(load_path_D, self.netD, self.opt['path']['pretrain_model_D_strict_load'])
# Video D
if self.opt['train'].get("gan_video_weight", 0) > 0:
load_path_video_D = self.opt['path'].get("pretrain_model_video_D")
if self.opt['is_train'] and load_path_video_D is not None:
self.load_network(load_path_video_D, self.net_video_D, self.opt['path']['pretrain_model_video_D_strict_load'])
def save(self, iter_step):
self.save_network(self.netG, 'G', iter_step)
self.save_network(self.netD, 'D', iter_step)
if self.opt['train'].get("gan_video_weight", 0) > 0:
self.save_network(self.net_video_D, 'video_D', iter_step)
@staticmethod
def _freeze_net(network):
for p in network.parameters():
p.requires_grad = False
return network
@staticmethod
def _unfreeze_net(network):
for p in network.parameters():
p.requires_grad = True
return network
def freeze(self, G, D):
if G:
self.netG.module.net = self._freeze_net(self.netG.module.net)
if D:
self.netD.module = self._freeze_net(self.netD.module)
def unfreeze(self, G, D):
if G:
self.netG.module.net = self._unfreeze_net(self.netG.module.net)
if D:
self.netD.module = self._unfreeze_net(self.netD.module)
| StarcoderdataPython |
6576456 | # coding: utf-8
"""
Healthbot APIs
API interface for Healthbot application # noqa: E501
OpenAPI spec version: 1.0.0
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.api.administration_api import AdministrationApi # noqa: E501
from swagger_client.rest import ApiException
class TestAdministrationApi(unittest.TestCase):
"""AdministrationApi unit test stubs"""
def setUp(self):
self.api = swagger_client.api.administration_api.AdministrationApi() # noqa: E501
def tearDown(self):
pass
def test_create_groups(self):
"""Test case for create_groups
Create groups # noqa: E501
"""
pass
def test_create_users(self):
"""Test case for create_users
Create an user. # noqa: E501
"""
pass
def test_delete_group(self):
"""Test case for delete_group
Delete groups # noqa: E501
"""
pass
def test_delete_user(self):
"""Test case for delete_user
Delete list of users. # noqa: E501
"""
pass
def test_flush_groups(self):
"""Test case for flush_groups
Flush the groups # noqa: E501
"""
pass
def test_flush_users(self):
"""Test case for flush_users
Flush user base with new set of records. # noqa: E501
"""
pass
def test_get_group_details(self):
"""Test case for get_group_details
Get lits of all the groups # noqa: E501
"""
pass
def test_get_user_details(self):
"""Test case for get_user_details
Get lits of all the users # noqa: E501
"""
pass
def test_retrieve_groups(self):
"""Test case for retrieve_groups
Get lits of all the groups # noqa: E501
"""
pass
def test_retrieve_roles(self):
"""Test case for retrieve_roles
Get list of all the roles # noqa: E501
"""
pass
def test_retrieve_users(self):
"""Test case for retrieve_users
Get lits of all the users # noqa: E501
"""
pass
def test_update_group(self):
"""Test case for update_group
Get lits of all the roles # noqa: E501
"""
pass
def test_update_user_profile(self):
"""Test case for update_user_profile
Update user profile informations. # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
1744631 | <reponame>evanbrumley/aoc2021
def main():
with open("input", "r") as f:
numbers_raw = f.read()
numbers = [int(num) for num in numbers_raw.splitlines() if num]
last_num = None
count = 0
for num in numbers:
if last_num is not None and num > last_num:
count += 1
last_num = num
print(count)
if __name__ == "__main__":
main()
| StarcoderdataPython |
11210768 | <filename>lib/tinygpgs/main.py
"""Encryption and decryption command-line tool with gpg(1) compatibility."""
import sys
from tinygpgs.pyx import ensure_binary, integer_types, is_stdin_text, is_python_function, callable
# Here we don't import anything from tinygpgs, to make --help and flag
# parsing fast. We do lazy imports later as needed.
# !! Convert README.txt to README.md for easier viewing on GitHub.
# !! Add --speed-test with dependency checks on Crypto.Cipher._AES.
# !! Add --install-pycrypto, make it work as root and non-root, make it
# download pip first if needed, make it remind the user about
# `apt-get install python-dev' (not needed on macOS, needed on Linux;
# maybe python3-dev), make it do `python -m pip install --user pycrypto',
# or `sudo apt-get install pycrypto'.
# !! Add --install-hashlib for Python 2.4.
# !! Add section about installing pycrypto (even on macOS) to README.txt,
# including --speed-test.
# !! Add warning if slow because of Python hash or cipher.
# !! Document encryptedfile and other Python modules.
# !! Check proper error message in Python 2.3, 2.2, 2.1 and 2.0.
# !! Add bzip2 compression and decompression using `import subprocess'. This helps with `sudo apt-get install python3.5-minimal'.
# !! Add mdc_obj.update using a subprocess.
# !! Verify and improve Win32 support (binary mode etc.). Add a .cmd file which runs `python tinygpgs.single ...'.
# !! Make mksingle.py generic, independent of tinygpgs.
# !! doc: py3 syntax conversion: print statement
# !! doc: py3 syntax conversion: octal literals: 0123 is SyntaxError, but 0123e6 is OK.
# !! doc: py3 conversion: ord(data[:1]) instead of ord(data[0])
# !! doc: py3 conversion: basic level: just display an error message on Python 3, octal still has to be converted
# !! doc: py3 conversion: b'%d' % 42 doesn't work in Python 3.4.
# !! doc: py3 conversion: how to detect Python 3: if type(zip()) is not list: # Python 3.
# !! doc: py3 conversion: type(memoryview(b'x')[0]) == type(b'') is different in Python 3.2 (True) and 3.3 (False).
# !! doc: py3 syntax conversion: no u'...' in Python 3.2, so no u'...' in tinygpgs.
# !! doc: py3 warnings SUXX: if size is (): # SyntaxWarning: "is" with a literal. Did you mean "=="?
# This line is read by setup.py.
VERSION = '0.18'
# --- Passphrase prompt.
def prompt_passphrase(is_twice):
sys.stderr.flush()
sys.stdout.flush()
import getpass
# Unfortunately in Python 2.4 and 2.5, this raises a termios.error if
# stdin is redirected to a file. As a workaround, upgrade to Python >=2.6,
# or use `--passphrase ...'.
passphrase = getpass.getpass('Enter passphrase: ')
if not passphrase:
raise SystemExit('fatal: empty passphrase')
if is_twice:
passphrase2 = getpass.getpass('Re-enter passphrase: ')
if passphrase != passphrase2:
raise SystemExit('fatal: passphrases do not match')
return passphrase
# --- Platform-specific code.
def set_fd_binary(fd):
"""Make sure that os.write(fd, ...) doesn't write extra \r bytes etc."""
import sys
if sys.platform.startswith('win'):
import os
import msvcrt
msvcrt.setmode(fd, os.O_BINARY)
# --- Command-line parsing and main().
FLAG_DOC = r"""
Encryption and decryption flags:
* --pinentry-mode <value>: Value must be loopback. Otherwise ignored for PGP
2.x compatibility.
* --slow-cipher: Use the slow, pure Python implementation of the ciphers.
* --no-slow-cipher: Use the fast, C extension implementation (PyCrypto) of the
ciphers, if available. Otherwise, use the slow, pure Python implementation.
* --slow-hash: Use the slow, pure Python implementation of the hashes.
* --no-slow-hash: Use the fast, C extension implementation (PyCrypto) of the
hashes, if available. Otherwise, use the slow, pure Python implementation.
* --file-class: Use the GpgSymmetricFile* class API to do the work. It's a bit
slower than the function-based API, so it's not recommended for regular use.
* --file-class-big: Use the GpgSymmetricFile* class API with a single call
to .read() and .write(...) to do the work. This is not recommended for general
use, because it may use a lot of memory.
* --no-file-class: Use the function API to do the work.
* --batch: Don't ask the user. Fail if no passphrase given on the command line.
* --no-batch: Ask the user if needed. Typically the passphrase is asked.
* --yes: Auto-answer yes to questions. Will overwrite existing output files.
* --no: Don't answer yes to questions. Will abort if the output file already
exists, and will keep it intact.
* --quiet (-q, --no-show-info): Don't show file parameters on stderr.
* --verbose (-v, --show-info): Show file parameters on stderr.
* --show-session-key: Show the session key (block cipher key) on stderr.
Insecure, should be used for debugging or during coercion only.
* --no-show-session-key: Don't show the session key on stderr. Default.
* --passphrase <pass>: Use the specified passphrase. This
flag is insecure (because it adds the passphrase to your shell history),
please don't specify it, but type the passphrases interactively or use
--passphrase-file ... or --passphrase-fd ... instead.
* --passphrase-file <file>: Read the first line of <file> and use it (without
the trailing line breaks) as passphrase.
* --passphrase-fd <fd>: Read the first line from file descriptor <fd> (e.g. 0
for stdin), and use it (without the trailing line breaks) as passphrase.
Be careful: if stdin is used and it's a TTY, it will echo the passphrase.
* --passphrase-repeat <n>: If <n> is more than 1, ask the passphrase twice
before encryption. Doesn't affect decryption. The default is same as for
gpg(1): 1 if `--pinentry-mode loopback' is specified, otherwise 2.
* --bufcap <n>: Use buffer size of (approximately) <n> throughout, and also use
  it as GPG partial packet size. <n> must be a power of 2, at least 512. The
default is 8192.
* --output (-o) <file>: Write the output to <file> instead of stdout.
* --no-options: Ignored for gpg(1) compatibility.
* --no-keyring: Ignored for gpg(1) compatibility.
* --no-default-keyring: Ignored for gpg(1) compatibility.
* --no-use-agent: Ignored for gpg(1) 1.x compatibility.
* --no-symkey-cache: Ignored for gpg(1) 2 compatibility.
* --no-auto-check-trustdb: Ignored for gpg(1) 2 compatibility.
Encryption-only flags:
* --cipher-algo <algo>: Use the specified encryption algorithm. Values:
idea, 3des (des3), cast5, blowfish, aes-128, aes-192, aes-256,
twofish-256, des, twofish-128.
* --digest-algo <algo>: Use the specified hash for string-to-key. Values:
md5, sha1, ripemd160, sha256, sha384, sha512, sha224.
* --compress-algo <algo>: Use the specified compression algorithm. Values:
  none (uncompressed), zip, zlib, bzip2.
* --compress-level <n>: 0: disable compression, 1: fastest, low effort
compression, ..., 6: default compression effort, ..., 9: slowest compression.
* --bzip2-compress-level <n>: Like --compress-level ..., but applies only if
--compress-algo bzip2 is active.
* -a (--armor): Wrap output in ASCII armor (header and Base64 encoding).
* --no-armor: Don't wrap output in ASCII armor.
* --s2k-mode <n>. Use the specified string-to-key mode. Values:
  0: simple, 1: salted, 3: iterated-salted.
* --s2k-count <n>: Run the hash over approximately <n> bytes for --s2k-mode 3.
* --force-mdc: Enable appending of modification-detection code (MDC, hash,
message digest). Default.
* --disable-mdc: Omit the modification-detection code from the output.
* --plain-filename <file>: Store the specified filename in the output.
* --literal-type <c>: Store the specified file type in the output. The value
  b indicates binary, other values are not recommended. Regardless of this flag
  value, all files are treated as binary.
* --mtime <mtime>: Store the specified last modification time in the output.
The value is an integer, the Unix timestamp. The default is 0.
* -f (--recipient-file) <keyfile>: Do public key encryption to the specified
recipient (<keyfile> generated by `gpg --export'). Use `-e' instead of `-c'
if you don't want to enter a passphrase as well.
Decryption-only flags:
* --override-session-key <cipher-algo-int>:<session-key-hex>: Ignore the
session key in the file, use the specified value instead. Useful for
  revealing the plaintext without revealing the passphrase.
"""
LICENSE = (
'This is free software, MIT license. '
'There is NO WARRANTY. Use at your risk.\n')
def show_usage(argv0, do_flags=False):
flag_doc = FLAG_DOC * bool(do_flags)
if flag_doc.startswith('\n '):
flag_doc = flag_doc.replace('\n ', '\n')
sys.stderr.write(
'tinygpgs %s: symmetric key encryption tool compatible with GPG\n%s'
'encryption usage: %s -c [<flag> ...] [FILE.bin]\n'
'decryption usage: %s -d [<flag> ...] FILE.bin.gpg >FILE.bin\n'
'https://github.com/pts/tinygpgs\n%s' %
(VERSION, LICENSE, argv0, argv0, flag_doc))
def get_flag_arg(argv, i):
if i >= len(argv):
raise SystemExit('usage: missing argument for flag: ' + argv[i - 1])
return argv[i]
def get_flag_arg_int(argv, i):
value = get_flag_arg(argv, i)
try:
return int(value)
except ValueError:
# TODO(pts): Check for positive value etc.
raise SystemExit('usage: flag value must be an integer: %s %s' % (argv[i - 1], argv[i]))
def read_passphrase_from_fd(fd):
# We need binary mode for 8-bit accurate s2k.
set_fd_binary(fd)
import os
# Don't read more bytes than needed.
output = []
while 1:
c = os.read(fd, 1) # Don't read too much.
if not c or c in b'\r\n':
# No need for os.close(fd).
return b''.join(output)
output.append(c)
def show_info(msg):
sys.stderr.write('info: %s\n' % (msg,))
sys.stderr.flush() # Automatic, but play it safe.
def show_version():
from tinygpgs import cipher
from tinygpgs import hash
compressions = ['Uncompressed']
try:
import zlib
compressions.extend(('ZIP', 'ZLIB'))
except ImportError:
pass
try:
import bz2
compressions.append('BZIP2')
except ImportError:
pass
cipher_map = {'DES3': '3DES', 'CAST': 'CAST5'}
ciphers = sorted(cipher_map.get(c, c) for c in (n.upper() for n in dir(cipher) if callable(getattr(cipher, n)) and callable(getattr(getattr(cipher, n), 'encrypt', None))))
from tinygpgs import gpgs
ciphers2 = []
for c in ciphers:
cipher_cons, _, keytable_size = gpgs.get_cipher_cons(c + '-128' * (c in ('AES', 'TWOFISH')), False, False)
ciphers2.append(''.join((c, ' (slow)' * is_python_function(cipher_cons(b'\0' * keytable_size).encrypt))))
hashes = sorted(n[5:].upper() for n in dir(hash) if n.startswith('Slow_') and callable(getattr(hash, n)) and callable(getattr(getattr(hash, n), 'update', None)))
hashes2 = []
for n in hashes:
    mdc_obj = gpgs.new_hash(n)
    hashes2.append(''.join((n, ' (slow)' * is_python_function(mdc_obj.update))))
# Format similar to GPG 2.1.
sys.stdout.write('tinygpgs %s\n%s\nSupported algorithms:\nCipher: %s\nHash: %s\nCompression: %s\n' %
(VERSION, LICENSE, ', '.join(ciphers2), ', '.join(hashes2), ', '.join(compressions)))
def main(argv, zip_file=None):
if len(argv) < 2:
show_usage(argv[0])
sys.exit(1)
argv = list(argv)
is_batch = is_yes_flag = False
do_passphrase_twice = True
file_class_mode = 0
output_file = input_file = None
bufcap = 8192
params = {}
params['passphrase'] = ()
params['show_info_func'] = show_info
encrypt_params = {}
encrypt_params['recipients'] = []
decrypt_params = {}
flags_with_arg = (
'--pinentry-mode', '--passphrase', '--passphrase-fd', '--passphrase-file',
'--passphrase-repeat', '--bufcap', '--output', '--cipher-algo',
'--digest-algo', '--s2k-digest-algo', '--compress-algo',
'--compress-level', '--bzip2-compress-level', '--s2k-mode', '--s2k-count',
'--plain-filename', '--literal-type', '--mtime', '--recipient-file',
'--keyring', '--secret-keyring', '--trust-model')
public_key_flags = (
'-e', '--encrypt', '-s', '--sign', '--verify', '--generate-key',
'--gen-key', '--full-generate-key', '--full-gen-key', '--edit-key',
'--export', '--import', '--fast-import',
'--change-passphrase', '--passwd', '--sign-key',
'--lsign-key', '--quick-sign-key', '--quick-lsign-key',
'--trusted-key', '--trust-model', '--recipient', '-r',
'--hidden-recipient', '-R',
'--hidden-recipient-file', '-F', '--encrypt-to', '--hidden-encrypt-to',
'--no-encrypt-to', '--sender', '--primary-keyring',
) # gpg(1).
i, do_encrypt = 1, None
while i < len(argv): # Scan for -c or -d.
arg = argv[i]
if arg == '-' or not arg.startswith('-'):
break
i += 1
if arg == '--':
break
if arg in ('-c', '--symmetric'): # Like gpg(1).
do_encrypt = True
elif arg in ('-e', '--encrypt'):
do_encrypt = 2
elif arg in ('-d', '--decrypt'): # Like gpg(1).
do_encrypt = False
elif arg == '--help':
show_usage(argv[0], do_flags=True)
sys.exit(0)
elif arg == '--version':
show_version()
sys.exit(0)
elif arg in public_key_flags:
raise SystemExit('usage: public-key cryptography not supported: %s' % arg)
elif arg in flags_with_arg and i < len(argv):
i += 1
if do_encrypt is None:
show_usage(argv[0])
sys.exit(1)
i = 1
while i < len(argv):
arg = argv[i]
if arg == '-' or not arg.startswith('-'):
break
i += 1
is_yes = not arg.startswith('--no-')
if arg == '--':
break
elif arg in ('-c', '--symmetric', '-d', '--decrypt', '-e', '--encrypt'): # gpg(1).
pass # Already parsed in the loop above.
elif arg in ('--no-options', '--no-keyring', '--no-default-keyring', '--no-use-agent', '--no-symkey-cache', '--no-auto-check-trustdb'): # gpg(1).
pass
elif arg == '--use-agent':
raise SystemExit('usage: unsupported flag: %s' % arg)
elif arg == '--options':
options_filename = get_flag_arg(argv, i)
i += 1
raise SystemExit('usage: unsupported flag: %s' % arg)
elif arg in ('--keyring', '--secret-keyring'):
if get_flag_arg(argv, i) != '/dev/null':
raise SystemExit('usage: unsupported flag value, expecting /dev/null: %s %s' % (arg, argv[i]))
i += 1
elif arg == '--pinentry-mode': # gpg(1).
if get_flag_arg(argv, i) != 'loopback':
raise SystemExit('usage: unsupported flag value, expecting loopback: %s %s' % (arg, argv[i]))
i += 1
do_passphrase_twice = False # Compatible with gpg(1).
elif arg == '--trust-model': # gpg(1).
get_flag_arg(argv, i) # Typical value: always
i += 1
elif arg in ('--slow-cipher', '--no-slow-cipher'):
params['is_slow_cipher'] = is_yes
elif arg in ('--slow-hash', '--no-slow-hash'):
params['is_slow_hash'] = is_yes
elif arg in ('--file-class', '--no-file-class'):
file_class_mode = int(is_yes)
elif arg == '--file-class-big':
file_class_mode = 2
elif arg in ('--batch', '--no-batch'): # gpg(1).
is_batch = is_yes
elif arg == '--yes': # gpg(1).
is_yes_flag = True
elif arg == '--no': # gpg(1).
is_yes_flag = False
elif arg in ('-q', '--quiet'): # gpg(1).
params['show_info_func'] = False
elif arg in ('-v', '--verbose'): # gpg(1).
params['show_info_func'] = show_info
elif arg in ('--show-info', '--no-show-info'):
params['show_info_func'] = is_yes and show_info
elif arg in ('--show-session-key', '--no-show-session-key'): # gpg(1).
params['do_show_session_key'] = is_yes
elif arg == '--passphrase': # gpg(1).
params['passphrase'] = get_flag_arg(argv, i)
i += 1
elif arg == '--passphrase-fd': # gpg(1).
params['passphrase'] = get_flag_arg_int(argv, i)
i += 1
elif arg == '--passphrase-file': # gpg(1).
params['passphrase'] = [get_flag_arg(argv, i)]
i += 1
elif arg == '--passphrase-repeat': # gpg(1).
# GPG 2.1.18 ignores this flag with `--pinentry-mode loopback'. We use
# it for a repeated passphrase prompt.
do_passphrase_twice = get_flag_arg_int(argv, i) > 1
i += 1
elif arg == '--bufcap':
bufcap = get_flag_arg_int(argv, i)
i += 1
elif arg in ('-o', '--output'): # gpg(1).
output_file = get_flag_arg(argv, i)
i += 1
elif not do_encrypt and arg == '--override-session-key': # gpg(1).
flag_value = get_flag_arg(argv, i)
from tinygpgs import gpgs
try:
decrypt_params['override_session_key'] = gpgs.parse_override_session_key(flag_value)
except ValueError as e:
raise SystemExit('usage: invalid flag syntax (%s): %s %s' % (e, arg, argv[i]))
i += 1
elif do_encrypt and arg == '--cipher-algo': # gpg(1).
encrypt_params['cipher'] = get_flag_arg(argv, i)
i += 1
elif do_encrypt and arg in ('--digest-algo', '--s2k-digest-algo'): # gpg(1).
encrypt_params['s2k_digest'] = get_flag_arg(argv, i)
i += 1
    elif do_encrypt and arg == '--compress-algo': # gpg(1).
encrypt_params['compress'] = get_flag_arg(argv, i)
i += 1
elif do_encrypt and arg in ('-a', '--armor', '--no-armor'): # gpg(1).
encrypt_params['do_add_ascii_armor'] = is_yes
elif do_encrypt and arg in ('-z', '--compress-level'): # gpg(1).
encrypt_params['compress_level'] = get_flag_arg_int(argv, i)
i += 1
elif do_encrypt and arg == '--bzip2-compress-level': # gpg(1).
encrypt_params['bzip2_compress_level'] = get_flag_arg_int(argv, i)
i += 1
elif do_encrypt and arg == '--s2k-mode': # gpg(1).
encrypt_params['s2k_mode'] = get_flag_arg_int(argv, i)
i += 1
elif do_encrypt and arg == '--s2k-count': # gpg(1).
encrypt_params['s2k_count'] = get_flag_arg_int(argv, i)
i += 1
elif do_encrypt and arg == '--force-mdc': # gpg(1).
encrypt_params['do_mdc'] = True
elif do_encrypt and arg == '--disable-mdc': # gpg(1).
encrypt_params['do_mdc'] = False
elif do_encrypt and arg == '--plain-filename':
encrypt_params['plain_filename'] = get_flag_arg(argv, i)
i += 1
elif do_encrypt and arg == '--literal-type':
encrypt_params['literal_type'] = get_flag_arg(argv, i)
i += 1
elif do_encrypt and arg == '--mtime':
encrypt_params['mtime'] = get_flag_arg_int(argv, i)
i += 1
elif do_encrypt and arg in ('--recipient-file', '-f'): # gpg(1) 2.
from tinygpgs import pubkey
filename = get_flag_arg(argv, i)
i += 1
f = open(filename, 'rb')
try:
encrypt_params['recipients'].append(pubkey.load_pk_encryption_key(f.read, params.get('is_slow_cipher', False)))
finally:
f.close()
else:
raise SystemExit('usage: unknown flag: ' + arg)
if i < len(argv) and input_file is None: # gpg(1).
input_file = argv[i]
i += 1
if i != len(argv):
raise SystemExit('usage: too many command-line arguments')
if do_encrypt == 2:
if not encrypt_params['recipients']:
raise SystemExit('usage: public-key encryption needs recipients; pass e.g. -f')
if params['passphrase'] is ():
params['passphrase'] = None # Don't ask for passphrase.
if input_file is None:
input_file = '-'
if output_file is None:
if do_encrypt and input_file != '-': # gpg(1).
if encrypt_params.get('do_add_ascii_armor'):
output_file = input_file + '.asc'
else:
output_file = input_file + '.gpg'
else:
output_file = '-'
if bufcap < 1 or bufcap & (bufcap - 1):
raise SystemExit('usage: --bufcap value must be a power of 2, got: %d' % bufcap)
buflog2cap = 1
while bufcap > (1 << buflog2cap):
buflog2cap += 1
assert bufcap == 1 << buflog2cap # Just to make sure.
if buflog2cap > 30:
raise SystemExit('usage: --bufcap value too large, must be at most %d, got: %d' % (1 << 30, bufcap))
if buflog2cap < 9:
# Some ciphers need >= 16, GPG partial packets need at least 512.
raise SystemExit('usage: --bufcap value too small, must be at least %d, got: %d' % (1 << 9, bufcap))
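  # Here bufcap == 1 << buflog2cap; e.g. the default --bufcap 8192 gives buflog2cap == 13.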
if isinstance(params['passphrase'], list): # File:
# We need binary mode for 8-bit accurate s2k.
try:
f = open(params['passphrase'][0], 'rb')
except IOError as e:
raise SystemExit('fatal: error opening the passphrase file: %s' % e)
try:
params['passphrase'] = f.readline().rstrip(b'\r\n')
finally:
f.close()
  elif isinstance(params['passphrase'], integer_types): # File descriptor.
params['passphrase'] = read_passphrase_from_fd(params['passphrase'])
elif params['passphrase'] is (): # Interactive prompt.
if is_batch:
raise SystemExit('usage: passphrase prompt conflicts with --batch mode')
params['passphrase'] = lambda is_twice=do_encrypt and do_passphrase_twice: (
prompt_passphrase(is_twice))
if is_stdin_text:
import os
inf, of = os.fdopen(0, 'rb'), os.fdopen(1, 'wb')
else:
inf, of = sys.stdin, sys.stdout
try:
if input_file == '-':
set_fd_binary(inf.fileno())
else:
inf = open(input_file, 'rb')
if output_file == '-':
set_fd_binary(of.fileno())
else:
if not is_yes_flag:
import os.path
# TODO(pts): Like gpg(1), don't clobber the output on a decrypt
# attempt with a bad passphrase or on a user abort during the
# passphrase prompt.
if os.path.exists(output_file):
# gpg(1) asks the user interactively after the passphrase prompt.
raise SystemExit('fatal: output file exists, not overwriting: %s' %
output_file)
of = open(output_file, 'wb')
if do_encrypt:
encrypt_params.update(params) # Shouldn't have common fields.
encrypt_params['buflog2cap'] = buflog2cap
if ('bzip2_compress_level' in encrypt_params and
encrypt_params.get('compress', '').lower() == 'bzip2'):
encrypt_params['compress_level'] = encrypt_params.pop('bzip2_compress_level')
else:
encrypt_params.pop('bzip2_compress_level', None)
# Defaults in GPG <2.1, including 1.4.18.
#encrypt_params.setdefault('cipher', 'sha256')
#encrypt_params.setdefault('compress', 'none')
#encrypt_params.setdefault('compress_level', 9)
#encrypt_params.setdefault('do_mdc', False)
from tinygpgs import gpgs
if file_class_mode:
from tinygpgs import file # Lazy import to make startup (flag parsing) fast.
f = file.GpgSymmetricFileWriter(of.write, 'wb', **encrypt_params)
try:
if file_class_mode == 2:
f.write(inf.read())
else:
assert bufcap == f.bufcap
data = inf.read(f.write_hint)
while data:
f.write(data)
data = inf.read(f.write_hint)
finally:
f.close()
else:
gpgs.encrypt_symmetric_gpg(inf.read, of, **encrypt_params)
else: # Decrypt.
decrypt_params.update(params) # Shouldn't have common fields.
from tinygpgs import gpgs
try:
if file_class_mode:
from tinygpgs import file
f = file.GpgSymmetricFileReader(inf.read, 'rb', **decrypt_params)
try:
if file_class_mode == 2:
of.write(f.read())
else:
while 1:
data = f.read(bufcap)
if not data:
break
of.write(data)
finally:
f.close()
dparams = f.params
else:
dparams = gpgs.decrypt_symmetric_gpg(inf.read, of, **decrypt_params)
except gpgs.BadPassphraseError as e:
msg = str(e)
sys.stderr.write('fatal: %s%s\n' % (msg[0].lower(), msg[1:].rstrip('.')))
sys.exit(2)
if not dparams['has_mdc']: # Same as gpg(1)'s output after the colon.
sys.stderr.write('warning: message was not integrity protected\n')
finally:
try:
if of is not sys.stdout:
of.close()
finally:
if inf is not sys.stdin:
inf.close()
| StarcoderdataPython |
3404366 | from math import floor
import pygame
from pygame.locals import *
import time
from bait import Bait
from snake import Snake
game_height = 611
game_width = 914
BLOCK_SIZE = 23
TILE_COLOR = (195, 207, 161)
BG_COLOR = (64, 64, 64)
class Game:
def __init__(self):
pygame.init()
pygame.display.set_caption("Classic Snake Game")
self.surface = pygame.display.set_mode((game_width, game_height))
self.pause_tile = pygame.image.load("resources\pause.png")
self.lost_img = pygame.image.load("resources\game_lost.png")
self.pause_sound = pygame.mixer.Sound("resources\sound_03.mp3")
self.lost_sound = pygame.mixer.Sound("resources\sound_04.mp3")
self.bg_img = pygame.image.load("resources\game_bg.png")
self.numbers = [pygame.image.load("resources\z_0.png"), pygame.image.load("resources\z_1.png"),
pygame.image.load("resources\z_2.png"), pygame.image.load("resources\z_3.png"),
pygame.image.load("resources\z_4.png"), pygame.image.load("resources\z_5.png"),
pygame.image.load("resources\z_6.png"), pygame.image.load("resources\z_7.png"),
pygame.image.load("resources\z_8.png"), pygame.image.load("resources\z_9.png")]
self.numbers_red = [pygame.image.load("resources\zz_0.png"), pygame.image.load("resources\zz_1.png"),
pygame.image.load("resources\zz_2.png"), pygame.image.load("resources\zz_3.png"),
pygame.image.load("resources\zz_4.png"), pygame.image.load("resources\zz_5.png"),
pygame.image.load("resources\zz_6.png"), pygame.image.load("resources\zz_7.png"),
pygame.image.load("resources\zz_8.png"), pygame.image.load("resources\zz_9.png")]
# build places
self.tiles = [[(0, 0) for x in range(18)] for y in range(32)]
for i in range(18):
for j in range(32):
x = 90 + (j * 20) + (3 * j)
y = 122 + (i * 20) + (3 * i)
self.tiles[j][i] = (x, y)
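        # 20 px tiles with 3 px gaps, e.g. tiles[0][0] == (90, 122) and tiles[1][0] == (113, 122).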
self.reset_game()
# classes and variables
self.snake = Snake(self, self.surface, 5)
self.snake.draw()
self.bait_1 = Bait(self, self.surface, 'bait_1')
self.bait_2 = Bait(self, self.surface, 'bait_2')
self.is_running = True
self.game_status = ''
self.score = 0
self.level = 1
self.speed = 0.3
def reset_game(self):
# build surface - draw background
self.draw_background(self.surface)
self.reset_board()
# update display
pygame.display.flip()
self.snake = Snake(self, self.surface, 5)
self.snake.draw()
self.bait_1 = Bait(self, self.surface, 'bait_1')
self.bait_2 = Bait(self, self.surface, 'bait_2')
self.is_running = True
self.game_status = ''
self.score = 0
self.level = 1
self.speed = 0.3
def locate_xy(self, part):
xy = self.tiles[part[1]][part[0]]
return xy
def run(self):
running = True
pause = False
while running:
for event in pygame.event.get():
if event.type == KEYDOWN:
if event.key == K_ESCAPE:
running = False
if event.key == K_RETURN:
if self.game_status != 'lost':
pygame.mixer.Sound.play(self.pause_sound)
pause = not pause
else:
self.reset_game()
pause = False
if not pause and event.key == K_LEFT:
self.snake.move('left')
if not pause and event.key == K_RIGHT:
self.snake.move('right')
if not pause and event.key == K_UP:
self.snake.move('up')
if not pause and event.key == K_DOWN:
self.snake.move('down')
elif event.type == QUIT:
running = False
try:
if not pause:
self.play()
self.update_score_level()
time.sleep(self.speed - self.level * 0.00253)
elif pause:
if self.game_status == '':
self.surface.blit(self.pause_tile, (834, 12))
pygame.display.flip()
time.sleep(1)
pygame.draw.rect(self.surface, BG_COLOR, pygame.Rect(832, 10, 50, 50))
pygame.display.flip()
time.sleep(1)
except Exception as e:
self.game_status = 'lost'
pygame.mixer.Sound.play(self.lost_sound)
rect = self.lost_img.get_rect()
self.surface.blit(self.lost_img, (game_width / 2 - rect.width / 2, game_height / 2 - rect.height / 2))
pygame.display.flip()
pause = True
def play(self):
self.snake.walk()
# draw elements
snake_body = []
for i in self.snake.body:
snake_body.append(i)
self.bait_1.draw(0.1, snake_body)
snake_body.append(self.bait_1.position)
self.bait_2.draw(0.2, snake_body)
# check snake collision
if self.snake.body_hit():
raise "Collision Occurred"
# check bait hit 1
if self.snake.bait_hit(self.bait_1.position):
print("snake ate {0} - from bait_1".format(self.bait_1.bait_type))
self.bait_1.redefine()
self.bait_1.clear()
self.snake.increase_length(self.bait_1)
self.score += self.bait_1.score
# check bait hit 2
if self.snake.bait_hit(self.bait_2.position):
self.bait_2.redefine()
self.bait_2.clear()
print("snake ate {0} - from bait_2".format(self.bait_2.bait_type))
self.snake.increase_length(self.bait_2)
self.score += self.bait_2.score
def update_score_level(self):
is_red = False
# calculate level
self.level = int(self.score / 10)
if self.level > 99:
self.level = 99
is_red = True
# fix background
pygame.draw.rect(self.surface, BG_COLOR, pygame.Rect(266, 18, 120, 50))
pygame.draw.rect(self.surface, BG_COLOR, pygame.Rect(570, 18, 120, 50))
self.surface.blit(self.bg_img, (250, 20), (230, 0, 500, 51))
# calculate numbers
hu = floor(self.score / 100)
te = floor(self.score / 10) - hu * 10
on = self.score - hu * 100 - te * 10
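        # e.g. score 347 -> hu == 3, te == 4, on == 7 (hundreds, tens and ones digits)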
if self.score < 999:
score_hundred = self.numbers[hu]
score_ten = self.numbers[te]
score_one = self.numbers[on]
else:
score_hundred = self.numbers_red[9]
score_ten = self.numbers_red[9]
score_one = self.numbers_red[9]
lte = floor(self.level / 10)
lon = self.level - lte * 10
if not is_red:
level_ten = self.numbers[lte]
level_one = self.numbers[lon]
else:
level_ten = self.numbers_red[lte]
level_one = self.numbers_red[lon]
# blit numbers on screen
self.surface.blit(score_hundred, (266, 18))
self.surface.blit(score_ten, (266 + 36, 18))
self.surface.blit(score_one, (266 + 36 + 36, 18))
self.surface.blit(level_ten, (586, 18))
self.surface.blit(level_one, (586 + 36, 18))
pygame.display.flip()
def draw_background(self, surface):
surface.fill(BG_COLOR)
surface.blit(self.bg_img, (20, 20))
def reset_board(self):
for j in range(len(self.tiles)):
for i in range(len(self.tiles[j])):
xy = self.tiles[j][i]
pygame.draw.rect(self.surface, TILE_COLOR, pygame.Rect(xy[0], xy[1], 20, 20))
| StarcoderdataPython |
1690689 | <reponame>zakandrewking/theseus<gh_stars>0
from theseus.models import *
import cobra
import os
import pytest
def test_get_model_list():
model_list = get_model_list()
assert 'iJO1366' in model_list
assert 'iAF1260' in model_list
assert 'E coli core' in model_list
def test_check_for_model():
assert check_for_model('e_coli_core')=='E coli core'
assert check_for_model('E. coli core')=='E coli core'
def test_load_model():
model = load_model('iJO1366')
assert isinstance(model, cobra.core.Model)
model = load_model('RECON1')
assert isinstance(model, cobra.core.Model)
def test_load_model_json():
model = load_model('iML1503')
assert isinstance(model, cobra.core.Model)
def test_id_for_new_id_style():
assert(id_for_new_id_style('EX_glc(r)', is_metabolite=False)=='EX_glc_r')
assert(id_for_new_id_style('glucose[r]', is_metabolite=True)=='glucose_r')
def test_convert_ids():
for model_name in 'iJO1366', 'iAF1260', 'E coli core':
print "\n"
print model_name
model = load_model(model_name)
# cobrapy style
model = convert_ids(model, new_id_style='cobrapy')
        print('cobrapy ids')
        print([str(x) for x in model.reactions if 'lac' in str(x)])
        assert 'EX_lac__D_e' in [str(x) for x in model.reactions]
        assert all('-' not in str(x) for x in model.reactions)
        print([str(x) for x in model.metabolites if 'lac' in str(x)])
        assert 'lac__D_e' in [str(x) for x in model.metabolites]
        assert all('-' not in str(x) for x in model.metabolites)
# simpheny style
model = convert_ids(model, new_id_style='simpheny')
        print('simpheny ids')
        print([str(x) for x in model.reactions if 'lac' in str(x)])
        assert 'EX_lac-D(e)' in [str(x) for x in model.reactions]
        assert all('__' not in str(x) for x in model.reactions)
        print([str(x) for x in model.metabolites if 'lac' in str(x)])
        assert 'lac-D[e]' in [str(x) for x in model.metabolites]
        assert all('__' not in str(x) for x in model.metabolites)
def test_get_formulas_from_names():
model = load_model('iAF1260')
assert model.metabolites.get_by_id('acald_c').formula.elements['C'] == 2
assert model.metabolites.get_by_id('acald_c').formula.elements['H'] == 4
assert model.metabolites.get_by_id('acald_c').formula.elements['O'] == 1
def test_turn_off_carbon_sources():
for model_name in 'iJO1366', 'iAF1260', 'E coli core':
        print(model_name)
model = load_model(model_name)
model.reactions.get_by_id('EX_glc_e').lower_bound = -100
model.reactions.get_by_id('EX_ac_e').lower_bound = -100
model = turn_off_carbon_sources(model)
assert model.reactions.get_by_id('EX_glc_e').lower_bound==0
assert model.reactions.get_by_id('EX_ac_e').lower_bound==0
def test_setup_model():
model = load_model('iJO1366')
model = setup_model(model, 'EX_glc_e', aerobic=False, sur=18)
assert model.reactions.get_by_id('EX_glc_e').lower_bound==-18
assert model.reactions.get_by_id('EX_o2_e').lower_bound==0
assert model.reactions.get_by_id('CAT').upper_bound==0
assert model.reactions.get_by_id('SPODM').upper_bound==0
model = setup_model(model, ['EX_glc_e', 'EX_xyl__D_e'], sur=-18)
assert model.reactions.get_by_id('EX_glc_e').lower_bound==-18
assert model.reactions.get_by_id('EX_xyl__D_e').lower_bound==-18
model = setup_model(model, {'EX_glc_e':-5, 'EX_xyl__D_e':5}, sur=999)
assert model.reactions.get_by_id('EX_glc_e').lower_bound==-5
assert model.reactions.get_by_id('EX_xyl__D_e').lower_bound==-5
def test_turn_on_subsystem():
with pytest.raises(NotImplementedError):
turn_on_subsystem(None, None)
def test_carbons_for_exchange_reaction():
model = load_model('iJO1366')
assert carbons_for_exchange_reaction(model.reactions.get_by_id('EX_glc_e'))==6
assert carbons_for_exchange_reaction(model.reactions.get_by_id('EX_ac_e'))==2
assert carbons_for_exchange_reaction(model.reactions.get_by_id('EX_for_e'))==1
def test_add_pathway():
new = [ { '1poh_c': {'formula': 'C3H8O', 'name': '1-propanol'} },
{ '2OBUTDC': {'2obut_c': -1, 'h_c': -1, 'ppal_c': 1, 'co2_c': 1},
'1PDH': {'ppal_c': -1, 'nadh_c': -1, 'h_c': -1, '1poh_c': 1, 'nad_c': 1},
'EX_1poh_e': {'1poh_c': -1} },
{ 'EX_1poh_e': '1-propanol production' },
{ 'EX_1poh_e': (0, 1000) } ]
m = load_model('iJO1366')
model = add_pathway(m.copy(), *new, check_mass_balance=True)
assert isinstance(model.metabolites.get_by_id(new[0].keys()[0]), cobra.Metabolite)
reaction = model.reactions.get_by_id('EX_1poh_e')
assert isinstance(model.reactions.get_by_id(new[1].keys()[0]), cobra.Reaction)
assert reaction.reversibility == False
assert reaction.upper_bound == 1000
assert reaction.lower_bound == 0
# test metabolite name
assert model.metabolites.get_by_id('1poh_c').name == '1-propanol'
# test mass balance
assert len(model.reactions.get_by_id('1PDH').check_mass_balance()) == 0
# test ignore repeats
model = add_pathway(m.copy(), *new)
with pytest.raises(Exception):
model = add_pathway(model, *new)
model = add_pathway(model, *new, ignore_repeats=True)
| StarcoderdataPython |
3346737 | <reponame>rikeshtailor/Office365-REST-Python-Client
import uuid
from office365.teams.team import Team
from tests.graph_case import GraphTestCase
class TestGraphTeam(GraphTestCase):
"""Tests for teams"""
target_team = None # type: Team
@classmethod
def setUpClass(cls):
super(TestGraphTeam, cls).setUpClass()
def test1_create_team_from_group(self):
grp_name = "Group_" + uuid.uuid4().hex
result = self.client.teams.create(grp_name).execute_query_retry(max_retry=6, timeout_secs=5)
self.assertIsNotNone(result.value.id)
self.__class__.target_team = result.value
def test3_get_all_teams(self):
teams = self.client.teams.get_all().execute_query()
self.assertGreater(len(teams), 0)
def test4_get_joined_teams(self):
my_teams = self.client.me.joined_teams.get().execute_query()
self.assertIsNotNone(my_teams.resource_path)
self.assertGreater(len(my_teams), 0)
def test5_get_team(self):
group_id = self.__class__.target_team.id
existing_team = self.client.teams[group_id].get().execute_query()
self.assertIsNotNone(existing_team.resource_url)
self.assertIsNotNone(existing_team.messaging_settings)
if existing_team.is_archived:
existing_team.unarchive()
self.client.load(existing_team)
self.client.execute_query()
self.assertFalse(existing_team.is_archived)
def test6_update_team(self):
team_id = self.__class__.target_team.id
team_to_update = self.client.teams[team_id]
team_to_update.fun_settings.allowGiphy = False
team_to_update.update().execute_query()
def test7_archive_team(self):
team_id = self.__class__.target_team.id
self.client.teams[team_id].archive().execute_query()
def test8_delete_team(self):
grp_to_delete = self.__class__.target_team
grp_to_delete.delete_object().execute_query()
| StarcoderdataPython |
380565 | <filename>tests/sample_apps/how_to/_achievement.py
from ._integration_test_case import IntegrationTestCase
from accelbyte_py_sdk.api.achievement.models import ModelsAchievementRequest
class AchievementTestCase(IntegrationTestCase):
exist: bool = False
models_achievement_request: ModelsAchievementRequest = ModelsAchievementRequest.create(
achievement_code="CODE",
default_language="EN",
description={"EN": "DESCRIPTION"},
goal_value=1,
hidden=False,
incremental=False,
locked_icons=[],
name={"EN": "NAME"},
stat_code="STAT_CODE",
tags=["TAG"],
unlocked_icons=[]
)
def tearDown(self) -> None:
from accelbyte_py_sdk.api.achievement import admin_delete_achievement
if self.exist:
_, error = admin_delete_achievement(achievement_code=self.models_achievement_request.achievement_code)
self.log_warning(msg=f"Failed to tear down achievement. {str(error)}", condition=error is not None)
self.exist = error is not None
super().tearDown()
def test_admin_create_new_achievement(self):
from accelbyte_py_sdk.api.achievement import admin_create_new_achievement
from accelbyte_py_sdk.api.achievement import admin_delete_achievement
# arrange
_, error = admin_delete_achievement(achievement_code=self.models_achievement_request.achievement_code)
self.exist = error is not None
# act
result, error = admin_create_new_achievement(body=self.models_achievement_request)
self.exist = error is None
# assert
self.assertIsNone(error, error)
def test_admin_delete_achievement(self):
from accelbyte_py_sdk.api.achievement import admin_create_new_achievement
from accelbyte_py_sdk.api.achievement import admin_delete_achievement
# arrange
_, error = admin_create_new_achievement(body=self.models_achievement_request)
self.log_warning(msg=f"Failed to set up achievement. {str(error)}", condition=error is not None)
self.exist = error is None
# act
result, error = admin_delete_achievement(achievement_code=self.models_achievement_request.achievement_code)
self.exist = error is not None
# assert
self.assertIsNone(error, error)
def test_admin_get_achievement(self):
from accelbyte_py_sdk.api.achievement import admin_create_new_achievement
from accelbyte_py_sdk.api.achievement import admin_get_achievement
# arrange
_, error = admin_create_new_achievement(body=self.models_achievement_request)
self.log_warning(msg=f"Failed to set up achievement. {str(error)}", condition=error is not None)
self.exist = error is None
# act
_, error = admin_get_achievement(achievement_code=self.models_achievement_request.achievement_code)
# assert
self.assertIsNone(error, error)
def test_admin_list_achievements(self):
from accelbyte_py_sdk.api.achievement import admin_create_new_achievement
from accelbyte_py_sdk.api.achievement import admin_list_achievements
# arrange
_, error = admin_create_new_achievement(body=self.models_achievement_request)
self.log_warning(msg=f"Failed to set up achievement. {str(error)}", condition=error is not None)
self.exist = error is None
# act
_, error = admin_list_achievements()
# assert
self.assertIsNone(error, error)
def test_admin_update_achievement(self):
from accelbyte_py_sdk.api.achievement import admin_create_new_achievement
from accelbyte_py_sdk.api.achievement import admin_update_achievement
from accelbyte_py_sdk.api.achievement.models import ModelsAchievementUpdateRequest
# arrange
_, error = admin_create_new_achievement(body=self.models_achievement_request)
self.log_warning(msg=f"Failed to set up achievement. {str(error)}", condition=error is not None)
self.exist = error is None
# act
result, error = admin_update_achievement(
achievement_code=self.models_achievement_request.achievement_code,
body=ModelsAchievementUpdateRequest.create(
default_language="ID",
description={"ID": "KETERANGAN"},
goal_value=1,
hidden=False,
incremental=False,
locked_icons=[],
name={"ID": "NAMA"},
stat_code="KODE_STATUS",
tags=["MENANDAI"],
unlocked_icons=[]
)
)
# assert
self.assertIsNone(error, error)
self.assertIn("ID", result.name)
self.assertEqual("NAMA", result.name["ID"])
| StarcoderdataPython |