| column | dtype | range |
|---|---|---|
| hexsha | string | lengths 40 to 40 |
| size | int64 | 3 to 1.03M |
| ext | string (categorical) | 10 classes |
| lang | string (categorical) | 1 class |
| max_stars_repo_path | string | lengths 3 to 972 |
| max_stars_repo_name | string | lengths 6 to 130 |
| max_stars_repo_head_hexsha | string | lengths 40 to 78 |
| max_stars_repo_licenses | sequence | lengths 1 to 10 |
| max_stars_count | int64 | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string | lengths 24 to 24 |
| max_stars_repo_stars_event_max_datetime | string | lengths 24 to 24 |
| max_issues_repo_path | string | lengths 3 to 972 |
| max_issues_repo_name | string | lengths 6 to 130 |
| max_issues_repo_head_hexsha | string | lengths 40 to 78 |
| max_issues_repo_licenses | sequence | lengths 1 to 10 |
| max_issues_count | int64 | 1 to 116k |
| max_issues_repo_issues_event_min_datetime | string | lengths 24 to 24 |
| max_issues_repo_issues_event_max_datetime | string | lengths 24 to 24 |
| max_forks_repo_path | string | lengths 3 to 972 |
| max_forks_repo_name | string | lengths 6 to 130 |
| max_forks_repo_head_hexsha | string | lengths 40 to 78 |
| max_forks_repo_licenses | sequence | lengths 1 to 10 |
| max_forks_count | int64 | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string | lengths 24 to 24 |
| max_forks_repo_forks_event_max_datetime | string | lengths 24 to 24 |
| content | string | lengths 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |
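The columns above follow the usual code-corpus layout: one source file per row, with `content` holding the raw file text and the remaining fields carrying repo metadata and per-file statistics. As a rough, hedged illustration only (the dataset's actual id is not given here, so the `load_dataset` argument is a placeholder), rows with this schema could be scanned like this:

# Hedged sketch: iterate rows of a dataset with the schema above.
# "path/to/this-dataset" is a placeholder, not a real dataset id.
from datasets import load_dataset

ds = load_dataset("path/to/this-dataset", split="train", streaming=True)
for row in ds:
    if row["ext"] == "py" and row["max_line_length"] < 200:
        print(row["max_stars_repo_path"], row["size"], row["alphanum_fraction"])
        print(row["content"][:200])  # first 200 characters of the file
        break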
ad2877c3531e129bf21b558a0ab264817d4fb3aa | 3,432 | py | Python | second/pytorch/mayank_play/reading_train_info_and_ploting_multiple_sweeps_all_info.py | mayanks888/second.pytorch | 02d37885a543ee46516648dcab7db8f5d677a179 | ["MIT"] | stars: null | issues: null | forks: null

import pickle
import numpy as np
import mayavi.mlab as mlab  # mlab is used by draw_lidar_simple() below


def draw_lidar_simple(pc, color=None):
    ''' Draw lidar points. simplest set up. '''
    fig = mlab.figure(figure=None, bgcolor=(0, 0, 0), fgcolor=None, engine=None, size=(1600, 1000))
    if color is None: color = pc[:, 2]
    # draw points
    mlab.points3d(pc[:, 0], pc[:, 1], pc[:, 2], color, color=None, mode='point', colormap='gnuplot', scale_factor=1, figure=fig)
    # draw origin
    mlab.points3d(0, 0, 0, color=(1, 1, 1), mode='sphere', scale_factor=0.5)
    # draw axis
    axes = np.array([
        [2., 0., 0., 0.],
        [0., 2., 0., 0.],
        [0., 0., 2., 0.],
    ], dtype=np.float64)
    mlab.plot3d([0, axes[0, 0]], [0, axes[0, 1]], [0, axes[0, 2]], color=(1, 0, 0), tube_radius=None, figure=fig)
    mlab.plot3d([0, axes[1, 0]], [0, axes[1, 1]], [0, axes[1, 2]], color=(0, 1, 0), tube_radius=None, figure=fig)
    mlab.plot3d([0, axes[2, 0]], [0, axes[2, 1]], [0, axes[2, 2]], color=(0, 0, 1), tube_radius=None, figure=fig)
    mlab.view(azimuth=180, elevation=70, focalpoint=[12.0909996, -1.04700089, -2.03249991], distance=62.0, figure=fig)
    # mlab.show()
    return fig
# datapath_file ='/home/mayank_sati/pycharm_projects/pytorch/second_nuscene_mayank/second/save_pkl/nuscenes_infos_train.pkl'
# datapath_file ='/home/mayank_sati/pycharm_projects/tensorflow/traffic_light_detection_classification-master/traffic_light_classification/autokeras/model_file/test_autokeras_model.pkl'
# datapath_file ='/home/mayank_sati/pycharm_projects/pytorch/second.pytorch_traveller59_date_9_05/second/point_pp_nuscene/eval_results/step_140670/result.pkl'
# datapath_file ='/home/mayank_sati/Documents/point_clouds/nuscene_v_mayank/infos_train.pkl'
# datapath_file ='/home/mayank_sati/Downloads/v1.0-mini/infos_train.pkl'
datapath_file ='/home/user/Downloads/v1.0-mini/infos_val.pkl'
# datapath_file ='/home/mayank_sati/pycharm_projects/pytorch/second.pytorch_traveller59_date_9_05/second/pytorch/38_lidar.pkl'
boxes = pickle.load(open(datapath_file, "rb"))
print(1)
# for info in boxes['infos'][2]['sweeps']:
# for info in boxes['infos']:
for iIndex, info in enumerate(boxes['infos'], start=0):
    if iIndex == 0:
        continue
    print(info)
    lidar_path = info['lidar_path']
    # this is how they did it superimposing
    # lidar_path = lidar_path
    points = np.fromfile(str(lidar_path), dtype=np.float32, count=-1).reshape([-1, 5])
    points[:, 3] /= 255
    points[:, 4] = 0
    sweep_points_list = [points]
    ts = info["timestamp"] / 1e6
    for sweep in info["sweeps"]:
        points_sweep = np.fromfile(str(sweep["lidar_path"]), dtype=np.float32, count=-1).reshape([-1, 5])
        sweep_ts = sweep["timestamp"] / 1e6
        points_sweep[:, 3] /= 255
        points_sweep[:, :3] = points_sweep[:, :3] @ sweep["sweep2lidar_rotation"].T
        points_sweep[:, :3] += sweep["sweep2lidar_translation"]
        points_sweep[:, 4] = ts - sweep_ts
        sweep_points_list.append(points_sweep)
    #################################################
    # # if you need to visualise the point cloud formation sweeps by sweeps
    # points = np.concatenate(sweep_points_list, axis=0)[:, [0, 1, 2, 4]]
    # fig = draw_lidar_simple(points)
    # mlab.show()
    ###############################################################
    points = np.concatenate(sweep_points_list, axis=0)[:, [0, 1, 2, 4]]
    fig = draw_lidar_simple(points)
    # mlab.show()
| 50.470588 | 185 | 0.650641 |
c19b5c2b3ccb062f0b638c0e0ee563d3e90c8e0e | 2,308 | py | Python | gen.py | DotBowder/pygame-chaser | dc7469bc16fe4a91abbedd444ff61cbc3163ce45 | ["MIT"] | stars: null | issues: null | forks: null

from keras.layers.core import *
from keras.layers import *
from keras.utils import *
from keras.optimizers import *
from keras.models import *
import numpy as np
import keras
import cv2
import pygame
import random
import sys
if raw_input('Are you sure you want to overwrite your existing model? (y/n)') == 'y':
    pass
else:
    exit()
##############################
## Define Hyper Parameters ##
##############################
trainingFrameCount = 4000
testingFrameCount = 4000
layer1_size = 128
layer2_size = 100
layer3_size = 120
layer4_size = 256
layer5_size = 48
layer6_size = 48
layer7_size = 48
layer8_size = 32
nb_classes = 4
###################
## Define Model ##
###################
model = Sequential()
model.add(keras.layers.convolutional.Conv2D(layer1_size, 15, strides=2, input_shape=(100,100,1)))
model.add(Activation('relu'))
model.add(keras.layers.pooling.MaxPooling2D(pool_size=(2,2)))
#model.add(keras.layers.convolutional.Conv2D(layer2_size, 5))
#model.add(Activation('relu'))
#model.add(keras.layers.pooling.MaxPooling2D(pool_size=(2,2)))
#model.add(keras.layers.convolutional.Conv2D(layer3_size, 2))
#model.add(Activation('relu'))
#model.add(keras.layers.pooling.MaxPooling2D(pool_size=(2,2)))
model.add(Flatten())
#model.add(Dense(layer3_size))
#model.add(Activation('relu'))
#model.add(Dropout(0.1))
model.add(Dense(layer4_size))
model.add(Activation('relu'))
#model.add(Dense(layer5_size))
#model.add(Activation('relu'))
#model.add(Dropout(0.05))
#model.add(Dense(layer6_size))
#model.add(Activation('relu'))
#model.add(Dropout(0.05))
model.add(Dense(layer7_size))
model.add(Activation('relu'))
model.add(Dropout(0.03))
model.add(Dense(layer8_size))
model.add(Activation('relu'))
model.add(Dropout(0.01))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.summary()
###########################
## Save TensorBoard Data ##
###########################
tensorBoardCallback = keras.callbacks.TensorBoard(log_dir='/your-home-dir/chaser/model/logs', histogram_freq=0, write_graph=False, write_images=True)
########################
## Save Model to DIsk ##
########################
jsonModel = model.to_json()
with open("chaser-model.json", "w") as jsonFile:
jsonFile.write(jsonModel)
model.save_weights("chaser-model.h5")
print("Saved model to disk")
| 25.644444 | 149 | 0.682842 |
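Since the script above stores the architecture and the weights in separate files (`chaser-model.json` and `chaser-model.h5`) but never compiles or reloads them itself, a matching loader is sketched below as an assumption about how those files would later be consumed, not as code from this repo:

# Hedged sketch: reload the model saved by gen.py above.
from keras.models import model_from_json

with open("chaser-model.json") as json_file:
    model = model_from_json(json_file.read())
model.load_weights("chaser-model.h5")
model.summary()  # compile with an optimizer/loss of your choice before training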
0789a8962457bcfe36570f212d19aa6c0e5d900a | 95 | py | Python | api/data/src/apps/infoscreen/apps.py | xeor/hohu | ab5edb47eb50fe2434432d76d5599f5a2f168f57 | ["MIT"] | stars: null | issues: null | forks: null

from django.apps import AppConfig
class InfoscreenConfig(AppConfig):
    name = 'infoscreen'
| 15.833333 | 34 | 0.768421 |
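For context, an `AppConfig` like this only takes effect once the app is referenced from the project settings; the dotted path below is an assumption based on the repo path `api/data/src/apps/infoscreen/`, not something stated in the file:

# Hedged sketch of the settings side (module paths are assumptions).
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'apps.infoscreen.apps.InfoscreenConfig',  # or simply 'apps.infoscreen'
]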
2c7e984dbbbfc8d083ee0e2b6871cf62939f8544 | 2,938 | py | Python | lib/source/siteinfo.py | SmartDataProjects/ddm | 911f90c88702c9f7ed7a885744028c5705543e88 | ["MIT"] | stars: 1 (2018-08-02T03:06:27.000Z to 2018-08-02T03:06:27.000Z) | issues: 16 (2017-11-24T21:09:26.000Z to 2019-05-14T15:13:57.000Z) | forks: 11 (2016-08-03T10:37:31.000Z to 2018-08-21T14:32:25.000Z)

import fnmatch
import re
import logging
from dynamo.utils.classutil import get_instance
from dynamo.dataformat import Configuration
LOG = logging.getLogger(__name__)
class SiteInfoSource(object):
    """
    Interface specs for probe to the site information source.
    """

    @staticmethod
    def get_instance(module = None, config = None):
        if module is None:
            module = SiteInfoSource._module
        if config is None:
            config = SiteInfoSource._config

        return get_instance(SiteInfoSource, module, config)

    _module = ''
    _config = Configuration()

    @staticmethod
    def set_default(config):
        SiteInfoSource._module = config.module
        SiteInfoSource._config = config.config

    def __init__(self, config):
        if hasattr(config, 'include'):
            if type(config.include) is list:
                self.include = map(lambda pattern: re.compile(fnmatch.translate(pattern)), config.include)
            else:
                self.include = [re.compile(fnmatch.translate(config.include))]
        else:
            self.include = None

        if hasattr(config, 'exclude'):
            if type(config.exclude) is list:
                self.exclude = map(lambda pattern: re.compile(fnmatch.translate(pattern)), config.exclude)
            else:
                self.exclude = [re.compile(fnmatch.translate(config.exclude))]
        else:
            self.exclude = None

    def get_site(self, name, inventory):
        """
        @param name  Name of the site
        @return A Site object with full info, or None if the site is not found.
        """
        raise NotImplementedError('get_site')

    def get_site_list(self, inventory):
        """
        @return List of unlinked Site objects
        """
        raise NotImplementedError('get_site_list')

    def get_site_status(self, site_name):
        """
        @param site_name  Site name
        """
        raise NotImplementedError('get_site_status')

    def get_filename_mapping(self, site_name):
        """
        Get the list of regular expression file name mapping rules for the given site.
        @param site_name  Site name
        @return {protocol: chains} where chains = [chain] and chain = [(match, dest), (match, dest)]
        """
        raise NotImplementedError('get_filename_mapping')

    def check_allowed_site(self, site_name):
        if self.include is not None:
            for pattern in self.include:
                if pattern.match(site_name):
                    break
            else:
                # no match
                LOG.debug('Site %s is not in include list.', site_name)
                return False

        if self.exclude is not None:
            for pattern in self.exclude:
                if pattern.match(site_name):
                    LOG.debug('Site %s is in exclude list.', site_name)
                    return False

        return True
| 31.255319 | 106 | 0.600408 |
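The include/exclude logic in `check_allowed_site` is plain `fnmatch` globbing compiled to regexes. A minimal usage sketch follows; the config object and site names are made up for illustration, since the real `dynamo.dataformat.Configuration` API is not shown in this file:

# Hedged usage sketch; SimpleNamespace stands in for the real Configuration object.
from types import SimpleNamespace

source = SiteInfoSource(SimpleNamespace(include='T2_*', exclude='T2_CH_CERN*'))
print(source.check_allowed_site('T2_DE_DESY'))   # True: matches the include glob, not excluded
print(source.check_allowed_site('T2_CH_CERN'))   # False: matches the exclude glob
print(source.check_allowed_site('T1_US_FNAL'))   # False: not in the include list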
b1bb475e972655ff870c987bf364ac2b82d09edf | 512 | py | Python | pypersonalfin/utils/amount.py | guilhermebruzzi/pypersonalfin | 180619b36ed28e90b2891a9b2b9b4708d22cbdc8 | ["MIT"] | stars: 1 (2021-12-05T17:51:00.000Z to 2021-12-05T17:51:00.000Z) | issues: null | forks: 1 (2021-02-21T20:07:18.000Z to 2021-02-21T20:07:18.000Z)

from .locale import is_brazil
def amount_to_str(amount, locale=None, include_currency=True):
    float_amount = amount / 100.0
    # Fallback to US amount
    float_amount_str = "{:.2f}".format(float_amount)

    if is_brazil(locale):
        float_amount_str = float_amount_str.replace('.', ',')
        if include_currency:
            return 'R${}'.format(float_amount_str)
        return float_amount_str

    if include_currency:
        return '${}'.format(float_amount_str)
    return float_amount_str
| 26.947368 | 62 | 0.675781 |
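A quick usage note for `amount_to_str`: amounts are integer cents, and the Brazilian branch only swaps the decimal separator and currency symbol. The locale value below is a guess at what `is_brazil` accepts, since its implementation lives in `.locale` and is not shown here:

# Hedged examples; 'pt_BR' is an assumed locale token.
print(amount_to_str(12345))                          # '$123.45'
print(amount_to_str(12345, include_currency=False))  # '123.45'
print(amount_to_str(12345, locale='pt_BR'))          # 'R$123,45' if is_brazil('pt_BR') is truthy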
9ebd335fa1d806b7e44bec2565a44f034c191e2d | 1,301 | py | Python | 19-countingSundays.py | cmaron/Project-Euler | c4950302f71ee65d81040fae5764ec9eeef6b1f0 | ["MIT"] | stars: 2 (2015-01-20T14:00:14.000Z to 2016-01-27T16:36:53.000Z) | issues: null | forks: null

days_in_month = {1:31,2:28,3:31,4:30,5:31,6:30,7:31,8:31,9:30,10:31,11:30,12:31}
x = 0
# M = 0, T = 1, W = 2, R = 3, F = 4, S = 5, U = 6
day_of_week = 0
month = 1
day = 1
year = 1900
d_i_m = days_in_month[month]
# This could probably be sped up by jumping a head a week/month at a time and adjusting as
# needed.
while year < 2001:
    if year > 1900 and day_of_week == 6 and day == 1:
        x += 1
    day += 1
    day_of_week += 1
    if day > d_i_m:
        day = 1
        month += 1
        if month > 12:
            month = 1
            year += 1
        d_i_m = days_in_month[month]
        if month == 2:
            if year%100 == 0:
                if year%400 == 0:
                    d_i_m += 1
            elif year%4 == 0:
                d_i_m += 1
    if day_of_week > 6:
        day_of_week = 0
    print day_of_week, day, month, '/', d_i_m, year, (year > 1900 and day_of_week == 6 and day == 1)
print x
# def is_leap_year(y):
# if y % 4 == 0 and y % 100 != 0 or y % 400 == 0:
# return True
# return False
#
# numdays = [ 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 ]
#
# year, month, wday = 1901, 1, 2
# cnt = 0
#
# while year < 2001:
# if wday == 0:
# cnt += 1
#
# days = 29 if month == 2 and is_leap_year(year) else numdays[month-1]
# wday = (wday + days) % 7
#
# month += 1
#
# if month > 12:
# year += 1
# month = 1
#
# print cnt
| 21.327869 | 97 | 0.538048 |
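The commented block at the end already sketches the cleaner day-offset approach. As an independent cross-check of the day-by-day simulation above (this check is an addition here, not part of the original repo), the count of month-starting Sundays in 1901 to 2000 can be read directly off the standard library:

# Hedged verification sketch using the standard library.
from datetime import date

sundays = sum(1 for y in range(1901, 2001)
                for m in range(1, 13)
                if date(y, m, 1).weekday() == 6)  # Monday=0 ... Sunday=6
print(sundays)  # should equal the x printed by the loop above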
9459fd916901e69b6b57844f1d69447d22a421a1 | 198 | py | Python | app/models/db_session.py | atomberg/transaction-organizer | 40898300209a7fe7d4ed740a98c451060fa442f6 | ["MIT"] | stars: null | issues: null | forks: null

from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
engine = create_engine('sqlite:///transactions.db')
Session = scoped_session(sessionmaker(bind=engine))
| 33 | 55 | 0.828283 |
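For context, `scoped_session` provides a thread-local session registry. A typical usage pattern with the `Session` defined above is sketched here as an assumption, not as code from this repo:

# Hedged usage sketch; Transaction is an illustrative model name, not defined in this file.
session = Session()           # the scoped registry hands back the thread-local session
try:
    # rows = session.query(Transaction).all()
    session.commit()
except Exception:
    session.rollback()
    raise
finally:
    Session.remove()          # release the session, e.g. at the end of a request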
9bf5c882039bb5216945cc29b56482a7cee38a25 | 7,520 | py | Python | setup.py | jeffersonlizar/ShipIt | 501ad01eefa4a9cc2ff794dddca605e3bad92841 | ["MIT"] | stars: 1 (2019-02-19T23:25:28.000Z to 2019-02-19T23:25:28.000Z) | issues: 4 (2018-03-02T14:50:18.000Z to 2020-01-06T22:17:36.000Z) | forks: 2 (2019-02-08T23:12:24.000Z to 2020-04-27T22:18:27.000Z)

"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()
# Arguments marked as "Required" below must be included for upload to PyPI.
# Fields marked as "Optional" may be commented out.
setup(
# This is the name of your project. The first time you publish this
# package, this name will be registered for you. It will determine how
# users can install this project, e.g.:
#
# $ pip install sampleproject
#
# And where it will live on PyPI: https://pypi.org/project/sampleproject/
#
# There are some restrictions on what makes a valid project name
# specification here:
# https://packaging.python.org/specifications/core-metadata/#name
name='shipitchile', # Required
# Versions should comply with PEP 440:
# https://www.python.org/dev/peps/pep-0440/
#
# For a discussion on single-sourcing the version across setup.py and the
# project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='1', # Required
# This is a one-line description or tagline of what your project does. This
# corresponds to the "Summary" metadata field:
# https://packaging.python.org/specifications/core-metadata/#summary
description='Library that allows integration with the Shipit API. http://shipit.cl/', # Required
# This is an optional longer description of your project that represents
# the body of text which users will see when they visit PyPI.
#
# Often, this is the same as your README, so you can just read it in from
# that file directly (as we have already done above)
#
# This field corresponds to the "Description" metadata field:
# https://packaging.python.org/specifications/core-metadata/#description-optional
long_description=long_description, # Optional
# This should be a valid link to your project's main homepage.
#
# This field corresponds to the "Home-Page" metadata field:
# https://packaging.python.org/specifications/core-metadata/#home-page-optional
url='https://github.com/jeffersonlizar/shipitchile', # Optional
# This should be your name or the name of the organization which owns the
# project.
author='Jefferson Lizarzabal', # Optional
# This should be a valid email address corresponding to the author listed
# above.
author_email='dvjefferson@gmail.com', # Optional
# Classifiers help users find your project by categorizing it.
#
# For a list of valid classifiers, see
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[ # Optional
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
# 'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
# This field adds keywords for your project which will appear on the
# project page. What does your project relate to?
#
# Note that this is a string of words separated by whitespace, not a list.
keywords='shipit development chile api consumer client courier', # Optional
# You can just specify package directories manually here if your project is
# simple. Or you can use find_packages().
#
# Alternatively, if you just want to distribute a single Python file, use
# the `py_modules` argument instead as follows, which will expect a file
# called `my_module.py` to exist:
#
# py_modules=["my_module"],
#
packages=find_packages(exclude=['contrib', 'docs', 'tests']), # Required
# This field lists other packages that your project depends on to run.
# Any package you put here will be installed by pip when your project is
# installed, so they must be valid existing projects.
#
# For an analysis of "install_requires" vs pip's requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['requests'], # Optional
# List additional groups of dependencies here (e.g. development
# dependencies). Users will be able to install these using the "extras"
# syntax, for example:
#
# $ pip install sampleproject[dev]
#
# Similar to `install_requires` above, these must be valid existing
# projects.
extras_require={ # Optional
'dev': ['check-manifest'],
'test': ['coverage', 'pytest'],
},
# If there are data files included in your packages that need to be
# installed, specify them here.
#
# If using Python 2.6 or earlier, then these have to be included in
# MANIFEST.in as well.
# package_data={ # Optional
# 'sample': ['package_data.dat'],
# },
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
#
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])], # Optional
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# `pip` to create the appropriate form of executable for the target
# platform.
#
# For example, the following would provide a command called `sample` which
# executes the function `main` from this package when invoked:
# entry_points={ # Optional
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
# List additional URLs that are relevant to your project as a dict.
#
# This field corresponds to the "Project-URL" metadata fields:
# https://packaging.python.org/specifications/core-metadata/#project-url-multiple-use
#
# Examples listed include a pattern for specifying where the package tracks
# issues, where the source is hosted, where to say thanks to the package
# maintainers, and where to support the project financially. The key is
# what's used to render the link text on PyPI.
project_urls={ # Optional
'Bug Reports': 'https://github.com/jeffersonlizar/shipitchile/issues',
'Funding': 'https://donate.pypi.org',
'Say Thanks!': 'https://saythanks.io/to/jeffersonlizar',
'Source': 'https://github.com/jeffersonlizar/shipitchile',
},
)
| 40.648649 | 101 | 0.681383 |
4a0a1a3abc058fd9e02c03c20b12a563365bdcf2 | 7,493 | py | Python | pyparsehtml/src/parse_doc_string.py | pdoms/PyParseHtml | 513ad30cdfb77eea815b66b1ad91c1c96f3dff81 | ["MIT"] | stars: null | issues: null | forks: null

import re
import copy
from .element import Element
from .utils import isSelfCloser, mergeDict, representElementAsString, seqIdtoDict, getTagBySeqId
from .html_data import global_attributes, css_properties, html_tags_incl_attributes, html_tags_stripped
def addGlobalAttributes():
attributes = {}
for g in global_attributes:
if g == 'style':
attributes[g] = {}
for prop in css_properties:
attributes[g][prop] = ""
else:
attributes[g] = ""
return attributes
def addSpecificAttributes(meta_tag):
attributes = {}
for a in html_tags_incl_attributes[meta_tag['as_tag_identifier']]:
attributes[a] = ""
return attributes
def sortTags(tags):
return sorted(tags, key = lambda i: i['start_idx'])
def getInnerContents(tags_up, input):
for t in tags_up:
if t['tag_role'] == 'open_close' or t['tag_role'] == 'open_close_alt':
continue
else:
t['innerHTML'] = input[t['end_idx']+1:t['closer']['start_idx']]
t['outerHTML'] = input[t['start_idx']:t['closer']['end_idx']]
return tags_up
def hasClosingTags(collected):
result = False
for no, c in enumerate(collected):
if c['tag_role'] == 'close' and no != 1:
result = True
return result
def identifyTags(input):
collected_tags = []
for tag in html_tags_stripped:
as_open = re.findall(f'<{tag}(?=\s)', input)
as_close = re.findall(f'</{tag}', input)
##handle openers
current_idx = 0
for o in as_open:
meta_tag = {}
meta_tag['tag_type'] = tag
matcher = f"<{tag} />"
meta_tag['start_idx'] = input.index(o, current_idx)
meta_tag['end_idx'] = input.index('>', meta_tag['start_idx'])
meta_tag['with_attributes'] = input[meta_tag['start_idx']:meta_tag['end_idx'] +1]
if isSelfCloser(matcher):
meta_tag['tag_role'] = 'open_close'
meta_tag['as_tag_identifier'] = matcher
else:
meta_tag['as_tag_identifier'] = f"<{tag}>"
if meta_tag['end_idx'] > input.index('/', meta_tag['start_idx']):
meta_tag['tag_role'] = 'open_close_alt'
else:
meta_tag['tag_role'] = 'open'
specific = addSpecificAttributes(meta_tag)
globals = addGlobalAttributes()
meta_tag['allowed_attributes'] = mergeDict([globals, specific])
meta_tag['rest_string'] = input[meta_tag['end_idx'] + 1:]
current_idx = meta_tag['end_idx']
collected_tags.append(meta_tag)
##handle closers
current_idx = 0
for c in as_close:
meta_tag = {}
meta_tag['tag_type'] = tag
meta_tag['tag_role'] = 'close'
meta_tag['as_tag_identifier'] = f"{o}>"
meta_tag['start_idx'] = input.index(c, current_idx)
meta_tag['end_idx'] = input.index('>', meta_tag['start_idx'])
meta_tag['with_attributes'] = ""
meta_tag['rest_string'] = input[meta_tag['end_idx'] + 1:]
collected_tags.append(meta_tag)
current_idx = meta_tag['end_idx'] +1
return collected_tags
def parseStyleString(styles_, tag_styles):
for val in styles_.split(";"):
if (val == ""):
continue
else:
idx = val.index(":")
kee = val[:idx].strip()
value = val[idx+1:].strip()
tag_styles[kee] = value
return tag_styles
def parseAttributes(tags):
for tag in tags:
#loop through the attribute keys
for kee in tag['allowed_attributes'].keys():
tag_with = tag['with_attributes']
if f"{kee}=" not in tag_with:
continue
else:
idx = tag_with.index(f"{kee}=")
idx_equ = tag_with.index("=", idx)
quot_type = tag_with[idx_equ + 1]
idx_end = tag_with.index(quot_type, idx_equ + 2)
if kee == 'style':
tag['allowed_attributes'][kee] = parseStyleString(tag_with[idx_equ+2:idx_end], tag['allowed_attributes'][kee])
else:
tag['allowed_attributes'][kee] = tag_with[idx_equ+2:idx_end]
return tags
def createSequence(sorted_tags):
sequence = []
for i, t in enumerate(sorted_tags):
t['seq_id'] = f"{str(i)}-$$_{t['tag_type']}"
sequence.append(t['seq_id'])
return (sequence, sorted_tags)
def matchTokens(tags_collected):
tags = sortTags(tags_collected)
(seq, tags) = createSequence(tags)
updated_tags = []
to_remove = []
no_of_open = 0
for t in tags:
if t['tag_role'] == 'open':
no_of_open += 1
if t['tag_role'] == 'open_close':
s = t['seq_id']
t['seq_id'] = s.replace('$$', "3")
s_idx = seq.index(s)
seq[s_idx] = t['seq_id']
updated_tags.append(t)
to_remove.append(t)
if t['tag_role'] == 'open_close_alt':
s = t['seq_id']
t['seq_id'] = s.replace('$$', "3")
s_idx = seq.index(s)
seq[s_idx] = t['seq_id']
updated_tags.append(t)
to_remove.append(t)
for item in to_remove:
tags.remove(item)
#count open tags?
current_length = len(tags)
while no_of_open > 0:
for i in reversed(range(0, current_length)):
open = {}
close = {}
if tags[i]['tag_role'] == 'open':
open = tags[i]
open_s = tags[i]['seq_id']
open['seq_id'] = open['seq_id'].replace('$$', "1")
seq[seq.index(open_s)] = open['seq_id']
open_seq = seqIdtoDict(open['seq_id'])
for f in range(i, len(tags)):
if tags[f]['tag_role'] == 'close':
close = tags[f]
close_s = tags[f]['seq_id']
close['seq_id'] = f"{open_seq['seq_unique']}-2_{open_seq['seq_tag_type']}"
seq[seq.index(close_s)] = close['seq_id']
break
# wrong - needs to be a copy of the unfinished seq ID
open['closer'] = close
updated_tags.append(open)
tags.remove(open)
tags.remove(close)
break
current_length = len(tags)
no_of_open -= 1
return (seq, updated_tags)
# lifts style, id, class attributes to top level
def liftAttributes(tags):
rel_attr = ['id', 'style', 'class']
for tag in tags:
for att in rel_attr:
tag[att] = tag['allowed_attributes'][att]
tag['allowed_attributes'].pop(att)
return tags
def getText(seq_id, next_tag, tags):
element = getTagBySeqId(tags, seq_id['seq_id'])
text_after = element['rest_string']
idx = -1
next = next_tag['seq_tag_type']
if next_tag['seq_tag_role'] == '2':
idx = text_after.find(f'</{next}')
else:
idx = text_after.find(f'<{next}')
if idx == -1:
return ''
else:
return '$_text_$_' + text_after[0:idx]
def handleTexts(sqs, tgs):
items = []
for s in range(0, len(sqs) - 1):
item = {}
seq_current = seqIdtoDict(sqs[s])
seq_next = seqIdtoDict(sqs[s+1])
item['after'] = sqs[s]
item['text'] = getText(seq_current, seq_next, tgs)
items.append(item)
for i in items:
if i['text'] != '$_text_$':
idx = sqs.index(i['after'])
sqs.insert(idx+1, i['text'])
return sqs
#find a way to represent dom as dictionary with levels of nesting (irrelevant of text, just to have it ready)
#e.g:
#body: {
# div: {
# text: ...
# p: {},
# p: {},
# p: {
# img: {}
# }
# }
# div: {}
# }
#
#
#
#
def mapHTMLString(input):
tags = identifyTags(input)
(seq, tags) = matchTokens(tags)
tags = getInnerContents(tags, input)
tags = parseAttributes(tags)
tags = liftAttributes(tags)
seq = handleTexts(seq, tags)
tags_asClass = []
for e in tags:
element = Element(e)
tags_asClass.append(element)
return (seq, tags_asClass)
| 28.599237 | 120 | 0.605899 |
d17590fdc8ad7f8cef5662a782cf9a9b26912061 | 2,099 | py | Python | configs/_base_/datasets/pascal_voc12.py | LinB203/remotesense | 37ecce4b7971a1d83df0fd2c8fe033f2bee613f0 | ["Apache-2.0"] | stars: null | issues: null | forks: null

# dataset settings
# dataset_type = 'PascalVOCDataset'
dataset_type = 'RometeSenseDataset'
data_root = 'data/VOCdevkit/VOC2012'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# img_norm_cfg = dict(
# mean=[76.26166752, 87.26432839, 73.30952511], std=[49.25764543, 43.61505594, 44.20263873], to_rgb=True)
crop_size = (512, 512)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(type='Resize', img_scale=(512, 512), ratio_range=(1.0, 1.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(512, 512),
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=4,
workers_per_gpu=2,
train=dict(
type=dataset_type,
data_root=data_root,
img_dir='JPEGImages',
ann_dir='SegmentationClass',
split='ImageSets/Segmentation/train.txt',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
data_root=data_root,
img_dir='JPEGImages',
ann_dir='SegmentationClass',
split='ImageSets/Segmentation/val.txt',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_root=data_root,
img_dir='JPEGImages',
ann_dir='SegmentationClass',
split='ImageSets/Segmentation/val.txt',
pipeline=test_pipeline))
| 34.409836 | 109 | 0.629347 |
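This is a `_base_` dataset fragment in the MMSegmentation config style: it is not run directly but inherited by a full model config. A minimal sketch of how it is typically pulled in follows; the child config file name and the other `_base_` entry are assumptions, not files confirmed to exist in this repo:

# Hedged sketch of a child config, e.g. configs/my_model/my_model_voc12.py
_base_ = [
    '../_base_/datasets/pascal_voc12.py',
    '../_base_/default_runtime.py',      # assumed companion base config
]
# and loaded programmatically with mmcv:
# from mmcv import Config
# cfg = Config.fromfile('configs/my_model/my_model_voc12.py')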
7823aa06c2880a71ab442d554f2a131e5e3e4c49 | 11,796 | py | Python | scripts/wgan.py | manas-avi/detection-2016-nipsws | b25669dbf1c5d3d1a79638f928c989aca1c32622 | ["MIT"] | stars: null | issues: null | forks: 2 (2018-12-02T08:39:24.000Z to 2018-12-08T15:55:54.000Z)

from __future__ import print_function, division
from keras.layers import Input, Dense, Reshape, Flatten, Dropout
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.merge import _Merge
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam, SGD
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import sys, os
import numpy as np
import pdb
from keras.callbacks import TensorBoard
import tensorflow as tf
import keras.backend as K
import argparse
from functools import partial
class RandomWeightedAverage(_Merge):
"""Provides a (random) weighted average between real and generated image samples"""
def _merge_function(self, inputs):
alpha = K.random_uniform((32, 1, 1, 1))
return (alpha * inputs[0]) + ((1 - alpha) * inputs[1])
class WGAN():
def __init__( self, dataset_name ):
# Input shape
self.img_rows = 128
self.img_cols = 128
self.channels = 1
self.img_shape = ( self.img_rows, self.img_cols, self.channels )
self.latent_dim = 100
# Load dataset
self.DIR = '../data/quickdraw/'
self.dataset_name = dataset_name
train_path = os.path.join( self.DIR, dataset_name, 'object.npy' )
self.train_data = np.load( train_path )
g_optimizer = Adam( 0.0002, 0.5 )
c_optimizer = SGD( 0.0002, 0.5 )
self.n_critic = 5
# Build the generator
self.generator = self.build_generator()
self.critic = self.build_discriminator()
#-------------------------------
# Construct Computational Graph
# for the Critic
#-------------------------------
# Freeze generator's layers while training critic
self.generator.trainable = False
# Image input (real sample)
real_img = Input(shape=self.img_shape)
# Noise input
z_disc = Input(shape=(self.latent_dim,))
# Generate image based of noise (fake sample)
fake_img = self.generator(z_disc)
# Construct weighted average between real and fake images
interpolated_img = RandomWeightedAverage()([real_img, fake_img])
# Determine validity of weighted sample
validity_interpolated = self.critic(interpolated_img)
# Discriminator determines validity of the real and fake images
fake = self.critic(fake_img)
valid = self.critic(real_img)
# Use Python partial to provide loss function with additional
# 'averaged_samples' argument
partial_gp_loss = partial(self.gradient_penalty_loss,
averaged_samples=interpolated_img)
partial_gp_loss.__name__ = 'gradient_penalty' # Keras requires function names
self.critic_model = Model(inputs=[real_img, z_disc],
outputs=[valid, fake, validity_interpolated])
self.critic_model.compile(loss=[self.wasserstein_loss,
self.wasserstein_loss,
partial_gp_loss],
optimizer=c_optimizer,
loss_weights=[1, 1, 10])
#-------------------------------
# Construct Computational Graph
# for Generator
#-------------------------------
# For the generator we freeze the critic's layers
self.critic.trainable = False
self.generator.trainable = True
# Sampled noise for input to generator
z_gen = Input(shape=(100,))
# Generate images based of noise
img = self.generator(z_gen)
# Discriminator determines validity
valid = self.critic(img)
# Defines generator model
self.generator_model = Model(z_gen, valid)
self.generator_model.compile(loss=self.wasserstein_loss, optimizer=g_optimizer)
def gradient_penalty_loss(self, y_true, y_pred, averaged_samples):
"""
Computes gradient penalty based on prediction and weighted real / fake samples
"""
gradients = K.gradients(y_pred, averaged_samples)[0]
# compute the euclidean norm by squaring ...
gradients_sqr = K.square(gradients)
# ... summing over the rows ...
gradients_sqr_sum = K.sum(gradients_sqr,
axis=np.arange(1, len(gradients_sqr.shape)))
# ... and sqrt
gradient_l2_norm = K.sqrt(gradients_sqr_sum)
# compute lambda * (1 - ||grad||)^2 still for each single sample
gradient_penalty = K.square(1 - gradient_l2_norm)
# return the mean as loss over all the batch samples
return K.mean(gradient_penalty)
def wasserstein_loss(self, y_true, y_pred):
return K.mean(y_true * y_pred)
def build_generator( self ):
model = Sequential()
model.add( Dense( 128 * 16 * 16,
activation='relu',
input_dim=self.latent_dim ) )
model.add( Reshape( ( 16, 16, 128 ) ) )
model.add( UpSampling2D() )
model.add( Conv2D( 128, kernel_size=3, padding='same' ) )
model.add( BatchNormalization( momentum=0.8 ) )
model.add( Activation( 'relu' ) )
model.add( Conv2D( 128, kernel_size=3, padding='same' ) )
model.add( BatchNormalization( momentum=0.8 ) )
model.add( Activation( 'relu' ) )
model.add( UpSampling2D() )
model.add( Conv2D( 64, kernel_size=3, padding='same' ) )
model.add( BatchNormalization( momentum=0.8 ) )
model.add( Activation( 'relu' ) )
model.add( Conv2D( 64, kernel_size=3, padding='same' ) )
model.add( BatchNormalization( momentum=0.8 ) )
model.add( Activation( 'relu' ) )
if self.img_rows == 128:
model.add( UpSampling2D() )
model.add( Conv2D( 64, kernel_size=3, padding='same' ) )
model.add( BatchNormalization( momentum=0.8 ) )
model.add( Activation( 'relu' ) )
model.add( Conv2D( self.channels, kernel_size=3, padding='same' ) )
model.add( Activation( 'tanh' ) )
model.summary()
noise = Input( shape=( self.latent_dim, ) )
img = model( noise )
return Model( noise, img )
def build_discriminator( self ):
model = Sequential()
model.add( Conv2D( 32,
kernel_size=3,
strides=2,
input_shape=self.img_shape,
padding='same' ) )
model.add( LeakyReLU( alpha=0.2 ) )
model.add( Dropout( 0.25 ) )
model.add( Conv2D( 64, kernel_size=3, strides=2, padding='same' ) )
model.add( ZeroPadding2D( padding=( ( 0, 1 ), ( 0,1 ) ) ) )
model.add( BatchNormalization( momentum=0.8 ) )
model.add( LeakyReLU( alpha=0.2 ) )
model.add( Dropout( 0.25 ) )
model.add( Conv2D( 128, kernel_size=3, strides=2, padding='same' ) )
model.add( BatchNormalization( momentum=0.8 ) )
model.add( LeakyReLU( alpha=0.2 ) )
model.add( Dropout( 0.25 ) )
model.add( Conv2D( 256, kernel_size=3, strides=1, padding='same' ) )
model.add( BatchNormalization( momentum=0.8 ) )
model.add( LeakyReLU( alpha=0.2 ) )
model.add( Dropout( 0.25 ) )
model.add( Flatten() )
model.add( Dense( 1 ) )
model.summary()
img = Input( shape=self.img_shape )
validity = model( img )
return Model( img, validity )
def write_log(self, callback, names, logs, batch_no ):
for name, value in zip( names, logs ):
summary = tf.Summary()
summary_value = summary.value.add()
summary_value.simple_value = value
summary_value.tag = name
callback.writer.add_summary( summary, batch_no )
callback.writer.flush()
def train( self,
epochs=11,
batch_size=32,
sample_interval=10,
save_interval=10,
enable_plot=False ):
if enable_plot:
log_path = self.DIR + self.dataset_name + '/graphs/wgan'
callback = TensorBoard( log_path )
callback.set_model( self.generator_model )
train_names = [ 'D_loss','G_loss', ]
# Adversarial ground truths
valid = -np.ones( ( batch_size, 1 ) )
fake = np.ones( ( batch_size, 1 ) )
dummy = np.zeros( ( batch_size, 1 ) )
for epoch in range( epochs ):
for _ in range(self.n_critic):
# ---------------------
# Train Discriminator
# ---------------------
# Select a random half of images
idx = np.random.randint( 0, self.train_data.shape[ 0 ], batch_size )
imgs = self.train_data[ idx ]
# Sample noise and generate a batch of new images
noise = np.random.normal( 0, 1, ( batch_size, self.latent_dim ) )
# Train the critic
d_loss = self.critic_model.train_on_batch([imgs, noise],
[valid, fake, dummy])
# ---------------------
# Train Generator
# ---------------------
g_loss = self.generator_model.train_on_batch( noise, valid )
# Plot the progress
if enable_plot:
self.write_log( callback,
train_names,
np.asarray( [ d_loss[ 0 ],
g_loss ] ),
epoch )
print ( '%d [D loss: %f] [G loss: %f]' % \
( epoch, d_loss[ 0 ], g_loss ) )
# If at save interval => save generated image samples
if epoch % sample_interval == 0:
self.sample_imgs( epoch )
if epoch % save_interval == 0:
save_dir = os.path.join( self.DIR,
self.dataset_name,
'wgan_saved_weights',
'background' )
os.makedirs( save_dir, exist_ok=True )
save_name = os.path.join( save_dir, 'g_' + str( epoch ) + '.hdf5' )
self.generator.save_weights( save_name )
def sample_imgs( self, epoch ):
r, c = 5, 5
noise = np.random.normal( 0, 1, ( r * c, self.latent_dim ) )
gen_imgs = self.generator.predict( noise )
# Rescale images 0 - 1
gen_imgs = 0.5 * gen_imgs + 0.5
fig, axs = plt.subplots( r, c )
cnt = 0
for i in range( r ):
for j in range( c ):
axs[ i, j ].imshow( gen_imgs[ cnt, : , : , 0 ], cmap='gray' )
axs[ i, j ].axis( 'off' )
cnt += 1
sample_dir = os.path.join( self.DIR,
self.dataset_name,
'wgan-output',
'background' )
os.makedirs( sample_dir, exist_ok=True )
fig.savefig( os.path.join( sample_dir, str( epoch ) + '.png' ) )
plt.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser( description='Train the background generator' )
parser.add_argument( 'dataset_name', help='dataset name' )
args = parser.parse_args()
wgan = WGAN( args.dataset_name )
wgan.train(enable_plot=True)
| 39.32 | 87 | 0.546965 |
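For reference, the three-output critic above follows the standard WGAN-GP recipe. With the label conventions used in `train` (`valid = -1`, `fake = +1`, `dummy = 0` for the penalty head) and `loss_weights=[1, 1, 10]`, the compiled losses reduce to the following; this is a reading of the code, not text taken from the repo:

# critic loss     ~  mean(D(x_fake)) - mean(D(x_real))
#                    + 10 * mean( (||grad_xhat D(xhat)||_2 - 1)^2 ),  xhat = alpha*x_real + (1-alpha)*x_fake
# generator loss  ~  -mean(D(G(z)))
# because wasserstein_loss(y, y_pred) = mean(y * y_pred) with y = -1 or +1.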
1fc81c6ac7e9d32ebc9d118220a58c60c15a9e96 | 7,781 | py | Python | tclCommands/TclCommandAlignDrill.py | DannyPol/flatcam | 25a8634d0658e98b7fae31a095f8bef40c1b3067 | ["MIT"] | stars: 1 (2022-02-11T06:19:34.000Z to 2022-02-11T06:19:34.000Z) | issues (MRemy2/FlatCam @ d4f941335ca8a8d5351aab23b396f99da06a9029): null | forks (MRemy2/FlatCam @ d4f941335ca8a8d5351aab23b396f99da06a9029): null

import collections
from tclCommands.TclCommand import TclCommandSignaled
from shapely.geometry import Point
import shapely.affinity as affinity
class TclCommandAlignDrill(TclCommandSignaled):
"""
Tcl shell command to create excellon with drills for aligment.
"""
# array of all command aliases, to be able use old names for
# backward compatibility (add_poly, add_polygon)
aliases = ['aligndrill']
description = '%s %s' % ("--", "Create an Excellon object with drills for alignment.")
# Dictionary of types from Tcl command, needs to be ordered.
# For positional arguments
arg_names = collections.OrderedDict([
('name', str)
])
# Dictionary of types from Tcl command, needs to be ordered.
# For options like -optionname value
option_types = collections.OrderedDict([
('box', str),
('axis', str),
('holes', str),
('grid', float),
('minoffset', float),
('gridoffset', float),
('axisoffset', float),
('dia', float),
('dist', float),
('outname', str),
])
# array of mandatory options for current Tcl command: required = {'name','outname'}
required = ['name', 'axis']
# structured help for current command, args needs to be ordered
help = {
'main': "Create an Excellon object with drills for alignment.",
'args': collections.OrderedDict([
('name', 'Name of the object (Gerber or Excellon) to mirror.'),
('dia', 'Tool diameter'),
('box', 'Name of object which act as box (cutout for example.)'),
('holes', 'Tuple of tuples where each tuple it is a set of x, y coordinates. '
'E.g: (x0, y0), (x1, y1), ... '),
('grid', 'Aligning to grid, for those, who have aligning pins'
'inside table in grid (-5,0),(5,0),(15,0)...'),
('gridoffset', 'offset of grid from 0 position.'),
('minoffset', 'min and max distance between align hole and pcb.'),
('axisoffset', 'Offset on second axis before aligment holes'),
('axis', 'Mirror axis parallel to the X or Y axis.'),
('dist', 'Distance of the mirror axis to the X or Y axis.'),
('outname', 'Name of the resulting Excellon object.'),
]),
'examples': ['aligndrill my_object -axis X -box my_object -dia 3.125 -grid 1 '
'-gridoffset 0 -minoffset 2 -axisoffset 2']
}
def execute(self, args, unnamed_args):
"""
execute current TCL shell command
:param args: array of known named arguments and options
:param unnamed_args: array of other values which were passed into command
without -somename and we do not have them in known arg_names
:return: None or exception
"""
name = args['name']
if 'outname' in args:
outname = args['outname']
else:
outname = name + "_aligndrill"
# Get source object.
try:
obj = self.app.collection.get_by_name(str(name))
except Exception:
return "Could not retrieve object: %s" % name
if obj is None:
return "Object not found: %s" % name
if obj.kind != "geometry" and obj.kind != 'gerber' and obj.kind != 'excellon':
return "ERROR: Only Gerber, Geometry and Excellon objects can be used."
# Axis
try:
axis = args['axis'].upper()
except KeyError:
return "ERROR: Specify -axis X or -axis Y"
if not ('holes' in args or ('grid' in args and 'gridoffset' in args)):
return "ERROR: Specify -holes or -grid with -gridoffset "
if 'holes' in args:
try:
holes = eval("[" + args['holes'] + "]")
except KeyError:
return "ERROR: Wrong -holes format (X1,Y1),(X2,Y2)"
xscale, yscale = {"X": (1.0, -1.0), "Y": (-1.0, 1.0)}[axis]
tooldia = args['dia']
# Tools
# tools = {"1": {"C": args['dia']}}
def alligndrill_init_me(init_obj, app_obj):
"""
This function is used to initialize the new
object once it's created.
:param init_obj: The new object.
:param app_obj: The application (FlatCAMApp)
:return: None
"""
drills = []
if 'holes' in args:
for hole in holes:
point = Point(hole)
point_mirror = affinity.scale(point, xscale, yscale, origin=(px, py))
drills.append(point)
drills.append(point_mirror)
else:
if 'box' not in args:
return "ERROR: -grid can be used only for -box"
if 'axisoffset' in args:
axisoffset = args['axisoffset']
else:
axisoffset = 0
# This will align hole to given aligngridoffset and minimal offset from pcb, based on selected axis
if axis == "X":
firstpoint = args['gridoffset']
while (xmin - args['minoffset']) < firstpoint:
firstpoint = firstpoint - args['grid']
lastpoint = args['gridoffset']
while (xmax + args['minoffset']) > lastpoint:
lastpoint = lastpoint + args['grid']
localholes = (firstpoint, axisoffset), (lastpoint, axisoffset)
else:
firstpoint = args['gridoffset']
while (ymin - args['minoffset']) < firstpoint:
firstpoint = firstpoint - args['grid']
lastpoint = args['gridoffset']
while (ymax + args['minoffset']) > lastpoint:
lastpoint = lastpoint + args['grid']
localholes = (axisoffset, firstpoint), (axisoffset, lastpoint)
for hole in localholes:
point = Point(hole)
point_mirror = affinity.scale(point, xscale, yscale, origin=(px, py))
drills.append(point)
drills.append(point_mirror)
init_obj.tools = {
1: {
'tooldia': tooldia,
'drills': drills,
'solid_geometry': []
}
}
init_obj.create_geometry()
# Box
if 'box' in args:
try:
box = self.app.collection.get_by_name(args['box'])
except Exception:
return "Could not retrieve object box: %s" % args['box']
if box is None:
return "Object box not found: %s" % args['box']
try:
xmin, ymin, xmax, ymax = box.bounds()
px = 0.5 * (xmin + xmax)
py = 0.5 * (ymin + ymax)
obj.app.app_obj.new_object("excellon", outname, alligndrill_init_me, plot=False)
except Exception as e:
return "Operation failed: %s" % str(e)
else:
try:
dist = float(args['dist'])
except KeyError:
dist = 0.0
except ValueError:
return "Invalid distance: %s" % args['dist']
try:
px = dist
py = dist
obj.app.app_obj.new_object("excellon", outname, alligndrill_init_me, plot=False)
except Exception as e:
return "Operation failed: %s" % str(e)
return 'Ok. Align Drills Excellon object created'
| 35.208145 | 115 | 0.517928 |
19dc4c0de73f893d29d80deb7d29fa2e7be623e5 | 8,880 | py | Python | docs/conf.py | mongodb-labs/mongo-mockup-db | 317c4e049965f9d99423698a81e52d0ab37b7599 | ["Apache-2.0"] | stars: 42 (2015-09-12T18:56:51.000Z to 2021-08-16T17:57:40.000Z) | issues: 31 (2015-11-06T13:39:39.000Z to 2021-01-13T11:07:51.000Z) | forks: 14 (2015-11-22T11:24:51.000Z to 2020-09-08T05:14:11.000Z)

#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# mongo-mockup-db documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import mockupdb
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.todo',
'sphinx.ext.intersphinx',
]
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
'pymongo': ('http://api.mongodb.com/python/current/', None),
}
primary_domain = 'py'
default_role = 'py:obj'
doctest_global_setup = """
from collections import OrderedDict
from mockupdb import *
"""
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'MockupDB'
copyright = '2015, MongoDB, Inc.'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = mockupdb.__version__
# The full version, including alpha/beta/rc tags.
release = mockupdb.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# Theme gratefully vendored from CPython source.
html_theme = "pydoctheme"
html_theme_path = ["."]
html_theme_options = {'collapsiblesidebar': True}
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'mockupdbdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'mockupdb.tex',
'MockupDB Documentation',
'A. Jesse Jiryu Davis', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'mockupdb',
'MockupDB Documentation',
['A. Jesse Jiryu Davis'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'mockupdb',
'MockupDB Documentation',
'A. Jesse Jiryu Davis',
'mockupdb',
('Mock server for testing MongoDB clients and creating MongoDB Wire Protocol'
' servers.'),
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 29.89899 | 82 | 0.712387 |
c229a87a87d919366746c5613025bdd29eadb26d | 2,698 | py | Python | src/models/community.py | Carbulator/carbulator-server | 7211fcb4c497e5a0bfb0be44e34f39ec94c3c56e | ["MIT"] | stars: 2 (2018-12-14T17:21:58.000Z to 2020-07-22T11:39:28.000Z) | issues: 12 (2018-09-27T07:27:55.000Z to 2018-12-03T17:02:56.000Z) | forks: null

import datetime
from flask_restful import fields
from src.app import db
from src.exceptions.no_data import NoData
from src.models.car import CarModel
from src.models.user import UserModel
class CommunityModel(db.Model):
__tablename__ = 'communities'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
name = db.Column(db.String(120), nullable=False)
time_created = db.Column(db.DateTime(), default=datetime.datetime.utcnow)
time_updated = db.Column(db.DateTime(), onupdate=datetime.datetime.utcnow)
users = db.relationship('UserModel', secondary='community_user_link',
secondaryjoin='and_(CommunityUserLinkModel.user_id == UserModel.id, '
'CommunityUserLinkModel.invitation_accepted == True)')
car_id = db.Column(db.Integer, db.ForeignKey('cars.id'), unique=True)
car = db.relationship("CarModel", backref=db.backref("community", uselist=False))
is_favourite = None
def persist(self):
db.session.add(self)
db.session.commit()
@staticmethod
def get_marshaller():
return {
'id': fields.Integer,
'name': fields.String,
'time_created': fields.DateTime,
'time_updated': fields.DateTime,
'users': fields.List(fields.Nested(UserModel.get_marshaller())),
'car': fields.Nested(CarModel.get_marshaller())
}
@staticmethod
def get_detailed_marshaller():
return {
'id': fields.Integer,
'name': fields.String,
'time_created': fields.DateTime,
'time_updated': fields.DateTime,
'users': fields.List(fields.Nested(UserModel.get_marshaller())),
'car': fields.Nested(CarModel.get_marshaller()),
'is_deletable': fields.Boolean,
'is_editable': fields.Boolean
}
@staticmethod
def add_is_fav_to_marshaller(marshaller):
marshaller['is_favourite'] = fields.Boolean
return marshaller
@classmethod
def find_by_car_id(cls, id):
return cls.query.filter_by(car_id=id).first()
@classmethod
def find_by_id(cls, id):
return cls.query.filter_by(id=id).first()
@classmethod
def return_all(cls):
return CommunityModel.query.all()
@classmethod
def delete_all(cls):
db.session.query(cls).delete()
db.session.commit()
@classmethod
def delete_by_id(cls, id):
community = db.session.query(cls).filter(cls.id == id).first()
if community:
db.session.delete(community)
db.session.commit()
else:
raise NoData
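# Typical consumption of the marshaller in a flask_restful resource
# (illustrative sketch, not part of this module):
#   from flask_restful import Resource, marshal_with
#
#   class CommunityResource(Resource):
#       @marshal_with(CommunityModel.get_marshaller())
#       def get(self, id):
#           return CommunityModel.find_by_id(id)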
| 32.506024 | 97 | 0.63195 |
4694c61e6b563e60e7304fdbe96dc311c45a0151 | 1,317 | py | Python | project/speech_recognition/speech_demo.py | shanaka-desoysa/tensorflow | 0effc668f42b64bd0712240ab2f5e8a8be42960f | [
"Apache-2.0"
] | null | null | null | project/speech_recognition/speech_demo.py | shanaka-desoysa/tensorflow | 0effc668f42b64bd0712240ab2f5e8a8be42960f | [
"Apache-2.0"
] | null | null | null | project/speech_recognition/speech_demo.py | shanaka-desoysa/tensorflow | 0effc668f42b64bd0712240ab2f5e8a8be42960f | [
"Apache-2.0"
] | null | null | null | ## https://github.com/llSourcell/tensorflow_speech_recognition_demo/blob/master/demo.py
## https://www.youtube.com/watch?v=u9FPqkuoEJ8
from __future__ import division, print_function, absolute_import
import tflearn
import speech_data
import tensorflow as tf
learning_rate = 0.0001
training_iters = 300000 # steps
batch_size = 64
width = 20 # mfcc features
height = 80 # (max) length of utterance
classes = 10 # digits
batch = word_batch = speech_data.mfcc_batch_generator(batch_size)
X, Y = next(batch)
trainX, trainY = X, Y
testX, testY = X, Y #overfit for now
# Network building
net = tflearn.input_data([None, width, height])
net = tflearn.lstm(net, 128, dropout=0.8)
net = tflearn.fully_connected(net, classes, activation='softmax')
net = tflearn.regression(net, optimizer='adam', learning_rate=learning_rate, loss='categorical_crossentropy')
# Training
### add this "fix" for tensorflow version errors
col = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
for x in col:
tf.add_to_collection(tf.GraphKeys.VARIABLES, x )
model = tflearn.DNN(net, tensorboard_verbose=0)
while 1: #training_iters
model.fit(trainX, trainY, n_epoch=10, validation_set=(testX, testY), show_metric=True,
batch_size=batch_size)
_y=model.predict(X)
model.save("tflearn.lstm.model")
print (_y)
print (Y)
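# A minimal reload sketch (an assumption, not part of the original demo): the
# network must be rebuilt with the same layers before loading the checkpoint.
#   net = tflearn.input_data([None, width, height])
#   net = tflearn.lstm(net, 128, dropout=0.8)
#   net = tflearn.fully_connected(net, classes, activation='softmax')
#   net = tflearn.regression(net)
#   model = tflearn.DNN(net, tensorboard_verbose=0)
#   model.load("tflearn.lstm.model")
#   print(model.predict(X))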
| 29.931818 | 109 | 0.760061 |
45daa144acaf092b610a7d7122c0f044f1986f53 | 1,309 | py | Python | examples/v2/utility/utils.py | seanli9jan/tensorflow-examples | c0921149b9d88e5836e4eaa5d3024f579ced1029 | [
"MIT"
] | null | null | null | examples/v2/utility/utils.py | seanli9jan/tensorflow-examples | c0921149b9d88e5836e4eaa5d3024f579ced1029 | [
"MIT"
] | null | null | null | examples/v2/utility/utils.py | seanli9jan/tensorflow-examples | c0921149b9d88e5836e4eaa5d3024f579ced1029 | [
"MIT"
] | null | null | null | import tensorflow as tf
import os
def tf_disable_logging(interactive="DEBUG"):
level = {"DEBUG":'0', "INFO":'1', "WARNING":'2', "ERROR":'3'}
os.environ['TF_CPP_MIN_LOG_LEVEL'] = level[interactive]
def tf_limit_gpu():
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
# Currently, memory growth needs to be the same across GPUs
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
except RuntimeError as e:
# Memory growth must be set before GPUs have been initialized
print(e)
def tf_save_model(obj, export_dir):
tf.get_logger().setLevel("ERROR")
obj.save(export_dir)
tf.get_logger().setLevel("WARNING")
def tf_load_model(export_dir, custom_objects=None, compile=True):
return tf.keras.models.load_model(export_dir, custom_objects=custom_objects, compile=compile)
def tf_print_tensor(x, message='', map_fn=None):
def _func(x):
return map_fn(x) if map_fn else x
def _print(x):
tf.print(message, _func(x)) if message else tf.print(_func(x))
return x
return tf.keras.layers.Lambda(_print)(x)
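# Small usage sketch (not part of the original utilities; the layer sizes and
# message text below are illustrative assumptions).
if __name__ == "__main__":
    tf_disable_logging("ERROR")
    tf_limit_gpu()
    inputs = tf.keras.Input(shape=(4,))
    monitored = tf_print_tensor(inputs, message="batch input:")
    outputs = tf.keras.layers.Dense(1)(monitored)
    model = tf.keras.Model(inputs, outputs)
    model(tf.ones((2, 4)))  # running the model triggers the tf.print side effect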
| 34.447368 | 97 | 0.685256 |
ebafe3b2b5ea712f7fe5995452bc1d07577145b5 | 2,727 | py | Python | kmmi/heuristics/utils.py | Decitizen/kMMI | 921ef6e45fbec484251444886e246741d7f0120a | [
"MIT"
] | null | null | null | kmmi/heuristics/utils.py | Decitizen/kMMI | 921ef6e45fbec484251444886e246741d7f0120a | [
"MIT"
] | null | null | null | kmmi/heuristics/utils.py | Decitizen/kMMI | 921ef6e45fbec484251444886e246741d7f0120a | [
"MIT"
] | null | null | null | from time import process_time
from datetime import timedelta as td
import numpy as np
from numba import *
from kmmi.heuristics.initialize import *
def __to_len_classes(ss):
n_ss = {}
for i,s in enumerate(ss):
n_s = len(s)
if n_s not in n_ss:
n_ss[n_s] = []
n_ss[n_s].append(i)
return n_ss
@njit
def __svns_score(H_w, Ho_w, H, Ho, k):
return (H_w / Ho_w) + (k - (H & Ho).sum()) / k
@njit
def __update_degree_vecs(A, alpha, beta, xi, xj, inplace=False):
alpha_p = alpha if not inplace else alpha.copy()
beta_p = beta if not inplace else beta.copy()
for y in range(A.shape[0]):
if y != xj:
alpha_p[y] = alpha[y] - A[y,xi] + A[y,xj]
beta_p[y] = beta[y] + A[y,xi] - A[y,xj]
return alpha_p, beta_p
@njit
def __create_bvns_array(A):
"""Compute neighbor array for bvns such that ith row corresponds to node i and
    indices of nodes adjacent to i are the first elements in the row, while end of
the rows are padded with -1.
"""
n = A.shape[0]
Ap = np.zeros((n,n), dtype=np.int32) - 1
for i in range(n):
nz = np.where(A[i,:])[0]
n_nz = nz.shape[0]
Ap[i,:n_nz] = nz
Ap[i,n_nz:] = -1
return Ap
@njit
def __create_beam_array(A, A_as, w_thres):
"""Compute a beam array out of adjacency matrix A. In a beam array each row
i will contain the indexes of all connected nodes for node i in sorted order
based on the link weight."""
n = A.shape[0]
A_beam = np.zeros((n,n), dtype=np.int32) - 1
maxlens = np.zeros(n, dtype=np.int32) + n
for i in range(n):
j = 0
for k in A_as[i,:]:
if A[i,k] >= w_thres:
A_beam[i,j] = k
j+=1
else:
if j < maxlens[i]:
maxlens[i] = j
break
return A_beam[:,:maxlens.max()], maxlens.mean()
@njit
def __create_beam_array_constant_width(A, A_as, w_thres):
"""Compute a beam array out of adjacency matrix A. In a beam array each row
i will contain the indexes of all connected nodes for node i in sorted order
based on the link weight."""
#print('Beam width set')
n_beam = 6
n = A.shape[0]
A_beam = np.zeros((n,n_beam), dtype=np.int32) - 1
maxlen = n
for i in range(n):
for j in range(n_beam):
k = A_as[i,j]
if A[i,k] > 0.0:
A_beam[i,j] = k
j+=1
else:
if j < maxlen:
maxlen = j
break
if maxlen < n_beam:
A_beam = A_beam[:,:maxlen]
return A_beam
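# Illustrative smoke test (an assumption, not part of the module): build a beam
# array for a small random weighted graph. A_as holds, per node, the neighbor
# indices sorted by descending link weight, which is what the functions expect.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    A = rng.random((5, 5))
    A = (A + A.T) / 2.0           # symmetric link weights
    np.fill_diagonal(A, 0.0)
    A_as = np.argsort(-A, axis=1).astype(np.int32)
    A_beam, mean_width = __create_beam_array(A, A_as, w_thres=0.5)
    print(A_beam, mean_width)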
| 28.113402 | 83 | 0.541988 |
8e53f7c1634b55e4e4c004411d0b6b8bdfd33635 | 664 | py | Python | test/tests/test_package/import_target.py | jmgc/pyston | 9f672c1bbb75710ac17dd3d9107da05c8e9e8e8f | [
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | test/tests/test_package/import_target.py | jmgc/pyston | 9f672c1bbb75710ac17dd3d9107da05c8e9e8e8f | [
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | test/tests/test_package/import_target.py | jmgc/pyston | 9f672c1bbb75710ac17dd3d9107da05c8e9e8e8f | [
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | print "running test_package.import_target"
# Since we are currently importing test_package.import_target, this
# import will succeed (return directly from sys.modules), even though
# test_package will not have the 'import_target' attribute yet
import test_package.import_target
try:
print test_package.import_target
assert 0
except AttributeError:
pass
try:
print getattr(test_package, 'import_target')
assert 0
except AttributeError:
pass
# You can do 'import test_package.import_target', but adding an asname will cause an exception:
try:
import test_package.import_target as i
assert 0
i
except AttributeError:
pass
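# (This mirrors how the import machinery works: while this module's body runs,
# sys.modules already contains 'test_package.import_target', but the attribute
# on the parent package is only bound after the module finishes executing.)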
| 23.714286 | 95 | 0.769578 |
a9414909f5010a8992ced493af7b010f5d0816ef | 157 | py | Python | ejercicio8/pregunta2.py | mariagarciau/introduccionAlgortimica2 | 49d11ec4bed07badf751fef9d39e4337b8213a27 | [
"Apache-2.0"
] | null | null | null | ejercicio8/pregunta2.py | mariagarciau/introduccionAlgortimica2 | 49d11ec4bed07badf751fef9d39e4337b8213a27 | [
"Apache-2.0"
] | null | null | null | ejercicio8/pregunta2.py | mariagarciau/introduccionAlgortimica2 | 49d11ec4bed07badf751fef9d39e4337b8213a27 | [
"Apache-2.0"
] | null | null | null | def intereses(capital,interes,meses):
for i in range (0,meses):
capital= capital+capital*interes/100
    print(capital)
    return capital
intereses(100,5,3) | 31.4 | 44 | 0.707006 |
e1bcefbaa1ce97422b7ac2e9d28403ae4b55c0b3 | 3,483 | py | Python | maskprop/MiVOS/fbrs/model/metrics.py | qinliuliuqin/iSegFormer | 67b634588cc0a1e09fb3e092966eae997eb209fa | [
"MIT"
] | 14 | 2021-12-09T08:33:23.000Z | 2022-03-26T13:11:01.000Z | maskprop/MiVOS/fbrs/model/metrics.py | qinliuliuqin/iSegFormer | 67b634588cc0a1e09fb3e092966eae997eb209fa | [
"MIT"
] | null | null | null | maskprop/MiVOS/fbrs/model/metrics.py | qinliuliuqin/iSegFormer | 67b634588cc0a1e09fb3e092966eae997eb209fa | [
"MIT"
] | null | null | null | import torch
import numpy as np
from fbrs.utils import misc
class TrainMetric(object):
def __init__(self, pred_outputs, gt_outputs):
self.pred_outputs = pred_outputs
self.gt_outputs = gt_outputs
def update(self, *args, **kwargs):
raise NotImplementedError
def get_epoch_value(self):
raise NotImplementedError
def reset_epoch_stats(self):
raise NotImplementedError
def log_states(self, sw, tag_prefix, global_step):
pass
@property
def name(self):
return type(self).__name__
class AdaptiveIoU(TrainMetric):
def __init__(self, init_thresh=0.4, thresh_step=0.025, thresh_beta=0.99, iou_beta=0.9,
ignore_label=-1, from_logits=True,
pred_output='instances', gt_output='instances'):
super().__init__(pred_outputs=(pred_output,), gt_outputs=(gt_output,))
self._ignore_label = ignore_label
self._from_logits = from_logits
self._iou_thresh = init_thresh
self._thresh_step = thresh_step
self._thresh_beta = thresh_beta
self._iou_beta = iou_beta
self._ema_iou = 0.0
self._epoch_iou_sum = 0.0
self._epoch_batch_count = 0
def update(self, pred, gt):
gt_mask = gt > 0
if self._from_logits:
pred = torch.sigmoid(pred)
gt_mask_area = torch.sum(gt_mask, dim=(1, 2)).detach().cpu().numpy()
if np.all(gt_mask_area == 0):
return
ignore_mask = gt == self._ignore_label
max_iou = _compute_iou(pred > self._iou_thresh, gt_mask, ignore_mask).mean()
best_thresh = self._iou_thresh
for t in [best_thresh - self._thresh_step, best_thresh + self._thresh_step]:
temp_iou = _compute_iou(pred > t, gt_mask, ignore_mask).mean()
if temp_iou > max_iou:
max_iou = temp_iou
best_thresh = t
self._iou_thresh = self._thresh_beta * self._iou_thresh + (1 - self._thresh_beta) * best_thresh
self._ema_iou = self._iou_beta * self._ema_iou + (1 - self._iou_beta) * max_iou
self._epoch_iou_sum += max_iou
self._epoch_batch_count += 1
def get_epoch_value(self):
if self._epoch_batch_count > 0:
return self._epoch_iou_sum / self._epoch_batch_count
else:
return 0.0
def reset_epoch_stats(self):
self._epoch_iou_sum = 0.0
self._epoch_batch_count = 0
def log_states(self, sw, tag_prefix, global_step):
sw.add_scalar(tag=tag_prefix + '_ema_iou', value=self._ema_iou, global_step=global_step)
sw.add_scalar(tag=tag_prefix + '_iou_thresh', value=self._iou_thresh, global_step=global_step)
@property
def iou_thresh(self):
return self._iou_thresh
def _compute_iou(pred_mask, gt_mask, ignore_mask=None, keep_ignore=False):
if ignore_mask is not None:
pred_mask = torch.where(ignore_mask, torch.zeros_like(pred_mask), pred_mask)
reduction_dims = misc.get_dims_with_exclusion(gt_mask.dim(), 0)
union = torch.mean((pred_mask | gt_mask).float(), dim=reduction_dims).detach().cpu().numpy()
intersection = torch.mean((pred_mask & gt_mask).float(), dim=reduction_dims).detach().cpu().numpy()
nonzero = union > 0
iou = intersection[nonzero] / union[nonzero]
if not keep_ignore:
return iou
else:
result = np.full_like(intersection, -1)
result[nonzero] = iou
return result
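# Minimal usage sketch (an assumption, not taken from the original training loop):
if __name__ == '__main__':
    metric = AdaptiveIoU()
    logits = torch.randn(2, 64, 64)
    labels = (torch.rand(2, 64, 64) > 0.5).long()
    metric.update(logits, labels)
    print(metric.get_epoch_value(), metric.iou_thresh)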
| 34.147059 | 103 | 0.654895 |
bc681265cce1e23e3ce4fc3adfb2725ca4c9d16c | 271 | py | Python | openbox/runner/__init__.py | shlomos/obsi | b1fccb6cef6c28f39371954f7f98fefa22b4144a | [
"Apache-2.0"
] | null | null | null | openbox/runner/__init__.py | shlomos/obsi | b1fccb6cef6c28f39371954f7f98fefa22b4144a | [
"Apache-2.0"
] | null | null | null | openbox/runner/__init__.py | shlomos/obsi | b1fccb6cef6c28f39371954f7f98fefa22b4144a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Copyright (c) 2015 Pavel Lazar pavel.lazar (at) gmail.com
#
# The Software is provided WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED.
#####################################################################
"""
An Execution Engine Runner package
"""
| 20.846154 | 69 | 0.531365 |
f626779fc60dce20577e26dd7577f0b9088dbe30 | 508 | py | Python | build/jy901_driver/catkin_generated/pkg.installspace.context.pc.py | FProgrammerLIU/caster_man_ros | a75b503fad3a470f985072a2b3953e89074f3223 | [
"MIT"
] | null | null | null | build/jy901_driver/catkin_generated/pkg.installspace.context.pc.py | FProgrammerLIU/caster_man_ros | a75b503fad3a470f985072a2b3953e89074f3223 | [
"MIT"
] | null | null | null | build/jy901_driver/catkin_generated/pkg.installspace.context.pc.py | FProgrammerLIU/caster_man_ros | a75b503fad3a470f985072a2b3953e89074f3223 | [
"MIT"
] | null | null | null | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/caster/ros_ws/caster/install/include".split(';') if "/home/caster/ros_ws/caster/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "serial;roscpp;sensor_msgs;tf;message_runtime".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "jy901_driver"
PROJECT_SPACE_DIR = "/home/caster/ros_ws/caster/install"
PROJECT_VERSION = "1.1.0"
| 56.444444 | 151 | 0.75 |
7215e999b421702aec92ef64115f015ee50a6700 | 4,210 | py | Python | aae_architechture_3_layer.py | raktim-mondol/breast-cancer-sub-types | 98e8205c8f7edceeb1cfbe5ac4482fd8122aadd2 | [
"CC-BY-4.0"
] | 1 | 2021-08-01T11:49:05.000Z | 2021-08-01T11:49:05.000Z | aae_architechture_3_layer.py | raktim-mondol/breast-cancer-sub-types | 98e8205c8f7edceeb1cfbe5ac4482fd8122aadd2 | [
"CC-BY-4.0"
] | null | null | null | aae_architechture_3_layer.py | raktim-mondol/breast-cancer-sub-types | 98e8205c8f7edceeb1cfbe5ac4482fd8122aadd2 | [
"CC-BY-4.0"
] | null | null | null | seed=75
import os
import matplotlib as mpl
mpl.use('Agg')
import numpy as np
np.random.seed(seed)
from tensorflow import set_random_seed
set_random_seed(seed)
from keras.layers import Dense, Reshape, Flatten, Input, merge, Dropout, LeakyReLU, Activation
from keras.models import Sequential, Model, load_model
from keras.optimizers import Adam, SGD, Adagrad, RMSprop, Adadelta
from keras.regularizers import l1, l1_l2
from keras.datasets import mnist
import keras.backend as K
import pandas as pd
#from keras.utils import multi_gpu_model
from keras import backend as K
from keras_adversarial.image_grid_callback import ImageGridCallback
from keras_adversarial import AdversarialModel, fix_names, n_choice
from keras_adversarial import AdversarialOptimizerSimultaneous, normal_latent_sampling
def model_generator(latent_dim, input_shape, hidden_dim=1000):
return Sequential([
Dense(hidden_dim, name="generator_h1", input_dim=latent_dim, activation='tanh'),
Dense(hidden_dim, name="generator_h2", activation='tanh'),
Dense(input_shape[0], name="generator_output", activation='sigmoid')],
name="generator")
def model_encoder(latent_dim, input_shape, hidden_dim=1000):
x = Input(input_shape, name="x")
h = Dense(hidden_dim, name="encoder_h1", activation='tanh')(x)
h = Dense(hidden_dim, name="encoder_h2", activation='tanh')(h)
z = Dense(latent_dim, name="encoder_mu", activation='tanh')(h)
return Model(x, z, name="encoder")
def model_discriminator(input_shape, output_dim=1, hidden_dim=1000):
z = Input(input_shape)
h = z
h = Dense(hidden_dim, name="discriminator_h1", activation='tanh')(h)
h = Dense(hidden_dim, name="discriminator_h2", activation='tanh')(h)
y = Dense(output_dim, name="discriminator_y", activation="sigmoid")(h)
return Model(z, y)
def aae_model(path, adversarial_optimizer,xtrain,ytrain,xtest,ytest,encoded_dim=100,img_dim=25, nb_epoch=20):
# z \in R^100
latent_dim = encoded_dim
    # x \in R^{img_dim}
input_shape = (img_dim,)
# generator (z -> x)
generator = model_generator(latent_dim, input_shape)
# encoder (x ->z)
encoder = model_encoder(latent_dim, input_shape)
# autoencoder (x -> x')
autoencoder = Model(encoder.inputs, generator(encoder(encoder.inputs)), name="autoencoder")
# discriminator (z -> y)
discriminator = model_discriminator(input_shape)
    # assemble AAE
x = encoder.inputs[0]
z = encoder(x)
xpred = generator(z)
yreal = discriminator(x)
yfake = discriminator(xpred)
aae = Model(x, fix_names([xpred, yfake, yreal], ["xpred", "yfake", "yreal"]))
# print summary of models
encoder.summary()
generator.summary()
discriminator.summary()
#autoencoder.summary()
# build adversarial model
generative_params = generator.trainable_weights + encoder.trainable_weights
model = AdversarialModel(base_model=aae,
player_params=[generative_params, discriminator.trainable_weights],
player_names=["generator", "discriminator"])
#parallel_model = multi_gpu_model(model, gpus=4)
model.adversarial_compile(adversarial_optimizer=adversarial_optimizer,
player_optimizers=[Adadelta(),Adadelta()],
loss={"yfake": "binary_crossentropy", "yreal": "binary_crossentropy",
"xpred": "binary_crossentropy"},
player_compile_kwargs=[{"loss_weights": {"yfake": 1e-4, "yreal": 1e-4, "xpred": 1e1}}]*2)
# train network
n = xtrain.shape[0]
y = [xtrain, np.ones((n, 1)), np.zeros((n, 1)), xtrain, np.zeros((n, 1)), np.ones((n, 1))]
history = model.fit(x=xtrain, y=y, epochs=nb_epoch, batch_size=128, shuffle=False)
# save history
df = pd.DataFrame(history.history)
df.to_csv(os.path.join(path, "aae_history.csv"))
# save model
encoder.save(os.path.join(path, "aae_encoder.h5"))
generator.save(os.path.join(path, "aae_decoder.h5"))
discriminator.save(os.path.join(path, "aae_discriminator.h5"))
K.clear_session()
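# Example invocation (illustrative assumption about how this function is driven
# elsewhere in the project; xtrain/xtest are (n_samples, img_dim) arrays scaled
# to [0, 1]):
#   aae_model('results/', AdversarialOptimizerSimultaneous(),
#             xtrain, ytrain, xtest, ytest,
#             encoded_dim=100, img_dim=25, nb_epoch=20)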
| 40.873786 | 124 | 0.680048 |
3d3976e0ec764bb399eacee7e97a6d253d6abe8c | 3,165 | py | Python | qubell/tests/common/test_entity_list.py | storgashov/contrib-python-qubell-client | 9409c051ee4f4a7bef696dccc01edf3137affdf4 | [
"Apache-2.0"
] | null | null | null | qubell/tests/common/test_entity_list.py | storgashov/contrib-python-qubell-client | 9409c051ee4f4a7bef696dccc01edf3137affdf4 | [
"Apache-2.0"
] | null | null | null | qubell/tests/common/test_entity_list.py | storgashov/contrib-python-qubell-client | 9409c051ee4f4a7bef696dccc01edf3137affdf4 | [
"Apache-2.0"
] | null | null | null | from qubell import deprecated
import unittest
from qubell.api.private.common import EntityList, IdName
from qubell.api.private import exceptions
class EntityListTests(unittest.TestCase):
class DummyEntity:
def __init__(self, id, name):
self.id = id
self.name = name
@property
def dummy(self):
'dummy property'
return self.id + "--==--" + self.name
@deprecated
def plain_old(self): pass
@deprecated(msg="yo")
def plain_old_with_message(self): pass
class DummyEntityList(EntityList):
def __init__(self, raw_json):
self.raw_json = raw_json
EntityList.__init__(self)
def _id_name_list(self):
self._list = [IdName(item["id"], item["name"]) for item in self.raw_json]
def _get_item(self, id_name):
return EntityListTests.DummyEntity(id_name.id, id_name.name)
raw_objects = [
{"id": "1", "name": "name1"},
{"id": "2", "name": "name2"},
{"id": "3", "name": "name3dup"},
{"id": "4", "name": "name3dup"},
{"id": "1234567890abcd1234567890", "name": "with_bson_id"}
]
def setUp(self):
self.entity_list = EntityListTests.DummyEntityList(self.raw_objects)
def test_get_item_by_name(self):
assert self.entity_list["name2"].id == "2"
def test_get_item_by_id(self):
assert self.entity_list["1234567890abcd1234567890"].name == "with_bson_id"
def test_get_last_item_when_duplicate_by_name(self):
assert "4" == self.entity_list["name3dup"].id
def test_get_item_by_index(self):
assert "2" == self.entity_list[1].id
assert "4" == self.entity_list[-2].id
def test_get_item_by_slice(self):
assert ["2", "4"] == [i.id for i in self.entity_list[1:4:2]]
def test_not_existing_item(self):
with self.assertRaises(exceptions.NotFoundError) as context:
assert self.entity_list["hren"]
assert str(context.exception) == "None of 'hren' in DummyEntityList"
def test__len(self):
assert len(self.raw_objects) == len(self.entity_list)
def test__in_by_item(self):
dummy = EntityListTests.DummyEntity("1", "name1")
assert dummy in self.entity_list
def test__in_by_id(self):
assert "1234567890abcd1234567890" in self.entity_list
def test__in_by_uid(self):
assert u"1234567890abcd1234567890" in self.entity_list
def test__in_by_name(self):
assert "name2" in self.entity_list
assert "name3dup" in self.entity_list
def test__iter(self):
entity_ids = [e.id for e in self.entity_list]
raw_ids = [e["id"] for e in self.raw_objects]
self.assertEqual(entity_ids, raw_ids)
for e in self.entity_list:
assert isinstance(e, EntityListTests.DummyEntity)
def test__repr(self):
assert repr(self.entity_list) == "DummyEntityList([IdName(id='1', name='name1'), IdName(id='2', name='name2'), IdName(id='3', name='name3dup'), IdName(id='4', name='name3dup'), IdName(id='1234567890abcd1234567890', name='with_bson_id')])"
| 34.032258 | 246 | 0.637915 |
e5561ab4bbb0066a09c843e991195e4eb26fadd7 | 4,924 | py | Python | google/appengine/tools/devappserver2/admin/admin_request_handler.py | MiCHiLU/google_appengine_sdk | 3da9f20d7e65e26c4938d2c4054bc4f39cbc5522 | [
"Apache-2.0"
] | 16 | 2016-04-23T20:16:12.000Z | 2021-10-09T16:58:25.000Z | google/appengine/tools/devappserver2/admin/admin_request_handler.py | MiCHiLU/google_appengine_sdk | 3da9f20d7e65e26c4938d2c4054bc4f39cbc5522 | [
"Apache-2.0"
] | 53 | 2016-04-06T21:10:43.000Z | 2018-03-19T23:14:33.000Z | google/appengine/tools/devappserver2/admin/admin_request_handler.py | MiCHiLU/google_appengine_sdk | 3da9f20d7e65e26c4938d2c4054bc4f39cbc5522 | [
"Apache-2.0"
] | 23 | 2016-04-19T05:45:26.000Z | 2021-12-31T23:22:36.000Z | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A base class for all Admin UI request handlers and related utilities."""
import os.path
import random
import string
import urllib
import google
import jinja2
import webapp2
from google.appengine.tools import sdk_update_checker
from google.appengine.tools.devappserver2 import metrics
def _urlencode_filter(value):
if isinstance(value, basestring):
return urllib.quote(value)
else:
return urllib.urlencode(value)
def _byte_size_format(value):
byte_count = int(value)
if byte_count == 1:
return '1 Byte'
elif byte_count < 1024:
return '%d Bytes' % byte_count
elif byte_count < 1024 ** 2:
return '%.1f KiB (%d Bytes)' % (byte_count/1024.0, byte_count)
elif byte_count < 1024 ** 3:
return '%.1f MiB (%d Bytes)' % (byte_count/1024.0 ** 2, byte_count)
else:
return '%.1f GiB (%d Bytes)' % (byte_count/1024.0 ** 3, byte_count)
TEMPLATE_PATH = os.path.abspath(
os.path.join(os.path.dirname(__file__), 'templates'))
admin_template_environment = jinja2.Environment(
loader=jinja2.FileSystemLoader(TEMPLATE_PATH),
autoescape=True)
admin_template_environment.filters['urlencode'] = _urlencode_filter
admin_template_environment.filters['bytesizeformat'] = _byte_size_format
_DEFAULT_SDK_VERSION = '(Internal)'
def _get_sdk_version():
version_object = sdk_update_checker.GetVersionObject()
if version_object:
return version_object['release']
else:
return _DEFAULT_SDK_VERSION
class AdminRequestHandler(webapp2.RequestHandler):
"""Base class for all admin UI request handlers."""
_SDK_VERSION = _get_sdk_version()
@classmethod
def init_xsrf(cls, xsrf_path):
"""Load the XSRF token from the given path."""
if os.path.exists(xsrf_path):
with open(xsrf_path, 'r') as token_file:
cls.xsrf_token = token_file.read().strip()
else:
cls.xsrf_token = ''.join(random.choice(string.ascii_letters)
for _ in range(10))
with open(xsrf_path, 'w') as token_file:
token_file.write(cls.xsrf_token)
def dispatch(self):
if self.request.method in ['PATCH', 'POST', 'PUT', 'DELETE'] and (
self.request.get('xsrf_token') != self.xsrf_token):
self.response.set_status(403, 'Invalid XSRF token')
self.response.out.write('<h1>Invalid XSRF token</h1>')
else:
super(AdminRequestHandler, self).dispatch()
def render(self, template, context):
"""Returns a rendered version of the given jinja2 template.
Args:
template: The file name of the template file to use e.g.
"memcache_viewer.html".
context: A dict of values to use when rendering the template.
Returns:
A Unicode object containing the rendered template.
"""
template = admin_template_environment.get_template(template)
values = {
'app_id': self.configuration.app_id,
'request': self.request,
'sdk_version': self._SDK_VERSION,
'xsrf_token': self.xsrf_token,
}
values.update(context)
return template.render(values)
def _construct_url(self, remove=None, add=None):
"""Returns a URL referencing the current resource with the same params.
For example, if the request URL is
"http://foo/bar?animal=cat&color=redirect" then
_construct_url(['animal'], {'vehicle': 'car'}) will return
"http://foo/bar?color=redirect&vehicle=car"
Args:
remove: A sequence of query parameters to remove from the query string.
add: A mapping of query parameters to add to the query string.
Returns:
A new query string suitable for use in "GET" requests.
"""
remove = remove or []
add = add or {}
params = dict(self.request.params)
for arg in remove:
if arg in params:
del params[arg]
params.update(add)
return str('%s?%s' % (self.request.path,
urllib.urlencode(sorted(params.iteritems()))))
@property
def dispatcher(self):
return self.request.app.dispatcher
@property
def configuration(self):
return self.request.app.configuration
@metrics.LogHandlerRequest('admin-console')
def get(self, *args, **kwargs):
"""Base method for all get requests."""
@metrics.LogHandlerRequest('admin-console')
def post(self, *args, **kwargs):
"""Base method for all post requests."""
| 30.395062 | 77 | 0.693136 |
089db57576c00e174cd3f9b736674724da5d5a87 | 3,280 | py | Python | TensorFlow-with-dynamic-scaling/tensorflow/python/ops/raw_ops_test.py | BingyangWu/Antman | e9323cc8ccda637d3962b0de29ce154317f17e7a | [
"MIT"
] | 388 | 2020-06-27T01:38:29.000Z | 2022-03-29T14:12:01.000Z | tensorflow/python/ops/raw_ops_test.py | yashsehgal/tensorflow | de743966b1c6da186f13a8007f68b04e52357ad1 | [
"Apache-2.0"
] | 80 | 2020-09-02T01:57:33.000Z | 2022-03-28T08:51:57.000Z | tensorflow/python/ops/raw_ops_test.py | yashsehgal/tensorflow | de743966b1c6da186f13a8007f68b04e52357ad1 | [
"Apache-2.0"
] | 75 | 2021-12-24T04:48:21.000Z | 2022-03-29T10:13:39.000Z | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Raw ops tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_string_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class RawOpsTest(test.TestCase, parameterized.TestCase):
def testSimple(self):
x = constant_op.constant(1)
self.assertEqual([2], self.evaluate(gen_math_ops.Add(x=x, y=x)))
def testRequiresKwargs(self):
with self.assertRaisesRegexp(TypeError, "only takes keyword args"):
gen_math_ops.Add(1., 1.)
def testRequiresKwargs_providesSuggestion(self):
msg = "possible keys: \\['x', 'y', 'name'\\]"
with self.assertRaisesRegexp(TypeError, msg):
gen_math_ops.Add(1., y=2.)
def testName(self):
x = constant_op.constant(1)
op = gen_math_ops.Add(x=x, y=x, name="double")
if not context.executing_eagerly():
# `Tensor.name` is not available in eager.
self.assertEqual(op.name, "double:0")
def testDoc(self):
self.assertEqual(gen_math_ops.add.__doc__, gen_math_ops.Add.__doc__)
def testDefaults(self):
x = constant_op.constant([[True]])
self.assertAllClose(
gen_math_ops.Any(input=x, axis=0),
gen_math_ops.Any(input=x, axis=0, keep_dims=False))
@parameterized.parameters([[0, 8]], [[-1, 6]])
def testStringNGramsBadDataSplits(self, splits):
data = ["aa", "bb", "cc", "dd", "ee", "ff"]
with self.assertRaisesRegex(errors.InvalidArgumentError,
"Invalid split value"):
self.evaluate(
gen_string_ops.string_n_grams(
data=data,
data_splits=splits,
separator="",
ngram_widths=[2],
left_pad="",
right_pad="",
pad_width=0,
preserve_short_sequences=False))
def testGetSessionHandle(self):
if context.executing_eagerly():
with self.assertRaisesRegex(
errors.FailedPreconditionError,
"GetSessionHandle called on null session state"):
gen_data_flow_ops.GetSessionHandle(value=[1])
if __name__ == "__main__":
ops.enable_eager_execution()
test.main()
| 35.268817 | 80 | 0.689939 |
1ca86262ea33a06360d8f0e1a8086b2c8a7cc7a7 | 292 | py | Python | function1.py | priyalbhatewara123/Python-programs | 90b84310101b76c14b89f256ee9206711908a4ae | [
"bzip2-1.0.6"
] | null | null | null | function1.py | priyalbhatewara123/Python-programs | 90b84310101b76c14b89f256ee9206711908a4ae | [
"bzip2-1.0.6"
] | null | null | null | function1.py | priyalbhatewara123/Python-programs | 90b84310101b76c14b89f256ee9206711908a4ae | [
"bzip2-1.0.6"
] | null | null | null | #Function with dictionary
def build_profile(first,last,**user_info):
p = {}
p['first_name'] = first
p['last_name'] = last
for k,v in user_info.items():
p[k] = v
return p
m = build_profile('priyal','bhatewara',location = 'ratlam',field = 'IT',clg = 'MSIT')
print(m) | 29.2 | 85 | 0.619863 |
711ccab443dd5710870e298be074dadd0b8eb825 | 1,550 | py | Python | assignment3/sim/a35.py | michalmiotk/my_assingment_particle_filter | d518b427ef700cb6db9f04d31e8293d0814c1b1c | [
"MIT"
] | 6 | 2020-12-04T11:00:23.000Z | 2022-01-29T13:56:08.000Z | assignment3/sim/a35.py | michalmiotk/my_assingment_particle_filter | d518b427ef700cb6db9f04d31e8293d0814c1b1c | [
"MIT"
] | null | null | null | assignment3/sim/a35.py | michalmiotk/my_assingment_particle_filter | d518b427ef700cb6db9f04d31e8293d0814c1b1c | [
"MIT"
] | 6 | 2020-12-04T11:00:54.000Z | 2022-01-30T17:58:40.000Z | import matplotlib.pyplot as plt
import math
def plot_particles(particles, distance, show=True):
plt.xlim([-0.9, distance + 0.9])
for particle in particles:
plt.plot([particle.pos], [0.0], '*', color=particle.color)
if show:
plt.show()
def plot_resample_counts(particles, resample, i_count, distance, show=True):
plot_particles(particles, distance, show=False)
for i in range(len(particles)):
i_count += [resample.count(i)]
plt.plot([particles[i].pos, particles[i].pos],
[0.0, -i_count[-1]], 'g-')
if show:
plt.show()
def plot_resampled(
particles,
resample,
i_count,
resampled_particles,
distance,
show=True,
move=False):
plot_particles(particles, distance, show=False)
plot_resample_counts(particles, resample, i_count, distance, show=False)
for particle in resampled_particles:
if move:
particle.predict()
plt.plot([particle.pos], [-max(i_count)], '*', color=particle.color)
if show:
plt.show()
def plot(particles, resampled_particles, resample, distance):
i_count = []
# Plot 1
plot_particles(particles, distance)
# Plot 2
plot_resample_counts(particles, resample, i_count, distance)
# Plot 3
plot_resampled(particles, resample, i_count, resampled_particles, distance)
# Plot 4
plot_resampled(
particles,
resample,
i_count,
resampled_particles,
distance,
move=True)
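# Illustrative driver (assumptions about the interfaces used above: particles are
# objects with `pos`, `color` and a `predict()` method, and `resample` is the list
# of particle indices drawn during resampling):
#   resampled = [copy.deepcopy(particles[i]) for i in resample]
#   plot(particles, resampled, resample, distance=10)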
| 27.192982 | 79 | 0.627097 |
6e01c198b47920e09f6d0a0faece1e8758af9f22 | 507 | py | Python | toolbox/visualization/setup.py | brennengreen/NIRFAST-Parallel | 9ee2a40d039cbfcaf03acea82e91e25d350cc0a5 | [
"BSD-3-Clause"
] | 1 | 2015-03-18T01:57:36.000Z | 2015-03-18T01:57:36.000Z | toolbox/visualization/setup.py | brennengreen/NIRFAST-Parallel | 9ee2a40d039cbfcaf03acea82e91e25d350cc0a5 | [
"BSD-3-Clause"
] | null | null | null | toolbox/visualization/setup.py | brennengreen/NIRFAST-Parallel | 9ee2a40d039cbfcaf03acea82e91e25d350cc0a5 | [
"BSD-3-Clause"
] | null | null | null | # for building the exe:
# python setup.py py2exe --includes sip
from distutils.core import setup
from py2exe.build_exe import py2exe
from glob import glob
import py2exe
import sys
sys.path.append("C:\\Program Files (x86)\\Microsoft Visual Studio 9.0\\VC\\redist\\x86\\Microsoft.VC90.CRT")
data_files = [("Microsoft.VC90.CRT", glob(r'C:\Program Files (x86)\Microsoft Visual Studio 9.0\VC\redist\x86\Microsoft.VC90.CRT\*.*'))]
setup(
data_files=data_files,
console=[{"script": "final.py"}]
) | 33.8 | 136 | 0.721893 |
c80d18528d72a72ccb9f58cd763e62f716b1474c | 1,098 | py | Python | maio/urls.py | jonmsawyer/maio | 468fe495189d970ccc6ec01665865bbf2c6ec578 | [
"MIT"
] | null | null | null | maio/urls.py | jonmsawyer/maio | 468fe495189d970ccc6ec01665865bbf2c6ec578 | [
"MIT"
] | 5 | 2016-09-22T23:17:40.000Z | 2018-04-05T22:36:37.000Z | maio/urls.py | jonmsawyer/maio | 468fe495189d970ccc6ec01665865bbf2c6ec578 | [
"MIT"
] | null | null | null | """
maio URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.conf.urls import include
from maio.admin import admin_site
from maio.views import home
from maio.views import dashboard
from maio.views import logout
urlpatterns = [
url(r'^admin/?', admin_site.urls),
#url(r'^portal/?', include(portal.urls, namespace='portal')),
url(r'^dashboard/?', dashboard, name='dashboard'),
url(r'^logout/?', logout, name='logout'),
url(r'^$', home, name='home'),
]
| 32.294118 | 79 | 0.6949 |
43a344148a3cbb469accf53228539aa0abeb9aff | 1,746 | py | Python | extract.py | Kyubyong/bert-token-embeddings | 73d68466be290551b4a9a31d4a02a7330cac4ce3 | [
"Apache-2.0"
] | 101 | 2019-01-22T08:36:41.000Z | 2021-11-08T10:33:56.000Z | extract.py | pengshuang/bert-token-embeddings | 73d68466be290551b4a9a31d4a02a7330cac4ce3 | [
"Apache-2.0"
] | 1 | 2019-01-28T11:32:54.000Z | 2019-02-26T02:29:39.000Z | extract.py | pengshuang/bert-token-embeddings | 73d68466be290551b4a9a31d4a02a7330cac4ce3 | [
"Apache-2.0"
] | 17 | 2019-01-22T19:24:57.000Z | 2021-09-15T02:10:50.000Z | import torch
import numpy as np
np.set_printoptions(threshold=np.nan)
from multiprocessing import Pool
import re
from tqdm import tqdm
import os
os.system("pip install pytorch_pretrained_bert")
from pytorch_pretrained_bert import BertTokenizer, BertModel
def get_embeddings(mname):
'''Gets pretrained embeddings of Bert-tokenized tokens or subwords
mname: string. model name.
'''
print("# Model name:", mname)
print("# Load pre-trained model tokenizer (vocabulary)")
tokenizer = BertTokenizer.from_pretrained(mname)
print("# Construct vocab")
vocab = [token for token in tokenizer.vocab]
print("# Load pre-trained model")
model = BertModel.from_pretrained(mname)
print("# Load word embeddings")
emb = model.embeddings.word_embeddings.weight.data
emb = emb.numpy()
print("# Write")
with open("{}.{}.{}d.vec".format(mname, len(vocab), emb.shape[-1]), "w") as fout:
fout.write("{} {}\n".format(len(vocab), emb.shape[-1]))
assert len(vocab)==len(emb), "The number of vocab and embeddings MUST be identical."
for token, e in zip(vocab, emb):
e = np.array2string(e, max_line_width=np.inf)[1:-1]
e = re.sub("[ ]+", " ", e)
fout.write("{} {}\n".format(token, e))
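# The written "<model>.<vocab_size>.<dim>d.vec" file follows the word2vec text
# format (header line, then one token per line), so it can be loaded e.g. with
# gensim (illustrative, not part of this script):
#   from gensim.models import KeyedVectors
#   kv = KeyedVectors.load_word2vec_format("bert-base-uncased.30522.768d.vec")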
if __name__ == "__main__":
mnames = (
"bert-base-uncased",
"bert-large-uncased",
"bert-base-cased",
"bert-large-cased",
"bert-base-multilingual-cased",
"bert-base-multilingual-uncased",
"bert-base-chinese"
)
p = Pool(16)
with tqdm(total=len(mnames)) as pbar:
for _ in tqdm(p.imap(get_embeddings, mnames)):
pbar.update() | 32.333333 | 92 | 0.616838 |
ae22c4658af4d167ed1ad184faf468e89244b620 | 4,137 | py | Python | Dangerous/Golismero/tools/sqlmap/thirdparty/colorama/winterm.py | JeyZeta/Dangerous- | 824ea6b571eda98bb855f176361e9b35dfda578e | [
"MIT"
] | null | null | null | Dangerous/Golismero/tools/sqlmap/thirdparty/colorama/winterm.py | JeyZeta/Dangerous- | 824ea6b571eda98bb855f176361e9b35dfda578e | [
"MIT"
] | null | null | null | Dangerous/Golismero/tools/sqlmap/thirdparty/colorama/winterm.py | JeyZeta/Dangerous- | 824ea6b571eda98bb855f176361e9b35dfda578e | [
"MIT"
] | 1 | 2018-07-04T18:35:16.000Z | 2018-07-04T18:35:16.000Z |
from . import win32
# from wincon.h
class WinColor(object):
BLACK = 0
BLUE = 1
GREEN = 2
CYAN = 3
RED = 4
MAGENTA = 5
YELLOW = 6
GREY = 7
# from wincon.h
class WinStyle(object):
NORMAL = 0x00 # dim text, dim background
BRIGHT = 0x08 # bright text, dim background
class WinTerm(object):
def __init__(self):
self._default = win32.GetConsoleScreenBufferInfo(win32.STDOUT).wAttributes
self.set_attrs(self._default)
self._default_fore = self._fore
self._default_back = self._back
self._default_style = self._style
def get_attrs(self):
return self._fore + self._back * 16 + self._style
def set_attrs(self, value):
self._fore = value & 7
self._back = (value >> 4) & 7
self._style = value & WinStyle.BRIGHT
def reset_all(self, on_stderr=None):
self.set_attrs(self._default)
self.set_console(attrs=self._default)
def fore(self, fore=None, on_stderr=False):
if fore is None:
fore = self._default_fore
self._fore = fore
self.set_console(on_stderr=on_stderr)
def back(self, back=None, on_stderr=False):
if back is None:
back = self._default_back
self._back = back
self.set_console(on_stderr=on_stderr)
def style(self, style=None, on_stderr=False):
if style is None:
style = self._default_style
self._style = style
self.set_console(on_stderr=on_stderr)
def set_console(self, attrs=None, on_stderr=False):
if attrs is None:
attrs = self.get_attrs()
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
win32.SetConsoleTextAttribute(handle, attrs)
def get_position(self, handle):
position = win32.GetConsoleScreenBufferInfo(handle).dwCursorPosition
# Because Windows coordinates are 0-based,
# and win32.SetConsoleCursorPosition expects 1-based.
position.X += 1
position.Y += 1
return position
def set_cursor_position(self, position=None, on_stderr=False):
if position is None:
#I'm not currently tracking the position, so there is no default.
#position = self.get_position()
return
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
win32.SetConsoleCursorPosition(handle, position)
def cursor_up(self, num_rows=0, on_stderr=False):
if num_rows == 0:
return
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
position = self.get_position(handle)
adjusted_position = (position.Y - num_rows, position.X)
self.set_cursor_position(adjusted_position, on_stderr)
def erase_data(self, mode=0, on_stderr=False):
# 0 (or None) should clear from the cursor to the end of the screen.
# 1 should clear from the cursor to the beginning of the screen.
# 2 should clear the entire screen. (And maybe move cursor to (1,1)?)
#
# At the moment, I only support mode 2. From looking at the API, it
# should be possible to calculate a different number of bytes to clear,
# and to do so relative to the cursor position.
if mode[0] not in (2,):
return
handle = win32.STDOUT
if on_stderr:
handle = win32.STDERR
# here's where we'll home the cursor
coord_screen = win32.COORD(0,0)
csbi = win32.GetConsoleScreenBufferInfo(handle)
# get the number of character cells in the current buffer
dw_con_size = csbi.dwSize.X * csbi.dwSize.Y
# fill the entire screen with blanks
win32.FillConsoleOutputCharacter(handle, ord(' '), dw_con_size, coord_screen)
# now set the buffer's attributes accordingly
win32.FillConsoleOutputAttribute(handle, self.get_attrs(), dw_con_size, coord_screen );
# put the cursor at (0, 0)
win32.SetConsoleCursorPosition(handle, (coord_screen.X, coord_screen.Y))
| 34.190083 | 95 | 0.629683 |
9c08721e50a9252035d6ca33b282764795b45733 | 5,433 | py | Python | tests/test_adapter_fusion_loading.py | tilmanbeck/adapter-transformers | ed42ced6983891060bb160c5c4f2c5d64d2c205c | [
"Apache-2.0"
] | null | null | null | tests/test_adapter_fusion_loading.py | tilmanbeck/adapter-transformers | ed42ced6983891060bb160c5c4f2c5d64d2c205c | [
"Apache-2.0"
] | null | null | null | tests/test_adapter_fusion_loading.py | tilmanbeck/adapter-transformers | ed42ced6983891060bb160c5c4f2c5d64d2c205c | [
"Apache-2.0"
] | null | null | null | import copy
import tempfile
import unittest
import torch
from transformers import (
ADAPTER_CONFIG_MAP,
ADAPTERFUSION_CONFIG_MAP,
AdapterType,
BertModel,
RobertaModel,
XLMRobertaModel,
)
from .test_modeling_common import ids_tensor
from .utils import require_torch
def create_twin_models(model1):
model1.eval()
# create a twin initialized with the same random weights
model2 = copy.deepcopy(model1)
model2.eval()
return model1, model2
@require_torch
class AdapterFusionModelTest(unittest.TestCase):
model_classes = [BertModel, RobertaModel, XLMRobertaModel]
def test_add_adapter_fusion(self):
for adater_fusion_config_name, adapter_fusion_config in ADAPTERFUSION_CONFIG_MAP.items():
for config_name, adapter_config in ADAPTER_CONFIG_MAP.items():
for type_name, adapter_type in AdapterType.__members__.items():
for model_class in self.model_classes:
model_config = model_class.config_class
model = model_class(model_config())
# skip configs without invertible language adapters
if adapter_type == AdapterType.text_lang and not adapter_config.invertible_adapter:
continue
with self.subTest(model_class=model_class, config=config_name, adapter_type=type_name):
name1 = f"{type_name}-{config_name}-1"
name2 = f"{type_name}-{config_name}-2"
model.add_adapter(name1, adapter_type, config=adapter_config)
model.add_adapter(name2, adapter_type, config=adapter_config)
# adapter is correctly added to config
self.assertTrue(name1 in model.config.adapters.adapter_list(adapter_type))
self.assertTrue(name2 in model.config.adapters.adapter_list(adapter_type))
self.assertEqual(adapter_config, model.config.adapters.get(name1))
self.assertEqual(adapter_config, model.config.adapters.get(name2))
model.add_fusion([name1, name2], adater_fusion_config_name)
# check forward pass
input_ids = ids_tensor((1, 128), 1000)
input_data = {"input_ids": input_ids}
if adapter_type == AdapterType.text_task or adapter_type == AdapterType.text_lang:
input_data["adapter_names"] = [[name1, name2]]
adapter_output = model(**input_data)
base_output = model(input_ids)
self.assertEqual(len(adapter_output), len(base_output))
self.assertFalse(torch.equal(adapter_output[0], base_output[0]))
def test_load_adapter_fusion(self):
for adater_fusion_config_name, adapter_fusion_config in ADAPTERFUSION_CONFIG_MAP.items():
for name, adapter_type in AdapterType.__members__.items():
for model_class in self.model_classes:
with self.subTest(model_class=model_class, adapter_type=name):
model_config = model_class.config_class
model1 = model_class(model_config())
name1 = "name1"
name2 = "name2"
model1.add_adapter(name1, adapter_type)
model1.add_adapter(name2, adapter_type)
model1, model2 = create_twin_models(model1)
model1.add_fusion([name1, name2], adater_fusion_config_name)
with tempfile.TemporaryDirectory() as temp_dir:
model1.save_adapter_fusion(temp_dir, ",".join([name1, name2]))
model2.load_adapter_fusion(temp_dir)
model1.eval()
model2.eval()
# check if adapter was correctly loaded
self.assertTrue(model1.config.adapter_fusion_models == model2.config.adapter_fusion_models)
# check equal output
in_data = ids_tensor((1, 128), 1000)
output1 = model1(in_data, adapter_names=[[name1, name2]])
output2 = model2(in_data, adapter_names=[[name1, name2]])
self.assertEqual(len(output1), len(output2))
self.assertTrue(torch.equal(output1[0], output2[0]))
def test_model_config_serialization(self):
"""PretrainedConfigurations should not raise an Exception when serializing the config dict
See, e.g., PretrainedConfig.to_json_string()
"""
for model_class in self.model_classes:
for k, v in ADAPTERFUSION_CONFIG_MAP.items():
model_config = model_class.config_class
model = model_class(model_config())
model.add_adapter("test1", AdapterType.text_task)
model.add_adapter("test2", AdapterType.text_task)
model.add_fusion(["test1", "test2"], adapter_fusion_config=v)
# should not raise an exception
model.config.to_json_string()
| 48.079646 | 115 | 0.585864 |
7e83e32b790ec316c24bd025c3c50af78279c966 | 692 | py | Python | app/main.py | pyVarad/fast-api-auth0-recepie | 453a1e05b76cb10829a4991ce60a1a0afb112803 | [
"Apache-2.0"
] | null | null | null | app/main.py | pyVarad/fast-api-auth0-recepie | 453a1e05b76cb10829a4991ce60a1a0afb112803 | [
"Apache-2.0"
] | null | null | null | app/main.py | pyVarad/fast-api-auth0-recepie | 453a1e05b76cb10829a4991ce60a1a0afb112803 | [
"Apache-2.0"
] | null | null | null | """ MAIN Application starts here.
"""
from fastapi import FastAPI
from db.database import engine
from models import dimensions, questions, answers, users
from controllers.dimension import dimension_router
from controllers.questions import questions_router
from controllers.answers import answer_router
from controllers.users import users_router
app = FastAPI()
app.include_router(dimension_router)
app.include_router(questions_router)
app.include_router(answer_router)
app.include_router(users_router)
dimensions.Base.metadata.create_all(bind=engine)
questions.Base.metadata.create_all(bind=engine)
answers.Base.metadata.create_all(bind=engine)
users.Base.metadata.create_all(bind=engine)
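# Local development entry point (assumed project layout): `uvicorn app.main:app --reload`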
| 31.454545 | 56 | 0.848266 |
e784c6a1e050bdbb2f38e6e71264e01e86f32dad | 27 | py | Python | hola.py | Esfireno/Course_Pytorch | 1c61f0ba8f8b9f144b44048981d71e345065ea57 | [
"MIT"
] | null | null | null | hola.py | Esfireno/Course_Pytorch | 1c61f0ba8f8b9f144b44048981d71e345065ea57 | [
"MIT"
] | null | null | null | hola.py | Esfireno/Course_Pytorch | 1c61f0ba8f8b9f144b44048981d71e345065ea57 | [
"MIT"
] | null | null | null | print("Hola pinche mundo")
| 13.5 | 26 | 0.740741 |
e5afd90af2d2d12981bf1809906017958276488c | 22,962 | py | Python | scripts/create_bokeh_dash.py | rynecarbone/fantasy_scoring | b3788e7642f7f8b0a34ed334c83ecf573c429a6e | [
"MIT"
] | null | null | null | scripts/create_bokeh_dash.py | rynecarbone/fantasy_scoring | b3788e7642f7f8b0a34ed334c83ecf573c429a6e | [
"MIT"
] | null | null | null | scripts/create_bokeh_dash.py | rynecarbone/fantasy_scoring | b3788e7642f7f8b0a34ed334c83ecf573c429a6e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Create a bokeh dashboard with customJS callbacks for interactivity"""
import pandas as pd
from bokeh.layouts import row, column, layout, gridplot, widgetbox
from bokeh.models import (CustomJS, RangeSlider, HoverTool, Range1d,
CheckboxGroup, CheckboxButtonGroup, Select, RadioButtonGroup,
ColumnDataSource, CDSView, BooleanFilter, GroupFilter, LinearAxis, LogAxis)
from bokeh.plotting import figure, output_file,show
from bokeh.palettes import Category10
from bokeh.models.widgets import Tabs, Panel
from bokeh.models.tickers import FixedTicker
__author__ = 'Ryne Carbone'
# Define some global variables
default_roster = dict(nQB=1, nRB=2, nWR=2, nTE=1, nFLEX=2, nTEAMS=10)
POSITIONS = ['QB','RB','TE','WR']
PPR_TYPES = ['STD','HPPR','PPR']
PFD_TYPES = ['STD','HPFD','PFD']
sorts = ['PPR_type','PFD_type','Pos','RankPos']
groups = ['PPR_type','PFD_type','Pos','RankPos']
# Read input data
df = (pd.read_csv('data/espn_fantasy_data_small.csv')
.sort_values(by=sorts)
.reset_index(drop=True))
YEARS = sorted(list(df.Year.unique()))
# Split data for determining Flex info: [year_ind][pfd_ind][ppr_ind]
df = df.sort_values(by=['Year','PPR_type','PFD_type','Pts'],ascending=False).reset_index(drop=True)
# pscript behaves weirdly with Float64Array: https://github.com/flexxui/pscript/issues/5
df['Pts']=df['Pts'].astype(str)
l_df_flex = [[[ColumnDataSource(df[(df.Year==y)&(df.PFD_type==pf)&(df.PPR_type==pp)][['Pos','RankPos','Pts']])
for pp in ['STD','HPPR','PPR']]
for pf in ['STD','HPFD','PFD']]
for y in sorted(list(set(df.Year.tolist())))]
# Define source data
raw_source = ColumnDataSource({'Pos': [], 'RankPos': [], 'PPR_type': [], 'PFD_type': [], 'AVG': []})
raw_source_bands = ColumnDataSource({'Pos': [], 'RankPos': [], 'PPR_type': [], 'PFD_type': [], 'BANDS': []})
rel_source = ColumnDataSource({'Pos': [], 'RankPos': [], 'PPR_type': [], 'PFD_type': [], 'REL': []})
rel_source_bands = ColumnDataSource({'Pos': [], 'RankPos': [], 'PPR_type': [], 'PFD_type': [], 'BANDS': []})
rb_source = ColumnDataSource({'Pos': [], 'RankPos': [], 'PPR_type': [], 'PFD_type': [], 'RBB': []})
rb_source_bands = ColumnDataSource({'Pos': [], 'RankPos': [], 'PPR_type': [], 'PFD_type': [], 'BANDS': []})
# Define the tools
tools = 'box_zoom,wheel_zoom,pan,reset,save'
rel_hover = HoverTool(tooltips=[
('Pos. Rank','@Pos-@RankPos'),
('Rel. Val.', '@REL{0.000 a}')])
rb_hover = HoverTool(tooltips=[
('Pos. Rank','@Pos-@RankPos'),
('Rel. Val. (RB baseline)', '@RBB{0.000 a}')])
hover = HoverTool(tooltips=[
('Pos. Rank','@Pos-@RankPos'),
('Avg Pts.', '@AVG{0.0 a}')])
# Create filters for using views
qb = GroupFilter(column_name='Pos', group='QB')
rb = GroupFilter(column_name='Pos', group='RB')
te = GroupFilter(column_name='Pos', group='TE')
wr = GroupFilter(column_name='Pos', group='WR')
ppr = GroupFilter(column_name='PPR_type', group='PPR')
hppr = GroupFilter(column_name='PPR_type', group='HPPR')
sppr = GroupFilter(column_name='PPR_type', group='STD')
pfd = GroupFilter(column_name='PFD_type', group='PFD')
hpfd = GroupFilter(column_name='PFD_type', group='HPFD')
spfd = GroupFilter(column_name='PFD_type', group='STD')
ppr_filters = [sppr, hppr, ppr]
pfd_filters = [spfd, hpfd, pfd]
pos_filters= [qb, rb, te, wr]
def callback(l_df_flex=l_df_flex, default_roster=None, yr_lo=None, yr_hi=None, init_run=False,
rel_source=rel_source, rel_source_bands=rel_source_bands,
raw_source=raw_source, raw_source_bands=raw_source_bands,
rb_source=rb_source, rb_source_bands=rb_source_bands,
YEARS=YEARS, PPR_TYPES=PPR_TYPES, PFD_TYPES=PFD_TYPES, window=None):
"""Massive callback written with pscript, converted to javascript
Note: this is a terrible way to calculate averages/min/max by groups,
but pscript doesn't allow the use of python packages, and bokeh only allows
python through pscript with standalone html pages :(
:param l_df_flex: list of data frames in 3D array (by year, ppr type, pfd type)
:param default_roster: roster settings for initial plot creation
:param yr_lo: min year for initial plot creation
:param yr_hi: max year for initial plot creation
    :param init_run: flag indicating whether this is the initial plot-creation run
:param rel_source: ColumnDataSource holding data for relative value plot
    :param rel_source_bands: ColumnDataSource holding data for relative value plot bands
:param raw_source: ColumnDataSource holding data for raw value plot
    :param raw_source_bands: ColumnDataSource holding data for raw value plot bands
:param rb_source: ColumnDataSource holding data for RB baseline plot
    :param rb_source_bands: ColumnDataSource holding data for RB baseline plot bands
:param YEARS: list of possible years to select
    :param PPR_TYPES: list of possible ppr settings
:param PFD_TYPES: list of possible pfd types
:param window: allows access to javascript functions/variables
:return: Updated ColumnDataSources on initial run, otherwise bokeh handles JS updates
"""
# Read in the roster settings for calculating flex replacement values
if default_roster:
roster = default_roster
else:
roster = dict(nQB=nQB.value, nRB=nRB.value, nWR=nWR.value,
nTE=nTE.value, nFLEX=nFLEX.value, nTEAMS=nTEAMS.value)
# Set the correct year range for selecting data
if not yr_lo:
yr_lo = year.value[0]
if not yr_hi:
yr_hi = year.value[1]
yr_range = range(YEARS.index(yr_lo), YEARS.index(yr_hi)+1)
# Connect to plotting data sources
data = rel_source.data
data_bands = rel_source_bands.data
data_raw = raw_source.data
data_bands_raw = raw_source_bands.data
data_rb = rb_source.data
data_bands_rb = rb_source_bands.data
bmax=[]; bmin=[]; bmax_raw=[]; bmin_raw=[]; bmax_rb=[]; bmin_rb=[]
# Reset return data
return_keys = ['Pos','RankPos','PPR_type','PFD_type','REL']
return_keys_raw = ['Pos','RankPos','PPR_type','PFD_type','AVG']
return_keys_rb = ['Pos','RankPos','PPR_type','PFD_type','RBB']
return_bands_keys = ['Pos','RankPos','PPR_type','PFD_type','BANDS']
for rk,rkr,rkrb,rbk in zip(return_keys, return_keys_raw, return_keys_rb, return_bands_keys):
data[rk]=[]; data_bands[rbk]=[];
data_raw[rkr]=[]; data_bands_raw[rbk]=[]
data_rb[rkrb]=[]; data_bands_rb[rbk]=[]
# Start calculating
for i_pfd in range(3):
for i_ppr in range(3):
raw_val_dict = {}
rel_val_dict = {}
for i_y in yr_range:
# Get data for this year and scoring type
i_df = l_df_flex[i_y][i_pfd][i_ppr].data
# For each year, calculate replacement value by position
rep_val = {'QB':0,'RB':0,'WR':0,'TE':0}
nflex_left = int(roster['nFLEX'])*int(roster['nTEAMS'])
# Update values until filled up all flex spots
for pos, rk, pts in zip(i_df['Pos'],i_df['RankPos'],i_df['Pts']):
if pos=='QB' and rk==roster['nQB']*roster['nTEAMS']:
rep_val['QB'] = pts
elif pos!='QB' and int(rk) > int(roster[f'n{pos}'])*int(roster['nTEAMS']) and nflex_left>0:
rep_val[pos] = pts
nflex_left -= 1
elif pos!='QB' and int(rk)==int(roster[f'n{pos}'])*int(roster['nTEAMS']):
rep_val[pos] = pts
# Calculate pts over rep
pts_over_rep = []
for pos, pts in zip(i_df['Pos'],i_df['Pts']):
tmp_pts = (float(pts) - float(rep_val[pos])) if (float(pts)-float(rep_val[pos])) > 0 else 0
pts_over_rep.append(tmp_pts)
# Convert to relative val
tot_rep_pts = sum(pts_over_rep)
i_rel_val = [i_por/tot_rep_pts for i_por in pts_over_rep]
# Make dict for rel val/raw val by pos and rank
for pos, rk, pts, rv in zip(i_df['Pos'], i_df['RankPos'], i_df['Pts'], i_rel_val):
key = f'{pos}_{rk}'
# Create entry for raw val
if not raw_val_dict.get(key):
raw_val_dict[key] = []
raw_val_dict[key].append(float(pts))
# Only create entry for rel val if val not 0
if rv == 0: continue
if not rel_val_dict.get(key):
rel_val_dict[key] = []
rel_val_dict[key].append(100*rv)
# Get the average per year, and bands for rel val
l_pos = []; l_rankpos = []; l_rel = []; l_bmin = []; l_bmax = []
l_pos_rb = []; l_rankpos_rb = []; l_rel_rb = []; l_bmin_rb = []; l_bmax_rb = []
for kk, vv in rel_val_dict.items():
k_pos, k_rk = kk.split('_')
l_pos.append(k_pos)
l_rankpos.append(int(k_rk))
l_rel.append(sum(vv)/len(vv))
l_bmin.append(min(vv))
l_bmax.append(max(vv))
if rel_val_dict.get(f'RB_{k_rk}'):
vv_rb = rel_val_dict.get(f'RB_{k_rk}')
l_pos_rb.append(k_pos)
l_rankpos_rb.append(int(k_rk))
l_rel_rb.append((sum(vv)/len(vv))/(sum(vv_rb)/len(vv_rb)))
l_bmin_rb.append(min(vv)/(sum(vv_rb)/len(vv_rb)))
l_bmax_rb.append(max(vv)/(sum(vv_rb)/len(vv_rb)))
            # Get the average per year, and bands for raw val
l_pos_raw = []; l_rankpos_raw = []; l_raw = []; l_bmin_raw = []; l_bmax_raw = []
for kk, vv in raw_val_dict.items():
k_pos, k_rk = kk.split('_')
l_pos_raw.append(k_pos)
l_rankpos_raw.append(int(k_rk))
l_raw.append(sum(vv)/len(vv))
l_bmin_raw.append(min(vv))
l_bmax_raw.append(max(vv))
# Sort by rankpos?
sorted_zip = sorted(zip(l_pos, l_rankpos, l_rel, l_bmin, l_bmax),
key=lambda tup: (tup[0], float(tup[1])/1000.))
sorted_zip_raw = sorted(zip(l_pos_raw, l_rankpos_raw, l_raw, l_bmin_raw, l_bmax_raw),
key=lambda tup: (tup[0], float(tup[1])/1000.))
sorted_zip_rb = sorted(zip(l_pos_rb, l_rankpos_rb, l_rel_rb, l_bmin_rb, l_bmax_rb),
key=lambda tup: (tup[0], float(tup[1])/1000.))
for tup in sorted_zip:
data['Pos'].append(tup[0])
data['RankPos'].append(tup[1])
data['PFD_type'].append(PFD_TYPES[i_pfd])
data['PPR_type'].append(PPR_TYPES[i_ppr])
data['REL'].append(float(tup[2]))
bmin.append(float(tup[3]))
bmax.append(float(tup[4]))
for tup in sorted_zip_raw:
data_raw['Pos'].append(tup[0])
data_raw['RankPos'].append(tup[1])
data_raw['PFD_type'].append(PFD_TYPES[i_pfd])
data_raw['PPR_type'].append(PPR_TYPES[i_ppr])
data_raw['AVG'].append(float(tup[2]))
bmin_raw.append(float(tup[3]))
bmax_raw.append(float(tup[4]))
for tup in sorted_zip_rb:
data_rb['Pos'].append(tup[0])
data_rb['RankPos'].append(tup[1])
data_rb['PFD_type'].append(PFD_TYPES[i_pfd])
data_rb['PPR_type'].append(PPR_TYPES[i_ppr])
data_rb['RBB'].append(float(tup[2]))
bmin_rb.append(float(tup[3]))
bmax_rb.append(float(tup[4]))
# Create bands for relative data
data_bands['Pos'] = list(data['Pos']) + list(reversed(data['Pos']))
data_bands['RankPos'] = list(data['RankPos']) + list(reversed(data['RankPos']))
data_bands['PFD_type'] = list(data['PFD_type']) + list(reversed(data['PFD_type']))
data_bands['PPR_type'] = list(data['PPR_type']) + list(reversed(data['PPR_type']))
data_bands['BANDS'] = list(bmin) + list(reversed(bmax))
# Create bands for raw data
data_bands_raw['Pos'] = list(data_raw['Pos']) + list(reversed(data_raw['Pos']))
data_bands_raw['RankPos'] = list(data_raw['RankPos']) + list(reversed(data_raw['RankPos']))
data_bands_raw['PFD_type'] = list(data_raw['PFD_type']) + list(reversed(data_raw['PFD_type']))
data_bands_raw['PPR_type'] = list(data_raw['PPR_type']) + list(reversed(data_raw['PPR_type']))
data_bands_raw['BANDS'] = list(bmin_raw) + list(reversed(bmax_raw))
# Create bands for rb baseline data
data_bands_rb['Pos'] = list(data_rb['Pos']) + list(reversed(data_rb['Pos']))
data_bands_rb['RankPos'] = list(data_rb['RankPos']) + list(reversed(data_rb['RankPos']))
data_bands_rb['PFD_type'] = list(data_rb['PFD_type']) + list(reversed(data_rb['PFD_type']))
data_bands_rb['PPR_type'] = list(data_rb['PPR_type']) + list(reversed(data_rb['PPR_type']))
data_bands_rb['BANDS'] = list(bmin_rb) + list(reversed(bmax_rb))
if not init_run:
rel_source.change.emit()
rel_source_bands.change.emit()
raw_source.change.emit()
raw_source_bands.change.emit()
rb_source.change.emit()
rb_source_bands.change.emit()
else:
return raw_source, raw_source_bands,rel_source, rel_source_bands, rb_source, rb_source_bands
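# Illustrative sketch (not used by the dashboard): the replacement-value idea from the
# callback above, reduced to plain Python for a single position and ignoring the FLEX
# handling. `pts` is a hypothetical list of season totals sorted by positional rank and
# `n_starters` the number of league-wide starters; both names are assumptions.
def _points_over_replacement_sketch(pts, n_starters):
    # replacement value is the score of the last league-wide starter at the position
    rep_val = pts[n_starters - 1] if len(pts) >= n_starters else 0
    # anything at or below replacement contributes zero value, as in the loop above
    # e.g. _points_over_replacement_sketch([300, 250, 200, 150], 2) -> [50, 0, 0, 0]
    return [max(p - rep_val, 0) for p in pts]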
# Run initial relative value code
raw_source, raw_source_bands, rel_source, rel_source_bands, rb_source, rb_source_bands = callback(default_roster=default_roster, yr_lo=2002, yr_hi=2017, init_run=True)
# Define relative value y-axis ticks
rel_ticker = FixedTicker(ticks=[0.01,0.02,0.05,0.1, 0.2, 0.5, 1,2,5,10,20,50,100],
minor_ticks=[0.03,0.04,0.06,0.07,0.08,0.09,0.3,0.4,0.6,0.7,0.8,0.9,3,4,6,7,8,9,30,40,60,70,80,90])
raw_ticker = FixedTicker(ticks=[1,100,200,300,400, 500,600,700,800, 900,1000],
minor_ticks=[20,40,60,80,120,140,160,180,220,240,260,280,320,340,360,380,420,440,460,480,520,
540,560,580,620,640,660,680,720,740,760,780,820,840,860,880,920,940,960,980])
# Keep list of figures and glyphs
plots=[]; rel_plots=[]; rb_plots=[]
lines_list=[]; rel_lines_list=[]; rb_lines_list=[]
patches_list=[]; rel_patches_list=[]; rb_patches_list=[]
# Create Raw Pts plots separately for each score type
for i_pfd, (FD, pfd_type) in enumerate(zip(pfd_filters, PFD_TYPES)):
for i_ppr, (PR, ppr_type) in enumerate(zip(ppr_filters, PPR_TYPES)):
# Keep track of glyphs in each figure
lines=[]; rel_lines=[];rb_lines=[]
patches=[]; rel_patches=[]; rb_patches=[]
# Create figure
temp_plot = figure(tools=[hover, tools], x_axis_label='Position Rank', x_range=Range1d(start=-1, end=122, bounds="auto"),
y_axis_label='Points', y_axis_type='log', y_range=Range1d(start=20,end=520, bounds=(20,1000)))
temp_plot2 = figure(tools=[rel_hover, tools], x_axis_label='Position Rank',
y_axis_label='Relative Value (%)', y_axis_type='log', y_range=Range1d(start=0.01, end=11, bounds=(0.01,100)))
temp_plot3 = figure(tools=[rb_hover, tools], x_axis_label='Position Rank',
y_axis_label='Relative Value (RB==1)', y_axis_type='log', y_range=Range1d(start=0.04, end=25, bounds=(0.01,100)))
temp_plot.yaxis.ticker = raw_ticker
temp_plot2.yaxis.ticker = rel_ticker
temp_plot3.yaxis.ticker = rel_ticker
# Add line and patch for each position
for i, (P, p, c) in enumerate(zip(pos_filters, POSITIONS, reversed(Category10[4]))):
            # Only create legend in upper right plot
leg = p if (i_pfd==0 and i_ppr==2) else None
# Create a line for the avg pts by positional rank
l1 = temp_plot.line('RankPos', 'AVG', source=raw_source, legend=leg, color=c, line_width=3,
view=CDSView(source=raw_source, filters=[P, FD, PR]))
l2 = temp_plot2.line('RankPos','REL', source=rel_source, legend=leg, color=c, line_width=3,
view=CDSView(source=rel_source, filters=[P, FD, PR]))
l3 = temp_plot3.line('RankPos','RBB', source=rb_source, legend=leg, color=c, line_width=3,
view=CDSView(source=rb_source, filters=[P, FD, PR]))
# Create a patched area for range of values
p1 = temp_plot.patch('RankPos', 'BANDS', source=raw_source_bands, legend=leg, color=c, alpha=0.1,
view=CDSView(source=raw_source_bands, filters=[P, FD, PR]))
p2 = temp_plot2.patch('RankPos','BANDS', source=rel_source_bands, legend=leg, color=c, alpha=0.1,
view=CDSView(source=rel_source_bands, filters=[P, FD, PR]))
p3 = temp_plot3.patch('RankPos','BANDS', source=rb_source_bands, legend=leg, color=c, alpha=0.1,
view=CDSView(source=rb_source_bands, filters=[P, FD, PR]))
# Update lines/patches lists
lines.append(l1)
rel_lines.append(l2)
rb_lines.append(l3)
patches.append(p1)
rel_patches.append(p2)
rb_patches.append(p3)
# Add lists to master list of lists for all figures
lines_list.append(lines)
rel_lines_list.append(rel_lines)
rb_lines_list.append(rb_lines)
patches_list.append(patches)
rel_patches_list.append(rel_patches)
rb_patches_list.append(rb_patches)
# Hide yaxis
if i_ppr != 0:
temp_plot.yaxis.visible=False
temp_plot2.yaxis.visible=False
temp_plot3.yaxis.visible=False
# Hide xaxis
if i_pfd != 2:
temp_plot.xaxis.visible=False
temp_plot2.xaxis.visible=False
temp_plot3.xaxis.visible=False
# Add Row labels on right
if i_ppr == 2:
temp_plot.add_layout(LogAxis(axis_label=pfd_type,axis_label_text_font_style='bold', ticker=raw_ticker), 'right')
temp_plot2.add_layout(LogAxis(axis_label=pfd_type,axis_label_text_font_style='bold', ticker=rel_ticker), 'right')
temp_plot3.add_layout(LogAxis(axis_label=pfd_type,axis_label_text_font_style='bold', ticker=rel_ticker), 'right')
# Add Col labels on top
if i_pfd == 0:
temp_plot.title.text = ppr_type
temp_plot.title.align = 'center'
temp_plot2.title.text = ppr_type
temp_plot2.title.align = 'center'
temp_plot3.title.text = ppr_type
temp_plot3.title.align = 'center'
plots.append(temp_plot)
rel_plots.append(temp_plot2)
rb_plots.append(temp_plot3)
# Synchronize all axis ranges for linked panning/zooming
for p, p2, p3 in zip(plots, rel_plots, rb_plots):
p.x_range = plots[8].x_range
p.y_range = plots[8].y_range
p2.x_range = rel_plots[8].x_range
p2.y_range = rel_plots[8].y_range
p3.x_range = rb_plots[8].x_range
p3.y_range = rb_plots[8].y_range
# Arrange into a grid
grid = gridplot(plots, ncols=3, toolbar_location='right',
plot_height=250, plot_width=300)
grid_rel = gridplot(rel_plots, ncols=3, toolbar_location='right',
plot_height=250, plot_width=300)
grid_rb = gridplot(rb_plots, ncols=3, toolbar_location='right',
plot_height=250, plot_width=300)
# Define the layout
row_plot = row(children=[grid])
# This changes the alpha of the lines and fills
def checkbox_callback(checkboxes=None, lines_list=None, patches_list=None,
rel_lines_list=None, rel_patches_list=None,
rb_lines_list=None, rb_patches_list=None):
for l in lines_list:
l[0].glyph.line_alpha = 1 if 0 in checkboxes.active else 0.15
l[1].glyph.line_alpha = 1 if 1 in checkboxes.active else 0.15
l[2].glyph.line_alpha = 1 if 2 in checkboxes.active else 0.15
l[3].glyph.line_alpha = 1 if 3 in checkboxes.active else 0.15
for l in rel_lines_list:
l[0].glyph.line_alpha = 1 if 0 in checkboxes.active else 0.15
l[1].glyph.line_alpha = 1 if 1 in checkboxes.active else 0.15
l[2].glyph.line_alpha = 1 if 2 in checkboxes.active else 0.15
l[3].glyph.line_alpha = 1 if 3 in checkboxes.active else 0.15
for l in rb_lines_list:
l[0].glyph.line_alpha = 1 if 0 in checkboxes.active else 0.15
l[1].glyph.line_alpha = 1 if 1 in checkboxes.active else 0.15
l[2].glyph.line_alpha = 1 if 2 in checkboxes.active else 0.15
l[3].glyph.line_alpha = 1 if 3 in checkboxes.active else 0.15
for p in patches_list:
p[0].glyph.fill_alpha = .1 if 0 in checkboxes.active else 0.01
p[1].glyph.fill_alpha = .1 if 1 in checkboxes.active else 0.01
p[2].glyph.fill_alpha = .1 if 2 in checkboxes.active else 0.01
p[3].glyph.fill_alpha = .1 if 3 in checkboxes.active else 0.01
for p in rel_patches_list:
p[0].glyph.fill_alpha = .1 if 0 in checkboxes.active else 0.01
p[1].glyph.fill_alpha = .1 if 1 in checkboxes.active else 0.01
p[2].glyph.fill_alpha = .1 if 2 in checkboxes.active else 0.01
p[3].glyph.fill_alpha = .1 if 3 in checkboxes.active else 0.01
for p in rb_patches_list:
p[0].glyph.fill_alpha = .1 if 0 in checkboxes.active else 0.01
p[1].glyph.fill_alpha = .1 if 1 in checkboxes.active else 0.01
p[2].glyph.fill_alpha = .1 if 2 in checkboxes.active else 0.01
p[3].glyph.fill_alpha = .1 if 3 in checkboxes.active else 0.01
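# Note on the callback above: each index in checkboxes.active corresponds to one entry
# of the POSITIONS list (same order as the CheckboxButtonGroup labels defined below), so
# toggling a button dims or restores the matching line and band in every subplot.
# Minimal sketch of the same toggle for a single renderer (names are illustrative only):
# renderer.glyph.line_alpha = 1 if position_index in checkboxes.active else 0.15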
# This selects y-axis
def pts_callback(grid_raw=grid, grid_rel=grid_rel, grid_rb=grid_rb):
if points.value == 'Raw Points':
row_plot.children=[grid_raw]
elif points.value == 'Relative Value':
row_plot.children=[grid_rel]
elif points.value == 'RB Baseline':
row_plot.children = [grid_rb]
# Slider (select year range)
rel_callback = CustomJS.from_py_func(callback)
year_slider = RangeSlider(start=2002, end=2017, step=1, value=(2002, 2017), title='Year Range',
callback=rel_callback, callback_policy='mouseup')
rel_callback.args['year'] = year_slider
# Add checkbox for toggling lines for positions
checkboxes = CheckboxButtonGroup(labels=POSITIONS, active=[0,1,2,3],
callback=CustomJS.from_py_func(checkbox_callback))
checkboxes.callback.args = dict(checkboxes=checkboxes, lines_list=lines_list, patches_list=patches_list,
rel_lines_list=rel_lines_list, rel_patches_list=rel_patches_list,
rb_lines_list=rb_lines_list, rb_patches_list=rb_patches_list)
# Add select for defining roster
select_qb = Select(options=["1","2"], value="1", title='nQB', callback=rel_callback)
rel_callback.args['nQB'] = select_qb
select_rb = Select(options=["1","2","3","4"], value="2", title='nRB', callback=rel_callback)
rel_callback.args['nRB'] = select_rb
select_wr = Select(options=["1","2","3","4"], value="2", title='nWR',callback=rel_callback)
rel_callback.args['nWR'] = select_wr
select_te = Select(options=["1","2"], value="1", title='nTE',callback=rel_callback)
rel_callback.args['nTE'] = select_te
select_flex = Select(options=["1","2","3","4"], value="2", title='nFlex',callback=rel_callback)
rel_callback.args['nFLEX'] = select_flex
select_teams = Select(options=["8","10","12","14"], value="10", title='nTeams',callback=rel_callback)
rel_callback.args['nTEAMS'] = select_teams
# Add select for choosing Y-axis
pts_callback = CustomJS.from_py_func(pts_callback)
pts_group = Select(title='Y-axis', options=['Raw Points', 'Relative Value','RB Baseline'], value='Raw Points', callback=pts_callback)
pts_callback.args['points'] = pts_group
pts_callback.args['row_plot'] = row_plot
# Add widgetbox, define page layout
wbox = widgetbox(children=[pts_group, year_slider, checkboxes, select_qb, select_rb,
select_wr, select_te, select_flex, select_teams], width=200)
l = row(children=[wbox, row_plot], sizing_mode='scale_height')
# Define output location
output_file('output/dashboard.html')
show(l)
| 51.254464 | 168 | 0.668757 |
78dff661c93c0ab33bc26e125d6f14baa06e65d8 | 6,107 | py | Python | lib/tests/streamlit/metrics_test.py | cdeil/streamlit | 173aa1cd5835174620e8246eb5d7116be2cb6ffc | [
"Apache-2.0"
] | 1 | 2020-09-20T11:18:09.000Z | 2020-09-20T11:18:09.000Z | lib/tests/streamlit/metrics_test.py | cdeil/streamlit | 173aa1cd5835174620e8246eb5d7116be2cb6ffc | [
"Apache-2.0"
] | 108 | 2020-11-10T22:19:28.000Z | 2022-03-29T16:48:55.000Z | lib/tests/streamlit/metrics_test.py | cdeil/streamlit | 173aa1cd5835174620e8246eb5d7116be2cb6ffc | [
"Apache-2.0"
] | null | null | null | # Copyright 2018-2020 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Metrics Module Unittest."""
from unittest.mock import call, patch
import unittest
import pytest
import streamlit.metrics
from streamlit import config
class MetricsTest(unittest.TestCase):
"""Metrics Unittest class."""
def setUp(self):
"""Make sure Client singleton is always empty before starting tests."""
streamlit.metrics.Client._singleton = None
def tearDown(self):
"""Cleanup metrics client."""
config.set_option("global.metrics", False)
streamlit.metrics.Client._singleton = None
client = streamlit.metrics.Client.get_current()
client.toggle_metrics()
def test_constructor(self):
"""Test streamlit.metrics.Client."""
client = streamlit.metrics.Client()
self.assertEqual(streamlit.metrics.Client._singleton, client)
def test_get_current(self):
"""Test streamlit.metrics.clientget_current."""
client = streamlit.metrics.Client.get_current()
self.assertEqual(streamlit.metrics.Client._singleton, client)
def test_not_singleton(self):
"""Test streamlit.metrics.Client not singleton."""
client = streamlit.metrics.Client.get_current()
with pytest.raises(RuntimeError) as e:
streamlit.metrics.Client()
msg = "Client already initialized. Use .get_current() instead"
self.assertEqual(msg, str(e.value))
def test_enabled_metrics_no_prometheus(self):
"""Test streamlit.metrics.Client.toggle_metrics no prometheus."""
config.set_option("global.metrics", True)
client = streamlit.metrics.Client.get_current()
builtin_import = "builtins.__import__"
with pytest.raises(ImportError) as e:
with patch(builtin_import, side_effect=ImportError):
client.toggle_metrics()
msg = "prometheus-client is not installed. pip install prometheus-client"
self.assertEqual(msg, str(e.value))
def test_enabled_metrics(self):
"""Test streamlit.metrics.toggle_metrics enabled."""
config.set_option("global.metrics", True)
client = streamlit.metrics.Client.get_current()
client._metrics = {}
# yapf: disable
client._raw_metrics = [
('Counter', 'unittest_counter', 'Unittest counter', []),
('Counter', 'unittest_counter_labels', 'Unittest counter labels', ['label']),
('Gauge', 'unittest_gauge', 'Unittest gauge', []),
]
# yapf: enable
client.toggle_metrics()
client.get("unittest_counter").inc()
client.get("unittest_counter_labels").labels("some_label")
client.get("unittest_gauge").set(42)
truth = [
"unittest_counter_total 1.0",
'unittest_counter_labels_total{label="some_label"} 0.0',
"unittest_gauge 42.0",
]
lines = client.generate_latest().splitlines()
metrics = [
x.decode("utf-8") for x in lines if x.decode("utf-8").startswith("unit")
]
metrics = [str(x) for x in metrics if "_created" not in x]
self.assertEqual(sorted(truth), sorted(metrics))
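    # Note on the expected strings above (based on the prometheus-client text
    # exposition format): Counter metrics are exposed with a "_total" suffix and
    # companion "*_created" samples, which is why the truth list uses
    # "unittest_counter_total 1.0" and the "_created" lines are filtered out
    # before comparison.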
def test_disabled_metrics_check_value(self):
"""Test streamlit.metrics.Client.toggle_metrics disabled check value."""
with patch("streamlit.metrics.MockMetric", spec=True) as mock_metric:
config.set_option("global.metrics", False)
client = streamlit.metrics.Client.get_current()
client._metrics = {}
# yapf: disable
client._raw_metrics = [
('Counter', 'unittest_counter', 'Unittest counter', []),
('Counter', 'unittest_counter_labels', 'Unittest counter labels', ['label']),
('Gauge', 'unittest_gauge', 'Unittest gauge', []),
]
# yapf: enable
client.toggle_metrics()
# Test that handler in Server.py will return nothing.
self.assertEqual(client.generate_latest(), "")
client.get("unittest_counter").inc()
client.get("unittest_counter_labels").labels("some_label")
client.get("unittest_gauge").set(42)
client.get("unittest_gauge").dec()
calls = [
call(), # Constructor
call(), # unittest_counter
call(), # unittest_counter_labels
call(), # unittest_gauge
call().inc(),
call().labels("some_label"),
call().set(42),
call().dec(),
]
self.assertEqual(calls, mock_metric.mock_calls)
def test_disabled_metrics(self):
"""Test streamlit.metrics.Client.toggle_metrics disabled."""
config.set_option("global.metrics", False)
client = streamlit.metrics.Client.get_current()
client._metrics = {}
# yapf: disable
client._raw_metrics = [
('Counter', 'unittest_counter', 'Unittest counter', []),
('Counter', 'unittest_counter_labels', 'Unittest counter labels', ['label']),
('Gauge', 'unittest_gauge', 'Unittest gauge', []),
]
# yapf: enable
client.toggle_metrics()
client.get("unittest_counter").inc()
client.get("unittest_counter_labels").labels("some_label")
client.get("unittest_gauge").set(42)
client.get("unittest_gauge").dec()
# Purposely not testing anything, just verifying the calls
# actually work.
| 37.466258 | 93 | 0.626494 |
f1066f217e54855f45c8db5ba0ac4e410e4f530d | 4,782 | py | Python | RelatedCode/FindVesselMaterialIntersection.py | Jack-XHP/LabPicV2-MaskRCNN | b0586b2827000c7b7337d5110b2b1fd6185053a8 | [
"MIT"
] | null | null | null | RelatedCode/FindVesselMaterialIntersection.py | Jack-XHP/LabPicV2-MaskRCNN | b0586b2827000c7b7337d5110b2b1fd6185053a8 | [
"MIT"
] | null | null | null | RelatedCode/FindVesselMaterialIntersection.py | Jack-XHP/LabPicV2-MaskRCNN | b0586b2827000c7b7337d5110b2b1fd6185053a8 | [
"MIT"
] | null | null | null | import numpy
import json
import cv2
import numpy as np
import os
import scipy.misc as misc
def show(Im):
cv2.imshow("show",Im.astype(np.uint8))
cv2.waitKey()
cv2.destroyAllWindows()
###############################################################################################
def FindIntersection(InDir,MatDir, VesselDir):
pp=0
for DirName in os.listdir(InDir):
pp+=1
print(pp)
DirName=InDir+"/"+DirName
MSgDir = DirName + "/" + MatDir + "//"
VSgDir = DirName + "/" + VesselDir + "//"
if not os.path.isdir(MSgDir):
print(MSgDir)
continue
# listfile=[]
# for fl in os.listdir(MSgDir):
# if ".png" in fl:
# listfile.append(fl)
# l=len(listfile)
k=0
Im = cv2.imread(DirName+"/Image.png")
#for i in range(l):
for mfile in os.listdir(MSgDir):
NVessels = 0
path1=MSgDir+"/"+mfile
if not os.path.exists(path1):continue
msg = cv2.imread(path1,0)
if msg.sum()==0:
os.remove(path1)
                print(path1 + " file removed!")
continue
# CatName=listfile[i][listfile[i].find("Class__")+7:listfile[i].find("__ClasID__")]
# CatID=listfile[i][listfile[i].find("ClasID__")+8:listfile[i].find(".png")]
emsg=np.expand_dims(msg,axis=2)
for vfile in os.listdir(VSgDir):
path2 = VSgDir + "/" + vfile
if not os.path.exists(path2): continue
vsg = cv2.imread(path2, 0)
inter=((vsg*msg)>0)#.astype(np.uint8)
print(path1)
print(path2)
if (inter).sum()/((msg>0).sum())<0.8:
if (inter).sum()/((msg>0).sum())>0.01:
#..........................................
Txt=" i(in vessel) f(front of vessel) a(after vessel)"
Im1=Im.copy()
Im1[:,:,0] *= 1-vsg
Im1[:, :, 2] *= 1 - msg
cv2.imshow(Txt+"2", cv2.resize(Im1,(500,500)))
cv2.imshow(Txt, cv2.resize(np.concatenate([vsg, msg], axis=1) * 250,(1000,500)))
while (True):
ch = chr(cv2.waitKey())
if ch=='i' or ch=='f' or ch=='a': break
cv2.destroyAllWindows()
if ch=='i':
emsg = np.concatenate([emsg, np.expand_dims(vsg, axis=2)], axis=2)
NVessels+=1
if ch=='a':
msg[inter > 0]=5
emsg[:,:,0]=msg
else:
emsg = np.concatenate([emsg, np.expand_dims(vsg,axis=2)],axis=2)
NVessels += 1
if NVessels>2:
print("error")
print(path1)
print(path2)
show(Im)
show(msg*50)
exit(0)
if emsg.shape[2]==2:
emsg = np.concatenate([emsg, np.expand_dims(vsg*0,axis=2)],axis=2)
cv2.imwrite(path1, emsg)
###############################################################################################################
# sg = cv2.imread(path1)
# Im = cv2.imread(DirName + "/Image.png")
# cv2.imshow("results", sg*50)
# Im[:,:,0]*=1-(sg[:,:,0]>0).astype(np.uint8)
# Im[:, :, 1] *= 1 - (sg[:, :, 1] > 0).astype(np.uint8)
# cv2.imshow("im",Im)
#
#
# for i in range(sg.shape[2]):
# print("------------------------------------------------------------------------")
# print(str(i))
# print(np.unique(sg[:,:,i]))
# cv2.imshow(str(i) +" ", sg[:,:,i] * 35)
#
# cv2.waitKey()
# cv2.destroyAllWindows()
###########################################################################################################################
os.rename(MSgDir,MSgDir.replace(MatDir,MatDir+"V"))
InDir=r"C:\Users\Sagi\Desktop\NewChemistryDataSet\NewFormat\Temp\\"##C:\Users\Sagi\Desktop\NewChemistryDataSet\NewFormat\Instance\\"
MatDir=r"PartsVi"
VesselDir=r"VesselV"
# FindIntersection(InDir, SubDir)
FindIntersection(InDir,MatDir, VesselDir) | 39.85 | 133 | 0.384776 |
acd42e8f17259fe1ebf3cf14b698e7b35d810a52 | 4,630 | py | Python | prod/jobs/check_dominat_symbol.py | garywangiam02/vnpy | fbb168bf977d95ae874e92a3655c6c893db16a1f | [
"MIT"
] | null | null | null | prod/jobs/check_dominat_symbol.py | garywangiam02/vnpy | fbb168bf977d95ae874e92a3655c6c893db16a1f | [
"MIT"
] | null | null | null | prod/jobs/check_dominat_symbol.py | garywangiam02/vnpy | fbb168bf977d95ae874e92a3655c6c893db16a1f | [
"MIT"
] | null | null | null | # flake8: noqa
"""
ๆดๆฐไธปๅๅ็บฆ
"""
from vnpy.trader.utility import load_json, save_json, append_data
from vnpy.trader.util_wechat import send_wx_msg
from vnpy.data.tdx.tdx_future_data import *
import os
import sys
import json
from collections import OrderedDict
import pandas as pd
vnpy_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
if vnpy_root not in sys.path:
sys.path.append(vnpy_root)
os.environ["VNPY_TESTING"] = "1"
log_csv_name = 'dominat_change_history.csv'
field_names = ['account_name', 'strategy_name', 'old_vt_symbol', 'new_vt_symbol', 'datetime']
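# Minimal sketch of the switch rule used in the loop below (illustrative only, not
# called by this script): a strategy is rolled to the new dominant contract only when
# the new full symbol sorts strictly after the one currently configured.
def _needs_switch_sketch(current_full_symbol: str, new_full_symbol: str) -> bool:
    # e.g. _needs_switch_sketch("RB2110", "RB2201") -> True
    return current_full_symbol < new_full_symbol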
if __name__ == "__main__":
# if len(sys.argv) < 2:
    #     print(f'Please enter a directory under {vnpy_root} to check, e.g. prod/account01', file=sys.stderr)
# exit()
# print(sys.argv)
# for account_folder in sys.argv[1:]:
for account_folder in ['prod/future_simnow']:
cta_path = os.path.abspath(os.path.join(vnpy_root, account_folder))
if not os.path.exists(cta_path):
            print(f'{cta_path} does not exist', file=sys.stderr)
continue
        print(f'Start checking strategy run configuration files under {cta_path}')
account_name = account_folder.split('/')[-1]
        # Create the API object
api_01 = TdxFutureData()
        # Update the local contract cache
api_01.update_mi_contracts()
setting_file_path = os.path.abspath(os.path.join(cta_path, '.vntrader', 'cta_strategy_pro_setting.json'))
settings = load_json(setting_file_path, auto_save=False)
if len(settings) == 0:
            print('No strategy configuration found')
os._exit(0)
changed = False
for strategy_name, setting in settings.items():
vt_symbol = setting.get('vt_symbol')
if not vt_symbol:
                print(f'{strategy_name} has no vt_symbol in its configuration', file=sys.stderr)
continue
if '.' in vt_symbol:
symbol, exchange = vt_symbol.split('.')
else:
symbol = vt_symbol
exchange = None
if exchange == Exchange.SPD:
print(f"ๆไธๅค็่ชๅฎไนๅฅๅฉๅ็บฆ{vt_symbol}")
continue
full_symbol = get_full_symbol(symbol).upper()
underlying_symbol = get_underlying_symbol(symbol).upper()
contract_info = api_01.future_contracts.get(underlying_symbol)
if not contract_info:
                print(f'{account_name}: {underlying_symbol} not found in the dominant contract configuration', file=sys.stderr)
continue
if 'mi_symbol' not in contract_info or 'exchange' not in contract_info or 'full_symbol' not in contract_info:
                print(f'{account_name}: dominant contract configuration is missing mi_symbol/exchange/full_symbol. {contract_info}', file=sys.stderr)
continue
new_mi_symbol = contract_info.get('mi_symbol')
new_exchange = contract_info.get('exchange')
            # Pre-compute the next dominant contract that meets the switch condition
next_mi_symbol = get_pre_switch_mi_symbol(contract_info)
if next_mi_symbol > new_mi_symbol:
                print(f'Using the pre-switch next dominant contract {new_mi_symbol} => {next_mi_symbol}')
next_mi_symbol = new_mi_symbol
new_vt_symbol = '.'.join([new_mi_symbol, new_exchange])
new_full_symbol = get_full_symbol(new_mi_symbol).upper()
if full_symbol >= new_full_symbol:
                print(f'{account_name} strategy config: full symbol {full_symbol}, dominant full symbol {new_full_symbol}, no update needed')
continue
if exchange:
if len(vt_symbol) != len(new_vt_symbol):
                    print(f'{account_name} config: length of {vt_symbol} does not match {new_vt_symbol}, not updating', file=sys.stderr)
continue
else:
if len(symbol) != len(new_mi_symbol):
                    print(f'{account_name} config: length of {vt_symbol} does not match {new_mi_symbol}, not updating', file=sys.stderr)
continue
setting.update({'vt_symbol': new_vt_symbol})
            send_wx_msg(f'{account_name}{strategy_name} dominant contract switched: {vt_symbol} => {new_vt_symbol}')
changed = True
            # Write to the log csv for later checks
append_data(file_name=log_csv_name,
dict_data={
'account_name': account_name,
'strategy_name': strategy_name,
'old_vt_symbol': vt_symbol,
'new_vt_symbol': new_vt_symbol,
'datetime': datetime.now().strftime('%Y-%m-%d %H:%M:%S')
},
field_names=field_names)
if changed:
save_json(setting_file_path, settings)
            print(f'Saved new configuration for {account_name}')
    print('Update finished')
os._exit(0)
| 36.746032 | 121 | 0.590929 |
6065fdaedf80bd7a86c8877ed6ae31ca774c75c3 | 426 | py | Python | api_scrapper.py | sallagoi/python_scrapper | 47bf682e4348c2289f0991df5d40f75d6dbac091 | [
"BSD-2-Clause"
] | null | null | null | api_scrapper.py | sallagoi/python_scrapper | 47bf682e4348c2289f0991df5d40f75d6dbac091 | [
"BSD-2-Clause"
] | null | null | null | api_scrapper.py | sallagoi/python_scrapper | 47bf682e4348c2289f0991df5d40f75d6dbac091 | [
"BSD-2-Clause"
] | null | null | null | from flask import Flask, jsonify, request
from pages_jaunes_france import PJ
app = Flask(__name__)
# incomes = [
# { 'description': 'salary', 'amount': 5000 }
# ]
@app.route('/phones/')
def get_phones():
query = 'pulido'
location = 'bayonne'
proximite = 0
pj = PJ()
pj.set_query(query)
pj.set_location(location)
pj.set_proximite(proximite)
result = pj.search()
return jsonify(result) | 19.363636 | 47 | 0.650235 |
58effa89618484e3ea4af4306b856a65528b2e7b | 352 | py | Python | hatespeech/api/__init__.py | tkhoa2711/twitter-hate-speech | 92476235bf3bf176a80b0b5879450f4acff42913 | [
"MIT"
] | null | null | null | hatespeech/api/__init__.py | tkhoa2711/twitter-hate-speech | 92476235bf3bf176a80b0b5879450f4acff42913 | [
"MIT"
] | 9 | 2018-06-12T04:52:15.000Z | 2020-04-22T02:45:43.000Z | hatespeech/api/__init__.py | tkhoa2711/twitter-hate-speech | 92476235bf3bf176a80b0b5879450f4acff42913 | [
"MIT"
] | null | null | null | from hatespeech.api.app import app
# import and register blueprints
from hatespeech.api import auth
app.register_blueprint(auth.mod)
from hatespeech.api import twitter
app.register_blueprint(twitter.mod)
from hatespeech.api import hateword
app.register_blueprint(hateword.mod)
from hatespeech.api import testing
app.register_blueprint(testing.mod)
| 23.466667 | 36 | 0.840909 |
c2f2f6f92e3699c775a47ce403c56f395601b6be | 2,194 | py | Python | userbot/modules/zipfile.py | rishi432/oubx | bc960e4b4e002c1c45535e13ec24f4547aa23a56 | [
"CNRI-Python",
"Condor-1.1",
"Naumen",
"Xnet",
"FTL",
"X11",
"MS-PL"
] | null | null | null | userbot/modules/zipfile.py | rishi432/oubx | bc960e4b4e002c1c45535e13ec24f4547aa23a56 | [
"CNRI-Python",
"Condor-1.1",
"Naumen",
"Xnet",
"FTL",
"X11",
"MS-PL"
] | 1 | 2021-02-08T20:44:56.000Z | 2021-02-08T20:44:56.000Z | userbot/modules/zipfile.py | GokuMUI7/oubx | bc960e4b4e002c1c45535e13ec24f4547aa23a56 | [
"CNRI-Python",
"Condor-1.1",
"Naumen",
"Xnet",
"FTL",
"X11",
"MS-PL"
] | null | null | null | """ command: .compress """
from telethon import events
import asyncio
import zipfile
from pySmartDL import SmartDL
from userbot.events import register
import time
import os
from userbot import TEMP_DOWNLOAD_DIRECTORY ,bot
from userbot import CMD_HELP
# from uniborg.util import admin_cmd, humanbytes, progress, time_formatter
from userbot.util import admin_cmd, humanbytes, progress, time_formatter
# @borg.on(admin_cmd("compress"))
@register(outgoing=True, pattern=r"^.compress(?: |$)(.*)")
async def _(event):
if event.fwd_from:
return
if not event.is_reply:
await event.edit("Reply to a file to compress it.")
return
mone = await event.edit("Processing ...")
if not os.path.isdir(TEMP_DOWNLOAD_DIRECTORY):
os.makedirs(TEMP_DOWNLOAD_DIRECTORY)
if event.reply_to_msg_id:
reply_message = await event.get_reply_message()
try:
c_time = time.time()
downloaded_file_name = await bot.download_media(
reply_message,
TEMP_DOWNLOAD_DIRECTORY,
progress_callback=lambda d, t: asyncio.get_event_loop().create_task(
progress(d, t, mone, c_time, "trying to download")
)
)
directory_name = downloaded_file_name
await event.edit(downloaded_file_name)
except Exception as e: # pylint:disable=C0103,W0703
await mone.edit(str(e))
zipfile.ZipFile(directory_name + '.zip', 'w', zipfile.ZIP_DEFLATED).write(directory_name)
await bot.send_file(
event.chat_id,
directory_name + ".zip",
caption="`File zipped!`",
force_document=True,
allow_cache=False,
reply_to=event.message.id,
)
await event.edit("DONE!!!")
await asyncio.sleep(7)
await event.delete()
def zipdir(path, ziph):
# ziph is zipfile handle
for root, dirs, files in os.walk(path):
for file in files:
ziph.write(os.path.join(root, file))
os.remove(os.path.join(root, file))
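# Usage sketch for the helper above (illustrative only; it is not called by the
# .compress handler in this module, which zips a single downloaded file). The walk
# writes every file under `path` into the open zip handle and deletes the originals:
# with zipfile.ZipFile("archive.zip", "w", zipfile.ZIP_DEFLATED) as ziph:
#     zipdir("some_directory/", ziph)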
CMD_HELP.update({
"compress":
".compress [optional: <reply to file >]\
\nUsage: make files to zip."
})
| 32.264706 | 93 | 0.640383 |
0eb629cdb2722cef131cea87a5b1776ac9a76edb | 1,824 | py | Python | configs/_base_/datasets/cocostuff.py | yutao1008/MSwin | acdb750e9e0a7b978b1bae0a1d571e197eeb358a | [
"MIT"
] | null | null | null | configs/_base_/datasets/cocostuff.py | yutao1008/MSwin | acdb750e9e0a7b978b1bae0a1d571e197eeb358a | [
"MIT"
] | null | null | null | configs/_base_/datasets/cocostuff.py | yutao1008/MSwin | acdb750e9e0a7b978b1bae0a1d571e197eeb358a | [
"MIT"
] | null | null | null | # dataset settings
dataset_type = 'COCOStuffDataset'
data_root = 'data/cocostuff/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
img_scale = (520, 520)
crop_size = (480, 480)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
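# With the values above, the random resize samples a ratio in [0.5, 2.0] of the
# 520x520 base scale (roughly 260 to 1040 px) before the 480x480 random crop is
# taken; 'Pad' then tops up smaller results to the crop size. (Worked example of
# the augmentation arithmetic only; the pipeline itself is unchanged.)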
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=img_scale,
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=4,
workers_per_gpu=4,
train=dict(
type=dataset_type,
data_root=data_root,
img_dir='images',
ann_dir='annotations',
split='imageLists/train.txt',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
data_root=data_root,
img_dir='images',
ann_dir='annotations',
split='imageLists/test.txt',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_root=data_root,
img_dir='images',
ann_dir='annotations',
split='imageLists/test.txt',
pipeline=test_pipeline))
| 30.4 | 77 | 0.617325 |
1aba2a9f78cb0674d8b8592da03fcb1c0a58e5b7 | 1,572 | py | Python | tests/Poisson/Poisson3d/pdeapp.py | wraith1995/Exasim | ad475c7066c5bde1a7941e1703650e3a0db34fbb | [
"MIT"
] | 1 | 2022-01-09T21:26:24.000Z | 2022-01-09T21:26:24.000Z | tests/Poisson/Poisson3d/pdeapp.py | wraith1995/Exasim | ad475c7066c5bde1a7941e1703650e3a0db34fbb | [
"MIT"
] | null | null | null | tests/Poisson/Poisson3d/pdeapp.py | wraith1995/Exasim | ad475c7066c5bde1a7941e1703650e3a0db34fbb | [
"MIT"
] | null | null | null | # import external modules
import numpy, os
# Add Exasim to Python search path
cdir = os.getcwd(); ii = cdir.find("Exasim");
exec(open(cdir[0:(ii+6)] + "/Installation/setpath.py").read());
# import internal modules
import Preprocessing, Postprocessing, Gencode, Mesh
# Create pde object and mesh object
pde,mesh = Preprocessing.initializeexasim();
# Define a PDE model: governing equations and boundary conditions
pde['model'] = "ModelD"; # ModelC, ModelD, ModelW
pde['modelfile'] = "pdemodel"; # name of a file defining the PDE model
# Set discretization parameters, physical parameters, and solver parameters
pde['porder'] = 3; # polynomial degree
pde['physicsparam'] = numpy.array([1.0, 0.0]); # unit thermal conductivity
pde['tau'] = numpy.array([1.0]); # DG stabilization parameter
# Choose computing platform and set number of processors
#pde['platform'] = "gpu"; # choose this option if NVIDIA GPUs are available
pde['mpiprocs'] = 2; # number of MPI processors
# create a mesh of 8 by 8 by 8 hexes for a unit cube
mesh['p'], mesh['t'] = Mesh.cubemesh(8,8,8,1)[0:2];
# expressions for domain boundaries
mesh['boundaryexpr'] = [lambda p: (p[1,:] < 1e-3), lambda p: (p[0,:] > 1-1e-3), lambda p: (p[1,:] > 1-1e-3), lambda p: (p[0,:] < 1e-3), lambda p: (p[2,:] < 1e-3), lambda p: (p[2,:] > 1-1e-3)];
mesh['boundarycondition'] = numpy.array([1, 1, 1, 1, 1, 1]); # Set boundary condition for each boundary
# call exasim to generate and run C++ code to solve the PDE model
sol, pde, mesh = Postprocessing.exasim(pde,mesh)[0:3];
| 43.666667 | 192 | 0.673664 |
4a4b0d6848a594dd5b64ea412a5149097af6ebe3 | 2,455 | py | Python | aliyun-python-sdk-hbr/aliyunsdkhbr/request/v20170908/CreateContactRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 1,001 | 2015-07-24T01:32:41.000Z | 2022-03-25T01:28:18.000Z | aliyun-python-sdk-hbr/aliyunsdkhbr/request/v20170908/CreateContactRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 363 | 2015-10-20T03:15:00.000Z | 2022-03-08T12:26:19.000Z | aliyun-python-sdk-hbr/aliyunsdkhbr/request/v20170908/CreateContactRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 682 | 2015-09-22T07:19:02.000Z | 2022-03-22T09:51:46.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkhbr.endpoint import endpoint_data
class CreateContactRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'hbr', '2017-09-08', 'CreateContact','hbr')
self.set_protocol_type('https')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Mobile(self):
return self.get_query_params().get('Mobile')
def set_Mobile(self,Mobile):
self.add_query_param('Mobile',Mobile)
def get_Description(self):
return self.get_query_params().get('Description')
def set_Description(self,Description):
self.add_query_param('Description',Description)
def get_MobileVerifyCode(self):
return self.get_query_params().get('MobileVerifyCode')
def set_MobileVerifyCode(self,MobileVerifyCode):
self.add_query_param('MobileVerifyCode',MobileVerifyCode)
def get_Token(self):
return self.get_query_params().get('Token')
def set_Token(self,Token):
self.add_query_param('Token',Token)
def get_EmailVerifyCode(self):
return self.get_query_params().get('EmailVerifyCode')
def set_EmailVerifyCode(self,EmailVerifyCode):
self.add_query_param('EmailVerifyCode',EmailVerifyCode)
def get_Name(self):
return self.get_query_params().get('Name')
def set_Name(self,Name):
self.add_query_param('Name',Name)
def get_Email(self):
return self.get_query_params().get('Email')
def set_Email(self,Email):
self.add_query_param('Email',Email) | 32.733333 | 74 | 0.757637 |
f45f10929a147bc2af9a27f943922859a2cfcdf2 | 4,872 | py | Python | network_model/model_for_distillation.py | yuga-n/ModelLearner | 3193efd5eb15172ba8231a34829942040fcb0fc5 | [
"MIT"
] | null | null | null | network_model/model_for_distillation.py | yuga-n/ModelLearner | 3193efd5eb15172ba8231a34829942040fcb0fc5 | [
"MIT"
] | null | null | null | network_model/model_for_distillation.py | yuga-n/ModelLearner | 3193efd5eb15172ba8231a34829942040fcb0fc5 | [
"MIT"
] | 1 | 2021-09-14T14:52:28.000Z | 2021-09-14T14:52:28.000Z | import keras.callbacks
from typing import List
from typing import Optional
import os
from datetime import datetime
from DataIO import data_loader as dl
from network_model.distillation.flow_wrapper import FlowForDistillation
from network_model.wrapper.abstract_model import AbstractModel, build_record_path
class ModelForDistillation(AbstractModel):
def __init__(self,
train_model: keras.engine.training.Model,
student_model: keras.engine.training.Model,
class_set: List[str],
callbacks: Optional[List[keras.callbacks.Callback]] = None,
monitor: str = "",
will_save_h5: bool = True):
self.__train_model = train_model
self.__student_model = student_model
super().__init__(train_model.input.shape.as_list(),
class_set,
callbacks,
monitor,
will_save_h5)
@property
def model(self):
return self.__student_model
def fit_generator(self,
image_generator: FlowForDistillation,
epochs: int,
validation_data: Optional[FlowForDistillation] = None,
steps_per_epoch: Optional[int] = None,
validation_steps: Optional[int] = None,
temp_best_path: str = "",
save_weights_only: bool = False):
"""
        Fit the model using data produced by the given generator.
        :param image_generator: generator that produces training data from file paths
        :param epochs: number of epochs
        :param validation_data: data used for validation, in the same format as image_generator
:param steps_per_epoch:
:param validation_steps:
:param temp_best_path:
:param save_weights_only:
:return:
"""
callbacks = self.get_callbacks(temp_best_path, save_weights_only)
print("fit builder")
if validation_data is None:
self.__history = self.__student_model.fit_generator(image_generator,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
callbacks=callbacks)
else:
print('epochs', epochs)
self.__history = self.__student_model.fit_generator(image_generator,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps,
epochs=epochs,
validation_data=validation_data,
callbacks=callbacks)
return self
def test(self,
image_generator: FlowForDistillation,
epochs: int,
validation_data: Optional[FlowForDistillation] = None,
normalize_type: dl.NormalizeType = dl.NormalizeType.Div255,
result_dir_name: str = None,
dir_path: str = None,
model_name: str = None,
steps_per_epoch: Optional[int] = None,
validation_steps: Optional[int] = None,
save_weights_only: bool = False
):
"""
        Calculate the accuracy of the model on the given dataset.
        :param image_generator: generator that produces training data from file paths
        :param epochs: number of epochs
        :param validation_data: data used for validation; a tuple of raw data and labels, or the same format as image_generator
        :param normalize_type: how the input data is normalized
        :param result_dir_name: base name used for recording results
        :param dir_path: directory to record into; by default a result directory is created under the current directory
        :param model_name: model name; defaults to "model"
        :param steps_per_epoch: number of generator steps per epoch
        :param validation_steps: number of validation steps per epoch
        :param save_weights_only:
        :return: tuple of the accuracy on the training data and the accuracy on the test data
"""
write_dir_path = build_record_path(result_dir_name, dir_path)
save_tmp_name = model_name + "_best.h5" if self.will_save_h5 else model_name + "_best"
self.fit_generator(image_generator,
epochs,
validation_data,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps,
temp_best_path=os.path.join(write_dir_path, save_tmp_name),
save_weights_only=save_weights_only)
now_result_dir_name = result_dir_name + datetime.now().strftime("%Y%m%d%H%M%S")
self.record_model(now_result_dir_name, dir_path, model_name)
self.record_conf_json(now_result_dir_name, dir_path, normalize_type, model_name)
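# Minimal usage sketch (illustrative only): `teacher_model` and `student_model` are
# assumed to be compiled tf.keras models with matching inputs, and `train_flow` /
# `val_flow` FlowForDistillation instances; none of these names exist in this module.
def _distillation_usage_sketch(teacher_model, student_model, train_flow, val_flow):
    wrapper = ModelForDistillation(teacher_model, student_model, class_set=["class_a", "class_b"])
    # runs training via fit_generator and records the student model afterwards
    wrapper.test(train_flow,
                 epochs=10,
                 validation_data=val_flow,
                 result_dir_name="distillation_run",
                 dir_path="./",
                 model_name="student")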
| 45.111111 | 98 | 0.566297 |
a52584a6b22d58aa92c65a52622b1df0bd18c71e | 26,417 | py | Python | DTLN_model.py | DgtalRock/DTLN | fec686e7a5edaf92b21aa31ff20089af50af92cf | [
"MIT"
] | 1 | 2021-09-09T08:28:08.000Z | 2021-09-09T08:28:08.000Z | DTLN_model.py | DgtalRock/DTLN | fec686e7a5edaf92b21aa31ff20089af50af92cf | [
"MIT"
] | null | null | null | DTLN_model.py | DgtalRock/DTLN | fec686e7a5edaf92b21aa31ff20089af50af92cf | [
"MIT"
] | 2 | 2020-08-11T10:50:14.000Z | 2021-07-15T17:53:49.000Z | # -*- coding: utf-8 -*-
"""
This File contains everything to train the DTLN model.
For running the training see "run_training.py".
To run evaluation with the provided pretrained model see "run_evaluation.py".
Author: Nils L. Westhausen (nils.westhausen@uol.de)
Version: 24.06.2020
This code is licensed under the terms of the MIT-license.
"""
import os, fnmatch
import tensorflow.keras as keras
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Activation, Dense, LSTM, Dropout, \
Lambda, Input, Multiply, Layer, Conv1D
from tensorflow.keras.callbacks import ReduceLROnPlateau, CSVLogger, \
EarlyStopping, ModelCheckpoint
import tensorflow as tf
import soundfile as sf
from wavinfo import WavInfoReader
from random import shuffle, seed
import numpy as np
class audio_generator():
'''
Class to create a Tensorflow dataset based on an iterator from a large scale
audio dataset. This audio generator only supports single channel audio files.
'''
def __init__(self, path_to_input, path_to_s1, len_of_samples, fs, train_flag=False):
'''
Constructor of the audio generator class.
Inputs:
path_to_input path to the mixtures
path_to_s1 path to the target source data
len_of_samples length of audio snippets in samples
fs sampling rate
train_flag flag for activate shuffling of files
'''
# set inputs to properties
self.path_to_input = path_to_input
self.path_to_s1 = path_to_s1
self.len_of_samples = len_of_samples
self.fs = fs
self.train_flag=train_flag
# count the number of samples in your data set (depending on your disk,
# this can take some time)
self.count_samples()
# create iterable tf.data.Dataset object
self.create_tf_data_obj()
def count_samples(self):
'''
Method to list the data of the dataset and count the number of samples.
'''
# list .wav files in directory
self.file_names = fnmatch.filter(os.listdir(self.path_to_input), '*.wav')
# count the number of samples contained in the dataset
self.total_samples = 0
for file in self.file_names:
info = WavInfoReader(os.path.join(self.path_to_input, file))
self.total_samples = self.total_samples + \
int(np.fix(info.data.frame_count/self.len_of_samples))
def create_generator(self):
'''
Method to create the iterator.
'''
# check if training or validation
if self.train_flag:
shuffle(self.file_names)
# iterate over the files
for file in self.file_names:
# read the audio files
noisy, fs_1 = sf.read(os.path.join(self.path_to_input, file))
speech, fs_2 = sf.read(os.path.join(self.path_to_s1, file))
# check if the sampling rates are matching the specifications
if fs_1 != self.fs or fs_2 != self.fs:
raise ValueError('Sampling rates do not match.')
if noisy.ndim != 1 or speech.ndim != 1:
raise ValueError('Too many audio channels. The DTLN audio_generator \
only supports single channel audio data.')
# count the number of samples in one file
num_samples = int(np.fix(noisy.shape[0]/self.len_of_samples))
# iterate over the number of samples
for idx in range(num_samples):
# cut the audio files in chunks
in_dat = noisy[int(idx*self.len_of_samples):int((idx+1)*
self.len_of_samples)]
tar_dat = speech[int(idx*self.len_of_samples):int((idx+1)*
self.len_of_samples)]
# yield the chunks as float32 data
yield in_dat.astype('float32'), tar_dat.astype('float32')
def create_tf_data_obj(self):
'''
Method to to create the tf.data.Dataset.
'''
# creating the tf.data.Dataset from the iterator
self.tf_data_set = tf.data.Dataset.from_generator(
self.create_generator,
(tf.float32, tf.float32),
output_shapes=(tf.TensorShape([self.len_of_samples]), \
tf.TensorShape([self.len_of_samples])),
args=None
)
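# Usage sketch for the generator above (illustrative only; the paths are placeholders
# and the batching values are assumptions, not taken from this file):
def _audio_generator_usage_sketch():
    gen = audio_generator('path/to/noisy_wavs', 'path/to/clean_wavs',
                          len_of_samples=15 * 16000, fs=16000, train_flag=True)
    # the tf.data.Dataset yields (noisy, clean) float32 pairs of fixed length,
    # which can then be batched and prefetched for training
    return gen.tf_data_set.batch(32, drop_remainder=True).prefetch(1)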
class DTLN_model():
'''
Class to create and train the DTLN model
'''
def __init__(self):
'''
Constructor
'''
# defining default cost function
self.cost_function = self.snr_cost
# empty property for the model
self.model = []
# defining default parameters
self.fs = 16000
self.batchsize = 32
self.len_samples = 15
self.activation = 'sigmoid'
self.numUnits = 128
self.numLayer = 2
self.blockLen = 512
self.block_shift = 128
self.dropout = 0.25
self.lr = 1e-3
self.max_epochs = 200
self.encoder_size = 256
self.eps = 1e-7
# reset all seeds to 42 to reduce invariance between training runs
os.environ['PYTHONHASHSEED']=str(42)
seed(42)
np.random.seed(42)
tf.random.set_seed(42)
# some line to correctly find some libraries in TF 2.x
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if len(physical_devices) > 0:
for device in physical_devices:
tf.config.experimental.set_memory_growth(device, enable=True)
@staticmethod
def snr_cost(s_estimate, s_true):
'''
Static Method defining the cost function.
The negative signal to noise ratio is calculated here. The loss is
always calculated over the last dimension.
'''
# calculating the SNR
snr = tf.reduce_mean(tf.math.square(s_true), axis=-1, keepdims=True) / \
(tf.reduce_mean(tf.math.square(s_true-s_estimate), axis=-1, keepdims=True)+1e-7)
# using some more lines, because TF has no log10
num = tf.math.log(snr)
denom = tf.math.log(tf.constant(10, dtype=num.dtype))
loss = -10*(num / (denom))
# returning the loss
return loss
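    # Worked example for the cost above (comments only, not executed): if the clean
    # signal has mean power 1.0 and the residual (s_true - s_estimate) has mean power
    # 0.01, then snr = 100 and the returned loss is -10*log10(100) = -20, i.e.
    # minimising the loss maximises the segmental SNR (here 20 dB).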
def lossWrapper(self):
'''
        A wrapper function which returns the loss function. This is done
        to enable additional arguments to the loss function if necessary.
'''
def lossFunction(y_true,y_pred):
# calculating loss and squeezing single dimensions away
loss = tf.squeeze(self.cost_function(y_pred,y_true))
# calculate mean over batches
loss = tf.reduce_mean(loss)
# return the loss
return loss
# returning the loss function as handle
return lossFunction
'''
In the following some helper layers are defined.
'''
def stftLayer(self, x):
'''
Method for an STFT helper layer used with a Lambda layer. The layer
calculates the STFT on the last dimension and returns the magnitude and
phase of the STFT.
'''
# creating frames from the continuous waveform
frames = tf.signal.frame(x, self.blockLen, self.block_shift)
# calculating the fft over the time frames. rfft returns NFFT/2+1 bins.
stft_dat = tf.signal.rfft(frames)
# calculating magnitude and phase from the complex signal
mag = tf.abs(stft_dat)
phase = tf.math.angle(stft_dat)
# returning magnitude and phase as list
return [mag, phase]
def fftLayer(self, x):
'''
Method for an fft helper layer used with a Lambda layer. The layer
calculates the rFFT on the last dimension and returns the magnitude and
phase of the STFT.
'''
# expanding dimensions
frame = tf.expand_dims(x, axis=1)
# calculating the fft over the time frames. rfft returns NFFT/2+1 bins.
stft_dat = tf.signal.rfft(frame)
# calculating magnitude and phase from the complex signal
mag = tf.abs(stft_dat)
phase = tf.math.angle(stft_dat)
# returning magnitude and phase as list
return [mag, phase]
def ifftLayer(self, x):
'''
Method for an inverse FFT layer used with an Lambda layer. This layer
calculates time domain frames from magnitude and phase information.
As input x a list with [mag,phase] is required.
'''
# calculating the complex representation
s1_stft = (tf.cast(x[0], tf.complex64) *
tf.exp( (1j * tf.cast(x[1], tf.complex64))))
# returning the time domain frames
return tf.signal.irfft(s1_stft)
def overlapAddLayer(self, x):
'''
Method for an overlap and add helper layer used with a Lambda layer.
This layer reconstructs the waveform from a framed signal.
'''
# calculating and returning the reconstructed waveform
return tf.signal.overlap_and_add(x, self.block_shift)
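    # Worked example for the framing above (comments only): with blockLen=512 and
    # block_shift=128 the frames overlap by 75%, and a 1 s clip at fs=16000
    # (16000 samples) yields (16000 - 512) // 128 + 1 = 122 frames;
    # overlap_and_add reverses tf.signal.frame with the same 128-sample shift.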
def seperation_kernel(self, num_layer, mask_size, x, stateful=False):
'''
Method to create a separation kernel.
        !! Important !!: Do not use this layer with a Lambda layer. If used with
        a Lambda layer the gradients are not updated correctly.
Inputs:
num_layer Number of LSTM layers
mask_size Output size of the mask and size of the Dense layer
'''
# creating num_layer number of LSTM layers
for idx in range(num_layer):
x = LSTM(self.numUnits, return_sequences=True, stateful=stateful)(x)
# using dropout between the LSTM layer for regularization
if idx<(num_layer-1):
x = Dropout(self.dropout)(x)
# creating the mask with a Dense and an Activation layer
mask = Dense(mask_size)(x)
mask = Activation(self.activation)(mask)
# returning the mask
return mask
def seperation_kernel_with_states(self, num_layer, mask_size, x,
in_states):
'''
Method to create a separation kernel, which returns the LSTM states.
        !! Important !!: Do not use this layer with a Lambda layer. If used with
        a Lambda layer the gradients are not updated correctly.
Inputs:
num_layer Number of LSTM layers
mask_size Output size of the mask and size of the Dense layer
'''
states_h = []
states_c = []
# creating num_layer number of LSTM layers
for idx in range(num_layer):
in_state = [in_states[:,idx,:, 0], in_states[:,idx,:, 1]]
x, h_state, c_state = LSTM(self.numUnits, return_sequences=True,
unroll=True, return_state=True)(x, initial_state=in_state)
# using dropout between the LSTM layer for regularization
if idx<(num_layer-1):
x = Dropout(self.dropout)(x)
states_h.append(h_state)
states_c.append(c_state)
# creating the mask with a Dense and an Activation layer
mask = Dense(mask_size)(x)
mask = Activation(self.activation)(mask)
out_states_h = tf.reshape(tf.stack(states_h, axis=0),
[1,num_layer,self.numUnits])
out_states_c = tf.reshape(tf.stack(states_c, axis=0),
[1,num_layer,self.numUnits])
out_states = tf.stack([out_states_h, out_states_c], axis=-1)
# returning the mask and states
return mask, out_states
def build_DTLN_model(self, norm_stft=False):
'''
Method to build and compile the DTLN model. The model takes time domain
batches of size (batchsize, len_in_samples) and returns enhanced clips
in the same dimensions. As optimizer for the Training process the Adam
optimizer with a gradient norm clipping of 3 is used.
The model contains two separation cores. The first has an STFT signal
transformation and the second a learned transformation based on 1D-Conv
layer.
'''
# input layer for time signal
time_dat = Input(batch_shape=(None, None))
# calculate STFT
mag,angle = Lambda(self.stftLayer)(time_dat)
# normalizing log magnitude stfts to get more robust against level variations
if norm_stft:
mag_norm = InstantLayerNormalization()(tf.math.log(mag + 1e-7))
else:
# behaviour like in the paper
mag_norm = mag
# predicting mask with separation kernel
mask_1 = self.seperation_kernel(self.numLayer, (self.blockLen//2+1), mag_norm)
# multiply mask with magnitude
estimated_mag = Multiply()([mag, mask_1])
# transform frames back to time domain
estimated_frames_1 = Lambda(self.ifftLayer)([estimated_mag,angle])
# encode time domain frames to feature domain
encoded_frames = Conv1D(self.encoder_size,1,strides=1,use_bias=False)(estimated_frames_1)
# normalize the input to the separation kernel
encoded_frames_norm = InstantLayerNormalization()(encoded_frames)
# predict mask based on the normalized feature frames
mask_2 = self.seperation_kernel(self.numLayer, self.encoder_size, encoded_frames_norm)
# multiply encoded frames with the mask
estimated = Multiply()([encoded_frames, mask_2])
# decode the frames back to time domain
decoded_frames = Conv1D(self.blockLen, 1, padding='causal',use_bias=False)(estimated)
# create waveform with overlap and add procedure
estimated_sig = Lambda(self.overlapAddLayer)(decoded_frames)
# create the model
self.model = Model(inputs=time_dat, outputs=estimated_sig)
# show the model summary
print(self.model.summary())
def build_DTLN_model_stateful(self, norm_stft=False):
'''
        Method to build a stateful DTLN model for real time processing. The model
        takes one time domain frame of size (1, blockLen) and returns one enhanced frame.
'''
# input layer for time signal
time_dat = Input(batch_shape=(1, self.blockLen))
# calculate STFT
mag,angle = Lambda(self.fftLayer)(time_dat)
# normalizing log magnitude stfts to get more robust against level variations
if norm_stft:
mag_norm = InstantLayerNormalization()(tf.math.log(mag + 1e-7))
else:
# behaviour like in the paper
mag_norm = mag
# predicting mask with separation kernel
mask_1 = self.seperation_kernel(self.numLayer, (self.blockLen//2+1), mag_norm, stateful=True)
# multiply mask with magnitude
estimated_mag = Multiply()([mag, mask_1])
# transform frames back to time domain
estimated_frames_1 = Lambda(self.ifftLayer)([estimated_mag,angle])
# encode time domain frames to feature domain
encoded_frames = Conv1D(self.encoder_size,1,strides=1,use_bias=False)(estimated_frames_1)
# normalize the input to the separation kernel
encoded_frames_norm = InstantLayerNormalization()(encoded_frames)
# predict mask based on the normalized feature frames
mask_2 = self.seperation_kernel(self.numLayer, self.encoder_size, encoded_frames_norm, stateful=True)
# multiply encoded frames with the mask
estimated = Multiply()([encoded_frames, mask_2])
# decode the frames back to time domain
decoded_frame = Conv1D(self.blockLen, 1, padding='causal',use_bias=False)(estimated)
# create the model
self.model = Model(inputs=time_dat, outputs=decoded_frame)
# show the model summary
print(self.model.summary())
def compile_model(self):
'''
Method to compile the model for training
'''
# use the Adam optimizer with a clipnorm of 3
optimizerAdam = keras.optimizers.Adam(lr=self.lr, clipnorm=3.0)
# compile model with loss function
self.model.compile(loss=self.lossWrapper(), optimizer=optimizerAdam)
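    # Typical call order (sketch only; see "run_training.py" mentioned in the file
    # header for the actual training script, which is not shown here):
    #   model_runner = DTLN_model()
    #   model_runner.build_DTLN_model(norm_stft=True)
    #   model_runner.compile_model()
    #   model_runner.train_model(run_name, train_mix, train_speech, val_mix, val_speech)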
def create_saved_model(self, weights_file, target_name):
'''
Method to create a saved model folder from a weights file
'''
# check for type
if weights_file.find('_norm_') != -1:
norm_stft = True
else:
norm_stft = False
# build model
self.build_DTLN_model_stateful(norm_stft=norm_stft)
# load weights
self.model.load_weights(weights_file)
# save model
tf.saved_model.save(self.model, target_name)
def create_tf_lite_model(self, weights_file, target_name, use_dynamic_range_quant=False):
'''
        Method to create a pair of TF-lite models from a weights file.
        The conversion creates two models, one for each separation core.
        TF-lite does not support complex numbers yet, so that part of the
        processing must be done outside the model.
        For further information on how the real-time processing can be
        implemented, see "real_time_processing_tf_lite.py".
        The conversion only works with TF 2.3.
'''
# check for type
if weights_file.find('_norm_') != -1:
norm_stft = True
num_elements_first_core = 2 + self.numLayer * 3 + 2
else:
norm_stft = False
num_elements_first_core = self.numLayer * 3 + 2
# build model
self.build_DTLN_model_stateful(norm_stft=norm_stft)
# load weights
self.model.load_weights(weights_file)
#### Model 1 ##########################
mag = Input(batch_shape=(1, 1, (self.blockLen//2+1)))
states_in_1 = Input(batch_shape=(1, self.numLayer, self.numUnits, 2))
        # normalize the log-magnitude STFTs to be more robust against level variations
if norm_stft:
mag_norm = InstantLayerNormalization()(tf.math.log(mag + 1e-7))
else:
# behaviour like in the paper
mag_norm = mag
# predicting mask with separation kernel
mask_1, states_out_1 = self.seperation_kernel_with_states(self.numLayer,
(self.blockLen//2+1),
mag_norm, states_in_1)
model_1 = Model(inputs=[mag, states_in_1], outputs=[mask_1, states_out_1])
#### Model 2 ###########################
estimated_frame_1 = Input(batch_shape=(1, 1, (self.blockLen)))
states_in_2 = Input(batch_shape=(1, self.numLayer, self.numUnits, 2))
# encode time domain frames to feature domain
encoded_frames = Conv1D(self.encoder_size,1,strides=1,
use_bias=False)(estimated_frame_1)
# normalize the input to the separation kernel
encoded_frames_norm = InstantLayerNormalization()(encoded_frames)
# predict mask based on the normalized feature frames
mask_2, states_out_2 = self.seperation_kernel_with_states(self.numLayer,
self.encoder_size,
encoded_frames_norm,
states_in_2)
# multiply encoded frames with the mask
estimated = Multiply()([encoded_frames, mask_2])
# decode the frames back to time domain
decoded_frame = Conv1D(self.blockLen, 1, padding='causal',
use_bias=False)(estimated)
model_2 = Model(inputs=[estimated_frame_1, states_in_2],
outputs=[decoded_frame, states_out_2])
# set weights to submodels
weights = self.model.get_weights()
model_1.set_weights(weights[:num_elements_first_core])
model_2.set_weights(weights[num_elements_first_core:])
# convert first model
converter = tf.lite.TFLiteConverter.from_keras_model(model_1)
if use_dynamic_range_quant:
converter.optimizations = [tf.lite.Optimize.DEFAULT]
tflite_model = converter.convert()
with tf.io.gfile.GFile(target_name + '_1.tflite', 'wb') as f:
f.write(tflite_model)
# convert second model
converter = tf.lite.TFLiteConverter.from_keras_model(model_2)
if use_dynamic_range_quant:
converter.optimizations = [tf.lite.Optimize.DEFAULT]
tflite_model = converter.convert()
with tf.io.gfile.GFile(target_name + '_2.tflite', 'wb') as f:
f.write(tflite_model)
print('TF lite conversion complete!')
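        # Note (illustrative, not part of the original method): the two exported
        # cores are typically driven frame by frame with tf.lite.Interpreter,
        # feeding core 1 the magnitude frame plus its LSTM states and core 2 the
        # time-domain frame plus its states, while FFT/iFFT and overlap-add are
        # done outside the models; see "real_time_processing_tf_lite.py".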
def train_model(self, runName, path_to_train_mix, path_to_train_speech, \
path_to_val_mix, path_to_val_speech):
'''
Method to train the DTLN model.
'''
# create save path if not existent
savePath = './models_'+ runName+'/'
if not os.path.exists(savePath):
os.makedirs(savePath)
# create log file writer
csv_logger = CSVLogger(savePath+ 'training_' +runName+ '.log')
# create callback for the adaptive learning rate
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5,
patience=3, min_lr=10**(-10), cooldown=1)
# create callback for early stopping
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0,
patience=10, verbose=0, mode='auto', baseline=None)
# create model check pointer to save the best model
checkpointer = ModelCheckpoint(savePath+runName+'.h5',
monitor='val_loss',
verbose=1,
save_best_only=True,
save_weights_only=True,
mode='auto',
save_freq='epoch'
)
# calculate length of audio chunks in samples
len_in_samples = int(np.fix(self.fs * self.len_samples /
self.block_shift)*self.block_shift)
# create data generator for training data
generator_input = audio_generator(path_to_train_mix,
path_to_train_speech,
len_in_samples,
self.fs, train_flag=True)
dataset = generator_input.tf_data_set
dataset = dataset.batch(self.batchsize, drop_remainder=True).repeat()
# calculate number of training steps in one epoch
steps_train = generator_input.total_samples//self.batchsize
# create data generator for validation data
generator_val = audio_generator(path_to_val_mix,
path_to_val_speech,
len_in_samples, self.fs)
dataset_val = generator_val.tf_data_set
dataset_val = dataset_val.batch(self.batchsize, drop_remainder=True).repeat()
# calculate number of validation steps
steps_val = generator_val.total_samples//self.batchsize
# start the training of the model
self.model.fit(
x=dataset,
batch_size=None,
steps_per_epoch=steps_train,
epochs=self.max_epochs,
verbose=1,
validation_data=dataset_val,
validation_steps=steps_val,
callbacks=[checkpointer, reduce_lr, csv_logger, early_stopping],
max_queue_size=50,
workers=4,
use_multiprocessing=True)
# clear out garbage
tf.keras.backend.clear_session()
class InstantLayerNormalization(Layer):
'''
Class implementing instant layer normalization. It can also be called
channel-wise layer normalization and was proposed by
Luo & Mesgarani (https://arxiv.org/abs/1809.07454v2)
'''
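    # Per frame, call() below computes y = gamma * (x - mean) / sqrt(var + epsilon) + beta,
    # with mean and var taken over the channel (last) axis only.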
def __init__(self, **kwargs):
'''
Constructor
'''
super(InstantLayerNormalization, self).__init__(**kwargs)
self.epsilon = 1e-7
self.gamma = None
self.beta = None
def build(self, input_shape):
'''
Method to build the weights.
'''
shape = input_shape[-1:]
# initialize gamma
self.gamma = self.add_weight(shape=shape,
initializer='ones',
trainable=True,
name='gamma')
# initialize beta
self.beta = self.add_weight(shape=shape,
initializer='zeros',
trainable=True,
name='beta')
def call(self, inputs):
'''
Method to call the Layer. All processing is done here.
'''
# calculate mean of each frame
mean = tf.math.reduce_mean(inputs, axis=[-1], keepdims=True)
# calculate variance of each frame
variance = tf.math.reduce_mean(tf.math.square(inputs - mean),
axis=[-1], keepdims=True)
# calculate standard deviation
std = tf.math.sqrt(variance + self.epsilon)
# normalize each frame independently
outputs = (inputs - mean) / std
# scale with gamma
outputs = outputs * self.gamma
# add the bias beta
outputs = outputs + self.beta
# return output
return outputs
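# --- usage sketch (added for illustration; not part of the original file) ----
# The class name `DTLN_model` and the checkpoint path below are assumptions;
# adapt them to the actual definitions at the top of this module.
if __name__ == '__main__':
    converter = DTLN_model()
    # builds the stateful model internally, loads the weights and writes the
    # two TF-lite cores next to the given target name
    converter.create_tf_lite_model('./models_run/run_norm_weights.h5',
                                   './dtln_model', use_dynamic_range_quant=False)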
| 41.147975 | 109 | 0.593746 |
03f473f81abfcfe1e26fe9864864f9a3b037bb4e | 1,826 | py | Python | tests/databases/value/fixtures/mongo_db.py | stevenbennett96/stk | 6e5af87625b83e0bfc7243bc42d8c7a860cbeb76 | [
"MIT"
] | null | null | null | tests/databases/value/fixtures/mongo_db.py | stevenbennett96/stk | 6e5af87625b83e0bfc7243bc42d8c7a860cbeb76 | [
"MIT"
] | null | null | null | tests/databases/value/fixtures/mongo_db.py | stevenbennett96/stk | 6e5af87625b83e0bfc7243bc42d8c7a860cbeb76 | [
"MIT"
] | null | null | null | from dataclasses import dataclass
from typing import Callable
import pymongo
import pytest
import stk
from ..case_data import CaseData
@dataclass(frozen=True)
class CaseDataData:
"""
Data used to create a :class:`.CaseData` instance.
Attributes
----------
get_database : :class:`callable`
Creates the database to test. Takes a
:class:`pymongo.MongoClient` as input and returns a
:class:`.ValueMongoDb` instance.
molecule : :class:`.Molecule`
The molecule to test.
value : :class:`object`
The value to put into the database.
"""
get_database: Callable[[pymongo.MongoClient], stk.ValueMongoDb]
molecule: stk.Molecule
value: object
@pytest.fixture(
params=(
lambda: CaseDataData(
get_database=lambda mongo_client: stk.ValueMongoDb(
mongo_client=mongo_client,
collection='values',
database='_stk_test_database_for_testing',
put_lru_cache_size=0,
get_lru_cache_size=0,
),
molecule=stk.BuildingBlock('BrCCBr'),
value=12,
),
lambda: CaseDataData(
get_database=lambda mongo_client: stk.ValueMongoDb(
mongo_client=mongo_client,
collection='values',
database='_stk_test_database_for_testing',
put_lru_cache_size=128,
get_lru_cache_size=128,
),
molecule=stk.BuildingBlock('BrCCBr'),
value=12,
),
),
)
def mongo_db(
request,
mongo_client: pymongo.MongoClient,
) -> CaseData:
data = request.param()
return CaseData(
database=data.get_database(mongo_client),
molecule=data.molecule,
value=data.value,
)
| 24.675676 | 67 | 0.600219 |
934ef35a6bb93cde2a0a5658cdb61cf81b4058b6 | 81 | py | Python | src/resources/__init__.py | amritha-devadiga/aaib4-inference | 963d1847b1946fed6afbccb58f3f13c8588ae447 | [
"MIT"
] | null | null | null | src/resources/__init__.py | amritha-devadiga/aaib4-inference | 963d1847b1946fed6afbccb58f3f13c8588ae447 | [
"MIT"
] | null | null | null | src/resources/__init__.py | amritha-devadiga/aaib4-inference | 963d1847b1946fed6afbccb58f3f13c8588ae447 | [
"MIT"
] | null | null | null | from .translate import InteractiveMultiTranslateResourceNew,NMTTranslateResource
| 40.5 | 80 | 0.925926 |
f7e7cb7871a7b8621bda8fd393c7e877102ee0bf | 4,155 | py | Python | FEV_KEGG/Experiments/12.py | ryhaberecht/FEV-KEGG | f55f294aae07b76954ed823f0c2e6d189fb2b1bb | [
"MIT"
] | null | null | null | FEV_KEGG/Experiments/12.py | ryhaberecht/FEV-KEGG | f55f294aae07b76954ed823f0c2e6d189fb2b1bb | [
"MIT"
] | 2 | 2019-05-30T06:42:08.000Z | 2021-05-06T10:37:40.000Z | FEV_KEGG/Experiments/12.py | ryhaberecht/FEV-KEGG | f55f294aae07b76954ed823f0c2e6d189fb2b1bb | [
"MIT"
] | null | null | null | """
Question
--------
Which EC numbers are not present in all Escherichia coli K-12 organisms?
Method
------
- Get all metabolic pathways of all E. coli K-12 organisms from KEGG.
- For each organism, combine all pathways to the metabolic network, by UNION operation.
- Convert this metabolic network into a substance-ecNumber graph.
- Combine all organisms' networks to a single unified E. coli network, by UNION operation.
- Combine all organisms' networks to a consensus network, by INTERSECT operation, leaving only substances and EC numbers that occur in all organisms.
- Subtract the consensus network from the E. coli network, by DIFFERENCE operation, leaving only substances and EC numbers that do not occur in all organisms.
- Print all EC numbers that do not occur in all organisms.
Result
------
::
69 results
1.1.1.-
1.1.1.133
1.1.1.157
1.1.1.251
1.1.1.271
1.1.1.28
1.1.1.381
1.1.1.57
1.1.1.58
1.1.1.65
1.1.1.85
1.1.3.15
1.14.11.17
1.14.13.149
1.16.3.1
1.17.1.9
1.2.1.16
1.2.1.2
1.2.1.20
1.2.1.39
1.2.1.79
1.2.1.91
1.2.7.-
1.2.7.1
1.4.3.21
1.5.1.34
2.1.1.10
2.1.2.10
2.3.1.174
2.3.1.223
2.3.3.13
2.3.3.5
2.5.1.7
2.6.1.16
2.7.1.16
2.7.1.200
2.7.1.6
2.7.7.13
2.7.8.7
2.8.3.-
2.9.1.1
3.1.2.-
3.1.3.7
3.2.1.14
3.2.1.17
3.2.1.23
3.2.1.37
3.3.2.12
3.5.1.25
3.5.4.1
4.1.3.30
4.1.3.39
4.2.1.33
4.2.1.35
4.2.1.47
4.2.1.79
4.2.1.80
4.2.1.9
4.4.1.15
4.6.1.1
5.1.3.13
5.3.1.4
5.3.2.6
5.3.3.18
5.4.2.8
5.4.99.2
5.4.99.9
6.2.1.17
6.2.1.30
Conclusion
----------
Some EC numbers are not shared between organisms.
It could make sense to ignore incomplete EC numbers, as they may represent identical reactions on identical substances and could, thus, be counted twice.
    For example, 1.2.7.- might merely represent incomplete data, while the associated enzyme actually performs 1.2.7.1, causing a duplicate in the result list.
"""
from FEV_KEGG.Graph.SubstanceGraphs import SubstanceReactionGraph, SubstanceGeneGraph, SubstanceEcGraph
import FEV_KEGG.KEGG.Organism
if __name__ == '__main__':
#- Get all metabolic pathways of all E. coli organisms from KEGG.
eColiOrganisms = FEV_KEGG.KEGG.Organism.Group(searchString = 'Escherichia coli K-12').organisms
#- For each organism, combine all pathways to the metabolic network, by UNION operation.
organismEcGraphs = []
for organism in eColiOrganisms:
organismPathways = organism.getMetabolicPathways()
organismSubstanceReactionGraph = SubstanceReactionGraph.fromPathway(organismPathways)
#- Convert this metabolic network into a substance-ecNumber graph.
organismSubstanceGeneGraph = SubstanceGeneGraph.fromSubstanceReactionGraph(organismSubstanceReactionGraph)
organismSubstanceEcGraph = SubstanceEcGraph.fromSubstanceGeneGraph(organismSubstanceGeneGraph)
organismEcGraphs.append(organismSubstanceEcGraph)
firstGraph = organismEcGraphs.pop(0)
#- Combine all organisms' networks to a single unified E. coli network, by UNION operation.
unifiedEcGraph = firstGraph
unifiedEcGraph = unifiedEcGraph.union(organismEcGraphs)
#- Combine all organisms' networks to a consensus network, by INTERSECT operation, leaving only substances and EC numbers that occur in all organisms.
intersectedEcGraph = firstGraph
intersectedEcGraph = intersectedEcGraph.intersection(organismEcGraphs)
#- Subtract the consensus network from the E. coli network, by DIFFERENCE operation, leaving only substances and EC numbers that do not occur in all organisms.
differenceEcGraph = unifiedEcGraph.difference(intersectedEcGraph)
#- Print all EC numbers that do not occur in all organisms.
output = []
for ecNumber in differenceEcGraph.getECs():
output.append(ecNumber.__str__())
output.sort()
print(str(len(output)) + ' results')
for line in output:
print(line) | 29.678571 | 163 | 0.67485 |
0a546b4e94ef4c430e39a82fecf5e63d4561c129 | 11,263 | py | Python | src/commonwidgets.py | takumak/tuna | a50d1d34c9917d73f02257bcffcf7cc6bf582747 | [
"MIT"
] | null | null | null | src/commonwidgets.py | takumak/tuna | a50d1d34c9917d73f02257bcffcf7cc6bf582747 | [
"MIT"
] | null | null | null | src/commonwidgets.py | takumak/tuna | a50d1d34c9917d73f02257bcffcf7cc6bf582747 | [
"MIT"
] | null | null | null | import sys
import re
import logging
import html
from PyQt5.QtCore import Qt, pyqtSignal, QObject, QPoint, QRect, QSize, QEvent, QCoreApplication
from PyQt5.QtGui import QKeySequence, QValidator, QPainter, \
QPen, QBrush, QColor, QPixmap, QMouseEvent
from PyQt5.QtWidgets import QApplication, QWidget, QTableWidget, QMenu, \
QFrame, QVBoxLayout, QHBoxLayout, QWidget, QLabel, QLineEdit, QLayout, \
QComboBox, QGridLayout, QPushButton
import numpy as np
import log
__all__ = [
'TableWidget', 'HSeparator', 'HBoxLayout', 'VBoxLayout',
'ErrorBaloon', 'ErrorCheckEdit', 'FlowLayout',
'DescriptionWidget', 'ComboBoxWithDescriptor',
'ExpanderWidget'
]
class TableWidget(QTableWidget):
def __init__(self):
super().__init__()
self.menu = QMenu()
self.keys = []
self.addAction('&Copy selected', self.copySelected, QKeySequence.Copy)
self.addAction('&Paste', self.paste, QKeySequence.Paste)
def addAction(self, label, func, key):
self.menu.addAction(label, func, key)
self.keys.append((key, func))
def keyPressEvent(self, ev):
for key, func in self.keys:
if isinstance(key, QKeySequence.StandardKey):
m = ev.matches(key)
else:
m = (ev.key() | int(ev.modifiers())) in key
if m:
ev.accept()
func()
return
super().keyPressEvent(ev)
def contextMenuEvent(self, ev):
self.menu.exec_(ev.globalPos())
def getSelectedItemTable(self):
col, row, val = [], [], []
for index in self.selectedIndexes():
col.append(self.visualColumn(index.column()))
row.append(self.visualRow(index.row()))
val.append(self.item(index.row(), index.column()))
col = np.array(col) - min(col)
row = np.array(row) - min(row)
data = dict(zip(zip(row, col), val))
tbl = [[data[(r, c)] for c in range(max(col)+1)] for r in range(max(row)+1)]
return tbl
def copySelected(self):
tbl = self.getSelectedItemTable()
QApplication.clipboard().setText('\n'.join([
'\t'.join([item.text().strip() for item in r]) for r in tbl]))
def paste(self):
text = QApplication.clipboard().text()
data = [[c.strip() for c in l.split('\t')] for l in re.split(r'\r?\n', text)]
sel = self.getSelectedItemTable()
if len(sel) == 0:
return
elif len(sel) == 1 and len(sel[0]) == 1:
r0 = self.visualRow(sel[0][0].row())
c0 = self.visualColumn(sel[0][0].column())
v2l_r = dict([(self.visualRow(r), r) for r in range(self.rowCount())])
v2l_c = dict([(self.visualColumn(c), c) for c in range(self.columnCount())])
for r, vals in enumerate(data):
for c, text in enumerate(vals):
item = self.item(v2l_r[r0+r], v2l_c[c0+c])
if item and item.flags() & Qt.ItemIsEditable:
item.setText(text)
self.cellChanged.emit(item.row(), item.column())
return
selerr_msg = 'The shapes of table selection and paste data are different'
if len(sel) != len(data):
logging.error(selerr_msg)
return
for items, values in zip(sel, data):
if len(items) != len(values):
logging.error(selerr_msg)
return
for item, val in zip(items, values):
if val and not item:
logging.error(selerr_msg)
return
for items, values in zip(sel, data):
for item, val in zip(items, values):
if item:
item.setText(val)
class HSeparator(QFrame):
def __init__(self):
super().__init__()
self.setFrameShape(QFrame.HLine)
self.setFrameShadow(QFrame.Sunken)
class BoxLayoutBase():
def __init__(self, vmargins=False, hmargins=False):
    vm = 4 if vmargins else 0
hm = 4 if hmargins else 0
self.setContentsMargins(hm, vm, hm, vm)
self.setSpacing(4)
class HBoxLayout(QHBoxLayout, BoxLayoutBase): pass
class VBoxLayout(QVBoxLayout, BoxLayoutBase): pass
class ErrorBaloon(QFrame):
def __init__(self):
super().__init__()
self.label = QLabel()
vbox = VBoxLayout()
vbox.setContentsMargins(4, 4, 4, 4)
vbox.addWidget(self.label)
self.setLayout(vbox)
self.setFrameShape(QFrame.StyledPanel)
self.setWindowFlags(Qt.ToolTip)
def setMessage(self, text):
self.label.setText('<span style="font-weight:bold; color:#800">%s</span>' % html.escape(text))
def updatePosition(self, widget):
self.adjustSize()
r = self.rect()
tl = widget.mapToGlobal(QPoint(0, 0))
tr = tl + QPoint(widget.size().width(), 0)
x = (tl.x() + tr.x())/2 - r.width()/2
self.move(x, tl.y() - r.height())
class ErrorCheckEdit(QLineEdit):
def __init__(self, validator, *args, **kwargs):
super().__init__(*args, **kwargs)
self.validator = validator
self.baloon = ErrorBaloon()
self.state = QValidator.Acceptable
self.textChanged.connect(lambda t: self.checkValue())
def checkValue(self):
try:
self.state, message = self.validator(self.text())
except:
log.warnException()
self.state = QValidator.Invalid
message = '%s %s' % sys.exc_info()[:2]
self.baloon.setMessage(message)
if self.state != QValidator.Acceptable and self.hasFocus():
self.showBaloon()
else:
self.hideBaloon()
def showBaloon(self):
self.baloon.updatePosition(self)
self.baloon.show()
self.setStyleSheet('');
def hideBaloon(self):
self.baloon.hide()
if self.state == QValidator.Acceptable:
self.setStyleSheet('');
else:
self.setStyleSheet('background-color:red');
def focusInEvent(self, ev):
super().focusInEvent(ev)
if self.state == QValidator.Acceptable:
self.hideBaloon()
else:
self.showBaloon()
def focusOutEvent(self, ev):
super().focusOutEvent(ev)
self.hideBaloon()
class FlowLayout(QLayout):
def __init__(self):
super().__init__()
self.items = []
def count(self):
return len(self.items)
def addItem(self, item):
self.items.append(item)
def setGeometry(self, rect):
super().setGeometry(rect)
self.doLayout(rect, False)
def sizeHint(self):
if self.count() == 0:
return QSize(0, 0)
s = [item.minimumSize() for item in self.items]
w = sum([i.width() for i in s])
h = max([i.height() for i in s])
return QSize(w, h)
def expandingDirections(self):
return Qt.Orientation(0)
def hasHeightForWidth(self):
return True
def heightForWidth(self, width):
return self.doLayout(QRect(0, 0, width, 0), True)
def itemAt(self, idx):
try:
return self.items[idx]
except IndexError:
return None
def takeAt(self, idx):
try:
item = self.items[idx]
item.widget().close()
del self.items[idx]
return item
except IndexError:
return None
def doLayout(self, rect, test):
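    # lay items out left to right with 4 px spacing, wrapping to a new row when
    # the next item would cross the right edge; returns the total height used
    # (which heightForWidth() reports back to the owning widget)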
l, t = rect.x(), rect.y()
r, b = l+rect.width(), t+rect.height()
width = 0
col, rh = 0, 0
x, y = l, t
for item in self.items:
s = item.minimumSize()
if col > 0 and x+s.width() >= r:
x = l
y += rh + 4
col, rh = 0, 0
if not test:
item.setGeometry(QRect(x, y, s.width(), s.height()))
col += 1
rh = max([rh, s.height()])
x += s.width()+4
width = max(width, x-4)
return y-t+rh
class DescriptionWidget(QFrame):
closed = pyqtSignal(QObject)
def __init__(self):
super().__init__()
vbox = VBoxLayout()
vbox.setContentsMargins(4, 4, 4, 4)
self.setLayout(vbox)
self.vbox = vbox
self.setFrameShape(QFrame.StyledPanel)
def addTitle(self, title):
label = QLabel(title)
label.setContentsMargins(16, 4, 16, 4)
vbox = VBoxLayout()
vbox.addWidget(label)
frame = QFrame()
frame.setFrameShape(QFrame.StyledPanel)
frame.setContentsMargins(4, 4, 4, 4)
frame.setLayout(vbox)
self.vbox.addWidget(frame)
def addLabel(self, text, **kwargs):
label = QLabel(text)
if kwargs.get('richtext'):
label.setTextFormat(Qt.RichText)
label.setTextInteractionFlags(Qt.TextBrowserInteraction)
label.setOpenExternalLinks(True)
self.vbox.addWidget(label)
def addImage(self, image):
imglabel = QLabel()
imglabel.setContentsMargins(16, 4, 16, 4)
imglabel.setPixmap(QPixmap.fromImage(image))
self.vbox.addWidget(imglabel)
def addGrid(self):
grid = QGridLayout()
grid.setContentsMargins(16, 4, 4, 16)
grid.setColumnStretch(1, 1)
grid.setHorizontalSpacing(16)
self.vbox.addLayout(grid)
return grid
def closeEvent(self, event):
super().closeEvent(event)
self.closed.emit(self)
class ComboBoxWithDescriptor(QComboBox):
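  # Combo box whose items may carry an extra "descriptor" widget stored under
  # Qt.UserRole+1: hovering an item pops that widget up next to the dropdown,
  # and mouse events over the popup are forwarded to it via the event filter.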
mouseEvents = (
QEvent.MouseButtonPress,
QEvent.MouseButtonRelease,
QEvent.MouseMove,
QEvent.MouseButtonDblClick
)
def __init__(self):
super().__init__()
self.currDescriptor = None
self.preventHide = False
self.view().entered.connect(self.showDescriptor)
def closeDescriptor(self):
if self.currDescriptor:
self.currDescriptor.close()
self.currDescriptor = None
def descriptorClosed(self, desc):
QApplication.instance().removeEventFilter(self)
desc.closed.disconnect(self.descriptorClosed)
def showDescriptor(self, index):
self.closeDescriptor()
widget = index.data(Qt.UserRole+1)
if not isinstance(widget, QWidget):
return
view = self.view()
pos = view.mapToGlobal(QPoint(view.width(), view.visualRect(index).y()))
widget.setWindowFlags(Qt.ToolTip)
widget.move(pos)
widget.show()
widget.closed.connect(self.descriptorClosed)
QApplication.instance().installEventFilter(self)
self.currDescriptor = widget
@classmethod
def isDescendant(self, widget, ancestor):
while isinstance(widget, QWidget):
if widget == ancestor:
return True
widget = widget.parentWidget()
return False
def eventFilter(self, obj, event):
if event.type() in self.mouseEvents and self.isDescendant(obj, self.view().window()):
w = QApplication.widgetAt(event.globalPos())
if self.isDescendant(w, self.currDescriptor):
localpos = w.mapFromGlobal(event.globalPos())
newev = QMouseEvent(
event.type(),
localpos,
event.screenPos(),
event.button(),
event.buttons(),
event.modifiers()
)
QApplication.sendEvent(w, newev)
self.preventHide = True
if event.type() in (QEvent.Close, QEvent.Hide) and obj == self.view().window():
self.closeDescriptor()
return False
def hidePopup(self):
if self.preventHide:
self.preventHide = False
return
super().hidePopup()
class ExpanderWidget(QWidget):
def __init__(self, label, widget):
super().__init__()
if isinstance(widget, QLayout):
layout = widget
widget = QWidget()
widget.setLayout(layout)
self.button = QPushButton(label)
self.button.setCheckable(True)
self.button.toggled.connect(self.buttonToggled)
self.widget = widget
self.buttonToggled()
vbox = VBoxLayout()
vbox.addWidget(self.button)
vbox.addWidget(self.widget)
self.setLayout(vbox)
def buttonToggled(self, *args):
self.widget.setVisible(self.button.isChecked())
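# --- usage sketch (illustrative; not part of the original module) ------------
if __name__ == '__main__':
  # smoke test: an ExpanderWidget wrapping a small layout
  app = QApplication(sys.argv)
  inner = VBoxLayout()
  inner.addWidget(QLabel('hidden until the button above is toggled'))
  expander = ExpanderWidget('Details', inner)
  expander.show()
  sys.exit(app.exec_())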
| 25.4819 | 98 | 0.643257 |
4e3939907daa9b1febbf723bfa4ce38baca20f79 | 673 | py | Python | titration/utils/devices/temperature_probe.py | kieransukachevin/AlkalinityTitrator | 09b642ee1368278b7b4fc180bed50ff538a0938a | [
"MIT"
] | null | null | null | titration/utils/devices/temperature_probe.py | kieransukachevin/AlkalinityTitrator | 09b642ee1368278b7b4fc180bed50ff538a0938a | [
"MIT"
] | 31 | 2021-06-29T17:53:56.000Z | 2021-08-19T21:59:03.000Z | titration/utils/devices/temperature_probe.py | kieransukachevin/AlkalinityTitrator | 09b642ee1368278b7b4fc180bed50ff538a0938a | [
"MIT"
] | 4 | 2021-02-12T23:21:17.000Z | 2021-11-15T16:55:38.000Z | import adafruit_max31865
import busio
import digitalio
from titration.utils import constants
class Temperature_Probe:
def __init__(self, sck, mosi, miso, cs, wires=2):
self.spi = busio.SPI(sck, MOSI=mosi, MISO=miso)
self.cs = digitalio.DigitalInOut(cs)
self.sensor = adafruit_max31865.MAX31865(
self.spi,
self.cs,
wires=wires,
rtd_nominal=constants.TEMPERATURE_NOMINAL_RESISTANCE,
ref_resistor=constants.TEMPERATURE_REF_RESISTANCE,
)
def get_temperature(self):
return self.sensor.temperature
def get_resistance(self):
return self.sensor.resistance
| 26.92 | 65 | 0.67162 |
21369cddeae1f7cdac0a1e2e4d107a5fff0b26c5 | 1,509 | py | Python | python/marvin/tests/utils/datamodel/test_query.py | margudo/marvin | 6f5a11b5b7ef80dbdb43a4538e27ccda126bab6e | [
"BSD-3-Clause"
] | null | null | null | python/marvin/tests/utils/datamodel/test_query.py | margudo/marvin | 6f5a11b5b7ef80dbdb43a4538e27ccda126bab6e | [
"BSD-3-Clause"
] | null | null | null | python/marvin/tests/utils/datamodel/test_query.py | margudo/marvin | 6f5a11b5b7ef80dbdb43a4538e27ccda126bab6e | [
"BSD-3-Clause"
] | null | null | null | # !usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: Brian Cherinka
# @Date: 2018-11-15 10:27:30
# @Last modified by: Brian Cherinka
# @Last Modified time: 2018-11-16 10:12:52
from __future__ import print_function, division, absolute_import
import pytest
from marvin import config
from marvin.utils.datamodel.query import datamodel
PARAM_COUNT = {'MPL-4': {'all': 571, 'nospaxels': 309, 'nodap': 309},
'MPL-5': {'all': 703, 'nospaxels': 322, 'nodap': 301},
'MPL-6': {'all': 1676, 'nospaxels': 1008, 'nodap': 1031},
'MPL-7': {'all': 1676, 'nospaxels': 1008, 'nodap': 1031},
'DR15': {'all': 1676, 'nospaxels': 1008, 'nodap': 1031},
'MPL-8': {'all': 1676, 'nospaxels': 1008, 'nodap': 1031}
}
RELEASES = config._allowed_releases.keys()
@pytest.fixture(params=RELEASES)
def release(request):
"""Yield a release."""
return request.param
@pytest.fixture
def paramtype():
return 'all' if config._allow_DAP_queries else 'nodap'
class TestDataModel(object):
def test_local_param_count(self, release, paramtype):
dm = datamodel[release]
assert len(dm.parameters) == PARAM_COUNT[release][paramtype]
def test_remote_param_count(self, monkeypatch, db_off, release, paramtype):
monkeypatch.setenv('MANGA_LOCALHOST', 0)
dm = datamodel[release]
assert len(dm.parameters) == PARAM_COUNT[release][paramtype]
| 27.944444 | 79 | 0.638834 |
b02f29b283d8147b85277bedffef512bbf79a389 | 351 | py | Python | example_project/misc/debug2/handlers.py | ghuntley/simpleapi | e64e05e9b2276098d3442db174a4d0204be56b39 | [
"MIT"
] | 1 | 2019-06-27T11:41:03.000Z | 2019-06-27T11:41:03.000Z | example_project/misc/debug2/handlers.py | ghuntley/simpleapi | e64e05e9b2276098d3442db174a4d0204be56b39 | [
"MIT"
] | null | null | null | example_project/misc/debug2/handlers.py | ghuntley/simpleapi | e64e05e9b2276098d3442db174a4d0204be56b39 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import urllib
from simpleapi import Namespace
class MyAPI(Namespace):
def add_one(self, a, b):
a = a + 1
return a + b
add_one.published = True
add_one.constraints = lambda ns, key, val: int(val)
def download(self, url):
return urllib.urlopen(url).read()
download.published = True | 21.9375 | 55 | 0.623932 |
02956112e3985a5786862c9a38d1168dc2350f02 | 16,242 | py | Python | gui/plugins/cron_view_test.py | nahidupa/grr | 100a9d85ef2abb234e12e3ac2623caffb4116be7 | [
"Apache-2.0"
] | 1 | 2015-06-24T09:07:20.000Z | 2015-06-24T09:07:20.000Z | gui/plugins/cron_view_test.py | nahidupa/grr | 100a9d85ef2abb234e12e3ac2623caffb4116be7 | [
"Apache-2.0"
] | 3 | 2020-02-11T22:29:15.000Z | 2021-06-10T17:44:31.000Z | gui/plugins/cron_view_test.py | nahidupa/grr | 100a9d85ef2abb234e12e3ac2623caffb4116be7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- mode: python; encoding: utf-8 -*-
"""Test the cron_view interface."""
import mock
from grr.gui import runtests_test
from grr.lib import aff4
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import test_lib
from grr.lib.aff4_objects import cronjobs
class TestCronView(test_lib.GRRSeleniumTest):
"""Test the Cron view GUI."""
  def AddJobStatus(self, job, status):
    with self.ACLChecksDisabled():
      # open the cron job URN passed by the caller instead of ignoring it and
      # shadowing the `job` argument with the opened object
      with aff4.FACTORY.OpenWithLock(job, token=self.token) as cron_job:
        cron_job.Set(cron_job.Schema.LAST_RUN_TIME(
            rdfvalue.RDFDatetime().Now()))
        cron_job.Set(cron_job.Schema.LAST_RUN_STATUS(status=status))
def setUp(self):
super(TestCronView, self).setUp()
with self.ACLChecksDisabled():
with mock.patch.object(cronjobs, "GetStartTime", autospec=True,
return_value=rdfvalue.RDFDatetime().Now()):
cronjobs.ScheduleSystemCronFlows(token=self.token)
cronjobs.CRON_MANAGER.RunOnce(token=self.token)
def testCronView(self):
self.Open("/")
self.WaitUntil(self.IsElementPresent, "client_query")
self.Click("css=a[grrtarget=ManageCron]")
# Table should contain Last Run
self.WaitUntil(self.IsTextPresent, "Last Run")
# Table should contain system cron jobs
self.WaitUntil(self.IsTextPresent, "GRRVersionBreakDown")
self.WaitUntil(self.IsTextPresent, "LastAccessStats")
self.WaitUntil(self.IsTextPresent, "OSBreakDown")
# Select a Cron.
self.Click("css=td:contains('OSBreakDown')")
# Check that there's one flow in the list.
self.WaitUntil(self.IsElementPresent,
"css=#main_bottomPane td:contains('OSBreakDown')")
def testMessageIsShownWhenNoCronJobSelected(self):
self.Open("/")
self.WaitUntil(self.IsElementPresent, "client_query")
self.Click("css=a[grrtarget=ManageCron]")
self.WaitUntil(self.IsTextPresent,
"Please select a cron job to see the details.")
def testShowsCronJobDetailsOnClick(self):
self.Open("/")
self.Click("css=a[grrtarget=ManageCron]")
self.Click("css=td:contains('OSBreakDown')")
# Tabs should appear in the bottom pane
self.WaitUntil(self.IsElementPresent, "css=#main_bottomPane #Details")
self.WaitUntil(self.IsElementPresent, "css=#main_bottomPane #Flows")
self.WaitUntil(self.IsTextPresent, "CURRENT_FLOW_URN")
self.WaitUntil(self.IsTextPresent, "CRON_ARGS")
# Click on "Flows" tab
self.Click("css=#main_bottomPane #Flows")
# Click on the first flow and wait for flow details panel to appear.
self.Click("css=#main_bottomPane td:contains('OSBreakDown')")
self.WaitUntil(self.IsTextPresent, "FLOW_STATE")
self.WaitUntil(self.IsTextPresent, "next_states")
self.WaitUntil(self.IsTextPresent, "outstanding_requests")
# Close the panel.
self.Click("css=#main_bottomPane .panel button.close")
self.WaitUntilNot(self.IsTextPresent, "FLOW_STATE")
self.WaitUntilNot(self.IsTextPresent, "next_states")
self.WaitUntilNot(self.IsTextPresent, "outstanding_requests")
def testToolbarStateForDisabledCronJob(self):
with self.ACLChecksDisabled():
cronjobs.CRON_MANAGER.DisableJob(
rdfvalue.RDFURN("aff4:/cron/OSBreakDown"))
self.Open("/")
self.Click("css=a[grrtarget=ManageCron]")
self.Click("css=td:contains('OSBreakDown')")
self.assertTrue(self.IsElementPresent(
"css=button[name=EnableCronJob]:not([disabled])"))
self.assertTrue(self.IsElementPresent(
"css=button[name=DisableCronJob][disabled]"))
self.assertTrue(self.IsElementPresent(
"css=button[name=DeleteCronJob]:not([disabled])"))
def testToolbarStateForEnabledCronJob(self):
with self.ACLChecksDisabled():
cronjobs.CRON_MANAGER.EnableJob(
rdfvalue.RDFURN("aff4:/cron/OSBreakDown"))
self.Open("/")
self.Click("css=a[grrtarget=ManageCron]")
self.Click("css=td:contains('OSBreakDown')")
self.assertTrue(self.IsElementPresent(
"css=button[name=EnableCronJob][disabled]"))
self.assertTrue(self.IsElementPresent(
"css=button[name=DisableCronJob]:not([disabled])"))
self.assertTrue(self.IsElementPresent(
"css=button[name=DeleteCronJob]:not([disabled])"))
def testEnableCronJob(self):
with self.ACLChecksDisabled():
cronjobs.CRON_MANAGER.DisableJob(
rdfvalue.RDFURN("aff4:/cron/OSBreakDown"))
self.Open("/")
self.Click("css=a[grrtarget=ManageCron]")
self.Click("css=td:contains('OSBreakDown')")
# Click on Enable button and check that dialog appears.
self.Click("css=button[name=EnableCronJob]")
self.WaitUntil(self.IsTextPresent,
"Are you sure you want to ENABLE this cron job?")
# Click on "Proceed" and wait for authorization dialog to appear.
self.Click("css=button[name=Proceed]")
# This should be rejected now and a form request is made.
self.WaitUntil(self.IsTextPresent, "Create a new approval")
self.Click("css=#acl_dialog button[name=Close]")
# Wait for dialog to disappear.
self.WaitUntilNot(self.IsVisible, "css=.modal-backdrop")
with self.ACLChecksDisabled():
self.GrantCronJobApproval(rdfvalue.RDFURN("aff4:/cron/OSBreakDown"))
# Click on Enable button and check that dialog appears.
self.Click("css=button[name=EnableCronJob]")
self.WaitUntil(self.IsTextPresent,
"Are you sure you want to ENABLE this cron job?")
# Click on "Proceed" and wait for success label to appear.
# Also check that "Proceed" button gets disabled.
self.Click("css=button[name=Proceed]")
self.WaitUntil(self.IsTextPresent, "Cron job was ENABLEd successfully!")
self.assertTrue(self.IsElementPresent("css=button[name=Proceed][disabled]"))
# Click on "Cancel" and check that dialog disappears.
self.Click("css=button[name=Cancel]")
self.WaitUntilNot(self.IsVisible, "css=.modal-backdrop")
# View should be refreshed automatically.
self.WaitUntil(self.IsTextPresent, "OSBreakDown")
self.WaitUntil(self.IsElementPresent,
"css=tr:contains('OSBreakDown') *[state=enabled]")
def testDisableCronJob(self):
with self.ACLChecksDisabled():
cronjobs.CRON_MANAGER.EnableJob(
rdfvalue.RDFURN("aff4:/cron/OSBreakDown"))
self.Open("/")
self.Click("css=a[grrtarget=ManageCron]")
self.Click("css=td:contains('OSBreakDown')")
# Click on Enable button and check that dialog appears.
self.Click("css=button[name=DisableCronJob]")
self.WaitUntil(self.IsTextPresent,
"Are you sure you want to DISABLE this cron job?")
# Click on "Proceed" and wait for authorization dialog to appear.
self.Click("css=button[name=Proceed]")
self.WaitUntil(self.IsTextPresent, "Create a new approval")
self.Click("css=#acl_dialog button[name=Close]")
# Wait for dialog to disappear.
self.WaitUntilNot(self.IsVisible, "css=.modal-backdrop")
with self.ACLChecksDisabled():
self.GrantCronJobApproval(rdfvalue.RDFURN("aff4:/cron/OSBreakDown"))
# Click on Disable button and check that dialog appears.
self.Click("css=button[name=DisableCronJob]")
self.WaitUntil(self.IsTextPresent,
"Are you sure you want to DISABLE this cron job?")
# Click on "Proceed" and wait for success label to appear.
# Also check that "Proceed" button gets disabled.
self.Click("css=button[name=Proceed]")
self.WaitUntil(self.IsTextPresent, "Cron job was DISABLEd successfully!")
self.assertTrue(self.IsElementPresent("css=button[name=Proceed][disabled]"))
# Click on "Cancel" and check that dialog disappears.
self.Click("css=button[name=Cancel]")
self.WaitUntilNot(self.IsVisible, "css=.modal-backdrop")
# View should be refreshed automatically.
self.WaitUntil(self.IsTextPresent, "OSBreakDown")
self.WaitUntil(self.IsElementPresent,
"css=tr:contains('OSBreakDown') *[state=disabled]")
def testDeleteCronJob(self):
with self.ACLChecksDisabled():
cronjobs.CRON_MANAGER.EnableJob(
rdfvalue.RDFURN("aff4:/cron/OSBreakDown"))
self.Open("/")
self.Click("css=a[grrtarget=ManageCron]")
self.Click("css=td:contains('OSBreakDown')")
# Click on Enable button and check that dialog appears.
self.Click("css=button[name=DeleteCronJob]")
self.WaitUntil(self.IsTextPresent,
"Are you sure you want to DELETE this cron job?")
# Click on "Proceed" and wait for authorization dialog to appear.
self.Click("css=button[name=Proceed]")
self.WaitUntil(self.IsTextPresent, "Create a new approval")
self.Click("css=#acl_dialog button[name=Close]")
# Wait for dialog to disappear.
self.WaitUntilNot(self.IsVisible, "css=.modal-backdrop")
with self.ACLChecksDisabled():
self.GrantCronJobApproval(rdfvalue.RDFURN("aff4:/cron/OSBreakDown"))
# Click on Disable button and check that dialog appears.
self.Click("css=button[name=DeleteCronJob]")
self.WaitUntil(self.IsTextPresent,
"Are you sure you want to DELETE this cron job?")
# Click on "Proceed" and wait for success label to appear.
# Also check that "Proceed" button gets disabled.
self.Click("css=button[name=Proceed]")
self.WaitUntil(self.IsTextPresent, "Cron job was DELETEd successfully!")
self.assertTrue(self.IsElementPresent("css=button[name=Proceed][disabled]"))
# Click on "Cancel" and check that dialog disappears.
self.Click("css=button[name=Cancel]")
self.WaitUntilNot(self.IsVisible, "css=.modal-backdrop")
# View should be refreshed automatically.
self.WaitUntil(self.IsElementPresent,
"css=#main_topPane td:contains('GRRVersionBreakDown')")
self.WaitUntilNot(self.IsElementPresent,
"css=#main_topPane td:contains('OSBreakDown')")
def testHuntSchedulingWorksCorrectly(self):
self.Open("/")
self.Click("css=a[grrtarget=ManageCron]")
self.Click("css=button[name=ScheduleHuntCronJob]")
self.WaitUntil(self.IsTextPresent, "What to run?")
# Click on Filesystem item in flows list
self.WaitUntil(self.IsElementPresent, "css=#_Filesystem > ins.jstree-icon")
self.Click("css=#_Filesystem > ins.jstree-icon")
# Click on Find Files item in Filesystem flows list
self.Click("link=File Finder")
# Wait for flow configuration form to be rendered (just wait for first
# input field).
self.WaitUntil(self.IsElementPresent,
"css=.Wizard input[id=args-paths-0]")
# Change "path", "pathtype", "depth" and "ignore_errors" values
self.Type("css=.Wizard input[id=args-paths-0]", "/tmp")
self.Select("css=.Wizard select[id=args-pathtype]", "TSK")
# Click on "Next" button
self.Click("css=.Wizard button.Next")
self.WaitUntil(self.IsTextPresent, "Output Processing")
# Configure the hunt to use a collection and also send an email on results.
self.Click("css=.Wizard button:contains('Add Output Plugin')")
self.Select("css=.Wizard select[id=output_1-option]",
"Send an email for each result.")
self.Type("css=.Wizard input[id=output_1-email_address]",
"test@%s" % config_lib.CONFIG["Logging.domain"])
# Click on "Next" button
self.Click("css=.Wizard button.Next")
self.WaitUntil(self.IsTextPresent, "Where to run?")
# Create 3 foreman rules
self.WaitUntil(
self.IsElementPresent,
"css=.Wizard select[id=rule_1-option]")
self.Select("css=.Wizard select[id=rule_1-option]",
"Regular Expressions")
self.Select("css=.Wizard select[id=rule_1-attribute_name]",
"System")
self.Type("css=.Wizard input[id=rule_1-attribute_regex]",
"Linux")
# Make the button visible by scrolling to the bottom.
self.driver.execute_script("""
$("button:contains('Add Rule')").parent().scrollTop(10000)
""")
self.Click("css=.Wizard button:contains('Add Rule')")
self.Select("css=.Wizard select[id=rule_2-option]",
"Integer Rule")
self.Select("css=.Wizard select[id=rule_2-attribute_name]",
"Clock")
self.Select("css=.Wizard select[id=rule_2-operator]",
"GREATER_THAN")
self.Type("css=.Wizard input[id=rule_2-value]",
"1336650631137737")
# Make the button visible by scrolling to the bottom.
self.driver.execute_script("""
$("button:contains('Add Rule')").parent().scrollTop(10000)
""")
self.Click("css=.Wizard button:contains('Add Rule')")
self.Select("css=.Wizard select[id=rule_3-option]",
"OSX")
# Make the button visible by scrolling to the bottom.
self.driver.execute_script("""
$("button:contains('Add Rule')").parent().scrollTop(10000)
""")
# Click on "Next" button
self.Click("css=.Wizard button.Next")
self.WaitUntil(self.IsTextPresent, "When to run?")
# Select daily periodicity
self.Type("css=.Wizard input[id=cron-periodicity]", "1d")
# Click on "Next" button
self.Click("css=.Wizard button.Next")
self.WaitUntil(self.IsTextPresent, "Review")
# Check that the arguments summary is present.
self.assertTrue(self.IsTextPresent("Paths"))
self.assertTrue(self.IsTextPresent("/tmp"))
# Check that output plugins are shown.
self.assertTrue(self.IsTextPresent("EmailOutputPlugin"))
self.assertTrue(self.IsTextPresent("test@%s" %
config_lib.CONFIG["Logging.domain"]))
# Check that rules summary is present.
self.assertTrue(self.IsTextPresent("Regex rules"))
# Check that periodicity information is present in the review.
self.assertTrue(self.IsTextPresent("Hunt Periodicity"))
self.assertTrue(self.IsTextPresent("Hunt will run 1d."))
# Click on "Schedule" button
self.Click("css=.Wizard button.Next")
# Anyone can schedule a hunt but we need an approval to actually start it.
self.WaitUntil(self.IsTextPresent,
"Hunt was successfully scheduled")
# Close the window and check that cron job object was created.
self.Click("css=button.Finish")
# Select newly created cron job.
self.Click("css=td:contains('cron/CreateAndRunGenericHuntFlow_')")
# Check that correct details are displayed in cron job details tab.
self.WaitUntil(self.IsTextPresent, "CreateAndRunGenericHuntFlow")
self.WaitUntil(self.IsTextPresent, "Flow args")
self.assertTrue(self.IsTextPresent("Paths"))
self.assertTrue(self.IsTextPresent("/tmp"))
def testStuckCronJobIsHighlighted(self):
# Make sure a lot of time has passed since the last
# execution
with test_lib.FakeTime(0):
self.AddJobStatus("aff4:/cron/OSBreakDown",
rdfvalue.CronJobRunStatus.Status.OK)
self.Open("/")
self.WaitUntil(self.IsElementPresent, "client_query")
self.Click("css=a[grrtarget=ManageCron]")
# OSBreakDown's row should have a 'warn' class
self.WaitUntil(self.IsElementPresent,
"css=tr.warning td:contains('OSBreakDown')")
# Check that only OSBreakDown is highlighted
self.WaitUntilNot(self.IsElementPresent,
"css=tr.warning td:contains('GRRVersionBreakDown')")
def testFailingCronJobIsHighlighted(self):
for _ in range(4):
self.AddJobStatus("aff4:/cron/OSBreakDown",
rdfvalue.CronJobRunStatus.Status.ERROR)
self.Open("/")
self.WaitUntil(self.IsElementPresent, "client_query")
self.Click("css=a[grrtarget=ManageCron]")
# OSBreakDown's row should have an 'error' class
self.WaitUntil(self.IsElementPresent,
"css=tr.danger td:contains('OSBreakDown')")
# Check that only OSBreakDown is highlighted
self.WaitUntilNot(self.IsElementPresent,
"css=tr.danger td:contains('GRRVersionBreakDown')")
def main(argv):
# Run the full test suite
runtests_test.SeleniumTestProgram(argv=argv)
if __name__ == "__main__":
flags.StartMain(main)
| 37.167048 | 80 | 0.685876 |
0116d23a82721e9ae1e2b28171234a3f433da950 | 242 | py | Python | run-python-script/test.py | louperelo/cwl-examples | 296587a0a4392d0a193f7d1a3684e55d0bc3bde1 | [
"MIT"
] | 3 | 2021-09-07T13:23:33.000Z | 2021-09-16T09:14:30.000Z | run-python-script/test.py | louperelo/cwl-examples | 296587a0a4392d0a193f7d1a3684e55d0bc3bde1 | [
"MIT"
] | 6 | 2021-09-07T08:07:41.000Z | 2021-09-28T12:55:58.000Z | run-python-script/test.py | louperelo/cwl-examples | 296587a0a4392d0a193f7d1a3684e55d0bc3bde1 | [
"MIT"
] | 3 | 2021-09-13T15:21:01.000Z | 2021-09-27T08:48:57.000Z | import argparse
print("I am a Python Script running in docker")
parser = argparse.ArgumentParser()
parser.add_argument('firstArg')
parser.add_argument('secondArg')
args = parser.parse_args()
print("you passed me some args:")
print(args) | 17.285714 | 47 | 0.760331 |
ac91bf07f053e468dcdfc87083655106ba07cc7d | 3,203 | py | Python | karbor-1.3.0/karbor/services/protection/flows/verify.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | null | null | null | karbor-1.3.0/karbor/services/protection/flows/verify.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | karbor-1.3.0/karbor/services/protection/flows/verify.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 2 | 2020-03-15T01:24:15.000Z | 2020-07-22T20:34:26.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
from taskflow import task
from karbor.common import constants
from karbor.services.protection.flows import utils
from karbor.services.protection import resource_flow
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class InitiateVerifyTask(task.Task):
def execute(self, context, verify, operation_log, *args, **kwargs):
LOG.debug("Initiate verify verify_id: %s", verify.id)
verify['status'] = constants.VERIFICATION_STATUS_IN_PROGRESS
verify.save()
update_fields = {"status": verify.status}
utils.update_operation_log(context, operation_log, update_fields)
def revert(self, context, verify, operation_log, *args, **kwargs):
LOG.debug("Failed to verify verify_id: %s", verify.id)
verify['status'] = constants.VERIFICATION_STATUS_FAILURE
verify.save()
update_fields = {
"status": verify.status,
"ended_at": timeutils.utcnow()
}
utils.update_operation_log(context, operation_log, update_fields)
class CompleteVerifyTask(task.Task):
def execute(self, context, verify, operation_log, *args, **kwargs):
LOG.debug("Complete verify verify_id: %s", verify.id)
verify['status'] = constants.VERIFICATION_STATUS_SUCCESS
verify.save()
update_fields = {
"status": verify.status,
"ended_at": timeutils.utcnow()
}
utils.update_operation_log(context, operation_log, update_fields)
def get_flow(context, workflow_engine, checkpoint, provider, verify):
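    # Builds a linear taskflow flow: InitiateVerifyTask marks the verification
    # as in-progress, the per-resource verify tasks (built from the checkpoint's
    # resource graph and the provider's plugins) run in between, and
    # CompleteVerifyTask records success; on failure InitiateVerifyTask.revert
    # marks the operation as failed.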
resource_graph = checkpoint.resource_graph
operation_log = utils.create_operation_log_verify(context, verify)
parameters = verify.parameters
flow_name = "Verify_" + checkpoint.id
verify_flow = workflow_engine.build_flow(flow_name, 'linear')
plugins = provider.load_plugins()
resources_task_flow = resource_flow.build_resource_flow(
operation_type=constants.OPERATION_VERIFY,
context=context,
workflow_engine=workflow_engine,
resource_graph=resource_graph,
plugins=plugins,
parameters=parameters
)
workflow_engine.add_tasks(
verify_flow,
InitiateVerifyTask(),
resources_task_flow,
CompleteVerifyTask()
)
flow_engine = workflow_engine.get_engine(
verify_flow,
store={
'context': context,
'checkpoint': checkpoint,
'verify': verify,
'new_resources': {},
'operation_log': operation_log
}
)
return flow_engine
| 34.44086 | 75 | 0.695598 |
b903b9f9f9fb22386049aa700049192f475b90d4 | 309 | py | Python | 3. Python Advanced (September 2021)/3.1 Python Advanced (September 2021)/01. Lists as Stacks and Queues/02_matching_parentheses.py | kzborisov/SoftUni | ccb2b8850adc79bfb2652a45124c3ff11183412e | [
"MIT"
] | 1 | 2021-02-07T07:51:12.000Z | 2021-02-07T07:51:12.000Z | 3. Python Advanced (September 2021)/3.1 Python Advanced (September 2021)/01. Lists as Stacks and Queues/02_matching_parentheses.py | kzborisov/softuni | 9c5b45c74fa7d9748e9b3ea65a5ae4e15c142751 | [
"MIT"
] | null | null | null | 3. Python Advanced (September 2021)/3.1 Python Advanced (September 2021)/01. Lists as Stacks and Queues/02_matching_parentheses.py | kzborisov/softuni | 9c5b45c74fa7d9748e9b3ea65a5ae4e15c142751 | [
"MIT"
] | null | null | null | expr = input()
parenthesis_index = []
for idx, ch in enumerate(expr):
if ch not in ["(", ")"]:
continue
if ch == "(":
parenthesis_index.append(idx)
elif ch == ")":
start_index = parenthesis_index.pop()
end_index = idx
print(expr[start_index:end_index+1])
| 22.071429 | 45 | 0.566343 |
1326d6c6cf9868dc8315fe01e0e6d1ce6eb2aa1b | 6,134 | py | Python | ironstubs/process_stubs.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | ironstubs/process_stubs.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | ironstubs/process_stubs.py | YKato521/ironpython-stubs | b1f7c580de48528490b3ee5791b04898be95a9ae | [
"MIT"
] | null | null | null | """ Stub Generator for IronPython
Extended script based on script developed by Gary Edwards at:
gitlab.com/reje/revit-python-stubs
This is uses a slightly modify version of generator3,
github.com/JetBrains/intellij-community/blob/master/python/helpers/generator3.py
Iterates through a list of targeted assemblies and generates stub directories
for the namespaces using pycharm's generator3.
Note:
Some files ended up too large for Jedi to handle and would cause
memory errors and crashes - 1mb+ in a single files was enough to
cause problems. To fix this, there is a separate module that creates
a compressed version of the stubs, but it also split large file
into separate files to deal with jedi.
These directories will show up in the stubs as (X_parts)
MIT LICENSE
https://github.com/gtalarico/ironpython-stubs
Gui Talarico
--------------------------------------------------------------------------
Large files, such as `System/__init__.py` or `Revit/DB/__init__.py`
can exceed memory limits and crash the system.
These files need to be optimized so Jedi won't misbehave and crash your system
when parsing these files to index autocomplete options.
The primary strategies are:
    1. Remove unnecessary characters (empty lines, extra spaces, etc.)
    2. Split large files into parts to improve Jedi performance and avoid crashes
    #1 is very straightforward. Use a few regexes.
#2 is more complex. Some of the stubs created by generator3 such as DB/__init__.py
    had nearly 2 MB. Doesn't seem like much, but for a raw .py file, that's more than
    120K lines. System.Windows.Forms had over 7 MB.
    The strategy here was simple. Take all the classes inside these monster files,
    create separate files for each one, and import them back into the original file.
    For example, compare:
    `\stubs\Autodesk\Revit\DB\__init__.py`
    and
    `\stubs.min\Autodesk\Revit\DB\__init__.py`
"""
import re
import os
import sys
import subprocess
from collections import defaultdict
import json
from pprint import pprint
#############################################################################
# TODO: Integrate with CLI
# TODO: FIX Vars
# TODO: FIX Character Replacement + Optimize
#############################################################################
##########
# CONFIG #
##########
join = os.path.join
project_dir = os.getcwd() # Must execute from project dir
SAVE_PATH = os.path.join(project_dir, "release", "stubs")
LIMIT_IN_KB = 200
FILESIZE_LIMITE = LIMIT_IN_KB * 1024
def file_is_too_damn_big(filepath):
return os.path.getsize(filepath) > FILESIZE_LIMITE
def read_source(filepath):
with open(filepath) as fp:
source = fp.read()
return source
def write_source(filepath, source):
folderpath = os.path.dirname(filepath)
if not os.path.exists(folderpath):
os.makedirs(folderpath)
with open(filepath, "w") as fp:
source = fp.write(source)
print("File Written: {}".format(filepath))
target_files = []
TESTING = False
# TESTING = True
print("Starting...")
print(SAVE_PATH)
for root, subfolders, files in os.walk(SAVE_PATH):
py_files = [f for f in files if f.endswith(".py")]
for filename in py_files:
filepath = join(root, filename)
filesize = os.path.getsize(filepath)
filedir = os.path.dirname(filepath)
        new_filedir = filedir.replace("\\stubs", "\\stubs.min")
new_filepath = os.path.join(new_filedir, filename)
source = read_source(filepath)
print("Processing File detected: {}".format(filepath))
if TESTING:
if not filepath.endswith("DB\\__init__.py"):
continue
# SOME OF THESE WORK IN TESTS BUT ARE NOT WORKING ON BATCH REPLACEMENT
replacements = [
(r" {4}", " "), # Convert 4 spaces into single
(r":\r\n( )+pass", r":pass"), # Put pass in one line
(r'"""\r\n( )+pass', r'"""'), # If has doc string, not need to keep pass
(r"pass\n", r"pass"), # Remove Extra Line after pass
(r" = ", "="),
(r", ", ","),
(r" # known case of __new__", ""), # Pycharm Note
(r" #cannot find CLR method", ""), # Pycharm Note
(r" # default", ""), # Pycharm Note
]
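        # Tiny illustration of the replacements above (input made up): a stub line
        #     "    def foo(self, a = 1, b = 2):\r\n        pass\n"
        # minifies to
        #     " def foo(self,a=1,b=2):pass"
        # after the whitespace, ":pass", " = " and ", " substitutions are applied.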
new_source = source
for old, new in replacements:
new_source = re.sub(old, new, new_source)
write_source(new_filepath, new_source)
print("=" * 30)
#####################################
# SEPARATE FILE INTO SEPARATE FILES #
#####################################
if file_is_too_damn_big(new_filepath):
print("=" * 30)
print("WARNING: file above breaking max: {}".format(new_filepath))
module_name = os.path.basename(filepath).replace(".py", "_parts")
chunks_dir = join(new_filedir, module_name)
# Create Blank Init File
write_source(join(chunks_dir, "__init__.py"), "")
# Split File into Classes
chunks = re.split(r"(?:\n)class ", new_source)
header = chunks.pop(0)
clean_source = header
write_source(new_filepath, clean_source)
for chunk in chunks:
# Find Class Name and body
class_source = "class " + chunk
re_class_name = re.search("(class )(\w+)", class_source)
class_name = re_class_name.group(2)
if not os.path.exists(chunks_dir):
os.mkdir(chunks_dir)
# Write individual class files
with open(join(chunks_dir, class_name + ".py"), "w") as fp:
fp.write(class_source)
# New class file import to __init__
with open(new_filepath, "a") as fp:
fp.write(
"from {0}.{1} import {1}\n".format(module_name, class_name)
)
| 34.655367 | 86 | 0.588523 |
81e604b52fe44d62364b7dc23c56b2b0dbd76f4e | 3,735 | py | Python | tests/summarizer/test_rouge.py | doruktiktiklar/sadedegel | 3362c4b6bf07c34634313b9eafe52e6817efec60 | [
"MIT"
] | null | null | null | tests/summarizer/test_rouge.py | doruktiktiklar/sadedegel | 3362c4b6bf07c34634313b9eafe52e6817efec60 | [
"MIT"
] | null | null | null | tests/summarizer/test_rouge.py | doruktiktiklar/sadedegel | 3362c4b6bf07c34634313b9eafe52e6817efec60 | [
"MIT"
] | null | null | null | from pytest import approx, raises
import numpy as np
import pytest
from .context import Rouge1Summarizer, Doc, tokenizer_context, SimpleTokenizer, BertTokenizer
@pytest.mark.parametrize("tokenizer, score_true",
[(SimpleTokenizer.__name__, np.array([2 / 4, 1 / 4, 2 / 4])),
(BertTokenizer.__name__, np.array([2 / 4, 2 / 5, 3 / 4]))])
def test_rouge1_summarizer_precision_all_lower(tokenizer, score_true):
with tokenizer_context(tokenizer):
summ = Rouge1Summarizer(normalize=False, metric="precision")
assert summ.predict(Doc('ali topu tut. oya ip atla. ahmet topu at.').sents) == approx(
score_true)
@pytest.mark.parametrize("tokenizer, score_true",
[(SimpleTokenizer.__name__, np.array([2 / 4, 1 / 4, 2 / 4])),
(BertTokenizer.__name__, np.array([2 / 4, 2 / 5, 3 / 4]))])
def test_rouge1_summarizer_precision_proper_case(tokenizer, score_true):
with tokenizer_context(tokenizer):
summ = Rouge1Summarizer(normalize=False, metric="precision")
assert summ.predict(Doc('Ali topu tut. Oya ip atla. Ahmet topu at.').sents) == approx(
score_true)
@pytest.mark.parametrize("tokenizer, score_true",
[(SimpleTokenizer.__name__, np.array([2 / 8, 1 / 8, 2 / 8])),
(BertTokenizer.__name__, np.array([2 / 9, 2 / 8, 3 / 9]))])
def test_rouge1_summarizer_recall_all_lower(tokenizer, score_true):
with tokenizer_context(tokenizer):
summ = Rouge1Summarizer(normalize=False, metric="recall")
assert summ.predict(Doc('ali topu tut. oya ip atla. ahmet topu at.').sents) == approx(
score_true)
@pytest.mark.parametrize("tokenizer, score_true",
[(SimpleTokenizer.__name__, np.array([2 / 8, 1 / 8, 2 / 8])),
(BertTokenizer.__name__, np.array([2 / 9, 2 / 8, 3 / 9]))])
def test_rouge1_summarizer_recall_proper_case(tokenizer, score_true):
with tokenizer_context(tokenizer):
summ = Rouge1Summarizer(normalize=False, metric="recall")
assert summ.predict(Doc('Ali topu tut. Oya ip atla. Ahmet topu at.').sents) == approx(
score_true)
@pytest.mark.parametrize("tokenizer, score_true",
[(SimpleTokenizer.__name__, np.array([0.33333333, 0.16666667, 0.33333333])),
(BertTokenizer.__name__, np.array([0.30769231, 0.30769231, 0.46153846]))])
def test_rouge1_summarizer_f1_all_lower(tokenizer, score_true):
with tokenizer_context(tokenizer):
summ = Rouge1Summarizer(normalize=False)
assert summ.predict(Doc('ali topu tut. oya ip atla. ahmet topu at.').sents) == approx(
score_true)
@pytest.mark.parametrize("tokenizer, score_true",
[(SimpleTokenizer.__name__, np.array([0.33333333, 0.16666667, 0.33333333])),
(BertTokenizer.__name__, np.array([0.30769231, 0.30769231, 0.46153846]))])
def test_rouge1_summarizer_f1_proper_case(tokenizer, score_true):
with tokenizer_context(tokenizer):
summ = Rouge1Summarizer(normalize=False)
assert summ.predict(Doc('Ali topu tut. Oya ip atla. Ahmet topu at.').sents) == approx(
score_true)
@pytest.mark.parametrize("tokenizer", [SimpleTokenizer.__name__, BertTokenizer.__name__])
def test_rouge1_summarize_text(tokenizer):
with tokenizer_context(tokenizer):
summ = Rouge1Summarizer()
doc = Doc('ali topu tut. oya ip atla. ahmet topu at.')
assert summ(doc, k=1) == [doc.sents[2]]
def test_rouge1_summarizer_unknown_mode():
with raises(ValueError):
_ = Rouge1Summarizer('unknown')
| 46.111111 | 101 | 0.64739 |
2a0e649d08b84aa0122a3ecff832545a7787c4de | 1,649 | py | Python | neo/Core/Header.py | neo-goo/neo-python | 82082b624a417a6631ef9effe40556e876f4ea9e | [
"MIT"
] | 12 | 2017-12-20T14:13:10.000Z | 2020-07-16T12:59:55.000Z | neo/Core/Header.py | kjhdigit/neo-project | 82082b624a417a6631ef9effe40556e876f4ea9e | [
"MIT"
] | 1 | 2022-03-17T00:01:50.000Z | 2022-03-17T00:01:50.000Z | neo/Core/Header.py | kjhdigit/neo-project | 82082b624a417a6631ef9effe40556e876f4ea9e | [
"MIT"
] | 2 | 2017-11-29T13:21:41.000Z | 2018-10-23T03:31:49.000Z | # -*- coding: UTF-8 -*-
from neo.Core.BlockBase import BlockBase
from neo.IO.MemoryStream import MemoryStream, StreamManager
from neo.IO.BinaryReader import BinaryReader
from neo.Core.Witness import Witness
class Header(BlockBase):
    def __init__(self, prevhash=None, merkle_root=None, timestamp=None,
                 index=None, consensus_data=None, next_consensus=None, script=None):
super(Header, self).__init__()
self.PrevHash = prevhash
        self.MerkleRoot = merkle_root
self.Timestamp = timestamp
self.Index = index
self.ConsensusData = consensus_data
        self.NextConsensus = next_consensus
self.Script = script
def Size(self):
return super(Header, self).Size() + 1
def Deserialize(self, reader):
super(Header, self).Deserialize(reader)
if reader.ReadByte() != 0:
raise Exception('Incorrect Header Format')
def Equals(self, other):
if other is None:
return False
if other is self:
return True
return self.Hash == other.Hash
@staticmethod
def FromTrimmedData(data, index):
header = Header()
ms = StreamManager.GetStream(data)
reader = BinaryReader(ms)
header.DeserializeUnsigned(reader)
reader.ReadByte()
witness = Witness()
witness.Deserialize(reader)
header.Script = witness
StreamManager.ReleaseStream(ms)
return header
def GetHashCode(self):
return self.Hash
def Serialize(self, writer):
super(Header, self).Serialize(writer)
writer.WriteByte(0)
| 24.984848 | 83 | 0.637356 |
874e2931490f15a787d6c8a0e847a80fa3206cbf | 1,389 | py | Python | coaching_sessions/sorted_matrix/search_sorted_matrix.py | Mrsteveson/Review | 0dc401e9ba45efcc4cccfddfd425f72ced96e562 | [
"MIT"
] | null | null | null | coaching_sessions/sorted_matrix/search_sorted_matrix.py | Mrsteveson/Review | 0dc401e9ba45efcc4cccfddfd425f72ced96e562 | [
"MIT"
] | null | null | null | coaching_sessions/sorted_matrix/search_sorted_matrix.py | Mrsteveson/Review | 0dc401e9ba45efcc4cccfddfd425f72ced96e562 | [
"MIT"
] | null | null | null | def search_in_sorted_matrix(matrix, target):
# O(r * c) runtime, O(1) space
# for row in matrix
# for col in row
# if matrix[row][col] == target
# return (row, col)
# return (-1, -1)
# O(r log c) ~ O(n log n) runtime, O(1) space
# binary search on all the elements in a row, starting with
# the first row
# if our binary search doesn't find the target in that row,
# move on to the next row
# O(r + c) runtime, O(1) space
# how do we take advantage of the fact that both rows are sorted
# where should we start searching?
# would starting at a different corner help?
# start at the top right corner
# init starting indices at the top right corner
row = 0
col = len(matrix[0]) - 1
# loop so long as the indices stay in bounds of the matrix
while row < len(matrix) and col >= 0:
# if the current element > target, move left
if matrix[row][col] > target:
col -= 1
        # if the current element < target, move down
elif matrix[row][col] < target:
row += 1
else:
return (row, col)
# we have to start in a corner where moving in one direction
# gets us smaller elements, and moving in the other direction gets
# us larger elements
return (-1, -1)
# could we start in the middle? it's not clear how to systematically
# move through the matrix if we start in the middle | 34.725 | 71 | 0.645068 |
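# Illustrative usage of the staircase search above: a minimal, hedged sketch.
# The sample matrix and the expected index pairs are assumptions for the demo,
# chosen so that rows and columns are both sorted in ascending order.
if __name__ == "__main__":
    demo_matrix = [
        [1, 4, 7, 11],
        [2, 5, 8, 12],
        [3, 6, 9, 16],
    ]
    # A present value comes back as its (row, col); a missing one as (-1, -1).
    assert search_in_sorted_matrix(demo_matrix, 9) == (2, 2)
    assert search_in_sorted_matrix(demo_matrix, 10) == (-1, -1)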
e8bb0c421723c610581174322d7768fe77d615de | 2,216 | py | Python | imooc/imooc/middlewares.py | BitTigerInst/CourseWebCrawler | 1037b223ba779e81ddda2423f6082e3c67765651 | [
"MIT"
] | 6 | 2016-11-23T05:43:35.000Z | 2019-05-03T09:54:17.000Z | imooc/imooc/middlewares.py | BitTigerInst/CourseWebCrawler | 1037b223ba779e81ddda2423f6082e3c67765651 | [
"MIT"
] | null | null | null | imooc/imooc/middlewares.py | BitTigerInst/CourseWebCrawler | 1037b223ba779e81ddda2423f6082e3c67765651 | [
"MIT"
] | 7 | 2016-07-23T03:21:28.000Z | 2019-07-17T08:41:40.000Z | __author__ = 'huafei'
import random
from scrapy import log
from scrapy.downloadermiddlewares.useragent import UserAgentMiddleware
class RandomUserAgentMiddleware(UserAgentMiddleware):
def __init__(self, settings, user_agent='Scrapy'):
super(RandomUserAgentMiddleware, self).__init__()
self.user_agent = user_agent
def process_request(self, request, spider):
ua = random.choice(self.user_agent_list)
if ua:
request.headers.setdefault('User-Agent', ua)
spider.log(
u'User-Agent: {} {}'.format(request.headers.get('User-Agent'), request),
level=log.DEBUG
)
"""
    the default user_agent_list comprises Chrome, IE, Firefox, Mozilla, and Opera user agents;
for more user agent strings, you can find it in http://www.useragentstring.com/pages/useragentstring.php
"""
user_agent_list = [
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1664.3 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.103 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1664.3 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/537.13 (KHTML, like Gecko) Chrome/24.0.1290.1 Safari/537.13",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.45 Safari/535.19",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.45 Safari/535.19",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.11 Safari/535.19",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.66 Safari/535.11",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.93 Safari/537.36",
] | 59.891892 | 132 | 0.680957 |
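# Illustrative configuration (hedged sketch): a middleware like
# RandomUserAgentMiddleware is normally switched on via DOWNLOADER_MIDDLEWARES
# in the project's settings.py. The dotted path 'imooc.middlewares.*' and the
# priority value 400 are assumptions for illustration, not taken from this repository.
DOWNLOADER_MIDDLEWARES = {
    'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,  # disable the default UA middleware
    'imooc.middlewares.RandomUserAgentMiddleware': 400,                  # assumed module path
}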
8f0d6a8873c09a7e59de790a2f937841f59ba09a | 224 | py | Python | EuclideanAlgorithm.py | Krylovsentry/Algorithms | 0cd236f04dc065d5247a6f274bb3db503db591b0 | [
"MIT"
] | 1 | 2016-08-21T13:01:42.000Z | 2016-08-21T13:01:42.000Z | EuclideanAlgorithm.py | Krylovsentry/Algorithms | 0cd236f04dc065d5247a6f274bb3db503db591b0 | [
"MIT"
] | null | null | null | EuclideanAlgorithm.py | Krylovsentry/Algorithms | 0cd236f04dc065d5247a6f274bb3db503db591b0 | [
"MIT"
] | null | null | null | # Efficient method for computing the greatest common divisor (GCD) of two numbers
def gcd(a, b):
while a % b != 0:
den = a % b
a = b
b = den
return b
print(gcd(125, 55))
print(gcd(46, 78))
| 17.230769 | 81 | 0.558036 |
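# Sanity check (hedged): the loop above is the iterative Euclidean algorithm,
# so its results should agree with the standard library's math.gcd for positive inputs.
import math
assert gcd(125, 55) == math.gcd(125, 55) == 5
assert gcd(46, 78) == math.gcd(46, 78) == 2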
f2f4bb971e82ad12a441af120cc131398d0a5521 | 2,747 | py | Python | dora/sirene/migrations/0001_initial.py | francoisromain/dora-back | 868491097d12b9a23135db3d91bc6495431e8237 | [
"MIT"
] | 1 | 2022-01-03T22:12:45.000Z | 2022-01-03T22:12:45.000Z | dora/sirene/migrations/0001_initial.py | francoisromain/dora-back | 868491097d12b9a23135db3d91bc6495431e8237 | [
"MIT"
] | 2 | 2022-03-17T18:04:11.000Z | 2022-03-18T14:55:27.000Z | dora/sirene/migrations/0001_initial.py | francoisromain/dora-back | 868491097d12b9a23135db3d91bc6495431e8237 | [
"MIT"
] | 1 | 2022-01-03T09:02:54.000Z | 2022-01-03T09:02:54.000Z | import django.contrib.postgres.indexes
from django.contrib.postgres.operations import TrigramExtension
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
TrigramExtension(),
migrations.CreateModel(
name="Establishment",
fields=[
(
"siret",
models.CharField(
max_length=14,
primary_key=True,
serialize=False,
verbose_name="Siret",
),
),
("siren", models.CharField(max_length=9, verbose_name="Siren")),
("denomination", models.CharField(max_length=100, verbose_name="Nom")),
("ape", models.CharField(max_length=6)),
("code_cedex", models.CharField(max_length=9)),
("code_commune", models.CharField(max_length=5)),
("code_postal", models.CharField(max_length=5)),
("complement_adresse", models.CharField(max_length=38)),
("distribution_speciale", models.CharField(max_length=26)),
("enseigne1", models.CharField(max_length=50)),
("enseigne2", models.CharField(max_length=50)),
("enseigne3", models.CharField(max_length=50)),
("is_siege", models.BooleanField()),
("repetition_index", models.CharField(max_length=1)),
("libelle_cedex", models.CharField(max_length=100)),
("libelle_commune", models.CharField(max_length=100)),
("libelle_voie", models.CharField(max_length=100)),
("nic", models.CharField(max_length=5)),
("numero_voie", models.CharField(max_length=4)),
("diffusable", models.BooleanField()),
("type_voie", models.CharField(max_length=4)),
("denomination_parent", models.TextField(blank=True, default="")),
(
"sigle_parent",
models.CharField(blank=True, default="", max_length=20),
),
("longitude", models.FloatField(blank=True, null=True)),
("latitude", models.FloatField(blank=True, null=True)),
("full_search_text", models.TextField()),
],
),
migrations.AddIndex(
model_name="establishment",
index=django.contrib.postgres.indexes.GinIndex(
fields=["full_search_text"],
name="full_text_trgm_idx",
opclasses=("gin_trgm_ops",),
),
),
]
| 42.261538 | 87 | 0.531489 |
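# Illustrative query (hedged sketch): the GinIndex with gin_trgm_ops above is
# the kind of index Django's TrigramSimilarity lookups can use. The model import
# path and the 0.3 similarity cut-off are assumptions for illustration.
from django.contrib.postgres.search import TrigramSimilarity
from dora.sirene.models import Establishment  # assumed model location
def fuzzy_search(term, limit=10):
    # Rank establishments by trigram similarity against the indexed column.
    return (
        Establishment.objects
        .annotate(similarity=TrigramSimilarity("full_search_text", term))
        .filter(similarity__gt=0.3)
        .order_by("-similarity")[:limit]
    )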
4e64f3e6f79e714fb9d7735915124b391301fe08 | 5,777 | py | Python | ehr_ml/clmbr/dataset.py | som-shahlab/ehr_ml | 4f83ac5b882916a175f0d242b38d914d00bf8a7c | [
"MIT"
] | 4 | 2021-03-12T21:41:37.000Z | 2021-06-25T16:49:52.000Z | ehr_ml/clmbr/dataset.py | som-shahlab/ehr_ml | 4f83ac5b882916a175f0d242b38d914d00bf8a7c | [
"MIT"
] | 22 | 2020-11-19T00:04:27.000Z | 2022-03-02T18:16:08.000Z | ehr_ml/clmbr/dataset.py | som-shahlab/ehr_ml | 4f83ac5b882916a175f0d242b38d914d00bf8a7c | [
"MIT"
] | 2 | 2021-05-12T13:11:46.000Z | 2021-10-15T18:30:14.000Z | from __future__ import annotations
import os
import math
import queue
import torch
import bisect
import datetime
import threading
import numpy as np
from .. import timeline
from . import PatientTimelineDataset
from .rnn_model import PatientRNN
from .sequential_task import SequentialTask
from .labeler_task import LabelerTask
from .doctorai_task import DoctorAITask
from typing import Any, Dict, Optional, Iterable, Tuple, List, Union
def finalize_data(
batch: Dict[Any, Any], device: torch.device
) -> Dict[Any, Any]:
batch["pid"] = batch["pid"].tolist()
batch["day_index"] = batch["day_index"].tolist()
batch["rnn"] = PatientRNN.finalize_data(batch["rnn"], device)
if "task" in batch:
batch["task"] = SequentialTask.finalize_data(batch["task"], device)
if "doctorai" in batch:
batch["doctorai"] = DoctorAITask.finalize_data(batch["doctorai"])
if "labeler" in batch:
batch["labeler"] = LabelerTask.finalize_data(batch["labeler"])
if "label" in batch:
batch["label"] = [torch.tensor(a, device=device) for a in batch["label"]]
return batch
def prepare_batch_thread(
dataset: PatientTimelineDataset,
args: Any,
out_queue: queue.Queue[Optional[Dict[Any, Any]]],
stop_event: threading.Event,
device: torch.device,
) -> None:
iterator = dataset.get_iterator(*args)
while True:
if stop_event.is_set():
out_queue.put(None)
break
item = next(iterator, None)
if item is None:
out_queue.put(None)
break
result = finalize_data(item, device)
out_queue.put(result)
def convert_pid(
pid: int, search_list: List[int], result_list: List[int]
) -> int:
pid_index = bisect.bisect_left(search_list, pid)
assert search_list[pid_index] == pid, f"patient ID {pid} not in timeline"
return_pid = result_list[pid_index]
return return_pid
def orig2ehr_pid(orig_pid: int, timelines: timeline.TimelineReader):
all_original_pids = timelines.get_original_patient_ids()
all_ehr_ml_pids = timelines.get_patient_ids()
return convert_pid(orig_pid, all_original_pids, all_ehr_ml_pids)
def ehr2orig_pid(ehr_pid: int, timelines: timeline.TimelineReader):
all_original_pids = timelines.get_original_patient_ids()
all_ehr_ml_pids = timelines.get_patient_ids()
return convert_pid(ehr_pid, all_ehr_ml_pids, all_original_pids)
def convert_patient_data(
extract_dir: str,
original_patient_ids: Iterable[int],
dates: Iterable[Union[str, datetime.date]],
) -> Tuple[np.ndarray, np.ndarray]:
timelines = timeline.TimelineReader(os.path.join(extract_dir, "extract.db"))
all_original_pids = timelines.get_original_patient_ids()
all_ehr_ml_pids = timelines.get_patient_ids()
def get_date_index(pid: int, date_obj: datetime.date) -> int:
patient = timelines.get_patient(pid)
i = 0
for day in patient.days:
if day.date > date_obj:
break
i += 1
if i == 0:
assert 0, f"should find correct date in timeline! {pid} {date_obj}"
else:
return i - 1
def convert_data(
og_pid: int, date: Union[str, datetime.date]
) -> Tuple[int, int]:
pid_index = bisect.bisect_left(all_original_pids, og_pid)
assert (
all_original_pids[pid_index] == og_pid
), f"original patient ID {og_pid} not in timeline"
ehr_ml_pid = all_ehr_ml_pids[pid_index]
date_obj = (
datetime.date.fromisoformat(date) if type(date) == str else date
)
assert type(date_obj) == datetime.date
date_index = get_date_index(ehr_ml_pid, date_obj)
return ehr_ml_pid, date_index
ehr_ml_patient_ids = []
day_indices = []
for og_pid, date in zip(original_patient_ids, dates):
ehr_ml_pid, date_index = convert_data(og_pid, date)
ehr_ml_patient_ids.append(ehr_ml_pid)
day_indices.append(date_index)
return np.array(ehr_ml_patient_ids), np.array(day_indices)
class DataLoader:
def __init__(
self,
dataset: PatientTimelineDataset,
threshold: int,
is_val: bool = False,
batch_size: int = 2000,
seed: int = 0,
day_dropout: float = 0,
code_dropout: float = 0,
device: Optional[torch.device] = None,
):
if device is None:
device = (
torch.device("cuda")
if torch.cuda.is_available()
else torch.device("cpu")
)
self.batch_queue: queue.Queue[Any] = queue.Queue(maxsize=20)
self.stop_event = threading.Event()
self.num_batches = dataset.num_batches(batch_size, is_val)
args = (is_val, batch_size, seed, threshold, day_dropout, code_dropout)
self.data_thread = threading.Thread(
target=prepare_batch_thread,
args=(dataset, args, self.batch_queue, self.stop_event, device,),
)
self.data_thread.start()
self.stopped = False
def __len__(self) -> int:
return self.num_batches
def __iter__(self) -> DataLoader:
return self
def __enter__(self) -> DataLoader:
return self
def __exit__(self, type: Any, value: Any, traceback: Any) -> None:
self.stop_event.set()
while not self.stopped:
item = self.batch_queue.get()
if item is None:
self.stopped = True
self.data_thread.join()
def __next__(self) -> Any:
next_item = self.batch_queue.get()
if next_item is None:
self.stopped = True
raise StopIteration
else:
return next_item
| 30.246073 | 81 | 0.645318 |
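# Illustrative usage (hedged sketch) of the DataLoader defined above: the
# dataset object, the threshold, and the model_step callable are assumptions.
def run_one_epoch(dataset, model_step, threshold=10, batch_size=2000):
    # DataLoader streams batches prepared on a background thread; __exit__
    # drains the queue and joins the thread when the block ends.
    with DataLoader(dataset, threshold, is_val=False, batch_size=batch_size) as loader:
        for batch in loader:
            model_step(batch)  # hypothetical training/eval step on one batch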
9b6ee386a94936ee7146e59671374ac4fdb1a960 | 41 | py | Python | Problems/Good names/task.py | gabrielizalo/jetbrains-academy-zookeeper | 467b43da3cb81f82987daf6b063eb2078d476d4f | [
"MIT"
] | null | null | null | Problems/Good names/task.py | gabrielizalo/jetbrains-academy-zookeeper | 467b43da3cb81f82987daf6b063eb2078d476d4f | [
"MIT"
] | null | null | null | Problems/Good names/task.py | gabrielizalo/jetbrains-academy-zookeeper | 467b43da3cb81f82987daf6b063eb2078d476d4f | [
"MIT"
] | null | null | null | model_score = 0.9875
client_name = "Bob"
| 13.666667 | 20 | 0.731707 |
94cf35ff5c2796677c4e0bab26d24a391a37a766 | 1,083 | py | Python | setup.py | taisei-project/python-zipfile-zstd | e596dd89bb35accd97727ae8bc9237aac269d8d1 | [
"MIT"
] | 1 | 2021-09-26T08:36:21.000Z | 2021-09-26T08:36:21.000Z | setup.py | taisei-project/python-zipfile-zstd | e596dd89bb35accd97727ae8bc9237aac269d8d1 | [
"MIT"
] | 3 | 2021-08-19T01:27:00.000Z | 2021-12-08T07:31:58.000Z | setup.py | taisei-project/python-zipfile-zstd | e596dd89bb35accd97727ae8bc9237aac269d8d1 | [
"MIT"
] | 1 | 2021-08-14T08:27:53.000Z | 2021-08-14T08:27:53.000Z |
import setuptools
with open('README.md', 'r', encoding='utf-8') as fh:
long_description = fh.read()
setuptools.setup(
name='zipfile-zstd',
version="0.0.3",
author='Andrei Alexeyev',
author_email='akari@taisei-project.org',
description='Monkey patch the standard zipfile module to enable Zstandard support',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/taisei-project/python-zipfile-zstd',
project_urls={
'Bug Tracker': 'https://github.com/taisei-project/python-zipfile-zstd/issues',
},
keywords='zip zipfile zstd zstandard',
classifiers=[
'Intended Audience :: Developers',
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Topic :: System :: Archiving',
'Topic :: System :: Archiving :: Compression',
],
packages=setuptools.find_packages(),
python_requires=">=3.6",
install_requires=[
'zstandard>=0.15.0',
],
)
| 30.942857 | 87 | 0.650046 |
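# Illustrative usage (hedged sketch): the package above monkey patches the
# standard zipfile module, so merely importing it should let ZipFile read
# Zstandard-compressed members. The archive path below is an assumption.
import zipfile
import zipfile_zstd  # noqa: F401  -- the import applies the patch
def list_members(path="archive.zip"):
    with zipfile.ZipFile(path) as zf:
        return zf.namelist()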
d5c2a59c3190a7551a287c94da3d043a01d4ca55 | 4,517 | py | Python | models/SelectionGAN/gaugan_pix2pixhd_guided/models/networks/normalization.py | xianjian-xie/pose-generation | ad0495e80c6fe1e7690fa8691f1eb11b4e9bca32 | [
"MIT"
] | 445 | 2019-04-14T17:48:11.000Z | 2022-03-20T11:53:30.000Z | models/SelectionGAN/gaugan_pix2pixhd_guided/models/networks/normalization.py | xianjian-xie/pose-generation | ad0495e80c6fe1e7690fa8691f1eb11b4e9bca32 | [
"MIT"
] | 17 | 2019-06-03T11:34:22.000Z | 2022-02-28T01:26:13.000Z | models/SelectionGAN/gaugan_pix2pixhd_guided/models/networks/normalization.py | xianjian-xie/pose-generation | ad0495e80c6fe1e7690fa8691f1eb11b4e9bca32 | [
"MIT"
] | 71 | 2019-04-16T01:55:39.000Z | 2022-03-22T05:09:59.000Z | """
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import re
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.networks.sync_batchnorm import SynchronizedBatchNorm2d
import torch.nn.utils.spectral_norm as spectral_norm
# Returns a function that creates a normalization function
# that does not condition on semantic map
def get_nonspade_norm_layer(opt, norm_type='instance'):
# helper function to get # output channels of the previous layer
def get_out_channel(layer):
if hasattr(layer, 'out_channels'):
return getattr(layer, 'out_channels')
return layer.weight.size(0)
# this function will be returned
def add_norm_layer(layer):
nonlocal norm_type
if norm_type.startswith('spectral'):
layer = spectral_norm(layer)
subnorm_type = norm_type[len('spectral'):]
else:
subnorm_type = norm_type
if subnorm_type == 'none' or len(subnorm_type) == 0:
return layer
# remove bias in the previous layer, which is meaningless
# since it has no effect after normalization
if getattr(layer, 'bias', None) is not None:
delattr(layer, 'bias')
layer.register_parameter('bias', None)
if subnorm_type == 'batch':
norm_layer = nn.BatchNorm2d(get_out_channel(layer), affine=True)
elif subnorm_type == 'sync_batch':
norm_layer = SynchronizedBatchNorm2d(get_out_channel(layer), affine=True)
elif subnorm_type == 'instance':
norm_layer = nn.InstanceNorm2d(get_out_channel(layer), affine=False)
else:
raise ValueError('normalization layer %s is not recognized' % subnorm_type)
return nn.Sequential(layer, norm_layer)
return add_norm_layer
# Creates SPADE normalization layer based on the given configuration
# SPADE consists of two steps. First, it normalizes the activations using
# your favorite normalization method, such as Batch Norm or Instance Norm.
# Second, it applies scale and bias to the normalized output, conditioned on
# the segmentation map.
# The format of |config_text| is spade(norm)(ks), where
# (norm) specifies the type of parameter-free normalization.
# (e.g. syncbatch, batch, instance)
# (ks) specifies the size of kernel in the SPADE module (e.g. 3x3)
# Example |config_text| will be spadesyncbatch3x3, or spadeinstance5x5.
# Also, the other arguments are
# |norm_nc|: the #channels of the normalized activations, hence the output dim of SPADE
# |label_nc|: the #channels of the input semantic map, hence the input dim of SPADE
class SPADE(nn.Module):
def __init__(self, config_text, norm_nc, label_nc):
super().__init__()
assert config_text.startswith('spade')
parsed = re.search('spade(\D+)(\d)x\d', config_text)
param_free_norm_type = str(parsed.group(1))
ks = int(parsed.group(2))
if param_free_norm_type == 'instance':
self.param_free_norm = nn.InstanceNorm2d(norm_nc, affine=False)
elif param_free_norm_type == 'syncbatch':
self.param_free_norm = SynchronizedBatchNorm2d(norm_nc, affine=False)
elif param_free_norm_type == 'batch':
self.param_free_norm = nn.BatchNorm2d(norm_nc, affine=False)
else:
raise ValueError('%s is not a recognized param-free norm type in SPADE'
% param_free_norm_type)
# The dimension of the intermediate embedding space. Yes, hardcoded.
nhidden = 128
pw = ks // 2
self.mlp_shared = nn.Sequential(
nn.Conv2d(label_nc, nhidden, kernel_size=ks, padding=pw),
nn.ReLU()
)
self.mlp_gamma = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw)
self.mlp_beta = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw)
def forward(self, x, segmap):
# Part 1. generate parameter-free normalized activations
normalized = self.param_free_norm(x)
# Part 2. produce scaling and bias conditioned on semantic map
segmap = F.interpolate(segmap, size=x.size()[2:], mode='nearest')
actv = self.mlp_shared(segmap)
gamma = self.mlp_gamma(actv)
beta = self.mlp_beta(actv)
# apply scale and bias
out = normalized * (1 + gamma) + beta
return out
| 39.973451 | 105 | 0.673234 |
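# Illustrative instantiation (hedged sketch) of the SPADE layer above, using
# the documented config format spade(norm)(ks). Channel counts and tensor
# shapes are assumptions chosen only to exercise the forward pass.
import torch
def spade_smoke_test():
    layer = SPADE("spadeinstance3x3", norm_nc=64, label_nc=35)
    x = torch.randn(2, 64, 32, 32)          # activations to normalize
    segmap = torch.randn(2, 35, 256, 256)   # semantic map, resized inside forward()
    return layer(x, segmap).shape           # expected torch.Size([2, 64, 32, 32])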
26374b63366148c7c1f283d9efe641ceb534d28d | 1,578 | py | Python | votesim/votemethods/tests/test_majority_judgment.py | johnh865/election_sim | b73b7e65f1bb22abb82cbe8442fcf02b0c20894e | [
"MIT"
] | 8 | 2019-10-21T23:24:51.000Z | 2021-09-14T03:04:59.000Z | votesim/votemethods/tests/test_majority_judgment.py | johnh865/election_sim | b73b7e65f1bb22abb82cbe8442fcf02b0c20894e | [
"MIT"
] | 2 | 2021-02-09T23:52:47.000Z | 2021-02-10T04:08:35.000Z | votesim/votemethods/tests/test_majority_judgment.py | johnh865/election_sim | b73b7e65f1bb22abb82cbe8442fcf02b0c20894e | [
"MIT"
] | 1 | 2019-10-21T23:32:18.000Z | 2019-10-21T23:32:18.000Z | # -*- coding: utf-8 -*-
import numpy as np
import votesim
# votesim.logSettings.start_debug()
from votesim.votemethods.score import majority_judgment
from votesim.models import spatial
def test_run():
for seed in range(50):
v = spatial.Voters(seed=seed)
v.add_random(20)
c = spatial.Candidates(v, seed=seed)
c.add_random(6)
e = spatial.Election(voters=v, candidates=c, seed=0,)
e.run('maj_judge')
# scores = e.output[0]['round_history']
scores = e.result.runner.output['round_history']
print('ratings for each elimination round')
print(scores)
print('winner=%s' % e.result.winners)
print('')
def test_case():
"""Test a case that failed during simple test benchmark.
After investigation it seems like this is a case where
all ballot scores are zero.
"""
seed = 0
numvoters = 101
cnum = 3
trial = 54
trialnum = 100
ndim = 2
stol = 0.25
base = 'linear'
name = 'test'
e = spatial.Election(None, None, seed=seed, name=name)
v = spatial.Voters(seed=seed, tol=stol, base=base)
v.add_random(numvoters, ndim=ndim)
cseed = seed * trialnum
c = spatial.Candidates(v, seed=trial + cseed)
c.add_random(cnum, sdev=1.5)
e.set_models(voters=v, candidates=c)
ballots = e.ballotgen.get_honest_ballots('maj_judge')
result = e.run('maj_judge')
assert np.all(result.ties == [0, 1, 2])
return
if __name__ == '__main__':
test_case()
test_run() | 25.047619 | 61 | 0.613435 |
f36beaa02a48cb6c68edf44e27f6c48b6f313d78 | 30,459 | py | Python | ravens/agents/transporter.py | YunchuZhang/Learning-to-use-different-tools-for-objects-rearrangement | 3759664cd77b5810834937c478a9a44ad36ac90c | [
"Apache-2.0"
] | 1 | 2022-03-20T19:03:02.000Z | 2022-03-20T19:03:02.000Z | ravens/agents/transporter.py | YunchuZhang/Learning-to-use-different-tools-for-objects-rearrangement | 3759664cd77b5810834937c478a9a44ad36ac90c | [
"Apache-2.0"
] | null | null | null | ravens/agents/transporter.py | YunchuZhang/Learning-to-use-different-tools-for-objects-rearrangement | 3759664cd77b5810834937c478a9a44ad36ac90c | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2021 The Ravens Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transporter Agent."""
import os
import numpy as np
from ravens.models.attention import Attention
from ravens.models.transport import Transport
from ravens.models.toolnet import Toolnet
from ravens.models.bcnet import BCnet
from ravens.models.transport_ablation import TransportPerPixelLoss
from ravens.models.transport_goal import TransportGoal
from ravens.tasks import cameras
from ravens.utils import utils
from PIL import Image
from ravens.utils.color_jitter import ColorJitter,adjust_hue
import tensorflow as tf
import matplotlib.pyplot as plt
import io
import cv2
def transfer(arg):
return tf.convert_to_tensor(arg)
class TransporterAgent:
"""Agent that uses Transporter Networks."""
def __init__(self, name, task, root_dir, n_rotations=36):
self.name = name
self.task = task
self.total_steps = 0
self.crop_size = 64
self.n_rotations = n_rotations
self.pix_size = 0.003125
self.pix_size = 0.005
self.pix_size = 0.0015625
self.pix_size = 0.0015
self.in_shape = (320, 160, 6)
self.in_shape = (640, 480, 6)
self.in_shape = (360, 720, 6)
self.cam_config = cameras.RealSenseD415.CONFIG
self.cam_config = cameras.Real.CONFIG
self.models_dir = os.path.join(root_dir, 'checkpoints', self.name)
self.bounds = np.array([[0.25, 0.75], [-0.5, 0.5], [0, 0.28]])
self.bounds = np.array([[-0.35, 0.45], [-1., 0.6], [0.4, 1]])
self.bounds = np.array([[-0.33, 0.42], [-0.5, 0.5], [0.4, 1]])
self.bounds = np.array([[-0.58, 0.5], [-0.325, 0.215], [0.4, 1]])
def get_image(self, obs, jitter = False):
"""Stack color and height images image."""
# if self.use_goal_image:
# colormap_g, heightmap_g = utils.get_fused_heightmap(goal, configs)
# goal_image = self.concatenate_c_h(colormap_g, heightmap_g)
# input_image = np.concatenate((input_image, goal_image), axis=2)
# assert input_image.shape[2] == 12, input_image.shape
# Get color and height maps from RGB-D images.
cmap, hmap = utils.get_fused_heightmap(
obs, self.cam_config, self.bounds, self.pix_size)
img = np.concatenate((cmap,
hmap[Ellipsis, None],
hmap[Ellipsis, None],
hmap[Ellipsis, None]), axis=2)
assert img.shape == self.in_shape, img.shape
_transform_dict = {'brightness':0.2, 'contrast':0.2, 'sharpness':0.2, 'color':0.2}
_color_jitter = ColorJitter(_transform_dict)
if jitter:
# import ipdb;ipdb.set_trace()
img_ = Image.fromarray(np.uint8(img[:,:,:3]))
img_ = _color_jitter(img_)
hue_factor = np.random.uniform(-0.5,0.5)
img_ = adjust_hue(img_,hue_factor)
img_ = np.array(img_)
# import ipdb;ipdb.set_trace()
return_img = np.concatenate((img_,
hmap[Ellipsis, None],
hmap[Ellipsis, None],
hmap[Ellipsis, None]), axis=2)
return return_img
return img
def get_sample(self, dataset, augment=True):
"""Get a dataset sample.
Args:
dataset: a ravens.Dataset (train or validation)
augment: if True, perform data augmentation.
Returns:
tuple of data for training:
(input_image, p0, p0_theta, p1, p1_theta)
tuple additionally includes (z, roll, pitch) if self.six_dof
if self.use_goal_image, then the goal image is stacked with the
current image in `input_image`. If splitting up current and goal
images is desired, it should be done outside this method.
"""
(obs, act, _, _), _ = dataset.sample()
img = self.get_image(obs)
# Get training labels from data sample.
p0_xyz, p0_xyzw = act['pose0']
p1_xyz, p1_xyzw = act['pose1']
# import ipdb;ipdb.set_trace()
p0 = utils.xyz_to_pix(p0_xyz, self.bounds, self.pix_size)
# use positive??
p0_theta = np.float32(utils.quatXYZW_to_eulerXYZ(p0_xyzw)[2])
p1 = utils.xyz_to_pix(p1_xyz, self.bounds, self.pix_size)
p1_theta = np.float32(utils.quatXYZW_to_eulerXYZ(p1_xyzw)[2])
p1_theta = p1_theta - p0_theta
p0_theta = 0
# Data augmentation.
if augment:
img, _, (p0, p1), _ = utils.perturb(img, [p0, p1])
return img, p0, p0_theta, p1, p1_theta
def plot_to_tensor(self,feature,figsize=(3.2,1.6)):
draw_feat = feature.numpy()
cmap = plt.get_cmap('inferno')
figure = plt.figure(figsize=figsize)
plt.imshow(draw_feat[0],cmap = cmap)
buf = io.BytesIO()
plt.savefig(buf, format='png')
plt.close(figure)
buf.seek(0)
image = tf.image.decode_png(buf.getvalue(), channels=4)
# Add the batch dimension
image = tf.expand_dims(image, 0)
return image
def train(self, dataset, writer=None):
"""Train on a dataset sample for 1 iteration.
Args:
dataset: a ravens.Dataset.
writer: a TF summary writer (for tensorboard).
"""
h,w,c = self.in_shape
tf.keras.backend.set_learning_phase(1)
img, p0, p0_theta, p1, p1_theta = self.get_sample(dataset)
# import ipdb;ipdb.set_trace()
# Get training losses.
step = self.total_steps + 1
loss0,feature0 = self.attention.train(img, p0, p0_theta)
if isinstance(self.transport, Attention):
loss1 = self.transport.train(img, p1, p1_theta)
else:
loss1, feature1 = self.transport.train(img, p0, p1, p1_theta)
with writer.as_default():
sc = tf.summary.scalar
rgb = tf.reshape(img[:,:,:3],[1, h, w,3])
rgb = tf.cast(rgb, dtype=tf.uint8)
depth = tf.reshape(img[:,:,3],[1, h, w, 1])
depth = (depth - tf.reduce_min(depth))/(tf.reduce_max(depth) - tf.reduce_min(depth))
feature0 = (feature0 - tf.reduce_min(feature0))/(tf.reduce_max(feature0) - tf.reduce_min(feature0))
pick_feat = tf.reshape(feature0,[1, h, w,1])
# tf.unravel_index(indices, dims, name=None)
angle = tf.math.argmax(tf.reshape(feature1,(h*w,36)),axis=0)
place_feat = tf.reshape(feature1[:,:,:,tf.math.argmax(angle)],[1,h,w,1])
place_feat = (place_feat - tf.reduce_min(place_feat))/(tf.reduce_max(place_feat) - tf.reduce_min(place_feat))
if step %100 == 0:
tf.summary.image("rgb", rgb, step=step)
tf.summary.image("depth", depth, step=step)
# tf.summary.image("pick_feat_color", self.plot_to_tensor(pick_feat,figsize=(12.8,6.4)), step=step)
# tf.summary.image("place_feat_color", self.plot_to_tensor(place_feat,figsize=(12.8,6.4)), step=step)
tf.summary.image("pick_feat", pick_feat, step=step)
tf.summary.image("place_feat",place_feat, step=step)
# rgb_pick = rgb
# rgb_place = rgb
# f0_min = tf.reduce_min(pick_feat)
# f0_max = tf.reduce_max(pick_feat)
# f1_min = tf.reduce_min(place_feat)
# f1_max = tf.reduce_max(place_feat)
# new_tensor = tf.Variable(rgb_pick)
# new_tensor[:,:,:,1].assign(rgb_pick[:,:,:,1]+tf.squeeze(tf.cast((pick_feat-f0_min)/(f0_max-f0_min)*255,dtype=tf.uint8),-1))
# rgb_pick = transfer(new_tensor)
# new_tensor = tf.Variable(rgb_place)
# new_tensor[:,:,:,1].assign(rgb_place[:,:,:,1]+tf.squeeze(tf.cast((place_feat-f1_min)/(f1_max-f1_min)*255,dtype=tf.uint8),-1))
# rgb_place = transfer(new_tensor)
# rgb_pick[:,:,:,1] += tf.squeeze(tf.cast((pick_feat-f0_min)/(f0_max-f0_min)*255,dtype=tf.uint8),-1)
# rgb_place[:,:,:,1] += tf.squeeze(tf,cast((place_feat-f1_min)/(f1_max-f1_min)*255,dtype=tf.uint8),-1)
# tf.summary.image("rgb_pick", rgb_pick, step=step)
# tf.summary.image("rgb_place", rgb_place, step=step)
# tf.summary.image("norm_pick", (pick_feat-f0_min)/(f0_max-f0_min), step=step)
# tf.summary.image("norm_place", (place_feat-f1_min)/(f1_max-f1_min), step=step)
sc('train_loss/attention', loss0, step)
sc('train_loss/transport', loss1, step)
print(f'Train Iter: {step} Loss: {loss0:.4f} {loss1:.4f}')
self.total_steps = step
def validate(self, dataset, writer=None): # pylint: disable=unused-argument
"""Test on a validation dataset for 10 iterations."""
print('Skipping validation.')
def test(self, obs, cur=0, info=None, goal=None, vis=False, p = None): # pylint: disable=unused-argument
"""Run inference and return best action given visual observations."""
tf.keras.backend.set_learning_phase(0)
# Get heightmap from RGB-D images.
img = self.get_image(obs)
rgb = img[:,:,:3][:,:,::-1]
rgb = np.array(rgb, dtype=np.uint8)
depth = img[:,:,3]
depth = (depth - np.min(depth))/(np.max(depth) - np.min(depth))
depth = np.uint8(depth*255)
# Attention model forward pass.
pick_conf = self.attention.forward(img)
argmax = np.argmax(pick_conf)
argmax = np.unravel_index(argmax, shape=pick_conf.shape)
p0_pix = argmax[:2]
p0_theta = argmax[2] * (2 * np.pi / pick_conf.shape[2])
print("predict p0_theta:{}",p0_theta)
if p != None:
p0_pix = p
# Transport model forward pass.
place_conf = self.transport.forward(img, p0_pix)
argmax = np.argmax(place_conf)
argmax = np.unravel_index(argmax, shape=place_conf.shape)
p1_pix = argmax[:2]
p1_theta = argmax[2] * (2 * np.pi / place_conf.shape[2])
print("predict p1_theta:{}",p1_theta*180/np.pi)
if vis:
pick_conf = (pick_conf - pick_conf.min())/(pick_conf.max()-pick_conf.min())
pick_conf = np.uint8(pick_conf*255)
place_conf = place_conf[:,:,argmax[2]]
place_conf = (place_conf - place_conf.min())/(place_conf.max()-place_conf.min())
place_conf = np.uint8(place_conf*255)
rgb = cv2.circle(rgb, p0_pix[::-1], 4, (0,0,255), 1)
# rgb = cv2.circle(rgb, [230,265], 4, (0,0,255), 1)
rgb = cv2.circle(rgb, p1_pix[::-1], 4, (0,255,0), 1)
angle = str(int(p1_theta*180/np.pi))
cv2.imwrite("logs/pick_conf{}.png".format(cur),pick_conf)
cv2.imwrite("logs/place_conf{}_{}.png".format(cur,angle),place_conf)
cv2.imwrite("logs/rgb{}.png".format(cur),rgb)
cv2.imwrite("logs/depth{}.png".format(cur),depth)
# Pixels to end effector poses.
hmap = img[:, :, 3]
p0_xyz = utils.pix_to_xyz(p0_pix, hmap, self.bounds, self.pix_size)
p1_xyz = utils.pix_to_xyz(p1_pix, hmap, self.bounds, self.pix_size)
p0_xyzw = utils.eulerXYZ_to_quatXYZW((0, 0, p0_theta))
p1_xyzw = utils.eulerXYZ_to_quatXYZW((0, 0, p1_theta))
return {
'pose0': (np.asarray(p0_xyz), np.asarray(p0_xyzw)),
'pose1': (np.asarray(p1_xyz), np.asarray(p1_xyzw))
}
def act(self, obs, info=None, goal=None): # pylint: disable=unused-argument
"""Run inference and return best action given visual observations."""
tf.keras.backend.set_learning_phase(0)
# Get heightmap from RGB-D images.
img = self.get_image(obs)
# Attention model forward pass.
pick_conf = self.attention.forward(img)
argmax = np.argmax(pick_conf)
argmax = np.unravel_index(argmax, shape=pick_conf.shape)
p0_pix = argmax[:2]
p0_theta = argmax[2] * (2 * np.pi / pick_conf.shape[2])
# Transport model forward pass.
place_conf = self.transport.forward(img, p0_pix)
argmax = np.argmax(place_conf)
argmax = np.unravel_index(argmax, shape=place_conf.shape)
p1_pix = argmax[:2]
p1_theta = argmax[2] * (2 * np.pi / place_conf.shape[2])
# Pixels to end effector poses.
hmap = img[:, :, 3]
p0_xyz = utils.pix_to_xyz(p0_pix, hmap, self.bounds, self.pix_size)
p1_xyz = utils.pix_to_xyz(p1_pix, hmap, self.bounds, self.pix_size)
p0_xyzw = utils.eulerXYZ_to_quatXYZW((0, 0, -p0_theta))
p1_xyzw = utils.eulerXYZ_to_quatXYZW((0, 0, -p1_theta))
return {
'pose0': (np.asarray(p0_xyz), np.asarray(p0_xyzw)),
'pose1': (np.asarray(p1_xyz), np.asarray(p1_xyzw))
}
# TODO(andyzeng) cleanup goal-conditioned model.
# Make a goal image if needed, and for consistency stack with input.
# if self.use_goal_image:
# cmap_g, hmap_g = utils.get_fused_heightmap(goal, self.cam_config)
# goal_image = self.concatenate_c_h(colormap_g, heightmap_g)
# input_image = np.concatenate((input_image, goal_image), axis=2)
# assert input_image.shape[2] == 12, input_image.shape
# if self.use_goal_image:
# half = int(input_image.shape[2] / 2)
# input_only = input_image[:, :, :half] # ignore goal portion
# pick_conf = self.attention.forward(input_only)
# else:
# if isinstance(self.transport, TransportGoal):
# half = int(input_image.shape[2] / 2)
# img_curr = input_image[:, :, :half]
# img_goal = input_image[:, :, half:]
# place_conf = self.transport.forward(img_curr, img_goal, p0_pix)
def load(self, n_iter):
"""Load pre-trained models."""
print(f'Loading pre-trained model at {n_iter} iterations.')
attention_fname = 'attention-ckpt-%d.h5' % n_iter
transport_fname = 'transport-ckpt-%d.h5' % n_iter
attention_fname = os.path.join(self.models_dir, attention_fname)
transport_fname = os.path.join(self.models_dir, transport_fname)
self.attention.load(attention_fname)
self.transport.load(transport_fname)
self.total_steps = n_iter
def save(self):
"""Save models."""
if not tf.io.gfile.exists(self.models_dir):
tf.io.gfile.makedirs(self.models_dir)
attention_fname = 'attention-ckpt-%d.h5' % self.total_steps
transport_fname = 'transport-ckpt-%d.h5' % self.total_steps
attention_fname = os.path.join(self.models_dir, attention_fname)
transport_fname = os.path.join(self.models_dir, transport_fname)
self.attention.save(attention_fname)
self.transport.save(transport_fname)
#-----------------------------------------------------------------------------
# Other Transporter Variants
#-----------------------------------------------------------------------------
class ToolTransporterAgent(TransporterAgent):
def __init__(self, name, task, n_rotations=36):
super().__init__(name, task, n_rotations)
self.tool_num = 3
self.attention = Toolnet(
in_shape=self.in_shape,
n_rotations=1,
preprocess=utils.preprocess,
tool_num=self.tool_num)
self.transport = Transport(
in_shape=self.in_shape,
n_rotations=self.n_rotations,
crop_size=self.crop_size,
preprocess=utils.preprocess)
def get_image(self, obs):
"""Stack color and height images image."""
# if self.use_goal_image:
# colormap_g, heightmap_g = utils.get_fused_heightmap(goal, configs)
# goal_image = self.concatenate_c_h(colormap_g, heightmap_g)
# input_image = np.concatenate((input_image, goal_image), axis=2)
# assert input_image.shape[2] == 12, input_image.shape
# Get color and height maps from RGB-D images.
cmap, hmap = utils.get_fused_heightmap(
obs, self.cam_config, self.bounds, self.pix_size)
img = np.concatenate((cmap,
hmap[Ellipsis, None],
hmap[Ellipsis, None],
hmap[Ellipsis, None]), axis=2)
assert img.shape == self.in_shape, img.shape
return img
def get_sample(self, dataset, augment=True):
"""Get a dataset sample.
Args:
dataset: a ravens.Dataset (train or validation)
augment: if True, perform data augmentation.
Returns:
tuple of data for training:
(input_image, p0, p0_theta, p1, p1_theta)
tuple additionally includes (z, roll, pitch) if self.six_dof
if self.use_goal_image, then the goal image is stacked with the
current image in `input_image`. If splitting up current and goal
images is desired, it should be done outside this method.
"""
(obs, act, _, _), _ = dataset.sample()
img = self.get_image(obs)
# Get training labels from data sample.
p0_xyz, p0_xyzw = act['pose0']
p1_xyz, p1_xyzw = act['pose1']
tool_id = act['tid']
is_mix = act['is_mix']
# import ipdb;ipdb.set_trace()
p0 = utils.xyz_to_pix(p0_xyz, self.bounds, self.pix_size)
# use positive??
p0_theta = np.float32(utils.quatXYZW_to_eulerXYZ(p0_xyzw)[2])
p1 = utils.xyz_to_pix(p1_xyz, self.bounds, self.pix_size)
p1_theta = np.float32(utils.quatXYZW_to_eulerXYZ(p1_xyzw)[2])
p1_theta = p1_theta - p0_theta
p0_theta = 0
# Data augmentation.
if augment:
img, _, (p0, p1), _ = utils.perturb(img, [p0, p1])
return img, p0, p0_theta, p1, p1_theta, tool_id, is_mix
def plot_to_tensor(self,feature,figsize=(3.2,1.6)):
draw_feat = feature.numpy()
cmap = plt.get_cmap('inferno')
figure = plt.figure(figsize=figsize)
plt.imshow(draw_feat[0],cmap = cmap)
buf = io.BytesIO()
plt.savefig(buf, format='png')
plt.close(figure)
buf.seek(0)
image = tf.image.decode_png(buf.getvalue(), channels=4)
# Add the batch dimension
image = tf.expand_dims(image, 0)
return image
def train(self, dataset, writer=None):
"""Train on a dataset sample for 1 iteration.
Args:
dataset: a ravens.Dataset.
writer: a TF summary writer (for tensorboard).
"""
h,w,c = self.in_shape
tf.keras.backend.set_learning_phase(1)
img, p0, p0_theta, p1, p1_theta, tool_id, is_mix = self.get_sample(dataset)
# import ipdb;ipdb.set_trace()
# Get training losses.
step = self.total_steps + 1
loss0,feature0 = self.attention.train(img, p0, p0_theta, tool_id = tool_id, is_mix = is_mix)
if isinstance(self.transport, Attention):
loss1 = self.transport.train(img, p1, p1_theta)
else:
loss1, feature1 = self.transport.train(img, p0, p1, p1_theta)
with writer.as_default():
sc = tf.summary.scalar
rgb = tf.reshape(img[:,:,:3],[1, h, w,3])
rgb = tf.cast(rgb, dtype=tf.uint8)
depth = tf.reshape(img[:,:,3],[1, h, w, 1])
depth = (depth - tf.reduce_min(depth))/(tf.reduce_max(depth) - tf.reduce_min(depth))
# visualize pick feat
assert (self.tool_num == feature0.shape[0])
pick_feats = []
feature0 = (feature0 - tf.reduce_min(feature0))/(tf.reduce_max(feature0) - tf.reduce_min(feature0))
for tid in range(self.tool_num):
feat = feature0[tid:tid+1]
pick_feat = tf.reshape(feat,[1, h, w,1])
pick_feats.append(pick_feat)
# visualize place feat
angle = tf.math.argmax(tf.reshape(feature1,(h*w,36)),axis=0)
place_feat = tf.reshape(feature1[:,:,:,tf.math.argmax(angle)],[1,h,w,1])
place_feat = (place_feat - tf.reduce_min(place_feat))/(tf.reduce_max(place_feat) - tf.reduce_min(place_feat))
if step %100 == 0:
tf.summary.image("rgb", rgb, step=step)
tf.summary.image("depth", depth, step=step)
for tid in range(self.tool_num):
tf.summary.image("pick_feat_tool{0}".format(tid), pick_feats[tid], step=step)
tf.summary.image("place_feat",place_feat, step=step)
sc('train_loss/attention', loss0, step)
sc('train_loss/transport', loss1, step)
print(f'Train Iter: {step} Loss: {loss0:.4f} {loss1:.4f}')
self.total_steps = step
def test(self, obs, cur=0, info=None, goal=None, vis=False): # pylint: disable=unused-argument
"""Run inference and return best action given visual observations."""
tf.keras.backend.set_learning_phase(0)
# Get heightmap from RGB-D images.
img = self.get_image(obs)
rgb = img[:,:,:3][:,:,::-1]
rgb = np.array(rgb, dtype=np.uint8)
depth = img[:,:,3]
depth = (depth - np.min(depth))/(np.max(depth) - np.min(depth))
depth = np.uint8(depth*255)
# Attention model forward pass.
pick_conf = self.attention.forward(img) # original: 360,720,1. ours: 3,360,720
# import ipdb;ipdb.set_trace()
do_integrate = False
if do_integrate:
pass
else:
# argmax = np.argmax(pick_conf[:,:,:400])
argmax = np.argmax(pick_conf)
argmax = np.unravel_index(argmax, shape=pick_conf.shape)
# argmax = np.unravel_index(argmax, shape=(3,360,400))
tool_id = argmax[0]
p0_pix = argmax[1:]
p0_theta = 0.0
print("predict p0_theta:{0}, picked tool:{1}".format(p0_theta, tool_id))
# if p != None:
# p0_pix = p
# Transport model forward pass.
place_conf = self.transport.forward(img, p0_pix)
argmax = np.argmax(place_conf)
argmax = np.unravel_index(argmax, shape=place_conf.shape)
p1_pix = argmax[:2]
p1_theta = argmax[2] * (2 * np.pi / place_conf.shape[2])
print("predict p1_theta:{}",p1_theta*180/np.pi)
if vis:
pick_confs = []
pick_conf = (pick_conf - pick_conf.min())/(pick_conf.max()-pick_conf.min()) # single max out of 3 tools
for tid in range(self.tool_num):
#pick_conf_i = (pick_conf[tid] - pick_conf[tid].min())/(pick_conf[tid].max()-pick_conf[tid].min())
pick_conf_i = np.uint8(pick_conf[tid]*255)
pick_conf_i = np.expand_dims(pick_conf_i, axis=-1)
pick_confs.append(pick_conf_i)
place_conf = place_conf[:,:,argmax[2]]
place_conf = (place_conf - place_conf.min())/(place_conf.max()-place_conf.min())
place_conf = np.uint8(place_conf*255)
rgb = cv2.circle(rgb, p0_pix[::-1], 4, (0,0,255), 1)
rgb = cv2.circle(rgb, p1_pix[::-1], 4, (0,255,0), 1)
angle = str(int(p1_theta*180/np.pi))
for tid in range(self.tool_num):
cv2.imwrite("logs/pick_conf{}_tool{}.png".format(cur, tid),pick_confs[tid])
cv2.imwrite("logs/place_conf{}_{}.png".format(cur,angle),place_conf)
cv2.imwrite("logs/rgb{}.png".format(cur),rgb)
cv2.imwrite("logs/depth{}.png".format(cur),depth)
# Pixels to end effector poses.
hmap = img[:, :, 3]
p0_xyz = utils.pix_to_xyz(p0_pix, hmap, self.bounds, self.pix_size)
p1_xyz = utils.pix_to_xyz(p1_pix, hmap, self.bounds, self.pix_size)
p0_xyzw = utils.eulerXYZ_to_quatXYZW((0, 0, p0_theta))
p1_xyzw = utils.eulerXYZ_to_quatXYZW((0, 0, p1_theta))
# import ipdb; ipdb.set_trace()
return {
'pose0': (np.asarray(p0_xyz), np.asarray(p0_xyzw)),
'pose1': (np.asarray(p1_xyz), np.asarray(p1_xyzw)),
"tool_id": tool_id
}
class BCTransporterAgent(TransporterAgent):
def __init__(self, name, task, n_rotations=36):
super().__init__(name, task, n_rotations)
self.tool_num = 3
self.attention = BCnet(
in_shape=self.in_shape,
n_rotations=1,
preprocess=utils.preprocess,
tool_num=self.tool_num)
def get_image(self, obs):
"""Stack color and height images image."""
# if self.use_goal_image:
# colormap_g, heightmap_g = utils.get_fused_heightmap(goal, configs)
# goal_image = self.concatenate_c_h(colormap_g, heightmap_g)
# input_image = np.concatenate((input_image, goal_image), axis=2)
# assert input_image.shape[2] == 12, input_image.shape
# Get color and height maps from RGB-D images.
cmap, hmap = utils.get_fused_heightmap(
obs, self.cam_config, self.bounds, self.pix_size)
img = np.concatenate((cmap,
hmap[Ellipsis, None],
hmap[Ellipsis, None],
hmap[Ellipsis, None]), axis=2)
assert img.shape == self.in_shape, img.shape
return img
def get_sample(self, dataset, augment=True):
"""Get a dataset sample.
Args:
dataset: a ravens.Dataset (train or validation)
augment: if True, perform data augmentation.
Returns:
tuple of data for training:
(input_image, p0, p0_theta, p1, p1_theta)
tuple additionally includes (z, roll, pitch) if self.six_dof
if self.use_goal_image, then the goal image is stacked with the
current image in `input_image`. If splitting up current and goal
images is desired, it should be done outside this method.
"""
(obs, act, _, _), _ = dataset.sample()
img = self.get_image(obs)
# Get training labels from data sample.
p0_xyz, p0_xyzw = act['pose0']
p1_xyz, p1_xyzw = act['pose1']
tool_id = act['tid']
is_mix = act['is_mix']
# import ipdb;ipdb.set_trace()
p0 = utils.xyz_to_pix(p0_xyz, self.bounds, self.pix_size)
# use positive??
p0_theta = np.float32(utils.quatXYZW_to_eulerXYZ(p0_xyzw)[2])
p1 = utils.xyz_to_pix(p1_xyz, self.bounds, self.pix_size)
p1_theta = np.float32(utils.quatXYZW_to_eulerXYZ(p1_xyzw)[2])
p1_theta = p1_theta - p0_theta
p0_theta = 0
# Data augmentation.
if augment:
img, _, (p0, p1), _ = utils.perturb(img, [p0, p1])
p0_xyz = utils.pix_to_xyz(p0, img[:, :, 3], self.bounds, self.pix_size)
p1_xyz = utils.pix_to_xyz(p1, img[:, :, 3], self.bounds, self.pix_size)
return img, p0_xyz, p0_theta, p1_xyz, p1_theta, tool_id, is_mix
def plot_to_tensor(self,feature,figsize=(3.2,1.6)):
draw_feat = feature.numpy()
cmap = plt.get_cmap('inferno')
figure = plt.figure(figsize=figsize)
plt.imshow(draw_feat[0],cmap = cmap)
buf = io.BytesIO()
plt.savefig(buf, format='png')
plt.close(figure)
buf.seek(0)
image = tf.image.decode_png(buf.getvalue(), channels=4)
# Add the batch dimension
image = tf.expand_dims(image, 0)
return image
def train(self, dataset, writer=None):
"""Train on a dataset sample for 1 iteration.
Args:
dataset: a ravens.Dataset.
writer: a TF summary writer (for tensorboard).
"""
h,w,c = self.in_shape
tf.keras.backend.set_learning_phase(1)
img, p0, p0_theta, p1, p1_theta, tool_id, is_mix = self.get_sample(dataset)
# Get training losses.
step = self.total_steps + 1
loss0,feature0 = self.attention.train(img, p0, p0_theta, p1, p1_theta, tool_id = tool_id, is_mix = is_mix)
with writer.as_default():
sc = tf.summary.scalar
if step %100 == 0:
sc('train_loss/attention', loss0, step)
print(f'Train Iter: {step} Loss: {loss0:.4f} ')
self.total_steps = step
def test(self, obs, cur=0, info=None, goal=None, vis=False): # pylint: disable=unused-argument
"""Run inference and return best action given visual observations."""
tf.keras.backend.set_learning_phase(0)
# Get heightmap from RGB-D images.
img = self.get_image(obs)
rgb = img[:,:,:3][:,:,::-1]
rgb = np.array(rgb, dtype=np.uint8)
depth = img[:,:,3]
depth = (depth - np.min(depth))/(np.max(depth) - np.min(depth))
depth = np.uint8(depth*255)
# Attention model forward pass.
# outputs = self.attention.forward(img) # original: 360,720,1. ours: 3,360,720
# outputs = outputs.numpy()[0]
# # import ipdb;ipdb.set_trace()
# p0_pix = (outputs[:2]).astype(int)
# p1_pix = (outputs[2:4]).astype(int)
# tool_id = np.argmax(outputs[5:7])
# p1_theta = 0
# p0_theta = 0
outputs = self.attention.forward(img) # original: 360,720,1. ours: 3,360,720
outputs = outputs.numpy()[0]
# import ipdb;ipdb.set_trace()
p0_xyz = (outputs[:3]).astype(int)
p1_xyz = (outputs[3:6]).astype(int)
tool_id = np.argmax(outputs[6:9])
p1_theta = 0
p0_theta = 0
p0_pix = utils.xyz_to_pix(p0_xyz, self.bounds, self.pix_size)
p1_pix = utils.xyz_to_pix(p1_xyz, self.bounds, self.pix_size)
print("predict p0_theta:{0}, picked tool:{1}".format(p0_theta, tool_id))
print("predict p1_theta:{}",p1_theta*180/np.pi)
if vis:
rgb = cv2.circle(rgb, p0_pix[::-1], 4, (0,0,255), 1)
rgb = cv2.circle(rgb, p1_pix[::-1], 4, (0,255,0), 1)
angle = str(int(p1_theta*180/np.pi))
cv2.imwrite("logs/rgb{}.png".format(cur),rgb)
cv2.imwrite("logs/depth{}.png".format(cur),depth)
# Pixels to end effector poses.
hmap = img[:, :, 3]
p0_xyz = utils.pix_to_xyz(p0_pix, hmap, self.bounds, self.pix_size)
p1_xyz = utils.pix_to_xyz(p1_pix, hmap, self.bounds, self.pix_size)
p0_xyzw = utils.eulerXYZ_to_quatXYZW((0, 0, p0_theta))
p1_xyzw = utils.eulerXYZ_to_quatXYZW((0, 0, p1_theta))
# import ipdb; ipdb.set_trace()
return {
'pose0': (np.asarray(p0_xyz), np.asarray(p0_xyzw)),
'pose1': (np.asarray(p1_xyz), np.asarray(p1_xyzw)),
}
def load(self, n_iter):
"""Load pre-trained models."""
print(f'Loading pre-trained model at {n_iter} iterations.')
attention_fname = 'attention-ckpt-%d.h5' % n_iter
# transport_fname = 'transport-ckpt-%d.h5' % n_iter
attention_fname = os.path.join(self.models_dir, attention_fname)
# transport_fname = os.path.join(self.models_dir, transport_fname)
self.attention.load(attention_fname)
# self.transport.load(transport_fname)
self.total_steps = n_iter
def save(self):
"""Save models."""
if not tf.io.gfile.exists(self.models_dir):
tf.io.gfile.makedirs(self.models_dir)
attention_fname = 'attention-ckpt-%d.h5' % self.total_steps
# transport_fname = 'transport-ckpt-%d.h5' % self.total_steps
attention_fname = os.path.join(self.models_dir, attention_fname)
# transport_fname = os.path.join(self.models_dir, transport_fname)
self.attention.save(attention_fname)
# self.transport.save(transport_fname)
class OriginalTransporterAgent(TransporterAgent):
def __init__(self, name, task, n_rotations=36):
super().__init__(name, task, n_rotations)
self.attention = Attention(
in_shape=self.in_shape,
n_rotations=1,
preprocess=utils.preprocess)
self.transport = Transport(
in_shape=self.in_shape,
n_rotations=self.n_rotations,
crop_size=self.crop_size,
preprocess=utils.preprocess)
class NoTransportTransporterAgent(TransporterAgent):
def __init__(self, name, task, n_rotations=36):
super().__init__(name, task, n_rotations)
self.attention = Attention(
in_shape=self.in_shape,
n_rotations=1,
preprocess=utils.preprocess)
self.transport = Attention(
in_shape=self.in_shape,
n_rotations=self.n_rotations,
preprocess=utils.preprocess)
class PerPixelLossTransporterAgent(TransporterAgent):
def __init__(self, name, task, n_rotations=36):
super().__init__(name, task, n_rotations)
self.attention = Attention(
in_shape=self.in_shape,
n_rotations=1,
preprocess=utils.preprocess)
self.transport = TransportPerPixelLoss(
in_shape=self.in_shape,
n_rotations=self.n_rotations,
crop_size=self.crop_size,
preprocess=utils.preprocess)
class GoalTransporterAgent(TransporterAgent):
"""Goal-Conditioned Transporters supporting a separate goal FCN."""
def __init__(self, name, task, n_rotations=36):
super().__init__(name, task, n_rotations)
self.attention = Attention(
in_shape=self.in_shape,
n_rotations=1,
preprocess=utils.preprocess)
self.transport = TransportGoal(
in_shape=self.in_shape,
n_rotations=self.n_rotations,
crop_size=self.crop_size,
preprocess=utils.preprocess)
class GoalNaiveTransporterAgent(TransporterAgent):
"""Naive version which stacks current and goal images through normal Transport."""
def __init__(self, name, task, n_rotations=36):
super().__init__(name, task, n_rotations)
# Stack the goal image for the vanilla Transport module.
t_shape = (self.in_shape[0], self.in_shape[1],
int(self.in_shape[2] * 2))
self.attention = Attention(
in_shape=self.in_shape,
n_rotations=1,
preprocess=utils.preprocess)
self.transport = Transport(
in_shape=t_shape,
n_rotations=self.n_rotations,
crop_size=self.crop_size,
preprocess=utils.preprocess,
per_pixel_loss=False,
use_goal_image=True)
| 35.792009 | 130 | 0.690568 |
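# Illustrative training loop (hedged sketch) around the agents defined above.
# The dataset object, log directory, and iteration count are assumptions.
import tensorflow as tf
def train_agent(agent, dataset, num_iters=1000, log_dir="logs/train"):
    writer = tf.summary.create_file_writer(log_dir)
    for _ in range(num_iters):
        # one call = one sampled (image, pick, place) example and one gradient
        # step on the attention and transport models
        agent.train(dataset, writer=writer)
    agent.save()  # writes attention/transport checkpoints under models_dir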
910f8c52ad25f046fbcb50a7f52b66183610a798 | 1,434 | py | Python | tests/terraform/checks/resource/gcp/test_GoogleSubnetworkPrivateGoogleEnabled.py | pmalkki/checkov | b6cdf386dd976fe27c16fed6d550756a678a5d7b | [
"Apache-2.0"
] | 1 | 2022-02-20T21:20:39.000Z | 2022-02-20T21:20:39.000Z | tests/terraform/checks/resource/gcp/test_GoogleSubnetworkPrivateGoogleEnabled.py | pmalkki/checkov | b6cdf386dd976fe27c16fed6d550756a678a5d7b | [
"Apache-2.0"
] | 3 | 2022-03-07T20:37:31.000Z | 2022-03-21T20:20:14.000Z | tests/terraform/checks/resource/gcp/test_GoogleSubnetworkPrivateGoogleEnabled.py | pmalkki/checkov | b6cdf386dd976fe27c16fed6d550756a678a5d7b | [
"Apache-2.0"
] | null | null | null | import unittest
from pathlib import Path
from checkov.runner_filter import RunnerFilter
from checkov.terraform.checks.resource.gcp.GoogleSubnetworkPrivateGoogleEnabled import check
from checkov.terraform.runner import Runner
class TestGoogleSubnetworkPrivateGoogleEnabled(unittest.TestCase):
def test(self):
# given
test_files_dir = Path(__file__).parent / "example_GoogleSubnetworkPrivateGoogleEnabled"
# when
report = Runner().run(root_folder=str(test_files_dir), runner_filter=RunnerFilter(checks=[check.id]))
# then
summary = report.get_summary()
passing_resources = {
"google_compute_subnetwork.pass",
}
failing_resources = {
"google_compute_subnetwork.fail",
"google_compute_subnetwork.fail2",
}
passed_check_resources = {c.resource for c in report.passed_checks}
failed_check_resources = {c.resource for c in report.failed_checks}
self.assertEqual(summary["passed"], 1)
self.assertEqual(summary["failed"], 2)
self.assertEqual(summary["skipped"], 0)
self.assertEqual(summary["parsing_errors"], 0)
self.assertEqual(summary["resource_count"], 3) # 1 unknown
self.assertEqual(passing_resources, passed_check_resources)
self.assertEqual(failing_resources, failed_check_resources)
if __name__ == "__main__":
unittest.main()
| 32.590909 | 109 | 0.702232 |
a0592ef239ba2822dfec2cfb1cd8f5bf9a6218d4 | 1,340 | py | Python | savu/plugins/ptychography/dummy_ptycho.py | nghia-vo/Savu | 1cf7343c141224643b2e1fb2f05e74448bc4fd58 | [
"Apache-2.0"
] | null | null | null | savu/plugins/ptychography/dummy_ptycho.py | nghia-vo/Savu | 1cf7343c141224643b2e1fb2f05e74448bc4fd58 | [
"Apache-2.0"
] | null | null | null | savu/plugins/ptychography/dummy_ptycho.py | nghia-vo/Savu | 1cf7343c141224643b2e1fb2f05e74448bc4fd58 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: dummy_ptycho
:platform: Unix
:synopsis: A plugin to fit peaks
.. moduleauthor:: Aaron Parsons <scientificsoftware@diamond.ac.uk>
"""
from savu.plugins.utils import register_plugin
from savu.plugins.ptychography.base_ptycho import BasePtycho
import numpy as np
@register_plugin
class DummyPtycho(BasePtycho):
def __init__(self):
super(DummyPtycho, self).__init__("DummyPtycho")
def process_frames(self, data):
data = data[0]
probe = data[0]
#print "probe is "+str(probe.shape)
object_transmission = np.random.random(self.obj_shape).squeeze()
positions = self.get_positions()
return [probe, object_transmission, positions]#] add fourier error, realspace error
| 32.682927 | 91 | 0.730597 |
9ef78092b6aa347df80690b88a1894d400542a97 | 1,416 | py | Python | app/tests/test_pitch.py | shizukane/pitch | 0793278ef43e55a10623e006eae8b12249dd6039 | [
"MIT"
] | null | null | null | app/tests/test_pitch.py | shizukane/pitch | 0793278ef43e55a10623e006eae8b12249dd6039 | [
"MIT"
] | null | null | null | app/tests/test_pitch.py | shizukane/pitch | 0793278ef43e55a10623e006eae8b12249dd6039 | [
"MIT"
] | null | null | null | import unittest
from app.models import Pitch,User,Comment
class TestPitch(unittest.TestCase):
"""
This is the class which we will use to do tests for the Pitch
"""
def setUp(self):
"""
This will create an instance of the User and Pitch before each test case
"""
self.new_user = User(username = "Joan")
self.new_pitch = Pitch(title = "pitch", user = self.new_user)
def tearDown(self):
"""
Will delete all the info from the db
"""
Pitch.query.delete()
User.query.delete()
Comment.query.delete()
def test_instance(self):
"""
Will test whether the new_pitch is an instance of Pitch
"""
self.assertTrue(isinstance(self.new_pitch, Pitch))
def test_init(self):
"""
Will test whether the new_pitch is instantiated correctly
"""
        self.assertEqual(self.new_pitch.title, "pitch")
def test_save_pitch(self):
"""
Will test whether the user is saved into the database
"""
self.new_pitch.save_pitch()
pitches = Pitch.query.all()
self.assertTrue(len(pitches) > 0)
def test_relationship_user(self):
"""
Will test whether the pitch is correctly related to the user who posted it
"""
user = self.new_pitch.user.username
self.assertTrue(user == "Joan") | 28.32 | 82 | 0.600989 |
0f937f017c28fb0af262468d8a22e7a20615b0d5 | 5,506 | py | Python | sysidentpy/basis_function/_basis_function.py | neylsoncrepalde/sysidentpy | 3d241ff3c460a8e01f9bd8afbaf17f27ec3937f3 | [
"BSD-3-Clause"
] | 107 | 2020-05-19T12:59:56.000Z | 2022-03-29T05:25:27.000Z | sysidentpy/basis_function/_basis_function.py | nataliakeles/sysidentpy | d1af4243e7c3d2c0b456fb9b4fe120965a7ededc | [
"BSD-3-Clause"
] | 20 | 2020-05-24T15:56:15.000Z | 2022-03-05T19:54:02.000Z | sysidentpy/basis_function/_basis_function.py | nataliakeles/sysidentpy | d1af4243e7c3d2c0b456fb9b4fe120965a7ededc | [
"BSD-3-Clause"
] | 25 | 2020-05-19T14:02:17.000Z | 2022-03-15T20:17:58.000Z | import numpy as np
from itertools import combinations_with_replacement
from sysidentpy.narmax_base import InformationMatrix
class Polynomial(InformationMatrix):
"""Build polynomial basis function.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
    .. math::
y_k = \sum_{i=1}^{p}\Theta_i \times \prod_{j=0}^{n_x}u_{k-j}^{b_i, j}\prod_{l=1}^{n_e}e_{k-l}^{d_i, l}\prod_{m=1}^{n_y}y_{k-m}^{a_i, m}
\label{eq5:narx}
where :math:`p` is the number of regressors, :math:`\Theta_i` are the
model parameters, and :math:`a_i, m, b_i, j` and :math:`d_i, l \in \mathbb{N}`
are the exponents of the output, input and noise terms, respectively.
Parameters
----------
degree : int (max_degree), default=2
The maximum degree of the polynomial features.
Notes
-----
Be aware that the number of features in the output array scales
significantly as the number of inputs, the max lag of the input and output, and
degree increases. High degrees can cause overfitting.
"""
def __init__(
self,
degree=2,
):
self.degree = degree
def fit(self, data, max_lag, predefined_regressors=None):
"""Build the Polynomial information matrix.
        Each column of the information matrix represents a candidate
regressor. The set of candidate regressors are based on xlag,
ylag, and degree defined by the user.
Parameters
----------
data : ndarray of floats
The lagged matrix built with respect to each lag and column.
max_lag : int
            The maximum lag; the first ``max_lag`` rows of the information matrix are dropped.
predefined_regressors : ndarray of int
The index of the selected regressors by the Model Structure
Selection algorithm.
Returns
-------
psi = ndarray of floats
            The information matrix of candidate regressors built from the lagged data.
"""
# Create combinations of all columns based on its index
iterable_list = range(data.shape[1])
combinations = list(combinations_with_replacement(iterable_list, self.degree))
if predefined_regressors is not None:
combinations = [combinations[index] for index in predefined_regressors]
psi = np.column_stack(
[
np.prod(data[:, combinations[i]], axis=1)
for i in range(len(combinations))
]
)
psi = psi[max_lag:, :]
return psi
def transform(self, data, max_lag, predefined_regressors=None):
return self.fit(data, max_lag, predefined_regressors)
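# Usage sketch for the Polynomial basis above (added for illustration; the
# variable names are placeholders, not part of the upstream library):
#
#     basis = Polynomial(degree=2)
#     psi = basis.fit(lagged_data, max_lag=2)
#
# where ``lagged_data`` is a 2D array of lagged inputs/outputs, typically
# produced by the ``InformationMatrix`` helpers; see the runnable sketch at
# the end of this module.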
class Fourier:
"""Build Fourier basis function.
Generate a new feature matrix consisting of all Fourier features
with respect to the number of harmonics.
Parameters
----------
    degree : int (max_degree), default=1
The maximum degree of the polynomial features.
Notes
-----
Be aware that the number of features in the output array scales
significantly as the number of inputs, the max lag of the input and output.
"""
def __init__(self, n=1, p=2 * np.pi, degree=1, ensemble=True):
self.n = n
self.p = p
self.degree = degree
self.ensemble = ensemble
def _fourier_expansion(self, data, n):
base = np.column_stack(
[
np.cos(2 * np.pi * data * n / self.p),
np.sin(2 * np.pi * data * n / self.p),
]
)
return base
def fit(self, data, max_lag, predefined_regressors=None):
"""Build the Polynomial information matrix.
        Each column of the information matrix represents a candidate
regressor. The set of candidate regressors are based on xlag,
ylag, and degree defined by the user.
Parameters
----------
data : ndarray of floats
The lagged matrix built with respect to each lag and column.
max_lag : int
            The maximum lag; the first ``max_lag`` rows of the information matrix are dropped.
predefined_regressors : ndarray of int
The index of the selected regressors by the Model Structure
Selection algorithm.
Returns
-------
psi = ndarray of floats
            The information matrix of candidate regressors built from the lagged data.
"""
# remove intercept (because the data always have the intercept)
if self.degree > 1:
data = Polynomial().fit(data, max_lag, predefined_regressors=None)
data = data[:, 1:]
else:
data = data[max_lag:, 1:]
columns = list(range(data.shape[1]))
harmonics = list(range(1, self.n + 1))
psi = np.zeros([len(data), 1])
for col in columns:
base_col = np.column_stack(
[self._fourier_expansion(data[:, col], h) for h in harmonics]
)
psi = np.column_stack([psi, base_col])
self.repetition = self.n * 2
if self.ensemble:
psi = psi[:, 1:]
psi = np.column_stack([data, psi])
else:
psi = psi[:, 1:]
if predefined_regressors is None:
return psi, self.ensemble
else:
return psi[:, predefined_regressors], self.ensemble
def transform(self, data, max_lag, predefined_regressors=None):
return self.fit(data, max_lag, predefined_regressors)
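# ---------------------------------------------------------------------------
# Minimal runnable sketch (added for illustration; not part of the upstream
# library).  The array below stands in for a lagged information matrix whose
# first column is the intercept term that ``Fourier.fit`` drops internally.
if __name__ == "__main__":
    lagged = np.column_stack(
        [np.ones(10), np.linspace(0.0, 1.0, 10), np.linspace(1.0, 2.0, 10)]
    )
    psi_poly = Polynomial(degree=2).fit(lagged, max_lag=2)
    psi_fourier, ensemble = Fourier(n=2, degree=1).fit(lagged, max_lag=2)
    # With these inputs the shapes are (8, 6) and (8, 10) respectively.
    print(psi_poly.shape, psi_fourier.shape, ensemble)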
| 34.4125 | 143 | 0.608609 |
b6cf8f7e8745be6498338524a8d6cf8fbd89d2af | 165 | py | Python | enhterm/__version__.py | pyl1b/enhterm | b4eacc25ef1bdfecab9a662b5269d016070d4e6b | [
"MIT"
] | null | null | null | enhterm/__version__.py | pyl1b/enhterm | b4eacc25ef1bdfecab9a662b5269d016070d4e6b | [
"MIT"
] | null | null | null | enhterm/__version__.py | pyl1b/enhterm | b4eacc25ef1bdfecab9a662b5269d016070d4e6b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
The version of this package. It is read by setup.py.
"""
major = 0
minor = 2
patch = 1
__version__ = '%d.%d.%d' % (major, minor, patch)
| 16.5 | 52 | 0.587879 |
450d223dd3c34d4e432e2d59bae4997d739c0eeb | 2,092 | py | Python | python_scripts/tests/cli_wallet/tests/utils/cmd_args.py | Blurt-Blockchain/steem | fbffd373cdb0f6192aa8806d07e8671e219c3767 | [
"MIT"
] | 2 | 2020-04-21T03:10:06.000Z | 2020-04-21T05:49:46.000Z | python_scripts/tests/cli_wallet/tests/utils/cmd_args.py | Blurt-Blockchain/steem | fbffd373cdb0f6192aa8806d07e8671e219c3767 | [
"MIT"
] | 4 | 2020-04-22T05:14:18.000Z | 2020-04-22T07:59:20.000Z | python_scripts/tests/cli_wallet/tests/utils/cmd_args.py | Blurt-Blockchain/steem | fbffd373cdb0f6192aa8806d07e8671e219c3767 | [
"MIT"
] | 2 | 2020-04-22T05:04:29.000Z | 2020-10-23T13:58:19.000Z | import argparse
args = None
parser = argparse.ArgumentParser(description='Blurtd cli wallet test args.')
parser.add_argument('--path-to-cli' , dest='path' , help ='Path to cli_wallet executable')
parser.add_argument('--creator' , dest='creator' , help ='Account to create proposals with')
parser.add_argument('--wif' , dest='wif' , help ='Private key for creator account')
parser.add_argument('--server-rpc-endpoint', dest="server_rpc_endpoint", help = "Set server endpoint [=ws://127.0.0.1:8090]", default ="ws://127.0.0.1:8090")
parser.add_argument('--cert-auth' , dest="cert_auth" , help = "Set cert auth [=_default]" , default ="_default")
#this argument causes error
#parser.add_argument('--rpc-endpoint' , dest="rpc_endpoint" , help = "Set rpc endpoint [=127.0.0.1:8091]" , default ="127.0.0.1:8091")
parser.add_argument('--rpc-tls-endpoint' , dest="rpc_tls_endpoint" , help = "Set rpc tle endpont [=127.0.0.1:8092]" , default ="127.0.0.1:8092")
parser.add_argument('--rpc-tls-cert' , dest="rpc_tls_cert" , help = "Set rpc tls cert [=server.pem]" , default ="server.pem")
parser.add_argument('--rpc-http-endpoint' , dest="rpc_http_endpoint" , help = "Set rpc http endpoint [=127.0.0.1:8093]" , default ="127.0.0.1:8093")
parser.add_argument('--deamon' , dest="deamon" , help = "Set to work as deamon [=False]" , default =False)
parser.add_argument('--rpc-allowip' , dest="rpc_allowip" , help = "Set allowed rpc ip [=[]]" , default =[])
parser.add_argument('--wallet-file' , dest="wallet_file" , help = "Set wallet name [=wallet.json]" , default ="wallet.json")
parser.add_argument('--chain-id' , dest="chain_id" , help = "Set chain id [=18dcf0a285365fc58b71f18b3d3fec954aa0c141c44e4e5cb4cf777b9eab274e]", default ="18dcf0a285365fc58b71f18b3d3fec954aa0c141c44e4e5cb4cf777b9eab274e")
args = parser.parse_args() | 99.619048 | 240 | 0.620459 |
cdbb838ef2f6a840d5d0c40c3cb01114c1174133 | 1,214 | py | Python | Introduccion_Python/02_contenedores_datos/contenedores.py | iarielduarte/Python | 871cdaf287a583baad7c88e274e09821396d0bbb | [
"CNRI-Python"
] | null | null | null | Introduccion_Python/02_contenedores_datos/contenedores.py | iarielduarte/Python | 871cdaf287a583baad7c88e274e09821396d0bbb | [
"CNRI-Python"
] | null | null | null | Introduccion_Python/02_contenedores_datos/contenedores.py | iarielduarte/Python | 871cdaf287a583baad7c88e274e09821396d0bbb | [
"CNRI-Python"
] | null | null | null | '''
Created on 21/05/2012
@author: Willis Polanco
'''
def main():
print("Contenedores en Python.")
tupla = (1,2,3,4,5)
tupla2 = 6,
tupla3 = tuple(range(20))
print(tupla)
print(tupla3)
print(len(tupla3))
print(min(tupla))
print(max(tupla))
lista = [3,4,5,6,7,8]
listax = list(range(50))
lista2 = [9,10,11]
print(lista)
print(len(lista))
print(len(lista2))
print(min(lista2))
print(max(lista2))
for x in lista:
print(x)
print(11 not in lista)
lista.extend(range(10))
print(lista)
lista.insert(0, 1000)
print(lista)
lista.remove(1)
print(lista)
print(lista.pop(9))
print(lista)
print(lista.count(5))
print(lista.index(5))
print(listax)
dic = {'uno':1, 'dos':2, 'tres':3}
dic2 = dict(cuatro=4, cinco=5)
dic3 = dict(seis=6, siete=7, **dic2)
print(dic)
print(dic2)
print(dic3)
for i, d in dic3.items():
print(i,d)
print('seis' in dic3)
print(dic2.get('cuatro'))
print(dic2)
print(dic2.pop('cuatro'))
print(dic2)
del dic3['seis']
print(dic3)
if __name__ == '__main__': main() | 19.901639 | 40 | 0.547776 |
47b83edcf68a2942cd9cc1b752876bf79103b6ee | 2,924 | py | Python | src/sqlfluff/rules/L042.py | fbb-oc/sqlfluff | f50e72b748dcf700483d0e937aa2abcfb0a56e9e | [
"MIT"
] | 1 | 2022-03-03T02:29:11.000Z | 2022-03-03T02:29:11.000Z | src/sqlfluff/rules/L042.py | clairetaylor352/sqlfluff | 62900332228db323da323ce20df0c5e17ba9fcbf | [
"MIT"
] | null | null | null | src/sqlfluff/rules/L042.py | clairetaylor352/sqlfluff | 62900332228db323da323ce20df0c5e17ba9fcbf | [
"MIT"
] | null | null | null | """Implementation of Rule L042."""
from typing import Optional
from sqlfluff.core.rules.base import BaseRule, LintResult, RuleContext
from sqlfluff.core.rules.doc_decorators import document_configuration
from sqlfluff.core.rules.functional.segment_predicates import is_type
@document_configuration
class Rule_L042(BaseRule):
"""Join/From clauses should not contain subqueries. Use CTEs instead.
By default this rule is configured to allow subqueries within ``FROM``
clauses but not within ``JOIN`` clauses. If you prefer a stricter lint
then this is configurable.
.. note::
Some dialects don't allow CTEs, and for those dialects
this rule makes no sense and should be disabled.
**Anti-pattern**
.. code-block:: sql
select
a.x, a.y, b.z
from a
join (
select x, z from b
) using(x)
**Best practice**
.. code-block:: sql
with c as (
select x, z from b
)
select
a.x, a.y, c.z
from a
join c using(x)
"""
config_keywords = ["forbid_subquery_in"]
_config_mapping = {
"join": ["join_clause"],
"from": ["from_expression"],
"both": ["join_clause", "from_expression"],
}
def _eval(self, context: RuleContext) -> Optional[LintResult]:
"""Join/From clauses should not contain subqueries. Use CTEs instead.
NB: No fix for this routine because it would be very complex to
implement reliably.
"""
parent_types = self._config_mapping[self.forbid_subquery_in] # type: ignore
for parent_type in parent_types:
if context.segment.is_type(parent_type):
# Get the referenced table segment
from_expression_element = context.functional.segment.children(
is_type("from_expression_element")
).children(is_type("table_expression"))
# Is it bracketed? If so, lint that instead.
bracketed_expression = from_expression_element.children(
is_type("bracketed")
)
if bracketed_expression:
from_expression_element = bracketed_expression
# If we find a child with a "problem" type, raise an issue.
# If not, we're fine.
seg = from_expression_element.children(
is_type(
"with_compound_statement",
"set_expression",
"select_statement",
)
)
if seg:
return LintResult(
anchor=seg[0],
description=f"{parent_type} clauses should not contain "
"subqueries. Use CTEs instead",
)
return None
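# Configuration sketch (added for illustration; not part of this module): the
# ``forbid_subquery_in`` keyword declared above is normally set from the user's
# ``.sqlfluff`` file, for example::
#
#     [sqlfluff:rules:L042]
#     forbid_subquery_in = both
#
# which, via ``_config_mapping``, makes the rule flag subqueries in both FROM
# and JOIN clauses.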
| 31.782609 | 84 | 0.568057 |
9f1c8d0a324d134dca5d0716e1daa3d9f79c7927 | 4,014 | py | Python | corehq/apps/users/cases.py | johan--/commcare-hq | 86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd | [
"BSD-3-Clause"
] | null | null | null | corehq/apps/users/cases.py | johan--/commcare-hq | 86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd | [
"BSD-3-Clause"
] | 1 | 2022-03-12T01:03:25.000Z | 2022-03-12T01:03:25.000Z | corehq/apps/users/cases.py | johan--/commcare-hq | 86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
from couchdbkit import ResourceNotFound
from corehq.apps.groups.models import Group
from corehq.apps.users.models import CouchUser, CommCareUser, WebUser
from corehq.apps.hqcase.utils import assign_cases
def user_db():
return CouchUser.get_db()
def get_owner_id(case):
return case.owner_id or case.user_id
def get_wrapped_owner(owner_id):
"""
Returns the wrapped user or group object for a given ID, or None
if the id isn't a known owner type.
"""
if not owner_id:
return None
def _get_class(doc_type):
return {
'CommCareUser': CommCareUser,
'WebUser': WebUser,
'Group': Group,
}.get(doc_type)
try:
owner_doc = user_db().get(owner_id)
except ResourceNotFound:
return None
cls = _get_class(owner_doc['doc_type'])
return cls.wrap(owner_doc) if cls else None
def get_owning_users(owner_id):
"""
Given an owner ID, get a list of the owning users, regardless of whether
it's a user or group.
"""
owner = get_wrapped_owner(owner_id)
if not owner:
return []
elif isinstance(owner, Group):
return owner.get_users()
else:
return [owner]
def reconcile_ownership(case, user, recursive=True, existing_groups=None):
"""
Reconciles ownership of a case (and optionally its subcases) by the following rules:
0. If the case is owned by the user, do nothing.
1. If the case has no owner, make the user the owner.
    2. If the case has an owner that is a user, create a new case sharing group,
       add that user and the new user to the case sharing group, and make the group the owner.
3. If the case has an owner that is a group, and the user is in the group, do nothing.
4. If the case has an owner that is a group, and the user is not in the group,
       add the user to the group and leave the owner untouched.
Will recurse through subcases if asked to.
Existing groups, if specified, will be checked first for satisfying the ownership
criteria in scenario 2 before creating a new group (this is mainly used by the
recursive calls)
"""
existing_groups = {} if existing_groups is None else existing_groups
def _get_matching_group(groups, user_ids):
"""
Given a list of groups and user_ids, returns any group that contains
all of the user_ids, or None if no match is found.
"""
for group in groups:
if all(user in group.users for user in user_ids):
return group
return None
owner = get_wrapped_owner(get_owner_id(case))
if owner and owner._id == user._id:
pass
elif owner is None:
# assign to user
_assign_case(case, user._id, user)
elif isinstance(owner, CommCareUser):
needed_owners = [owner._id, user._id]
matched = _get_matching_group(existing_groups.values(), needed_owners)
if matched:
_assign_case(case, matched._id, user)
else:
new_group = Group(
domain=case.domain,
name="{case} Owners (system)".format(case=case.name or case.type),
users=[owner._id, user._id],
case_sharing=True,
reporting=False,
metadata={
'hq-system': True,
}
)
new_group.save()
existing_groups[new_group._id] = new_group
_assign_case(case, new_group._id, user)
else:
assert isinstance(owner, Group)
if user._id not in owner.users:
owner.users.append(user._id)
owner.save()
existing_groups[owner._id] = owner
if recursive:
for subcase in case.get_subcases():
reconcile_ownership(subcase, user, recursive, existing_groups)
def _assign_case(case, new_owner_id, acting_user):
return assign_cases([case], new_owner_id, acting_user)
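# Usage sketch (added for illustration; ``case`` and ``new_user`` stand in for
# real CommCareCase / CouchUser documents, and nothing here runs at import
# time):
#
#     from corehq.apps.users.cases import reconcile_ownership
#     reconcile_ownership(case, new_user)
#
# Following the rules documented above: if the case is currently owned by a
# different user, a system case sharing group containing both users is created
# and made the owner (rule 2); a later call with a third user simply adds that
# user to the existing owning group (rule 4).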
| 33.45 | 90 | 0.642501 |
42b2446eb827b2a04f5eb156b1b113aac5c31e2b | 642 | py | Python | test/test_agent.py | Factern/factern-client-python | 2453dbf0d683417142fe98514ef6de2742f14f92 | [
"MIT"
] | null | null | null | test/test_agent.py | Factern/factern-client-python | 2453dbf0d683417142fe98514ef6de2742f14f92 | [
"MIT"
] | null | null | null | test/test_agent.py | Factern/factern-client-python | 2453dbf0d683417142fe98514ef6de2742f14f92 | [
"MIT"
] | 2 | 2018-07-20T15:02:06.000Z | 2018-08-01T20:38:38.000Z | # coding: utf-8
"""
Factern API
"""
from __future__ import absolute_import
import unittest
import factern_client
from factern_client.com.factern.model.agent import Agent # noqa: E501
from factern_client.rest import ApiException
class TestAgent(unittest.TestCase):
"""Agent unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testAgent(self):
"""Test Agent"""
# FIXME: construct object with mandatory attributes with example values
# model = factern_client.models.agent.Agent() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 18.342857 | 79 | 0.67134 |
c9adbe854094defd81b3d78f43a17ff0a178f794 | 4,083 | py | Python | nssrc/com/citrix/netscaler/nitro/resource/config/ns/nsmigration.py | guardicore/nitro-python | 5346a5086134aead80968f15a41ff527adaa0ec1 | [
"Apache-2.0"
] | null | null | null | nssrc/com/citrix/netscaler/nitro/resource/config/ns/nsmigration.py | guardicore/nitro-python | 5346a5086134aead80968f15a41ff527adaa0ec1 | [
"Apache-2.0"
] | null | null | null | nssrc/com/citrix/netscaler/nitro/resource/config/ns/nsmigration.py | guardicore/nitro-python | 5346a5086134aead80968f15a41ff527adaa0ec1 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2021 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class nsmigration(base_resource) :
""" Configuration for Migration operation resource. """
#------- Read only Parameter ---------
def __init__(self) :
self._migrationstatus = None
self._migrationstarttime = None
self._migrationendtime = None
self._migrationrollbackstarttime = None
@property
def migrationstatus(self) :
r"""ISSU Migration Status.<br/>Possible values = Migration is not yet started, Migration is in progress and Failover is not yet done, Migration is in progress and Failover is completed, Rollback is initiated, Migration is completed.
"""
try :
return self._migrationstatus
except Exception as e:
raise e
@property
def migrationstarttime(self) :
r"""Timestamp for start migration.<br/>Minimum length = 1.
"""
try :
return self._migrationstarttime
except Exception as e:
raise e
@property
def migrationendtime(self) :
r"""Timestamp for migration complete.<br/>Minimum length = 1.
"""
try :
return self._migrationendtime
except Exception as e:
raise e
@property
def migrationrollbackstarttime(self) :
r"""Timestamp for start migration rollback.<br/>Minimum length = 1.
"""
try :
return self._migrationrollbackstarttime
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
r""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(nsmigration_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.nsmigration
except Exception as e :
raise e
def _get_object_name(self) :
r""" Returns the value of object identifier argument
"""
try :
return 0
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
r""" Use this API to fetch all the nsmigration resources that are configured on netscaler.
"""
try :
if not name :
obj = nsmigration()
response = obj.get_resources(client, option_)
return response
except Exception as e :
raise e
class Migrationstatus:
Migration_is_not_yet_started = "Migration is not yet started"
Migration_is_in_progress_and_Failover_is_not_yet_done = "Migration is in progress and Failover is not yet done"
Migration_is_in_progress_and_Failover_is_completed = "Migration is in progress and Failover is completed"
Rollback_is_initiated = "Rollback is initiated"
Migration_is_completed = "Migration is completed"
class nsmigration_response(base_response) :
def __init__(self, length=1) :
self.nsmigration = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.nsmigration = [nsmigration() for _ in range(length)]
| 32.149606 | 234 | 0.739652 |
9636c6b4d28fad5ecd7cc2bbb6619ba959fd7406 | 33,072 | py | Python | Lib/asyncio/proactor_events.py | finefoot/cpython | ffcc7cd57f6a52c6074ecc9f0a9f0177fb1dbfee | [
"0BSD"
] | 2 | 2022-03-27T14:52:48.000Z | 2022-03-27T17:35:22.000Z | Lib/asyncio/proactor_events.py | finefoot/cpython | ffcc7cd57f6a52c6074ecc9f0a9f0177fb1dbfee | [
"0BSD"
] | 8 | 2022-01-07T11:31:11.000Z | 2022-03-04T00:07:16.000Z | Lib/asyncio/proactor_events.py | finefoot/cpython | ffcc7cd57f6a52c6074ecc9f0a9f0177fb1dbfee | [
"0BSD"
] | 1 | 2022-03-27T18:34:54.000Z | 2022-03-27T18:34:54.000Z | """Event loop using a proactor and related classes.
A proactor is a "notify-on-completion" multiplexer. Currently a
proactor is only implemented on Windows with IOCP.
"""
__all__ = 'BaseProactorEventLoop',
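# Illustrative note (not part of the original module): on Windows the proactor
# event loop has been the default since Python 3.8, so ``asyncio.run(coro())``
# already uses it.  Selecting it explicitly looks roughly like:
#
#     import asyncio
#     asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())
#     asyncio.run(main())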
import io
import os
import socket
import warnings
import signal
import threading
import collections
from . import base_events
from . import constants
from . import futures
from . import exceptions
from . import protocols
from . import sslproto
from . import transports
from . import trsock
from .log import logger
def _set_socket_extra(transport, sock):
transport._extra['socket'] = trsock.TransportSocket(sock)
try:
transport._extra['sockname'] = sock.getsockname()
except socket.error:
if transport._loop.get_debug():
logger.warning(
"getsockname() failed on %r", sock, exc_info=True)
if 'peername' not in transport._extra:
try:
transport._extra['peername'] = sock.getpeername()
except socket.error:
# UDP sockets may not have a peer name
transport._extra['peername'] = None
class _ProactorBasePipeTransport(transports._FlowControlMixin,
transports.BaseTransport):
"""Base class for pipe and socket transports."""
def __init__(self, loop, sock, protocol, waiter=None,
extra=None, server=None):
super().__init__(extra, loop)
self._set_extra(sock)
self._sock = sock
self.set_protocol(protocol)
self._server = server
self._buffer = None # None or bytearray.
self._read_fut = None
self._write_fut = None
self._pending_write = 0
self._conn_lost = 0
self._closing = False # Set when close() called.
self._eof_written = False
if self._server is not None:
self._server._attach()
self._loop.call_soon(self._protocol.connection_made, self)
if waiter is not None:
# only wake up the waiter when connection_made() has been called
self._loop.call_soon(futures._set_result_unless_cancelled,
waiter, None)
def __repr__(self):
info = [self.__class__.__name__]
if self._sock is None:
info.append('closed')
elif self._closing:
info.append('closing')
if self._sock is not None:
info.append(f'fd={self._sock.fileno()}')
if self._read_fut is not None:
info.append(f'read={self._read_fut!r}')
if self._write_fut is not None:
info.append(f'write={self._write_fut!r}')
if self._buffer:
info.append(f'write_bufsize={len(self._buffer)}')
if self._eof_written:
info.append('EOF written')
return '<{}>'.format(' '.join(info))
def _set_extra(self, sock):
self._extra['pipe'] = sock
def set_protocol(self, protocol):
self._protocol = protocol
def get_protocol(self):
return self._protocol
def is_closing(self):
return self._closing
def close(self):
if self._closing:
return
self._closing = True
self._conn_lost += 1
if not self._buffer and self._write_fut is None:
self._loop.call_soon(self._call_connection_lost, None)
if self._read_fut is not None:
self._read_fut.cancel()
self._read_fut = None
def __del__(self, _warn=warnings.warn):
if self._sock is not None:
_warn(f"unclosed transport {self!r}", ResourceWarning, source=self)
self.close()
def _fatal_error(self, exc, message='Fatal error on pipe transport'):
try:
if isinstance(exc, OSError):
if self._loop.get_debug():
logger.debug("%r: %s", self, message, exc_info=True)
else:
self._loop.call_exception_handler({
'message': message,
'exception': exc,
'transport': self,
'protocol': self._protocol,
})
finally:
self._force_close(exc)
def _force_close(self, exc):
if self._empty_waiter is not None and not self._empty_waiter.done():
if exc is None:
self._empty_waiter.set_result(None)
else:
self._empty_waiter.set_exception(exc)
if self._closing:
return
self._closing = True
self._conn_lost += 1
if self._write_fut:
self._write_fut.cancel()
self._write_fut = None
if self._read_fut:
self._read_fut.cancel()
self._read_fut = None
self._pending_write = 0
self._buffer = None
self._loop.call_soon(self._call_connection_lost, exc)
def _call_connection_lost(self, exc):
try:
self._protocol.connection_lost(exc)
finally:
# XXX If there is a pending overlapped read on the other
# end then it may fail with ERROR_NETNAME_DELETED if we
# just close our end. First calling shutdown() seems to
# cure it, but maybe using DisconnectEx() would be better.
if hasattr(self._sock, 'shutdown') and self._sock.fileno() != -1:
self._sock.shutdown(socket.SHUT_RDWR)
self._sock.close()
self._sock = None
server = self._server
if server is not None:
server._detach()
self._server = None
def get_write_buffer_size(self):
size = self._pending_write
if self._buffer is not None:
size += len(self._buffer)
return size
class _ProactorReadPipeTransport(_ProactorBasePipeTransport,
transports.ReadTransport):
"""Transport for read pipes."""
def __init__(self, loop, sock, protocol, waiter=None,
extra=None, server=None, buffer_size=65536):
self._pending_data_length = -1
self._paused = True
super().__init__(loop, sock, protocol, waiter, extra, server)
self._data = bytearray(buffer_size)
self._loop.call_soon(self._loop_reading)
self._paused = False
def is_reading(self):
return not self._paused and not self._closing
def pause_reading(self):
if self._closing or self._paused:
return
self._paused = True
# bpo-33694: Don't cancel self._read_fut because cancelling an
        # overlapped WSASend() silently loses data with the current proactor
# implementation.
#
# If CancelIoEx() fails with ERROR_NOT_FOUND, it means that WSASend()
# completed (even if HasOverlappedIoCompleted() returns 0), but
# Overlapped.cancel() currently silently ignores the ERROR_NOT_FOUND
        # error. Once the overlapped is ignored, the IOCP loop will ignore the
        # completion I/O event and so will not read the result of the overlapped
# WSARecv().
if self._loop.get_debug():
logger.debug("%r pauses reading", self)
def resume_reading(self):
if self._closing or not self._paused:
return
self._paused = False
if self._read_fut is None:
self._loop.call_soon(self._loop_reading, None)
length = self._pending_data_length
self._pending_data_length = -1
if length > -1:
# Call the protocol method after calling _loop_reading(),
# since the protocol can decide to pause reading again.
self._loop.call_soon(self._data_received, self._data[:length], length)
if self._loop.get_debug():
logger.debug("%r resumes reading", self)
def _eof_received(self):
if self._loop.get_debug():
logger.debug("%r received EOF", self)
try:
keep_open = self._protocol.eof_received()
except (SystemExit, KeyboardInterrupt):
raise
except BaseException as exc:
self._fatal_error(
exc, 'Fatal error: protocol.eof_received() call failed.')
return
if not keep_open:
self.close()
def _data_received(self, data, length):
if self._paused:
# Don't call any protocol method while reading is paused.
# The protocol will be called on resume_reading().
assert self._pending_data_length == -1
self._pending_data_length = length
return
if length == 0:
self._eof_received()
return
if isinstance(self._protocol, protocols.BufferedProtocol):
try:
protocols._feed_data_to_buffered_proto(self._protocol, data)
except (SystemExit, KeyboardInterrupt):
raise
except BaseException as exc:
self._fatal_error(exc,
'Fatal error: protocol.buffer_updated() '
'call failed.')
return
else:
self._protocol.data_received(data)
def _loop_reading(self, fut=None):
length = -1
data = None
try:
if fut is not None:
assert self._read_fut is fut or (self._read_fut is None and
self._closing)
self._read_fut = None
if fut.done():
# deliver data later in "finally" clause
length = fut.result()
if length == 0:
# we got end-of-file so no need to reschedule a new read
return
data = self._data[:length]
else:
# the future will be replaced by next proactor.recv call
fut.cancel()
if self._closing:
# since close() has been called we ignore any read data
return
            # bpo-33694: buffer_updated() currently has no fast path because of
# a data loss issue caused by overlapped WSASend() cancellation.
if not self._paused:
# reschedule a new read
self._read_fut = self._loop._proactor.recv_into(self._sock, self._data)
except ConnectionAbortedError as exc:
if not self._closing:
self._fatal_error(exc, 'Fatal read error on pipe transport')
elif self._loop.get_debug():
logger.debug("Read error on pipe transport while closing",
exc_info=True)
except ConnectionResetError as exc:
self._force_close(exc)
except OSError as exc:
self._fatal_error(exc, 'Fatal read error on pipe transport')
except exceptions.CancelledError:
if not self._closing:
raise
else:
if not self._paused:
self._read_fut.add_done_callback(self._loop_reading)
finally:
if length > -1:
self._data_received(data, length)
class _ProactorBaseWritePipeTransport(_ProactorBasePipeTransport,
transports.WriteTransport):
"""Transport for write pipes."""
_start_tls_compatible = True
def __init__(self, *args, **kw):
super().__init__(*args, **kw)
self._empty_waiter = None
def write(self, data):
if not isinstance(data, (bytes, bytearray, memoryview)):
raise TypeError(
f"data argument must be a bytes-like object, "
f"not {type(data).__name__}")
if self._eof_written:
raise RuntimeError('write_eof() already called')
if self._empty_waiter is not None:
raise RuntimeError('unable to write; sendfile is in progress')
if not data:
return
if self._conn_lost:
if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
logger.warning('socket.send() raised exception.')
self._conn_lost += 1
return
# Observable states:
# 1. IDLE: _write_fut and _buffer both None
# 2. WRITING: _write_fut set; _buffer None
# 3. BACKED UP: _write_fut set; _buffer a bytearray
# We always copy the data, so the caller can't modify it
# while we're still waiting for the I/O to happen.
if self._write_fut is None: # IDLE -> WRITING
assert self._buffer is None
# Pass a copy, except if it's already immutable.
self._loop_writing(data=bytes(data))
elif not self._buffer: # WRITING -> BACKED UP
# Make a mutable copy which we can extend.
self._buffer = bytearray(data)
self._maybe_pause_protocol()
else: # BACKED UP
# Append to buffer (also copies).
self._buffer.extend(data)
self._maybe_pause_protocol()
def _loop_writing(self, f=None, data=None):
try:
if f is not None and self._write_fut is None and self._closing:
# XXX most likely self._force_close() has been called, and
# it has set self._write_fut to None.
return
assert f is self._write_fut
self._write_fut = None
self._pending_write = 0
if f:
f.result()
if data is None:
data = self._buffer
self._buffer = None
if not data:
if self._closing:
self._loop.call_soon(self._call_connection_lost, None)
if self._eof_written:
self._sock.shutdown(socket.SHUT_WR)
# Now that we've reduced the buffer size, tell the
# protocol to resume writing if it was paused. Note that
# we do this last since the callback is called immediately
# and it may add more data to the buffer (even causing the
# protocol to be paused again).
self._maybe_resume_protocol()
else:
self._write_fut = self._loop._proactor.send(self._sock, data)
if not self._write_fut.done():
assert self._pending_write == 0
self._pending_write = len(data)
self._write_fut.add_done_callback(self._loop_writing)
self._maybe_pause_protocol()
else:
self._write_fut.add_done_callback(self._loop_writing)
if self._empty_waiter is not None and self._write_fut is None:
self._empty_waiter.set_result(None)
except ConnectionResetError as exc:
self._force_close(exc)
except OSError as exc:
self._fatal_error(exc, 'Fatal write error on pipe transport')
def can_write_eof(self):
return True
def write_eof(self):
self.close()
def abort(self):
self._force_close(None)
def _make_empty_waiter(self):
if self._empty_waiter is not None:
raise RuntimeError("Empty waiter is already set")
self._empty_waiter = self._loop.create_future()
if self._write_fut is None:
self._empty_waiter.set_result(None)
return self._empty_waiter
def _reset_empty_waiter(self):
self._empty_waiter = None
class _ProactorWritePipeTransport(_ProactorBaseWritePipeTransport):
def __init__(self, *args, **kw):
super().__init__(*args, **kw)
self._read_fut = self._loop._proactor.recv(self._sock, 16)
self._read_fut.add_done_callback(self._pipe_closed)
def _pipe_closed(self, fut):
if fut.cancelled():
# the transport has been closed
return
assert fut.result() == b''
if self._closing:
assert self._read_fut is None
return
assert fut is self._read_fut, (fut, self._read_fut)
self._read_fut = None
if self._write_fut is not None:
self._force_close(BrokenPipeError())
else:
self.close()
class _ProactorDatagramTransport(_ProactorBasePipeTransport,
transports.DatagramTransport):
max_size = 256 * 1024
def __init__(self, loop, sock, protocol, address=None,
waiter=None, extra=None):
self._address = address
self._empty_waiter = None
self._buffer_size = 0
# We don't need to call _protocol.connection_made() since our base
# constructor does it for us.
super().__init__(loop, sock, protocol, waiter=waiter, extra=extra)
# The base constructor sets _buffer = None, so we set it here
self._buffer = collections.deque()
self._loop.call_soon(self._loop_reading)
def _set_extra(self, sock):
_set_socket_extra(self, sock)
def get_write_buffer_size(self):
return self._buffer_size
def abort(self):
self._force_close(None)
def sendto(self, data, addr=None):
if not isinstance(data, (bytes, bytearray, memoryview)):
raise TypeError('data argument must be bytes-like object (%r)',
type(data))
if not data:
return
if self._address is not None and addr not in (None, self._address):
raise ValueError(
f'Invalid address: must be None or {self._address}')
if self._conn_lost and self._address:
if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
logger.warning('socket.sendto() raised exception.')
self._conn_lost += 1
return
# Ensure that what we buffer is immutable.
self._buffer.append((bytes(data), addr))
self._buffer_size += len(data)
if self._write_fut is None:
# No current write operations are active, kick one off
self._loop_writing()
# else: A write operation is already kicked off
self._maybe_pause_protocol()
def _loop_writing(self, fut=None):
try:
if self._conn_lost:
return
assert fut is self._write_fut
self._write_fut = None
if fut:
# We are in a _loop_writing() done callback, get the result
fut.result()
if not self._buffer or (self._conn_lost and self._address):
# The connection has been closed
if self._closing:
self._loop.call_soon(self._call_connection_lost, None)
return
data, addr = self._buffer.popleft()
self._buffer_size -= len(data)
if self._address is not None:
self._write_fut = self._loop._proactor.send(self._sock,
data)
else:
self._write_fut = self._loop._proactor.sendto(self._sock,
data,
addr=addr)
except OSError as exc:
self._protocol.error_received(exc)
except Exception as exc:
self._fatal_error(exc, 'Fatal write error on datagram transport')
else:
self._write_fut.add_done_callback(self._loop_writing)
self._maybe_resume_protocol()
def _loop_reading(self, fut=None):
data = None
try:
if self._conn_lost:
return
assert self._read_fut is fut or (self._read_fut is None and
self._closing)
self._read_fut = None
if fut is not None:
res = fut.result()
if self._closing:
# since close() has been called we ignore any read data
data = None
return
if self._address is not None:
data, addr = res, self._address
else:
data, addr = res
if self._conn_lost:
return
if self._address is not None:
self._read_fut = self._loop._proactor.recv(self._sock,
self.max_size)
else:
self._read_fut = self._loop._proactor.recvfrom(self._sock,
self.max_size)
except OSError as exc:
self._protocol.error_received(exc)
except exceptions.CancelledError:
if not self._closing:
raise
else:
if self._read_fut is not None:
self._read_fut.add_done_callback(self._loop_reading)
finally:
if data:
self._protocol.datagram_received(data, addr)
class _ProactorDuplexPipeTransport(_ProactorReadPipeTransport,
_ProactorBaseWritePipeTransport,
transports.Transport):
"""Transport for duplex pipes."""
def can_write_eof(self):
return False
def write_eof(self):
raise NotImplementedError
class _ProactorSocketTransport(_ProactorReadPipeTransport,
_ProactorBaseWritePipeTransport,
transports.Transport):
"""Transport for connected sockets."""
_sendfile_compatible = constants._SendfileMode.TRY_NATIVE
def __init__(self, loop, sock, protocol, waiter=None,
extra=None, server=None):
super().__init__(loop, sock, protocol, waiter, extra, server)
base_events._set_nodelay(sock)
def _set_extra(self, sock):
_set_socket_extra(self, sock)
def can_write_eof(self):
return True
def write_eof(self):
if self._closing or self._eof_written:
return
self._eof_written = True
if self._write_fut is None:
self._sock.shutdown(socket.SHUT_WR)
class BaseProactorEventLoop(base_events.BaseEventLoop):
def __init__(self, proactor):
super().__init__()
logger.debug('Using proactor: %s', proactor.__class__.__name__)
self._proactor = proactor
self._selector = proactor # convenient alias
self._self_reading_future = None
self._accept_futures = {} # socket file descriptor => Future
proactor.set_loop(self)
self._make_self_pipe()
if threading.current_thread() is threading.main_thread():
# wakeup fd can only be installed to a file descriptor from the main thread
signal.set_wakeup_fd(self._csock.fileno())
def _make_socket_transport(self, sock, protocol, waiter=None,
extra=None, server=None):
return _ProactorSocketTransport(self, sock, protocol, waiter,
extra, server)
def _make_ssl_transport(
self, rawsock, protocol, sslcontext, waiter=None,
*, server_side=False, server_hostname=None,
extra=None, server=None,
ssl_handshake_timeout=None,
ssl_shutdown_timeout=None):
ssl_protocol = sslproto.SSLProtocol(
self, protocol, sslcontext, waiter,
server_side, server_hostname,
ssl_handshake_timeout=ssl_handshake_timeout,
ssl_shutdown_timeout=ssl_shutdown_timeout)
_ProactorSocketTransport(self, rawsock, ssl_protocol,
extra=extra, server=server)
return ssl_protocol._app_transport
def _make_datagram_transport(self, sock, protocol,
address=None, waiter=None, extra=None):
return _ProactorDatagramTransport(self, sock, protocol, address,
waiter, extra)
def _make_duplex_pipe_transport(self, sock, protocol, waiter=None,
extra=None):
return _ProactorDuplexPipeTransport(self,
sock, protocol, waiter, extra)
def _make_read_pipe_transport(self, sock, protocol, waiter=None,
extra=None):
return _ProactorReadPipeTransport(self, sock, protocol, waiter, extra)
def _make_write_pipe_transport(self, sock, protocol, waiter=None,
extra=None):
# We want connection_lost() to be called when other end closes
return _ProactorWritePipeTransport(self,
sock, protocol, waiter, extra)
def close(self):
if self.is_running():
raise RuntimeError("Cannot close a running event loop")
if self.is_closed():
return
if threading.current_thread() is threading.main_thread():
signal.set_wakeup_fd(-1)
# Call these methods before closing the event loop (before calling
# BaseEventLoop.close), because they can schedule callbacks with
# call_soon(), which is forbidden when the event loop is closed.
self._stop_accept_futures()
self._close_self_pipe()
self._proactor.close()
self._proactor = None
self._selector = None
# Close the event loop
super().close()
async def sock_recv(self, sock, n):
return await self._proactor.recv(sock, n)
async def sock_recv_into(self, sock, buf):
return await self._proactor.recv_into(sock, buf)
async def sock_recvfrom(self, sock, bufsize):
return await self._proactor.recvfrom(sock, bufsize)
async def sock_recvfrom_into(self, sock, buf, nbytes=0):
if not nbytes:
nbytes = len(buf)
return await self._proactor.recvfrom_into(sock, buf, nbytes)
async def sock_sendall(self, sock, data):
return await self._proactor.send(sock, data)
async def sock_sendto(self, sock, data, address):
return await self._proactor.sendto(sock, data, 0, address)
async def sock_connect(self, sock, address):
return await self._proactor.connect(sock, address)
async def sock_accept(self, sock):
return await self._proactor.accept(sock)
async def _sock_sendfile_native(self, sock, file, offset, count):
try:
fileno = file.fileno()
except (AttributeError, io.UnsupportedOperation) as err:
raise exceptions.SendfileNotAvailableError("not a regular file")
try:
fsize = os.fstat(fileno).st_size
except OSError:
raise exceptions.SendfileNotAvailableError("not a regular file")
blocksize = count if count else fsize
if not blocksize:
return 0 # empty file
blocksize = min(blocksize, 0xffff_ffff)
end_pos = min(offset + count, fsize) if count else fsize
offset = min(offset, fsize)
total_sent = 0
try:
while True:
blocksize = min(end_pos - offset, blocksize)
if blocksize <= 0:
return total_sent
await self._proactor.sendfile(sock, file, offset, blocksize)
offset += blocksize
total_sent += blocksize
finally:
if total_sent > 0:
file.seek(offset)
async def _sendfile_native(self, transp, file, offset, count):
resume_reading = transp.is_reading()
transp.pause_reading()
await transp._make_empty_waiter()
try:
return await self.sock_sendfile(transp._sock, file, offset, count,
fallback=False)
finally:
transp._reset_empty_waiter()
if resume_reading:
transp.resume_reading()
def _close_self_pipe(self):
if self._self_reading_future is not None:
self._self_reading_future.cancel()
self._self_reading_future = None
self._ssock.close()
self._ssock = None
self._csock.close()
self._csock = None
self._internal_fds -= 1
def _make_self_pipe(self):
# A self-socket, really. :-)
self._ssock, self._csock = socket.socketpair()
self._ssock.setblocking(False)
self._csock.setblocking(False)
self._internal_fds += 1
def _loop_self_reading(self, f=None):
try:
if f is not None:
f.result() # may raise
if self._self_reading_future is not f:
# When we scheduled this Future, we assigned it to
# _self_reading_future. If it's not there now, something has
# tried to cancel the loop while this callback was still in the
# queue (see windows_events.ProactorEventLoop.run_forever). In
# that case stop here instead of continuing to schedule a new
# iteration.
return
f = self._proactor.recv(self._ssock, 4096)
except exceptions.CancelledError:
# _close_self_pipe() has been called, stop waiting for data
return
except (SystemExit, KeyboardInterrupt):
raise
except BaseException as exc:
self.call_exception_handler({
'message': 'Error on reading from the event loop self pipe',
'exception': exc,
'loop': self,
})
else:
self._self_reading_future = f
f.add_done_callback(self._loop_self_reading)
def _write_to_self(self):
# This may be called from a different thread, possibly after
# _close_self_pipe() has been called or even while it is
# running. Guard for self._csock being None or closed. When
# a socket is closed, send() raises OSError (with errno set to
# EBADF, but let's not rely on the exact error code).
csock = self._csock
if csock is None:
return
try:
csock.send(b'\0')
except OSError:
if self._debug:
logger.debug("Fail to write a null byte into the "
"self-pipe socket",
exc_info=True)
def _start_serving(self, protocol_factory, sock,
sslcontext=None, server=None, backlog=100,
ssl_handshake_timeout=None,
ssl_shutdown_timeout=None):
def loop(f=None):
try:
if f is not None:
conn, addr = f.result()
if self._debug:
logger.debug("%r got a new connection from %r: %r",
server, addr, conn)
protocol = protocol_factory()
if sslcontext is not None:
self._make_ssl_transport(
conn, protocol, sslcontext, server_side=True,
extra={'peername': addr}, server=server,
ssl_handshake_timeout=ssl_handshake_timeout,
ssl_shutdown_timeout=ssl_shutdown_timeout)
else:
self._make_socket_transport(
conn, protocol,
extra={'peername': addr}, server=server)
if self.is_closed():
return
f = self._proactor.accept(sock)
except OSError as exc:
if sock.fileno() != -1:
self.call_exception_handler({
'message': 'Accept failed on a socket',
'exception': exc,
'socket': trsock.TransportSocket(sock),
})
sock.close()
elif self._debug:
logger.debug("Accept failed on socket %r",
sock, exc_info=True)
except exceptions.CancelledError:
sock.close()
else:
self._accept_futures[sock.fileno()] = f
f.add_done_callback(loop)
self.call_soon(loop)
def _process_events(self, event_list):
# Events are processed in the IocpProactor._poll() method
pass
def _stop_accept_futures(self):
for future in self._accept_futures.values():
future.cancel()
self._accept_futures.clear()
def _stop_serving(self, sock):
future = self._accept_futures.pop(sock.fileno(), None)
if future:
future.cancel()
self._proactor._stop_serving(sock)
sock.close()
| 37.117845 | 87 | 0.571239 |
0dc26147a4328d76df7205f4b52d80916c48fab5 | 514 | py | Python | backend/src/webdoctor/urls.py | CSCapstone2019/WebDoctor | cda9e4e2bd2c4e22dc4a4aa9c0758e67cdee62d5 | [
"MIT"
] | 4 | 2019-09-13T14:50:22.000Z | 2019-11-27T03:19:44.000Z | backend/src/webdoctor/urls.py | CSCapstone2019/WebDoctor | cda9e4e2bd2c4e22dc4a4aa9c0758e67cdee62d5 | [
"MIT"
] | 8 | 2019-09-15T23:02:21.000Z | 2022-02-10T09:26:10.000Z | backend/src/webdoctor/urls.py | CSCapstone2019/WebDoctor | cda9e4e2bd2c4e22dc4a4aa9c0758e67cdee62d5 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
path('api-auth/', include('rest_framework.urls')),
path('admin/', admin.site.urls),
path('api/', include('patients.api.urls')),
path('chat/', include('chat.api.urls')),
path('', include('accounts.urls')),
]
# Serving media urls
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | 28.555556 | 80 | 0.712062 |
94d0ece7e10fcfc5cf178858e1c9faab18add179 | 7,000 | py | Python | pyprob/trace.py | SwapneelM/pyprob | 4d93441ea838c3491a49050ae05d218a34708e6d | [
"BSD-2-Clause"
] | 268 | 2017-10-16T13:09:12.000Z | 2021-12-01T19:03:12.000Z | pyprob/trace.py | SwapneelM/pyprob | 4d93441ea838c3491a49050ae05d218a34708e6d | [
"BSD-2-Clause"
] | 11 | 2017-12-05T21:50:30.000Z | 2019-02-25T19:52:39.000Z | pyprob/trace.py | SwapneelM/pyprob | 4d93441ea838c3491a49050ae05d218a34708e6d | [
"BSD-2-Clause"
] | 33 | 2017-10-21T16:32:00.000Z | 2021-11-24T13:42:53.000Z | import torch
from . import util
class Variable():
def __init__(self, distribution=None, value=None, address_base=None, address=None, instance=None, log_prob=None, log_importance_weight=None, control=False, replace=False, name=None, observed=False, reused=False, tagged=False):
self.distribution = distribution
if value is None:
self.value = None
else:
self.value = util.to_tensor(value)
self.address_base = address_base
self.address = address
self.instance = instance
if log_prob is None:
self.log_prob = None
else:
self.log_prob = util.to_tensor(log_prob)
if log_importance_weight is None:
self.log_importance_weight = None
else:
self.log_importance_weight = float(log_importance_weight)
self.control = control
self.replace = replace
self.name = name
self.observable = ((not tagged) and (name is not None)) or observed
self.observed = observed
self.reused = reused
self.tagged = tagged
def __repr__(self):
# The 'Unknown' cases below are for handling pruned variables in offline training datasets
return 'Variable(name:{}, control:{}, replace:{}, observable:{}, observed:{}, tagged:{}, address:{}, distribution:{}, value:{}: log_prob:{})'.format(
self.name if hasattr(self, 'name') else 'Unknown',
self.control if hasattr(self, 'control') else 'Unknown',
self.replace if hasattr(self, 'replace') else 'Unknown',
self.observable if hasattr(self, 'observable') else 'Unknown',
self.observed if hasattr(self, 'observed') else 'Unknown',
self.tagged if hasattr(self, 'tagged') else 'Unknown',
self.address if hasattr(self, 'address') else 'Unknown',
str(self.distribution) if hasattr(self, 'distribution') else 'Unknown',
str(self.value) if hasattr(self, 'value') else 'Unknown',
str(self.log_prob) if hasattr(self, 'log_prob') else 'Unknown')
def to(self, device):
if self.value is not None:
self.value.to(device=device)
# if self.distribution is not None:
# self.distribution.to(device=device)
def __hash__(self):
return hash(self.address + str(self.value) + str(self.control) + str(self.replace) + str(self.observed) + str(self.tagged))
def __eq__(self, other):
return hash(self) == hash(other)
class Trace():
def __init__(self):
self.variables = []
self.variables_controlled = []
self.variables_uncontrolled = []
self.variables_replaced = []
self.variables_observed = []
self.variables_observable = []
self.variables_tagged = []
self.variables_dict_address = {}
self.variables_dict_address_base = {}
self.named_variables = {}
self.result = None
self.log_prob = 0.
self.log_prob_observed = 0.
self.log_importance_weight = 0.
self.length = 0
self.length_controlled = 0
self.execution_time_sec = None
def __repr__(self):
# The 'Unknown' cases below are for handling pruned traces in offline training datasets
return 'Trace(all:{:,}, controlled:{:,}, replaced:{}, observeable:{}, observed:{}, tagged:{}, uncontrolled:{}, log_prob:{}, log_importance_weight:{})'.format(
self.length,
self.length_controlled,
'{:,}'.format(len(self.variables_replaced)) if hasattr(self, 'variables_replaced') else 'Unknown',
'{:,}'.format(len(self.variables_observed)) if hasattr(self, 'variables_observed') else 'Unknown',
'{:,}'.format(len(self.variables_observable)) if hasattr(self, 'variables_observable') else 'Unknown',
'{:,}'.format(len(self.variables_tagged)) if hasattr(self, 'variables_tagged') else 'Unknown',
'{:,}'.format(len(self.variables_uncontrolled)) if hasattr(self, 'variables_uncontrolled') else 'Unknown',
str(self.log_prob) if hasattr(self, 'log_prob') else 'Unknown',
str(self.log_importance_weight) if hasattr(self, 'log_importance_weight') else 'Unknown')
def add(self, variable):
self.variables.append(variable)
self.variables_dict_address[variable.address] = variable
self.variables_dict_address_base[variable.address_base] = variable
def end(self, result, execution_time_sec):
self.result = result
self.execution_time_sec = execution_time_sec
replaced_indices = []
for i in range(len(self.variables)):
variable = self.variables[i]
if variable.name is not None:
self.named_variables[variable.name] = variable
if variable.control and i not in replaced_indices:
if variable.replace:
for j in range(i + 1, len(self.variables)):
if self.variables[j].address_base == variable.address_base:
self.variables_replaced.append(variable)
variable = self.variables[j]
replaced_indices.append(j)
self.variables_controlled.append(variable)
self.variables_uncontrolled = [v for v in self.variables if (not v.control) and (not v.observed) and (not v.tagged)]
self.variables_observed = [v for v in self.variables if v.observed]
self.variables_observable = [v for v in self.variables if v.observable]
self.variables_tagged = [v for v in self.variables if v.tagged]
self.log_prob = sum([torch.sum(v.log_prob) for v in self.variables if v.control or v.observed])
self.log_prob_observed = sum([torch.sum(v.log_prob) for v in self.variables_observed])
self.length = len(self.variables)
self.length_controlled = len(self.variables_controlled)
replaced_log_importance_weights = {}
for variable in self.variables:
if variable.log_importance_weight is not None:
if variable.replace:
replaced_log_importance_weights[variable.address_base] = variable.log_importance_weight
else:
self.log_importance_weight += variable.log_importance_weight
for _, log_importance_weight in replaced_log_importance_weights.items():
self.log_importance_weight += log_importance_weight
def last_instance(self, address_base):
if address_base in self.variables_dict_address_base:
return self.variables_dict_address_base[address_base].instance
else:
return 0
def to(self, device):
for variable in self.variables:
variable.to(device)
def __hash__(self):
h = [hash(variable) for variable in self.variables]
return hash(sum(h))
def __eq__(self, other):
return hash(self) == hash(other)
| 47.619048 | 230 | 0.634714 |
454dbfdea10c9dd8259a91bb845438b0dad52ef4 | 744 | py | Python | cookies.py | tunir27/Attendr-Hardware-Scripts | cdc9293157d1810c2a9c8af0318b04203a8b2bf5 | [
"Apache-2.0"
] | 1 | 2018-08-15T06:27:53.000Z | 2018-08-15T06:27:53.000Z | cookies.py | tunir27/Attendr-Hardware-Scripts | cdc9293157d1810c2a9c8af0318b04203a8b2bf5 | [
"Apache-2.0"
] | null | null | null | cookies.py | tunir27/Attendr-Hardware-Scripts | cdc9293157d1810c2a9c8af0318b04203a8b2bf5 | [
"Apache-2.0"
] | null | null | null | import pickle
import requests
import os
def save_cookies(requests_cookiejar, filename):
with open(filename, 'wb') as f:
pickle.dump(requests_cookiejar, f)
def load_cookies(filename):
with open(filename, 'rb') as f:
return pickle.load(f)
#save cookies
def p():
f=os.stat("cookies.txt").st_size == 0
filename="cookies.txt"
if f:
r = requests.post('https://www.bhoracademy.com/api_auth')
#print(r.content)
save_cookies(r.cookies, filename)
return r.content
else:
#load cookies and do a request
r=requests.post('https://www.bhoracademy.com/api_auth', cookies=load_cookies(filename))
return r.content
if __name__ == "__main__":
d=p()
print(d)
| 26.571429 | 95 | 0.649194 |
7b71181ac01e1b0c8b89cbd226147310e6df9c15 | 3,432 | py | Python | kea/test_utils/base_test.py | hgomersall/Kea | bdddcfad170ae65f4ef23aea1cf495348458a738 | [
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | 1 | 2018-12-11T12:05:25.000Z | 2018-12-11T12:05:25.000Z | kea/test_utils/base_test.py | hgomersall/Kea | bdddcfad170ae65f4ef23aea1cf495348458a738 | [
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | null | null | null | kea/test_utils/base_test.py | hgomersall/Kea | bdddcfad170ae65f4ef23aea1cf495348458a738 | [
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | 1 | 2019-02-12T12:07:54.000Z | 2019-02-12T12:07:54.000Z | from veriutils.tests.base_hdl_test import HDLTestCase
from veriutils import myhdl_cosimulation
from ovenbird.cosimulation import (
vivado_vhdl_cosimulation, vivado_verilog_cosimulation)
from ovenbird import VIVADO_EXECUTABLE
import unittest
import os
VIVADO_DISABLE_REASON = ''
try:
if os.environ['USE_VIVADO'] == '0':
USE_VIVADO = False
VIVADO_DISABLE_REASON = 'USE_VIVADO environment variable was set to 0'
else:
USE_VIVADO = True
except KeyError:
# default to trying to use Vivado
USE_VIVADO = True
class KeaTestCase(HDLTestCase):
testing_using_vivado = False
def cosimulate(self, sim_cycles, dut_factory, ref_factory, args,
arg_types, **kwargs):
return myhdl_cosimulation(
sim_cycles, dut_factory, ref_factory, args, arg_types, **kwargs)
def tearDown(self):
# FIXME
# This is horrible. MyHDL should _not_ keep every historic simulation
# in a global like this. I made a foray into fixing this at
# https://github.com/hgomersall/myhdl/tree/globals_free_sim
# but at the time the appetite for this was not enough to complete
# the work properly.
# Here we have a very clear use case: Running all the Jackdaw tests
# causes the system to run out of memory!
# At some point, we need to fix this properly in MyHDL. A simpler
# fix than the previous work would be simply to allow the state to
# be cleared using a manual call. Not very elegant but would work.
#
# This should work because each test is notionally standalone, so
# there is no problem in simply clearing the simulator state between
# each run.
import myhdl._simulator
myhdl._simulator._signals = []
myhdl._simulator._blocks = []
myhdl._simulator._siglist = []
myhdl._simulator._futureEvents = []
myhdl._simulator._time = 0
myhdl._simulator._cosim = 0
myhdl._simulator._tracing = 0
myhdl._simulator._tf = None
class KeaVivadoVHDLTestCase(HDLTestCase):
testing_using_vivado = True
def cosimulate(self, sim_cycles, dut_factory, ref_factory, args,
arg_types, **kwargs):
if not USE_VIVADO:
raise unittest.SkipTest(
'Vivado tests have been disabled: %s' % VIVADO_DISABLE_REASON)
if VIVADO_EXECUTABLE is None:
raise unittest.SkipTest(
'Vivado executable not available: Running VHDL tests in '
'Vivado requires the Vivado executable to be in the path.')
return vivado_vhdl_cosimulation(
sim_cycles, dut_factory, ref_factory, args, arg_types, **kwargs)
class KeaVivadoVerilogTestCase(HDLTestCase):
testing_using_vivado = True
def cosimulate(self, sim_cycles, dut_factory, ref_factory, args,
arg_types, **kwargs):
if not USE_VIVADO:
raise unittest.SkipTest(
'Vivado tests have been disabled: %s' % VIVADO_DISABLE_REASON)
if VIVADO_EXECUTABLE is None:
raise unittest.SkipTest(
'Vivado executable not available: Running Verilog tests in '
'Vivado requires the Vivado executable to be in the path.')
return vivado_verilog_cosimulation(
sim_cycles, dut_factory, ref_factory, args, arg_types, **kwargs)
| 34.666667 | 78 | 0.664044 |
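The Vivado test cases above skip themselves when the toolchain is disabled or unavailable. A generic sketch of that gating pattern with plain `unittest`, independent of veriutils/ovenbird (the executable name and environment variable are taken from the file above):

```python
import os
import shutil
import unittest

VIVADO_ENABLED = os.environ.get('USE_VIVADO', '1') != '0'
VIVADO_ON_PATH = shutil.which('vivado') is not None

class ToolGatedTestCase(unittest.TestCase):
    def setUp(self):
        if not VIVADO_ENABLED:
            raise unittest.SkipTest('Vivado tests disabled with USE_VIVADO=0')
        if not VIVADO_ON_PATH:
            raise unittest.SkipTest('Vivado executable not found on the PATH')

    def test_placeholder(self):
        self.assertTrue(True)  # real tests would run a cosimulation here
```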
bb9b1840640aa63d8768ece6785d64989b1dbc12 | 4,902 | py | Python | neuraxle/steps/column_transformer.py | guillaume-chevalier/Neuraxle | 5645e53bbe98aac367c8fe19f41dc14b21206fbb | [
"Apache-2.0"
] | 2 | 2019-04-14T18:40:01.000Z | 2020-06-02T09:36:59.000Z | neuraxle/steps/column_transformer.py | guillaume-chevalier/Neuraxle | 5645e53bbe98aac367c8fe19f41dc14b21206fbb | [
"Apache-2.0"
] | null | null | null | neuraxle/steps/column_transformer.py | guillaume-chevalier/Neuraxle | 5645e53bbe98aac367c8fe19f41dc14b21206fbb | [
"Apache-2.0"
] | null | null | null | """
Neuraxle's Column Transformer Steps
====================================
Pipeline steps to apply N-Dimensional column transformations to different columns.
..
Copyright 2019, Neuraxio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
..
Thanks to Umaneo Technologies Inc. for their contributions to this Machine Learning
project, visit https://www.umaneo.com/ for more information on Umaneo Technologies Inc.
"""
from typing import List, Tuple, Union
import numpy as np
from neuraxle.base import BaseStep, NonFittableMixin, MetaStepMixin
from neuraxle.pipeline import Pipeline
from neuraxle.steps.loop import ForEachDataInput
from neuraxle.union import FeatureUnion
ColumnSelectionType = Union[Tuple[int, BaseStep], Tuple[List[int], BaseStep], Tuple[slice, BaseStep]]
ColumnChooserTupleList = List[ColumnSelectionType]
class ColumnSelector2D(NonFittableMixin, BaseStep):
"""
    A ColumnSelector2D selects columns in a 2D sequence of data.
"""
def __init__(self, columns_selection: ColumnSelectionType):
super().__init__()
self.column_selection = columns_selection
def transform(self, data_inputs):
if isinstance(self.column_selection, range):
self.column_selection = slice(
self.column_selection.start,
self.column_selection.stop,
self.column_selection.step
)
if isinstance(self.column_selection, int):
return np.expand_dims(np.array(data_inputs)[:, self.column_selection], axis=-1)
if isinstance(self.column_selection, slice):
return np.array(data_inputs)[:, self.column_selection]
if isinstance(self.column_selection, list):
columns = [
np.expand_dims(np.array(data_inputs)[:, i], axis=-1)
for i in self.column_selection
]
return np.concatenate(columns, axis=-1)
if self.column_selection is None:
return data_inputs
        raise ValueError(
            'Column selection type not supported: {0}\nSupported types: {1}'.format(
                self.column_selection,
                repr(ColumnSelectionType)
            ))
class ColumnsSelectorND(MetaStepMixin, BaseStep):
"""
    ColumnsSelectorND wraps a ColumnSelector2D in as many ForEachDataInput steps
    as needed to select columns in the last dimension.
"""
def __init__(self, columns_selection, n_dimension=3):
BaseStep.__init__(self)
col_selector = ColumnSelector2D(columns_selection=columns_selection)
        # Wrap once per extra dimension so the 2D selector runs on the innermost axis.
        for _ in range(max(0, n_dimension - 2)):
col_selector = ForEachDataInput(col_selector)
MetaStepMixin.__init__(self, col_selector)
self.n_dimension = n_dimension
class ColumnTransformer(FeatureUnion):
"""
    A ColumnTransformer can apply custom transformations to different columns.
    The ColumnTransformer accepts a list of tuples for the transformations,
    and will name the steps accordingly (because of the TruncableSteps' constructor)
    by converting each indexer object to a string. Indexer objects can be ranges, an int, or a list of ints.
    The input data can be `N`-dimensional (ND), in which case `n_dimension` must be specified. The column
    data passed to the sub-steps will still be ND.
    Usage example:
    .. code-block:: python
        ColumnTransformer([
(range(0, 2), CyclicTimes()),
(3, CategoricalEnum(categories_count=5, starts_at_zero=True)),
(4, CategoricalEnum(categories_count=5, starts_at_zero=True)),
([10, 13, 15], CategoricalEnum(categories_count=5, starts_at_zero=True)),
])
.. seealso::
:class:`FeatureUnion`,
"""
def __init__(self, column_chooser_steps_as_tuple: ColumnChooserTupleList, n_dimension: int = 3):
# Make unique names from the indices in case we have many steps for transforming the same column(s).
self.string_indices = [
str(name) + "_" + str(step.__class__.__name__)
for name, step in column_chooser_steps_as_tuple
]
FeatureUnion.__init__(self, [
(string_indices, Pipeline([
ColumnsSelectorND(indices, n_dimension=n_dimension),
step
]))
for string_indices, (indices, step) in zip(self.string_indices, column_chooser_steps_as_tuple)
])
| 36.311111 | 108 | 0.676867 |
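A standalone illustration of the column-selection semantics implemented by `ColumnSelector2D`, using plain NumPy so it runs without Neuraxle installed:

```python
import numpy as np

data = np.arange(12).reshape(3, 4)  # 3 rows, 4 columns

single = np.expand_dims(data[:, 1], axis=-1)  # int selector -> shape (3, 1)
sliced = data[:, 0:2]                         # slice/range selector -> shape (3, 2)
listed = np.concatenate(
    [np.expand_dims(data[:, i], axis=-1) for i in [0, 3]], axis=-1)  # list selector

print(single.shape, sliced.shape, listed.shape)  # (3, 1) (3, 2) (3, 2)
```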
b59108b4f3b707212130f6033c7174f5c0ebbe6d | 114 | py | Python | utils/data/__init__.py | chenwenxiao/DOI | 14bdedd0b1b886efe77737cfb62695f03ee17c58 | [
"MIT"
] | 1 | 2021-08-13T22:14:10.000Z | 2021-08-13T22:14:10.000Z | utils/data/__init__.py | chenwenxiao/DOI | 14bdedd0b1b886efe77737cfb62695f03ee17c58 | [
"MIT"
] | null | null | null | utils/data/__init__.py | chenwenxiao/DOI | 14bdedd0b1b886efe77737cfb62695f03ee17c58 | [
"MIT"
] | null | null | null | from . import mappers
from .datasets import *
from .image_utils import *
from .misc import *
from .types import *
| 19 | 26 | 0.745614 |
6f816617fdf8efdb23f75a09c6f7297e22b4e16b | 10,444 | py | Python | tensorflow_probability/python/distributions/inverse_gamma.py | cafeal/probability | f968a32d601d29ec31a10568ccfe30263cf91ef2 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/distributions/inverse_gamma.py | cafeal/probability | f968a32d601d29ec31a10568ccfe30263cf91ef2 | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/distributions/inverse_gamma.py | cafeal/probability | f968a32d601d29ec31a10568ccfe30263cf91ef2 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The InverseGamma distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.distributions import distribution
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import distribution_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import reparameterization
from tensorflow_probability.python.internal import tensor_util
__all__ = [
'InverseGamma',
]
class InverseGamma(distribution.Distribution):
"""InverseGamma distribution.
The `InverseGamma` distribution is defined over positive real numbers using
parameters `concentration` (aka "alpha") and `scale` (aka "beta").
#### Mathematical Details
The probability density function (pdf) is,
```none
pdf(x; alpha, beta, x > 0) = x**(-alpha - 1) exp(-beta / x) / Z
Z = Gamma(alpha) beta**-alpha
```
where:
* `concentration = alpha`,
* `scale = beta`,
* `Z` is the normalizing constant, and,
* `Gamma` is the [gamma function](
https://en.wikipedia.org/wiki/Gamma_function).
  The cumulative distribution function (cdf) is,
```none
cdf(x; alpha, beta, x > 0) = GammaInc(alpha, beta / x) / Gamma(alpha)
```
where `GammaInc` is the [upper incomplete Gamma function](
https://en.wikipedia.org/wiki/Incomplete_gamma_function).
The parameters can be intuited via their relationship to mean and variance
when these moments exist,
```none
mean = beta / (alpha - 1) when alpha > 1
variance = beta**2 / (alpha - 1)**2 / (alpha - 2) when alpha > 2
```
i.e., under the same conditions:
```none
alpha = mean**2 / variance + 2
beta = mean * (mean**2 / variance + 1)
```
Distribution parameters are automatically broadcast in all functions; see
examples for details.
Samples of this distribution are reparameterized (pathwise differentiable).
The derivatives are computed using the approach described in the paper
[Michael Figurnov, Shakir Mohamed, Andriy Mnih.
Implicit Reparameterization Gradients, 2018](https://arxiv.org/abs/1805.08498)
#### Examples
```python
tfd = tfp.distributions
dist = tfd.InverseGamma(concentration=3.0, scale=2.0)
dist2 = tfd.InverseGamma(concentration=[3.0, 4.0], scale=[2.0, 3.0])
```
Compute the gradients of samples w.r.t. the parameters:
```python
tfd = tfp.distributions
concentration = tf.constant(3.0)
scale = tf.constant(2.0)
dist = tfd.InverseGamma(concentration, scale)
samples = dist.sample(5) # Shape [5]
loss = tf.reduce_mean(tf.square(samples)) # Arbitrary loss function
# Unbiased stochastic gradients of the loss function
grads = tf.gradients(loss, [concentration, scale])
```
"""
def __init__(self,
concentration,
scale=None,
validate_args=False,
allow_nan_stats=True,
name='InverseGamma'):
"""Construct InverseGamma with `concentration` and `scale` parameters.
The parameters `concentration` and `scale` must be shaped in a way that
supports broadcasting (e.g. `concentration + scale` is a valid operation).
Args:
concentration: Floating point tensor, the concentration params of the
distribution(s). Must contain only positive values.
scale: Floating point tensor, the scale params of the distribution(s).
Must contain only positive values.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
TypeError: if `concentration` and `scale` are different dtypes.
"""
parameters = dict(locals())
with tf.name_scope(name) as name:
dtype = dtype_util.common_dtype(
[concentration, scale], dtype_hint=tf.float32)
self._concentration = tensor_util.convert_nonref_to_tensor(
concentration, dtype=dtype, name='concentration')
self._scale = tensor_util.convert_nonref_to_tensor(
scale, dtype=dtype, name='scale')
super(InverseGamma, self).__init__(
dtype=self._concentration.dtype,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,
parameters=parameters,
name=name)
@staticmethod
def _param_shapes(sample_shape):
return dict(
zip(('concentration', 'scale'),
([tf.convert_to_tensor(sample_shape, dtype=tf.int32)] * 2)))
@classmethod
def _params_event_ndims(cls):
return dict(concentration=0, scale=0)
@property
def concentration(self):
"""Concentration parameter."""
return self._concentration
@property
def scale(self):
"""Scale parameter."""
return self._scale
def _batch_shape_tensor(self):
return tf.broadcast_dynamic_shape(
tf.shape(self.concentration), tf.shape(self.scale))
def _batch_shape(self):
return tf.broadcast_static_shape(self.concentration.shape,
self.scale.shape)
def _event_shape_tensor(self):
return tf.constant([], dtype=tf.int32)
def _event_shape(self):
return tf.TensorShape([])
@distribution_util.AppendDocstring(
"""Note: See `tf.random_gamma` docstring for sampling details and
caveats.""")
def _sample_n(self, n, seed=None):
return 1. / tf.random.gamma(
shape=[n],
alpha=self.concentration,
beta=self.scale,
dtype=self.dtype,
seed=seed)
def _log_prob(self, x):
concentration = tf.convert_to_tensor(self.concentration)
scale = tf.convert_to_tensor(self.scale)
unnormalized_prob = -(1. + concentration) * tf.math.log(x) - scale / x
normalization = (
tf.math.lgamma(concentration) - concentration * tf.math.log(scale))
return unnormalized_prob - normalization
def _cdf(self, x):
# Note that igammac returns the upper regularized incomplete gamma
# function Q(a, x), which is what we want for the CDF.
return tf.math.igammac(self.concentration, self.scale / x)
def _entropy(self):
concentration = tf.convert_to_tensor(self.concentration)
scale = tf.convert_to_tensor(self.scale)
return (concentration + tf.math.log(scale) +
tf.math.lgamma(concentration) -
((1. + concentration) * tf.math.digamma(concentration)))
@distribution_util.AppendDocstring(
"""The mean of an inverse gamma distribution is
`scale / (concentration - 1)`, when `concentration > 1`, and `NaN`
otherwise. If `self.allow_nan_stats` is `False`, an exception will be
raised rather than returning `NaN`""")
def _mean(self):
concentration = tf.convert_to_tensor(self.concentration)
scale = tf.convert_to_tensor(self.scale)
mean = scale / (concentration - 1.)
if self.allow_nan_stats:
assertions = []
else:
assertions = [assert_util.assert_less(
tf.ones([], self.dtype), concentration,
message='mean undefined when any concentration <= 1')]
with tf.control_dependencies(assertions):
return tf.where(
concentration > 1.,
mean,
dtype_util.as_numpy_dtype(self.dtype)(np.nan))
@distribution_util.AppendDocstring(
"""Variance for inverse gamma is defined only for `concentration > 2`. If
`self.allow_nan_stats` is `False`, an exception will be raised rather
than returning `NaN`.""")
def _variance(self):
concentration = tf.convert_to_tensor(self.concentration)
scale = tf.convert_to_tensor(self.scale)
var = (
tf.square(scale) / tf.square(concentration - 1.) /
(concentration - 2.))
if self.allow_nan_stats:
assertions = []
else:
assertions = [assert_util.assert_less(
tf.constant(2., dtype=self.dtype),
concentration,
message='variance undefined when any concentration <= 2')]
with tf.control_dependencies(assertions):
return tf.where(
concentration > 2.,
var,
dtype_util.as_numpy_dtype(self.dtype)(np.nan))
@distribution_util.AppendDocstring(
"""The mode of an inverse gamma distribution is `scale / (concentration +
1)`.""")
def _mode(self):
return self.scale / (1. + self.concentration)
def _sample_control_dependencies(self, x):
assertions = []
if not self.validate_args:
return assertions
assertions.append(assert_util.assert_non_negative(
x, message='Sample must be non-negative.'))
return assertions
def _parameter_control_dependencies(self, is_init):
if not self.validate_args:
return []
assertions = []
if is_init != tensor_util.is_ref(self.concentration):
assertions.append(assert_util.assert_positive(
self.concentration,
message='Argument `concentration` must be positive.'))
if is_init != tensor_util.is_ref(self.scale):
assertions.append(assert_util.assert_positive(
self.scale,
message='Argument `scale` must be positive.'))
return assertions
| 34.813333 | 80 | 0.681731 |
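A quick numerical sanity check of the documented moments, using only NumPy: if `X ~ Gamma(concentration, rate=scale)` then `1 / X` follows the inverse gamma parameterization above.

```python
import numpy as np

concentration, scale = 5.0, 2.0
rng = np.random.default_rng(0)
samples = 1.0 / rng.gamma(shape=concentration, scale=1.0 / scale, size=200_000)

print(samples.mean(), scale / (concentration - 1.0))  # both close to 0.5
print(samples.var(),
      scale**2 / ((concentration - 1.0)**2 * (concentration - 2.0)))  # both close to 0.083
```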
64a5dcfe0b0dc2499c9417df271d4cfa444e9f18 | 1,558 | py | Python | setup.py | mriedem/mkdocs_macros_plugin | b8bcfafb59d23e734991ac03acd408a7d98ee272 | [
"MIT"
] | null | null | null | setup.py | mriedem/mkdocs_macros_plugin | b8bcfafb59d23e734991ac03acd408a7d98ee272 | [
"MIT"
] | null | null | null | setup.py | mriedem/mkdocs_macros_plugin | b8bcfafb59d23e734991ac03acd408a7d98ee272 | [
"MIT"
] | null | null | null | # --------------------------------------------
# Setup file for the package
#
# Laurent Franceschetti (c) 2018-2020
# --------------------------------------------
import os
from setuptools import setup, find_packages
VERSION_NUMBER = '0.4.7b'
def read_file(fname):
"Read a local file"
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='mkdocs-macros-plugin',
version=VERSION_NUMBER,
description="Unleash the power of MkDocs with macros and variables",
long_description=read_file('README.md'),
long_description_content_type='text/markdown',
keywords='mkdocs python markdown macros',
url='https://github.com/fralau/mkdocs_macros_plugin',
author='Laurent Franceschetti',
author_email='info@settlenext.com',
license='MIT',
python_requires='>=3.5',
install_requires=[
'mkdocs>=0.17',
'repackage',
'jinja2',
'termcolor',
'pyyaml',
'mkdocs-material'
],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.5',
],
include_package_data=True,
packages=find_packages(exclude=['*.tests']),
entry_points={
'mkdocs.plugins': [
'macros = macros.plugin:MacrosPlugin'
]
}
)
| 28.327273 | 72 | 0.598203 |
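Once a distribution built from the setup above is installed, the `mkdocs.plugins` entry point can be checked at runtime. A small sketch using `importlib.metadata` (Python 3.8+, with a fallback for the older dict-style API):

```python
from importlib.metadata import entry_points

eps = entry_points()
plugins = (eps.select(group='mkdocs.plugins') if hasattr(eps, 'select')
           else eps.get('mkdocs.plugins', []))
print([ep.name for ep in plugins])  # 'macros' should appear once the plugin is installed
```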
a46ead118e9ab142fd5c86ff1ee2acb1b28c8781 | 227 | py | Python | amquery/core/sample_map/__init__.py | nromashchenko/nir | 4b0c91d670462ca33a9b224740a2977e99546440 | [
"MIT"
] | 3 | 2016-09-13T16:31:05.000Z | 2016-09-14T06:36:44.000Z | amquery/core/sample_map/__init__.py | nromashchenko/nir | 4b0c91d670462ca33a9b224740a2977e99546440 | [
"MIT"
] | 36 | 2016-09-14T06:26:20.000Z | 2017-05-04T19:11:30.000Z | amquery/core/sample_map/__init__.py | nromashchenko/amquery | 4b0c91d670462ca33a9b224740a2977e99546440 | [
"MIT"
] | null | null | null | from ._sample_map import SampleMap
__license__ = "MIT"
__version__ = "0.2.1"
__author__ = "Nikolay Romashchenko"
__maintainer__ = "Nikolay Romashchenko"
__email__ = "nikolay.romashchenko@gmail.com"
__status__ = "Development"
| 22.7 | 44 | 0.779736 |
fb80aa9c1164d8028b1223941552ab9e8c23eb50 | 2,156 | py | Python | tests/ignite/contrib/metrics/regression/test_maximum_absolute_error.py | Devanshu24/ignite | 2f0ba3e65cfa36b43bc87b315733fd3f3585e430 | [
"BSD-3-Clause"
] | null | null | null | tests/ignite/contrib/metrics/regression/test_maximum_absolute_error.py | Devanshu24/ignite | 2f0ba3e65cfa36b43bc87b315733fd3f3585e430 | [
"BSD-3-Clause"
] | null | null | null | tests/ignite/contrib/metrics/regression/test_maximum_absolute_error.py | Devanshu24/ignite | 2f0ba3e65cfa36b43bc87b315733fd3f3585e430 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import pytest
import torch
from ignite.contrib.metrics.regression import MaximumAbsoluteError
from ignite.exceptions import NotComputableError
def test_zero_sample():
m = MaximumAbsoluteError()
with pytest.raises(
NotComputableError, match=r"MaximumAbsoluteError must have at least one example before it can be computed"
):
m.compute()
def test_wrong_input_shapes():
m = MaximumAbsoluteError()
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4, 1, 2), torch.rand(4, 1)))
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4, 1), torch.rand(4, 1, 2)))
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4, 1, 2), torch.rand(4,),))
with pytest.raises(ValueError, match=r"Input data shapes should be the same, but given"):
m.update((torch.rand(4,), torch.rand(4, 1, 2),))
def test_maximum_absolute_error():
a = np.random.randn(4)
b = np.random.randn(4)
c = np.random.randn(4)
d = np.random.randn(4)
ground_truth = np.random.randn(4)
m = MaximumAbsoluteError()
np_ans = -1
m.update((torch.from_numpy(a), torch.from_numpy(ground_truth)))
np_max = np.max(np.abs((a - ground_truth)))
np_ans = np_max if np_max > np_ans else np_ans
assert m.compute() == pytest.approx(np_ans)
m.update((torch.from_numpy(b), torch.from_numpy(ground_truth)))
np_max = np.max(np.abs((b - ground_truth)))
np_ans = np_max if np_max > np_ans else np_ans
assert m.compute() == pytest.approx(np_ans)
m.update((torch.from_numpy(c), torch.from_numpy(ground_truth)))
np_max = np.max(np.abs((c - ground_truth)))
np_ans = np_max if np_max > np_ans else np_ans
assert m.compute() == pytest.approx(np_ans)
m.update((torch.from_numpy(d), torch.from_numpy(ground_truth)))
np_max = np.max(np.abs((d - ground_truth)))
np_ans = np_max if np_max > np_ans else np_ans
assert m.compute() == pytest.approx(np_ans)
| 34.222222 | 114 | 0.68321 |
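The tests above exercise a metric that reduces to a running maximum of the absolute error over all updates; a compact NumPy restatement of that reduction for comparison:

```python
import numpy as np

def running_max_abs_error(batches):
    worst = 0.0
    for prediction, target in batches:
        worst = max(worst, float(np.max(np.abs(prediction - target))))
    return worst

target = np.zeros(4)
print(running_max_abs_error([
    (np.array([0.1, -0.2, 0.05, 0.0]), target),
    (np.array([0.5, 0.0, 0.0, 0.0]), target),
]))  # 0.5
```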
20f68ee9912c546bf47d1e98aabd4d74a5c8bbef | 2,967 | py | Python | toucan/canary_utils/lib/pptx.py | toucan-project/TOUCAN | d562e1191b5ef10480be819ba8c584034c25259b | [
"MIT"
] | 4 | 2019-08-28T14:36:23.000Z | 2019-08-30T09:49:12.000Z | toucan/canary_utils/lib/pptx.py | toucan-project/TOUCAN | d562e1191b5ef10480be819ba8c584034c25259b | [
"MIT"
] | 2 | 2021-04-20T17:09:30.000Z | 2021-09-23T23:26:22.000Z | toucan/canary_utils/lib/pptx.py | toucan-project/TOUCAN | d562e1191b5ef10480be819ba8c584034c25259b | [
"MIT"
] | 1 | 2020-01-22T20:01:58.000Z | 2020-01-22T20:01:58.000Z | #!/usr/bin/env python3
from zipfile import ZipFile
from defusedxml.minidom import parseString
from canary_api.settings import TEMPLATE_DIR
from canary_utils.lib.util import has_access
from canary_utils.lib.util import get_next_rid, find_highest_xml
from canary_utils.lib.util import open_zip, create_child, fix_metadata
def inject_pic_slide(slide_xml, pic, rid):
"""Inject picture into slide."""
s = parseString(slide_xml)
for child in pic.childNodes:
if child.tagName == 'p:blipFill':
node = child
for child in node.childNodes:
if child.tagName == 'a:blip':
node = child
node.attributes['r:embed'].value = rid
child = s.lastChild.firstChild.firstChild
    if child.tagName != 'p:spTree':
        raise ValueError('Could not find PPTX tag p:spTree')
child.appendChild(pic)
return s.toxml()
def inject_pic_rels(slide_xml_rels, rid, target):
"""Inject picture relationships."""
s = parseString(slide_xml_rels)
attr = {'Id': rid,
'Target': target,
'TargetMode': 'External',
'Type': 'http://schemas.openxmlformats.org/officeDocument/'
'2006/relationships/image'}
return create_child(s, 'Relationship', attr)
def read_xml_pic(xml_pic):
"""Read XML picture."""
s = parseString(xml_pic)
return s.lastChild.lastChild
def make_ppt_canary(infile, outfile, canary, force, metadata):
"""Create powerpoint canary from input file."""
z = open_zip(infile)
zout = ZipFile(outfile, 'w', compression=8)
overwrite = ['ppt/slides/', 'ppt/slides/_rels/',
'docProps/custom.xml']
targets = []
hi_slide = f"slide{find_highest_xml(z, 'slides')}.xml"
rid = get_next_rid(z.read(f"{overwrite[1]}{hi_slide}.rels"))
for name in z.namelist():
if 'ppt/slideMasters/_rels/' in name:
targets.append(name)
    if len(targets) == 0:
        # No slide master image relationship to backload the canary into.
        raise ValueError('Could not find PPTX image to backload')
items = z.infolist()
for item in items:
if item.filename == f"{overwrite[0]}{hi_slide}":
with open(f"{TEMPLATE_DIR}/xml/slidePic.xml", 'r') as fd:
pic = read_xml_pic(fd.read())
buffer = inject_pic_slide(z.read(item.filename), pic, rid)
zout.writestr(item.filename, buffer)
elif item.filename == f"{overwrite[1]}{hi_slide}.rels":
buffer = inject_pic_rels(z.read(item.filename), rid, canary)
zout.writestr(item.filename, buffer)
elif item.filename == overwrite[2]:
if metadata:
buffer = fix_metadata()
else:
buffer = z.read(item.filename)
zout.writestr(item.filename, buffer)
elif item.filename not in overwrite:
buffer = z.read(item.filename)
zout.writestr(item.filename, buffer)
return outfile
| 27.220183 | 72 | 0.628918 |
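A hedged helper for checking the output of `make_ppt_canary`: it lists the slide relationship parts that carry an external target, which is where the injected canary URL ends up (the output filename below is illustrative):

```python
from zipfile import ZipFile

def list_external_rel_parts(pptx_path):
    hits = []
    with ZipFile(pptx_path) as z:
        for name in z.namelist():
            if name.startswith('ppt/slides/_rels/') and name.endswith('.rels'):
                xml = z.read(name).decode('utf-8', errors='replace')
                if 'TargetMode="External"' in xml:
                    hits.append(name)
    return hits

print(list_external_rel_parts('canary_output.pptx'))
```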
d012eee3286b9188e1aea097f0452b42e81805aa | 463 | py | Python | data/scripts/templates/object/mobile/shared_dressed_raider_trandoshan_male_01.py | obi-two/GameServer | 7d37024e2291a97d49522610cd8f1dbe5666afc2 | [
"MIT"
] | 20 | 2015-02-23T15:11:56.000Z | 2022-03-18T20:56:48.000Z | data/scripts/templates/object/mobile/shared_dressed_raider_trandoshan_male_01.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | null | null | null | data/scripts/templates/object/mobile/shared_dressed_raider_trandoshan_male_01.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | 20 | 2015-04-04T16:35:59.000Z | 2022-03-24T14:54:37.000Z | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_dressed_raider_trandoshan_male_01.iff"
result.attribute_template_id = 9
result.stfName("npc_name","trandoshan_base_male")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | 27.235294 | 79 | 0.740821 |
a29926a57df847fd6553e0813a5e2dfeebb3885e | 78,788 | py | Python | tensorflow/python/training/saver.py | wenming2014/tensorflow | a102a6a71844e194f3946f6318768c5367f1f16b | [
"Apache-2.0"
] | 5 | 2018-07-04T22:14:02.000Z | 2018-07-04T22:21:43.000Z | tensorflow/python/training/saver.py | wenming2014/tensorflow | a102a6a71844e194f3946f6318768c5367f1f16b | [
"Apache-2.0"
] | null | null | null | tensorflow/python/training/saver.py | wenming2014/tensorflow | a102a6a71844e194f3946f6318768c5367f1f16b | [
"Apache-2.0"
] | 2 | 2019-02-26T16:21:15.000Z | 2020-12-04T17:48:17.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Save and restore variables."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os.path
import time
import uuid
import numpy as np
import six
from tensorflow.core.protobuf import checkpointable_object_graph_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import errors
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_io_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import saveable_object
from tensorflow.python.training import training_util
from tensorflow.python.training.checkpointable import base as checkpointable
from tensorflow.python.util import compat
from tensorflow.python.util.tf_export import tf_export
# TODO(allenl): Remove these aliases once all users are migrated off.
get_checkpoint_state = checkpoint_management.get_checkpoint_state
update_checkpoint_state = checkpoint_management.update_checkpoint_state
generate_checkpoint_state_proto = (
checkpoint_management.generate_checkpoint_state_proto)
latest_checkpoint = checkpoint_management.latest_checkpoint
checkpoint_exists = checkpoint_management.checkpoint_exists
get_checkpoint_mtimes = checkpoint_management.get_checkpoint_mtimes
remove_checkpoint = checkpoint_management.remove_checkpoint
# Op names which identify variable reads which should be saved.
_VARIABLE_OPS = set(["Variable",
"VariableV2",
"AutoReloadVariable",
"VarHandleOp",
"ReadVariableOp"])
def _set_cpu0(device_string):
"""Creates a new device string based on `device_string` but using /CPU:0.
If the device is already on /CPU:0, this is a no-op.
Args:
device_string: A device string.
Returns:
A device string.
"""
parsed_device = pydev.DeviceSpec.from_string(device_string)
parsed_device.device_type = "CPU"
parsed_device.device_index = 0
return parsed_device.to_string()
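# Example: _set_cpu0("/job:worker/replica:0/task:3/device:GPU:1") returns
# "/job:worker/replica:0/task:3/device:CPU:0", pinning checkpoint I/O for that
# variable to the host CPU of the same worker.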
class BaseSaverBuilder(object):
"""Base class for Savers.
Can be extended to create different Ops.
"""
SaveSpec = saveable_object.SaveSpec
SaveableObject = saveable_object.SaveableObject
class VariableSaveable(SaveableObject):
"""SaveableObject implementation that handles Variables."""
def __init__(self, var, slice_spec, name):
spec = BaseSaverBuilder.SaveSpec(var, slice_spec, name, dtype=var.dtype)
super(BaseSaverBuilder.VariableSaveable, self).__init__(var, [spec], name)
def restore(self, restored_tensors, restored_shapes):
restored_tensor = restored_tensors[0]
if restored_shapes is not None:
restored_tensor = array_ops.reshape(restored_tensor, restored_shapes[0])
return state_ops.assign(
self.op,
restored_tensor,
validate_shape=restored_shapes is None and
self.op.get_shape().is_fully_defined())
class ResourceVariableSaveable(SaveableObject):
"""SaveableObject implementation that handles ResourceVariables."""
def __init__(self, var, slice_spec, name):
self._var_device = var.device
self._var_shape = var.shape
if isinstance(var, ops.Tensor):
self.handle_op = var.op.inputs[0]
tensor = var
elif isinstance(var, resource_variable_ops.ResourceVariable):
def _read_variable_closure(v):
def f():
with ops.device(v.device):
x = v.read_value()
# To allow variables placed on non-CPU devices to be checkpointed,
# we copy them to CPU on the same machine first.
with ops.device("/device:CPU:0"):
return array_ops.identity(x)
return f
self.handle_op = var.handle
tensor = _read_variable_closure(var)
else:
raise ValueError(
"Saveable is neither a resource variable nor a read operation."
" Got: %s" % repr(var))
spec = BaseSaverBuilder.SaveSpec(tensor, slice_spec, name,
dtype=var.dtype)
super(BaseSaverBuilder.ResourceVariableSaveable, self).__init__(
var, [spec], name)
def restore(self, restored_tensors, restored_shapes):
restored_tensor = restored_tensors[0]
if restored_shapes is not None:
restored_tensor = array_ops.reshape(restored_tensor, restored_shapes[0])
# Copy the restored tensor to the variable's device.
with ops.device(self._var_device):
restored_tensor = array_ops.identity(restored_tensor)
return resource_variable_ops.shape_safe_assign_variable_handle(
self.handle_op, self._var_shape, restored_tensor)
def __init__(self, write_version=saver_pb2.SaverDef.V2):
self._write_version = write_version
def save_op(self, filename_tensor, saveables):
"""Create an Op to save 'saveables'.
This is intended to be overridden by subclasses that want to generate
different Ops.
Args:
filename_tensor: String Tensor.
saveables: A list of BaseSaverBuilder.SaveableObject objects.
Returns:
An Operation that save the variables.
Raises:
RuntimeError: (implementation detail) if "self._write_version" is an
unexpected value.
"""
# pylint: disable=protected-access
tensor_names = []
tensors = []
tensor_slices = []
for saveable in saveables:
for spec in saveable.specs:
tensor_names.append(spec.name)
tensors.append(spec.tensor)
tensor_slices.append(spec.slice_spec)
if self._write_version == saver_pb2.SaverDef.V1:
return io_ops._save(
filename=filename_tensor,
tensor_names=tensor_names,
tensors=tensors,
tensor_slices=tensor_slices)
elif self._write_version == saver_pb2.SaverDef.V2:
# "filename_tensor" is interpreted *NOT AS A FILENAME*, but as a prefix
# of a V2 checkpoint: e.g. "/fs/train/ckpt-<step>/tmp/worker<i>-<step>".
return io_ops.save_v2(filename_tensor, tensor_names, tensor_slices,
tensors)
else:
raise RuntimeError("Unexpected write_version: " + self._write_version)
def bulk_restore(self, filename_tensor, saveables, preferred_shard,
restore_sequentially):
"""Restore all tensors contained in saveables.
By default, this issues separate calls to `restore_op` for each saveable.
Subclasses may override to load multiple saveables in a single call.
Args:
filename_tensor: String Tensor.
saveables: List of BaseSaverBuilder.SaveableObject objects.
preferred_shard: Int. Shard to open first when loading a sharded file.
restore_sequentially: Unused. Bool. If true, each restore is sequential.
Returns:
A list of Tensors resulting from reading 'saveable' from
'filename'.
"""
del restore_sequentially
all_tensors = []
for saveable in saveables:
with ops.device(_set_cpu0(saveable.device) if saveable.device else None):
all_tensors.extend(
self.restore_op(filename_tensor, saveable, preferred_shard))
return all_tensors
# pylint: disable=unused-argument
def restore_op(self, filename_tensor, saveable, preferred_shard):
"""Create ops to restore 'saveable'.
This is intended to be overridden by subclasses that want to generate
different Ops.
Args:
filename_tensor: String Tensor.
saveable: A BaseSaverBuilder.SaveableObject object.
preferred_shard: Int. Shard to open first when loading a sharded file.
Returns:
A list of Tensors resulting from reading 'saveable' from
'filename'.
"""
# pylint: disable=protected-access
tensors = []
for spec in saveable.specs:
tensors.append(
io_ops.restore_v2(
filename_tensor,
[spec.name],
[spec.slice_spec],
[spec.dtype])[0])
return tensors
# pylint: enable=unused-argument
def sharded_filename(self, filename_tensor, shard, num_shards):
"""Append sharding information to a filename.
Args:
filename_tensor: A string tensor.
shard: Integer. The shard for the filename.
num_shards: An int Tensor for the number of shards.
Returns:
A string tensor.
"""
return gen_io_ops.sharded_filename(filename_tensor, shard, num_shards)
def _AddSaveOps(self, filename_tensor, saveables):
"""Add ops to save variables that are on the same shard.
Args:
filename_tensor: String Tensor.
saveables: A list of SaveableObject objects.
Returns:
A tensor with the filename used to save.
"""
save = self.save_op(filename_tensor, saveables)
return control_flow_ops.with_dependencies([save], filename_tensor)
def _AddShardedSaveOpsForV2(self, checkpoint_prefix, per_device):
"""Add ops to save the params per shard, for the V2 format.
Note that the sharded save procedure for the V2 format is different from
V1: there is a special "merge" step that merges the small metadata produced
from each device.
Args:
checkpoint_prefix: scalar String Tensor. Interpreted *NOT AS A
FILENAME*, but as a prefix of a V2 checkpoint;
per_device: A list of (device, BaseSaverBuilder.VarToSave) pairs, as
returned by _GroupByDevices().
Returns:
An op to save the variables, which, when evaluated, returns the prefix
"<user-fed prefix>" only and does not include the sharded spec suffix.
"""
# IMPLEMENTATION DETAILS: most clients should skip.
#
# Suffix for any well-formed "checkpoint_prefix", when sharded.
# Transformations:
# * Users pass in "save_path" in save() and restore(). Say "myckpt".
# * checkpoint_prefix gets fed <save_path><_SHARDED_SUFFIX>.
#
# Example:
# During runtime, a temporary directory is first created, which contains
# files
#
# <train dir>/myckpt_temp/
# part-?????-of-?????{.index, .data-00000-of-00001}
#
# Before .save() finishes, they will be (hopefully, atomically) renamed to
#
# <train dir>/
# myckpt{.index, .data-?????-of-?????}
#
# Users only need to interact with the user-specified prefix, which is
# "<train dir>/myckpt" in this case. Save() and Restore() work with the
# prefix directly, instead of any physical pathname. (On failure and
# subsequent restore, an outdated and orphaned temporary directory can be
# safely removed.)
_SHARDED_SUFFIX = "_temp_%s/part" % uuid.uuid4().hex
tmp_checkpoint_prefix = string_ops.string_join(
[checkpoint_prefix, _SHARDED_SUFFIX])
num_shards = len(per_device)
sharded_saves = []
sharded_prefixes = []
num_shards_tensor = constant_op.constant(num_shards, name="num_shards")
last_device = None
for shard, (device, saveables) in enumerate(per_device):
last_device = device
with ops.device(_set_cpu0(device)):
sharded_filename = self.sharded_filename(tmp_checkpoint_prefix, shard,
num_shards_tensor)
sharded_prefixes.append(sharded_filename)
sharded_saves.append(self._AddSaveOps(sharded_filename, saveables))
with ops.control_dependencies([x.op for x in sharded_saves]):
# Co-locates the merge step with the last device.
with ops.device(_set_cpu0(last_device)):
# V2 format write path consists of a metadata merge step. Once merged,
# attempts to delete the temporary directory, "<user-fed prefix>_temp".
merge_step = gen_io_ops.merge_v2_checkpoints(
sharded_prefixes, checkpoint_prefix, delete_old_dirs=True)
with ops.control_dependencies([merge_step]):
# Returns the prefix "<user-fed prefix>" only. DOES NOT include the
# sharded spec suffix.
return array_ops.identity(checkpoint_prefix)
def _AddShardedSaveOps(self, filename_tensor, per_device):
"""Add ops to save the params per shard.
Args:
filename_tensor: a scalar String Tensor.
per_device: A list of (device, BaseSaverBuilder.SaveableObject) pairs, as
returned by _GroupByDevices().
Returns:
An op to save the variables.
"""
if self._write_version == saver_pb2.SaverDef.V2:
return self._AddShardedSaveOpsForV2(filename_tensor, per_device)
num_shards = len(per_device)
sharded_saves = []
num_shards_tensor = constant_op.constant(num_shards, name="num_shards")
for shard, (device, saveables) in enumerate(per_device):
with ops.device(device):
sharded_filename = self.sharded_filename(filename_tensor, shard,
num_shards_tensor)
sharded_saves.append(self._AddSaveOps(sharded_filename, saveables))
# Return the sharded name for the save path.
with ops.control_dependencies([x.op for x in sharded_saves]):
return gen_io_ops.sharded_filespec(filename_tensor, num_shards_tensor)
def _AddRestoreOps(self,
filename_tensor,
saveables,
restore_sequentially,
reshape,
preferred_shard=-1,
name="restore_all"):
"""Add operations to restore saveables.
Args:
filename_tensor: Tensor for the path of the file to load.
saveables: A list of SaveableObject objects.
restore_sequentially: True if we want to restore variables sequentially
within a shard.
reshape: True if we want to reshape loaded tensors to the shape of
the corresponding variable.
preferred_shard: Shard to open first when loading a sharded file.
name: Name for the returned op.
Returns:
An Operation that restores the variables.
"""
all_tensors = self.bulk_restore(filename_tensor, saveables, preferred_shard,
restore_sequentially)
assign_ops = []
idx = 0
# Load and optionally reshape on the CPU, as string tensors are not
# available on the GPU.
# TODO(touts): Re-enable restore on GPU when we can support annotating
# string tensors as "HostMemory" inputs.
for saveable in saveables:
shapes = None
if reshape:
# Compute the shapes, let the restore op decide if and how to do
# the reshape.
shapes = []
for spec in saveable.specs:
v = spec.tensor
shape = v.get_shape()
if not shape.is_fully_defined():
shape = array_ops.shape(v)
shapes.append(shape)
saveable_tensors = all_tensors[idx:idx + len(saveable.specs)]
idx += len(saveable.specs)
assign_ops.append(saveable.restore(saveable_tensors, shapes))
# Create a Noop that has control dependencies from all the updates.
return control_flow_ops.group(*assign_ops, name=name)
def _AddShardedRestoreOps(self, filename_tensor, per_device,
restore_sequentially, reshape):
"""Add Ops to restore variables from multiple devices.
Args:
filename_tensor: Tensor for the path of the file to load.
per_device: A list of (device, SaveableObject) pairs, as
returned by _GroupByDevices().
restore_sequentially: True if we want to restore variables sequentially
within a shard.
reshape: True if we want to reshape loaded tensors to the shape of
the corresponding variable.
Returns:
An Operation that restores the variables.
"""
sharded_restores = []
for shard, (device, saveables) in enumerate(per_device):
with ops.device(device):
sharded_restores.append(
self._AddRestoreOps(
filename_tensor,
saveables,
restore_sequentially,
reshape,
preferred_shard=shard,
name="restore_shard"))
return control_flow_ops.group(*sharded_restores, name="restore_all")
@staticmethod
def _IsVariable(v):
return isinstance(v, ops.Tensor) and v.op.type in _VARIABLE_OPS
def _GroupByDevices(self, saveables):
"""Group Variable tensor slices per device.
TODO(touts): Make sure that all the devices found are on different
job/replica/task/cpu|gpu. It would be bad if 2 were on the same device.
It can happen if the devices are unspecified.
Args:
saveables: A list of BaseSaverBuilder.SaveableObject objects.
Returns:
A list of tuples: (device_name, BaseSaverBuilder.SaveableObject) tuples.
The list is sorted by ascending device_name.
Raises:
ValueError: If the tensors of a saveable are on different devices.
"""
per_device = collections.defaultdict(lambda: [])
for saveable in saveables:
canonical_device = set(
pydev.canonical_name(spec.tensor.device) for spec in saveable.specs)
if len(canonical_device) != 1:
raise ValueError("All tensors of a saveable object must be "
"on the same device: %s" % saveable.name)
per_device[canonical_device.pop()].append(saveable)
return sorted(per_device.items(), key=lambda t: t[0])
@staticmethod
def OpListToDict(op_list, convert_variable_to_tensor=True):
"""Create a dictionary of names to operation lists.
Args:
op_list: A list, tuple, or set of Variables or SaveableObjects.
convert_variable_to_tensor: Whether or not to convert single Variables
with no slice info into Tensors.
Returns:
A dictionary of names to the operations that must be saved under
that name. Variables with save_slice_info are grouped together under the
same key in no particular order.
Raises:
TypeError: If the type of op_list or its elements is not supported.
ValueError: If at least two saveables share the same name.
"""
if not isinstance(op_list, (list, tuple, set)):
raise TypeError("Variables to save should be passed in a dict or a "
"list: %s" % op_list)
# When ResourceVariables are converted to Tensors, read ops are added to the
# graph. Sorting the op_list ensures that the resulting graph is always
# constructed in a deterministic way:
op_list = sorted(op_list, key=lambda x: x.name)
names_to_saveables = {}
# pylint: disable=protected-access
for var in op_list:
if isinstance(var, BaseSaverBuilder.SaveableObject):
names_to_saveables[var.name] = var
elif isinstance(var, variables.PartitionedVariable):
if var.name in names_to_saveables:
raise ValueError("At least two variables have the same name: %s" %
var.name)
names_to_saveables[var.name] = var
elif isinstance(var, variables.Variable) and var._save_slice_info:
name = var._save_slice_info.full_name
if name in names_to_saveables:
if not isinstance(names_to_saveables[name], list):
raise ValueError("Mixing slices and non-slices with the same name: "
"%s" % name)
names_to_saveables[name].append(var)
else:
names_to_saveables[name] = [var]
elif (isinstance(var, checkpointable.CheckpointableBase)
and not isinstance(var, variables.Variable)):
checkpointable_saveables = [
(factory() if callable(factory) else factory)
for factory in var._gather_saveables_for_checkpoint().values()]
names_to_saveables.update(
BaseSaverBuilder.OpListToDict(checkpointable_saveables))
else:
if context.executing_eagerly():
if not isinstance(var, resource_variable_ops.ResourceVariable):
raise ValueError(
"Can only save/restore ResourceVariables when eager execution "
"is enabled, type: %s." % type(var))
set_var = names_to_saveables.setdefault(var._shared_name, var)
if set_var is not var:
raise ValueError(
("Two different ResourceVariable objects with the same "
"shared_name '%s' were passed to the Saver. This likely means "
"that they were created in different Graphs or isolation "
"contexts, and may not be checkpointed together.") %
(var._shared_name,))
else:
if convert_variable_to_tensor:
if isinstance(var, resource_variable_ops.ResourceVariable):
var = var._graph_element # pylint: disable=protected-access
else:
var = ops.internal_convert_to_tensor(var, as_ref=True)
if not BaseSaverBuilder._IsVariable(var):
raise TypeError("Variable to save is not a Variable: %s" % var)
if var.op.type == "ReadVariableOp":
name = var.op.inputs[0].op.name
else:
name = var.op.name
if name in names_to_saveables:
raise ValueError("At least two variables have the same name: %s" %
name)
names_to_saveables[name] = var
# pylint: enable=protected-access
return names_to_saveables
@staticmethod
def SaveableObjectsForOp(op, name):
"""Create `SaveableObject`s from an operation.
Args:
op: A variable, operation, or SaveableObject to coerce into a
SaveableObject.
name: A string name for the SaveableObject.
Yields:
`SaveableObject`s which together save/restore `op`.
Raises:
TypeError: If `name` is not a string.
ValueError: For operations with no known conversion to SaveableObject.
"""
if not isinstance(name, six.string_types):
raise TypeError(
"names_to_saveables must be a dict mapping string names to "
"checkpointable operations. Name is not a string: %s" % name)
if isinstance(op, BaseSaverBuilder.SaveableObject):
yield op
elif isinstance(op, (list, tuple, variables.PartitionedVariable)):
if isinstance(op, variables.PartitionedVariable):
op = list(op)
# A set of slices.
slice_name = None
# pylint: disable=protected-access
for variable in op:
if not isinstance(variable, variables.Variable):
raise ValueError("Slices must all be Variables: %s" % variable)
if not variable._save_slice_info:
raise ValueError("Slices must all be slices: %s" % variable)
if slice_name is None:
slice_name = variable._save_slice_info.full_name
elif slice_name != variable._save_slice_info.full_name:
raise ValueError(
"Slices must all be from the same tensor: %s != %s" %
(slice_name, variable._save_slice_info.full_name))
if variable.op.type in ["Variable", "VariableV2",
"AutoReloadVariable"]:
yield BaseSaverBuilder.VariableSaveable(
variable, variable._save_slice_info.spec, name)
else:
yield BaseSaverBuilder.ResourceVariableSaveable(
variable, variable._save_slice_info.spec, name)
# pylint: enable=protected-access
elif isinstance(op, checkpointable.CheckpointableBase) and not isinstance(
op, variables.Variable):
# pylint: disable=protected-access
for attr, factory in op._gather_saveables_for_checkpoint().items():
if attr == checkpointable.VARIABLE_VALUE_KEY:
# Keep original name for classes masquerading as variables.
full_name = name
else:
full_name = name + "_" + attr
op = (factory(full_name) if callable(factory) else factory)
for op in BaseSaverBuilder.SaveableObjectsForOp(op, op.name):
yield op
# pylint: enable=protected-access
else:
# A variable or tensor.
if context.executing_eagerly():
if not isinstance(op, resource_variable_ops.ResourceVariable):
raise ValueError("Can only save/restore ResourceVariable eager "
"mode is enabled, type: %s." % type(op))
yield BaseSaverBuilder.ResourceVariableSaveable(op, "", name)
else:
if isinstance(op, resource_variable_ops.ResourceVariable):
variable = op._graph_element # pylint: disable=protected-access
else:
variable = ops.internal_convert_to_tensor(op, as_ref=True)
if not BaseSaverBuilder._IsVariable(variable):
raise TypeError("names_to_saveables must be a dict mapping string "
"names to Tensors/Variables. Not a variable: %s" %
variable)
if variable.op.type in ["Variable", "VariableV2",
"AutoReloadVariable"]:
yield BaseSaverBuilder.VariableSaveable(variable, "", name)
else:
yield BaseSaverBuilder.ResourceVariableSaveable(
variable, "", name)
def _ValidateAndSliceInputs(self, names_to_saveables):
"""Returns the variables and names that will be used for a Saver.
Args:
names_to_saveables: A dict (k, v) where k is the name of an operation and
v is an operation to save or a BaseSaverBuilder.Saver.
Returns:
A list of BaseSaverBuilder.SaveableObject objects.
Raises:
TypeError: If any of the keys are not strings or any of the
values are not one of Tensor or Variable or a checkpointable operation.
ValueError: If the same operation is given in more than one value
(this also applies to slices of SlicedVariables).
"""
if not isinstance(names_to_saveables, dict):
names_to_saveables = BaseSaverBuilder.OpListToDict(names_to_saveables)
saveables = []
seen_ops = set()
for name, op in sorted(names_to_saveables.items(),
# Avoid comparing ops, sort only by name.
key=lambda x: x[0]):
for converted_saveable_object in self.SaveableObjectsForOp(op, name):
self._AddSaveable(saveables, seen_ops, converted_saveable_object)
return saveables
def _AddSaveable(self, saveables, seen_ops, saveable):
"""Adds the saveable to the saveables list.
Args:
saveables: List to append the SaveableObject to.
seen_ops: Set of the ops of the saveables already processed. Used to
check that each saveable is only saved once.
saveable: The saveable.
Raises:
ValueError: If the saveable has already been processed.
"""
if saveable.op in seen_ops:
raise ValueError("The same saveable will be restored with two names: %s" %
saveable.name)
saveables.append(saveable)
seen_ops.add(saveable.op)
def build(self,
names_to_saveables,
reshape=False,
sharded=False,
max_to_keep=5,
keep_checkpoint_every_n_hours=10000.0,
name=None,
restore_sequentially=False,
filename="model"):
"""Builds save/restore graph nodes or runs save/restore in eager mode.
Args:
names_to_saveables: A dictionary mapping name to a Variable or
SaveableObject. Each name will be associated with the
corresponding variable in the checkpoint.
reshape: If True, allow restoring parameters from a checkpoint
that where the parameters have a different shape. This is
only needed when you try to restore from a Dist-Belief checkpoint,
and only some times.
sharded: If True, shard the checkpoints, one per device that has
Variable nodes.
max_to_keep: Maximum number of checkpoints to keep. As new checkpoints
are created, old ones are deleted. If None or 0, no checkpoints are
deleted from the filesystem but only the last one is kept in the
`checkpoint` file. Presently the number is only roughly enforced. For
example in case of restarts more than max_to_keep checkpoints may be
kept.
keep_checkpoint_every_n_hours: How often checkpoints should be kept.
Defaults to 10,000 hours.
name: String. Optional name to use as a prefix when adding operations.
restore_sequentially: A Bool, which if true, causes restore of different
variables to happen sequentially within each device.
filename: If known at graph construction time, filename used for variable
loading/saving. If None, then the default name "model" will be used.
Returns:
A SaverDef proto.
Raises:
TypeError: If 'names_to_saveables' is not a dictionary mapping string
keys to variable Tensors.
ValueError: If any of the keys or values in 'names_to_saveables' is not
unique.
"""
return self._build_internal(
names_to_saveables=names_to_saveables,
reshape=reshape,
sharded=sharded,
max_to_keep=max_to_keep,
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours,
name=name,
restore_sequentially=restore_sequentially,
filename=filename)
def _build_internal(self,
names_to_saveables,
reshape=False,
sharded=False,
max_to_keep=5,
keep_checkpoint_every_n_hours=10000.0,
name=None,
restore_sequentially=False,
filename="model",
build_save=True,
build_restore=True):
"""build() with option to only perform save and restore."""
if not context.executing_eagerly() and (not build_save or
not build_restore):
raise ValueError("save and restore operations need to be built together "
" when eager execution is not enabled.")
saveables = self._ValidateAndSliceInputs(names_to_saveables)
if max_to_keep is None:
max_to_keep = 0
with ops.name_scope(name, "save",
[saveable.op for saveable in saveables]) as name:
# Add a placeholder string tensor for the filename.
filename_tensor = array_ops.placeholder_with_default(
filename or "model", shape=(), name="filename")
# Keep the name "Const" for backwards compatibility.
filename_tensor = array_ops.placeholder_with_default(
filename_tensor, shape=(), name="Const")
# Add the save ops.
if sharded:
per_device = self._GroupByDevices(saveables)
if build_save:
save_tensor = self._AddShardedSaveOps(filename_tensor, per_device)
if build_restore:
restore_op = self._AddShardedRestoreOps(filename_tensor, per_device,
restore_sequentially, reshape)
else:
if build_save:
save_tensor = self._AddSaveOps(filename_tensor, saveables)
if build_restore:
restore_op = self._AddRestoreOps(filename_tensor, saveables,
restore_sequentially, reshape)
# In the following use case, it's possible to have restore_ops be called
# something else:
# - Build inference graph and export a meta_graph.
# - Import the inference meta_graph
# - Extend the inference graph to a train graph.
# - Export a new meta_graph.
# Now the second restore_op will be called "restore_all_1".
# As such, comment out the assert for now until we know whether supporting
# such usage model makes sense.
#
# assert restore_op.name.endswith("restore_all"), restore_op.name
if context.executing_eagerly():
# Store the tensor values to the tensor_names.
save_tensor_name = save_tensor.numpy() if build_save else ""
return saver_pb2.SaverDef(
filename_tensor_name=filename_tensor.numpy(),
save_tensor_name=save_tensor_name,
restore_op_name="",
max_to_keep=max_to_keep,
sharded=sharded,
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours,
version=self._write_version)
else:
graph = ops.get_default_graph()
# Do some sanity checking on collections containing
# PartitionedVariables. If a saved collection has a PartitionedVariable,
# the GraphDef needs to include concat ops to get the value (or there'll
# be a lookup error on load).
check_collection_list = graph.get_all_collection_keys()
for collection_type in check_collection_list:
for element in graph.get_collection(collection_type):
if isinstance(element, variables.PartitionedVariable):
try:
graph.get_operation_by_name(element.name)
except KeyError:
# Create a concat op for this PartitionedVariable. The user may
# not need it, but we'll try looking it up on MetaGraph restore
# since it's in a collection.
element.as_tensor()
return saver_pb2.SaverDef(
filename_tensor_name=filename_tensor.name,
save_tensor_name=save_tensor.name,
restore_op_name=restore_op.name,
max_to_keep=max_to_keep,
sharded=sharded,
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours,
version=self._write_version)
class BulkSaverBuilder(BaseSaverBuilder):
"""SaverBuilder with support for bulk restoring multiple saveables."""
def bulk_restore(self, filename_tensor, saveables, preferred_shard,
restore_sequentially):
# Ignored: bulk restore is internally sequential.
del restore_sequentially
restore_specs = []
for saveable in saveables:
for spec in saveable.specs:
restore_specs.append((spec.name, spec.slice_spec, spec.dtype))
names, slices, dtypes = zip(*restore_specs)
# Load all tensors onto CPU 0 for compatibility with existing code.
with ops.device("cpu:0"):
return io_ops.restore_v2(filename_tensor, names, slices, dtypes)
def _get_saver_or_default():
"""Returns the saver from SAVERS collection, or creates a default one.
This method is used by other members of the training module, such as
`Scaffold`, or `CheckpointSaverHook`.
Returns:
`Saver`.
Raises:
RuntimeError: If the SAVERS collection already has more than one items.
"""
collection_key = ops.GraphKeys.SAVERS
savers = ops.get_collection(collection_key)
if savers:
if len(savers) > 1:
raise RuntimeError(
"More than one item in collection {}. "
"Please indicate which one to use by passing it to the constructor.".
format(collection_key))
return savers[0]
saver = Saver(sharded=True, allow_empty=True)
if saver is not None:
ops.add_to_collection(collection_key, saver)
return saver
@tf_export(v1=["train.Saver"])
class Saver(object):
"""Saves and restores variables.
See [Variables](https://tensorflow.org/guide/variables)
for an overview of variables, saving and restoring.
The `Saver` class adds ops to save and restore variables to and from
*checkpoints*. It also provides convenience methods to run these ops.
Checkpoints are binary files in a proprietary format which map variable names
to tensor values. The best way to examine the contents of a checkpoint is to
load it using a `Saver`.
Savers can automatically number checkpoint filenames with a provided counter.
This lets you keep multiple checkpoints at different steps while training a
model. For example you can number the checkpoint filenames with the training
step number. To avoid filling up disks, savers manage checkpoint files
automatically. For example, they can keep only the N most recent files, or
one checkpoint for every N hours of training.
You number checkpoint filenames by passing a value to the optional
`global_step` argument to `save()`:
```python
saver.save(sess, 'my-model', global_step=0) ==> filename: 'my-model-0'
...
saver.save(sess, 'my-model', global_step=1000) ==> filename: 'my-model-1000'
```
Additionally, optional arguments to the `Saver()` constructor let you control
the proliferation of checkpoint files on disk:
* `max_to_keep` indicates the maximum number of recent checkpoint files to
keep. As new files are created, older files are deleted. If None or 0,
no checkpoints are deleted from the filesystem but only the last one is
kept in the `checkpoint` file. Defaults to 5 (that is, the 5 most recent
checkpoint files are kept.)
* `keep_checkpoint_every_n_hours`: In addition to keeping the most recent
`max_to_keep` checkpoint files, you might want to keep one checkpoint file
for every N hours of training. This can be useful if you want to later
analyze how a model progressed during a long training session. For
example, passing `keep_checkpoint_every_n_hours=2` ensures that you keep
one checkpoint file for every 2 hours of training. The default value of
10,000 hours effectively disables the feature.
Note that you still have to call the `save()` method to save the model.
Passing these arguments to the constructor will not save variables
automatically for you.
A training program that saves regularly looks like:
```python
...
# Create a saver.
saver = tf.train.Saver(...variables...)
# Launch the graph and train, saving the model every 1,000 steps.
sess = tf.Session()
for step in xrange(1000000):
sess.run(..training_op..)
if step % 1000 == 0:
# Append the step number to the checkpoint name:
saver.save(sess, 'my-model', global_step=step)
```
In addition to checkpoint files, savers keep a protocol buffer on disk with
the list of recent checkpoints. This is used to manage numbered checkpoint
files and by `latest_checkpoint()`, which makes it easy to discover the path
to the most recent checkpoint. That protocol buffer is stored in a file named
'checkpoint' next to the checkpoint files.
If you create several savers, you can specify a different filename for the
protocol buffer file in the call to `save()`.
"""
def __init__(self,
var_list=None,
reshape=False,
sharded=False,
max_to_keep=5,
keep_checkpoint_every_n_hours=10000.0,
name=None,
restore_sequentially=False,
saver_def=None,
builder=None,
defer_build=False,
allow_empty=False,
write_version=saver_pb2.SaverDef.V2,
pad_step_number=False,
save_relative_paths=False,
filename=None):
"""Creates a `Saver`.
The constructor adds ops to save and restore variables.
`var_list` specifies the variables that will be saved and restored. It can
be passed as a `dict` or a list:
* A `dict` of names to variables: The keys are the names that will be
used to save or restore the variables in the checkpoint files.
* A list of variables: The variables will be keyed with their op name in
the checkpoint files.
For example:
```python
v1 = tf.Variable(..., name='v1')
v2 = tf.Variable(..., name='v2')
# Pass the variables as a dict:
saver = tf.train.Saver({'v1': v1, 'v2': v2})
# Or pass them as a list.
saver = tf.train.Saver([v1, v2])
# Passing a list is equivalent to passing a dict with the variable op names
# as keys:
saver = tf.train.Saver({v.op.name: v for v in [v1, v2]})
```
The optional `reshape` argument, if `True`, allows restoring a variable from
a save file where the variable had a different shape, but the same number
of elements and type. This is useful if you have reshaped a variable and
want to reload it from an older checkpoint.
The optional `sharded` argument, if `True`, instructs the saver to shard
checkpoints per device.
Args:
var_list: A list of `Variable`/`SaveableObject`, or a dictionary mapping
names to `SaveableObject`s. If `None`, defaults to the list of all
saveable objects.
reshape: If `True`, allows restoring parameters from a checkpoint
where the variables have a different shape.
sharded: If `True`, shard the checkpoints, one per device.
max_to_keep: Maximum number of recent checkpoints to keep.
Defaults to 5.
keep_checkpoint_every_n_hours: How often to keep checkpoints.
Defaults to 10,000 hours.
name: String. Optional name to use as a prefix when adding operations.
restore_sequentially: A `Bool`, which if true, causes restore of different
variables to happen sequentially within each device. This can lower
memory usage when restoring very large models.
saver_def: Optional `SaverDef` proto to use instead of running the
builder. This is only useful for specialty code that wants to recreate
a `Saver` object for a previously built `Graph` that had a `Saver`.
The `saver_def` proto should be the one returned by the
`as_saver_def()` call of the `Saver` that was created for that `Graph`.
builder: Optional `SaverBuilder` to use if a `saver_def` was not provided.
Defaults to `BulkSaverBuilder()`.
defer_build: If `True`, defer adding the save and restore ops to the
`build()` call. In that case `build()` should be called before
finalizing the graph or using the saver.
allow_empty: If `False` (default) raise an error if there are no
variables in the graph. Otherwise, construct the saver anyway and make
it a no-op.
write_version: controls what format to use when saving checkpoints. It
also affects certain filepath matching logic. The V2 format is the
recommended choice: it is much more optimized than V1 in terms of
memory required and latency incurred during restore. Regardless of
this flag, the Saver is able to restore from both V2 and V1 checkpoints.
pad_step_number: if True, pads the global step number in the checkpoint
filepaths to some fixed width (8 by default). This is turned off by
default.
save_relative_paths: If `True`, will write relative paths to the
checkpoint state file. This is needed if the user wants to copy the
checkpoint directory and reload from the copied directory.
filename: If known at graph construction time, filename used for variable
loading/saving.
Raises:
TypeError: If `var_list` is invalid.
ValueError: If any of the keys or values in `var_list` are not unique.
      RuntimeError: If eager execution is enabled and `var_list` does not specify
        a list of variables to save.
@compatibility(eager)
When eager execution is enabled, `var_list` must specify a `list` or `dict`
of variables to save. Otherwise, a `RuntimeError` will be raised.
@end_compatibility
"""
if defer_build and var_list:
raise ValueError(
"If `var_list` is provided then build cannot be deferred. "
"Either set defer_build=False or var_list=None.")
if context.executing_eagerly() and var_list is None:
raise RuntimeError(
"When eager execution is enabled, `var_list` must specify a list or "
"dict of variables to save")
self._var_list = var_list
self._reshape = reshape
self._sharded = sharded
self._max_to_keep = max_to_keep
self._keep_checkpoint_every_n_hours = keep_checkpoint_every_n_hours
self._name = name
self._restore_sequentially = restore_sequentially
self.saver_def = saver_def
self._builder = builder
self._is_built = False
self._allow_empty = allow_empty
self._is_empty = None
self._write_version = write_version
self._pad_step_number = pad_step_number
self._filename = filename
self._last_checkpoints = []
self._checkpoints_to_be_deleted = []
if context.executing_eagerly():
self._next_checkpoint_time = (
time.time() + self._keep_checkpoint_every_n_hours * 3600)
elif not defer_build:
self.build()
if self.saver_def:
self._check_saver_def()
self._write_version = self.saver_def.version
self._save_relative_paths = save_relative_paths
# For compatibility with object-based checkpoints, we may build a second
# Saver to read the renamed keys.
self._object_restore_saver = None
def build(self):
if context.executing_eagerly():
raise RuntimeError("Use save/restore instead of build in eager mode.")
self._build(self._filename, build_save=True, build_restore=True)
def _build_eager(self, checkpoint_path, build_save, build_restore):
self._build(
checkpoint_path, build_save=build_save, build_restore=build_restore)
def _build(self, checkpoint_path, build_save, build_restore):
"""Builds saver_def."""
if not context.executing_eagerly():
if self._is_built:
return
self._is_built = True
if not self.saver_def or context.executing_eagerly():
if self._builder is None:
self._builder = BulkSaverBuilder(self._write_version)
if self._var_list is None:
# pylint: disable=protected-access
self._var_list = variables._all_saveable_objects()
if not self._var_list:
if self._allow_empty:
self._is_empty = True
return
else:
raise ValueError("No variables to save")
self._is_empty = False
self.saver_def = self._builder._build_internal( # pylint: disable=protected-access
self._var_list,
reshape=self._reshape,
sharded=self._sharded,
max_to_keep=self._max_to_keep,
keep_checkpoint_every_n_hours=self._keep_checkpoint_every_n_hours,
name=self._name,
restore_sequentially=self._restore_sequentially,
filename=checkpoint_path,
build_save=build_save, build_restore=build_restore)
elif self.saver_def and self._name:
# Since self._name is used as a name_scope by builder(), we are
# overloading the use of this field to represent the "import_scope" as
# well.
self.saver_def.filename_tensor_name = ops.prepend_name_scope(
self.saver_def.filename_tensor_name, self._name)
self.saver_def.save_tensor_name = ops.prepend_name_scope(
self.saver_def.save_tensor_name, self._name)
self.saver_def.restore_op_name = ops.prepend_name_scope(
self.saver_def.restore_op_name, self._name)
self._check_saver_def()
if not context.executing_eagerly():
# Updates next checkpoint time.
# Set in __init__ when executing eagerly.
self._next_checkpoint_time = (
time.time() + self.saver_def.keep_checkpoint_every_n_hours * 3600)
def _check_saver_def(self):
if not isinstance(self.saver_def, saver_pb2.SaverDef):
raise ValueError("saver_def must be a saver_pb2.SaverDef: %s" %
self.saver_def)
if not context.executing_eagerly():
if not self.saver_def.save_tensor_name:
raise ValueError("saver_def must specify the save_tensor_name: %s" %
str(self.saver_def))
if not self.saver_def.restore_op_name:
raise ValueError("saver_def must specify the restore_op_name: %s" %
str(self.saver_def))
def _CheckpointFilename(self, p):
"""Returns the checkpoint filename given a `(filename, time)` pair.
Args:
p: (filename, time) pair.
Returns:
Checkpoint file name.
"""
name, _ = p
return name
def _RecordLastCheckpoint(self, latest_save_path):
"""Manages the list of the latest checkpoints."""
if not self.saver_def.max_to_keep:
return
# Remove first from list if the same name was used before.
for p in self._last_checkpoints:
if latest_save_path == self._CheckpointFilename(p):
self._last_checkpoints.remove(p)
# Append new path to list
self._last_checkpoints.append((latest_save_path, time.time()))
# If more than max_to_keep, remove oldest.
if len(self._last_checkpoints) > self.saver_def.max_to_keep:
self._checkpoints_to_be_deleted.append(self._last_checkpoints.pop(0))
def _MaybeDeleteOldCheckpoints(self, meta_graph_suffix="meta"):
"""Deletes old checkpoints if necessary.
`self._checkpoints_to_be_deleted` is going to contain checkpoints that are
over `max_to_keep`. They are going to be deleted. If
`keep_checkpoint_every_n_hours` was specified, keep an additional checkpoint
every `N` hours. For example, if `N` is 0.5, an additional checkpoint is
kept for every 0.5 hours of training; if `N` is 10, an additional
checkpoint is kept for every 10 hours of training.
Args:
meta_graph_suffix: Suffix for `MetaGraphDef` file. Defaults to 'meta'.
"""
if self._checkpoints_to_be_deleted:
p = self._checkpoints_to_be_deleted.pop(0)
      # Do not delete the file if keep_checkpoint_every_n_hours is set and we
# have reached N hours of training.
should_keep = p[1] > self._next_checkpoint_time
if should_keep:
self._next_checkpoint_time += (
self.saver_def.keep_checkpoint_every_n_hours * 3600)
return
# Otherwise delete the files.
try:
checkpoint_management.remove_checkpoint(
self._CheckpointFilename(p), self.saver_def.version,
meta_graph_suffix)
except Exception as e: # pylint: disable=broad-except
logging.warning("Ignoring: %s", str(e))
def as_saver_def(self):
"""Generates a `SaverDef` representation of this saver.
Returns:
A `SaverDef` proto.
"""
return self.saver_def
def to_proto(self, export_scope=None):
"""Converts this `Saver` to a `SaverDef` protocol buffer.
Args:
export_scope: Optional `string`. Name scope to remove.
Returns:
A `SaverDef` protocol buffer.
"""
if export_scope is None:
return self.saver_def
if not (self.saver_def.filename_tensor_name.startswith(export_scope) and
self.saver_def.save_tensor_name.startswith(export_scope) and
self.saver_def.restore_op_name.startswith(export_scope)):
return None
saver_def = saver_pb2.SaverDef()
saver_def.CopyFrom(self.saver_def)
saver_def.filename_tensor_name = ops.strip_name_scope(
saver_def.filename_tensor_name, export_scope)
saver_def.save_tensor_name = ops.strip_name_scope(
saver_def.save_tensor_name, export_scope)
saver_def.restore_op_name = ops.strip_name_scope(
saver_def.restore_op_name, export_scope)
return saver_def
@staticmethod
def from_proto(saver_def, import_scope=None):
"""Returns a `Saver` object created from `saver_def`.
Args:
saver_def: a `SaverDef` protocol buffer.
import_scope: Optional `string`. Name scope to use.
Returns:
A `Saver` built from saver_def.
"""
return Saver(saver_def=saver_def, name=import_scope)
@property
def last_checkpoints(self):
"""List of not-yet-deleted checkpoint filenames.
You can pass any of the returned values to `restore()`.
Returns:
A list of checkpoint filenames, sorted from oldest to newest.
"""
return list(self._CheckpointFilename(p) for p in self._last_checkpoints)
def set_last_checkpoints(self, last_checkpoints):
"""DEPRECATED: Use set_last_checkpoints_with_time.
Sets the list of old checkpoint filenames.
Args:
last_checkpoints: A list of checkpoint filenames.
Raises:
AssertionError: If last_checkpoints is not a list.
"""
assert isinstance(last_checkpoints, list)
# We use a timestamp of +inf so that this checkpoint will never be
# deleted. This is both safe and backwards compatible to a previous
# version of the code which used s[1] as the "timestamp".
self._last_checkpoints = [(s, np.inf) for s in last_checkpoints]
def set_last_checkpoints_with_time(self, last_checkpoints_with_time):
"""Sets the list of old checkpoint filenames and timestamps.
Args:
last_checkpoints_with_time: A list of tuples of checkpoint filenames and
timestamps.
Raises:
AssertionError: If last_checkpoints_with_time is not a list.
"""
assert isinstance(last_checkpoints_with_time, list)
self._last_checkpoints = last_checkpoints_with_time
def recover_last_checkpoints(self, checkpoint_paths):
"""Recovers the internal saver state after a crash.
This method is useful for recovering the "self._last_checkpoints" state.
Globs for the checkpoints pointed to by `checkpoint_paths`. If the files
exist, use their mtime as the checkpoint timestamp.
Args:
checkpoint_paths: a list of checkpoint paths.
"""
mtimes = checkpoint_management.get_checkpoint_mtimes(checkpoint_paths)
self.set_last_checkpoints_with_time(list(zip(checkpoint_paths, mtimes)))
def save(self,
sess,
save_path,
global_step=None,
latest_filename=None,
meta_graph_suffix="meta",
write_meta_graph=True,
write_state=True,
strip_default_attrs=False):
# pylint: disable=line-too-long
"""Saves variables.
This method runs the ops added by the constructor for saving variables.
It requires a session in which the graph was launched. The variables to
save must also have been initialized.
The method returns the path prefix of the newly created checkpoint files.
This string can be passed directly to a call to `restore()`.
Args:
sess: A Session to use to save the variables.
save_path: String. Prefix of filenames created for the checkpoint.
global_step: If provided the global step number is appended to
`save_path` to create the checkpoint filenames. The optional argument
can be a `Tensor`, a `Tensor` name or an integer.
latest_filename: Optional name for the protocol buffer file that will
contains the list of most recent checkpoints. That file,
kept in the same directory as the checkpoint files, is automatically
managed by the saver to keep track of recent checkpoints. Defaults to
'checkpoint'.
meta_graph_suffix: Suffix for `MetaGraphDef` file. Defaults to 'meta'.
write_meta_graph: `Boolean` indicating whether or not to write the meta
graph file.
write_state: `Boolean` indicating whether or not to write the
`CheckpointStateProto`.
strip_default_attrs: Boolean. If `True`, default-valued attributes will be
removed from the NodeDefs. For a detailed guide, see
[Stripping Default-Valued Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).
Returns:
A string: path prefix used for the checkpoint files. If the saver is
sharded, this string ends with: '-?????-of-nnnnn' where 'nnnnn'
is the number of shards created.
If the saver is empty, returns None.
Raises:
TypeError: If `sess` is not a `Session`.
ValueError: If `latest_filename` contains path components, or if it
collides with `save_path`.
RuntimeError: If save and restore ops weren't built.
"""
# pylint: enable=line-too-long
if not self._is_built and not context.executing_eagerly():
raise RuntimeError(
"`build()` should be called before save if defer_build==True")
if latest_filename is None:
latest_filename = "checkpoint"
if self._write_version != saver_pb2.SaverDef.V2:
logging.warning("*******************************************************")
logging.warning("TensorFlow's V1 checkpoint format has been deprecated.")
logging.warning("Consider switching to the more efficient V2 format:")
logging.warning(" `tf.train.Saver(write_version=tf.train.SaverDef.V2)`")
logging.warning("now on by default.")
logging.warning("*******************************************************")
if os.path.split(latest_filename)[0]:
raise ValueError("'latest_filename' must not contain path components")
if global_step is not None:
if not isinstance(global_step, compat.integral_types):
global_step = training_util.global_step(sess, global_step)
checkpoint_file = "%s-%d" % (save_path, global_step)
if self._pad_step_number:
# Zero-pads the step numbers, so that they are sorted when listed.
checkpoint_file = "%s-%s" % (save_path, "{:08d}".format(global_step))
else:
checkpoint_file = save_path
if os.path.basename(
save_path) == latest_filename and not self._sharded:
# Guard against collision between data file and checkpoint state file.
raise ValueError(
"'latest_filename' collides with 'save_path': '%s' and '%s'" %
(latest_filename, save_path))
if (not context.executing_eagerly() and
not isinstance(sess, session.SessionInterface)):
raise TypeError("'sess' must be a Session; %s" % sess)
save_path_parent = os.path.dirname(save_path)
if not self._is_empty:
try:
if context.executing_eagerly():
self._build_eager(
checkpoint_file, build_save=True, build_restore=False)
model_checkpoint_path = self.saver_def.save_tensor_name
else:
model_checkpoint_path = sess.run(
self.saver_def.save_tensor_name,
{self.saver_def.filename_tensor_name: checkpoint_file})
model_checkpoint_path = compat.as_str(model_checkpoint_path)
if write_state:
self._RecordLastCheckpoint(model_checkpoint_path)
checkpoint_management.update_checkpoint_state_internal(
save_dir=save_path_parent,
model_checkpoint_path=model_checkpoint_path,
all_model_checkpoint_paths=self.last_checkpoints,
latest_filename=latest_filename,
save_relative_paths=self._save_relative_paths)
self._MaybeDeleteOldCheckpoints(meta_graph_suffix=meta_graph_suffix)
except (errors.FailedPreconditionError, errors.NotFoundError) as exc:
if not gfile.IsDirectory(save_path_parent):
exc = ValueError(
"Parent directory of {} doesn't exist, can't save.".format(
save_path))
raise exc
if write_meta_graph:
meta_graph_filename = checkpoint_management.meta_graph_filename(
checkpoint_file, meta_graph_suffix=meta_graph_suffix)
if not context.executing_eagerly():
with sess.graph.as_default():
self.export_meta_graph(
meta_graph_filename, strip_default_attrs=strip_default_attrs)
if self._is_empty:
return None
else:
return model_checkpoint_path
def export_meta_graph(self,
filename=None,
collection_list=None,
as_text=False,
export_scope=None,
clear_devices=False,
clear_extraneous_savers=False,
strip_default_attrs=False):
# pylint: disable=line-too-long
"""Writes `MetaGraphDef` to save_path/filename.
Args:
filename: Optional meta_graph filename including the path.
collection_list: List of string keys to collect.
as_text: If `True`, writes the meta_graph as an ASCII proto.
export_scope: Optional `string`. Name scope to remove.
clear_devices: Whether or not to clear the device field for an `Operation`
or `Tensor` during export.
clear_extraneous_savers: Remove any Saver-related information from the
graph (both Save/Restore ops and SaverDefs) that are not associated
with this Saver.
strip_default_attrs: Boolean. If `True`, default-valued attributes will be
removed from the NodeDefs. For a detailed guide, see
[Stripping Default-Valued Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).
Returns:
A `MetaGraphDef` proto.
"""
# pylint: enable=line-too-long
return export_meta_graph(
filename=filename,
graph_def=ops.get_default_graph().as_graph_def(add_shapes=True),
saver_def=self.saver_def,
collection_list=collection_list,
as_text=as_text,
export_scope=export_scope,
clear_devices=clear_devices,
clear_extraneous_savers=clear_extraneous_savers,
strip_default_attrs=strip_default_attrs)
def restore(self, sess, save_path):
"""Restores previously saved variables.
This method runs the ops added by the constructor for restoring variables.
It requires a session in which the graph was launched. The variables to
restore do not have to have been initialized, as restoring is itself a way
to initialize variables.
The `save_path` argument is typically a value previously returned from a
`save()` call, or a call to `latest_checkpoint()`.
Args:
sess: A `Session` to use to restore the parameters. None in eager mode.
save_path: Path where parameters were previously saved.
Raises:
ValueError: If save_path is None or not a valid checkpoint.
"""
if self._is_empty:
return
if save_path is None:
raise ValueError("Can't load save_path when it is None.")
if not checkpoint_management.checkpoint_exists(compat.as_text(save_path)):
raise ValueError("The passed save_path is not a valid checkpoint: "
+ compat.as_text(save_path))
logging.info("Restoring parameters from %s", compat.as_text(save_path))
try:
if context.executing_eagerly():
self._build_eager(save_path, build_save=False, build_restore=True)
else:
sess.run(self.saver_def.restore_op_name,
{self.saver_def.filename_tensor_name: save_path})
except errors.NotFoundError as err:
# There are three common conditions that might cause this error:
# 0. The file is missing. We ignore here, as this is checked above.
# 1. This is an object-based checkpoint trying name-based loading.
# 2. The graph has been altered and a variable or other name is missing.
# 1. The checkpoint would not be loaded successfully as is. Try to parse
# it as an object-based checkpoint.
try:
names_to_keys = object_graph_key_mapping(save_path)
except errors.NotFoundError:
# 2. This is not an object-based checkpoint, which likely means there
# is a graph mismatch. Re-raise the original error with
# a helpful message (b/110263146)
raise _wrap_restore_error_with_msg(
err, "a Variable name or other graph key that is missing")
# This is an object-based checkpoint. We'll print a warning and then do
# the restore.
logging.warning(
"Restoring an object-based checkpoint using a name-based saver. This "
"may be somewhat fragile, and will re-build the Saver. Instead, "
"consider loading object-based checkpoints using "
"tf.train.Checkpoint().")
self._object_restore_saver = saver_from_object_based_checkpoint(
checkpoint_path=save_path,
var_list=self._var_list,
builder=self._builder,
names_to_keys=names_to_keys,
cached_saver=self._object_restore_saver)
self._object_restore_saver.restore(sess=sess, save_path=save_path)
except errors.InvalidArgumentError as err:
# There is a mismatch between the graph and the checkpoint being loaded.
# We add a more reasonable error message here to help users (b/110263146)
raise _wrap_restore_error_with_msg(
err, "a mismatch between the current graph and the graph")
@staticmethod
def _add_collection_def(meta_graph_def, key, export_scope=None):
"""Adds a collection to MetaGraphDef protocol buffer.
Args:
meta_graph_def: MetaGraphDef protocol buffer.
key: One of the GraphKeys or user-defined string.
export_scope: Optional `string`. Name scope to remove.
"""
meta_graph.add_collection_def(meta_graph_def, key,
export_scope=export_scope)
@tf_export(v1=["train.import_meta_graph"])
def import_meta_graph(meta_graph_or_file, clear_devices=False,
import_scope=None, **kwargs):
"""Recreates a Graph saved in a `MetaGraphDef` proto.
This function takes a `MetaGraphDef` protocol buffer as input. If
  the argument is a file containing a `MetaGraphDef` protocol buffer,
it constructs a protocol buffer from the file content. The function
then adds all the nodes from the `graph_def` field to the
current graph, recreates all the collections, and returns a saver
constructed from the `saver_def` field.
In combination with `export_meta_graph()`, this function can be used to
* Serialize a graph along with other Python objects such as `QueueRunner`,
`Variable` into a `MetaGraphDef`.
* Restart training from a saved graph and checkpoints.
* Run inference from a saved graph and checkpoints.
```Python
...
# Create a saver.
saver = tf.train.Saver(...variables...)
# Remember the training_op we want to run by adding it to a collection.
tf.add_to_collection('train_op', train_op)
sess = tf.Session()
for step in xrange(1000000):
sess.run(train_op)
if step % 1000 == 0:
# Saves checkpoint, which by default also exports a meta_graph
# named 'my-model-global_step.meta'.
saver.save(sess, 'my-model', global_step=step)
```
Later we can continue training from this saved `meta_graph` without building
the model from scratch.
```Python
with tf.Session() as sess:
new_saver = tf.train.import_meta_graph('my-save-dir/my-model-10000.meta')
new_saver.restore(sess, 'my-save-dir/my-model-10000')
# tf.get_collection() returns a list. In this example we only want the
# first one.
train_op = tf.get_collection('train_op')[0]
for step in xrange(1000000):
sess.run(train_op)
```
NOTE: Restarting training from saved `meta_graph` only works if the
device assignments have not changed.
Args:
meta_graph_or_file: `MetaGraphDef` protocol buffer or filename (including
the path) containing a `MetaGraphDef`.
clear_devices: Whether or not to clear the device field for an `Operation`
or `Tensor` during import.
import_scope: Optional `string`. Name scope to add. Only used when
initializing from protocol buffer.
**kwargs: Optional keyed arguments.
Returns:
A saver constructed from `saver_def` in `MetaGraphDef` or None.
A None value is returned if no variables exist in the `MetaGraphDef`
(i.e., there are no variables to restore).
Raises:
RuntimeError: If called with eager execution enabled.
@compatibility(eager)
Exporting/importing meta graphs is not supported. No graph exists when eager
execution is enabled.
@end_compatibility
""" # pylint: disable=g-doc-exception
return _import_meta_graph_with_return_elements(
meta_graph_or_file, clear_devices, import_scope, **kwargs)[0]
def _import_meta_graph_with_return_elements(
meta_graph_or_file, clear_devices=False, import_scope=None,
return_elements=None, **kwargs):
"""Import MetaGraph, and return both a saver and returned elements."""
if context.executing_eagerly():
raise RuntimeError("Exporting/importing meta graphs is not supported when "
"eager execution is enabled. No graph exists when eager "
"execution is enabled.")
if not isinstance(meta_graph_or_file, meta_graph_pb2.MetaGraphDef):
meta_graph_def = meta_graph.read_meta_graph_file(meta_graph_or_file)
else:
meta_graph_def = meta_graph_or_file
imported_vars, imported_return_elements = (
meta_graph.import_scoped_meta_graph_with_return_elements(
meta_graph_def,
clear_devices=clear_devices,
import_scope=import_scope,
return_elements=return_elements,
**kwargs))
saver = _create_saver_from_imported_meta_graph(
meta_graph_def, import_scope, imported_vars)
return saver, imported_return_elements
def _create_saver_from_imported_meta_graph(
meta_graph_def, import_scope, imported_vars):
"""Return a saver for restoring variable values to an imported MetaGraph."""
if meta_graph_def.HasField("saver_def"):
# Infer the scope that is prepended by `import_scoped_meta_graph`.
scope = import_scope
var_names = list(imported_vars.keys())
if var_names:
sample_key = var_names[0]
sample_var = imported_vars[sample_key]
scope = sample_var.name[:-len(sample_key)]
return Saver(saver_def=meta_graph_def.saver_def, name=scope)
else:
if variables._all_saveable_objects(scope=import_scope): # pylint: disable=protected-access
# Return the default saver instance for all graph variables.
return Saver()
else:
# If no graph variables exist, then a Saver cannot be constructed.
logging.info("Saver not created because there are no variables in the"
" graph to restore")
return None
@tf_export(v1=["train.export_meta_graph"])
def export_meta_graph(filename=None,
meta_info_def=None,
graph_def=None,
saver_def=None,
collection_list=None,
as_text=False,
graph=None,
export_scope=None,
clear_devices=False,
clear_extraneous_savers=False,
strip_default_attrs=False,
**kwargs):
# pylint: disable=line-too-long
"""Returns `MetaGraphDef` proto. Optionally writes it to filename.
This function exports the graph, saver, and collection objects into
`MetaGraphDef` protocol buffer with the intention of it being imported
at a later time or location to restart training, run inference, or be
a subgraph.
Args:
filename: Optional filename including the path for writing the
generated `MetaGraphDef` protocol buffer.
meta_info_def: `MetaInfoDef` protocol buffer.
graph_def: `GraphDef` protocol buffer.
saver_def: `SaverDef` protocol buffer.
collection_list: List of string keys to collect.
as_text: If `True`, writes the `MetaGraphDef` as an ASCII proto.
graph: The `Graph` to export. If `None`, use the default graph.
export_scope: Optional `string`. Name scope under which to extract
      the subgraph. The scope name will be stripped from the node definitions
for easy import later into new name scopes. If `None`, the whole graph
is exported. graph_def and export_scope cannot both be specified.
clear_devices: Whether or not to clear the device field for an `Operation`
or `Tensor` during export.
clear_extraneous_savers: Remove any Saver-related information from the
graph (both Save/Restore ops and SaverDefs) that are not associated
with the provided SaverDef.
strip_default_attrs: Boolean. If `True`, default-valued attributes will be
removed from the NodeDefs. For a detailed guide, see
[Stripping Default-Valued Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).
**kwargs: Optional keyed arguments.
Returns:
A `MetaGraphDef` proto.
Raises:
ValueError: When the `GraphDef` is larger than 2GB.
RuntimeError: If called with eager execution enabled.
@compatibility(eager)
Exporting/importing meta graphs is not supported. No graph exists when eager
execution is enabled.
@end_compatibility
"""
# pylint: enable=line-too-long
if context.executing_eagerly():
raise RuntimeError("Exporting/importing meta graphs is not supported when "
"eager execution is enabled. No graph exists when eager "
"execution is enabled.")
meta_graph_def, _ = meta_graph.export_scoped_meta_graph(
filename=filename,
meta_info_def=meta_info_def,
graph_def=graph_def,
saver_def=saver_def,
collection_list=collection_list,
as_text=as_text,
graph=graph,
export_scope=export_scope,
clear_devices=clear_devices,
clear_extraneous_savers=clear_extraneous_savers,
strip_default_attrs=strip_default_attrs,
**kwargs)
return meta_graph_def
def _wrap_restore_error_with_msg(err, extra_verbiage):
err_msg = ("Restoring from checkpoint failed. This is most likely "
"due to {} from the checkpoint. Please ensure that you "
"have not altered the graph expected based on the checkpoint. "
"Original error:\n\n{}").format(extra_verbiage, err.message)
return err.__class__(err.node_def, err.op, err_msg)
ops.register_proto_function(
ops.GraphKeys.SAVERS,
proto_type=saver_pb2.SaverDef,
to_proto=Saver.to_proto,
from_proto=Saver.from_proto)
def object_graph_key_mapping(checkpoint_path):
"""Return name to key mappings from the checkpoint.
Args:
checkpoint_path: string, path to object-based checkpoint
Returns:
Dictionary mapping tensor names to checkpoint keys.
"""
reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path)
object_graph_string = reader.get_tensor(
checkpointable.OBJECT_GRAPH_PROTO_KEY)
object_graph_proto = (
checkpointable_object_graph_pb2.CheckpointableObjectGraph())
object_graph_proto.ParseFromString(object_graph_string)
names_to_keys = {}
for node in object_graph_proto.nodes:
for attribute in node.attributes:
names_to_keys[attribute.full_name] = attribute.checkpoint_key
return names_to_keys
def saver_from_object_based_checkpoint(
checkpoint_path, var_list=None, builder=None, names_to_keys=None,
cached_saver=None):
"""Return a `Saver` which reads from an object-based checkpoint.
This function validates that all variables in the variables list are remapped
in the object-based checkpoint (or `names_to_keys` dict if provided). A
saver will be created with the list of remapped variables.
The `cached_saver` argument allows the user to pass in a previously created
saver, so multiple `saver.restore()` calls don't pollute the graph when graph
building. This assumes that keys are consistent, meaning that the
1) `checkpoint_path` checkpoint, and
2) checkpoint used to create the `cached_saver`
are the same type of object-based checkpoint. If this argument is set, this
function will simply validate that all variables have been remapped by the
checkpoint at `checkpoint_path`.
Note that in general, `tf.train.Checkpoint` should be used to restore/save an
object-based checkpoint.
Args:
checkpoint_path: string, path to object-based checkpoint
var_list: list of `Variables` that appear in the checkpoint. If `None`,
`var_list` will be set to all saveable objects.
builder: a `BaseSaverBuilder` instance. If `None`, a new `BulkSaverBuilder`
will be created.
    names_to_keys: dict mapping string tensor names to checkpoint keys. If
`None`, this dict will be generated from the checkpoint file.
cached_saver: Cached `Saver` object with remapped variables.
Returns:
`Saver` with remapped variables for reading from an object-based checkpoint.
Raises:
ValueError if the checkpoint provided is not an object-based checkpoint.
NotFoundError: If one of the variables in `var_list` can not be found in the
checkpoint. This could mean the checkpoint or `names_to_keys` mapping is
missing the variable.
"""
if names_to_keys is None:
try:
names_to_keys = object_graph_key_mapping(checkpoint_path)
except errors.NotFoundError:
raise ValueError("Checkpoint in %s not an object-based checkpoint."
% checkpoint_path)
if var_list is None:
var_list = variables._all_saveable_objects() # pylint: disable=protected-access
if builder is None:
builder = BulkSaverBuilder()
saveables = builder._ValidateAndSliceInputs(var_list) # pylint: disable=protected-access
for saveable in saveables:
for spec in saveable.specs:
if spec.name not in names_to_keys:
raise errors.NotFoundError(
None, None,
message=("Attempting to load an object-based checkpoint using "
"variable names, but could not find %s in the "
"checkpoint.") % spec.name)
spec.name = names_to_keys[spec.name]
if cached_saver is None:
return Saver(saveables)
return cached_saver
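if __name__ == "__main__":
  # Illustrative sketch only (added for this write-up, not part of the original module, and
  # guarded so it never runs on import): a minimal name-based save/restore round trip with
  # the `Saver` defined above, mirroring the class docstring. The variable name and the
  # /tmp path below are made up for the example.
  demo_v = variables.Variable(1.0, name="demo_v")
  demo_saver = Saver({"demo_v": demo_v})
  with session.Session() as sess:
    sess.run(variables.global_variables_initializer())
    ckpt_prefix = demo_saver.save(sess, "/tmp/saver-demo", global_step=0)
    demo_saver.restore(sess, ckpt_prefix)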
| 41.142559 | 176 | 0.68678 |
1dedc40800f0ee04f2464be09dd0c7c32f0e669f | 2,741 | py | Python | Temporal Difference/taki_v2/taxi-v2.py | tahmidbintaslim/deep-reinforcement-learning | ed817b463b9742b1d9c8d7eca5735b1f6e9b9beb | [
"MIT"
] | 7 | 2020-02-13T19:52:32.000Z | 2021-12-04T08:01:43.000Z | Temporal Difference/taki_v2/taxi-v2.py | sourcecode369/Deep-RL | ed817b463b9742b1d9c8d7eca5735b1f6e9b9beb | [
"MIT"
] | null | null | null | Temporal Difference/taki_v2/taxi-v2.py | sourcecode369/Deep-RL | ed817b463b9742b1d9c8d7eca5735b1f6e9b9beb | [
"MIT"
] | 2 | 2020-04-22T01:58:02.000Z | 2020-06-12T02:18:43.000Z | import gym
import sys
import numpy as np
import warnings
warnings.filterwarnings("ignore")
from collections import defaultdict, deque
import random
import math
env = gym.make('Taxi-v2')
def epsilon_greedy(Q,state,nA,epsilon):
if random.random()>epsilon:
return np.argmax(Q[state])
else:
        return np.random.choice(nA)  # use the nA argument instead of reaching for the global env
def update_q(env,Q,nA,alpha,gamma,state,action,reward,next_state=None):
current = Q[state][action]
Qsa_next = np.max(Q[next_state]) if next_state is not None else 0
target = (reward + (gamma*Qsa_next))
new_value = current + alpha * (target - current)
return new_value
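# --- Added illustration (not in the original script): a hand-checked example of the TD(0)
# --- update that update_q() performs, using made-up Q-values. With Q(s, a) = 0.5, reward = -1,
# --- gamma = 1.0 and max_a' Q(s', a') = 2.0, the target is -1 + 1.0*2.0 = 1.0, so the new value
# --- is 0.5 + 0.2*(1.0 - 0.5) = 0.6.
_demo_Q = defaultdict(lambda: np.zeros(env.action_space.n))
_demo_Q[0][1] = 0.5   # current estimate Q(s=0, a=1)
_demo_Q[1][1] = 2.0   # best action value in the next state
assert np.isclose(update_q(env, _demo_Q, env.action_space.n, 0.2, 1.0, 0, 1, -1, 1), 0.6)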
def q_learning(env,num_episodes,alpha,gamma,plot_every=100):
nA = env.action_space.n
Q = defaultdict(lambda: np.zeros(nA))
tmp_score = deque(maxlen=plot_every)
avg_score = deque(maxlen=num_episodes)
best_avg_reward = -math.inf
epsilon = 0.005
for i_episode in range(1,num_episodes+1):
state = env.reset()
score = 0
while True:
action = epsilon_greedy(Q,state,nA,epsilon)
next_state, reward, done, info = env.step(action)
score += reward
Q[state][action] = update_q(env,Q,nA,alpha,gamma,state,action,reward,next_state)
state = next_state
if done:
tmp_score.append(score)
break
# if(i_episode%100)==0:
# avg_score.append(np.mean(tmp_score))
if (i_episode >= 100):
avg_reward = np.mean(tmp_score)
avg_score.append(avg_reward)
if avg_reward > best_avg_reward:
best_avg_reward = avg_reward
print("\rEpisode {}/{} || Best average reward {}".format(i_episode, num_episodes, best_avg_reward), end="")
sys.stdout.flush()
if best_avg_reward >= 9.7:
print('\nEnvironment solved in {} episodes.'.format(i_episode), end="")
break
if i_episode == num_episodes: print('\n')
return Q, avg_score, best_avg_reward
def play_taxiv2(env,Q,num_episodes):
rewards = []
for i_episode in range(num_episodes):
state = env.reset()
total_rewards = 0
while True:
env.render()
action = np.argmax(Q[state])
next_state, reward, done, info = env.step(action)
total_rewards += reward
if done:
rewards.append(total_rewards)
print("Score: ",total_rewards)
break
state= next_state
env.close()
print("Score over time: ",sum(rewards)/num_episodes)
Q, avg_reward, best_reward = q_learning(env,10000,0.2,1)
play_taxiv2(env,Q,1000)
| 31.872093 | 115 | 0.608172 |
63f77624f8570502f2bdc62a7d639e91eb01d161 | 18,235 | py | Python | mars/worker/quota.py | sighingnow/mars | c7897fbd144d230fff5edabc1494fb3ff44aa0d2 | [
"Apache-2.0"
] | null | null | null | mars/worker/quota.py | sighingnow/mars | c7897fbd144d230fff5edabc1494fb3ff44aa0d2 | [
"Apache-2.0"
] | null | null | null | mars/worker/quota.py | sighingnow/mars | c7897fbd144d230fff5edabc1494fb3ff44aa0d2 | [
"Apache-2.0"
] | null | null | null | # Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import logging
import sys
import time
from collections import namedtuple
from .. import resource, promise
from ..compat import OrderedDict3
from ..utils import log_unhandled
from .utils import WorkerActor
logger = logging.getLogger(__name__)
QuotaDumpType = namedtuple('QuotaDumpType', 'allocations requests proc_sizes hold_sizes')
class QuotaActor(WorkerActor):
"""
Actor handling quota request and assignment
"""
def __init__(self, total_size):
super(QuotaActor, self).__init__()
self._status_ref = None
self._requests = OrderedDict3()
self._total_size = total_size
self._allocations = dict()
self._allocated_size = 0
self._proc_sizes = dict()
self._total_proc = 0
self._hold_sizes = dict()
self._total_hold = 0
def post_create(self):
from .status import StatusActor
super(QuotaActor, self).post_create()
status_ref = self.ctx.actor_ref(StatusActor.default_uid())
if self.ctx.has_actor(status_ref):
self._status_ref = status_ref
def _has_space(self, delta):
return self._allocated_size + delta <= self._total_size
def _log_allocate(self, msg, *args, **kwargs):
args += (self._allocated_size, self._total_size)
logger.debug(msg + ' Allocated: %s, Total size: %s', *args, **kwargs)
@promise.reject_on_exception
@log_unhandled
def request_batch_quota(self, batch, callback=None):
"""
Request for resources in a batch
:param batch: the request dict in form {request_key: request_size, ...}
:param callback: promise callback
:return: if request is returned immediately, return True, otherwise False
"""
all_allocated = True
# check if the request is already allocated
for key, size in batch.items():
if key not in self._allocations or size > self._allocations.get(key):
all_allocated = False
break
# if all requested and allocation can still be applied, apply directly
if all_allocated and self._has_space(0):
self._log_allocate('Quota request %r already allocated.', batch)
if callback is not None:
self.tell_promise(callback)
return True
self._log_allocate('Receive batch quota request %r on %s.', batch, self.uid)
sorted_req = sorted(batch.items(), key=lambda tp: tp[0])
keys = tuple(tp[0] for tp in sorted_req)
values = tuple(tp[1] for tp in sorted_req)
delta = sum(v - self._allocations.get(k, 0) for k, v in batch.items())
# make allocated requests the highest priority to be allocated
return self._request_quota(keys, values, delta, callback, multiple=True,
make_first=all_allocated)
@promise.reject_on_exception
@log_unhandled
def request_quota(self, key, quota_size, callback=None):
"""
Request for resource
:param key: request key
:param quota_size: size of request quota
:param callback: promise callback
:return: if request is returned immediately, return True, otherwise False
"""
self._log_allocate('Receive quota request for key %s on %s.', key, self.uid)
quota_size = int(quota_size)
make_first = False
# check if the request is already allocated
if key in self._allocations:
old_size = self._allocations[key]
# if all requested and allocation can still be applied, apply directly
if old_size >= quota_size and self._has_space(0):
if callback is not None:
self.tell_promise(callback)
return True
else:
# make allocated requests the highest priority to be allocated
make_first = True
else:
old_size = 0
return self._request_quota(key, quota_size, quota_size - old_size, callback,
make_first=make_first)
def _request_quota(self, keys, quota_sizes, delta, callback, multiple=False,
make_first=False):
"""
Actually process requests
:param keys: request keys
:param quota_sizes: request sizes
:param delta: increase of allocate size
:param callback: promise callback
:param make_first: whether to move request keys to the highest priority
:return: if request is returned immediately, return True, otherwise False
"""
if delta > self._total_size:
raise ValueError('Cannot allocate size larger than the total capacity.')
if keys in self._requests:
# already in request queue, store callback and quit
if callback is not None:
self._requests[keys][-1].append(callback)
if make_first:
self._requests.move_to_end(keys, False)
return False
if self._has_space(delta):
if not self._requests:
# if no previous requests, we can apply directly
allocated = True
self._log_allocate('Quota request met for key %r on %s.', keys, self.uid)
alter_allocation = self.alter_allocations if multiple else self.alter_allocation
alter_allocation(keys, quota_sizes, allocate=True)
if callback:
self.tell_promise(callback)
else:
# otherwise, previous requests are satisfied first
allocated = False
self._log_allocate('Quota request queued for key %r on %s.', keys, self.uid)
self._enqueue_request(keys, (quota_sizes, delta, time.time(), multiple, []),
callback=callback, make_first=make_first)
self._process_requests()
return allocated
else:
# current free space cannot satisfy the request, the request is queued
self._log_allocate('Quota request unmet for key %r on %s.', keys, self.uid)
self._enqueue_request(keys, (quota_sizes, delta, time.time(), multiple, []),
callback=callback, make_first=make_first)
return False
def _enqueue_request(self, keys, items, callback=None, make_first=False):
if keys not in self._requests:
self._requests[keys] = items
if callback is not None:
self._requests[keys][-1].append(callback)
if make_first:
self._requests.move_to_end(keys, False)
@log_unhandled
def cancel_requests(self, keys, reject_exc=None):
"""
Cancel a request if it is not assigned
:param keys: request keys
:param reject_exc: the exception to pass to the original callbacks
"""
# normalize key as sorted tuple
keys = tuple(sorted(keys))
# clean up requests from request_batch_quota() whose key is a tuple
keys = keys + (keys,)
for k in keys:
try:
if reject_exc:
for cb in self._requests[k][-1]:
self.tell_promise(cb, *reject_exc, **dict(_accept=False))
del self._requests[k]
logger.debug('Quota request %s cancelled', k)
except KeyError:
pass
self._process_requests()
@log_unhandled
def process_quota(self, key):
"""
Mark request quota as being processed
:param key: request key
"""
if key not in self._allocations:
return
alloc_size = self._allocations[key]
self._total_proc += alloc_size - self._proc_sizes.get(key, 0)
self._proc_sizes[key] = alloc_size
@log_unhandled
def hold_quota(self, key):
"""
Mark request quota as already been hold
:param key: request key
"""
if key not in self._allocations:
return
alloc_size = self._allocations[key]
self._total_hold += alloc_size - self._hold_sizes.get(key, 0)
self._hold_sizes[key] = alloc_size
if key in self._proc_sizes:
self._total_proc -= self._proc_sizes[key]
del self._proc_sizes[key]
@log_unhandled
def release_quota(self, key):
"""
Release allocated quota
:param key: request key
"""
if key not in self._allocations:
return
alloc_size = self._allocations[key]
self._allocated_size -= alloc_size
del self._allocations[key]
if key in self._proc_sizes:
self._total_proc -= self._proc_sizes[key]
del self._proc_sizes[key]
if key in self._hold_sizes:
self._total_hold -= self._hold_sizes[key]
del self._hold_sizes[key]
self._process_requests()
self._log_allocate('Quota key %s released on %s.', key, self.uid)
@log_unhandled
def release_quotas(self, keys):
"""
Release allocated quota in batch
:param keys: request keys
"""
for k in keys:
self.release_quota(k)
def dump_data(self):
return QuotaDumpType(self._allocations, self._requests, self._proc_sizes, self._hold_sizes)
def get_allocated_size(self):
# get total allocated size, for debug purpose
return self._allocated_size
def alter_allocations(self, keys, quota_sizes=None, handle_shrink=True, new_keys=None,
allocate=False, process_quota=False):
"""
Alter multiple requests
:param keys: keys to update
:param quota_sizes: new quota sizes, if None, no changes will be made
        :param handle_shrink: if True and the quota size is less than the original, process requests in the queue
:param new_keys: new allocation keys to replace current keys, if None, no changes will be made
:param allocate: if True, will allocate resources for new items
:param process_quota: call process_quota() after allocated
:return:
"""
quota_sizes = quota_sizes or itertools.repeat(None)
new_keys = new_keys or itertools.repeat(None)
shrink = False
for k, s, nk in zip(keys, quota_sizes, new_keys):
cur_shrink = self.alter_allocation(
k, s, handle_shrink=False, new_key=nk, allocate=allocate, process_quota=process_quota)
shrink = shrink or cur_shrink
if shrink and handle_shrink:
self._process_requests()
@log_unhandled
def alter_allocation(self, key, quota_size=None, handle_shrink=True, new_key=None,
allocate=False, process_quota=False):
"""
Alter a single request by changing its name or request size
:param key: request key
:param quota_size: requested quota size
        :param handle_shrink: if True and the quota size is less than the original, process requests in the queue
:param new_key: new allocation key to replace current key
:param allocate: if True, will allocate resources for new items
:param process_quota: call process_quota() after allocated
"""
old_size = self._allocations.get(key, 0)
if not allocate and key not in self._allocations:
return
if quota_size is not None and quota_size != old_size:
quota_size = int(quota_size)
size_diff = quota_size - old_size
self._allocated_size += size_diff
self._allocations[key] = quota_size
if key in self._proc_sizes:
self._total_proc += quota_size - self._proc_sizes[key]
self._proc_sizes[key] = quota_size
if key in self._hold_sizes:
self._total_hold += quota_size - self._hold_sizes[key]
self._hold_sizes[key] = quota_size
self._log_allocate('Quota key %s applied on %s. Diff: %s,', key, self.uid, size_diff)
if process_quota:
self.process_quota(key)
if new_key is not None and new_key != key:
self._allocations[new_key] = self._allocations[key]
del self._allocations[key]
try:
self._proc_sizes[new_key] = self._proc_sizes[key]
del self._proc_sizes[key]
except KeyError:
pass
try:
self._hold_sizes[new_key] = self._hold_sizes[key]
del self._hold_sizes[key]
except KeyError:
pass
if quota_size is not None and quota_size < old_size:
if handle_shrink:
self._process_requests()
return True
return False
@log_unhandled
def _process_requests(self):
"""
Process quota requests in the queue
"""
removed = []
for k, req in self._requests.items():
req_size, delta, req_time, multiple, callbacks = req
try:
if self._has_space(delta):
alter_allocation = self.alter_allocations if multiple else self.alter_allocation
alter_allocation(k, req_size, handle_shrink=False, allocate=True)
for cb in callbacks:
self.tell_promise(cb)
if self._status_ref:
self._status_ref.update_mean_stats(
'wait_time.' + self.uid.replace('Actor', ''), time.time() - req_time,
_tell=True, _wait=False)
removed.append(k)
else:
# Quota left cannot satisfy the next request, we quit
break
except: # noqa: E722
removed.append(k)
# just in case the quota is allocated
self.release_quota(k)
for cb in callbacks:
self.tell_promise(cb, *sys.exc_info(), **dict(_accept=False))
for k in removed:
self._requests.pop(k, None)
class MemQuotaActor(QuotaActor):
"""
Actor handling worker memory quota
"""
def __init__(self, total_size, overall_size=None, refresh_time=None):
super(MemQuotaActor, self).__init__(total_size)
self._overall_size = overall_size or total_size
self._last_memory_available = 0
self._refresh_time = refresh_time or 10
self._dispatch_ref = None
def post_create(self):
from .dispatcher import DispatchActor
super(MemQuotaActor, self).post_create()
self.update_mem_stats()
self._dispatch_ref = self.promise_ref(DispatchActor.default_uid())
self._update_status(allocated=self._allocated_size, hold=self._total_hold, total=self._total_size)
def update_mem_stats(self):
"""
Refresh memory usage
"""
cur_mem_available = resource.virtual_memory().available
if cur_mem_available > self._last_memory_available:
# memory usage reduced: try reallocate existing requests
self._process_requests()
self._last_memory_available = cur_mem_available
self.ref().update_mem_stats(_tell=True, _delay=self._refresh_time)
def _has_space(self, delta):
mem_stats = resource.virtual_memory()
# calc available physical memory
available_size = mem_stats.available - max(0, mem_stats.total - self._overall_size) \
- self._total_proc
if max(delta, 0) >= available_size:
logger.warning('%s met hard memory limitation: request %d, available %d, hard limit %d',
self.uid, delta, available_size, self._overall_size)
for slot in self._dispatch_ref.get_slots('process_helper'):
self.ctx.actor_ref(slot).free_mkl_buffers(_tell=True, _wait=False)
return False
return super(MemQuotaActor, self)._has_space(delta)
def _log_allocate(self, msg, *args, **kwargs):
mem_stats = resource.virtual_memory()
# calc available physical memory
available_size = mem_stats.available - max(0, mem_stats.total - self._overall_size) \
- self._total_proc
args += (self._allocated_size, self._total_size, mem_stats.available, available_size,
self._overall_size, self._total_proc)
logger.debug(
msg + ' Allocated: %s, Total size: %s, Phy available: %s, Hard available: %s,'
' Hard limit: %s, Processing: %s',
*args, **kwargs
)
def _update_status(self, **kwargs):
if self._status_ref:
self._status_ref.set_mem_quota_allocations(kwargs, _tell=True, _wait=False)
def alter_allocation(self, key, quota_size=None, handle_shrink=True, new_key=None,
allocate=False, process_quota=False):
ret = super(MemQuotaActor, self).alter_allocation(
key, quota_size, handle_shrink=handle_shrink, new_key=new_key,
allocate=allocate, process_quota=process_quota)
if quota_size:
self._update_status(
allocated=self._allocated_size, hold=self._total_hold, total=self._total_size)
return ret
def release_quota(self, key):
ret = super(MemQuotaActor, self).release_quota(key)
self._update_status(allocated=self._allocated_size, total=self._total_size)
return ret
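if __name__ == '__main__':
    # Minimal, framework-free sketch (added for illustration; not part of the mars API) of the
    # accounting rule the actors above enforce: a request is granted only while
    # allocated + delta <= total, otherwise it is queued and served FIFO as quota is released.
    # Keys and sizes below are made up.
    _total_size = 100
    _allocated = {}             # key -> granted size
    _pending = OrderedDict3()   # key -> requested size, FIFO

    def _demo_request(key, size):
        if not _pending and sum(_allocated.values()) + size <= _total_size:
            _allocated[key] = size
            return True
        _pending[key] = size
        return False

    def _demo_release(key):
        _allocated.pop(key, None)
        while _pending:
            k, s = next(iter(_pending.items()))
            if sum(_allocated.values()) + s > _total_size:
                break
            _allocated[k] = _pending.pop(k)

    print(_demo_request('op-a', 60), _demo_request('op-b', 60))  # True False ('op-b' is queued)
    _demo_release('op-a')                                        # frees space, 'op-b' is granted
    print(_allocated)                                            # {'op-b': 60}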
| 39.641304 | 110 | 0.616178 |
63479ec285324cced57d87c5b990f6739df2cc8f | 2,861 | py | Python | UKR-LP/visualizer_lp.py | tanacchi/ml_scratch | 79782ece44f8e742bbc04a83c761da0851360ee5 | [
"MIT"
] | 1 | 2020-12-02T02:31:22.000Z | 2020-12-02T02:31:22.000Z | UKR-LP/visualizer_lp.py | tanacchi/ml_scratch | 79782ece44f8e742bbc04a83c761da0851360ee5 | [
"MIT"
] | 1 | 2020-10-30T07:56:10.000Z | 2020-10-30T07:56:10.000Z | UKR-LP/visualizer_lp.py | tanacchi/ml_scratch | 79782ece44f8e742bbc04a83c761da0851360ee5 | [
"MIT"
] | null | null | null | import numpy as np
from matplotlib import pyplot as plt
from matplotlib.animation import FuncAnimation
def visualize_history(X, history, save_gif=False):
Y_history = history.Y
F_history = history.F
f_history = history.f
Zeta_history = history.Zeta
Z_history = history.Z
input_dim, latent_dim = X.shape[1], Z_history[0].shape[1]
input_projection_type = '3d' if input_dim > 2 else 'rectilinear'
fig = plt.figure(figsize=(10, 5))
input_ax = fig.add_subplot(1, 2, 1, projection=input_projection_type)
latent_ax = fig.add_subplot(1, 2, 2)
num_epoch = len(F_history)
if input_dim == 3 and latent_dim == 2:
F_history = np.array(F_history).reshape((num_epoch, 10, 10, input_dim))
f_history = np.array(f_history).reshape((num_epoch, 10, 10, input_dim))
observable_drawer = [None, None, draw_observable_2D, draw_observable_3D][input_dim]
latent_drawer = [None, draw_latent_1D, draw_latent_2D][latent_dim]
ani = FuncAnimation(fig, update_graph,
frames=num_epoch, repeat=True,
fargs=(observable_drawer, latent_drawer,
X, Y_history, F_history, f_history, Zeta_history, Z_history,
fig, input_ax, latent_ax, num_epoch))
plt.show()
if save_gif:
ani.save("tmp.gif", writer='pillow')
def update_graph(epoch, observable_drawer, latent_drawer,
X, Y_history, F_history, f_history, Zeta_history, Z_history,
fig, input_ax, latent_ax, num_epoch):
fig.suptitle(f"epoch: {epoch}")
input_ax.cla()
    if hasattr(input_ax, 'view_init'):  # only the 3D projection supports view_init
        input_ax.view_init(azim=(epoch*400 / num_epoch), elev=30)
latent_ax.cla()
f, F, Z = f_history[epoch], F_history[epoch], Z_history[epoch]
Y, Zeta = Y_history[epoch], Zeta_history[epoch]
colormap = X[:, 0]
observable_drawer(input_ax, X, Y, f, colormap)
latent_drawer(latent_ax, Zeta, Z, colormap)
def draw_observable_3D(ax, X, Y, f, colormap):
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=colormap)
ax.scatter(Y[:, 0], Y[:, 1], Y[:, 2], c='red', alpha=1, s=50)
if len(f.shape) == 3:
ax.plot_wireframe(f[:, :, 0], f[:, :, 1], f[:, :, 2], color='black')
# else:
# ax.plot(F[:, 0], F[:, 1], F[:, 2], color='black')
# ax.plot(F[:, 0], F[:, 1], F[:, 2], color='black')
# ax.plot_wireframe(F[:, :, 0], F[:, :, 1], F[:, :, 2], color='black')
def draw_observable_2D(ax, X, Y, f, colormap):
    # same signature as draw_observable_3D so update_graph can dispatch to either drawer
    ax.scatter(X[:, 0], X[:, 1], c=colormap)
    ax.plot(f[:, 0], f[:, 1], c='black')
def draw_latent_2D(ax, Zeta, Z, colormap):
# ax.set_xlim(-5, 5)
# ax.set_ylim(-5, 5)
ax.scatter(Zeta[:, 0], Zeta[:, 1], c='red')
ax.scatter(Z[:, 0], Z[:, 1], c=colormap)
def draw_latent_1D(ax, Z, colormap):
ax.scatter(Z, np.zeros(Z.shape), c=colormap)
ax.set_ylim(-1, 1)
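# A hypothetical usage sketch (not part of the original file): it assumes a
# namespace-like `history` object whose Y/F/f/Zeta/Z attributes are per-epoch
# lists shaped the way visualize_history() expects (a 10 x 10 latent grid for
# F/f), filled with random data here purely to exercise the animation.
if __name__ == '__main__':
    from types import SimpleNamespace

    num_epoch, num_samples, num_nodes = 5, 50, 100  # 100 nodes = 10 x 10 grid
    X = np.random.rand(num_samples, 3)
    history = SimpleNamespace(
        Y=[np.random.rand(num_samples, 3) for _ in range(num_epoch)],
        F=[np.random.rand(num_nodes, 3) for _ in range(num_epoch)],
        f=[np.random.rand(num_nodes, 3) for _ in range(num_epoch)],
        Zeta=[np.random.rand(num_nodes, 2) for _ in range(num_epoch)],
        Z=[np.random.rand(num_samples, 2) for _ in range(num_epoch)],
    )
    visualize_history(X, history, save_gif=False)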
| 36.21519 | 91 | 0.608179 |
868dd591e47f8f66065e886dff88812afb28b04c | 1,370 | py | Python | week_1/02_find_alphabet_occurrence_array.py | swcide/algorithm | 8eb518f2ced6121f6b35a8da655bbf954d143211 | [
"Unlicense"
] | null | null | null | week_1/02_find_alphabet_occurrence_array.py | swcide/algorithm | 8eb518f2ced6121f6b35a8da655bbf954d143211 | [
"Unlicense"
] | null | null | null | week_1/02_find_alphabet_occurrence_array.py | swcide/algorithm | 8eb518f2ced6121f6b35a8da655bbf954d143211 | [
"Unlicense"
] | null | null | null | from builtins import range
input = "hello my name is sparta"
'''
You can use str.isalpha() to check whether a string is alphabetic!
print("a".isalpha()) # True
print("1".isalpha()) # False
s = "abcdefg"
print(s[0].isalpha()) # True
# Get the ASCII code with the built-in function ord()
print(ord('a')) # 97
print(ord('a') - ord('a')) # 97-97 -> 0
print(ord('b') - ord('a')) # 98-97 -> 1
print(chr(65)) # A
print(chr(90)) # Z
'''
'''
Increment the count stored at each letter's index!
how to??
The number obtained with ord() is the index of the input letter!
Incrementing the value at that index by 1 should be enough.
Looking at the expected answers, it seems only lowercase letters count..
To include uppercase letters as well, lower() should do the trick.
'''
def find_alphabet_occurrence_array(string):
alphabet_occurrence_array = [0] * 26
for i in string:
if not i.isalpha():
continue
        # NOTE: an uppercase letter gives a negative index here (e.g. ord('H') - ord('a') == -25),
        # which wraps around to a different bucket; the expected answers below appear to rely on this.
        arr_index = ord(i) - ord('a')
alphabet_occurrence_array[arr_index] += 1
return alphabet_occurrence_array
print("์ ๋ต = [3, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 2, 1, 1, 1, 0, 1, 2, 1, 0, 0, 0, 0, 1, 0] \nํ์ฌ ํ์ด ๊ฐ =", find_alphabet_occurrence_array("Hello my name is sparta"))
print("์ ๋ต = [2, 1, 2, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0] \nํ์ฌ ํ์ด ๊ฐ =",
find_alphabet_occurrence_array("Sparta coding club"))
print("์ ๋ต = [2, 2, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 3, 3, 0, 0, 0, 0, 0, 0] \nํ์ฌ ํ์ด ๊ฐ =",
find_alphabet_occurrence_array("best of best sparta"))
| 25.37037 | 163 | 0.556204 |