hexsha (stringlengths 40-40) | size (int64 3-1.03M) | ext (stringclasses 10 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3-972) | max_stars_repo_name (stringlengths 6-130) | max_stars_repo_head_hexsha (stringlengths 40-78) | max_stars_repo_licenses (sequencelengths 1-10) | max_stars_count (int64 1-191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24 ⌀) | max_issues_repo_path (stringlengths 3-972) | max_issues_repo_name (stringlengths 6-130) | max_issues_repo_head_hexsha (stringlengths 40-78) | max_issues_repo_licenses (sequencelengths 1-10) | max_issues_count (int64 1-116k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24 ⌀) | max_forks_repo_path (stringlengths 3-972) | max_forks_repo_name (stringlengths 6-130) | max_forks_repo_head_hexsha (stringlengths 40-78) | max_forks_repo_licenses (sequencelengths 1-10) | max_forks_count (int64 1-105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24 ⌀) | content (stringlengths 3-1.03M) | avg_line_length (float64 1.13-941k) | max_line_length (int64 2-941k) | alphanum_fraction (float64 0-1) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f6499d720ab4c629a6fba5fd2379ec8997d2d78e | 4,619 | py | Python | Money/settings.py | Firexd2/control-money | c72626f057c39766f8d750fc96e5f7accf1f5810 | ["MIT"] | null | null | null | Money/settings.py | Firexd2/control-money | c72626f057c39766f8d750fc96e5f7accf1f5810 | ["MIT"] | null | null | null | Money/settings.py | Firexd2/control-money | c72626f057c39766f8d750fc96e5f7accf1f5810 | ["MIT"] | null | null | null |
"""
Django settings for Money project.
Generated by 'django-admin startproject' using Django 2.0.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
from logging.handlers import SysLogHandler
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '8oz&(rk^79_-pmn0t2$pj6bs$coh8wuh&b0^*j_k=z-z$9(8te'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['control-money.com', '127.0.0.1']
# Application definition
INSTALLED_APPS = [
'admin_tools',
'admin_tools.theming',
'admin_tools.menu',
'admin_tools.dashboard',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'Core',
'Auth',
'widget_tweaks',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Core.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'Core.context_proccesor.get_last_version'
],
'loaders': [
'admin_tools.template_loaders.Loader',
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
},
},
]
WSGI_APPLICATION = 'Money.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Admin settings
ADMIN_TOOLS_THEMING_CSS = '/css/admin-theme.css'
ADMIN_TOOLS_INDEX_DASHBOARD = {
'django.contrib.admin.site': 'Core.custom_admin.dashboard.CustomIndexDashboard',
}
ADMIN_TOOLS_MENU = {
'django.contrib.admin.site': 'Core.custom_admin.menu.CustomMenu',
}
# Auth settings
AUTH_USER_MODEL = 'Auth.User'
LOGIN_REDIRECT_URL = '/panel/'
LOGOUT_REDIRECT_URL = '/auth/login/'
LOGIN_URL = '/auth/login/'
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'ru-ru'
TIME_ZONE = 'Europe/Moscow'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
try:
    from .dev_settings import *
except ImportError:
    # Local development overrides are optional; ignore a missing module.
    pass
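# Illustrative sketch (assumptions, not taken from this repo): the optional
# import above lets an untracked local dev_settings.py override anything
# defined in this module. A minimal dev_settings.py could contain, e.g.:
#
#     DEBUG = True
#     ALLOWED_HOSTS = ['127.0.0.1']
#     SECRET_KEY = 'local-development-only-key'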
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
'file': {
'level': 'ERROR',
'class': 'logging.FileHandler',
'filename': 'log.django',
},
},
'loggers': {
'django': {
'handlers': ['console', 'file'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'ERROR'),
},
},
}
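# Note on the logging config above: the 'django' logger level can be changed
# per run through the environment (illustrative, standard Django usage), e.g.
#
#     DJANGO_LOG_LEVEL=DEBUG python manage.py runserver
#
# otherwise it falls back to ERROR as configured.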
| 25.103261 | 91 | 0.662481 |
3546611bac5e92c85400508a5a081b7e2f3bf398 | 370 | py | Python | src/chip8_dasm/__main__.py | jeffnyman/chip8-dasm | 62e824ed686989db1bb4d350f9676f7c89aa30ca | ["MIT"] | null | null | null | src/chip8_dasm/__main__.py | jeffnyman/chip8-dasm | 62e824ed686989db1bb4d350f9676f7c89aa30ca | ["MIT"] | null | null | null | src/chip8_dasm/__main__.py | jeffnyman/chip8-dasm | 62e824ed686989db1bb4d350f9676f7c89aa30ca | ["MIT"] | null | null | null |
"""Entry point module for the disassembler."""
import sys
from chip8_dasm.cli import main
if sys.version_info < (3, 7):
sys.stderr.write("\nc8dasm requires Python 3.7 or later.\n")
sys.stderr.write(
"Your current version is: "
f"{sys.version_info.major}.{sys.version_info.minor}\n"
)
sys.exit(1)
if __name__ == "__main__":
main()
| 21.764706 | 64 | 0.648649 |
4f7f4d9d8bdbd1b7b7a6bd30cac09368e2d738fa | 2,000 | py | Python | stanCode_Projects/my_photoshop/best_photoshop_award.py | yschang306/sc-projects | a57cc5dd0fce80b286820324997234a0391f23c4 | ["MIT"] | null | null | null | stanCode_Projects/my_photoshop/best_photoshop_award.py | yschang306/sc-projects | a57cc5dd0fce80b286820324997234a0391f23c4 | ["MIT"] | null | null | null | stanCode_Projects/my_photoshop/best_photoshop_award.py | yschang306/sc-projects | a57cc5dd0fce80b286820324997234a0391f23c4 | ["MIT"] | null | null | null |
"""
File: best_photoshop_award.py
----------------------------------
This file creates a photoshopped image
that is going to compete for the Best
Photoshop Award for SC001.
Please put all the images you will use in the image_contest folder
and make sure to choose the right folder when loading your images.
"""
from simpleimage import SimpleImage
# Controls the threshold of detecting green screen pixel
THRESHOLD = 1.4
# Controls the upper bound for black pixel
BLACK_PIXEL = 120
def combine(fg, bg):
"""
This function will replace the green pixels of the figure image
with the pixels of background image
---------------------------------------------
: param1 fg: SimpleImage, the green screen figure image
: param2 bg: SimpleImage, the background image
: return fg: SimpleImage, the green screen pixels are replaced by pixels of background image
"""
for x in range(fg.width):
for y in range(fg.height):
pixel_fg = fg.get_pixel(x, y)
avg = (pixel_fg.red + pixel_fg.blue + pixel_fg.green) // 3
total = pixel_fg.red + pixel_fg.blue + pixel_fg.green
if pixel_fg.green > avg * THRESHOLD and total > BLACK_PIXEL:
pixel_bg = bg.get_pixel(x, y)
pixel_fg.red = pixel_bg.red
pixel_fg.green = pixel_bg.green
pixel_fg.blue = pixel_bg.blue
return fg
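# Worked example for the threshold test above (illustrative values): a pixel
# with (R, G, B) = (60, 200, 70) has avg = 330 // 3 = 110 and total = 330.
# Since 200 > 110 * THRESHOLD (= 154) and 330 > BLACK_PIXEL (= 120), the pixel
# counts as green screen and is replaced by the background pixel at (x, y).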
def main():
"""
    This program replaces the green screen so that a person
    can be photoshopped onto any background.
    -----------------------------------------------------------
    Concept: My face is round and I like to eat soft-boiled eggs.
    So I turn the egg yolk into my face, letting it soak
    in the ramen and look comfortable.
"""
fg = SimpleImage('image_contest/me.jpg')
bg = SimpleImage('image_contest/ramen.jpg')
bg.make_as_big_as(fg)
combined_img = combine(fg, bg)
combined_img.show()
if __name__ == '__main__':
main()
| 33.333333 | 96 | 0.6215 |
b63de76a1c780def81f3293cc1ebf0f46269bcd7 | 924 | py | Python | venv/Scripts/rst2xetex.py | ishatserka/MachineLearningAndDataAnalysisCoursera | e82e772df2f4aec162cb34ac6127df10d14a625a | ["MIT"] | null | null | null | venv/Scripts/rst2xetex.py | ishatserka/MachineLearningAndDataAnalysisCoursera | e82e772df2f4aec162cb34ac6127df10d14a625a | ["MIT"] | null | null | null | venv/Scripts/rst2xetex.py | ishatserka/MachineLearningAndDataAnalysisCoursera | e82e772df2f4aec162cb34ac6127df10d14a625a | ["MIT"] | null | null | null |
#!E:\git\MachineLearningAndDataAnalysisCoursera\venv\Scripts\python.exe
# $Id: rst2xetex.py 7847 2015-03-17 17:30:47Z milde $
# Author: Guenter Milde
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing Lua/XeLaTeX code.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline
description = ('Generates LaTeX documents from standalone reStructuredText '
'sources for compilation with the Unicode-aware TeX variants '
'XeLaTeX or LuaLaTeX. '
'Reads from <source> (default is stdin) and writes to '
'<destination> (default is stdout). See '
'<http://docutils.sourceforge.net/docs/user/latex.html> for '
'the full reference.')
publish_cmdline(writer_name='xetex', description=description)
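# Usage sketch (assumption, not shown in this file): per the description above,
# something like
#
#     python rst2xetex.py document.rst document.tex
#     xelatex document.tex
#
# converts a reStructuredText source to LaTeX and compiles it with XeLaTeX.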
| 33 | 77 | 0.683983 |
20311c74b645de57d4ea2e0c55e7076c9e01e76e | 5,673 | py | Python | configs/faster_rcnn_vgg16_citypersons.py | LiGangszu/PedestrianDetection-HGPD | 3874e331c8afe4cc20fc49de7ebdbe77db277c98 | ["Apache-2.0"] | 9 | 2021-04-02T12:21:38.000Z | 2021-08-19T07:55:19.000Z | configs/faster_rcnn_vgg16_citypersons.py | LiGangszu/PedestrianDetection-HGPD | 3874e331c8afe4cc20fc49de7ebdbe77db277c98 | ["Apache-2.0"] | 1 | 2021-05-02T18:34:06.000Z | 2021-05-12T04:04:57.000Z | configs/faster_rcnn_vgg16_citypersons.py | LiGangszu/PedestrianDetection-HGPD | 3874e331c8afe4cc20fc49de7ebdbe77db277c98 | ["Apache-2.0"] | 2 | 2021-04-28T09:27:45.000Z | 2021-06-07T12:02:01.000Z |
# model settings
model = dict(
type='FasterRCNN',
pretrained='modelzoo://vgg16',
backbone=dict(
type='VGG',
depth=16,
frozen_stages=1),
neck=None,
rpn_head=dict(
type='RPNHead',
in_channels=512,
feat_channels=512,
anchor_scales=[4., 5.4, 7.2, 9.8, 13.2, 17.9, 24.2, 33.0, 44.1, 59.6, 80.0],
anchor_ratios=[2.44],
anchor_strides=[8],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=512,
featmap_strides=[8]),
bbox_head=dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=512,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=2,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=2.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)))
# model training and testing settings
train_cfg = dict(
rpn_stage1=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=0.5),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_stage2=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=0.7),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=0.5),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False))
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=12000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100)
# soft-nms is also supported for rcnn testing
# score_thr=0.05, nms=dict(type='soft_nms', iou_thr=0.5, min_score=0.05), max_per_img=100)
)
# dataset settings
dataset_type = 'CocoDataset'
data_root = '' # specify the path to CityPersons
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(2048, 1024), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_bboxes_ignore', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(2048, 1024),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=2,
workers_per_gpu=4,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train.json',
img_prefix=data_root + 'images/train/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/val_gt.json',
img_prefix=data_root + 'images/val/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val.json',
img_prefix=data_root + 'images/val/',
pipeline=test_pipeline))
evaluation = dict(interval=1, start=8, metric='bbox')
# optimizer
optimizer = dict(type='SGD', lr=0.002, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=200,
warmup_ratio=1.0 / 3,
step=[10])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 14
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
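# Consumption sketch (assumption: a standard mmdetection-style workflow, which
# this file does not state): a config like this is typically launched with
#
#     python tools/train.py configs/faster_rcnn_vgg16_citypersons.py
#
# or loaded programmatically via
#
#     from mmcv import Config
#     cfg = Config.fromfile('configs/faster_rcnn_vgg16_citypersons.py')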
| 30.664865 | 94 | 0.588049 |
6c62469480b6486abced8fc11f590d4da0dea4dc | 11,919 | py | Python | dort-core/timelord/timelord_state.py | Dortchain/dort-blockchian | 14f16e321a60f9d70f849f58e4e9964fa337a084 | ["Apache-2.0"] | 1 | 2021-09-05T18:21:09.000Z | 2021-09-05T18:21:09.000Z | dort-core/timelord/timelord_state.py | Dortchain/dort-blockchian | 14f16e321a60f9d70f849f58e4e9964fa337a084 | ["Apache-2.0"] | 1 | 2021-07-11T03:04:25.000Z | 2021-07-11T03:04:25.000Z | dort-core/timelord/timelord_state.py | Dortchain/dort-blockchian | 14f16e321a60f9d70f849f58e4e9964fa337a084 | ["Apache-2.0"] | null | null | null |
import logging
from typing import List, Optional, Tuple, Union
from Dort.consensus.constants import ConsensusConstants
from Dort.protocols import timelord_protocol
from Dort.timelord.iters_from_block import iters_from_block
from Dort.timelord.types import Chain, StateType
from Dort.types.blockchain_format.classgroup import ClassgroupElement
from Dort.types.blockchain_format.sized_bytes import bytes32
from Dort.types.blockchain_format.slots import ChallengeBlockInfo
from Dort.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from Dort.types.end_of_slot_bundle import EndOfSubSlotBundle
from Dort.util.ints import uint8, uint32, uint64, uint128
log = logging.getLogger(__name__)
class LastState:
"""
Represents the state that the timelord is in, and should execute VDFs on top of. A state can be one of three types:
1. A "peak" or a block
2. An end of sub-slot
3. None, if it's the first sub-slot and there are no blocks yet
Timelords execute VDFs until they reach the next block or sub-slot, at which point the state is changed again.
The state can also be changed arbitrarily to a sub-slot or peak, for example in the case the timelord receives
a new block in the future.
"""
def __init__(self, constants: ConsensusConstants):
self.state_type: StateType = StateType.FIRST_SUB_SLOT
self.peak: Optional[timelord_protocol.NewPeakTimelord] = None
self.subslot_end: Optional[EndOfSubSlotBundle] = None
self.last_ip: uint64 = uint64(0)
self.deficit: uint8 = constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK
self.sub_epoch_summary: Optional[SubEpochSummary] = None
self.constants: ConsensusConstants = constants
self.last_weight: uint128 = uint128(0)
self.last_height: uint32 = uint32(0)
self.total_iters: uint128 = uint128(0)
self.last_challenge_sb_or_eos_total_iters = uint128(0)
self.last_block_total_iters: Optional[uint128] = None
self.last_peak_challenge: bytes32 = constants.GENESIS_CHALLENGE
self.difficulty: uint64 = constants.DIFFICULTY_STARTING
self.sub_slot_iters: uint64 = constants.SUB_SLOT_ITERS_STARTING
self.reward_challenge_cache: List[Tuple[bytes32, uint128]] = [(constants.GENESIS_CHALLENGE, uint128(0))]
self.new_epoch = False
self.passed_ses_height_but_not_yet_included = False
self.infused_ses = False
def set_state(self, state: Union[timelord_protocol.NewPeakTimelord, EndOfSubSlotBundle]):
if isinstance(state, timelord_protocol.NewPeakTimelord):
self.state_type = StateType.PEAK
self.peak = state
self.subslot_end = None
_, self.last_ip = iters_from_block(
self.constants,
state.reward_chain_block,
state.sub_slot_iters,
state.difficulty,
)
self.deficit = state.deficit
self.sub_epoch_summary = state.sub_epoch_summary
self.last_weight = state.reward_chain_block.weight
self.last_height = state.reward_chain_block.height
self.total_iters = state.reward_chain_block.total_iters
self.last_peak_challenge = state.reward_chain_block.get_hash()
self.difficulty = state.difficulty
self.sub_slot_iters = state.sub_slot_iters
if state.reward_chain_block.is_transaction_block:
self.last_block_total_iters = self.total_iters
self.reward_challenge_cache = state.previous_reward_challenges
self.last_challenge_sb_or_eos_total_iters = self.peak.last_challenge_sb_or_eos_total_iters
self.new_epoch = False
if (self.peak.reward_chain_block.height + 1) % self.constants.SUB_EPOCH_BLOCKS == 0:
self.passed_ses_height_but_not_yet_included = True
else:
self.passed_ses_height_but_not_yet_included = state.passes_ses_height_but_not_yet_included
elif isinstance(state, EndOfSubSlotBundle):
self.state_type = StateType.END_OF_SUB_SLOT
if self.peak is not None:
self.total_iters = uint128(self.total_iters - self.get_last_ip() + self.sub_slot_iters)
else:
self.total_iters = uint128(self.total_iters + self.sub_slot_iters)
self.peak = None
self.subslot_end = state
self.last_ip = uint64(0)
self.deficit = state.reward_chain.deficit
if state.challenge_chain.new_difficulty is not None:
assert state.challenge_chain.new_sub_slot_iters is not None
self.difficulty = state.challenge_chain.new_difficulty
self.sub_slot_iters = state.challenge_chain.new_sub_slot_iters
self.new_epoch = True
else:
self.new_epoch = False
if state.challenge_chain.subepoch_summary_hash is not None:
self.infused_ses = True
self.passed_ses_height_but_not_yet_included = False
else:
self.infused_ses = False
self.passed_ses_height_but_not_yet_included = self.passed_ses_height_but_not_yet_included
self.last_challenge_sb_or_eos_total_iters = self.total_iters
else:
self.passed_ses_height_but_not_yet_included = self.passed_ses_height_but_not_yet_included
self.new_epoch = False
self.reward_challenge_cache.append((self.get_challenge(Chain.REWARD_CHAIN), self.total_iters))
log.info(f"Updated timelord peak to {self.get_challenge(Chain.REWARD_CHAIN)}, total iters: {self.total_iters}")
while len(self.reward_challenge_cache) > 2 * self.constants.MAX_SUB_SLOT_BLOCKS:
self.reward_challenge_cache.pop(0)
def get_sub_slot_iters(self) -> uint64:
return self.sub_slot_iters
def can_infuse_block(self, overflow: bool) -> bool:
if overflow and self.new_epoch:
# No overflows in new epoch
return False
if self.state_type == StateType.FIRST_SUB_SLOT or self.state_type == StateType.END_OF_SUB_SLOT:
return True
ss_start_iters = self.get_total_iters() - self.get_last_ip()
already_infused_count: int = 0
for _, total_iters in self.reward_challenge_cache:
if total_iters > ss_start_iters:
already_infused_count += 1
if already_infused_count >= self.constants.MAX_SUB_SLOT_BLOCKS:
return False
return True
def get_weight(self) -> uint128:
return self.last_weight
def get_height(self) -> uint32:
return self.last_height
def get_total_iters(self) -> uint128:
return self.total_iters
def get_last_peak_challenge(self) -> Optional[bytes32]:
return self.last_peak_challenge
def get_difficulty(self) -> uint64:
return self.difficulty
def get_last_ip(self) -> uint64:
return self.last_ip
def get_deficit(self) -> uint8:
return self.deficit
def just_infused_sub_epoch_summary(self) -> bool:
"""
Returns true if state is an end of sub-slot, and that end of sub-slot infused a sub epoch summary
"""
return self.state_type == StateType.END_OF_SUB_SLOT and self.infused_ses
def get_next_sub_epoch_summary(self) -> Optional[SubEpochSummary]:
if self.state_type == StateType.FIRST_SUB_SLOT or self.state_type == StateType.END_OF_SUB_SLOT:
# Can only infuse SES after a peak (in an end of sub slot)
return None
assert self.peak is not None
if self.passed_ses_height_but_not_yet_included and self.get_deficit() == 0:
# This will mean we will include the ses in the next sub-slot
return self.sub_epoch_summary
return None
def get_last_block_total_iters(self) -> Optional[uint128]:
return self.last_block_total_iters
def get_passed_ses_height_but_not_yet_included(self) -> bool:
return self.passed_ses_height_but_not_yet_included
def get_challenge(self, chain: Chain) -> Optional[bytes32]:
if self.state_type == StateType.FIRST_SUB_SLOT:
assert self.peak is None and self.subslot_end is None
if chain == Chain.CHALLENGE_CHAIN:
return self.constants.GENESIS_CHALLENGE
elif chain == Chain.REWARD_CHAIN:
return self.constants.GENESIS_CHALLENGE
elif chain == Chain.INFUSED_CHALLENGE_CHAIN:
return None
elif self.state_type == StateType.PEAK:
assert self.peak is not None
reward_chain_block = self.peak.reward_chain_block
if chain == Chain.CHALLENGE_CHAIN:
return reward_chain_block.challenge_chain_ip_vdf.challenge
elif chain == Chain.REWARD_CHAIN:
return reward_chain_block.get_hash()
elif chain == Chain.INFUSED_CHALLENGE_CHAIN:
if reward_chain_block.infused_challenge_chain_ip_vdf is not None:
return reward_chain_block.infused_challenge_chain_ip_vdf.challenge
elif self.peak.deficit == self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1:
return ChallengeBlockInfo(
reward_chain_block.proof_of_space,
reward_chain_block.challenge_chain_sp_vdf,
reward_chain_block.challenge_chain_sp_signature,
reward_chain_block.challenge_chain_ip_vdf,
).get_hash()
return None
elif self.state_type == StateType.END_OF_SUB_SLOT:
assert self.subslot_end is not None
if chain == Chain.CHALLENGE_CHAIN:
return self.subslot_end.challenge_chain.get_hash()
elif chain == Chain.REWARD_CHAIN:
return self.subslot_end.reward_chain.get_hash()
elif chain == Chain.INFUSED_CHALLENGE_CHAIN:
if self.subslot_end.reward_chain.deficit < self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
assert self.subslot_end.infused_challenge_chain is not None
return self.subslot_end.infused_challenge_chain.get_hash()
return None
return None
def get_initial_form(self, chain: Chain) -> Optional[ClassgroupElement]:
if self.state_type == StateType.FIRST_SUB_SLOT:
return ClassgroupElement.get_default_element()
elif self.state_type == StateType.PEAK:
assert self.peak is not None
reward_chain_block = self.peak.reward_chain_block
if chain == Chain.CHALLENGE_CHAIN:
return reward_chain_block.challenge_chain_ip_vdf.output
if chain == Chain.REWARD_CHAIN:
return ClassgroupElement.get_default_element()
if chain == Chain.INFUSED_CHALLENGE_CHAIN:
if reward_chain_block.infused_challenge_chain_ip_vdf is not None:
return reward_chain_block.infused_challenge_chain_ip_vdf.output
elif self.peak.deficit == self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1:
return ClassgroupElement.get_default_element()
else:
return None
elif self.state_type == StateType.END_OF_SUB_SLOT:
if chain == Chain.CHALLENGE_CHAIN or chain == Chain.REWARD_CHAIN:
return ClassgroupElement.get_default_element()
if chain == Chain.INFUSED_CHALLENGE_CHAIN:
assert self.subslot_end is not None
if self.subslot_end.reward_chain.deficit < self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
return ClassgroupElement.get_default_element()
else:
return None
return None
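# Usage sketch (illustrative, not part of this module): a timelord holding a
# ConsensusConstants instance `constants` would roughly do
#
#     state = LastState(constants)          # starts in StateType.FIRST_SUB_SLOT
#     state.set_state(new_peak)             # new_peak: timelord_protocol.NewPeakTimelord
#     challenge = state.get_challenge(Chain.REWARD_CHAIN)
#     iters = state.get_sub_slot_iters()
#
# and run VDFs from `challenge` until the next block or end-of-sub-slot
# arrives, at which point set_state() is called again with the new object.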
| 49.870293 | 119 | 0.675392 |
a901e0dde19197f94d5722556a76067cf9908c67 | 1,421 | py | Python | src/bilbyui/migrations/0011_auto_20200720_0109.py | gravitationalwavedc/gwcloud_bilby | f5074fe60ff2a3cfa6a7e8d3e97c9573a6152563 | ["MIT"] | 1 | 2020-10-26T02:35:26.000Z | 2020-10-26T02:35:26.000Z | src/bilbyui/migrations/0011_auto_20200720_0109.py | gravitationalwavedc/gwcloud_bilby | f5074fe60ff2a3cfa6a7e8d3e97c9573a6152563 | ["MIT"] | 31 | 2020-05-04T05:57:45.000Z | 2022-02-23T04:35:35.000Z | src/bilbyui/migrations/0011_auto_20200720_0109.py | gravitationalwavedc/gwcloud_bilby | f5074fe60ff2a3cfa6a7e8d3e97c9573a6152563 | ["MIT"] | null | null | null |
# Generated by Django 2.2.13 on 2020-07-20 01:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bilbyui', '0010_merge_20200514_0412'),
]
operations = [
migrations.CreateModel(
name='Label',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('description', models.TextField(blank=True, null=True)),
],
options={
'db_table': 'bilby_label',
},
),
migrations.AlterField(
model_name='samplerparameter',
name='name',
field=models.CharField(choices=[['nlive', 'Number of live points'], ['nact', 'Number of auto-correlation steps'], ['maxmcmc', 'Maximum number of steps'], ['walks', 'Minimum number of walks'], ['dlogz', 'Stopping criteria'], ['cpus', 'Number of CPUs to use for parallelisation']], max_length=50),
),
migrations.AlterField(
model_name='samplerparameter',
name='value',
field=models.FloatField(blank=True, null=True),
),
migrations.AddField(
model_name='bilbyjob',
name='labels',
field=models.ManyToManyField(to='bilbyui.Label'),
),
]
| 35.525 | 307 | 0.567206 |
0f753e5773ae086a5c9c602d85703d4741acc2db | 161 | py | Python | Parte1/Cap2/name_1.py | fabianoflorentino/python-CursoIntensivoDePython | 822288cc4b382936dde1bc647e3f8c2b925ced70 | ["Apache-2.0"] | null | null | null | Parte1/Cap2/name_1.py | fabianoflorentino/python-CursoIntensivoDePython | 822288cc4b382936dde1bc647e3f8c2b925ced70 | ["Apache-2.0"] | null | null | null | Parte1/Cap2/name_1.py | fabianoflorentino/python-CursoIntensivoDePython | 822288cc4b382936dde1bc647e3f8c2b925ced70 | ["Apache-2.0"] | 1 | 2020-02-05T13:07:08.000Z | 2020-02-05T13:07:08.000Z |
first_name = "Fabiano"
last_name = "Florentino"
full_name = first_name + " " + last_name
print(f'{full_name}\n')
print(f"Hello ," + full_name.title() + "!\n" ) | 23 | 46 | 0.658385 |
78e402f5d35fec4a7ff734ae2b0c04c93f5731be | 127 | py | Python | mne/io/egi/__init__.py | fmamashli/mne-python | 52f064415e7c9fa8fe243d22108dcdf3d86505b9 | ["BSD-3-Clause"] | 3 | 2021-01-04T08:45:56.000Z | 2021-05-19T12:25:59.000Z | mne/io/egi/__init__.py | fmamashli/mne-python | 52f064415e7c9fa8fe243d22108dcdf3d86505b9 | ["BSD-3-Clause"] | 28 | 2020-05-07T00:58:34.000Z | 2020-08-29T23:02:17.000Z | mne/io/egi/__init__.py | fmamashli/mne-python | 52f064415e7c9fa8fe243d22108dcdf3d86505b9 | ["BSD-3-Clause"] | 3 | 2019-01-28T13:48:00.000Z | 2019-07-10T16:02:11.000Z |
"""EGI module for conversion to FIF."""
# Author: Denis A. Engemann <denis.engemann@gmail.com>
from .egi import read_raw_egi
| 21.166667 | 54 | 0.732283 |
d437d3eab6dd2df32e315d45b98d335ef236052c | 5,521 | py | Python | src/fine_tune/fine_tune.py | KennethEnevoldsen/snp-compression | 0cccecd25bc301530b814e6e10958d5a8ea836e7 | ["BSD-3-Clause"] | null | null | null | src/fine_tune/fine_tune.py | KennethEnevoldsen/snp-compression | 0cccecd25bc301530b814e6e10958d5a8ea836e7 | ["BSD-3-Clause"] | null | null | null | src/fine_tune/fine_tune.py | KennethEnevoldsen/snp-compression | 0cccecd25bc301530b814e6e10958d5a8ea836e7 | ["BSD-3-Clause"] | null | null | null |
"""
Fine-tune pre-trained networks on new data samples
"""
import os
import sys
from pathlib import Path
import xarray as xr
import pandas as pd
import numpy as np
import dask.array as da
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.loggers import WandbLogger
import wandb
sys.path.append(".")
sys.path.append("../../.")
from src.apply.validate import load_model
from src.data.dataloaders import load_dataset, DaskIterableDataset
from src.models.classifier import OneHotClassifier
from src.fine_tune.baselines import phenotypes, pheno_path
from src.util import create_argparser, config_yaml_to_dict
def load_encoders():
models = {
6: {"name": "rich-thunder-72"},
2: {"name": "clear-oath-74"},
1: {"name": "ruby-sea-73"},
}
for chrom in models:
print("Loading Model")
mdl = load_model(models[chrom]["name"], config={"chromosome": chrom})
# mdl.to(device=torch.device("cuda"))
models[chrom]["model"] = mdl.model.encoder
return models
def load_geno():
path = "/home/kce/NLPPred/snp-compression/data/interim/genotype.zarr"
zds = xr.open_zarr(path)
geno = zds.genotype
return geno
def load_dask_geno(chrom=[1, 2, 6]):
geno = []
for c in chrom:
ds, val, test = load_dataset(c, p_val=0, p_test=0) # no test val
geno.append(ds.X)
return da.concatenate(geno, axis=1)
def load_pheno(path: str, geno, dask_geno, split="train"):
"""
dask geno is much more efficient than geno, but geno has all the metadata attached.
"""
path = Path(path).with_suffix("." + split)
df = pd.read_csv(path, sep=" ", header=None)
assert sum(df[0] == df[1]) == len(df[0]) # FID == IID
df.columns = ["FID", "IID", "PHENO"]
df["IID"] = df["IID"].astype(int)
overlapping_ids = geno.iid.astype(int).isin(df["IID"]).compute()
pheno_mapping = {iid: pheno for iid, pheno in zip(df["IID"], df["PHENO"])}
out = geno[overlapping_ids]
X = dask_geno[overlapping_ids]
y = np.array(
[pheno_mapping[i] for i in out.coords["iid"].astype(int).compute().data]
)
return X, y, out
def create_data_loaders(phenotype, chrom=[1, 2, 6]):
geno = load_geno()
dask_geno = load_dask_geno()
X, y, meta = load_pheno(pheno_path / phenotype, geno, dask_geno, split="train")
train = DaskIterableDataset(
X[:-20_000], y[:-20_000]
) # TODO: fix this to a random mask
val = DaskIterableDataset(X[-20_000:], y[-20_000:])
X_test, y_test, meta_test = load_pheno(
pheno_path / phenotype, geno, dask_geno, split="test"
)
test = DaskIterableDataset(X_test, y_test)
metadata = {c: (meta.chrom == str(c)).sum().compute() for c in chrom}
return train, val, test, metadata
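# Note on the return value: `metadata` maps each chromosome to the number of
# SNP columns it contributes (e.g. a dict of the form {1: n1, 2: n2, 6: n6};
# the concrete counts depend on the data). create_model() below turns these
# counts into cumulative index ranges so each per-chromosome encoder sees its
# own slice of the concatenated genotype matrix.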
def create_model(metadata, train, val, config):
i = 0
chrom_to_snp_indexes = {}
for chrom, value in metadata.items():
chrom_to_snp_indexes[chrom] = i, i + value
i += value
clf = OneHotClassifier(
encoders=load_encoders(),
chrom_to_snp_indexes=chrom_to_snp_indexes,
learning_rate=config.learning_rate,
optimizer=config.optimizer,
train_loader=train,
val_loader=val,
)
return clf
def create_trainer(config) -> Trainer:
wandb_logger = WandbLogger()
callbacks = [ModelCheckpoint(monitor="val_loss", mode="min")]
if config.patience:
early_stopping = EarlyStopping("val_loss", patience=config.patience)
if callbacks is None:
callbacks = []
callbacks.append(early_stopping)
trainer = Trainer(
logger=wandb_logger,
log_every_n_steps=config.log_step,
val_check_interval=config.val_check_interval,
callbacks=callbacks,
gpus=config.gpus,
profiler=config.profiler,
max_epochs=config.max_epochs,
default_root_dir=config.default_root_dir,
weights_save_path=os.path.join(config.default_root_dir, config.run_name),
precision=config.precision,
auto_lr_find=config.auto_lr_find,
check_val_every_n_epoch=config.check_val_every_n_epoch,
)
return trainer
def main():
# Create config
yml_path = Path(__file__).parent / ".." / "configs" / "default_clf_config.yaml"
parser = create_argparser(yml_path)
arguments = parser.parse_args()
wandb.init(
config=arguments,
project=f"snp-classifiers-{arguments.phenotype}",
dir=arguments.default_root_dir,
allow_val_change=True,
)
config = wandb.config
config.run_name = wandb.run.name
# if config is specified update arguments according to config.
if config.config:
hyperparameter_config = config_yaml_to_dict(config.config)
config.update(hyperparameter_config, allow_val_change=True)
# Create model, dataset, trainer
train_loader, val_loader, test_loader, metadata = create_data_loaders(
config.phenotype
)
    model = create_model(metadata, train_loader, val_loader, config)
trainer = create_trainer(config)
# Train
if config.auto_lr_find:
lr_finder = trainer.tuner.lr_find(model)
config.update({"learning_rate": lr_finder.suggestion()}, allow_val_change=True)
fig = lr_finder.plot(suggest=True)
wandb.log({"lr_finder.plot": fig})
trainer.fit(model, train_dataloaders=train_loader, val_dataloaders=val_loader)
if __name__ == "__main__":
main()
| 30.843575 | 87 | 0.670712 |
e8f7534a95f4dfcd0fdcf6432713ba180cd9afa2 | 496 | py | Python | ecom/store/migrations/0002_auto_20200904_1603.py | shamsher4499/website | f81d0564f88059e5c55f8a1194913572c61a7247 | ["MIT"] | null | null | null | ecom/store/migrations/0002_auto_20200904_1603.py | shamsher4499/website | f81d0564f88059e5c55f8a1194913572c61a7247 | ["MIT"] | null | null | null | ecom/store/migrations/0002_auto_20200904_1603.py | shamsher4499/website | f81d0564f88059e5c55f8a1194913572c61a7247 | ["MIT"] | null | null | null |
# Generated by Django 3.0.5 on 2020-09-04 10:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('store', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='customer',
name='mobile',
),
migrations.AddField(
model_name='customer',
name='mobile_number',
field=models.CharField(max_length=12, null=True),
),
]
| 21.565217 | 61 | 0.568548 |
ddc42284046a4a368378654bea45b9521e80faab | 598 | py | Python | scripts/server.py | 3coins/jupyterlab-plugin-playground | 152dbda0f0285fc7ebe62b4366b66962845e3f8c | ["BSD-3-Clause"] | 16 | 2020-01-13T14:38:37.000Z | 2020-10-14T10:49:23.000Z | scripts/server.py | 3coins/jupyterlab-plugin-playground | 152dbda0f0285fc7ebe62b4366b66962845e3f8c | ["BSD-3-Clause"] | 16 | 2021-10-12T04:53:17.000Z | 2022-03-24T14:26:54.000Z | scripts/server.py | 3coins/jupyterlab-plugin-playground | 152dbda0f0285fc7ebe62b4366b66962845e3f8c | ["BSD-3-Clause"] | 4 | 2021-10-12T04:43:06.000Z | 2022-02-27T01:18:45.000Z |
#!/usr/bin/env python3
# encoding: utf-8
"""Use instead of `python3 -m http.server` when you need CORS"""
from http.server import HTTPServer, SimpleHTTPRequestHandler
class CORSRequestHandler(SimpleHTTPRequestHandler):
def end_headers(self):
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Access-Control-Allow-Methods', 'GET')
self.send_header('Cache-Control', 'no-store, no-cache, must-revalidate')
return super(CORSRequestHandler, self).end_headers()
httpd = HTTPServer(('localhost', 8003), CORSRequestHandler)
httpd.serve_forever() | 37.375 | 80 | 0.730769 |
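# Usage sketch (assumption, not part of the original script): run it from the
# directory you want to serve and check that the CORS header set above is
# present in responses, e.g.
#
#     python3 server.py
#     curl -i http://localhost:8003/   # expect "Access-Control-Allow-Origin: *" among the headers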
1c944ae05929ddf6e8b8a6a177a32dda3945fe48 | 505 | py | Python | final_project_face_recognition/cli.py | dlwoalsgg/final_project_face_recognition | beea03d89755a96ab51f96938ec037783cff5a75 | ["MIT"] | null | null | null | final_project_face_recognition/cli.py | dlwoalsgg/final_project_face_recognition | beea03d89755a96ab51f96938ec037783cff5a75 | ["MIT"] | null | null | null | final_project_face_recognition/cli.py | dlwoalsgg/final_project_face_recognition | beea03d89755a96ab51f96938ec037783cff5a75 | ["MIT"] | null | null | null |
"""Console script for final_project_face_recognition."""
import argparse
import sys
def main():
"""Console script for final_project_face_recognition."""
parser = argparse.ArgumentParser()
parser.add_argument('_', nargs='*')
args = parser.parse_args()
print("Arguments: " + str(args._))
print("Replace this message by putting your code into "
"final_project_face_recognition.cli.main")
return 0
if __name__ == "__main__":
sys.exit(main()) # pragma: no cover
| 25.25 | 60 | 0.685149 |
02f67060e18b78a9fb790fb994cd9b99d5c341bc | 26 | py | Python | text-classification/bert-sentiment-sst5/bert_sentiment/__init__.py | ripplesaround/ETDI_NL | 4b77444d8ef5dee1f020e4d30e6cfc24fb5945c9 | ["MIT"] | 49 | 2019-10-10T16:12:49.000Z | 2022-01-16T07:11:18.000Z | text-classification/bert-sentiment-sst5/bert_sentiment/__init__.py | ripplesaround/ETDI_NL | 4b77444d8ef5dee1f020e4d30e6cfc24fb5945c9 | ["MIT"] | 4 | 2019-11-25T09:27:46.000Z | 2020-07-04T02:10:42.000Z | text-classification/bert-sentiment-sst5/bert_sentiment/__init__.py | ripplesaround/ETDI_NL | 4b77444d8ef5dee1f020e4d30e6cfc24fb5945c9 | ["MIT"] | 10 | 2019-10-11T05:46:12.000Z | 2021-04-13T06:30:04.000Z |
from . import data, train
| 13 | 25 | 0.730769 |
a6f93c67083535fae60fa08b58f1ee1b17abea5b | 21,649 | py | Python | tunobase/core/migrations/0008_auto__chg_field_htmlbanner_rich_content__chg_field_contentmodel_rich_c.py | unomena/tunobase-core | fd24e378c87407131805fa56ade8669fceec8dfa | ["BSD-3-Clause"] | null | null | null | tunobase/core/migrations/0008_auto__chg_field_htmlbanner_rich_content__chg_field_contentmodel_rich_c.py | unomena/tunobase-core | fd24e378c87407131805fa56ade8669fceec8dfa | ["BSD-3-Clause"] | null | null | null | tunobase/core/migrations/0008_auto__chg_field_htmlbanner_rich_content__chg_field_contentmodel_rich_c.py | unomena/tunobase-core | fd24e378c87407131805fa56ade8669fceec8dfa | ["BSD-3-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'HTMLBanner.rich_content'
db.alter_column(u'core_htmlbanner', 'rich_content', self.gf('redactor.fields.RedactorTextField')(null=True))
# Changing field 'ContentModel.rich_content'
db.alter_column(u'core_contentmodel', 'rich_content', self.gf('redactor.fields.RedactorTextField')(null=True))
def backwards(self, orm):
# Changing field 'HTMLBanner.rich_content'
#db.alter_column(u'core_htmlbanner', 'rich_content', self.gf('ckeditor.fields.RichTextField')(null=True))
# Changing field 'ContentModel.rich_content'
#db.alter_column(u'core_contentmodel', 'rich_content', self.gf('ckeditor.fields.RichTextField')(null=True))
pass
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'authentication.enduser': {
'Meta': {'object_name': 'EndUser'},
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'company': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'crop_from': ('django.db.models.fields.CharField', [], {'default': "'center'", 'max_length': '10', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'effect': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'enduser_related'", 'null': 'True', 'to': u"orm['photologue.PhotoEffect']"}),
'email': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_console_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_regular_user': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'job_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'mobile_number': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'state_province': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'street_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'web_address': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'zip_postal_code': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'core.contentblock': {
'Meta': {'ordering': "['order', '-publish_at']", 'object_name': 'ContentBlock', '_ormbases': [u'core.ContentModel']},
'alternative_title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'content_block_parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'content_blocks'", 'null': 'True', 'to': u"orm['core.ContentBlockSet']"}),
u'contentmodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['core.ContentModel']", 'unique': 'True', 'primary_key': 'True'})
},
u'core.contentblockset': {
'Meta': {'ordering': "['order', '-publish_at']", 'object_name': 'ContentBlockSet', '_ormbases': [u'core.ContentModel']},
u'contentmodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['core.ContentModel']", 'unique': 'True', 'primary_key': 'True'})
},
u'core.contentmodel': {
'Meta': {'ordering': "['order', '-publish_at']", 'object_name': 'ContentModel'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'contentmodel_created_content'", 'null': 'True', 'to': u"orm['authentication.EndUser']"}),
'crop_from': ('django.db.models.fields.CharField', [], {'default': "'center'", 'max_length': '10', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'effect': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'contentmodel_related'", 'null': 'True', 'to': u"orm['photologue.PhotoEffect']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'image_name': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'leaf_content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True'}),
'meta_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'meta_keywords': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'contentmodel_modified_content'", 'null': 'True', 'to': u"orm['authentication.EndUser']"}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'plain_content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'publish_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'retract_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'rich_content': ('redactor.fields.RedactorTextField', [], {'null': 'True', 'blank': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'state': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'core.defaultimage': {
'Meta': {'object_name': 'DefaultImage'},
'category': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'crop_from': ('django.db.models.fields.CharField', [], {'default': "'center'", 'max_length': '10', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'effect': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'defaultimage_related'", 'null': 'True', 'to': u"orm['photologue.PhotoEffect']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'publish_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'retract_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'core.gallery': {
'Meta': {'ordering': "['order', '-publish_at']", 'object_name': 'Gallery', '_ormbases': [u'core.ContentModel']},
u'contentmodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['core.ContentModel']", 'unique': 'True', 'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'galleries'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['core.GalleryImage']"})
},
u'core.galleryimage': {
'Meta': {'ordering': "['order', '-publish_at']", 'object_name': 'GalleryImage'},
'crop_from': ('django.db.models.fields.CharField', [], {'default': "'center'", 'max_length': '10', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'effect': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'galleryimage_related'", 'null': 'True', 'to': u"orm['photologue.PhotoEffect']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'image_name': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'publish_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'retract_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'core.htmlbanner': {
'Meta': {'ordering': "['order', '-publish_at']", 'object_name': 'HTMLBanner'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'plain_content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'publish_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'retract_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'rich_content': ('redactor.fields.RedactorTextField', [], {'null': 'True', 'blank': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'core.htmlbannerset': {
'Meta': {'ordering': "['order', '-publish_at']", 'object_name': 'HTMLBannerSet'},
'banners': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'banner_sets'", 'symmetrical': 'False', 'to': u"orm['core.HTMLBanner']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'publish_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'retract_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'state': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
u'core.imagebanner': {
'Meta': {'ordering': "['order', '-publish_at']", 'object_name': 'ImageBanner'},
'crop_from': ('django.db.models.fields.CharField', [], {'default': "'center'", 'max_length': '10', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'effect': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'imagebanner_related'", 'null': 'True', 'to': u"orm['photologue.PhotoEffect']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'image_name': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'publish_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'retract_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'core.imagebannerset': {
'Meta': {'ordering': "['order', '-publish_at']", 'object_name': 'ImageBannerSet'},
'banners': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'banner_sets'", 'symmetrical': 'False', 'to': u"orm['core.ImageBanner']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'publish_at': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'retract_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'state': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
u'core.version': {
'Meta': {'object_name': 'Version'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number': ('django.db.models.fields.PositiveIntegerField', [], {}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'series': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'versions'", 'to': u"orm['core.VersionSeries']"}),
'state': ('django.db.models.fields.PositiveSmallIntegerField', [], {})
},
u'core.versionseries': {
'Meta': {'object_name': 'VersionSeries'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'staged_slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'photologue.photoeffect': {
'Meta': {'object_name': 'PhotoEffect'},
'background_color': ('django.db.models.fields.CharField', [], {'default': "'#FFFFFF'", 'max_length': '7'}),
'brightness': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'color': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'contrast': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'filters': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'reflection_size': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'reflection_strength': ('django.db.models.fields.FloatField', [], {'default': '0.6'}),
'sharpness': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'transpose_method': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'})
},
u'sites.site': {
'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['core']
# ===== libtiff/libtiff_ctypes.py (sephalon/pylibtiff, BSD-3-Clause) =====
"""
Ctypes based wrapper to libtiff library.
See TIFF.__doc__ for usage information.
Homepage: http://pylibtiff.googlecode.com/
"""
__author__ = 'Pearu Peterson'
__date__ = 'April 2009'
__license__ = 'BSD'
__version__ = '0.3-svn'
__all__ = ['libtiff', 'TIFF']
import os
import sys
import numpy as np
from numpy import ctypeslib
import ctypes
import ctypes.util
import struct
import collections
if os.name=='nt':
# assume that the directory of libtiff3.dll is in PATH.
lib = ctypes.util.find_library('libtiff3')
if lib is None:
# try default installation path:
lib = r'C:\Program Files\GnuWin32\bin\libtiff3.dll'
if os.path.isfile (lib):
print 'You should add %r to PATH environment variable and reboot.' % (os.path.dirname (lib))
else:
lib = None
else:
if hasattr(sys, 'frozen') and sys.platform == 'darwin' and os.path.exists('../Frameworks/libtiff.dylib'):
# py2app support, see Issue 8.
lib = '../Frameworks/libtiff.dylib'
else:
lib = ctypes.util.find_library('tiff')
if lib is None:
raise ImportError('Failed to find TIFF library. Make sure that libtiff is installed and its location is listed in PATH|LD_LIBRARY_PATH|..')
libtiff = ctypes.cdll.LoadLibrary(lib)
libtiff.TIFFGetVersion.restype = ctypes.c_char_p
libtiff.TIFFGetVersion.argtypes = []
libtiff_version_str = libtiff.TIFFGetVersion()
i = libtiff_version_str.lower().split().index('version')
assert i!=-1,`libtiff_version_str`
libtiff_version = libtiff_version_str.split()[i+1]
tiff_h_name = 'tiff_h_%s' % (libtiff_version.replace ('.','_'))
try:
exec 'import %s as tiff_h' % (tiff_h_name)
except ImportError:
tiff_h = None
if tiff_h is None:
include_tiff_h = os.path.join(os.path.split(lib)[0], '..', 'include', 'tiff.h')
if not os.path.isfile(include_tiff_h):
# fix me for windows:
include_tiff_h = os.path.join('/usr','include','tiff.h')
if not os.path.isfile(include_tiff_h):
import glob
include_tiff_h = (glob.glob(os.path.join('/usr','include','*linux-gnu','tiff.h')) + [include_tiff_h])[0]
if not os.path.isfile(include_tiff_h):
# Base it off of the python called
include_tiff_h = os.path.realpath(os.path.join(os.path.split(sys.executable)[0], '..', 'include', 'tiff.h'))
if not os.path.isfile(include_tiff_h):
raise ValueError('Failed to find TIFF header file (may be need to run: sudo apt-get install libtiff4-dev)')
# Read TIFFTAG_* constants for the header file:
f = open (include_tiff_h, 'r')
l = []
d = {}
for line in f.readlines():
if not line.startswith('#define'): continue
words = line[7:].lstrip().split()
if len(words)>2:
words[1] = ''.join(words[1:])
del words[2:]
if len (words)!=2: continue
name, value = words
i = value.find('/*')
if i!=-1: value = value[:i]
if value in d:
value = d[value]
else:
try:
value = eval(value)
except:
print `value, line`
raise
d[name] = value
l.append('%s = %s' % (name, value))
f.close()
fn = os.path.join (os.path.dirname (os.path.abspath (__file__)), tiff_h_name+'.py')
print 'Generating %r' % (fn)
f = open(fn, 'w')
f.write ('\n'.join(l) + '\n')
f.close()
else:
d = tiff_h.__dict__
d['TIFFTAG_CZ_LSMINFO'] = 34412
define_to_name_map = dict(Orientation={}, Compression={},
PhotoMetric={}, PlanarConfig={},
SampleFormat={}, FillOrder={},
FaxMode={}, TiffTag = {}
)
name_to_define_map = dict(Orientation={}, Compression={},
PhotoMetric={}, PlanarConfig={},
SampleFormat={}, FillOrder={},
FaxMode={}, TiffTag = {}
)
for name, value in d.items():
if name.startswith ('_'): continue
exec '%s = %s' % (name, value)
for n in define_to_name_map:
if name.startswith(n.upper()):
define_to_name_map[n][value] = name
name_to_define_map[n][name] = value
# types defined by tiff.h
class c_ttag_t(ctypes.c_uint): pass
class c_tdir_t(ctypes.c_uint16): pass
class c_tsample_t(ctypes.c_uint16): pass
class c_tstrip_t(ctypes.c_uint32): pass
class c_ttile_t(ctypes.c_uint32): pass
class c_tsize_t(ctypes.c_int32): pass
class c_toff_t(ctypes.c_int32): pass
class c_tdata_t(ctypes.c_void_p): pass
class c_thandle_t(ctypes.c_void_p): pass
# types defined for creating custom tags
FIELD_CUSTOM = 65
class TIFFDataType(object):
"""Place holder for the enum in C.
typedef enum {
TIFF_NOTYPE = 0, /* placeholder */
TIFF_BYTE = 1, /* 8-bit unsigned integer */
TIFF_ASCII = 2, /* 8-bit bytes w/ last byte null */
TIFF_SHORT = 3, /* 16-bit unsigned integer */
TIFF_LONG = 4, /* 32-bit unsigned integer */
TIFF_RATIONAL = 5, /* 64-bit unsigned fraction */
TIFF_SBYTE = 6, /* !8-bit signed integer */
TIFF_UNDEFINED = 7, /* !8-bit untyped data */
TIFF_SSHORT = 8, /* !16-bit signed integer */
TIFF_SLONG = 9, /* !32-bit signed integer */
TIFF_SRATIONAL = 10, /* !64-bit signed fraction */
TIFF_FLOAT = 11, /* !32-bit IEEE floating point */
TIFF_DOUBLE = 12, /* !64-bit IEEE floating point */
TIFF_IFD = 13 /* %32-bit unsigned integer (offset) */
} TIFFDataType;
"""
ctype = ctypes.c_int
TIFF_NOTYPE = 0
TIFF_BYTE = 1
TIFF_ASCII = 2
TIFF_SHORT = 3
TIFF_LONG = 4
TIFF_RATIONAL = 5
TIFF_SBYTE = 6
TIFF_UNDEFINED = 7
TIFF_SSHORT = 8
TIFF_SLONG = 9
TIFF_SRATIONAL = 10
TIFF_FLOAT = 11
TIFF_DOUBLE = 12
TIFF_IFD = 13
ttype2ctype = {
TIFFDataType.TIFF_NOTYPE : None,
TIFFDataType.TIFF_BYTE : ctypes.c_ubyte,
TIFFDataType.TIFF_ASCII : ctypes.c_char_p,
TIFFDataType.TIFF_SHORT : ctypes.c_uint16,
TIFFDataType.TIFF_LONG : ctypes.c_uint32,
TIFFDataType.TIFF_RATIONAL : ctypes.c_double, # Should be unsigned
TIFFDataType.TIFF_SBYTE : ctypes.c_byte,
TIFFDataType.TIFF_UNDEFINED : ctypes.c_char,
TIFFDataType.TIFF_SSHORT : ctypes.c_int16,
TIFFDataType.TIFF_SLONG : ctypes.c_int32,
TIFFDataType.TIFF_SRATIONAL : ctypes.c_double,
TIFFDataType.TIFF_FLOAT : ctypes.c_float,
TIFFDataType.TIFF_DOUBLE : ctypes.c_double,
TIFFDataType.TIFF_IFD : ctypes.c_uint32
}
class TIFFFieldInfo(ctypes.Structure):
"""
typedef struct {
ttag_t field_tag; /* field's tag */
short field_readcount; /* read count/TIFF_VARIABLE/TIFF_SPP */
short field_writecount; /* write count/TIFF_VARIABLE */
TIFFDataType field_type; /* type of associated data */
unsigned short field_bit; /* bit in fieldsset bit vector */
unsigned char field_oktochange; /* if true, can change while writing */
unsigned char field_passcount; /* if true, pass dir count on set */
char *field_name; /* ASCII name */
} TIFFFieldInfo;
"""
_fields_ = [
("field_tag", ctypes.c_uint32),
("field_readcount", ctypes.c_short),
("field_writecount", ctypes.c_short),
("field_type", TIFFDataType.ctype),
("field_bit", ctypes.c_ushort),
("field_oktochange", ctypes.c_ubyte),
("field_passcount", ctypes.c_ubyte),
("field_name", ctypes.c_char_p)
]
# Custom Tags
class TIFFExtender(object):
def __init__(self, new_tag_list):
self._ParentExtender = None
self.new_tag_list = new_tag_list
def extender_pyfunc(tiff_struct):
libtiff.TIFFMergeFieldInfo(tiff_struct, self.new_tag_list, len(self.new_tag_list))
if self._ParentExtender:
self._ParentExtender(tiff_struct)
# Just make being a void function more obvious
return
# ctypes callback function prototype (return void, arguments void pointer)
self.EXT_FUNC = ctypes.CFUNCTYPE(None, ctypes.c_void_p)
# ctypes callback function instance
self.EXT_FUNC_INST = self.EXT_FUNC(extender_pyfunc)
libtiff.TIFFSetTagExtender.restype = ctypes.CFUNCTYPE(None, ctypes.c_void_p)
self._ParentExtender = libtiff.TIFFSetTagExtender(self.EXT_FUNC_INST)
def add_tags(tag_list):
tag_list_array = (TIFFFieldInfo * len(tag_list))(*tag_list)
for field_info in tag_list_array:
name = "TIFFTAG_" + str(field_info.field_name).upper()
exec 'global %s; %s = %s' % (name, name, field_info.field_tag)
if field_info.field_writecount > 1 and field_info.field_type != TIFFDataType.TIFF_ASCII:
tifftags[field_info.field_tag] = (ttype2ctype[field_info.field_type]*field_info.field_writecount, lambda d:d.contents[:])
else:
tifftags[field_info.field_tag] = (ttype2ctype[field_info.field_type], lambda d:d.value)
return TIFFExtender(tag_list_array)
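# A hedged sketch of registering a custom tag (the tag number 40999 and the name
# "MyCustomTag" below are illustrative, not defined by libtiff): build TIFFFieldInfo
# entries and keep the returned extender object alive for as long as the tags are used.
#
#   my_tags = [TIFFFieldInfo(40999, 1, 1, TIFFDataType.TIFF_LONG, FIELD_CUSTOM,
#                            True, False, "MyCustomTag")]
#   my_extender = add_tags(my_tags)  # keep a reference, do not let it be garbage-collected
#
# See _test_custom_tags() near the end of this file for a complete working example.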
tifftags = {
#TODO:
#TIFFTAG_DOTRANGE 2 uint16*
#TIFFTAG_HALFTONEHINTS 2 uint16*
#TIFFTAG_PAGENUMBER 2 uint16*
#TIFFTAG_YCBCRSUBSAMPLING 2 uint16*
#TIFFTAG_EXTRASAMPLES 2 uint16*,uint16** count & types array
#TIFFTAG_FAXFILLFUNC 1 TIFFFaxFillFunc* G3/G4 compression pseudo-tag
#TIFFTAG_JPEGTABLES 2 u_short*,void** count & tables
#TIFFTAG_SUBIFD 2 uint16*,uint32** count & offsets array
#TIFFTAG_TRANSFERFUNCTION 1 or 3 uint16** 1<<BitsPerSample entry arrays
#TIFFTAG_ICCPROFILE 2 uint32*,void** count, profile data
# TIFFTAG: type, conversion
# 3 uint16* for Set, 3 uint16** for Get; size:(1<<BitsPerSample arrays)
TIFFTAG_COLORMAP: (ctypes.c_uint16, lambda d:(d[0].contents[:],d[1].contents[:],d[2].contents[:])),
TIFFTAG_ARTIST: (ctypes.c_char_p, lambda d:d.value),
TIFFTAG_COPYRIGHT: (ctypes.c_char_p, lambda d:d.value),
TIFFTAG_DATETIME: (ctypes.c_char_p, lambda d:d.value),
TIFFTAG_DOCUMENTNAME: (ctypes.c_char_p, lambda d:d.value),
TIFFTAG_HOSTCOMPUTER: (ctypes.c_char_p, lambda d:d.value),
TIFFTAG_IMAGEDESCRIPTION: (ctypes.c_char_p, lambda d:d.value),
TIFFTAG_INKNAMES: (ctypes.c_char_p, lambda d:d.value),
TIFFTAG_MAKE: (ctypes.c_char_p, lambda d:d.value),
TIFFTAG_MODEL: (ctypes.c_char_p, lambda d:d.value),
TIFFTAG_PAGENAME: (ctypes.c_char_p, lambda d:d.value),
TIFFTAG_SOFTWARE: (ctypes.c_char_p, lambda d:d.value),
TIFFTAG_TARGETPRINTER: (ctypes.c_char_p, lambda d:d.value),
TIFFTAG_BADFAXLINES: (ctypes.c_uint32, lambda d:d.value),
TIFFTAG_CONSECUTIVEBADFAXLINES: (ctypes.c_uint32, lambda d:d.value),
TIFFTAG_GROUP3OPTIONS: (ctypes.c_uint32, lambda d:d.value),
TIFFTAG_GROUP4OPTIONS: (ctypes.c_uint32, lambda d:d.value),
TIFFTAG_IMAGEDEPTH: (ctypes.c_uint32, lambda d:d.value),
TIFFTAG_IMAGEWIDTH: (ctypes.c_uint32, lambda d:d.value),
TIFFTAG_IMAGELENGTH: (ctypes.c_uint32, lambda d:d.value),
TIFFTAG_SAMPLESPERPIXEL: (ctypes.c_uint32, lambda d:d.value),
TIFFTAG_ROWSPERSTRIP: (ctypes.c_uint32, lambda d:d.value),
TIFFTAG_SUBFILETYPE: (ctypes.c_uint32, lambda d:d.value),
TIFFTAG_TILEDEPTH: (ctypes.c_uint32, lambda d:d.value),
TIFFTAG_TILELENGTH: (ctypes.c_uint32, lambda d:d.value),
TIFFTAG_TILEWIDTH: (ctypes.c_uint32, lambda d:d.value),
TIFFTAG_DNGVERSION: (ctypes.c_ubyte*4, lambda d:d.contents[:]),
TIFFTAG_BLACKLEVEL: (ctypes.POINTER(ctypes.c_double), lambda d:d.contents),
TIFFTAG_WHITELEVEL: (ctypes.POINTER(ctypes.c_uint32), lambda d:d.contents.value),
TIFFTAG_STRIPBYTECOUNTS: (ctypes.POINTER(ctypes.c_uint32), lambda d:d.contents),
TIFFTAG_STRIPOFFSETS: (ctypes.POINTER(ctypes.c_uint32), lambda d:d.contents),
TIFFTAG_TILEBYTECOUNTS: (ctypes.POINTER(ctypes.c_uint32), lambda d:d.contents),
TIFFTAG_TILEOFFSETS: (ctypes.POINTER(ctypes.c_uint32), lambda d:d.contents),
TIFFTAG_BITSPERSAMPLE: (ctypes.c_uint16, lambda d:d.value),
TIFFTAG_CLEANFAXDATA: (ctypes.c_uint16, lambda d:d.value),
TIFFTAG_COMPRESSION: (ctypes.c_uint16, lambda d:d.value),
TIFFTAG_DATATYPE: (ctypes.c_uint16, lambda d:d.value),
TIFFTAG_FILLORDER: (ctypes.c_uint16, lambda d:d.value),
TIFFTAG_INKSET: (ctypes.c_uint16, lambda d:d.value),
TIFFTAG_MATTEING: (ctypes.c_uint16, lambda d:d.value),
TIFFTAG_MAXSAMPLEVALUE: (ctypes.c_uint16, lambda d:d.value),
TIFFTAG_MINSAMPLEVALUE: (ctypes.c_uint16, lambda d:d.value),
TIFFTAG_ORIENTATION: (ctypes.c_uint16, lambda d:d.value),
TIFFTAG_PHOTOMETRIC: (ctypes.c_uint16, lambda d:d.value),
TIFFTAG_PLANARCONFIG: (ctypes.c_uint16, lambda d:d.value),
TIFFTAG_PREDICTOR: (ctypes.c_uint16, lambda d:d.value),
TIFFTAG_RESOLUTIONUNIT: (ctypes.c_uint16, lambda d:d.value),
TIFFTAG_SAMPLEFORMAT: (ctypes.c_uint16, lambda d:d.value),
TIFFTAG_YCBCRPOSITIONING: (ctypes.c_uint16, lambda d:d.value),
TIFFTAG_JPEGQUALITY: (ctypes.c_int, lambda d:d.value),
TIFFTAG_JPEGCOLORMODE: (ctypes.c_int, lambda d:d.value),
TIFFTAG_JPEGTABLESMODE: (ctypes.c_int, lambda d:d.value),
TIFFTAG_FAXMODE: (ctypes.c_int, lambda d:d.value),
TIFFTAG_SMAXSAMPLEVALUE: (ctypes.c_double, lambda d:d.value),
TIFFTAG_SMINSAMPLEVALUE: (ctypes.c_double, lambda d:d.value),
TIFFTAG_STONITS: (ctypes.c_double, lambda d:d.value),
TIFFTAG_XPOSITION: (ctypes.c_float, lambda d:d.value),
TIFFTAG_XRESOLUTION: (ctypes.c_float, lambda d:d.value),
TIFFTAG_YPOSITION: (ctypes.c_float, lambda d:d.value),
TIFFTAG_YRESOLUTION: (ctypes.c_float, lambda d:d.value),
TIFFTAG_PRIMARYCHROMATICITIES: (ctypes.c_float*6, lambda d:d.contents[:]),
TIFFTAG_REFERENCEBLACKWHITE: (ctypes.c_float*6, lambda d:d.contents[:]),
TIFFTAG_WHITEPOINT: (ctypes.c_float*2, lambda d:d.contents[:]),
TIFFTAG_YCBCRCOEFFICIENTS: (ctypes.c_float*3, lambda d:d.contents[:]),
TIFFTAG_CZ_LSMINFO: (c_toff_t, lambda d:d.value), # offset to CZ_LSMINFO record
TIFFTAG_CFAPATTERN: (ctypes.c_char*4, lambda d:d.contents[:]),
TIFFTAG_CFAREPEATPATTERNDIM: (ctypes.c_uint16*2, lambda d:d.contents[:]),
TIFFTAG_COLORMATRIX1: (ctypes.c_float*9, lambda d:d.contents[:]),
TIFFTAG_ASSHOTNEUTRAL: (ctypes.c_float*3, lambda d:d.contents[:]),
TIFFTAG_ASSHOTWHITEXY: (ctypes.c_float*2, lambda d:d.contents[:])
}
def debug(func):
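    # Call tracing is disabled: this early return hands the function back unchanged.
    # Remove the line below to re-enable the new_func wrapper that logs each call.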
return func
def new_func(*args, **kws):
print 'Calling',func.__name__
r = func (*args, **kws)
return r
return new_func
class TIFF(ctypes.c_void_p):
""" Holds a pointer to TIFF object.
To open a tiff file for reading, use
      tiff = TIFF.open (filename, mode='r')
To read an image from a tiff file, use
image = tiff.read_image()
where image will be a numpy array.
To read all images from a tiff file, use
for image in tiff.iter_images():
# do stuff with image
    To create a tiff file containing a numpy array as image, use
tiff = TIFF.open(filename, mode='w')
tiff.write_image(array)
tiff.close()
To copy and change tags from a tiff file:
tiff_in = TIFF.open(filename_in)
tiff_in.copy (filename_out, compression=, bitspersample=, sampleformat=,...)
"""
@staticmethod
def get_tag_name(tagvalue):
for kind in define_to_name_map:
tagname = define_to_name_map[kind].get (tagvalue)
if tagname is not None:
return tagname
@staticmethod
def get_tag_define(tagname):
if '_' in tagname:
kind, name = tagname.rsplit('_',1)
return name_to_define_map[kind.title()][tagname.upper()]
for kind in define_to_name_map:
tagvalue = name_to_define_map[kind].get((kind+'_'+tagname).upper ())
if tagvalue is not None:
return tagvalue
@classmethod
def open(cls, filename, mode='r'):
""" Open tiff file as TIFF.
"""
tiff = libtiff.TIFFOpen(filename, mode)
if tiff.value is None:
raise TypeError ('Failed to open file '+`filename`)
return tiff
@staticmethod
def get_numpy_type(bits, sample_format=None):
""" Return numpy dtype corresponding to bits and sample format.
"""
typ = None
if bits % 8 != 0:
raise NotImplementedError("bits = %d" % bits)
if sample_format == SAMPLEFORMAT_IEEEFP:
typ = getattr(np,'float%s' % (bits))
elif sample_format==SAMPLEFORMAT_UINT or sample_format is None:
typ = getattr(np,'uint%s' % (bits))
elif sample_format==SAMPLEFORMAT_INT:
typ = getattr(np,'int%s' % (bits))
elif sample_format==SAMPLEFORMAT_COMPLEXIEEEFP:
typ = getattr(np,'complex%s' % (bits))
else:
raise NotImplementedError (`sample_format`)
return typ
@debug
def read_image(self, verbose=False):
""" Read image from TIFF and return it as an array.
"""
width = self.GetField('ImageWidth')
height = self.GetField('ImageLength')
samples_pp = self.GetField('SamplesPerPixel') # this number includes extra samples
if samples_pp is None: # default is 1
samples_pp = 1
# Note: In the TIFF specification, BitsPerSample and SampleFormat are
# per samples. However, libtiff doesn't support mixed format, so it will
# always return just one value (or raise an error).
bits = self.GetField('BitsPerSample')
sample_format = self.GetField('SampleFormat')
planar_config = self.GetField('PlanarConfig')
if planar_config is None: # default is contig
planar_config = PLANARCONFIG_CONTIG
compression = self.GetField('Compression')
if compression is None: # default is no compression
compression = COMPRESSION_NONE
# TODO: rotate according to orientation
# TODO: might need special support if bits < 8
typ = self.get_numpy_type(bits, sample_format)
if samples_pp == 1:
# only 2 dimensions array
arr = np.empty((height, width), typ)
else:
if planar_config == PLANARCONFIG_CONTIG:
arr = np.empty((height, width, samples_pp), typ)
elif planar_config == PLANARCONFIG_SEPARATE:
arr = np.empty((samples_pp, height, width), typ)
else:
raise IOError("Unexpected PlanarConfig = %d" % planar_config)
size = arr.nbytes
if compression == COMPRESSION_NONE:
ReadStrip = self.ReadRawStrip
else:
ReadStrip = self.ReadEncodedStrip
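        # Read the strips straight into the numpy buffer: each call returns the
        # number of bytes actually read, so 'pos' advances through arr's memory.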
pos = 0
for strip in range(self.NumberOfStrips()):
elem = ReadStrip(strip, arr.ctypes.data + pos, max(size - pos, 0))
pos += elem
return arr
@staticmethod
def _fix_compression(value):
if isinstance(value, int):
return value
elif value is None:
return COMPRESSION_NONE
elif isinstance(value, str):
return name_to_define_map['Compression']['COMPRESSION_'+value.upper()]
else:
raise NotImplementedError(`value`)
@staticmethod
def _fix_sampleformat(value):
if isinstance(value, int):
return value
elif value is None:
return SAMPLEFORMAT_UINT
elif isinstance(value, str):
return dict(int=SAMPLEFORMAT_INT, uint=SAMPLEFORMAT_UINT,
float=SAMPLEFORMAT_IEEEFP, complex=SAMPLEFORMAT_COMPLEXIEEEFP)[value.lower()]
else:
raise NotImplementedError(`value`)
def write_image(self, arr, compression=None, write_rgb=False):
""" Write array as TIFF image.
Parameters
----------
arr : :numpy:`ndarray`
Specify image data of rank 1 to 3.
compression : {None, 'ccittrle', 'ccittfax3','ccitt_t4','ccittfax4','ccitt_t6','lzw','ojpeg','jpeg','next','ccittrlew','packbits','thunderscan','it8ctpad','it8lw','it8mp','it8bl','pixarfilm','pixarlog','deflate','adobe_deflate','dcs','jbig','sgilog','sgilog24','jp2000'}
write_rgb: bool
Write rgb image if data has 3 dimensions (otherwise, writes a multipage TIFF).
"""
COMPRESSION = self._fix_compression (compression)
arr = np.ascontiguousarray(arr)
sample_format = None
if arr.dtype in np.sctypes['float']:
sample_format = SAMPLEFORMAT_IEEEFP
elif arr.dtype in np.sctypes['uint']+[np.bool]:
sample_format = SAMPLEFORMAT_UINT
elif arr.dtype in np.sctypes['int']:
sample_format = SAMPLEFORMAT_INT
elif arr.dtype in np.sctypes['complex']:
sample_format = SAMPLEFORMAT_COMPLEXIEEEFP
else:
raise NotImplementedError(`arr.dtype`)
shape=arr.shape
bits = arr.itemsize * 8
if compression==COMPRESSION_NONE:
WriteStrip = self.WriteRawStrip
else:
WriteStrip = self.WriteEncodedStrip
if len(shape)==1:
width, = shape
size = width * arr.itemsize
self.SetField(TIFFTAG_IMAGEWIDTH, width)
self.SetField(TIFFTAG_IMAGELENGTH, 1)
self.SetField(TIFFTAG_BITSPERSAMPLE, bits)
self.SetField(TIFFTAG_COMPRESSION, COMPRESSION)
self.SetField(TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_MINISBLACK)
self.SetField(TIFFTAG_ORIENTATION, ORIENTATION_TOPLEFT)
self.SetField(TIFFTAG_PLANARCONFIG, PLANARCONFIG_CONTIG)
if sample_format is not None:
self.SetField(TIFFTAG_SAMPLEFORMAT, sample_format)
WriteStrip(0, arr.ctypes.data, size)
self.WriteDirectory()
elif len(shape)==2:
height, width = shape
size = width * height * arr.itemsize
self.SetField(TIFFTAG_IMAGEWIDTH, width)
self.SetField(TIFFTAG_IMAGELENGTH, height)
self.SetField(TIFFTAG_BITSPERSAMPLE, bits)
self.SetField(TIFFTAG_COMPRESSION, COMPRESSION)
#self.SetField(TIFFTAG_SAMPLESPERPIXEL, 1)
self.SetField(TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_MINISBLACK)
self.SetField(TIFFTAG_ORIENTATION, ORIENTATION_TOPLEFT)
self.SetField(TIFFTAG_PLANARCONFIG, PLANARCONFIG_CONTIG)
if sample_format is not None:
self.SetField(TIFFTAG_SAMPLEFORMAT, sample_format)
WriteStrip(0, arr.ctypes.data, size)
self.WriteDirectory()
elif len(shape)==3:
if write_rgb:
# Guess the planar config, with a preference for separate planes
if shape[2] == 3 or shape[2] == 4:
planar_config = PLANARCONFIG_CONTIG
height, width, depth = shape
size = arr.nbytes
else:
planar_config = PLANARCONFIG_SEPARATE
depth, height, width = shape
size = width * height * arr.itemsize
self.SetField(TIFFTAG_BITSPERSAMPLE, bits)
self.SetField(TIFFTAG_COMPRESSION, COMPRESSION)
self.SetField(TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_RGB)
self.SetField(TIFFTAG_ORIENTATION, ORIENTATION_TOPLEFT)
self.SetField(TIFFTAG_IMAGEWIDTH, width)
self.SetField(TIFFTAG_IMAGELENGTH, height)
self.SetField(TIFFTAG_SAMPLESPERPIXEL, depth)
self.SetField(TIFFTAG_PLANARCONFIG, planar_config)
if sample_format is not None:
self.SetField(TIFFTAG_SAMPLEFORMAT, sample_format)
if depth == 4: # RGBA
self.SetField(TIFFTAG_EXTRASAMPLES, [EXTRASAMPLE_UNASSALPHA],
count=1)
elif depth > 4: # No idea...
self.SetField(TIFFTAG_EXTRASAMPLES,
[EXTRASAMPLE_UNSPECIFIED] * (depth - 3),
count=(depth - 3))
if planar_config == PLANARCONFIG_CONTIG:
WriteStrip(0, arr.ctypes.data, size)
else:
for n in range(depth):
WriteStrip(n, arr[n, :, :].ctypes.data, size)
self.WriteDirectory()
else:
depth, height, width = shape
size = width * height * arr.itemsize
for n in range(depth):
self.SetField(TIFFTAG_IMAGEWIDTH, width)
self.SetField(TIFFTAG_IMAGELENGTH, height)
self.SetField(TIFFTAG_BITSPERSAMPLE, bits)
self.SetField(TIFFTAG_COMPRESSION, COMPRESSION)
#self.SetField(TIFFTAG_SAMPLESPERPIXEL, 1)
self.SetField(TIFFTAG_PHOTOMETRIC, PHOTOMETRIC_MINISBLACK)
self.SetField(TIFFTAG_ORIENTATION, ORIENTATION_TOPLEFT)
self.SetField(TIFFTAG_PLANARCONFIG, PLANARCONFIG_CONTIG)
if sample_format is not None:
self.SetField(TIFFTAG_SAMPLEFORMAT, sample_format)
WriteStrip(0, arr[n].ctypes.data, size)
self.WriteDirectory()
else:
raise NotImplementedError (`shape`)
def write_tiles(self, arr):
num_tcols = self.GetField("TileWidth")
if num_tcols is None:
raise ValueError("TIFFTAG_TILEWIDTH must be set to write tiles")
num_trows = self.GetField("TileLength")
if num_trows is None:
num_trows = 1
num_irows = self.GetField("ImageLength")
if num_irows is None:
num_irows = 1
num_icols = self.GetField("ImageWidth")
if num_icols is None:
            raise ValueError("TIFFTAG_IMAGEWIDTH must be set to write tiles")
num_idepth = self.GetField("ImageDepth")
if num_idepth is None:
num_idepth = 1
if len(arr.shape) == 1 and arr.shape[0] != num_icols:
raise ValueError("Input array %r must have the same shape as the image tags %r" % (arr.shape,(num_icols,)))
if len(arr.shape) == 2 and (arr.shape[0] != num_irows or arr.shape[1] != num_icols):
raise ValueError("Input array %r must have same shape as image tags %r" % (arr.shape,(num_irows,num_icols)))
if len(arr.shape) == 3 and (arr.shape[0] != num_idepth or arr.shape[1] != num_irows or arr.shape[2] != num_icols):
raise ValueError("Input array %r must have same shape as image tags %r" % (arr.shape,(num_idepth,num_irows,num_icols)))
if len(arr.shape) > 3:
raise ValueError("Can not write tiles for more than 3 dimensions")
status = 0
tile_arr = np.zeros((num_trows, num_tcols), dtype=arr.dtype)
# z direction / depth
for z in range(0, num_idepth):
# Rows
for y in range(0, num_irows, num_trows):
# Cols
for x in range(0, num_icols, num_tcols):
# If we are over the edge of the image, use 0 as fill
tile_arr[:] = 0
if len(arr.shape) == 3:
if ((y + num_trows) > num_irows) or ((x + num_tcols) > num_icols):
tile_arr[:num_irows-y,:num_icols-x] = arr[z,y:y+num_trows,x:x+num_tcols]
else:
tile_arr[:,:] = arr[z,y:y+num_trows,x:x+num_tcols]
elif len(arr.shape) == 2:
if ((y + num_trows) > num_irows) or ((x + num_tcols) > num_icols):
tile_arr[:num_irows-y,:num_icols-x] = arr[y:y+num_trows, x:x+num_tcols]
else:
tile_arr[:,:] = arr[y:y+num_trows,x:x+num_tcols]
elif len(arr.shape) == 1:
# This doesn't make much sense for 1D arrays, waste of space if tiles are 2D
if ((x + num_tcols) > num_icols):
tile_arr[0,:num_icols-x] = arr[x:x+num_tcols]
else:
tile_arr[0,:] = arr[x:x+num_tcols]
tile_arr = np.ascontiguousarray(tile_arr)
r = libtiff.TIFFWriteTile(self, tile_arr.ctypes.data, x, y, z, 0)
status = status + r.value
return status
def read_tiles(self, dtype=np.uint8):
num_tcols = self.GetField("TileWidth")
if num_tcols is None:
            raise ValueError("TIFFTAG_TILEWIDTH must be set to read tiles")
num_trows = self.GetField("TileLength")
if num_trows is None:
num_trows = 1
num_icols = self.GetField("ImageWidth")
if num_icols is None:
            raise ValueError("TIFFTAG_IMAGEWIDTH must be set to read tiles")
num_irows = self.GetField("ImageLength")
if num_irows is None:
num_irows = 1
num_idepth = self.GetField("ImageDepth")
if num_idepth is None:
num_idepth = 1
if num_idepth == 1 and num_irows == 1:
# 1D
full_image = np.zeros((num_icols,), dtype=dtype)
elif num_idepth == 1:
# 2D
full_image = np.zeros((num_irows,num_icols), dtype=dtype)
else:
# 3D
full_image = np.zeros((num_idepth,num_irows,num_icols), dtype=dtype)
tmp_tile = np.zeros((num_trows,num_tcols), dtype=dtype)
tmp_tile = np.ascontiguousarray(tmp_tile)
for z in range(0, num_idepth):
for y in range(0, num_irows, num_trows):
for x in range(0, num_icols, num_tcols):
r = libtiff.TIFFReadTile(self, tmp_tile.ctypes.data, x, y, z, 0)
if not r:
raise ValueError("Could not read tile x:%d,y:%d,z:%d from file" % (x,y,z))
if ((y + num_trows) > num_irows) or ((x + num_tcols) > num_icols):
# We only need part of the tile because we are on the edge
if num_idepth == 1 and num_irows == 1:
full_image[x:x+num_tcols] = tmp_tile[0,:num_icols-x]
elif num_idepth == 1:
full_image[y:y+num_trows,x:x+num_tcols] = tmp_tile[:num_irows-y,:num_icols-x]
else:
full_image[z,y:y+num_trows,x:x+num_tcols] = tmp_tile[:num_irows-y,:num_icols-x]
else:
if num_idepth == 1 and num_irows == 1:
full_image[x:x+num_tcols] = tmp_tile[0,:]
elif num_idepth == 1:
full_image[y:y+num_trows, x:x+num_tcols] = tmp_tile[:,:]
else:
full_image[z,y:y+num_trows, x:x+num_tcols] = tmp_tile[:,:]
return full_image
def iter_images(self, verbose=False):
""" Iterator of all images in a TIFF file.
"""
yield self.read_image(verbose=verbose)
while not self.LastDirectory():
self.ReadDirectory()
yield self.read_image(verbose=verbose)
self.SetDirectory(0)
def __del__(self):
self.close()
@debug
def FileName(self): return libtiff.TIFFFileName(self)
@debug
def CurrentRow(self): return libtiff.TIFFCurrentRow(self)
@debug
def CurrentStrip(self): return libtiff.TIFFCurrentStrip(self)
@debug
def CurrentTile(self): return libtiff.TIFFCurrentTile(self)
@debug
def CurrentDirectory(self): return libtiff.TIFFCurrentDirectory(self)
@debug
def LastDirectory(self): return libtiff.TIFFLastDirectory(self)
@debug
def ReadDirectory(self): return libtiff.TIFFReadDirectory(self)
@debug
def WriteDirectory(self):
r = libtiff.TIFFWriteDirectory(self)
assert r==1, `r`
@debug
def SetDirectory(self, dirnum): return libtiff.TIFFSetDirectory(self, dirnum)
@debug
def Fileno(self): return libtiff.TIFFFileno(self)
@debug
def GetMode(self): return libtiff.TIFFGetMode(self)
@debug
def IsTiled(self): return libtiff.TIFFIsTiled(self)
@debug
def IsByteSwapped(self): return libtiff.TIFFIsByteSwapped(self)
@debug
def IsUpSampled(self): return libtiff.TIFFIsUpSampled(self)
@debug
def IsMSB2LSB(self): return libtiff.TIFFIsMSB2LSB(self)
@debug
def NumberOfStrips(self): return libtiff.TIFFNumberOfStrips(self).value
#@debug
def ReadRawStrip(self, strip, buf, size):
return libtiff.TIFFReadRawStrip(self, strip, buf, size).value
def ReadEncodedStrip(self, strip, buf, size):
return libtiff.TIFFReadEncodedStrip(self, strip, buf, size).value
def StripSize(self):
return libtiff.TIFFStripSize(self).value
def RawStripSize(self, strip):
        return libtiff.TIFFRawStripSize(self, strip).value
@debug
def WriteRawStrip(self, strip, buf, size):
r = libtiff.TIFFWriteRawStrip(self, strip, buf, size)
assert r.value==size,`r.value, size`
@debug
def WriteEncodedStrip(self, strip, buf, size):
r = libtiff.TIFFWriteEncodedStrip(self, strip, buf, size)
assert r.value==size,`r.value, size`
closed = False
def close(self, libtiff=libtiff):
if not self.closed and self.value is not None:
libtiff.TIFFClose(self)
self.closed = True
return
#def (self): return libtiff.TIFF(self)
#@debug
def GetField(self, tag, ignore_undefined_tag=True, count=None):
""" Return TIFF field value with tag.
tag can be numeric constant TIFFTAG_<tagname> or a
string containing <tagname>.
"""
if tag in ['PixelSizeX', 'PixelSizeY', 'RelativeTime']:
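            # These are pseudo-tags: their values are parsed out of the
            # ImageDescription string rather than read through libtiff.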
descr = self.GetField('ImageDescription')
if not descr:
return
i = descr.find (tag)
if i==-1:
return
value = eval(descr[i+len (tag):].lstrip().split()[0])
return value
if isinstance(tag, str):
tag = eval('TIFFTAG_' + tag.upper())
t = tifftags.get(tag)
if t is None:
if not ignore_undefined_tag:
print 'Warning: no tag %r defined' % (tag)
return
data_type, convert = t
if tag == TIFFTAG_COLORMAP:
bps = self.GetField("BitsPerSample")
if bps is None:
print "Warning: BitsPerSample is required to get ColorMap, assuming 8 bps..."
bps = 8
num_cmap_elems = 1 << bps
data_type = data_type * num_cmap_elems
pdt = ctypes.POINTER(data_type)
rdata = pdt()
gdata = pdt()
bdata = pdt()
rdata_ptr = ctypes.byref(rdata)
gdata_ptr = ctypes.byref(gdata)
bdata_ptr = ctypes.byref(bdata)
# ignore count, it's not used for colormap
libtiff.TIFFGetField.argtypes = libtiff.TIFFGetField.argtypes[:2] + [ctypes.c_void_p]*3
r = libtiff.TIFFGetField(self, tag, rdata_ptr, gdata_ptr, bdata_ptr)
data = (rdata,gdata,bdata)
else:
if issubclass(data_type, ctypes.Array):
pdt = ctypes.POINTER(data_type)
data = pdt()
else:
data = data_type()
if count is None:
libtiff.TIFFGetField.argtypes = libtiff.TIFFGetField.argtypes[:2] + [ctypes.c_void_p]
r = libtiff.TIFFGetField(self, tag, ctypes.byref(data))
else:
count = ctypes.c_int(count)
libtiff.TIFFGetField.argtypes = libtiff.TIFFGetField.argtypes[:2] + [ctypes.POINTER(ctypes.c_int), ctypes.c_void_p]
r = libtiff.TIFFGetField(self, tag, ctypes.byref(count), ctypes.byref(data))
if not r: # tag not defined for current directory
if not ignore_undefined_tag:
                print 'Warning: tag %r not defined in current directory' % (tag)
return None
return convert(data)
#@debug
def SetField(self, tag, value, count=None):
""" Set TIFF field value with tag.
tag can be numeric constant TIFFTAG_<tagname> or a
string containing <tagname>.
"""
if isinstance(tag, str):
tag = eval('TIFFTAG_' + tag.upper())
t = tifftags.get(tag)
if t is None:
print 'Warning: no tag %r defined' % (tag)
return
data_type, convert = t
#if data_type == ctypes.c_float:
# data_type = ctypes.c_double
if tag == TIFFTAG_COLORMAP:
# ColorMap passes 3 values each a c_uint16 pointer
try:
r_arr,g_arr,b_arr = value
except (TypeError, ValueError):
print "Error: TIFFTAG_COLORMAP expects 3 uint16* arrays as a list/tuple of lists"
r_arr,g_arr,b_arr = None,None,None
if r_arr is None:
return
bps = self.GetField("BitsPerSample")
if bps is None:
                print "Warning: BitsPerSample is required to set ColorMap, assuming 8 bps..."
bps = 8
num_cmap_elems = 1 << bps
data_type = data_type * num_cmap_elems
r_ptr = data_type(*r_arr)
g_ptr = data_type(*g_arr)
b_ptr = data_type(*b_arr)
libtiff.TIFFSetField.argtypes = libtiff.TIFFSetField.argtypes[:2] + [ctypes.POINTER(data_type)]*3
r = libtiff.TIFFSetField(self, tag, r_ptr, g_ptr, b_ptr)
else:
if issubclass(data_type, ctypes.Array):
data = data_type(*value)
elif issubclass(data_type, ctypes._Pointer): # does not include c_char_p
# convert to the base type, ctypes will take care of actually
# sending it by reference
base_type = data_type._type_
if isinstance(value, collections.Iterable):
data = base_type(*value)
else:
data = base_type(value)
else:
data = data_type(value)
# TODO: for most of the tags, count is len(value), so it shouldn't be needed
if count is None:
libtiff.TIFFSetField.argtypes = libtiff.TIFFSetField.argtypes[:2] + [data_type]
r = libtiff.TIFFSetField(self, tag, data)
else:
libtiff.TIFFSetField.argtypes = libtiff.TIFFSetField.argtypes[:2] + [ctypes.c_uint, data_type]
r = libtiff.TIFFSetField(self, tag, count, data)
return r
def info(self):
""" Return a string containing <tag name: field value> map.
"""
l = []
l.append ('FileName: %s' % (self.FileName()))
for tagname in ['Artist', 'CopyRight', 'DateTime', 'DocumentName',
'HostComputer', 'ImageDescription', 'InkNames',
'Make', 'Model', 'PageName', 'Software', 'TargetPrinter',
'BadFaxLines', 'ConsecutiveBadFaxLines',
'Group3Options', 'Group4Options',
'ImageDepth', 'ImageWidth', 'ImageLength',
'RowsPerStrip', 'SubFileType',
'TileDepth', 'TileLength', 'TileWidth',
'StripByteCounts', 'StripOffSets',
'TileByteCounts', 'TileOffSets',
'BitsPerSample', 'CleanFaxData', 'Compression',
'DataType', 'FillOrder', 'InkSet', 'Matteing',
'MaxSampleValue', 'MinSampleValue', 'Orientation',
'PhotoMetric', 'PlanarConfig', 'Predictor',
'ResolutionUnit', 'SampleFormat', 'YCBCRPositioning',
'JPEGQuality', 'JPEGColorMode', 'JPEGTablesMode',
'FaxMode', 'SMaxSampleValue', 'SMinSampleValue',
#'Stonits',
'XPosition', 'YPosition', 'XResolution', 'YResolution',
'PrimaryChromaticities', 'ReferenceBlackWhite',
'WhitePoint', 'YCBCRCoefficients',
'PixelSizeX','PixelSizeY', 'RelativeTime',
'CZ_LSMInfo'
]:
v = self.GetField(tagname)
if v:
if isinstance (v, int):
v = define_to_name_map.get(tagname, {}).get(v, v)
l.append('%s: %s' % (tagname, v))
if tagname=='CZ_LSMInfo':
print CZ_LSMInfo(self)
return '\n'.join(l)
def copy(self, filename, **kws):
""" Copy opened TIFF file to a new file.
Use keyword arguments to redefine tag values.
Parameters
----------
filename : str
Specify the name of file where TIFF file is copied to.
compression : {'none', 'lzw', 'deflate', ...}
Specify compression scheme.
bitspersample : {8,16,32,64,128,256}
Specify bit size of a sample.
sampleformat : {'uint', 'int', 'float', 'complex'}
Specify sample format.
"""
other = TIFF.open(filename, mode='w')
define_rewrite = {}
for name, value in kws.items():
define = TIFF.get_tag_define(name)
assert define is not None
if name=='compression':
value = TIFF._fix_compression(value)
if name=='sampleformat':
value = TIFF._fix_sampleformat(value)
define_rewrite[define] = value
name_define_list = name_to_define_map['TiffTag'].items()
self.SetDirectory(0)
self.ReadDirectory()
while 1:
other.SetDirectory(self.CurrentDirectory())
bits = self.GetField('BitsPerSample')
sample_format = self.GetField('SampleFormat')
            assert bits >=8, `bits, sample_format`
itemsize = bits // 8
dtype = self.get_numpy_type(bits, sample_format)
for name, define in name_define_list:
orig_value = self.GetField(define)
if orig_value is None and define not in define_rewrite:
continue
if name.endswith('OFFSETS') or name.endswith('BYTECOUNTS'):
continue
if define in define_rewrite:
value = define_rewrite[define]
else:
value = orig_value
if value is None:
continue
other.SetField(define, value)
new_bits = other.GetField('BitsPerSample')
new_sample_format = other.GetField('SampleFormat')
new_dtype = other.get_numpy_type(new_bits, new_sample_format)
assert new_bits >=8, `new_bits, new_sample_format, new_dtype`
new_itemsize = new_bits // 8
strip_size = self.StripSize()
new_strip_size = self.StripSize()
buf = np.zeros(strip_size // itemsize, dtype)
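            # Re-encode each strip, converting the samples to the requested dtype
            # so that bit depth and sample format may change between input and output.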
for strip in range(self.NumberOfStrips()):
elem = self.ReadEncodedStrip(strip, buf.ctypes.data, strip_size)
if elem>0:
new_buf = buf.astype(new_dtype)
other.WriteEncodedStrip(strip, new_buf.ctypes.data, (elem * new_itemsize)//itemsize)
self.ReadDirectory()
if self.LastDirectory ():
break
other.close ()
class TIFF3D(TIFF):
""" subclass of TIFF for handling import of 3D (multi-directory) files.
like TIFF, but TIFF3D.read_image() will attempt to restore a 3D numpy array
when given a multi-image TIFF file; performing the inverse of
TIFF_instance.write(numpy.zeros((40, 200, 200)))
like so:
arr = TIFF3D_instance.read_image()
arr.shape # gives (40, 200, 200)
if you tried this with a normal TIFF instance, you would get this:
arr = TIFF_instance.read_image()
arr.shape # gives (200, 200)
and you would have to loop over each image by hand with TIFF.iter_images().
"""
@classmethod
def open(cls, filename, mode='r'):
""" just like TIFF.open, except returns a TIFF3D instance.
"""
# monkey-patch the restype:
old_restype = libtiff.TIFFOpen.restype
libtiff.TIFFOpen.restype = TIFF3D
# actually call the library function:
tiff = libtiff.TIFFOpen(filename, mode)
# restore the old restype:
libtiff.TIFFOpen.restype = old_restype
if tiff.value is None:
raise TypeError ('Failed to open file '+`filename`)
return tiff
@debug
def read_image(self, verbose=False, as3d=True):
""" Read image from TIFF and return it as a numpy array.
If as3d is passed True (default), will attempt to read multiple
directories, and restore as slices in a 3D array. ASSUMES that all
images in the tiff file have the same width, height, bits-per-sample,
compression, and so on. If you get a segfault, this is probably the
problem.
"""
if not as3d:
return TIFF.read_image(self, verbose)
# Code is initially copy-paste from TIFF:
width = self.GetField('ImageWidth')
height = self.GetField('ImageLength')
bits = self.GetField('BitsPerSample')
sample_format = self.GetField('SampleFormat')
compression = self.GetField('Compression')
typ = self.get_numpy_type(bits, sample_format)
if typ is None:
if bits==1:
typ = np.uint8
itemsize = 1
elif bits==4:
typ = np.uint32
itemsize = 4
else:
raise NotImplementedError (`bits`)
else:
itemsize = bits/8
# in order to allocate the numpy array, we must count the directories:
# code borrowed from TIFF.iter_images():
depth = 0
while True:
depth += 1
if self.LastDirectory():
break
self.ReadDirectory()
self.SetDirectory(0)
# we proceed assuming all directories have the same properties from above.
layer_size = width * height * itemsize
total_size = layer_size * depth
arr = np.zeros((depth, height, width), typ)
if compression == COMPRESSION_NONE:
ReadStrip = self.ReadRawStrip
else:
ReadStrip = self.ReadEncodedStrip
layer = 0
while True:
pos = 0
elem = None
for strip in range (self.NumberOfStrips()):
if elem is None:
elem = ReadStrip(strip, arr.ctypes.data + layer * layer_size + pos, layer_size)
elif elem:
elem = ReadStrip(strip, arr.ctypes.data + layer * layer_size + pos, min(layer_size - pos, elem))
pos += elem
if self.LastDirectory():
break
self.ReadDirectory()
layer += 1
self.SetDirectory(0)
return arr
class CZ_LSMInfo:
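    # Experimental helper for the Zeiss CZ_LSMINFO private tag: extract_info()
    # currently only prints raw header fields for debugging purposes.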
def __init__(self, tiff):
self.tiff = tiff
self.filename = tiff.FileName()
self.offset = tiff.GetField(TIFFTAG_CZ_LSMINFO)
self.extract_info()
def extract_info (self):
if self.offset is None:
return
f = libtiff.TIFFFileno(self.tiff)
fd = os.fdopen(f, 'r')
pos = fd.tell()
self.offset = self.tiff.GetField(TIFFTAG_CZ_LSMINFO)
print os.lseek(f, 0, 1)
print pos
#print libtiff.TIFFSeekProc(self.tiff, 0, 1)
fd.seek(0)
print struct.unpack ('HH', fd.read (4))
print struct.unpack('I',fd.read (4))
print struct.unpack('H',fd.read (2))
fd.seek(self.offset)
d = [('magic_number', 'i4'),
('structure_size', 'i4')]
print pos, np.rec.fromfile(fd, d, 1)
fd.seek(pos)
#print hex (struct.unpack('I', fd.read (4))[0])
#fd.close()
def __str__ (self):
return '%s: %s' % (self.filename, self.offset)
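# ctypes prototypes: declare restype/argtypes for the libtiff C functions that the
# wrapper classes above call, so that ctypes converts arguments and results correctly.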
libtiff.TIFFOpen.restype = TIFF
libtiff.TIFFOpen.argtypes = [ctypes.c_char_p, ctypes.c_char_p]
libtiff.TIFFFileName.restype = ctypes.c_char_p
libtiff.TIFFFileName.argtypes = [TIFF]
libtiff.TIFFFileno.restype = ctypes.c_int
libtiff.TIFFFileno.argtypes = [TIFF]
libtiff.TIFFCurrentRow.restype = ctypes.c_uint32
libtiff.TIFFCurrentRow.argtypes = [TIFF]
libtiff.TIFFCurrentStrip.restype = c_tstrip_t
libtiff.TIFFCurrentStrip.argtypes = [TIFF]
libtiff.TIFFCurrentTile.restype = c_ttile_t
libtiff.TIFFCurrentTile.argtypes = [TIFF]
libtiff.TIFFCurrentDirectory.restype = c_tdir_t
libtiff.TIFFCurrentDirectory.argtypes = [TIFF]
libtiff.TIFFLastDirectory.restype = ctypes.c_int
libtiff.TIFFLastDirectory.argtypes = [TIFF]
libtiff.TIFFReadDirectory.restype = ctypes.c_int
libtiff.TIFFReadDirectory.argtypes = [TIFF]
libtiff.TIFFWriteDirectory.restype = ctypes.c_int
libtiff.TIFFWriteDirectory.argtypes = [TIFF]
libtiff.TIFFSetDirectory.restype = ctypes.c_int
libtiff.TIFFSetDirectory.argtypes = [TIFF, c_tdir_t]
libtiff.TIFFFileno.restype = ctypes.c_int
libtiff.TIFFFileno.argtypes = [TIFF]
libtiff.TIFFGetMode.restype = ctypes.c_int
libtiff.TIFFGetMode.argtypes = [TIFF]
libtiff.TIFFIsTiled.restype = ctypes.c_int
libtiff.TIFFIsTiled.argtypes = [TIFF]
libtiff.TIFFIsByteSwapped.restype = ctypes.c_int
libtiff.TIFFIsByteSwapped.argtypes = [TIFF]
libtiff.TIFFIsUpSampled.restype = ctypes.c_int
libtiff.TIFFIsUpSampled.argtypes = [TIFF]
libtiff.TIFFIsMSB2LSB.restype = ctypes.c_int
libtiff.TIFFIsMSB2LSB.argtypes = [TIFF]
libtiff.TIFFGetField.restype = ctypes.c_int
libtiff.TIFFGetField.argtypes = [TIFF, c_ttag_t, ctypes.c_void_p]
libtiff.TIFFSetField.restype = ctypes.c_int
libtiff.TIFFSetField.argtypes = [TIFF, c_ttag_t, ctypes.c_void_p] # last item is reset in TIFF.SetField method
libtiff.TIFFNumberOfStrips.restype = c_tstrip_t
libtiff.TIFFNumberOfStrips.argtypes = [TIFF]
libtiff.TIFFReadRawStrip.restype = c_tsize_t
libtiff.TIFFReadRawStrip.argtypes = [TIFF, c_tstrip_t, c_tdata_t, c_tsize_t]
libtiff.TIFFWriteRawStrip.restype = c_tsize_t
libtiff.TIFFWriteRawStrip.argtypes = [TIFF, c_tstrip_t, c_tdata_t, c_tsize_t]
libtiff.TIFFReadEncodedStrip.restype = c_tsize_t
libtiff.TIFFReadEncodedStrip.argtypes = [TIFF, c_tstrip_t, c_tdata_t, c_tsize_t]
libtiff.TIFFWriteEncodedStrip.restype = c_tsize_t
libtiff.TIFFWriteEncodedStrip.argtypes = [TIFF, c_tstrip_t, c_tdata_t, c_tsize_t]
libtiff.TIFFStripSize.restype = c_tsize_t
libtiff.TIFFStripSize.argtypes = [TIFF]
libtiff.TIFFRawStripSize.restype = c_tsize_t
libtiff.TIFFRawStripSize.argtypes = [TIFF, c_tstrip_t]
# For adding custom tags (must be void pointer otherwise callback seg faults
libtiff.TIFFMergeFieldInfo.restype = ctypes.c_int32
libtiff.TIFFMergeFieldInfo.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_uint32]
# Tile Support
# TODO:
# TIFFTileRowSize64
# TIFFTileSize64
# TIFFVTileSize
# TIFFVTileSize64
libtiff.TIFFTileRowSize.restype = c_tsize_t
libtiff.TIFFTileRowSize.argtypes = [TIFF]
libtiff.TIFFTileSize.restype = c_tsize_t
libtiff.TIFFTileSize.argtypes = [TIFF]
libtiff.TIFFComputeTile.restype = c_ttile_t
libtiff.TIFFComputeTile.argtypes = [TIFF, ctypes.c_uint32, ctypes.c_uint32, ctypes.c_uint32, c_tsample_t]
libtiff.TIFFCheckTile.restype = ctypes.c_int
libtiff.TIFFCheckTile.argtypes = [TIFF, ctypes.c_uint32, ctypes.c_uint32, ctypes.c_uint32, c_tsample_t]
libtiff.TIFFNumberOfTiles.restype = c_ttile_t
libtiff.TIFFNumberOfTiles.argtypes = [TIFF]
libtiff.TIFFReadTile.restype = c_tsize_t
libtiff.TIFFReadTile.argtypes = [TIFF, c_tdata_t, ctypes.c_uint32, ctypes.c_uint32, ctypes.c_uint32, c_tsample_t]
libtiff.TIFFWriteTile.restype = c_tsize_t
libtiff.TIFFWriteTile.argtypes = [TIFF, c_tdata_t, ctypes.c_uint32, ctypes.c_uint32, ctypes.c_uint32, c_tsample_t]
libtiff.TIFFReadEncodedTile.restype = ctypes.c_int
libtiff.TIFFReadEncodedTile.argtypes = [TIFF, ctypes.c_ulong, ctypes.c_char_p, ctypes.c_ulong]
libtiff.TIFFReadRawTile.restype = c_tsize_t
libtiff.TIFFReadRawTile.argtypes = [TIFF, c_ttile_t, c_tdata_t, c_tsize_t]
libtiff.TIFFReadRGBATile.restype = ctypes.c_int
libtiff.TIFFReadRGBATile.argtypes = [TIFF, ctypes.c_uint32, ctypes.c_uint32, ctypes.POINTER(ctypes.c_uint32)]
libtiff.TIFFWriteEncodedTile.restype = c_tsize_t
libtiff.TIFFWriteEncodedTile.argtypes = [TIFF, c_ttile_t, c_tdata_t, c_tsize_t]
libtiff.TIFFWriteRawTile.restype = c_tsize_t
libtiff.TIFFWriteRawTile.argtypes = [TIFF, c_ttile_t, c_tdata_t, c_tsize_t]
libtiff.TIFFDefaultTileSize.restype = None
libtiff.TIFFDefaultTileSize.argtypes = [TIFF, ctypes.c_uint32, ctypes.c_uint32]
libtiff.TIFFClose.restype = None
libtiff.TIFFClose.argtypes = [TIFF]
# Support for TIFF warning and error handlers:
TIFFWarningHandler = ctypes.CFUNCTYPE(None,
ctypes.c_char_p, # Module
ctypes.c_char_p, # Format
ctypes.c_void_p) # va_list
TIFFErrorHandler = ctypes.CFUNCTYPE(None,
ctypes.c_char_p, # Module
ctypes.c_char_p, # Format
ctypes.c_void_p) # va_list
# This has to be at module scope so it is not garbage-collected
_null_warning_handler = TIFFWarningHandler(lambda module, fmt, va_list: None)
_null_error_handler = TIFFErrorHandler(lambda module, fmt, va_list: None)
def suppress_warnings():
libtiff.TIFFSetWarningHandler(_null_warning_handler)
def suppress_errors():
libtiff.TIFFSetErrorHandler(_null_error_handler)
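# A minimal usage sketch (the filename is illustrative): install the null handlers
# before opening files to keep libtiff from writing warnings/errors to stderr.
#
#   suppress_warnings()
#   suppress_errors()
#   tif = TIFF.open('noisy.tif', mode='r')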
def _test_custom_tags():
def _tag_write():
a = TIFF.open("/tmp/libtiff_test_custom_tags.tif", "w")
a.SetField("ARTIST", "MY NAME")
a.SetField("LibtiffTestByte", 42)
a.SetField("LibtiffTeststr", "FAKE")
a.SetField("LibtiffTestuint16", 42)
a.SetField("LibtiffTestMultiuint32", (1,2,3,4,5,6,7,8,9,10))
a.SetField("XPOSITION", 42.0)
a.SetField("PRIMARYCHROMATICITIES", (1.0, 2, 3, 4, 5, 6))
arr = np.ones((512,512), dtype=np.uint8)
arr[:,:] = 255
a.write_image(arr)
print "Tag Write: SUCCESS"
def _tag_read():
a = TIFF.open("/tmp/libtiff_test_custom_tags.tif", "r")
tmp = a.read_image()
assert tmp.shape==(512,512),"Image read was wrong shape (%r instead of (512,512))" % (tmp.shape,)
tmp = a.GetField("XPOSITION")
assert tmp == 42.0,"XPosition was not read as 42.0"
tmp = a.GetField("ARTIST")
assert tmp=="MY NAME","Artist was not read as 'MY NAME'"
tmp = a.GetField("LibtiffTestByte")
assert tmp==42,"LibtiffTestbyte was not read as 42"
tmp = a.GetField("LibtiffTestuint16")
assert tmp==42,"LibtiffTestuint16 was not read as 42"
tmp = a.GetField("LibtiffTestMultiuint32")
assert tmp==[1,2,3,4,5,6,7,8,9,10],"LibtiffTestMultiuint32 was not read as [1,2,3,4,5,6,7,8,9,10]"
tmp = a.GetField("LibtiffTeststr")
assert tmp=="FAKE","LibtiffTeststr was not read as 'FAKE'"
tmp = a.GetField("PRIMARYCHROMATICITIES")
assert tmp==[1.0,2.0,3.0,4.0,5.0,6.0],"PrimaryChromaticities was not read as [1.0,2.0,3.0,4.0,5.0,6.0]"
print "Tag Read: SUCCESS"
# Define a C structure that says how each tag should be used
test_tags = [
TIFFFieldInfo(40100, 1, 1, TIFFDataType.TIFF_BYTE, FIELD_CUSTOM, True, False, "LibtiffTestByte"),
TIFFFieldInfo(40103, 10, 10, TIFFDataType.TIFF_LONG, FIELD_CUSTOM, True, False, "LibtiffTestMultiuint32"),
TIFFFieldInfo(40102, 1, 1, TIFFDataType.TIFF_SHORT, FIELD_CUSTOM, True, False, "LibtiffTestuint16"),
TIFFFieldInfo(40101, -1, -1, TIFFDataType.TIFF_ASCII, FIELD_CUSTOM, True, False, "LibtiffTeststr")
]
# Add tags to the libtiff library
test_extender = add_tags(test_tags) # Keep pointer to extender object, no gc
_tag_write()
_tag_read()
def _test_tile_write():
a = TIFF.open("/tmp/libtiff_test_tile_write.tiff", "w")
# 1D Arrays (doesn't make much sense to tile)
assert a.SetField("ImageWidth", 3000)==1,"could not set ImageWidth tag" #1D,2D,3D
assert a.SetField("ImageLength", 1)==1,"could not set ImageLength tag" #1D
assert a.SetField("ImageDepth", 1)==1,"could not set ImageDepth tag" #1D,2D
# Must be multiples of 16
assert a.SetField("TileWidth", 512)==1,"could not set TileWidth tag"
assert a.SetField("TileLength", 528)==1,"could not set TileLength tag"
assert a.SetField("BitsPerSample", 8)==1,"could not set BitsPerSample tag"
assert a.SetField("Compression", COMPRESSION_NONE)==1,"could not set Compression tag"
data_array = np.array(range(500)*6).astype(np.uint8)
assert a.write_tiles(data_array)==(512*528)*6,"could not write tile images" #1D
a.WriteDirectory()
print "Tile Write: Wrote array of shape %r" % (data_array.shape,)
# 2D Arrays
assert a.SetField("ImageWidth", 3000)==1,"could not set ImageWidth tag" #1D,2D,3D
assert a.SetField("ImageLength", 2500)==1,"could not set ImageLength tag" #2D,3D
assert a.SetField("ImageDepth", 1)==1,"could not set ImageDepth tag" #1D,2D
# Must be multiples of 16
assert a.SetField("TileWidth", 512)==1,"could not set TileWidth tag"
assert a.SetField("TileLength", 528)==1,"could not set TileLength tag"
assert a.SetField("BitsPerSample", 8)==1,"could not set BitsPerSample tag"
assert a.SetField("Compression", COMPRESSION_NONE)==1,"could not set Compression tag"
data_array = np.tile(range(500), (2500,6)).astype(np.uint8)
assert a.write_tiles(data_array)==(512*528) * 5 * 6,"could not write tile images" #2D
a.WriteDirectory()
print "Tile Write: Wrote array of shape %r" % (data_array.shape,)
# 3D Arrays
assert a.SetField("ImageWidth", 3000)==1,"could not set ImageWidth tag" #1D,2D,3D
assert a.SetField("ImageLength", 2500)==1,"could not set ImageLength tag" #2D,3D
assert a.SetField("ImageDepth", 3)==1,"could not set ImageDepth tag" #3D
assert a.SetField("TileWidth", 512)==1,"could not set TileWidth tag"
assert a.SetField("TileLength", 528)==1,"could not set TileLength tag"
assert a.SetField("BitsPerSample", 8)==1,"could not set BitsPerSample tag"
assert a.SetField("Compression", COMPRESSION_NONE)==1,"could not set Compression tag"
data_array = np.tile(range(500), (3,2500,6)).astype(np.uint8)
assert a.write_tiles(data_array)==(512*528) * 5 * 6 * 3,"could not write tile images" #3D
a.WriteDirectory()
print "Tile Write: Wrote array of shape %r" % (data_array.shape,)
print "Tile Write: SUCCESS"
def _test_tile_read(filename=None):
import sys
if filename is None:
if len(sys.argv) != 2:
print "Run `libtiff.py <filename>` for testing."
return
filename = sys.argv[1]
a = TIFF.open(filename, "r")
# 1D Arrays (doesn't make much sense to tile)
a.SetDirectory(0)
iwidth = tmp = a.GetField("ImageWidth")
assert tmp is not None,"ImageWidth tag must be defined for reading tiles"
ilength = tmp = a.GetField("ImageLength")
assert tmp is not None,"ImageLength tag must be defined for reading tiles"
idepth = tmp = a.GetField("ImageDepth")
assert tmp is not None,"ImageDepth tag must be defined for reading tiles"
tmp = a.GetField("TileWidth")
assert tmp is not None,"TileWidth tag must be defined for reading tiles"
tmp = a.GetField("TileLength")
assert tmp is not None,"TileLength tag must be defined for reading tiles"
tmp = a.GetField("BitsPerSample")
assert tmp is not None,"BitsPerSample tag must be defined for reading tiles"
tmp = a.GetField("Compression")
assert tmp is not None,"Compression tag must be defined for reading tiles"
data_array = a.read_tiles()
print "Tile Read: Read array of shape %r" % (data_array.shape,)
assert data_array.shape==(iwidth,),"tile data read was the wrong shape"
test_array = np.array(range(500)*6).astype(np.uint8).flatten()
assert np.nonzero(data_array.flatten() != test_array)[0].shape[0] == 0,"tile data read was not the same as the expected data"
print "Tile Read: Data is the same as expected from tile write test"
# 2D Arrays (doesn't make much sense to tile)
a.SetDirectory(1)
iwidth = tmp = a.GetField("ImageWidth")
assert tmp is not None,"ImageWidth tag must be defined for reading tiles"
ilength = tmp = a.GetField("ImageLength")
assert tmp is not None,"ImageLength tag must be defined for reading tiles"
idepth = tmp = a.GetField("ImageDepth")
assert tmp is not None,"ImageDepth tag must be defined for reading tiles"
tmp = a.GetField("TileWidth")
assert tmp is not None,"TileWidth tag must be defined for reading tiles"
tmp = a.GetField("TileLength")
assert tmp is not None,"TileLength tag must be defined for reading tiles"
tmp = a.GetField("BitsPerSample")
assert tmp is not None,"BitsPerSample tag must be defined for reading tiles"
tmp = a.GetField("Compression")
assert tmp is not None,"Compression tag must be defined for reading tiles"
data_array = a.read_tiles()
print "Tile Read: Read array of shape %r" % (data_array.shape,)
assert data_array.shape==(ilength,iwidth),"tile data read was the wrong shape"
test_array = np.tile(range(500), (2500,6)).astype(np.uint8).flatten()
assert np.nonzero(data_array.flatten() != test_array)[0].shape[0] == 0,"tile data read was not the same as the expected data"
print "Tile Read: Data is the same as expected from tile write test"
# 3D Arrays (doesn't make much sense to tile)
a.SetDirectory(2)
iwidth = tmp = a.GetField("ImageWidth")
assert tmp is not None,"ImageWidth tag must be defined for reading tiles"
ilength = tmp = a.GetField("ImageLength")
assert tmp is not None,"ImageLength tag must be defined for reading tiles"
idepth = tmp = a.GetField("ImageDepth")
assert tmp is not None,"ImageDepth tag must be defined for reading tiles"
tmp = a.GetField("TileWidth")
assert tmp is not None,"TileWidth tag must be defined for reading tiles"
tmp = a.GetField("TileLength")
assert tmp is not None,"TileLength tag must be defined for reading tiles"
tmp = a.GetField("BitsPerSample")
assert tmp is not None,"BitsPerSample tag must be defined for reading tiles"
tmp = a.GetField("Compression")
assert tmp is not None,"Compression tag must be defined for reading tiles"
data_array = a.read_tiles()
print "Tile Read: Read array of shape %r" % (data_array.shape,)
assert data_array.shape==(idepth,ilength,iwidth),"tile data read was the wrong shape"
test_array = np.tile(range(500), (3,2500,6)).astype(np.uint8).flatten()
assert np.nonzero(data_array.flatten() != test_array)[0].shape[0] == 0,"tile data read was not the same as the expected data"
print "Tile Read: Data is the same as expected from tile write test"
print "Tile Read: SUCCESS"
def _test_tags_write():
tiff = TIFF.open('/tmp/libtiff_tags_write.tiff', mode='w')
tmp = tiff.SetField("Artist", "A Name")
assert tmp==1,"Tag 'Artist' was not written properly"
tmp = tiff.SetField("DocumentName", "")
assert tmp==1,"Tag 'DocumentName' with empty string was not written properly"
tmp = tiff.SetField("PrimaryChromaticities", [1,2,3,4,5,6])
assert tmp==1,"Tag 'PrimaryChromaticities' was not written properly"
tmp = tiff.SetField("BitsPerSample", 8)
assert tmp==1,"Tag 'BitsPerSample' was not written properly"
tmp = tiff.SetField("ColorMap", [[ x*256 for x in range(256) ]]*3)
assert tmp==1,"Tag 'ColorMap' was not written properly"
arr = np.zeros((100,100), np.uint8)
tiff.write_image(arr)
print "Tag Write: SUCCESS"
def _test_tags_read(filename=None):
import sys
if filename is None:
if len(sys.argv) != 2:
filename = '/tmp/libtiff_tags_write.tiff'
if not os.path.isfile (filename):
print 'Run `%s <filename>` for testing.' % (__file__)
return
else:
filename = sys.argv[1]
tiff = TIFF.open(filename)
tmp = tiff.GetField("Artist")
assert tmp=="A Name","Tag 'Artist' did not read the correct value (Got '%s'; Expected 'A Name')" % (tmp,)
tmp = tiff.GetField("DocumentName")
assert tmp=="","Tag 'DocumentName' did not read the correct value (Got '%s'; Expected empty string)" % (tmp,)
tmp = tiff.GetField("PrimaryChromaticities")
    assert tmp==[1,2,3,4,5,6],"Tag 'PrimaryChromaticities' did not read the correct value (Got '%r'; Expected '[1,2,3,4,5,6]')" % (tmp,)
tmp = tiff.GetField("BitsPerSample")
assert tmp==8,"Tag 'BitsPerSample' did not read the correct value (Got %s; Expected 8)" % (str(tmp),)
tmp = tiff.GetField("ColorMap")
try:
assert len(tmp) == 3,"Tag 'ColorMap' should be three arrays, found %d" % len(tmp)
assert len(tmp[0])==256,"Tag 'ColorMap' should be three arrays of 256 elements, found %d elements" % len(tmp[0])
assert len(tmp[1])==256,"Tag 'ColorMap' should be three arrays of 256 elements, found %d elements" % len(tmp[1])
assert len(tmp[2])==256,"Tag 'ColorMap' should be three arrays of 256 elements, found %d elements" % len(tmp[2])
except TypeError:
print "Tag 'ColorMap' has the wrong shape of 3 arrays of 256 elements each"
return
print "Tag Read: SUCCESS"
def _test_read(filename=None):
import sys
import time
if filename is None:
if len(sys.argv) != 2:
filename = '/tmp/libtiff_test_write.tiff'
if not os.path.isfile (filename):
print 'Run `libtiff.py <filename>` for testing.'
return
else:
filename = sys.argv[1]
print 'Trying to open', filename, '...',
tiff = TIFF.open(filename)
print 'ok'
print 'Trying to show info ...\n','-'*10
print tiff.info()
print '-'*10,'ok'
print 'Trying show images ...'
t = time.time ()
i = 0
for image in tiff.iter_images(verbose=True):
#print image.min(), image.max(), image.mean ()
i += 1
print '\tok',(time.time ()-t)*1e3,'ms',i,'images'
def _test_write():
tiff = TIFF.open('/tmp/libtiff_test_write.tiff', mode='w')
arr = np.zeros ((5,6), np.uint32)
for i in range(arr.shape[0]):
for j in range (arr.shape[1]):
arr[i,j] = i + 10*j
print arr
tiff.write_image(arr)
del tiff
def _test_write_float():
tiff = TIFF.open('/tmp/libtiff_test_write.tiff', mode='w')
arr = np.zeros ((5,6), np.float64)
for i in range(arr.shape[0]):
for j in range (arr.shape[1]):
arr[i,j] = i + 10*j
print arr
tiff.write_image(arr)
del tiff
tiff = TIFF.open('/tmp/libtiff_test_write.tiff', mode='r')
print tiff.info()
arr2 = tiff.read_image()
print arr2
def _test_copy():
tiff = TIFF.open('/tmp/libtiff_test_compression.tiff', mode='w')
arr = np.zeros ((5,6), np.uint32)
for i in range(arr.shape[0]):
for j in range (arr.shape[1]):
arr[i,j] = 1+i + 10*j
#from scipy.stats import poisson
#arr = poisson.rvs (arr)
tiff.SetField('ImageDescription', 'Hey\nyou')
tiff.write_image(arr, compression='lzw')
del tiff
tiff = TIFF.open('/tmp/libtiff_test_compression.tiff', mode='r')
print tiff.info()
arr2 = tiff.read_image()
assert (arr==arr2).all(),'arrays not equal'
for compression in ['none','lzw','deflate']:
for sampleformat in ['int','uint','float']:
for bitspersample in [256,128,64,32,16,8]:
if sampleformat=='float' and (bitspersample < 32 or bitspersample > 128):
continue
if sampleformat in ['int','uint'] and bitspersample > 64:
continue
#print compression, sampleformat, bitspersample
tiff.copy ('/tmp/libtiff_test_copy2.tiff',
compression=compression,
imagedescription='hoo',
sampleformat=sampleformat,
bitspersample=bitspersample)
tiff2 = TIFF.open('/tmp/libtiff_test_copy2.tiff', mode='r')
arr3 = tiff2.read_image()
assert (arr==arr3).all(),'arrays not equal %r' % ((compression, sampleformat, bitspersample),)
print 'test copy ok'
if __name__=='__main__':
_test_custom_tags()
_test_tile_write()
_test_tile_read()
_test_tags_write()
_test_tags_read()
_test_write_float()
_test_write()
_test_read()
_test_copy()
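# Usage sketch (an assumption, not stated in the source): the tests above use Python 2
# print statements, so they are intended for a Python 2 interpreter, e.g.
#   python libtiff.py                       # round-trips the /tmp/libtiff_*.tiff files
#   python libtiff.py /path/to/image.tiff   # points _test_read / _test_tags_read at that file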
| 41.519666 | 278 | 0.621114 |
f13866ba88e199cf6d962c4ed0b31db9a7287d2f | 398 | py | Python | _src/Chapter15/code/indices_management.py | paullewallencom/elasticsearch-978-1-7899-5650-4 | f6072e71777b1dcbcbf66f19e1c0cba58d9c6884 | [
"Apache-2.0"
] | 43 | 2019-11-14T15:22:04.000Z | 2022-03-10T06:35:58.000Z | _src/Chapter15/code/indices_management.py | paullewallencom/elasticsearch-978-1-7899-5650-4 | f6072e71777b1dcbcbf66f19e1c0cba58d9c6884 | [
"Apache-2.0"
] | 4 | 2020-05-16T07:47:06.000Z | 2021-12-09T21:28:40.000Z | _src/Chapter15/code/indices_management.py | paullewallencom/elasticsearch-978-1-7899-5650-4 | f6072e71777b1dcbcbf66f19e1c0cba58d9c6884 | [
"Apache-2.0"
] | 32 | 2020-01-10T06:56:46.000Z | 2022-02-10T06:45:39.000Z | import elasticsearch
es = elasticsearch.Elasticsearch()
index_name = "my_index"
if es.indices.exists(index_name):
es.indices.delete(index_name)
es.indices.create(index_name)
es.cluster.health(wait_for_status="yellow")
es.indices.close(index_name)
es.indices.open(index_name)
es.cluster.health(wait_for_status="yellow")
es.indices.forcemerge(index_name)
es.indices.delete(index_name)
| 16.583333 | 43 | 0.791457 |
27093cd1fe1614e462e5f180133fd101e286f8a7 | 450 | py | Python | zeus/brewery/serializers.py | sdivakarrajesh/Zeus | 7a6ddd3d0375f3a2f131f6fa46539faafbd73766 | [
"MIT"
] | null | null | null | zeus/brewery/serializers.py | sdivakarrajesh/Zeus | 7a6ddd3d0375f3a2f131f6fa46539faafbd73766 | [
"MIT"
] | 5 | 2021-03-19T01:10:37.000Z | 2021-09-22T18:47:10.000Z | zeus/brewery/serializers.py | sdivakarrajesh/Zeus | 7a6ddd3d0375f3a2f131f6fa46539faafbd73766 | [
"MIT"
] | null | null | null | from .models import *
from rest_framework import serializers
from django.conf import settings
import os
class DrinkSerializer(serializers.ModelSerializer):
drink_type = serializers.SerializerMethodField()
class Meta:
model = Drink
fields = ("name", "image", "drink_type")
def get_drink_type(self,obj):
drink_types = obj.drink_type.values_list('title', flat=True)
return ", ".join(drink_types)
| 25 | 68 | 0.693333 |
08924e01b8fcf8cbe7e1c20fade26199a7e2d75e | 267 | py | Python | thesaurus/__main__.py | Leoberti/thesaurus | 07de6d9d29e71a565c84cc49a248d4e3ca8f4426 | [
"Unlicense"
] | null | null | null | thesaurus/__main__.py | Leoberti/thesaurus | 07de6d9d29e71a565c84cc49a248d4e3ca8f4426 | [
"Unlicense"
] | null | null | null | thesaurus/__main__.py | Leoberti/thesaurus | 07de6d9d29e71a565c84cc49a248d4e3ca8f4426 | [
"Unlicense"
] | null | null | null | import click
from flask.cli import FlaskGroup
from . import create_app_wsgi
@click.group(cls=FlaskGroup, create_app=create_app_wsgi)
def main():
"""Management script for the thesaurus application."""
if __name__ == "__main__": # pragma: no cover
main()
| 19.071429 | 58 | 0.734082 |
ef74e6764d9ebdf35a72bd62304208aacc79e122 | 2,218 | py | Python | scripts/MSVD/generate_res_feature.py | WingsBrokenAngel/general-professional-learning-model | c4b892033b814b99c36f1f33b36df787f715ff14 | [
"MIT"
] | 39 | 2020-01-03T09:46:53.000Z | 2022-01-26T14:00:31.000Z | scripts/MSVD/generate_res_feature.py | WingsBrokenAngel/general-professional-learning-model | c4b892033b814b99c36f1f33b36df787f715ff14 | [
"MIT"
] | 7 | 2020-02-21T09:21:56.000Z | 2020-10-13T05:59:15.000Z | scripts/MSVD/generate_res_feature.py | WingsBrokenAngel/general-professional-learning-model | c4b892033b814b99c36f1f33b36df787f715ff14 | [
"MIT"
] | 13 | 2020-01-21T07:54:17.000Z | 2021-11-27T10:02:34.000Z | # -*- encoding: utf-8 -*-
"""这是用来产生MSVD特征的脚本"""
import cv2
import numpy as np
import tensorflow as tf
import tensornets as nets
from tensornets.utils import load_img
import math
import scipy.io as sio
import time
import random
import itertools
import os
from pprint import pprint
import pickle
batch_size = 32
seg_size = 32
path1 = '/home/chenhaoran/data/MSVD_frames'
path2 = './dict_youtube_mapping.pkl'
dims = [batch_size, 224, 224, 3]
def generate_feat(inputx, model, out_feats, sess, dmap):
"""
Read each video's frames with the model's built-in image loader,
then run them through the model to obtain the output features,
and finally save those features to a .npy file.
"""
# Read the list of video directories
vid_names = os.listdir(path1)
res_feats = np.zeros([1970, seg_size, 2048], np.float32)
# Uniformly sample seg_size frames from each video and process one video per batch
for idx in range(0, len(vid_names)):
# Read all frame names for this video and uniformly pick seg_size of them
input_imgs = np.zeros(shape=dims, dtype=np.float32)
name = vid_names[idx]
vidpath = os.path.join(path1, name)
frm_names = os.listdir(vidpath)
frm_names = [f for f in frm_names if f[-4:]!='.npy']
frm_len = len(frm_names)
delta = frm_len / seg_size
idx_list = [int(i*delta) for i in range(seg_size)]
print(idx, name, frm_len, max(idx_list))
name_list = [os.path.join(vidpath, frm_names[i]) for i in idx_list]
# Load each image in the list with load_img and preprocess it with model.preprocess
for idx2, img_path in enumerate(name_list):
img = load_img(img_path, target_size=256, crop_size=224)
input_imgs[idx2,:,:,:] = model.preprocess(img)
feats = sess.run(out_feats, {inputx: input_imgs})
res_feats[int(dmap[vid_names[idx]][3:])-1] = feats
np.save('ResNeXt101c64_32frm_feats.npy', res_feats)
if __name__ == "__main__":
# Model setup
inputx = tf.placeholder(tf.float32, [None, 224, 224, 3])
model = nets.ResNeXt101c64(inputx, is_training=False)
out_feats = model.get_outputs()[-3]
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
sess.run(tf.global_variables_initializer())
sess.run(model.pretrained())
# Generate the features
dmap = pickle.load(open(path2, 'rb'))
generate_feat(inputx, model, out_feats, sess, dmap)
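# Input/output sketch (directory layout inferred from the paths above, not verified):
#   /home/chenhaoran/data/MSVD_frames/<video_name>/<frame>.jpg  - extracted frames per video
#   ./dict_youtube_mapping.pkl                                  - maps <video_name> -> 'vidNNNN' ids
# The result is a (1970, 32, 2048) float32 array saved as ResNeXt101c64_32frm_feats.npy,
# indexed by the numeric part of each 'vid' id minus one.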
| 29.573333 | 75 | 0.674482 |
9a8660a77b479da3ad672997e5f4777f984705bf | 5,801 | py | Python | ls_improved/lsi_itemloader.py | conao3/ls-Improved | 9917565e8817989159241974942e90731d772379 | [
"MIT"
] | null | null | null | ls_improved/lsi_itemloader.py | conao3/ls-Improved | 9917565e8817989159241974942e90731d772379 | [
"MIT"
] | null | null | null | ls_improved/lsi_itemloader.py | conao3/ls-Improved | 9917565e8817989159241974942e90731d772379 | [
"MIT"
] | null | null | null | import os
from glob import glob
import unicodedata
from .config import Config
class LsiItemLoader():
def __init__(self):
"""
Item Loader
Return
------
children : List[children_d, children_f]
children_d : List[items]
item : Dict
dict.keys(default) = ['path', 'type', 'depth']
dict.keys(optional) = ['description', 'auth', 'children']
"""
# Set Config
self.config = Config()
def _get_children(self, dir, show_all=False, get_only_directories=False, get_only_files=False):
"""
Get children files and directories of the directory.
Parameters
----------
dir : String
Directory Path
show_all : Boolean (Optional)
True -> Get all files and directories.
Returns
-------
status : Int
0 == success
1 == error
children : List [children_d, children_f]
children_d : List[String]
children (directories)
children_f : List[String]
children (files)
"""
# Get children
pathes = glob(dir+'*')
if show_all:
pathes += glob(dir+'.*')
children_d = [p for p in pathes if os.path.isdir(p)] if not get_only_files else []
children_f = [p for p in pathes if os.path.isfile(p)] if not get_only_directories else []
if children_d+children_f == []:
status = 1
return status, [[],[]]
# Prepare output
children = [sorted(children_d), sorted(children_f)]
status = 0
return status, children
def _read_description(self, dir):
"""
Get .description.lsi of dir.
Parameters
----------
dir : String
Directory Path
Returns
-------
status : Int
0 == success
1 == description file not exists
2 == permission denied
3 == error
description : String
description string of directory.
"""
dir = dir+'/' if dir[-1]!='/' else dir
desc_path = dir + self.config.description_name
try:
with open(desc_path, 'r') as f:
description = f.read()
if description == '':
description = None
status = 0
except Exception as e:
e = str(e)
if 'no such file or directory' in e:
description = None
status = 1
elif 'Permission denied' in e:
description = ';w;Dir ' + self.config.get_color('red') + '(Permission denied)'
status = 2
else:
description = None
status = 3
return status, description
def _create_item(self, path):
"""
Create directory or file dictionary.
Parameters
----------
path : String
directory or file path.
Returns
-------
status : Int
0 == path is dir
1 == path is file
2 == error
item : Dict
dict.keys(default) = ['path', 'type', 'depth']
dict.keys(optional) = ['description', 'auth', 'children']
"""
def get_east_asian_width_count(text):
count = 0
for c in text:
if unicodedata.east_asian_width(c) in 'FWA':
count += 2
else:
count += 1
return count
base_path = path.split('/')[-1]
item = {
'path': base_path,
'path_length': get_east_asian_width_count(base_path),
'depth': 0
}
if os.path.isdir(path):
s, description = self._read_description(path)
has_desc = True if description is not None else False
if has_desc:
item['description'] = description
item['type'] = 'Dir'
status = 0
elif os.path.isfile(path):
item['type'] = 'File'
status = 1
else:
item = {}
status = 2
return status, item
def get_items(self, dir, show_all=False, show_only_directories=False, show_only_files=False):
"""
Repeat self._create_item() along all children of 'dir'.
Parameters
----------
dir : String
Directory Path
show_all : Boolean (Optional)
Whether to show hidden files or not.
Return
------
status : Int
0 == success
1 == error
top_item : Dict
keys : path, children_d, children_f
path : Strin (path of top directory)
children_d : List[items]
children_f : List[items]
"""
if not os.path.isdir(dir):
print('error: cannot open directory \''+dir+'\': No such directory.')
exit()
if not os.access(dir, os.R_OK):
print('error: cannot open directory \''+dir+'\': Permission denied.')
exit()
status, children = self._get_children(dir, show_all=show_all, get_only_directories=show_only_directories, get_only_files=show_only_files)
children_d, children_f = children
children_d = [self._create_item(child)[1] for child in children_d]
children_f = [self._create_item(child)[1] for child in children_f]
top_item = {
'path': dir,
'children_d': children_d,
'children_f': children_f
}
return status, top_item
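# Minimal usage sketch (hypothetical directory path; keys follow the docstrings above):
#   loader = LsiItemLoader()
#   status, top_item = loader.get_items('/path/to/dir/', show_all=True)
#   if status == 0:
#       for item in top_item['children_d']:
#           print(item['path'], item.get('description'))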
| 30.531579 | 145 | 0.495432 |
1c81394291b2921d09f79943fc56a17d437b2291 | 2,849 | py | Python | stubs.min/Autodesk/Revit/DB/__init___parts/TableCellCombinedParameterData.py | denfromufa/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | 1 | 2017-07-07T11:15:45.000Z | 2017-07-07T11:15:45.000Z | stubs.min/Autodesk/Revit/DB/__init___parts/TableCellCombinedParameterData.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | null | null | null | stubs.min/Autodesk/Revit/DB/__init___parts/TableCellCombinedParameterData.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | null | null | null | class TableCellCombinedParameterData(object,IDisposable):
""" The TableCellCombinedParameterData stores the data for combined parameters """
@staticmethod
def Create():
"""
Create() -> TableCellCombinedParameterData
construct a TableCellCombinedParameterData
"""
pass
def Dispose(self):
""" Dispose(self: TableCellCombinedParameterData) """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: TableCellCombinedParameterData,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
CategoryId=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Category id for this parameter
Get: CategoryId(self: TableCellCombinedParameterData) -> ElementId
Set: CategoryId(self: TableCellCombinedParameterData)=value
"""
IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Specifies whether the .NET object represents a valid Revit entity.
Get: IsValidObject(self: TableCellCombinedParameterData) -> bool
"""
ParamId=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The parameter id
Get: ParamId(self: TableCellCombinedParameterData) -> ElementId
Set: ParamId(self: TableCellCombinedParameterData)=value
"""
Prefix=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The prefix for this parameter
Get: Prefix(self: TableCellCombinedParameterData) -> str
Set: Prefix(self: TableCellCombinedParameterData)=value
"""
SampleValue=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The sample/example value for the parameter in text form
Get: SampleValue(self: TableCellCombinedParameterData) -> str
Set: SampleValue(self: TableCellCombinedParameterData)=value
"""
Separator=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The separator for this parameter
Get: Separator(self: TableCellCombinedParameterData) -> str
Set: Separator(self: TableCellCombinedParameterData)=value
"""
Suffix=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The suffix for this parameter
Get: Suffix(self: TableCellCombinedParameterData) -> str
Set: Suffix(self: TableCellCombinedParameterData)=value
"""
| 33.517647 | 215 | 0.725869 |
6a7c07b0513c322c4652ba9b39b2156434a1d823 | 2,098 | py | Python | instabot/web_elements.py | jakerobinson19/instabot | a5a2d23cc3791d79a2cfeb469144eb92c24e402c | [
"MIT"
] | 1 | 2020-03-03T03:54:56.000Z | 2020-03-03T03:54:56.000Z | instabot/web_elements.py | jakerobinson19/instabot | a5a2d23cc3791d79a2cfeb469144eb92c24e402c | [
"MIT"
] | null | null | null | instabot/web_elements.py | jakerobinson19/instabot | a5a2d23cc3791d79a2cfeb469144eb92c24e402c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
xpath = {}
xpath['get_count'] = {
'followers':'span',
'following':'span'
}
xpath['get_pic'] = {
'all_shown': "//div[@class='v1Nh3 kIKUG _bz0w']",
'recent': '//*[@id="react-root"]/section/main/article/div[2]/div/div[1]/div[1]/a',
'top': '//*[@id="react-root"]/section/main/article/div[1]/div/div/div[1]/div[1]/a'
}
xpath['comments'] = {
'comments_on_pic':"//a[@class='FPmhX notranslate TlrDj']",
'comment':'//*/div/li/div/div[1]/div[2]/h3/a',
'comment_section':"//div[@class='C4VMK']"
}
xpath['like_button'] = {
'like_button':"//span[@class='fr66n']",
'heart_outline': "//span[@class='fr66n']",
'heart_filled': "//span[@class='FY9nT fr66n']"
# old x_paths
# 'heart_outline': "//button/span[@class='glyphsSpriteHeart__outline__24__grey_9 u-__7']",
# 'heart_filled': "//button/span[@class='glyphsSpriteHeart__filled__24__red_5 u-__7']",
}
xpath['status'] = {
'following':"//*/button[contains(text(), 'Follow')]",
'unfollow':"//button[text()='Unfollow']"
}
xpath['notification_wall'] = {
'not_now':"//button[text()='Not Now']",
'turn_on':"//div/h2[text()='Turn on Notifications']"
}
xpath['buttons'] = {
'first_next':'/html/body/div[3]/div[1]/div/div/a',
'next': '/html/body/div[3]/div[1]/div/div/a[2]'
}
xpath['get_following_status'] = {
"follow_button_XP":"//button[text()='Following' or \
text()='Requested' or \
text()='Follow' or \
text()='Follow Back' or \
text()='Unblock']"
}
xpath['post'] = {
'video_identifier': '//video',
'video_tag': "//div[@class'ZyFrc']",
'timestamp':"//time[@class='_1o9PC Nzb55']"
}
selector = {}
selector['login_elem'] = {
'username': 'form input',
'login': 'form input'
}
selector['elements'] = {
'comment_box': "textarea.Ypffh",
'profile_username':'div.e1e1d',
'datetime_stamp':'_1o9PC Nzb55'
}
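# Usage sketch (assumes Selenium 4 and an already logged-in `driver`; neither is
# imported by this module):
#   from selenium.webdriver.common.by import By
#   like_button = driver.find_element(By.XPATH, xpath['like_button']['like_button'])
#   comment_box = driver.find_element(By.CSS_SELECTOR, selector['elements']['comment_box'])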
| 28.739726 | 94 | 0.541945 |
813339bad6c9725a111fc36ce3244245df4b96d7 | 9,796 | py | Python | custom_components/alarmdotcom/config_flow.py | mjmicks/alarmdotcom | 150042457fcace0a5cd0c2bf7cc7dd510d58ee29 | [
"MIT"
] | 1 | 2022-01-23T15:04:47.000Z | 2022-01-23T15:04:47.000Z | custom_components/alarmdotcom/config_flow.py | mjmicks/alarmdotcom | 150042457fcace0a5cd0c2bf7cc7dd510d58ee29 | [
"MIT"
] | null | null | null | custom_components/alarmdotcom/config_flow.py | mjmicks/alarmdotcom | 150042457fcace0a5cd0c2bf7cc7dd510d58ee29 | [
"MIT"
] | null | null | null | """Config flow to configure Alarmdotcom."""
from __future__ import annotations
import logging
from typing import Any
from homeassistant import config_entries
from homeassistant.const import CONF_CODE, CONF_PASSWORD, CONF_USERNAME
from homeassistant.data_entry_flow import FlowResult
import voluptuous as vol
from pyalarmdotcomajax.const import ArmingOption as ADCArmingOption
from pyalarmdotcomajax.errors import AuthenticationFailed
from . import const as adci
from .controller import get_controller
log = logging.getLogger(__name__)
class ADCFlowHandler(config_entries.ConfigFlow, domain=adci.DOMAIN): # type: ignore
"""Handle a Alarmdotcom config flow."""
def __init__(self) -> None:
"""Initialize the Alarmdotcom flow."""
self.adc = None
self.config: dict = {}
self.system_id: str | None = None
self.sensor_data: dict | None = {}
self._config_title: str | None = None
self._existing_entry: config_entries.ConfigEntry | None = None
self._imported_options = None
@staticmethod
def async_get_options_flow(
config_entry: config_entries.ConfigEntry,
) -> ADCOptionsFlowHandler:
"""Tell Home Assistant that this integration supports configuration options."""
return ADCOptionsFlowHandler(config_entry)
async def async_step_user(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Gather configuration data when flow is initiated via the user interface."""
errors = {}
if user_input is not None:
self.config = {
adci.CONF_USERNAME: user_input[adci.CONF_USERNAME],
adci.CONF_PASSWORD: user_input[adci.CONF_PASSWORD],
adci.CONF_2FA_COOKIE: user_input.get(adci.CONF_2FA_COOKIE),
}
try:
api = await get_controller(
self.hass,
self.config[adci.CONF_USERNAME],
self.config[adci.CONF_PASSWORD],
self.config[adci.CONF_2FA_COOKIE],
)
self._existing_entry = await self.async_set_unique_id(
f"{api.provider_name}:{api.user_id}"
)
self._config_title = f"{api.provider_name}: {api.user_email}"
except ConnectionError as err:
log.debug(
"%s: get_controller failed with CannotConnect exception: %s",
__name__,
err,
)
errors["base"] = "invalid_auth"
except AuthenticationFailed as err:
log.debug(
"%s: get_controller failed with InvalidAuth exception: %s",
__name__,
err,
)
errors["base"] = "cannot_connect"
return await self.async_step_final()
creds_schema = vol.Schema(
{
vol.Required(adci.CONF_USERNAME): str,
vol.Required(adci.CONF_PASSWORD): str,
vol.Optional(adci.CONF_2FA_COOKIE): str,
}
)
return self.async_show_form(
step_id="user", data_schema=creds_schema, errors=errors, last_step=True
)
async def async_step_final(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Create configuration entry using entered data."""
if self._existing_entry:
self.hass.config_entries.async_update_entry(
self._existing_entry, data=user_input
)
await self.hass.config_entries.async_reload(self._existing_entry.entry_id)
return self.async_abort(reason="reauth_successful")
# TODO: For non-imported flows, set options to defaults as defined in options flow handler.
# TODO: For imported flows, validate options through schema.
return self.async_create_entry(
title=self._config_title, data=self.config, options=self._imported_options
)
# #
# Import from configuration.yaml
# #
# https://github.com/home-assistant/core/blob/56bda80e0a799404001efe309f52ea1f8a20f479/homeassistant/components/version/config_flow.py
async def async_step_import(self, import_config: dict[str, Any]) -> FlowResult:
"""Import a config entry from configuration.yaml."""
log.debug("%s: Importing configuration data from configuration.yaml.", __name__)
self.config = _convert_imported_configuration(import_config)
self._imported_options = _convert_imported_options(import_config)
self._async_abort_entries_match({**self.config})
return await self.async_step_final()
# #
# Reauthentication Steps
# #
async def async_step_reauth(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Perform reauth upon an API authentication error."""
return await self.async_step_reauth_confirm(user_input)
async def async_step_reauth_confirm(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Dialog that informs the user that reauth is required."""
if user_input is None:
return self.async_show_form(
step_id="reauth_confirm",
data_schema=vol.Schema({}),
)
return await self.async_step_user()
class ADCOptionsFlowHandler(config_entries.OptionsFlow): # type: ignore
"""Handle option configuration via Integrations page."""
def __init__(self, config_entry: config_entries.ConfigEntry) -> None:
"""Initialize options flow."""
self.config_entry = config_entry
self.options = dict(config_entry.options)
async def async_step_init(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Manage the options for the custom component."""
errors: dict = {}
if user_input is not None:
self.options.update(user_input)
return await self._update_options()
return self.async_show_form(
step_id="init",
data_schema=self.schema,
errors=errors,
last_step=True,
)
async def _update_options(self) -> FlowResult:
return self.async_create_entry(title="", data=self.options)
@property
def schema(self) -> vol.Schema:
"""Input schema for integration options."""
return vol.Schema(
{
vol.Optional(
adci.CONF_ARM_CODE,
default=self.options.get(adci.CONF_ARM_CODE, None),
): str,
vol.Required(
adci.CONF_FORCE_BYPASS,
default=self.options.get(
adci.CONF_FORCE_BYPASS, adci.ADCIArmingOption.NEVER.value
),
): vol.In(
[
adci.ADCIArmingOption.NEVER.value,
adci.ADCIArmingOption.ALWAYS.value,
adci.ADCIArmingOption.STAY.value,
adci.ADCIArmingOption.AWAY.value,
]
),
vol.Required(
adci.CONF_NO_DELAY,
default=self.options.get(
adci.CONF_NO_DELAY, adci.ADCIArmingOption.NEVER.value
),
): vol.In(
[
adci.ADCIArmingOption.NEVER.value,
adci.ADCIArmingOption.ALWAYS.value,
adci.ADCIArmingOption.STAY.value,
adci.ADCIArmingOption.AWAY.value,
]
),
vol.Required(
adci.CONF_SILENT_ARM,
default=self.options.get(
adci.CONF_SILENT_ARM, adci.ADCIArmingOption.NEVER.value
),
): vol.In(
[
adci.ADCIArmingOption.NEVER.value,
adci.ADCIArmingOption.ALWAYS.value,
adci.ADCIArmingOption.STAY.value,
adci.ADCIArmingOption.AWAY.value,
]
),
}
)
def _convert_imported_configuration(config: dict[str, Any]) -> Any:
"""Convert a key from the imported configuration."""
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
two_factor_cookie = config.get(adci.LEGACY_CONF_TWO_FACTOR_COOKIE)
data: dict = {}
data[adci.CONF_USERNAME] = username
data[adci.CONF_PASSWORD] = password
if two_factor_cookie:
data[adci.CONF_2FA_COOKIE] = two_factor_cookie
return data
def _convert_imported_options(config: dict[str, Any]) -> Any:
"""Convert a key from the imported configuration."""
code: str | None = config.get(CONF_CODE)
force_bypass: ADCArmingOption | None = config.get(adci.LEGACY_CONF_FORCE_BYPASS)
no_entry_delay: ADCArmingOption | None = config.get(adci.LEGACY_CONF_NO_ENTRY_DELAY)
silent_arming: ADCArmingOption | None = config.get(adci.LEGACY_CONF_SILENT_ARMING)
data: dict = {}
if code:
data[adci.CONF_ARM_CODE] = code
if force_bypass:
data[adci.CONF_FORCE_BYPASS] = adci.ADCIArmingOption.from_config_yaml(
force_bypass
).value
if no_entry_delay:
data[adci.CONF_NO_DELAY] = adci.ADCIArmingOption.from_config_yaml(
no_entry_delay
).value
if silent_arming:
data[adci.CONF_SILENT_ARM] = adci.ADCIArmingOption.from_config_yaml(
silent_arming
).value
return data
| 34.737589 | 138 | 0.595243 |
d4eec8b3bc4d21596016961a11facbed29e7ec29 | 41,424 | py | Python | hydrus/client/db/ClientDBTagSearch.py | sttollgrin/hydrus | dab05074dacdbd1f9574c4afa6b35bf31f1ee36b | [
"WTFPL"
] | 1 | 2021-05-16T16:06:48.000Z | 2021-05-16T16:06:48.000Z | hydrus/client/db/ClientDBTagSearch.py | sttollgrin/hydrus | dab05074dacdbd1f9574c4afa6b35bf31f1ee36b | [
"WTFPL"
] | 5 | 2021-03-31T05:48:17.000Z | 2021-04-30T05:51:34.000Z | hydrus/client/db/ClientDBTagSearch.py | sttollgrin/hydrus | dab05074dacdbd1f9574c4afa6b35bf31f1ee36b | [
"WTFPL"
] | 2 | 2021-03-28T23:17:50.000Z | 2021-05-16T15:46:52.000Z | import itertools
import sqlite3
import typing
from hydrus.core import HydrusConstants as HC
from hydrus.core import HydrusData
from hydrus.core import HydrusDB
from hydrus.core import HydrusDBBase
from hydrus.core import HydrusGlobals as HG
from hydrus.client import ClientConstants as CC
from hydrus.client import ClientSearch
from hydrus.client.db import ClientDBMaster
from hydrus.client.db import ClientDBModule
from hydrus.client.db import ClientDBServices
from hydrus.client.db import ClientDBTagDisplay
from hydrus.client.metadata import ClientTags
# Sqlite can handle -( 2 ** 63 ) -> ( 2 ** 63 ) - 1
MIN_CACHED_INTEGER = - ( 2 ** 63 )
MAX_CACHED_INTEGER = ( 2 ** 63 ) - 1
def CanCacheInteger( num ):
return MIN_CACHED_INTEGER <= num and num <= MAX_CACHED_INTEGER
def ConvertWildcardToSQLiteLikeParameter( wildcard ):
like_param = wildcard.replace( '*', '%' )
return like_param
def GenerateCombinedFilesIntegerSubtagsTableName( tag_service_id ):
name = 'combined_files_integer_subtags_cache'
integer_subtags_table_name = 'external_caches.{}_{}'.format( name, tag_service_id )
return integer_subtags_table_name
def GenerateCombinedFilesSubtagsFTS4TableName( tag_service_id ):
name = 'combined_files_subtags_fts4_cache'
subtags_fts4_table_name = 'external_caches.{}_{}'.format( name, tag_service_id )
return subtags_fts4_table_name
def GenerateCombinedFilesSubtagsSearchableMapTableName( tag_service_id ):
name = 'combined_files_subtags_searchable_map_cache'
subtags_searchable_map_table_name = 'external_caches.{}_{}'.format( name, tag_service_id )
return subtags_searchable_map_table_name
def GenerateCombinedFilesTagsTableName( tag_service_id ):
name = 'combined_files_tags_cache'
tags_table_name = 'external_caches.{}_{}'.format( name, tag_service_id )
return tags_table_name
def GenerateCombinedTagsTagsTableName( file_service_id ):
name = 'combined_tags_tags_cache'
tags_table_name = 'external_caches.{}_{}'.format( name, file_service_id )
return tags_table_name
def GenerateSpecificIntegerSubtagsTableName( file_service_id, tag_service_id ):
name = 'specific_integer_subtags_cache'
suffix = '{}_{}'.format( file_service_id, tag_service_id )
integer_subtags_table_name = 'external_caches.{}_{}'.format( name, suffix )
return integer_subtags_table_name
def GenerateSpecificSubtagsFTS4TableName( file_service_id, tag_service_id ):
name = 'specific_subtags_fts4_cache'
suffix = '{}_{}'.format( file_service_id, tag_service_id )
subtags_fts4_table_name = 'external_caches.{}_{}'.format( name, suffix )
return subtags_fts4_table_name
def GenerateSpecificSubtagsSearchableMapTableName( file_service_id, tag_service_id ):
name = 'specific_subtags_searchable_map_cache'
suffix = '{}_{}'.format( file_service_id, tag_service_id )
subtags_searchable_map_table_name = 'external_caches.{}_{}'.format( name, suffix )
return subtags_searchable_map_table_name
def GenerateSpecificTagsTableName( file_service_id, tag_service_id ):
name = 'specific_tags_cache'
suffix = '{}_{}'.format( file_service_id, tag_service_id )
tags_table_name = 'external_caches.{}_{}'.format( name, suffix )
return tags_table_name
def WildcardHasFTS4SearchableCharacters( wildcard: str ):
# fts4 says it can do alphanumeric or unicode with a value >= 128
for c in wildcard:
if c.isalnum() or ord( c ) >= 128 or c == '*':
return True
return False
class ClientDBTagSearch( ClientDBModule.ClientDBModule ):
CAN_REPOPULATE_ALL_MISSING_DATA = True
def __init__( self, cursor: sqlite3.Cursor, modules_services: ClientDBServices.ClientDBMasterServices, modules_tags: ClientDBMaster.ClientDBMasterTags, modules_tag_display: ClientDBTagDisplay.ClientDBTagDisplay ):
self.modules_services = modules_services
self.modules_tags = modules_tags
self.modules_tag_display = modules_tag_display
ClientDBModule.ClientDBModule.__init__( self, 'client tag search', cursor )
self._missing_tag_search_service_pairs = set()
def _GetServiceIndexGenerationDictSingle( self, file_service_id, tag_service_id ) -> dict:
tags_table_name = self.GetTagsTableName( file_service_id, tag_service_id )
subtags_fts4_table_name = self.GetSubtagsFTS4TableName( file_service_id, tag_service_id )
subtags_searchable_map_table_name = self.GetSubtagsSearchableMapTableName( file_service_id, tag_service_id )
integer_subtags_table_name = self.GetIntegerSubtagsTableName( file_service_id, tag_service_id )
index_generation_dict = {}
index_generation_dict[ tags_table_name ] = [
( [ 'namespace_id', 'subtag_id' ], True, 465 ),
( [ 'subtag_id' ], False, 465 )
]
index_generation_dict[ subtags_searchable_map_table_name ] = [
( [ 'searchable_subtag_id' ], False, 465 )
]
index_generation_dict[ integer_subtags_table_name ] = [
( [ 'integer_subtag' ], False, 465 )
]
return index_generation_dict
def _GetServiceIndexGenerationDict( self, service_id ) -> dict:
tag_service_id = service_id
index_generation_dict = {}
file_service_ids = list( self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES ) )
file_service_ids.append( self.modules_services.combined_file_service_id )
for file_service_id in file_service_ids:
single_index_dict = self._GetServiceIndexGenerationDictSingle( file_service_id, tag_service_id )
index_generation_dict.update( single_index_dict )
return index_generation_dict
def _GetServiceTableGenerationDictSingle( self, file_service_id, tag_service_id ):
tags_table_name = self.GetTagsTableName( file_service_id, tag_service_id )
subtags_fts4_table_name = self.GetSubtagsFTS4TableName( file_service_id, tag_service_id )
subtags_searchable_map_table_name = self.GetSubtagsSearchableMapTableName( file_service_id, tag_service_id )
integer_subtags_table_name = self.GetIntegerSubtagsTableName( file_service_id, tag_service_id )
table_dict = {
tags_table_name : ( 'CREATE TABLE IF NOT EXISTS {} ( tag_id INTEGER PRIMARY KEY, namespace_id INTEGER, subtag_id INTEGER );', 465 ),
subtags_fts4_table_name : ( 'CREATE VIRTUAL TABLE IF NOT EXISTS {} USING fts4( subtag );', 465 ),
subtags_searchable_map_table_name : ( 'CREATE TABLE IF NOT EXISTS {} ( subtag_id INTEGER PRIMARY KEY, searchable_subtag_id INTEGER );', 465 ),
integer_subtags_table_name : ( 'CREATE TABLE IF NOT EXISTS {} ( subtag_id INTEGER PRIMARY KEY, integer_subtag INTEGER );', 465 )
}
return table_dict
def _GetServiceTableGenerationDict( self, service_id ) -> dict:
tag_service_id = service_id
table_dict = {}
file_service_ids = list( self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES ) )
file_service_ids.append( self.modules_services.combined_file_service_id )
for file_service_id in file_service_ids:
single_table_dict = self._GetServiceTableGenerationDictSingle( file_service_id, tag_service_id )
table_dict.update( single_table_dict )
return table_dict
def _GetServiceIdsWeGenerateDynamicTablesFor( self ):
return self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
def _RepairRepopulateTables( self, table_names, cursor_transaction_wrapper: HydrusDBBase.DBCursorTransactionWrapper ):
file_service_ids = list( self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_TAG_LOOKUP_CACHES ) )
file_service_ids.append( self.modules_services.combined_file_service_id )
tag_service_ids = list( self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES ) )
for tag_service_id in tag_service_ids:
for file_service_id in file_service_ids:
table_dict_for_this = self._GetServiceTableGenerationDictSingle( file_service_id, tag_service_id )
table_names_for_this = set( table_dict_for_this.keys() )
if not table_names_for_this.isdisjoint( table_names ):
self._missing_tag_search_service_pairs.add( ( file_service_id, tag_service_id ) )
def AddTags( self, file_service_id, tag_service_id, tag_ids ):
if len( tag_ids ) == 0:
return
tags_table_name = self.GetTagsTableName( file_service_id, tag_service_id )
actually_new_tag_ids = set()
for tag_id in tag_ids:
self._Execute( 'INSERT OR IGNORE INTO {} ( tag_id, namespace_id, subtag_id ) SELECT tag_id, namespace_id, subtag_id FROM tags WHERE tag_id = ?;'.format( tags_table_name ), ( tag_id, ) )
if self._GetRowCount() > 0:
actually_new_tag_ids.add( tag_id )
if len( actually_new_tag_ids ) > 0:
if file_service_id == self.modules_services.combined_file_service_id:
self._Execute( 'UPDATE service_info SET info = info + ? WHERE service_id = ? AND info_type = ?;', ( len( actually_new_tag_ids ), tag_service_id, HC.SERVICE_INFO_NUM_TAGS ) )
with self._MakeTemporaryIntegerTable( actually_new_tag_ids, 'tag_id' ) as temp_tag_ids_table_name:
# temp tags to fast tag definitions to subtags
subtag_ids_and_subtags = self._Execute( 'SELECT subtag_id, subtag FROM {} CROSS JOIN {} USING ( tag_id ) CROSS JOIN subtags USING ( subtag_id );'.format( temp_tag_ids_table_name, tags_table_name ) ).fetchall()
subtags_fts4_table_name = self.GetSubtagsFTS4TableName( file_service_id, tag_service_id )
subtags_searchable_map_table_name = self.GetSubtagsSearchableMapTableName( file_service_id, tag_service_id )
integer_subtags_table_name = self.GetIntegerSubtagsTableName( file_service_id, tag_service_id )
for ( subtag_id, subtag ) in subtag_ids_and_subtags:
searchable_subtag = ClientSearch.ConvertSubtagToSearchable( subtag )
if searchable_subtag != subtag:
searchable_subtag_id = self.modules_tags.GetSubtagId( searchable_subtag )
self._Execute( 'INSERT OR IGNORE INTO {} ( subtag_id, searchable_subtag_id ) VALUES ( ?, ? );'.format( subtags_searchable_map_table_name ), ( subtag_id, searchable_subtag_id ) )
#
self._Execute( 'INSERT OR IGNORE INTO {} ( docid, subtag ) VALUES ( ?, ? );'.format( subtags_fts4_table_name ), ( subtag_id, searchable_subtag ) )
if subtag.isdecimal():
try:
integer_subtag = int( subtag )
if CanCacheInteger( integer_subtag ):
self._Execute( 'INSERT OR IGNORE INTO {} ( subtag_id, integer_subtag ) VALUES ( ?, ? );'.format( integer_subtags_table_name ), ( subtag_id, integer_subtag ) )
except ValueError:
pass
def DeleteTags( self, file_service_id, tag_service_id, tag_ids ):
if len( tag_ids ) == 0:
return
if not isinstance( tag_ids, set ):
tag_ids = set( tag_ids )
#
# we always include all chained guys regardless of count
chained_tag_ids = self.modules_tag_display.GetChainsMembers( ClientTags.TAG_DISPLAY_ACTUAL, tag_service_id, tag_ids )
tag_ids = tag_ids.difference( chained_tag_ids )
#
tags_table_name = self.GetTagsTableName( file_service_id, tag_service_id )
subtags_fts4_table_name = self.GetSubtagsFTS4TableName( file_service_id, tag_service_id )
subtags_searchable_map_table_name = self.GetSubtagsSearchableMapTableName( file_service_id, tag_service_id )
integer_subtags_table_name = self.GetIntegerSubtagsTableName( file_service_id, tag_service_id )
with self._MakeTemporaryIntegerTable( tag_ids, 'tag_id' ) as temp_tag_ids_table_name:
# temp tag ids to tag definitions
subtag_ids = self._STS( self._Execute( 'SELECT subtag_id FROM {} CROSS JOIN {} USING ( tag_id );'.format( temp_tag_ids_table_name, tags_table_name ) ) )
#
self._ExecuteMany( 'DELETE FROM {} WHERE tag_id = ?;'.format( tags_table_name ), ( ( tag_id, ) for tag_id in tag_ids ) )
num_deleted = self._GetRowCount()
if num_deleted > 0:
if file_service_id == self.modules_services.combined_file_service_id:
self._Execute( 'UPDATE service_info SET info = info - ? WHERE service_id = ? AND info_type = ?;', ( num_deleted, tag_service_id, HC.SERVICE_INFO_NUM_TAGS ) )
#
# subtags may exist under other namespaces, so exclude those that do
with self._MakeTemporaryIntegerTable( subtag_ids, 'subtag_id' ) as temp_subtag_ids_table_name:
still_existing_subtag_ids = self._STS( self._Execute( 'SELECT subtag_id FROM {} CROSS JOIN {} USING ( subtag_id );'.format( temp_subtag_ids_table_name, tags_table_name ) ) )
deletee_subtag_ids = subtag_ids.difference( still_existing_subtag_ids )
self._ExecuteMany( 'DELETE FROM {} WHERE docid = ?;'.format( subtags_fts4_table_name ), ( ( subtag_id, ) for subtag_id in deletee_subtag_ids ) )
self._ExecuteMany( 'DELETE FROM {} WHERE subtag_id = ?;'.format( subtags_searchable_map_table_name ), ( ( subtag_id, ) for subtag_id in deletee_subtag_ids ) )
self._ExecuteMany( 'DELETE FROM {} WHERE subtag_id = ?;'.format( integer_subtags_table_name ), ( ( subtag_id, ) for subtag_id in deletee_subtag_ids ) )
def Drop( self, file_service_id, tag_service_id ):
tags_table_name = self.GetTagsTableName( file_service_id, tag_service_id )
self._Execute( 'DROP TABLE IF EXISTS {};'.format( tags_table_name ) )
subtags_fts4_table_name = self.GetSubtagsFTS4TableName( file_service_id, tag_service_id )
self._Execute( 'DROP TABLE IF EXISTS {};'.format( subtags_fts4_table_name ) )
subtags_searchable_map_table_name = self.GetSubtagsSearchableMapTableName( file_service_id, tag_service_id )
self._Execute( 'DROP TABLE IF EXISTS {};'.format( subtags_searchable_map_table_name ) )
integer_subtags_table_name = self.GetIntegerSubtagsTableName( file_service_id, tag_service_id )
self._Execute( 'DROP TABLE IF EXISTS {};'.format( integer_subtags_table_name ) )
def FilterExistingTagIds( self, file_service_id, tag_service_id, tag_ids_table_name ):
tags_table_name = self.GetTagsTableName( file_service_id, tag_service_id )
return self._STS( self._Execute( 'SELECT tag_id FROM {} CROSS JOIN {} USING ( tag_id );'.format( tag_ids_table_name, tags_table_name ) ) )
def Generate( self, file_service_id, tag_service_id ):
table_generation_dict = self._GetServiceTableGenerationDictSingle( file_service_id, tag_service_id )
for ( table_name, ( create_query_without_name, version_added ) ) in table_generation_dict.items():
self._Execute( create_query_without_name.format( table_name ) )
index_generation_dict = self._GetServiceIndexGenerationDictSingle( file_service_id, tag_service_id )
for ( table_name, columns, unique, version_added ) in self._FlattenIndexGenerationDict( index_generation_dict ):
self._CreateIndex( table_name, columns, unique = unique )
def GetIntegerSubtagsTableName( self, file_service_id, tag_service_id ):
if file_service_id == self.modules_services.combined_file_service_id:
integer_subtags_table_name = GenerateCombinedFilesIntegerSubtagsTableName( tag_service_id )
else:
if self.modules_services.FileServiceIsCoveredByAllLocalFiles( file_service_id ):
file_service_id = self.modules_services.combined_local_file_service_id
integer_subtags_table_name = GenerateSpecificIntegerSubtagsTableName( file_service_id, tag_service_id )
return integer_subtags_table_name
def GetMissingTagSearchServicePairs( self ):
return self._missing_tag_search_service_pairs
def GetNamespaceIdsFromWildcard( self, namespace_wildcard ):
if namespace_wildcard == '*':
return self._STL( self._Execute( 'SELECT namespace_id FROM namespaces;' ) )
elif '*' in namespace_wildcard:
like_param = ConvertWildcardToSQLiteLikeParameter( namespace_wildcard )
return self._STL( self._Execute( 'SELECT namespace_id FROM namespaces WHERE namespace LIKE ?;', ( like_param, ) ) )
else:
if self.modules_tags.NamespaceExists( namespace_wildcard ):
namespace_id = self.modules_tags.GetNamespaceId( namespace_wildcard )
return [ namespace_id ]
else:
return []
def GetQueryPhraseForTagIds( self, file_service_id, tag_service_id ):
tags_table_name = self.GetTagsTableName( file_service_id, tag_service_id )
return 'SELECT tag_id FROM {}'.format( tags_table_name )
def GetSubtagIdsFromWildcard( self, file_service_id: int, tag_service_id: int, subtag_wildcard, job_key = None ):
if tag_service_id == self.modules_services.combined_tag_service_id:
search_tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
else:
search_tag_service_ids = ( tag_service_id, )
result_subtag_ids = set()
for search_tag_service_id in search_tag_service_ids:
if '*' in subtag_wildcard:
subtags_fts4_table_name = self.GetSubtagsFTS4TableName( file_service_id, search_tag_service_id )
wildcard_has_fts4_searchable_characters = WildcardHasFTS4SearchableCharacters( subtag_wildcard )
if subtag_wildcard == '*':
# hellmode, but shouldn't be called normally
cursor = self._Execute( 'SELECT docid FROM {};'.format( subtags_fts4_table_name ) )
elif ClientSearch.IsComplexWildcard( subtag_wildcard ) or not wildcard_has_fts4_searchable_characters:
# FTS4 does not support complex wildcards, so instead we'll search our raw subtags
# however, since we want to search 'searchable' text, we use the 'searchable subtags map' to cross between real and searchable
like_param = ConvertWildcardToSQLiteLikeParameter( subtag_wildcard )
if subtag_wildcard.startswith( '*' ) or not wildcard_has_fts4_searchable_characters:
# this is a SCAN, but there we go
# a potential optimisation here, in future, is to store fts4 of subtags reversed, then for '*amus', we can just search that reverse cache for 'suma*'
# and this would only double the size of the fts4 cache, the largest cache in the whole db! a steal!
# it also would not fix '*amu*', but with some cleverness could speed up '*amus ar*'
query = 'SELECT docid FROM {} WHERE subtag LIKE ?;'.format( subtags_fts4_table_name )
cursor = self._Execute( query, ( like_param, ) )
else:
# we have an optimisation here--rather than searching all subtags for bl*ah, let's search all the bl* subtags for bl*ah!
prefix_fts4_wildcard = subtag_wildcard.split( '*' )[0]
prefix_fts4_wildcard_param = '"{}*"'.format( prefix_fts4_wildcard )
query = 'SELECT docid FROM {} WHERE subtag MATCH ? AND subtag LIKE ?;'.format( subtags_fts4_table_name )
cursor = self._Execute( query, ( prefix_fts4_wildcard_param, like_param ) )
else:
# we want the " " wrapping our search text to keep whitespace words connected and in order
# "samus ar*" should not match "around samus"
# simple 'sam*' style subtag, so we can search fts4 no prob
subtags_fts4_param = '"{}"'.format( subtag_wildcard )
cursor = self._Execute( 'SELECT docid FROM {} WHERE subtag MATCH ?;'.format( subtags_fts4_table_name ), ( subtags_fts4_param, ) )
cancelled_hook = None
if job_key is not None:
cancelled_hook = job_key.IsCancelled
loop_of_subtag_ids = self._STL( HydrusDB.ReadFromCancellableCursor( cursor, 1024, cancelled_hook = cancelled_hook ) )
else:
# old notes from before we had searchable subtag map. I deleted that map once, albeit in an older and less efficient form. *don't delete it again, it has use*
#
# NOTE: doing a subtag = 'blah' lookup on subtags_fts4 tables is ultra slow, lmao!
# attempts to match '/a/' to 'a' with clever FTS4 MATCHing (i.e. a MATCH on a*\b, then an '= a') proved not super successful
# in testing, it was still a bit slow. my guess is it is still iterating through all the nodes for ^a*, the \b just makes it a bit more efficient sometimes
# in tests '^a\b' was about twice as fast as 'a*', so the \b might not even be helping at all
# so, I decided to move back to a lean and upgraded searchable subtag map, and here we are
subtags_searchable_map_table_name = self.GetSubtagsSearchableMapTableName( file_service_id, search_tag_service_id )
searchable_subtag = subtag_wildcard
if self.modules_tags.SubtagExists( searchable_subtag ):
searchable_subtag_id = self.modules_tags.GetSubtagId( searchable_subtag )
loop_of_subtag_ids = self._STS( self._Execute( 'SELECT subtag_id FROM {} WHERE searchable_subtag_id = ?;'.format( subtags_searchable_map_table_name ), ( searchable_subtag_id, ) ) )
loop_of_subtag_ids.add( searchable_subtag_id )
else:
loop_of_subtag_ids = set()
if job_key is not None and job_key.IsCancelled():
return set()
result_subtag_ids.update( loop_of_subtag_ids )
return result_subtag_ids
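# Illustrative mapping of wildcard shapes to the lookups built above (examples only):
#   'samus'  -> searchable subtag map lookup, no FTS4/LIKE query needed
#   'sam*'   -> FTS4 MATCH '"sam*"' prefix search
#   'bl*ah'  -> FTS4 MATCH '"bl*"' narrowed by LIKE 'bl%ah'
#   '*amus'  -> LIKE '%amus' scan, since no usable FTS4 prefix exists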
def GetSubtagIdsFromWildcardIntoTable( self, file_service_id: int, tag_service_id: int, subtag_wildcard, subtag_id_table_name, job_key = None ):
if tag_service_id == self.modules_services.combined_tag_service_id:
search_tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
else:
search_tag_service_ids = ( tag_service_id, )
for search_tag_service_id in search_tag_service_ids:
if '*' in subtag_wildcard:
subtags_fts4_table_name = self.GetSubtagsFTS4TableName( file_service_id, search_tag_service_id )
wildcard_has_fts4_searchable_characters = WildcardHasFTS4SearchableCharacters( subtag_wildcard )
if subtag_wildcard == '*':
# hellmode, but shouldn't be called normally
cursor = self._Execute( 'SELECT docid FROM {};'.format( subtags_fts4_table_name ) )
elif ClientSearch.IsComplexWildcard( subtag_wildcard ) or not wildcard_has_fts4_searchable_characters:
# FTS4 does not support complex wildcards, so instead we'll search our raw subtags
# however, since we want to search 'searchable' text, we use the 'searchable subtags map' to cross between real and searchable
like_param = ConvertWildcardToSQLiteLikeParameter( subtag_wildcard )
if subtag_wildcard.startswith( '*' ) or not wildcard_has_fts4_searchable_characters:
# this is a SCAN, but there we go
# a potential optimisation here, in future, is to store fts4 of subtags reversed, then for '*amus', we can just search that reverse cache for 'suma*'
# and this would only double the size of the fts4 cache, the largest cache in the whole db! a steal!
# it also would not fix '*amu*', but with some cleverness could speed up '*amus ar*'
query = 'SELECT docid FROM {} WHERE subtag LIKE ?;'.format( subtags_fts4_table_name )
cursor = self._Execute( query, ( like_param, ) )
else:
# we have an optimisation here--rather than searching all subtags for bl*ah, let's search all the bl* subtags for bl*ah!
prefix_fts4_wildcard = subtag_wildcard.split( '*' )[0]
prefix_fts4_wildcard_param = '"{}*"'.format( prefix_fts4_wildcard )
query = 'SELECT docid FROM {} WHERE subtag MATCH ? AND subtag LIKE ?;'.format( subtags_fts4_table_name )
cursor = self._Execute( query, ( prefix_fts4_wildcard_param, like_param ) )
else:
# we want the " " wrapping our search text to keep whitespace words connected and in order
# "samus ar*" should not match "around samus"
# simple 'sam*' style subtag, so we can search fts4 no prob
subtags_fts4_param = '"{}"'.format( subtag_wildcard )
cursor = self._Execute( 'SELECT docid FROM {} WHERE subtag MATCH ?;'.format( subtags_fts4_table_name ), ( subtags_fts4_param, ) )
cancelled_hook = None
if job_key is not None:
cancelled_hook = job_key.IsCancelled
loop_of_subtag_id_tuples = HydrusDB.ReadFromCancellableCursor( cursor, 1024, cancelled_hook = cancelled_hook )
self._ExecuteMany( 'INSERT OR IGNORE INTO {} ( subtag_id ) VALUES ( ? );'.format( subtag_id_table_name ), loop_of_subtag_id_tuples )
else:
# old notes from before we had searchable subtag map. I deleted that map once, albeit in an older and less efficient form. *don't delete it again, it has use*
#
# NOTE: doing a subtag = 'blah' lookup on subtags_fts4 tables is ultra slow, lmao!
# attempts to match '/a/' to 'a' with clever FTS4 MATCHing (i.e. a MATCH on a*\b, then an '= a') proved not super successful
# in testing, it was still a bit slow. my guess is it is still iterating through all the nodes for ^a*, the \b just makes it a bit more efficient sometimes
# in tests '^a\b' was about twice as fast as 'a*', so the \b might not even be helping at all
# so, I decided to move back to a lean and upgraded searchable subtag map, and here we are
searchable_subtag = subtag_wildcard
if self.modules_tags.SubtagExists( searchable_subtag ):
searchable_subtag_id = self.modules_tags.GetSubtagId( searchable_subtag )
self._Execute( 'INSERT OR IGNORE INTO {} ( subtag_id ) VALUES ( ? );'.format( subtag_id_table_name ), ( searchable_subtag_id, ) )
subtags_searchable_map_table_name = self.GetSubtagsSearchableMapTableName( file_service_id, search_tag_service_id )
self._Execute( 'INSERT OR IGNORE INTO {} ( subtag_id ) SELECT subtag_id FROM {} WHERE searchable_subtag_id = ?;'.format( subtag_id_table_name, subtags_searchable_map_table_name ), ( searchable_subtag_id, ) )
if job_key is not None and job_key.IsCancelled():
self._Execute( 'DELETE FROM {};'.format( subtag_id_table_name ) )
return
def GetSubtagsFTS4TableName( self, file_service_id, tag_service_id ):
if file_service_id == self.modules_services.combined_file_service_id:
subtags_fts4_table_name = GenerateCombinedFilesSubtagsFTS4TableName( tag_service_id )
else:
if self.modules_services.FileServiceIsCoveredByAllLocalFiles( file_service_id ):
file_service_id = self.modules_services.combined_local_file_service_id
subtags_fts4_table_name = GenerateSpecificSubtagsFTS4TableName( file_service_id, tag_service_id )
return subtags_fts4_table_name
def GetSubtagsSearchableMapTableName( self, file_service_id, tag_service_id ):
if file_service_id == self.modules_services.combined_file_service_id:
subtags_searchable_map_table_name = GenerateCombinedFilesSubtagsSearchableMapTableName( tag_service_id )
else:
if self.modules_services.FileServiceIsCoveredByAllLocalFiles( file_service_id ):
file_service_id = self.modules_services.combined_local_file_service_id
subtags_searchable_map_table_name = GenerateSpecificSubtagsSearchableMapTableName( file_service_id, tag_service_id )
return subtags_searchable_map_table_name
def GetTablesAndColumnsThatUseDefinitions( self, content_type: int ) -> typing.List[ typing.Tuple[ str, str ] ]:
tables_and_columns = []
if content_type == HC.CONTENT_TYPE_TAG:
tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
for tag_service_id in tag_service_ids:
table_dict = {}
file_service_ids = list( self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES ) )
file_service_ids.append( self.modules_services.combined_file_service_id )
for file_service_id in file_service_ids:
tags_table_name = self.GetTagsTableName( file_service_id, tag_service_id )
subtags_fts4_table_name = self.GetSubtagsFTS4TableName( file_service_id, tag_service_id )
tables_and_columns.append( ( tags_table_name, 'tag_id' ) )
tables_and_columns.append( ( subtags_fts4_table_name, 'docid' ) )
return tables_and_columns
def GetTagAsNumSubtagIds( self, file_service_id, tag_service_id, operator, num ):
integer_subtags_table_name = self.GetIntegerSubtagsTableName( file_service_id, tag_service_id )
return self._STS( self._Execute( 'SELECT subtag_id FROM {} WHERE integer_subtag {} {};'.format( integer_subtags_table_name, operator, num ) ) )
def GetTagCount( self, file_service_id, tag_service_id ):
tags_table_name = self.GetTagsTableName( file_service_id, tag_service_id )
( count, ) = self._Execute( 'SELECT COUNT( * ) FROM {};'.format( tags_table_name ) ).fetchone()
return count
def GetTagsTableName( self, file_service_id, tag_service_id ):
if file_service_id == self.modules_services.combined_file_service_id:
tags_table_name = GenerateCombinedFilesTagsTableName( tag_service_id )
else:
if self.modules_services.FileServiceIsCoveredByAllLocalFiles( file_service_id ):
file_service_id = self.modules_services.combined_local_file_service_id
tags_table_name = GenerateSpecificTagsTableName( file_service_id, tag_service_id )
return tags_table_name
def HasTag( self, file_service_id, tag_service_id, tag_id ):
tags_table_name = self.GetTagsTableName( file_service_id, tag_service_id )
result = self._Execute( 'SELECT 1 FROM {} WHERE tag_id = ?;'.format( tags_table_name ), ( tag_id, ) ).fetchone()
return result is not None
def RegenerateSearchableSubtagMap( self, file_service_id, tag_service_id, status_hook = None ):
subtags_fts4_table_name = self.GetSubtagsFTS4TableName( file_service_id, tag_service_id )
subtags_searchable_map_table_name = self.GetSubtagsSearchableMapTableName( file_service_id, tag_service_id )
self._Execute( 'DELETE FROM {};'.format( subtags_searchable_map_table_name ) )
query = 'SELECT docid FROM {};'.format( subtags_fts4_table_name )
BLOCK_SIZE = 10000
for ( group_of_subtag_ids, num_done, num_to_do ) in HydrusDB.ReadLargeIdQueryInSeparateChunks( self._c, query, BLOCK_SIZE ):
for subtag_id in group_of_subtag_ids:
result = self._Execute( 'SELECT subtag FROM subtags WHERE subtag_id = ?;', ( subtag_id, ) ).fetchone()
if result is None:
continue
( subtag, ) = result
searchable_subtag = ClientSearch.ConvertSubtagToSearchable( subtag )
if searchable_subtag != subtag:
searchable_subtag_id = self.modules_tags.GetSubtagId( searchable_subtag )
self._Execute( 'INSERT OR IGNORE INTO {} ( subtag_id, searchable_subtag_id ) VALUES ( ?, ? );'.format( subtags_searchable_map_table_name ), ( subtag_id, searchable_subtag_id ) )
message = HydrusData.ConvertValueRangeToPrettyString( num_done, num_to_do )
HG.client_controller.frame_splash_status.SetSubtext( message )
if status_hook is not None:
status_hook( message )
def RepopulateMissingSubtags( self, file_service_id, tag_service_id ):
tags_table_name = self.GetTagsTableName( file_service_id, tag_service_id )
subtags_fts4_table_name = self.GetSubtagsFTS4TableName( file_service_id, tag_service_id )
subtags_searchable_map_table_name = self.GetSubtagsSearchableMapTableName( file_service_id, tag_service_id )
integer_subtags_table_name = self.GetIntegerSubtagsTableName( file_service_id, tag_service_id )
missing_subtag_ids = self._STS( self._Execute( 'SELECT subtag_id FROM {} EXCEPT SELECT docid FROM {};'.format( tags_table_name, subtags_fts4_table_name ) ) )
for subtag_id in missing_subtag_ids:
result = self._Execute( 'SELECT subtag FROM subtags WHERE subtag_id = ?;', ( subtag_id, ) ).fetchone()
if result is None:
continue
( subtag, ) = result
searchable_subtag = ClientSearch.ConvertSubtagToSearchable( subtag )
if searchable_subtag != subtag:
searchable_subtag_id = self.modules_tags.GetSubtagId( searchable_subtag )
self._Execute( 'INSERT OR IGNORE INTO {} ( subtag_id, searchable_subtag_id ) VALUES ( ?, ? );'.format( subtags_searchable_map_table_name ), ( subtag_id, searchable_subtag_id ) )
#
self._Execute( 'INSERT OR IGNORE INTO {} ( docid, subtag ) VALUES ( ?, ? );'.format( subtags_fts4_table_name ), ( subtag_id, searchable_subtag ) )
if subtag.isdecimal():
try:
integer_subtag = int( subtag )
if CanCacheInteger( integer_subtag ):
self._Execute( 'INSERT OR IGNORE INTO {} ( subtag_id, integer_subtag ) VALUES ( ?, ? );'.format( integer_subtags_table_name ), ( subtag_id, integer_subtag ) )
except ValueError:
pass
if len( missing_subtag_ids ) > 0:
HydrusData.ShowText( 'Repopulated {} missing subtags for {}_{}.'.format( HydrusData.ToHumanInt( len( missing_subtag_ids ) ), file_service_id, tag_service_id ) )
| 44.686084 | 227 | 0.584154 |
6724e8ff585887f32ee6b583ec5e235afe9fa7bc | 8,396 | py | Python | plugins/IngestAPI/DairycompDataIngest/dc_event_import.py | OscarTHZhang/docker-airflow | d1e70b3084b1d80cefac6c42111a9a69df42c66a | [
"Apache-2.0"
] | null | null | null | plugins/IngestAPI/DairycompDataIngest/dc_event_import.py | OscarTHZhang/docker-airflow | d1e70b3084b1d80cefac6c42111a9a69df42c66a | [
"Apache-2.0"
] | null | null | null | plugins/IngestAPI/DairycompDataIngest/dc_event_import.py | OscarTHZhang/docker-airflow | d1e70b3084b1d80cefac6c42111a9a69df42c66a | [
"Apache-2.0"
] | null | null | null | import logging
from sqlalchemy.sql import text
from IngestAPI.DairycompDataIngest import dc_file_fix
import ntpath
__author__ = "Steven Wangen, Oscar Zhang"
__version__ = "1.0.2"
__email__ = "srwangen@wisc.edu, tzhang383@wisc.edu"
__status__ = "Development"
logger = logging.getLogger(__name__)
def import_events(filename, filelist, db_engine):
fixed_filename = check_for_fixed_file(filename, filelist)
if fixed_filename is not None:
parse_events(fixed_filename, db_engine)
def check_for_fixed_file(filename, filelist):
    # if this file is already a fixed file, use it as-is
if ntpath.basename(filename).split('.')[-1] == 'fixed':
return filename
else:
        # otherwise, if there isn't an equivalent fixed file in the list
if filename + ".fixed" not in filelist:
# create a fixed file
return dc_file_fix.fix_event_file(filename)
else:
            # it'll get to the fixed one on its own
return None
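# parse_events (below) drives the whole import pipeline: create a temp import table,
# bulk-load the fixed CSV into it, stage a typed/cleaned copy, merge that stage into
# dairy_comp.events, then drop both temp tables.
# Example driver (paths and connection string are placeholders):
#   from sqlalchemy import create_engine
#   engine = create_engine('postgresql://user:password@host/dairy')
#   import_events('/data/events.csv', ['/data/events.csv'], engine)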
def parse_events(event_csv, db_engine):
# import data
temp_event_table_name = 'dairy_comp.temp_import_events'
create_temp_event_table(temp_event_table_name, db_engine)
populate_table_from_csv(temp_event_table_name, event_csv, db_engine)
# stage and transfer data
temp_staging_table_name = 'dairy_comp.temp_staging_events'
create_temp_staging_event_table(temp_staging_table_name, db_engine)
transfer_events_moving_window(temp_event_table_name, temp_staging_table_name, db_engine)
perform_final_transfer(db_engine)
# drop temp tables
drop_table(temp_event_table_name, db_engine)
drop_table(temp_staging_table_name, db_engine)
def create_temp_event_table(table_name, db_engine):
# drop table if it exists
drop_table(table_name, db_engine)
# create the new table
create_temp_table_statement = text( \
'CREATE TABLE ' + table_name + ' ('\
'id INT,'\
'bdat VARCHAR(12),'\
'lact INT,'\
'rc INT,'\
'pen INT,'\
'event VARCHAR(32),'\
'dim INT,'\
'date VARCHAR(12),'\
'remark VARCHAR(32),'\
'r VARCHAR(12),'\
't INT,'\
'b VARCHAR(12)'\
');'
)
create_table(db_engine, table_name, create_temp_table_statement)
def create_temp_staging_event_table(table_name, db_engine):
# drop table if it exists
drop_table(table_name, db_engine)
# create the new table
create_temp_table_statement = text( \
'CREATE TABLE ' + table_name + ' ('\
'id INT,'\
'bdat DATE,'\
'lact INT,'\
'rc INT,'\
'pen INT,'\
'event VARCHAR(32),'\
'dim INT,'\
'date DATE,'\
'remark VARCHAR(32),'\
'r VARCHAR(12),'\
't INT,'\
'b VARCHAR(12)'\
');'
)
create_table(db_engine, table_name, create_temp_table_statement)
def populate_table_from_csv(table_name, csv_location, db_engine):
# 'copy_from' example from https://www.dataquest.io/blog/loading-data-into-postgres/
# adapted to sqlalchemy using https://stackoverflow.com/questions/13125236/sqlalchemy-psycopg2-and-postgresql-copy
with db_engine.connect() as con:
# isolate a connection
connection = db_engine.connect().connection
# get the cursor
cursor = connection.cursor()
try:
with open(csv_location, 'r') as f:
next(f) # Skip the header row.
cursor.copy_from(f, table_name, sep=',', null='')
connection.commit()
except Exception as e:
logger.error("Error importing the table " + table_name + " in " + db_engine.url.database + " database from " + csv_location + "!")
logger.error(e.args)
exit(1)
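# Note: cursor.copy_from() streams the file through PostgreSQL's COPY protocol, which is
# why the raw psycopg2 connection is unwrapped from the SQLAlchemy engine above.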
def transfer_events_moving_window(from_table, to_table, db_engine):
transfer_statement = "insert into " + to_table + " (id, bdat, lact, rc, pen, event, dim, date, remark, r, t, b) "\
"select distinct id, to_date(bdat,'MM/DD/YY'), lact, rc, pen, trim(event), dim, to_date(date,'MM/DD/YY'), trim(remark), trim(r), t, trim(b) "\
"from " + from_table + " "\
"where to_date(date,'MM/DD/YY') > ((select max(to_date(date,'MM/DD/YY')) from dairy_comp.temp_import_events) - integer '7');"
with db_engine.connect() as con:
try:
con.execute(transfer_statement)
except Exception as e:
logger.error("Error inserting data from the " + from_table + " table "\
"to the " + to_table + " table in the " + db_engine.url.database + " database!")
logger.error(e.args)
exit(1)
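# The WHERE clause above keeps only rows dated within 7 days of the newest date in the
# import file, so each run only refreshes that trailing window of events.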
def transfer_events(from_table, to_table, db_engine):
transfer_statement = "insert into " + to_table + " (id, bdat, lact, rc, pen, event, dim, date, remark, r, t, b) "\
"select distinct id, to_date(bdat,'MM/DD/YY'), lact, rc, pen, trim(event), dim, to_date(date,'MM/DD/YY'), trim(remark), trim(r), t, trim(b) "\
"from " + from_table + " "\
"where to_date(date,'MM/DD/YY') > ((select max(to_date(date,'MM/DD/YY')) from dairy_comp.temp_import_events) - integer '7');"
with db_engine.connect() as con:
try:
con.execute(transfer_statement)
except Exception as e:
logger.error("Error inserting data from the " + from_table + " table "\
"to the " + to_table + " table in the " + db_engine.url.database + " database!")
logger.error(e.args)
exit(1)
def perform_final_transfer(db_engine):
    # delete pre-existing records from dairy_comp.events
delete_statement = 'delete from dairy_comp.events where date in (select distinct date from dairy_comp.temp_staging_events);'
with db_engine.connect() as con:
try:
logger.info("Deleting events from dairy_comp.events where date in dairy_comp.temp_staging_events...")
logger.debug('delete_statement = ' + str(delete_statement))
con.execute(delete_statement)
except Exception as e:
logger.error("Error deleting overlapping dates from dairy_comp.events!")
logger.error(e.args)
exit(1)
# final transfer from staging table to main event table
insert_statement = 'insert into dairy_comp.events (id, bdat, lact, rc, pen, event, dim, date, remark, r, t, b) '\
'select id, bdat, lact, rc, pen, event, dim, date, remark, r, t, b '\
'from dairy_comp.temp_staging_events;'
with db_engine.connect() as con:
try:
logger.info("Transferring from dairy_comp.temp_staging_events to dairy_comp.events in " + db_engine.url.database + " database...")
logger.debug('insert_statement = ' + str(insert_statement))
con.execute(insert_statement)
except Exception as e:
logger.error("Error transferring from dairy_comp.temp_staging_events to dairy_comp.events in " + db_engine.url.database + " database!")
logger.error(e.args)
exit(1)
def drop_table(table_name, db_engine):
if db_engine.has_table(table_name.split('.')[1], schema=table_name.split('.')[0]):
logger.debug("Deleting old (pre-existing) table: " + table_name + "...")
statement = "drop table if exists {};"
with db_engine.connect() as con:
try:
con.execute(statement.format(table_name))
except Exception as e:
logger.error("dc_event_import.drop_table(): Error deleting table " + table_name + " from database!")
logger.error(e.args)
exit(1)
def create_table(db_engine, table_name, sql_statement):
# check and delete if table already exists
drop_table(table_name, db_engine)
# create new temp table
with db_engine.connect() as con:
try:
logger.info("Creating table " + table_name + " in " + db_engine.url.database + " database...")
logger.debug('create_temp_table_statement = ' + str(sql_statement))
con.execute(sql_statement)
except Exception as e:
logger.error("Error creating the table " + table_name + " in " + db_engine.url.database + " database!")
logger.error(e.args)
exit(1) | 38.87037 | 147 | 0.621963 |
d7391b070e7543ba75601a531d534f0e1e7562c1 | 2,404 | py | Python | tasks/dm_math.py | RobertCsordas/modules | efdb8790b074862581e035c9ab5bf889440a8023 | [
"BSD-3-Clause"
] | 22 | 2020-10-19T07:40:01.000Z | 2022-03-24T15:26:34.000Z | tasks/dm_math.py | xdever/modules | efdb8790b074862581e035c9ab5bf889440a8023 | [
"BSD-3-Clause"
] | 6 | 2020-10-19T23:57:23.000Z | 2022-03-12T00:51:58.000Z | tasks/dm_math.py | xdever/modules | efdb8790b074862581e035c9ab5bf889440a8023 | [
"BSD-3-Clause"
] | 5 | 2020-11-30T00:17:34.000Z | 2021-06-26T11:43:24.000Z | import dataset
import torch
from .transformer_task import TransformerTask
from .task import TaskDataset
class DeepmindMathTask(TransformerTask):
TRAIN_NUM_WORKERS = 2
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def create_datasets(self):
self.batch_dim = 1
self.train_set = dataset.DeepmindMathDataset(self.helper.opt.dm_math.tasks, sets=[f"train_{s}" \
for s in self.helper.opt.dm_math.train_splits])
self.valid_sets.interpolate = dataset.DeepmindMathDataset(self.helper.opt.dm_math.tasks, sets=["interpolate"])
if len(self.helper.opt.dm_math.tasks)==1:
self.valid_sets.iid = dataset.DeepmindMathDataset(self.helper.opt.dm_math.tasks, sets=[f"test_{s}" \
for s in self.helper.opt.dm_math.train_splits])
self.valid_sets.hard = dataset.DeepmindMathDataset(self.helper.opt.dm_math.tasks, sets=["test_hard"])
else:
for task in self.helper.opt.dm_math.tasks:
self.valid_sets[f"iid_{task}"] = dataset.DeepmindMathDataset([task], sets=[f"test_{s}" for s in
self.helper.opt.dm_math.train_splits])
self.valid_sets[f"hard_{task}"] = dataset.DeepmindMathDataset([task], sets=["test_hard"])
extrapolate = dataset.DeepmindMathDataset(self.helper.opt.dm_math.tasks, sets=["extrapolate"])
if len(extrapolate)!=0:
self.valid_sets.extrapolate = extrapolate
self.tasks.append(TaskDataset("hard",
dataset.DeepmindMathDataset(self.helper.opt.dm_math.tasks, sets=[f"train_{s}" \
for s in self.helper.opt.dm_math.masks_splits]),
dataset.DeepmindMathDataset(self.helper.opt.dm_math.tasks,
sets=[f"test_{s}" for s in self.helper.opt.dm_math.masks_splits])
))
def create_optimizer(self):
self.set_optimizer(torch.optim.Adam(self.model.model_parameters.values(), self.helper.opt.lr, eps=1e-9,
betas=(0.9, 0.995)))
def get_n_mask_samples(self):
return 8 | 51.148936 | 118 | 0.574043 |
ffeca635f0b2d8e8f11612e4bb4b0ac9c4a262d0 | 197 | py | Python | output/models/ms_data/datatypes/facets/unsigned_int/unsigned_int_min_exclusive003_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/ms_data/datatypes/facets/unsigned_int/unsigned_int_min_exclusive003_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/ms_data/datatypes/facets/unsigned_int/unsigned_int_min_exclusive003_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from output.models.ms_data.datatypes.facets.unsigned_int.unsigned_int_min_exclusive003_xsd.unsigned_int_min_exclusive003 import (
FooType,
Test,
)
__all__ = [
"FooType",
"Test",
]
| 19.7 | 129 | 0.746193 |
0eb1db0937ef83d93608896477a6a1c5fecdfc1e | 532 | py | Python | openpeerpower/components/vacuum/group.py | pcaston/core | e74d946cef7a9d4e232ae9e0ba150d18018cfe33 | [
"Apache-2.0"
] | 1 | 2021-07-08T20:09:55.000Z | 2021-07-08T20:09:55.000Z | openpeerpower/components/vacuum/group.py | pcaston/core | e74d946cef7a9d4e232ae9e0ba150d18018cfe33 | [
"Apache-2.0"
] | 47 | 2021-02-21T23:43:07.000Z | 2022-03-31T06:07:10.000Z | openpeerpower/components/vacuum/group.py | OpenPeerPower/core | f673dfac9f2d0c48fa30af37b0a99df9dd6640ee | [
"Apache-2.0"
] | null | null | null | """Describe group states."""
from openpeerpower.components.group import GroupIntegrationRegistry
from openpeerpower.const import STATE_OFF, STATE_ON
from openpeerpower.core import OpenPeerPower, callback
from . import STATE_CLEANING, STATE_ERROR, STATE_RETURNING
@callback
def async_describe_on_off_states(
opp: OpenPeerPower, registry: GroupIntegrationRegistry
) -> None:
"""Describe group on off states."""
registry.on_off_states(
{STATE_CLEANING, STATE_ON, STATE_RETURNING, STATE_ERROR}, STATE_OFF
)
| 28 | 75 | 0.783835 |
c3937c011e1dac24f1eed970208502c445acd859 | 909 | py | Python | dvcli/user/collection.py | gdcc/dvcli | 25aabb31db631208411d3cdfefcabe13201bbe9a | [
"Apache-2.0"
] | 2 | 2020-02-08T12:59:31.000Z | 2020-02-10T15:22:29.000Z | dvcli/user/collection.py | GlobalDataverseCommunityConsortium/dvcli | 25aabb31db631208411d3cdfefcabe13201bbe9a | [
"Apache-2.0"
] | 4 | 2020-02-08T13:13:01.000Z | 2020-07-22T20:17:46.000Z | dvcli/user/collection.py | GlobalDataverseCommunityConsortium/dvcli | 25aabb31db631208411d3cdfefcabe13201bbe9a | [
"Apache-2.0"
] | null | null | null | """
(C) Copyright 2020 Forschungszentrum Jülich GmbH and others.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import click
@click.group()
@click.pass_context
def collection(ctx):
"""
Basic Dataverse collection tasks.
"""
# ensure that ctx.obj exists and is a dict
ctx.ensure_object(dict)
# @collection.command(name="list-all")
# def list_all():
# """
# List all dataverse accessible to you.
# """
| 29.322581 | 82 | 0.738174 |
eb3e9bc3f8ecdd090c604f7835ef22c6fc7f51a2 | 15,288 | py | Python | pysnmp/MITEL-IPFILTER-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 11 | 2021-02-02T16:27:16.000Z | 2021-08-31T06:22:49.000Z | pysnmp/MITEL-IPFILTER-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 75 | 2021-02-24T17:30:31.000Z | 2021-12-08T00:01:18.000Z | pysnmp/MITEL-IPFILTER-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module MITEL-IPFILTER-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/MITEL-IPFILTER-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 20:03:00 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ValueRangeConstraint, ConstraintsIntersection, SingleValueConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ConstraintsUnion")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Integer32, IpAddress, MibIdentifier, Bits, TimeTicks, Unsigned32, enterprises, NotificationType, iso, Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity, Counter64, Counter32, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "Integer32", "IpAddress", "MibIdentifier", "Bits", "TimeTicks", "Unsigned32", "enterprises", "NotificationType", "iso", "Gauge32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity", "Counter64", "Counter32", "ModuleIdentity")
TextualConvention, DisplayString, RowStatus = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString", "RowStatus")
mitelIpGrpFilterGroup = ModuleIdentity((1, 3, 6, 1, 4, 1, 1027, 4, 8, 1, 1, 1))
mitelIpGrpFilterGroup.setRevisions(('2003-03-24 09:25', '1999-03-01 00:00',))
if mibBuilder.loadTexts: mitelIpGrpFilterGroup.setLastUpdated('200303240925Z')
if mibBuilder.loadTexts: mitelIpGrpFilterGroup.setOrganization('MITEL Corporation')
mitel = MibIdentifier((1, 3, 6, 1, 4, 1, 1027))
mitelProprietary = MibIdentifier((1, 3, 6, 1, 4, 1, 1027, 4))
mitelPropIpNetworking = MibIdentifier((1, 3, 6, 1, 4, 1, 1027, 4, 8))
mitelIpNetRouter = MibIdentifier((1, 3, 6, 1, 4, 1, 1027, 4, 8, 1))
mitelRouterIpGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 1027, 4, 8, 1, 1))
mitelIdentification = MibIdentifier((1, 3, 6, 1, 4, 1, 1027, 1))
mitelIdCallServers = MibIdentifier((1, 3, 6, 1, 4, 1, 1027, 1, 2))
mitelIdCsIpera1000 = MibIdentifier((1, 3, 6, 1, 4, 1, 1027, 1, 2, 4))
mitelFltGrpAccessRestrictEnable = MibScalar((1, 3, 6, 1, 4, 1, 1027, 4, 8, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mitelFltGrpAccessRestrictEnable.setStatus('current')
mitelFltGrpLogicalTable = MibTable((1, 3, 6, 1, 4, 1, 1027, 4, 8, 1, 1, 1, 2), )
if mibBuilder.loadTexts: mitelFltGrpLogicalTable.setStatus('current')
mitelFltGrpLogicalEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1027, 4, 8, 1, 1, 1, 2, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: mitelFltGrpLogicalEntry.setStatus('current')
mitelLogTableAccessDef = MibTableColumn((1, 3, 6, 1, 4, 1, 1027, 4, 8, 1, 1, 1, 2, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("filter", 1), ("forward", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mitelLogTableAccessDef.setStatus('current')
mitelLogTableAllowSrcRouting = MibTableColumn((1, 3, 6, 1, 4, 1, 1027, 4, 8, 1, 1, 1, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone(2)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mitelLogTableAllowSrcRouting.setStatus('current')
mitelFltGrpAccessRestrictTable = MibTable((1, 3, 6, 1, 4, 1, 1027, 4, 8, 1, 1, 1, 3), )
if mibBuilder.loadTexts: mitelFltGrpAccessRestrictTable.setStatus('current')
mitelFltGrpAccessRestrictEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1027, 4, 8, 1, 1, 1, 3, 1), ).setIndexNames((0, "MITEL-IPFILTER-MIB", "mitelAccResTableIfIndex"), (0, "MITEL-IPFILTER-MIB", "mitelAccResTableOrder"))
if mibBuilder.loadTexts: mitelFltGrpAccessRestrictEntry.setStatus('current')
mitelAccResTableIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 1027, 4, 8, 1, 1, 1, 3, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mitelAccResTableIfIndex.setStatus('current')
mitelAccResTableOrder = MibTableColumn((1, 3, 6, 1, 4, 1, 1027, 4, 8, 1, 1, 1, 3, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mitelAccResTableOrder.setStatus('current')
mitelAccResTableType = MibTableColumn((1, 3, 6, 1, 4, 1, 1027, 4, 8, 1, 1, 1, 3, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("filter", 1), ("forward", 2), ("neither", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mitelAccResTableType.setStatus('current')
mitelAccResTableSrcAddrFrom = MibTableColumn((1, 3, 6, 1, 4, 1, 1027, 4, 8, 1, 1, 1, 3, 1, 4), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mitelAccResTableSrcAddrFrom.setStatus('current')
mitelAccResTableSrcAddrTo = MibTableColumn((1, 3, 6, 1, 4, 1, 1027, 4, 8, 1, 1, 1, 3, 1, 5), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mitelAccResTableSrcAddrTo.setStatus('current')
mitelAccResTableSrcAddrOutsideRange = MibTableColumn((1, 3, 6, 1, 4, 1, 1027, 4, 8, 1, 1, 1, 3, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone(2)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mitelAccResTableSrcAddrOutsideRange.setStatus('current')
mitelAccResTableDstAddrFrom = MibTableColumn((1, 3, 6, 1, 4, 1, 1027, 4, 8, 1, 1, 1, 3, 1, 7), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mitelAccResTableDstAddrFrom.setStatus('current')
mitelAccResTableDstAddrTo = MibTableColumn((1, 3, 6, 1, 4, 1, 1027, 4, 8, 1, 1, 1, 3, 1, 8), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mitelAccResTableDstAddrTo.setStatus('current')
mitelAccResTableDstAddrOutsideRange = MibTableColumn((1, 3, 6, 1, 4, 1, 1027, 4, 8, 1, 1, 1, 3, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone(2)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mitelAccResTableDstAddrOutsideRange.setStatus('current')
mitelAccResTableProtocolFrom = MibTableColumn((1, 3, 6, 1, 4, 1, 1027, 4, 8, 1, 1, 1, 3, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mitelAccResTableProtocolFrom.setStatus('current')
mitelAccResTableProtocolTo = MibTableColumn((1, 3, 6, 1, 4, 1, 1027, 4, 8, 1, 1, 1, 3, 1, 11), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mitelAccResTableProtocolTo.setStatus('current')
mitelAccResTableProtocolOutsideRange = MibTableColumn((1, 3, 6, 1, 4, 1, 1027, 4, 8, 1, 1, 1, 3, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone(2)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mitelAccResTableProtocolOutsideRange.setStatus('current')
mitelAccResTableSrcPortFrom = MibTableColumn((1, 3, 6, 1, 4, 1, 1027, 4, 8, 1, 1, 1, 3, 1, 13), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mitelAccResTableSrcPortFrom.setStatus('current')
mitelAccResTableSrcPortTo = MibTableColumn((1, 3, 6, 1, 4, 1, 1027, 4, 8, 1, 1, 1, 3, 1, 14), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mitelAccResTableSrcPortTo.setStatus('current')
mitelAccResTableSrcPortOutsideRange = MibTableColumn((1, 3, 6, 1, 4, 1, 1027, 4, 8, 1, 1, 1, 3, 1, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone(2)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mitelAccResTableSrcPortOutsideRange.setStatus('current')
mitelAccResTableDstPortFrom = MibTableColumn((1, 3, 6, 1, 4, 1, 1027, 4, 8, 1, 1, 1, 3, 1, 16), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mitelAccResTableDstPortFrom.setStatus('current')
mitelAccResTableDstPortTo = MibTableColumn((1, 3, 6, 1, 4, 1, 1027, 4, 8, 1, 1, 1, 3, 1, 17), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mitelAccResTableDstPortTo.setStatus('current')
mitelAccResTableDstPortOutsideRange = MibTableColumn((1, 3, 6, 1, 4, 1, 1027, 4, 8, 1, 1, 1, 3, 1, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2))).clone(2)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mitelAccResTableDstPortOutsideRange.setStatus('current')
mitelAccResTableTcpSyn = MibTableColumn((1, 3, 6, 1, 4, 1, 1027, 4, 8, 1, 1, 1, 3, 1, 19), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("any", 1), ("zero", 2), ("one", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mitelAccResTableTcpSyn.setStatus('current')
mitelAccResTableTcpAck = MibTableColumn((1, 3, 6, 1, 4, 1, 1027, 4, 8, 1, 1, 1, 3, 1, 20), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("any", 1), ("zero", 2), ("one", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mitelAccResTableTcpAck.setStatus('current')
mitelAccResTableTcpFin = MibTableColumn((1, 3, 6, 1, 4, 1, 1027, 4, 8, 1, 1, 1, 3, 1, 21), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("any", 1), ("zero", 2), ("one", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mitelAccResTableTcpFin.setStatus('current')
mitelAccResTableTcpRst = MibTableColumn((1, 3, 6, 1, 4, 1, 1027, 4, 8, 1, 1, 1, 3, 1, 22), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("any", 1), ("zero", 2), ("one", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mitelAccResTableTcpRst.setStatus('current')
mitelAccResTableMatchIn = MibTableColumn((1, 3, 6, 1, 4, 1, 1027, 4, 8, 1, 1, 1, 3, 1, 23), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mitelAccResTableMatchIn.setStatus('current')
mitelAccResTableMatchOut = MibTableColumn((1, 3, 6, 1, 4, 1, 1027, 4, 8, 1, 1, 1, 3, 1, 24), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mitelAccResTableMatchOut.setStatus('current')
mitelAccResTableLog = MibTableColumn((1, 3, 6, 1, 4, 1, 1027, 4, 8, 1, 1, 1, 3, 1, 25), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mitelAccResTableLog.setStatus('current')
mitelAccResTableTrap = MibTableColumn((1, 3, 6, 1, 4, 1, 1027, 4, 8, 1, 1, 1, 3, 1, 26), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: mitelAccResTableTrap.setStatus('current')
mitelAccResTableStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 1027, 4, 8, 1, 1, 1, 3, 1, 27), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: mitelAccResTableStatus.setStatus('current')
mitelAccResTableCount = MibTableColumn((1, 3, 6, 1, 4, 1, 1027, 4, 8, 1, 1, 1, 3, 1, 28), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mitelAccResTableCount.setStatus('current')
mitelIpera1000Notifications = NotificationGroup((1, 3, 6, 1, 4, 1, 1027, 1, 2, 4, 0)).setObjects(("MITEL-IPFILTER-MIB", "mitelAccResTableTrapped"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
mitelIpera1000Notifications = mitelIpera1000Notifications.setStatus('current')
mitelAccResTableTrapped = NotificationType((1, 3, 6, 1, 4, 1, 1027, 1, 2, 4, 0, 402)).setObjects(("MITEL-IPFILTER-MIB", "mitelAccResTableIfIndex"), ("MITEL-IPFILTER-MIB", "mitelAccResTableOrder"))
if mibBuilder.loadTexts: mitelAccResTableTrapped.setStatus('current')
mibBuilder.exportSymbols("MITEL-IPFILTER-MIB", mitelAccResTableDstPortOutsideRange=mitelAccResTableDstPortOutsideRange, mitelIdCsIpera1000=mitelIdCsIpera1000, mitelAccResTableMatchIn=mitelAccResTableMatchIn, mitelAccResTableProtocolOutsideRange=mitelAccResTableProtocolOutsideRange, mitelAccResTableDstPortFrom=mitelAccResTableDstPortFrom, mitelAccResTableDstAddrFrom=mitelAccResTableDstAddrFrom, mitelAccResTableTrap=mitelAccResTableTrap, mitelAccResTableTcpSyn=mitelAccResTableTcpSyn, mitelFltGrpLogicalTable=mitelFltGrpLogicalTable, mitelAccResTableOrder=mitelAccResTableOrder, mitelAccResTableTrapped=mitelAccResTableTrapped, mitelAccResTableDstAddrTo=mitelAccResTableDstAddrTo, mitelAccResTableCount=mitelAccResTableCount, mitel=mitel, mitelIpNetRouter=mitelIpNetRouter, mitelAccResTableIfIndex=mitelAccResTableIfIndex, mitelAccResTableProtocolFrom=mitelAccResTableProtocolFrom, mitelAccResTableTcpRst=mitelAccResTableTcpRst, mitelPropIpNetworking=mitelPropIpNetworking, PYSNMP_MODULE_ID=mitelIpGrpFilterGroup, mitelIdCallServers=mitelIdCallServers, mitelAccResTableSrcPortTo=mitelAccResTableSrcPortTo, mitelAccResTableDstPortTo=mitelAccResTableDstPortTo, mitelProprietary=mitelProprietary, mitelAccResTableStatus=mitelAccResTableStatus, mitelFltGrpAccessRestrictEntry=mitelFltGrpAccessRestrictEntry, mitelAccResTableProtocolTo=mitelAccResTableProtocolTo, mitelFltGrpAccessRestrictTable=mitelFltGrpAccessRestrictTable, mitelAccResTableTcpFin=mitelAccResTableTcpFin, mitelAccResTableDstAddrOutsideRange=mitelAccResTableDstAddrOutsideRange, mitelAccResTableType=mitelAccResTableType, mitelFltGrpLogicalEntry=mitelFltGrpLogicalEntry, mitelLogTableAccessDef=mitelLogTableAccessDef, mitelRouterIpGroup=mitelRouterIpGroup, mitelAccResTableTcpAck=mitelAccResTableTcpAck, mitelIpera1000Notifications=mitelIpera1000Notifications, mitelAccResTableSrcAddrOutsideRange=mitelAccResTableSrcAddrOutsideRange, mitelIpGrpFilterGroup=mitelIpGrpFilterGroup, mitelAccResTableSrcAddrTo=mitelAccResTableSrcAddrTo, mitelAccResTableMatchOut=mitelAccResTableMatchOut, mitelLogTableAllowSrcRouting=mitelLogTableAllowSrcRouting, mitelAccResTableLog=mitelAccResTableLog, mitelAccResTableSrcPortFrom=mitelAccResTableSrcPortFrom, mitelAccResTableSrcAddrFrom=mitelAccResTableSrcAddrFrom, mitelAccResTableSrcPortOutsideRange=mitelAccResTableSrcPortOutsideRange, mitelIdentification=mitelIdentification, mitelFltGrpAccessRestrictEnable=mitelFltGrpAccessRestrictEnable)
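# Usage sketch (assumes this generated module is on the MibBuilder search path):
#   from pysnmp.smi import builder
#   mibBuilder = builder.MibBuilder()
#   mibBuilder.loadModules('MITEL-IPFILTER-MIB')
#   (exported symbols, e.g. mitelFltGrpAccessRestrictTable, are then available via importSymbols)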
| 148.427184 | 2,440 | 0.774071 |
efac6895df5f5ea061849d967bc2d2504d107024 | 26,639 | py | Python | integration-test/1686-building-merging.py | roman-ianivskyy/vector-datasource | 3d59c0d9856d6bc2a78c4a9273b4e850c2e41d92 | [
"MIT"
] | null | null | null | integration-test/1686-building-merging.py | roman-ianivskyy/vector-datasource | 3d59c0d9856d6bc2a78c4a9273b4e850c2e41d92 | [
"MIT"
] | null | null | null | integration-test/1686-building-merging.py | roman-ianivskyy/vector-datasource | 3d59c0d9856d6bc2a78c4a9273b4e850c2e41d92 | [
"MIT"
] | null | null | null | # -*- encoding: utf-8 -*-
from . import FixtureTest
class BuildingMergingTest(FixtureTest):
def test_sao_paulo(self):
import dsl
from shapely.wkt import loads as wkt_loads
z, x, y = (15, 12132, 18590)
self.generate_fixtures(
# https://www.openstreetmap.org/way/520091576
dsl.way(520091576, wkt_loads(
'POLYGON(('
'-46.70706510543823242 -23.55252732329748255,'
'-46.70701280236244202 -23.55263059139736015,'
'-46.70701950788497925 -23.5526330501606509,'
'-46.70699670910835266 -23.55267976665450647,'
'-46.7069108784198761 -23.55264411459491214,'
'-46.70686796307563782 -23.55272894189323551,'
'-46.7070382833480835 -23.55280147533679269,'
'-46.70707583427429199 -23.55272771251249253,'
'-46.70710265636444092 -23.55273877693876727,'
'-46.70713886618614197 -23.55266624346062088,'
'-46.70714825391769409 -23.55266993160454092,'
'-46.70719251036643982 -23.55258141612181078,'
'-46.70706510543823242 -23.55252732329748255))'), {
'building': 'yes',
'height': '4.51',
'source': 'openstreetmap.org',
}),
# https://www.openstreetmap.org/way/520086815
dsl.way(520086815, wkt_loads(
'POLYGON(('
'-46.70652061700820923 -23.5524621660022575,'
'-46.7064763605594635 -23.55256666353558614,'
'-46.70661851763725281 -23.55262690325233166,'
'-46.70663058757781982 -23.55261706819842971,'
'-46.70670568943023682 -23.55264903212093941,'
'-46.70673251152038574 -23.55259370994242829,'
'-46.70669630169868469 -23.55257772797541804,'
'-46.70670703053474426 -23.55255559909480212,'
'-46.70674458146095276 -23.5525691223000706,'
'-46.70674726366996765 -23.55256297538875288,'
'-46.70677542686462402 -23.55250642379121473,'
'-46.70673251152038574 -23.55246462476868885,'
'-46.70669093728065491 -23.5525445346531086,'
'-46.70667082071304321 -23.55253838774063979,'
'-46.706676185131073 -23.55252240576690781,'
'-46.70652061700820923 -23.5524621660022575))'), {
'building': 'yes',
'height': '6.03',
'source': 'openstreetmap.org',
}),
# https://www.openstreetmap.org/way/520086785
dsl.way(520086785, wkt_loads(
'POLYGON((-46.7069457471370697 -23.55248429489859063,'
'-46.70687064528465271 -23.55249412996244018,'
'-46.70686528086662292 -23.55250396502553656,'
'-46.70688673853874207 -23.5525138000879366,'
'-46.70687198638916016 -23.55254330527063189,'
'-46.70682236552238464 -23.55252240576690781,'
'-46.70677945017814636 -23.55260969190750586,'
'-46.70673385262489319 -23.55259125117839858,'
'-46.70673251152038574 -23.55259370994242829,'
'-46.70670032501220703 -23.55266009655385062,'
'-46.70685186982154846 -23.55272525375096393,'
'-46.70693099498748779 -23.55256789291783548,'
'-46.70688539743423462 -23.55254945218285911,'
'-46.70690149068832397 -23.55251871761882398,'
'-46.70695245265960693 -23.55254084650566426,'
'-46.70696988701820374 -23.55250888255683606,'
'-46.7069457471370697 -23.55248429489859063))'), {
'building': 'yes',
'height': '3.31',
'source': 'openstreetmap.org',
}),
# https://www.openstreetmap.org/way/520086778
dsl.way(520086778, wkt_loads(
'POLYGON((-46.70694842934608459 -23.5521093325406099,'
'-46.70686393976211548 -23.55218063694022135,'
'-46.70697122812271118 -23.5522912816216774,'
'-46.70699000358581543 -23.55227652900285307,'
'-46.70701682567596436 -23.55230357546942344,'
'-46.70702621340751648 -23.55229619916090655,'
'-46.70704096555709839 -23.55231218116217917,'
'-46.70700743794441223 -23.55234045700575507,'
'-46.70703426003456116 -23.55236750345916619,'
'-46.70705705881118774 -23.5523908617553559,'
'-46.70711740851402283 -23.55234045700575507,'
'-46.70705437660217285 -23.55227529961788946,'
'-46.70708522200584412 -23.55225071191595987,'
'-46.70704364776611328 -23.55220891281211948,'
'-46.70705974102020264 -23.5521953895698033,'
'-46.70702621340751648 -23.55216219615093109,'
'-46.70701280236244202 -23.55217449001075636,'
'-46.70694842934608459 -23.5521093325406099))'), {
'building': 'yes',
'height': '6.09',
'source': 'openstreetmap.org',
}),
# https://www.openstreetmap.org/way/520087792
dsl.way(520087792, wkt_loads(
'POLYGON((-46.70773833990097046 -23.55201098158111961,'
'-46.70764848589897156 -23.55205401013495248,'
'-46.70765653252601624 -23.55206753339182058,'
'-46.70764312148094177 -23.55207368032631621,'
'-46.70762702822685242 -23.55204540442530003,'
'-46.70757874846458435 -23.55206876277873107,'
'-46.70763909816741943 -23.55217571939667209,'
'-46.70765787363052368 -23.55216711369500615,'
'-46.70769408345222473 -23.55223104175111359,'
'-46.70774504542350769 -23.55220645404089908,'
'-46.70773431658744812 -23.55218801325523259,'
'-46.70781612396240234 -23.55214867290379743,'
'-46.70773833990097046 -23.55201098158111961))'), {
'building': 'yes',
'height': '5.46',
'source': 'openstreetmap.org',
}),
# https://www.openstreetmap.org/way/520091545
dsl.way(520091545, wkt_loads(
'POLYGON((-46.70750364661216736 -23.55166429386198956,'
'-46.70743122696876526 -23.55169871679699156,'
'-46.70743793249130249 -23.551708551919603,'
'-46.70740172266960144 -23.55172576338243573,'
'-46.70742854475975037 -23.5517737095884172,'
'-46.7074151337146759 -23.55177985653662631,'
'-46.70746877789497375 -23.55187451950293109,'
'-46.70762166380882263 -23.5518019855478542,'
'-46.70756801962852478 -23.55170609313901764,'
'-46.70753583312034607 -23.55172207521201244,'
'-46.70750364661216736 -23.55166429386198956))'), {
'building': 'yes',
'height': '5.76',
'source': 'openstreetmap.org',
}),
# https://www.openstreetmap.org/way/520090963
dsl.way(520090963, wkt_loads(
'POLYGON((-46.70716434717178345 -23.55230111669996518,'
'-46.70711740851402283 -23.55234045700575507,'
'-46.70705705881118774 -23.5523908617553559,'
'-46.70710667967796326 -23.55244003710217271,'
'-46.70712277293205261 -23.5524265138836455,'
'-46.70726627111434937 -23.55257281044671913,'
'-46.70734673738479614 -23.55250642379121473,'
'-46.70729175209999084 -23.55245110155267696,'
'-46.70727431774139404 -23.55246585415190452,'
'-46.7072528600692749 -23.55244372525244501,'
'-46.70726627111434937 -23.5524326608013439,'
'-46.70720189809799194 -23.55236750345916619,'
'-46.70712947845458984 -23.5524265138836455,'
'-46.70710533857345581 -23.55240192620999551,'
'-46.70717105269432068 -23.55234660392748935,'
'-46.70716568827629089 -23.55234291577447436,'
'-46.70718714594841003 -23.55232570439247297,'
'-46.70716434717178345 -23.55230111669996518))'), {
'building': 'yes',
'height': '3.39',
'source': 'openstreetmap.org',
}),
# https://www.openstreetmap.org/way/520091551
dsl.way(520091551, wkt_loads(
'POLYGON((-46.70706376433372498 -23.55200729341869703,'
'-46.70696720480918884 -23.55208843296787791,'
'-46.7071080207824707 -23.55223104175111359,'
'-46.70720458030700684 -23.55214867290379743,'
'-46.70714825391769409 -23.55209089174130099,'
'-46.70713886618614197 -23.55209949744798337,'
'-46.70712143182754517 -23.55208105664729601,'
'-46.70713081955909729 -23.55207490971315565,'
'-46.70706376433372498 -23.55200729341869703))'), {
'building': 'yes',
'height': '6.37',
'source': 'openstreetmap.org',
}),
# https://www.openstreetmap.org/way/520091549
dsl.way(520091549, wkt_loads(
'POLYGON((-46.70718446373939514 -23.55193967708946445,'
'-46.70710265636444092 -23.55200852280617596,'
'-46.70727699995040894 -23.5521843250977696,'
'-46.70726090669631958 -23.55219661895552008,'
'-46.70727431774139404 -23.55220891281211948,'
'-46.70732259750366211 -23.55216957246696552,'
'-46.70727431774139404 -23.55212162640536633,'
'-46.70732259750366211 -23.55208105664729601,'
'-46.70718446373939514 -23.55193967708946445))'), {
'building': 'yes',
'height': '6.86',
'source': 'openstreetmap.org',
}),
# https://www.openstreetmap.org/way/520087793
dsl.way(520087793, wkt_loads(
'POLYGON((-46.7076323926448822 -23.55182042638769246,'
'-46.70760422945022583 -23.55183394966859112,'
'-46.70761629939079285 -23.55185607867068143,'
'-46.70748621225357056 -23.55191877748976026,'
'-46.70754924416542053 -23.5520318811661582,'
'-46.70770883560180664 -23.55195565913406597,'
'-46.7076323926448822 -23.55182042638769246))'), {
'building': 'yes',
'height': '5.5',
'source': 'openstreetmap.org',
}),
# https://www.openstreetmap.org/way/520091311
dsl.way(520091311, wkt_loads(
'POLYGON((-46.70752778649330139 -23.5520847448076438,'
'-46.70743927359580994 -23.55215850799275756,'
'-46.7075800895690918 -23.55230111669996518,'
'-46.70765385031700134 -23.55223964744858733,'
'-46.70762166380882263 -23.55220768342651638,'
'-46.70763641595840454 -23.5521953895698033,'
'-46.70752778649330139 -23.5520847448076438))'), {
'building': 'yes',
'height': '4.01',
'source': 'openstreetmap.org',
}),
# https://www.openstreetmap.org/way/520086782
dsl.way(520086782, wkt_loads(
'POLYGON((-46.70685455203056335 -23.55220522465526756,'
'-46.7067660391330719 -23.55227775838783089,'
'-46.7068183422088623 -23.55233185131487517,'
'-46.70683175325393677 -23.55232078685433805,'
'-46.70692160725593567 -23.55241176127999836,'
'-46.70699670910835266 -23.55234906269608075,'
'-46.70685455203056335 -23.55220522465526756))'), {
'building': 'yes',
'height': '3.2',
'source': 'openstreetmap.org',
}),
# https://www.openstreetmap.org/way/520090964
dsl.way(520090964, wkt_loads(
'POLYGON((-46.70733600854873657 -23.55231463993141006,'
'-46.70728906989097595 -23.55235398023314985,'
'-46.70728102326393127 -23.55234783331178505,'
'-46.7072528600692749 -23.55237119161147064,'
'-46.70726090669631958 -23.55237979729977837,'
'-46.70725017786026001 -23.55238840298753189,'
'-46.70736953616142273 -23.55251011193961119,'
'-46.70745670795440674 -23.55243757833527241,'
'-46.70733600854873657 -23.55231463993141006))'), {
'building': 'yes',
'height': '6.26',
'source': 'openstreetmap.org',
}),
# https://www.openstreetmap.org/way/520090966
dsl.way(520090966, wkt_loads(
'POLYGON((-46.70744463801383972 -23.5522003071126278,'
'-46.70735880732536316 -23.55227284084789119,'
'-46.70748084783554077 -23.55239700867470276,'
'-46.70756667852401733 -23.55232570439247297,'
'-46.70744463801383972 -23.5522003071126278))'), {
'building': 'yes',
'height': '6.69',
'source': 'openstreetmap.org',
}),
# https://www.openstreetmap.org/way/520086834
dsl.way(520086834, wkt_loads(
'POLYGON((-46.70675396919250488 -23.5522839053124784,'
'-46.70665472745895386 -23.55236750345916619,'
'-46.70674324035644531 -23.5524584778524968,'
'-46.70677006244659424 -23.55243511956831526,'
'-46.70678213238716125 -23.55244741340261783,'
'-46.70683577656745911 -23.55240315559379383,'
'-46.70676335692405701 -23.55232939254595692,'
'-46.70678213238716125 -23.55231341054678751,'
'-46.70675396919250488 -23.5522839053124784))'), {
'building': 'yes',
'height': '3.42',
'source': 'openstreetmap.org',
}),
# https://www.openstreetmap.org/way/520091546
dsl.way(520091546, wkt_loads(
'POLYGON((-46.70727163553237915 -23.55183272027947794,'
'-46.70717641711235046 -23.5519126305480313,'
'-46.70722201466560364 -23.55195688852202807,'
'-46.70723274350166321 -23.55194828280603758,'
'-46.70728102326393127 -23.55199868770600347,'
'-46.70736551284790039 -23.55192861259591552,'
'-46.70727163553237915 -23.55183272027947794))'), {
'building': 'yes',
'height': '5.78',
'source': 'openstreetmap.org',
}),
# https://www.openstreetmap.org/way/520090962
dsl.way(520090962, wkt_loads(
'POLYGON((-46.70705705881118774 -23.5523908617553559,'
'-46.7069457471370697 -23.55248429489859063,'
'-46.70699402689933777 -23.55253347021047716,'
'-46.70710667967796326 -23.55244003710217271,'
'-46.70705705881118774 -23.5523908617553559))'), {
'building': 'yes',
'height': '3.48',
'source': 'openstreetmap.org',
}),
# https://www.openstreetmap.org/way/520091543
dsl.way(520091543, wkt_loads(
'POLYGON((-46.70746207237243652 -23.55190156605220864,'
'-46.70744463801383972 -23.55191017177126866,'
'-46.70739904046058655 -23.5519310713723371,'
'-46.70749157667160034 -23.5520269636170525,'
'-46.70751839876174927 -23.55205646890902926,'
'-46.7075425386428833 -23.55204540442530003,'
'-46.70746207237243652 -23.55190156605220864))'), {
'building': 'yes',
'height': '5.52',
'source': 'openstreetmap.org',
}),
# https://www.openstreetmap.org/way/520091312
dsl.way(520091312, wkt_loads(
'POLYGON((-46.70749157667160034 -23.5520269636170525,'
'-46.70741379261016846 -23.55209212112799833,'
'-46.70738160610198975 -23.55211916763251168,'
'-46.70739904046058655 -23.55213883781411255,'
'-46.7074178159236908 -23.55215727860668551,'
'-46.70746609568595886 -23.55211547947314443,'
'-46.70746073126792908 -23.5521105619271367,'
'-46.70752242207527161 -23.55205892768306342,'
'-46.70751839876174927 -23.55205646890902926,'
'-46.70749157667160034 -23.5520269636170525))'), {
'building': 'yes',
'height': '3.67',
'source': 'openstreetmap.org',
}),
# https://www.openstreetmap.org/way/520091309
dsl.way(520091309, wkt_loads(
'POLYGON((-46.70738160610198975 -23.55211916763251168,'
'-46.70732259750366211 -23.55216957246696552,'
'-46.70727431774139404 -23.55220891281211948,'
'-46.70731455087661743 -23.55225071191595987,'
'-46.7073802649974823 -23.55219661895552008,'
'-46.70737087726593018 -23.55218678386941633,'
'-46.70737624168395996 -23.55218063694022135,'
'-46.70736417174339294 -23.55216834308097873,'
'-46.70739904046058655 -23.55213883781411255,'
'-46.70738160610198975 -23.55211916763251168))'), {
'building': 'yes',
'height': '5.02',
'source': 'openstreetmap.org',
}),
# https://www.openstreetmap.org/way/520085168
dsl.way(520085168, wkt_loads(
'POLYGON((-46.70696988701820374 -23.55255805785949974,'
'-46.7069283127784729 -23.55263796768710449,'
'-46.70698195695877075 -23.55266009655385062,'
'-46.70702353119850159 -23.55258018673970355,'
'-46.70696988701820374 -23.55255805785949974))'), {
'building': 'yes',
'source': 'openstreetmap.org',
}),
# https://www.openstreetmap.org/way/520091544
dsl.way(520091544, wkt_loads(
'POLYGON((-46.70742988586425781 -23.5518376378358596,'
'-46.70734673738479614 -23.55187697828036164,'
'-46.70739904046058655 -23.5519310713723371,'
'-46.70744463801383972 -23.55191017177126866,'
'-46.70743256807327271 -23.5518868133896575,'
'-46.70745134353637695 -23.55187820766907691,'
'-46.70742988586425781 -23.5518376378358596))'), {
'building': 'yes',
'height': '5.9',
'source': 'openstreetmap.org',
}),
# https://www.openstreetmap.org/way/520090961
dsl.way(520090961, wkt_loads(
'POLYGON((-46.70703426003456116 -23.55236750345916619,'
'-46.70692294836044312 -23.55246093661901341,'
'-46.7069457471370697 -23.55248429489859063,'
'-46.70705705881118774 -23.5523908617553559,'
'-46.70703426003456116 -23.55236750345916619))'), {
'building': 'yes',
'height': '3.106',
'source': 'openstreetmap.org',
}),
# https://www.openstreetmap.org/way/520086812
dsl.way(520086812, wkt_loads(
'POLYGON((-46.70688539743423462 -23.55242159634927646,'
'-46.70683175325393677 -23.55246708353509177,'
'-46.70685991644859314 -23.55249535934537164,'
'-46.70687064528465271 -23.55249412996244018,'
'-46.7069457471370697 -23.55248429489859063,'
'-46.70692294836044312 -23.55246093661901341,'
'-46.70688539743423462 -23.55242159634927646))'), {
'building': 'yes',
'height': '3.111',
'source': 'openstreetmap.org',
}),
# https://www.openstreetmap.org/way/520090965
dsl.way(520090965, wkt_loads(
'POLYGON((-46.70722737908363342 -23.55224825314552106,'
'-46.70718580484390259 -23.55228267592757163,'
'-46.70716434717178345 -23.55230111669996518,'
'-46.70718714594841003 -23.55232570439247297,'
'-46.7072179913520813 -23.55235643900162756,'
'-46.7072179913520813 -23.55231709870061252,'
'-46.70725956559181213 -23.55228144654265066,'
'-46.70722737908363342 -23.55224825314552106))'), {
'building': 'yes',
'height': '3.73',
'source': 'openstreetmap.org',
}),
# https://www.openstreetmap.org/way/520091548
dsl.way(520091548, wkt_loads(
'POLYGON((-46.70735880732536316 -23.55203556932788445,'
'-46.707325279712677 -23.55206261584402228,'
'-46.70738160610198975 -23.55211916763251168,'
'-46.70741379261016846 -23.55209212112799833,'
'-46.70735880732536316 -23.55203556932788445))'), {
'building': 'yes',
'height': '4.14',
'source': 'openstreetmap.org',
}),
# https://www.openstreetmap.org/way/520087097
dsl.way(520087097, wkt_loads(
'POLYGON((-46.7068210244178772 -23.55250027687696956,'
'-46.70677542686462402 -23.55250642379121473,'
'-46.70674726366996765 -23.55256297538875288,'
'-46.70678213238716125 -23.55257772797541804,'
'-46.7068210244178772 -23.55250027687696956))'), {
'building': 'yes',
'height': '3.001',
'source': 'openstreetmap.org',
}),
# https://www.openstreetmap.org/way/520085150
dsl.way(520085150, wkt_loads(
'POLYGON((-46.70728504657745361 -23.55224948253075468,'
'-46.70728102326393127 -23.55225071191595987,'
'-46.70726090669631958 -23.55226546453768321,'
'-46.70729577541351318 -23.55230480485414546,'
'-46.70730516314506531 -23.55230972239286302,'
'-46.70730918645858765 -23.55230849300819784,'
'-46.70731857419013977 -23.55230603423883906,'
'-46.70733198523521423 -23.5522925110064989,'
'-46.70729711651802063 -23.55225317068637025,'
'-46.70729175209999084 -23.55225071191595987,'
'-46.70728504657745361 -23.55224948253075468))'), {
'building': 'yes',
'source': 'openstreetmap.org',
}),
# https://www.openstreetmap.org/way/520085140
dsl.way(520085140, wkt_loads(
'POLYGON((-46.70739367604255676 -23.55214621413145437,'
'-46.70736685395240784 -23.55216834308097873,'
'-46.70741111040115356 -23.55221260096887193,'
'-46.70743793249130249 -23.55219047202680827,'
'-46.70739367604255676 -23.55214621413145437))'), {
'building': 'yes',
'source': 'openstreetmap.org',
}),
# https://www.openstreetmap.org/way/520090960
dsl.way(520090960, wkt_loads(
'POLYGON((-46.70714020729064941 -23.55223595929260227,'
'-46.70711874961853027 -23.55225562945670958,'
'-46.70716434717178345 -23.55230111669996518,'
'-46.70718580484390259 -23.55228267592757163,'
'-46.70714020729064941 -23.55223595929260227))'), {
'building': 'yes',
'height': '3.07',
'source': 'openstreetmap.org',
}),
# https://www.openstreetmap.org/way/520085149
dsl.way(520085149, wkt_loads(
'POLYGON((-46.70722737908363342 -23.55217817816847514,'
'-46.70721128582954407 -23.5521929307983271,'
'-46.70721665024757385 -23.5522027658839761,'
'-46.70721530914306641 -23.55221383035446081,'
'-46.70721262693405151 -23.5522212066675678,'
'-46.70719921588897705 -23.55223350052187925,'
'-46.70721396803855896 -23.55224702376028745,'
'-46.70725688338279724 -23.55221137158331146,'
'-46.70722737908363342 -23.55217817816847514))'), {
'building': 'yes',
'source': 'openstreetmap.org',
}),
# https://www.openstreetmap.org/way/520085148
dsl.way(520085148, wkt_loads(
'POLYGON((-46.70734807848930359 -23.55209212112799833,'
'-46.707325279712677 -23.55211302070016188,'
'-46.70732393860816956 -23.55211793824607014,'
'-46.70732393860816956 -23.55212408517819256,'
'-46.7073427140712738 -23.5521425259728403,'
'-46.70737221837043762 -23.55211916763251168,'
'-46.70734807848930359 -23.55209212112799833))'), {
'building': 'yes',
'source': 'openstreetmap.org',
}),
)
with self.features_in_tile_layer(z, x, y, 'buildings') as features:
# have to use assertTrue here rather than the more natural
# assertEqual so that when this is run as --download-only the test
# case class can skip this test.
self.assertTrue(len(features) == 1)
| 53.707661 | 78 | 0.548144 |
a78297b560a02053438755f40c2d20db6330014b | 1,803 | py | Python | ulfs/logging.py | asappresearch/neural-ilm | fd7e09960525391f4084a5753429deabd7ff00aa | [
"MIT"
] | null | null | null | ulfs/logging.py | asappresearch/neural-ilm | fd7e09960525391f4084a5753429deabd7ff00aa | [
"MIT"
] | null | null | null | ulfs/logging.py | asappresearch/neural-ilm | fd7e09960525391f4084a5753429deabd7ff00aa | [
"MIT"
] | 2 | 2021-02-25T04:42:14.000Z | 2021-02-25T04:43:06.000Z | # import yaml
import json
from os import path
import os
from os.path import join
import time
import datetime
class Logger(object):
def __init__(self, flush_every_seconds=10):
self.flush_every_seconds = flush_every_seconds
def add_to_parser(self, parser):
parser.add_argument('--logfile', type=str, default='logs/{name}%Y%m%d_%H%M%S.log')
def eat_args(self, name, args):
self.logfile = datetime.datetime.strftime(datetime.datetime.now(), args.logfile)
self.name = name
if self.name is not None and self.name != '':
self.logfile = self.logfile.format(
name=self.name + '_')
else:
self.logfile = self.logfile.format(
name='')
del args.__dict__['logfile']
if not path.isdir(path.dirname(self.logfile)):
os.makedirs(path.dirname(self.logfile))
self.f = open(self.logfile, 'a')
self.last_flush = time.time()
def log(self, datadict):
# with open(self.logfile, 'a') as f:
self.f.write(json.dumps(datadict) + '\n')
if time.time() - self.last_flush >= self.flush_every_seconds:
self.f.flush()
self.last_flush = time.time()
def log_dicts(self, dicts):
alldict = {}
for name, adict in dicts.items():
for k, v in adict.items():
alldict[name + k] = v
# with open(self.logfile, 'a') as f:
try:
self.f.write(json.dumps(alldict) + '\n')
except Exception as e:
print(e)
for k, v in alldict.items():
print(k, type(v), v)
raise e
if time.time() - self.last_flush >= self.flush_every_seconds:
self.f.flush()
self.last_flush = time.time()
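# Usage sketch (names and values are illustrative):
#   import argparse
#   parser = argparse.ArgumentParser()
#   logger = Logger()
#   logger.add_to_parser(parser)
#   args = parser.parse_args()
#   logger.eat_args('run1', args)
#   logger.log({'step': 1, 'loss': 0.25})
#   logger.log_dicts({'train_': {'loss': 0.25}, 'eval_': {'loss': 0.31}})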
| 32.781818 | 90 | 0.570715 |
4198da385be81d5f23bcd4f31b02ad7d3d5c51f7 | 1,082 | py | Python | app/utils.py | blasferna/pyrucsql | e52c880e85c3011143f9817d5246c121e1aa163e | [
"MIT"
] | null | null | null | app/utils.py | blasferna/pyrucsql | e52c880e85c3011143f9817d5246c121e1aa163e | [
"MIT"
] | null | null | null | app/utils.py | blasferna/pyrucsql | e52c880e85c3011143f9817d5246c121e1aa163e | [
"MIT"
] | 1 | 2022-02-19T10:17:53.000Z | 2022-02-19T10:17:53.000Z | import base64
import json
import typing
from datetime import datetime, timedelta
from Crypto.Cipher import AES
from Crypto.Util import Padding
from starlette.responses import Response
class PrettyJSONResponse(Response):
media_type = "application/json"
def render(self, content: typing.Any) -> bytes:
return json.dumps(
content,
ensure_ascii=False,
allow_nan=False,
indent=4,
separators=(", ", ": "),
).encode("utf-8")
def int_to_datestr(value):
s = str(value)
fecnac = datetime(year=int(s[0:4]), month=int(s[4:6]), day=int(s[6:8]))
return fecnac.strftime("%d/%m/%Y")
def encrypt_param(param, variant="ruc"):
encryption_key = base64.b64decode("aCIbjMuVGtwF8nlSKoPydE==")
text = json.dumps({variant: param}).encode()
text_padded = Padding.pad(text, AES.block_size)
iv = base64.b64decode("JAwlt7SNbYLycmPRqeDFou==")
cipher = AES.new(encryption_key, AES.MODE_CBC, iv)
cipher_enc = cipher.encrypt(text_padded)
return base64.b64encode(cipher_enc).decode()
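# Usage sketch (values are placeholders):
#   int_to_datestr(19900131)   # -> '31/01/1990'
#   encrypt_param('80012345')  # -> base64-encoded AES-CBC ciphertext of {"ruc": "80012345"}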
| 28.473684 | 75 | 0.670055 |
fa568f84ff57b6e4e717b5e0f4e9b238cdc46008 | 1,835 | py | Python | lib/core/api2object/alliance.py | mrcrgl/gge-storage | a8471624c1a865d4f7eeb00415bd4cd2a91ea310 | [
"MIT"
] | null | null | null | lib/core/api2object/alliance.py | mrcrgl/gge-storage | a8471624c1a865d4f7eeb00415bd4cd2a91ea310 | [
"MIT"
] | 1 | 2015-04-09T15:58:19.000Z | 2015-04-14T06:37:02.000Z | lib/core/api2object/alliance.py | mrcrgl/gge-storage | a8471624c1a865d4f7eeb00415bd4cd2a91ea310 | [
"MIT"
] | null | null | null | __author__ = 'mriegel'
from gge_proxy_manager.models import Alliance
from lib.cache import cache
from lib.core import DATA_IMPORT_LOCK_TIME
import logging
logger = logging.getLogger(__name__)
def import_alliance(oi, kingdom):
response = import_alliance_(oi, kingdom)
logger.info("Import alliance AID=%r response=%r", oi.get('AID', None), response)
return response
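# Usage sketch: `oi` is the raw API payload dict (only AID, AN and ACF are read below) and
# `kingdom` is a model instance exposing `game_id`, e.g.
#   import_alliance({'AID': '42', 'AN': 'Example Alliance', 'ACF': '100'}, kingdom)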
def import_alliance_(oi, kingdom):
if not oi.get("AID") or int(oi.get("AID")) < 0:
return None
key = "-".join(("update", "alliance", str(kingdom.game_id), str(oi.get("AID", "null"))))
alliance_id = cache.get(key)
if alliance_id:
logger.info("Import alliance key=%s AID=%r cached=True alliance_id=%r", key, oi.get('AID', None), alliance_id)
return alliance_id
if not isinstance(alliance_id, int) and alliance_id is not None:
logger.info("Import alliance AID=%r skipped=True alliance_id=%r", oi.get('AID', None), alliance_id)
return None
alliance, created = Alliance.objects.get_or_create(game_id=kingdom.game_id, gge_id=int(oi.get("AID")),
defaults={
"name": oi.get("AN"),
"fame": int(oi.get("ACF", 0)) #,
#"level": int(oi.get("AR"))
})
if not created:
alliance.name = oi.get("AN")
alliance.fame = int(oi.get("ACF", 0))
alliance.level = 0
alliance.save()
logger.info("Import alliance key=%s AID=%r created=%r alliance_id=%r", key, oi.get('AID', None), created, alliance.id)
cache.set(key, alliance.id, DATA_IMPORT_LOCK_TIME)
return alliance.pk | 38.229167 | 122 | 0.562943 |
3a6eb6f27a617933c0e21c09911bd4cbde8c56a8 | 4,823 | py | Python | sdk/python/pulumi_azure_native/media/v20180330preview/get_media_service.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/media/v20180330preview/get_media_service.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/media/v20180330preview/get_media_service.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetMediaServiceResult',
'AwaitableGetMediaServiceResult',
'get_media_service',
]
@pulumi.output_type
class GetMediaServiceResult:
"""
A Media Services account.
"""
def __init__(__self__, id=None, location=None, media_service_id=None, name=None, storage_accounts=None, tags=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if media_service_id and not isinstance(media_service_id, str):
raise TypeError("Expected argument 'media_service_id' to be a str")
pulumi.set(__self__, "media_service_id", media_service_id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if storage_accounts and not isinstance(storage_accounts, list):
raise TypeError("Expected argument 'storage_accounts' to be a list")
pulumi.set(__self__, "storage_accounts", storage_accounts)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
The Azure Region of the resource.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="mediaServiceId")
def media_service_id(self) -> str:
"""
The Media Services account ID.
"""
return pulumi.get(self, "media_service_id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="storageAccounts")
def storage_accounts(self) -> Optional[Sequence['outputs.StorageAccountResponse']]:
"""
The storage accounts for this resource.
"""
return pulumi.get(self, "storage_accounts")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
class AwaitableGetMediaServiceResult(GetMediaServiceResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetMediaServiceResult(
id=self.id,
location=self.location,
media_service_id=self.media_service_id,
name=self.name,
storage_accounts=self.storage_accounts,
tags=self.tags,
type=self.type)
def get_media_service(account_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetMediaServiceResult:
"""
A Media Services account.
:param str account_name: The Media Services account name.
:param str resource_group_name: The name of the resource group within the Azure subscription.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:media/v20180330preview:getMediaService', __args__, opts=opts, typ=GetMediaServiceResult).value
return AwaitableGetMediaServiceResult(
id=__ret__.id,
location=__ret__.location,
media_service_id=__ret__.media_service_id,
name=__ret__.name,
storage_accounts=__ret__.storage_accounts,
tags=__ret__.tags,
type=__ret__.type)
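# Illustrative usage sketch: how the typed result might be consumed from inside a
# Pulumi program. The account and resource-group names below are placeholders,
# not values taken from this repository.
def _example_lookup() -> AwaitableGetMediaServiceResult:
    return get_media_service(account_name="examplemediaaccount",
                             resource_group_name="example-resource-group")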
| 33.262069 | 144 | 0.639851 |
58b2dc1642b0017215122130d99314646d099e13 | 528 | py | Python | 16/demo16.3.py | BillZong/CorePythonProgrammingDemos | c97b6f2c1533a3eeaf35d2de39902b95e969a411 | [
"MIT"
] | null | null | null | 16/demo16.3.py | BillZong/CorePythonProgrammingDemos | c97b6f2c1533a3eeaf35d2de39902b95e969a411 | [
"MIT"
] | null | null | null | 16/demo16.3.py | BillZong/CorePythonProgrammingDemos | c97b6f2c1533a3eeaf35d2de39902b95e969a411 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# encoding: UTF-8
"""Demo 16.3 for chapter 16."""
# UDP timestamp server (tsUserv.py)
# Creates a UDP server that receives a client's message, prepends a timestamp to it, and sends it back.
from socket import *
from time import ctime
HOST = ''
PORT = 21567
BUFSIZE = 1024
ADDR = (HOST, PORT)
udpSerSock = socket(AF_INET, SOCK_DGRAM)
udpSerSock.bind(ADDR)
while True:
print 'waiting for message...'
data, addr = udpSerSock.recvfrom(BUFSIZE)
udpSerSock.sendto('[%s] %s' % (ctime(), data), addr)
print '...received from and return to:', addr
udpSerSock.close()
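# Illustrative companion client sketch (a minimal stand-in for the matching tsUclnt.py
# demo, which is not included here); call _example_client() from another process to
# exercise the timestamp server above.
def _example_client(host='localhost', port=21567, bufsize=1024):
    udpCliSock = socket(AF_INET, SOCK_DGRAM)
    udpCliSock.sendto('hello', (host, port))
    reply, addr = udpCliSock.recvfrom(bufsize)
    print 'timestamped reply:', reply
    udpCliSock.close()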
| 19.555556 | 56 | 0.681818 |
14446a7a776b052aa273ece7f0f9463e8b6f6ab2 | 6,548 | py | Python | tests/_common.py | Ouranosinc/clisops | d78c127e07503877ae87c40e3548146fb06258ff | [
"BSD-3-Clause"
] | null | null | null | tests/_common.py | Ouranosinc/clisops | d78c127e07503877ae87c40e3548146fb06258ff | [
"BSD-3-Clause"
] | null | null | null | tests/_common.py | Ouranosinc/clisops | d78c127e07503877ae87c40e3548146fb06258ff | [
"BSD-3-Clause"
] | null | null | null | import os
import tempfile
from pathlib import Path
import pytest
from jinja2 import Template
from clisops.utils import get_file
ROOCS_CFG = Path(tempfile.gettempdir(), "roocs.ini").as_posix()
TESTS_HOME = Path(__file__).parent.absolute().as_posix()
DEFAULT_CMIP5_ARCHIVE_BASE = Path(
TESTS_HOME, "mini-esgf-data/test_data/badc/cmip5/data"
).as_posix()
REAL_C3S_CMIP5_ARCHIVE_BASE = "/gws/nopw/j04/cp4cds1_vol1/data/"
DEFAULT_CMIP6_ARCHIVE_BASE = Path(
TESTS_HOME, "mini-esgf-data/test_data/badc/cmip6/data"
).as_posix()
def assert_vars_equal(var_id, *ds_list, extras=None):
"""Extract variable/DataArray `var_id` from each Dataset in the `ds_list`.
Check they are all the same by comparing the arrays and common attributes.
`extras` is an optional list of extra attributes to check.
"""
if not extras:
extras = []
if len(ds_list) == 1:
raise Exception("Only one Dataset passed to: _ds_var_check()")
das = [ds[var_id] for ds in ds_list]
ref_da = das[0]
# Create a list of attributes to compare
attrs = ["standard_name", "long_name", "units", "cell_methods"] + extras
for da in das[1:]:
assert da.values.tolist() == ref_da.values.tolist()
for attr in attrs:
if attr in ref_da.attrs:
assert ref_da.attrs[attr] == da.attrs.get(attr)
# This is now only required for json files
XCLIM_TESTS_DATA = Path(TESTS_HOME, "xclim-testdata/testdata").as_posix()
MINI_ESGF_CACHE_DIR = Path.home() / ".mini-esgf-data"
MINI_ESGF_MASTER_DIR = os.path.join(MINI_ESGF_CACHE_DIR, "master")
def _check_output_nc(result, fname="output_001.nc"):
assert fname in [Path(_).name for _ in result]
def write_roocs_cfg():
cfg_templ = """
[project:cmip5]
base_dir = {{ base_dir }}/test_data/badc/cmip5/data/cmip5
[project:cmip6]
base_dir = {{ base_dir }}/test_data/badc/cmip6/data/CMIP6
[project:cordex]
base_dir = {{ base_dir }}/test_data/badc/cordex/data/cordex
[project:c3s-cmip5]
base_dir = {{ base_dir }}/test_data/gws/nopw/j04/cp4cds1_vol1/data/c3s-cmip5
[project:c3s-cmip6]
base_dir = {{ base_dir }}/test_data/badc/cmip6/data/CMIP6
[project:c3s-cordex]
base_dir = {{ base_dir }}/test_data/pool/data/CORDEX/data/cordex
"""
cfg = Template(cfg_templ).render(base_dir=MINI_ESGF_MASTER_DIR)
with open(ROOCS_CFG, "w") as fp:
fp.write(cfg)
# point to roocs cfg in environment
os.environ["ROOCS_CONFIG"] = ROOCS_CFG
def cmip5_archive_base():
if "CMIP5_ARCHIVE_BASE" in os.environ:
return Path(os.environ["CMIP5_ARCHIVE_BASE"]).as_posix()
return DEFAULT_CMIP5_ARCHIVE_BASE
def cmip6_archive_base():
if "CMIP6_ARCHIVE_BASE" in os.environ:
return Path(os.environ["CMIP6_ARCHIVE_BASE"]).as_posix()
return DEFAULT_CMIP6_ARCHIVE_BASE
CMIP5_ARCHIVE_BASE = cmip5_archive_base()
CMIP5_ZOSTOGA = Path(
MINI_ESGF_CACHE_DIR,
"master/test_data/badc/cmip5/data/cmip5/output1/INM/inmcm4/rcp45/mon/ocean/Omon/r1i1p1/latest/zostoga/zostoga_Omon_inmcm4_rcp45_r1i1p1_200601-210012.nc",
).as_posix()
CMIP5_TAS = Path(
MINI_ESGF_CACHE_DIR,
"master/test_data/badc/cmip5/data/cmip5/output1/MOHC/HadGEM2-ES/rcp85/mon/atmos/Amon/r1i1p1/latest/tas/*.nc",
).as_posix()
CMIP5_RH = Path(
MINI_ESGF_CACHE_DIR,
"master/test_data/badc/cmip5/data/cmip5/output1/MOHC/HadGEM2-ES/historical/mon/land/Lmon/r1i1p1/latest/rh/*.nc",
).as_posix()
CMIP6_ARCHIVE_BASE = cmip6_archive_base()
CMIP6_RLDS = Path(
MINI_ESGF_CACHE_DIR,
"master/test_data/badc/cmip6/data/CMIP6/CMIP/IPSL/IPSL-CM6A-LR/historical/r1i1p1f1/Amon/rlds/gr/v20180803/rlds_Amon_IPSL-CM6A-LR_historical_r1i1p1f1_gr_185001-201412.nc",
).as_posix()
CMIP6_RLDS_ONE_TIME_STEP = Path(
MINI_ESGF_CACHE_DIR,
"master/test_data/badc/cmip6/data/CMIP6/CMIP/IPSL/IPSL-CM6A-LR/historical/r1i1p1f1/Amon/rlds/gr/v20180803/rlds_Amon_IPSL-CM6A-LR_historical_r1i1p1f1_gr_185001.nc",
).as_posix()
CMIP6_MRSOFC = Path(
MINI_ESGF_CACHE_DIR,
"master/test_data/badc/cmip6/data/CMIP6/ScenarioMIP/IPSL/IPSL-CM6A-LR/ssp119/r1i1p1f1/fx/mrsofc/gr/v20190410"
"/mrsofc_fx_IPSL-CM6A-LR_ssp119_r1i1p1f1_gr.nc",
).as_posix()
CMIP6_SICONC = Path(
MINI_ESGF_CACHE_DIR,
"master/test_data/badc/cmip6/data/CMIP6/CMIP/CCCma/CanESM5/historical/r1i1p1f1/SImon/siconc/gn/latest/siconc_SImon_CanESM5_historical_r1i1p1f1_gn_185001-201412.nc",
).as_posix()
CMIP6_SICONC_DAY = Path(
MINI_ESGF_CACHE_DIR,
"master/test_data/badc/cmip6/data/CMIP6/CMIP/CCCma/CanESM5/historical/r1i1p1f1/SIday/siconc/gn/v20190429/siconc_SIday_CanESM5_historical_r1i1p1f1_gn_18500101-20141231.nc",
).as_posix()
CMIP6_TA = Path(
MINI_ESGF_CACHE_DIR,
"master/test_data/badc/cmip6/data/CMIP6/ScenarioMIP/MIROC/MIROC6/ssp119/r1i1p1f1/Amon/ta/gn/files/d20190807/ta_Amon_MIROC6_ssp119_r1i1p1f1_gn_201501-202412.nc",
).as_posix()
C3S_CORDEX_AFR_TAS = Path(
MINI_ESGF_CACHE_DIR,
"master/test_data/pool/data/CORDEX/data/cordex/output/AFR-22/GERICS/MPI-M-MPI-ESM-LR/historical/r1i1p1/GERICS-REMO2015/v1/day/tas/v20201015/*.nc",
).as_posix()
C3S_CORDEX_NAM_PR = Path(
MINI_ESGF_CACHE_DIR,
"master/test_data/pool/data/CORDEX/data/cordex/output/NAM-22/OURANOS/NOAA-GFDL-GFDL-ESM2M/rcp45/r1i1p1/OURANOS-CRCM5/v1/day/pr/v20200831/*.nc",
).as_posix()
C3S_CORDEX_EUR_ZG500 = Path(
MINI_ESGF_CACHE_DIR,
"master/test_data/pool/data/CORDEX/data/cordex/output/EUR-11/IPSL/IPSL-IPSL-CM5A-MR/rcp85/r1i1p1/IPSL-WRF381P/v1/day/zg500/v20190919/*.nc",
).as_posix()
C3S_CORDEX_ANT_SFC_WIND = Path(
MINI_ESGF_CACHE_DIR,
"master/test_data/pool/data/CORDEX/data/cordex/output/ANT-44/KNMI/ECMWF-ERAINT/evaluation/r1i1p1/KNMI-RACMO21P/v1/day/sfcWind/v20201001/*.nc",
).as_posix()
C3S_CMIP5_TSICE = Path(
REAL_C3S_CMIP5_ARCHIVE_BASE,
"c3s-cmip5/output1/NCC/NorESM1-ME/rcp60/mon/seaIce/OImon/r1i1p1/tsice/v20120614/*.nc",
).as_posix()
C3S_CMIP5_TOS = Path(
REAL_C3S_CMIP5_ARCHIVE_BASE,
"c3s-cmip5/output1/BCC/bcc-csm1-1-m/historical/mon/ocean/Omon/r1i1p1/tos/v20120709/*.nc",
).as_posix()
CMIP6_TOS = Path(
MINI_ESGF_CACHE_DIR,
"master/test_data/badc/cmip6/data/CMIP6/CMIP/MPI-M/MPI-ESM1-2-LR/historical/r1i1p1f1/Omon/tos/gn/v20190710/tos_Omon_MPI-ESM1-2-LR_historical_r1i1p1f1_gn_185001-186912.nc",
).as_posix()
CMIP6_TOS_ONE_TIME_STEP = Path(
MINI_ESGF_CACHE_DIR,
"master/test_data/badc/cmip6/data/CMIP6/CMIP/MPI-M/MPI-ESM1-2-HR/historical/r1i1p1f1/Omon/tos/gn/v20190710/tos_Omon_MPI-ESM1-2-HR_historical_r1i1p1f1_gn_185001.nc",
).as_posix()
| 35.016043 | 175 | 0.747862 |
8331d0b89e96a09363f63e6c68d3f165ff908c8c | 1,096 | py | Python | scriptlib/scriptjson.py | Orbtial/scriptlib | 9c3d1754da1eb875ecd89a6b6213457b9aa221d6 | [
"MIT"
] | null | null | null | scriptlib/scriptjson.py | Orbtial/scriptlib | 9c3d1754da1eb875ecd89a6b6213457b9aa221d6 | [
"MIT"
] | null | null | null | scriptlib/scriptjson.py | Orbtial/scriptlib | 9c3d1754da1eb875ecd89a6b6213457b9aa221d6 | [
"MIT"
] | null | null | null | '''
ScriptJSON
Json Manager for the scriptlib package
Developed by Orbtial
'''
#Custom Imports
from . import scriptfile
#Standard Imports
import json
def loadJson(ptpdir, path, filename):
"""
Returns a dictionary based on data from the specified .json file
:param ptpdir: String generated from initPTPDIR().
:param path: String representing path of .json file relative to current working directory.
:param filename: String representing name of .json file.
:returns: Dictionary representing data from specified file.
"""
return json.loads(scriptfile.rFileData(ptpdir, path, filename))
def writeJson(ptpdir, path, filename, dictionary):
"""
Overwrites a .json file with a JSON-formatted dictionary.
:param ptpdir: String generated from initPTPDIR().
:param path: String representing path of .json file relative to current working directory.
:param filename: String representing name of .json file.
:param dictionary: Dictionary containing the data to be written to the file in JSON format.
"""
scriptfile.wFileData(ptpdir, path, filename, json.dumps(dictionary), True)
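# Illustrative usage sketch: round-trips a small settings dictionary through
# writeJson/loadJson. The folder and file names are placeholders; `ptpdir` is
# whatever string initPTPDIR() produced for the project.
def _example_roundtrip(ptpdir):
    writeJson(ptpdir, "config", "settings.json", {"volume": 10, "muted": False})
    return loadJson(ptpdir, "config", "settings.json")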
| 32.235294 | 97 | 0.771898 |
39d5fb8abacc9acf414d333ae1fd9ba3f1382bab | 3,827 | py | Python | getTweets.py | feiyue33/Mapping-Amman | 1c2cf9d7534f3a89978fd6512928407b5a7fe99d | [
"MIT"
] | null | null | null | getTweets.py | feiyue33/Mapping-Amman | 1c2cf9d7534f3a89978fd6512928407b5a7fe99d | [
"MIT"
] | null | null | null | getTweets.py | feiyue33/Mapping-Amman | 1c2cf9d7534f3a89978fd6512928407b5a7fe99d | [
"MIT"
] | null | null | null | import urllib.request
import json
import dml
import prov.model
import datetime
import uuid
class getTweets(dml.Algorithm):
contributor = 'gaotian_xli33'
reads = []
writes = ['emmaliu_gaotian_xli33_yuyangl.tweets']
@staticmethod
def execute(trial=False):
'''Retrieve some data sets (not using the API here for the sake of simplicity).'''
startTime = datetime.datetime.now()
# Set up the database connection.
client = dml.pymongo.MongoClient()
repo = client.repo
repo.authenticate('emmaliu_gaotian_xli33_yuyangl', 'emmaliu_gaotian_xli33_yuyangl')
url = 'http://datamechanics.io/data/tweets_amman.json'
response = urllib.request.urlopen(url).read().decode("utf-8")
# print(response)
r = json.loads(response)
s = json.dumps(r, sort_keys=True, indent=2)
repo.dropCollection("tweets")
repo.createCollection("tweets")
repo['emmaliu_gaotian_xli33_yuyangl.tweets'].insert_many(r)
repo['emmaliu_gaotian_xli33_yuyangl.tweets'].metadata({'complete': True})
print(repo['emmaliu_gaotian_xli33_yuyangl.tweets'].metadata())
repo.logout()
endTime = datetime.datetime.now()
return {"start": startTime, "end": endTime}
@staticmethod
def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None):
'''
Create the provenance document describing everything happening
in this script. Each run of the script will generate a new
document describing that invocation event.
'''
# Set up the database connection.
client = dml.pymongo.MongoClient()
repo = client.repo
repo.authenticate('emmaliu_gaotian_xli33_yuyangl', 'emmaliu_gaotian_xli33_yuyangl')
doc.add_namespace('alg', 'http://datamechanics.io/algorithm/emmaliu_gaotian_xli33_yuyangl') # The scripts are in <folder>#<filename> format.
doc.add_namespace('dat', 'http://datamechanics.io/data/emmaliu_gaotian_xli33_yuyangl') # The data sets are in <user>#<collection> format.
doc.add_namespace('ont', 'http://datamechanics.io/ontology#') # 'Extension', 'DataResource', 'DataSet', 'Retrieval', 'Query', or 'Computation'.
doc.add_namespace('log', 'http://datamechanics.io/log/') # The event log.
doc.add_namespace('bdp', 'https://developer.twitter.com/en/docs/tweets/search/api-reference/get-search-tweets')
this_script = doc.agent('alg:emmaliu_gaotian_xli33_yuyangl#getTweets',
{prov.model.PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'})
resource = doc.entity('bdp:twitter API',
{'prov:label': '311, Service Requests', prov.model.PROV_TYPE: 'ont:DataResource',
'ont:Extension': 'json'})
get_tweets = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)
doc.wasAssociatedWith(get_tweets, this_script)
doc.usage(get_tweets, resource, startTime, None,
{prov.model.PROV_TYPE: 'ont:Retrieval',
'ont:Query': '?type=Animal+Found&$select=type,latitude,longitude,OPEN_DT'
}
)
tweets = doc.entity('dat:emmaliu_gaotian_xli33_yuyangl#get_tweets',
{prov.model.PROV_LABEL: 'tweets from Amman', prov.model.PROV_TYPE: 'ont:DataSet'})
doc.wasAttributedTo(tweets, this_script)
doc.wasGeneratedBy(tweets, get_tweets, endTime)
doc.wasDerivedFrom(tweets, resource, get_tweets, get_tweets, get_tweets)
repo.logout()
return doc
getTweets.execute()
# doc = getTweets.provenance()
# print(doc.get_provn())
# print(json.dumps(json.loads(doc.serialize()), indent=4))
## eof | 43.488636 | 152 | 0.651685 |
93fa920e562b7de8e742c1cb13c9e473ab7495c5 | 2,018 | py | Python | drfs/tests/test_path.py | datarevenue-berlin/drfs | d44274b0ae6e1b802b7763b5088825a83cc12fa6 | [
"MIT"
] | 2 | 2021-07-29T10:38:30.000Z | 2021-09-08T11:48:39.000Z | drfs/tests/test_path.py | datarevenue-berlin/drfs | d44274b0ae6e1b802b7763b5088825a83cc12fa6 | [
"MIT"
] | 2 | 2020-10-07T07:47:31.000Z | 2021-11-15T17:52:33.000Z | drfs/tests/test_path.py | datarevenue-berlin/drfs | d44274b0ae6e1b802b7763b5088825a83cc12fa6 | [
"MIT"
] | null | null | null | import pytest
from drfs import config
from drfs.path import DRPath
def test_is_wildcard():
assert not DRPath("/home").is_wildcard
assert DRPath("/home/*").is_wildcard
assert DRPath("/home/*/yo").is_wildcard
assert DRPath("/home/**/yo").is_wildcard
assert DRPath("/home/*.csv").is_wildcard
assert not DRPath("s3://bucket").is_wildcard
assert DRPath("s3://bucket/*").is_wildcard
assert DRPath("s3://bucket/*/yo").is_wildcard
assert DRPath("s3://bucket/**/yo").is_wildcard
assert DRPath("s3://bucket/*.csv").is_wildcard
def test_is_template():
assert not DRPath("/home").is_template
assert not DRPath("/home/abc{").is_template
assert not DRPath("/home/abc}{").is_template
assert DRPath("/home/abc{}").is_template
assert DRPath("{}/home/abc").is_template
assert DRPath("/home/abc{yo}").is_template
assert DRPath("/home/abc{yo/100:.2f}").is_template
def test_remote_div(s3):
p1 = DRPath("s3://test-bucket/")
config["fs_opts"]["s3"] = {"access_key": "test"}
assert p1.storage_options == config["fs_opts"]["s3"].get(dict)
config["fs_opts"] = {}
opts = {"key": "abc", "secret": "def"}
p2 = DRPath("s3://test-bucket", storage_options=opts)
assert p2.storage_options == opts
assert p2._acc_real is None
p2.exists() # create fs instance with storage options
assert p2._acc_real is not None
assert p2._acc_real.fs.key == opts["key"]
assert p2._acc_real.fs.secret == opts["secret"]
p3 = p2 / "test.txt"
assert p3.storage_options == p2.storage_options
assert p3._acc_real is not None
assert p3._acc_real is p2._acc_real
@pytest.mark.parametrize(("str_path",), [("s3://test_bucket",), ("/home/test_dir",)])
def test_path_get_item(str_path):
p = DRPath(str_path)
assert p[:5] == str_path[:5]
@pytest.mark.parametrize(("str_path",), [("s3://test_bucket",), ("/home/test_dir",)])
def test_path_startswith(str_path):
p = DRPath(str_path)
assert p.startswith(str_path[:5])
| 31.53125 | 85 | 0.666006 |
d04798128a64fc8698b465a22c59b90386253d21 | 768 | py | Python | main.py | clean-code-craft-tcq-2/well-named-in-py-kumarSudhirTCQ | 20df82c9d9234477fceeef744c6ce8e573e33234 | [
"MIT"
] | null | null | null | main.py | clean-code-craft-tcq-2/well-named-in-py-kumarSudhirTCQ | 20df82c9d9234477fceeef744c6ce8e573e33234 | [
"MIT"
] | null | null | null | main.py | clean-code-craft-tcq-2/well-named-in-py-kumarSudhirTCQ | 20df82c9d9234477fceeef744c6ce8e573e33234 | [
"MIT"
] | null | null | null | from color_coding import*
from color_manual import*
def test_number_to_pair(pair_number, expected_major_color, expected_minor_color):
major_color, minor_color = get_color_from_pair_number(pair_number)
assert(major_color == expected_major_color)
assert(minor_color == expected_minor_color)
def test_pair_to_number(major_color, minor_color, expected_pair_number):
pair_number = get_pair_number_from_color(major_color, minor_color)
assert(pair_number == expected_pair_number)
if __name__ == '__main__':
test_number_to_pair(4, 'White', 'Brown')
test_number_to_pair(5, 'White', 'Slate')
test_pair_to_number('Black', 'Orange', 12)
test_pair_to_number('Violet', 'Slate', 25)
test_pair_to_number('Red', 'Orange', 7)
print_manual()
print('Done :)')
| 34.909091 | 81 | 0.783854 |
fe7cadf3f131c76bfc6dc1020df91f4b39068968 | 472 | py | Python | google/exceptions.py | rohitkhatri/google-python-sdk | 6e0348bc5a519bf686bf674a3e9677a3cf81d4c5 | [
"MIT"
] | 2 | 2019-02-25T02:04:13.000Z | 2019-04-05T20:22:30.000Z | google/exceptions.py | rohitkhatri/google-python-sdk | 6e0348bc5a519bf686bf674a3e9677a3cf81d4c5 | [
"MIT"
] | null | null | null | google/exceptions.py | rohitkhatri/google-python-sdk | 6e0348bc5a519bf686bf674a3e9677a3cf81d4c5 | [
"MIT"
] | null | null | null | class GoogleException(Exception):
def __init__(self, code, message, response):
self.status_code = code
self.error_type = message
self.message = message
self.response = response
self.get_error_type()
def get_error_type(self):
json_response = self.response.json()
if 'error' in json_response and 'errors' in json_response['error']:
self.error_type = json_response['error']['errors'][0]['reason']
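# Illustrative usage sketch: how the exception is typically raised after a failed
# Google API call. `resp` is assumed to be a requests.Response whose JSON body carries
# the usual {"error": {"errors": [...]}} structure parsed by get_error_type().
def example_raise_for_status(resp):
    if resp.status_code >= 400:
        raise GoogleException(resp.status_code, resp.reason, resp)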
| 33.714286 | 75 | 0.646186 |
a264a841682b8a4db6c2324d739e1b97fc112142 | 3,746 | py | Python | solstice/tagger/editors/shaderseditor.py | tpoveda/solstice | ccccc376cebd6701d038fdd6ebaabc33ebdf259f | [
"MIT"
] | null | null | null | solstice/tagger/editors/shaderseditor.py | tpoveda/solstice | ccccc376cebd6701d038fdd6ebaabc33ebdf259f | [
"MIT"
] | null | null | null | solstice/tagger/editors/shaderseditor.py | tpoveda/solstice | ccccc376cebd6701d038fdd6ebaabc33ebdf259f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains implementation for shaders editor
"""
from __future__ import print_function, division, absolute_import
__author__ = "Tomas Poveda"
__license__ = "MIT"
__maintainer__ = "Tomas Poveda"
__email__ = "tpovedatd@gmail.com"
from functools import partial
from Qt.QtWidgets import *
import tpDcc as tp
import artellapipe
from artellapipe.tools.tagger.widgets import taggereditor
class ShadersEditor(taggereditor.TaggerEditor, object):
EDITOR_TYPE = 'Shaders'
def __init__(self, project, parent=None):
super(ShadersEditor, self).__init__(project=project, parent=parent)
def ui(self):
super(ShadersEditor, self).ui()
self._update_shaders_btn = QPushButton('Update Shaders')
self.main_layout.addWidget(self._update_shaders_btn)
def setup_signals(self):
self._update_shaders_btn.clicked.connect(partial(self.update_data, None))
def initialize(self):
"""
Initializes tagger editor
"""
pass
def reset(self):
"""
Function that resets all editor information
"""
pass
def update_tag_buttons_state(self, sel=None):
"""
Updates the selection tag attribute of the tag data node
:param name: str, name of the selection tag to add/remove
"""
tag_data_node = artellapipe.TagsMgr().get_tag_data_node_from_current_selection(sel)
if tag_data_node is None:
return
attr_exists = tp.Dcc.attribute_exists(node=tag_data_node, attribute_name='shaders')
if attr_exists:
pass
# raise NotImplementedError('Shaders Update functionality not implemented yet!')
# description = cmds.getAttr(tag_data_node + '.description')
# if description is not None and description != '':
# self._description_text.setText(description)
def fill_tag_node(self, tag_data_node, *args, **kwargs):
"""
Fills given tag node with the data managed by this editor
:param tag_data_node: str
"""
sel = kwargs.pop('sel', None)
tag_data_node = artellapipe.TagsMgr().get_tag_data_node_from_current_selection(sel)
if tag_data_node is None:
return
attr_exists = tp.Dcc.attribute_exists(node=tag_data_node, attribute_name='shaders')
if not attr_exists:
tp.Dcc.add_string_attribute(node=tag_data_node, attribute_name='shaders')
asset_groups = tp.Dcc.list_nodes(node_name='*_grp', node_type='transform')
if not asset_groups or len(asset_groups) <= 0:
return
# all_shading_groups = list()
# json_data = dict()
# for grp in asset_groups:
# json_data[grp] = dict()
# children = cmds.listRelatives(grp, type='transform', allDescendents=True, fullPath=True)
# for child in children:
# child_shapes = cmds.listRelatives(child, shapes=True, fullPath=True)
# for shape in child_shapes:
# json_data[grp][shape] = dict()
# shading_groups = cmds.listConnections(shape, type='shadingEngine')
# for shading_grp in shading_groups:
# shading_grp_mat = cmds.ls(cmds.listConnections(shading_grp), materials=True)
# json_data[grp][shape][shading_grp] = shading_grp_mat
# cmds.setAttr(tag_data_node + '.description', lock=False)
# cmds.setAttr(tag_data_node + '.description', self._description_text.toPlainText(), type='string')
# cmds.setAttr(tag_data_node + '.description', lock=True)
self.dataUpdated.emit()
| 33.446429 | 107 | 0.647624 |
b9a622558061edefae590bc0f10f599097c720a3 | 574 | py | Python | swingers/conf/project_template/project_name/urls.py | jawaidm/pbs | 87f5c535c976d6a5eccbfbbf2073589b6e366d04 | [
"Apache-2.0"
] | null | null | null | swingers/conf/project_template/project_name/urls.py | jawaidm/pbs | 87f5c535c976d6a5eccbfbbf2073589b6e366d04 | [
"Apache-2.0"
] | 12 | 2019-10-22T23:16:38.000Z | 2022-03-11T23:17:45.000Z | swingers/conf/project_template/project_name/urls.py | jawaidm/pbs | 87f5c535c976d6a5eccbfbbf2073589b6e366d04 | [
"Apache-2.0"
] | 5 | 2019-12-19T06:18:42.000Z | 2022-01-07T01:16:18.000Z | from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'dec_template.views.home', name='home'),
# url(r'^dec_template/', include('dec_template.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
)
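# Illustrative sketch: with the admin enabled as described in the comments above (and
# django.contrib.admin added to INSTALLED_APPS), the pattern list would gain:
# urlpatterns += patterns('',
#     url(r'^admin/', include(admin.site.urls)),
# )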
| 31.888889 | 71 | 0.686411 |
5c9156f6108cb32aa76810a1431e34f5fbb1e4eb | 11,855 | py | Python | TP_01/ejercicio_2/ejercicio_2.py | AgustinNormand/recuperacion-de-informacion | 511ff6a83a929621792ee684aa5a55bcad512c9d | [
"MIT"
] | null | null | null | TP_01/ejercicio_2/ejercicio_2.py | AgustinNormand/recuperacion-de-informacion | 511ff6a83a929621792ee684aa5a55bcad512c9d | [
"MIT"
] | null | null | null | TP_01/ejercicio_2/ejercicio_2.py | AgustinNormand/recuperacion-de-informacion | 511ff6a83a929621792ee684aa5a55bcad512c9d | [
"MIT"
] | null | null | null | import sys
import pathlib
import re
import matplotlib.pyplot as plt
class Collection:
def __init__(self, dirpath):
self.dirpath = dirpath
self.corpus_path = pathlib.Path(dirpath)
self.documents = []
self.process_documents()
def process_documents(self):
for file_path in self.corpus_path.iterdir():
self.documents.append(Document(file_path))
def get_document_count(self):
return len(self.documents)
def get_documents(self):
return self.documents
def get_shortest_document(self):
shortest_document = self.documents[0]
shortest_token_count = self.documents[0].get_token_count()
for document in self.documents:
if document.get_token_count() < shortest_token_count:
shortest_document = document
shortest_token_count = document.get_token_count()
return shortest_document
def get_longest_document(self):
longest_document = self.documents[0]
longest_token_count = self.documents[0].get_token_count()
for document in self.documents:
if document.get_token_count() > longest_token_count:
longest_document = document
longest_token_count = document.get_token_count()
return longest_document
def get_token_count(self, token_type="all"):
counter = 0
for document in self.documents:
counter += document.get_token_count(token_type)
return counter
def check_email_consistency(self):
"""
Checks whether the number of @ characters in the document matches the number of recognized email tokens.
Because the sequence @import appears in many documents, the check actually compares
the number of @ characters minus the number of @import occurrences against the number of
email tokens found.
"""
for document in self.documents:
import_count = count = document.get_file_content().count("@import")
count = document.get_file_content().count("@")
if (count - import_count) != len(document.get_email_tokens()):
print("Diferente. Email token count:{}. Arroba ocurrences {}. File: {}. Email tokens: {}".format(
len(document.get_email_tokens()), count, document.get_path(), document.get_email_tokens()))
print("\r\n")
print("\r\n")
print("\r\n")
# ...boletín oficial @import... Not an email.
# ...omitiendo @ y lo que le sigue...
# ...oba username: @nexo.unnoba.edu.ar pass... ?
# ...pción: seminarios07@unsam.edu.ar +info cursos de i... Digits had to be added to the RE
# ...proyecto incluirt-@ el p...
# ...twitter: @prensa_unsj...
# ...itorial @import ...
def check_url_consistency(self):
"""
First, checks whether the number of :// occurrences in the document matches the number of URL tokens found.
Then, checks whether the number of www occurrences in the document matches the token count.
"""
for document in self.documents:
patterns = ["://", "www"]
for pattern in patterns:
count = document.get_file_content().count(pattern)
if count != document.get_token_count("url"):
print("Diferente cantidad de "+pattern+" y urls detectadas. File: {}. URL tokens: {}".format(document.get_path(), document.get_tokens("url")))
print("\r\n")
# ... http://www.unsam.edu.ar/home/isa-vacaclonada-iib-inta-23junio2011-unsam.pdf ... Hyphens were missing in the RE
# http://revistacyt.unne.edu.ar/noticia_bio7.php Underscores were missing in the RE
# http://www.youtube.comhttp://www.youtube.com
# www.unt.edu.ar/fcsnat/insue
class Document:
def __init__(self, path):
self.path = path
self.word_list = []
self.parse_words()
self.token_dictionary = {}
self.token_dictionary["all"] = []
self.token_dictionary["email"] = []
self.token_dictionary["general"] = []
self.token_dictionary["number"] = []
self.token_dictionary["url"] = []
self.token_dictionary["abbreviation"] = []
def parse_words(self):
with open(self.path, "r") as f:
for line in f.readlines():
self.word_list.extend(line.strip().split())
def get_words_list(self):
return self.word_list
def get_words_count(self):
return len(self.word_list)
def set_tokens(self, tokens):
for token in tokens:
self.token_dictionary["all"].append(token[0])
self.token_dictionary[token[1]].append(token[0])
def get_token_count(self, token_type="all"):
return len(self.token_dictionary[token_type])
def get_tokens(self, token_type="all"):
return self.token_dictionary[token_type]
def get_path(self):
return self.path
def get_file_content(self):
with open(self.path, "r") as f:
return f.read()
class Text_analyzer:
def __init__(self, dirpath, delete_empty_words, empty_words_path):
self.palabras_vacias = []
if delete_empty_words == "True":
with open(empty_words_path, "r") as f:
for line in f.readlines():
self.palabras_vacias.extend(line.split(","))
self.collection = Collection(dirpath)
self.term_file_path = "terminos.txt"
self.statistics_file_path = "estadisticas.txt"
self.frequencies_file_path = "frecuencias.txt"
def generate_term_file(self):
self.frequencies = self.obtain_frequencies()
self.export_frequencies()
def obtain_frequencies(self):
frequencies = {}
documents = self.collection.get_documents()
for document in documents:
document_word_list = document.get_words_list() ## Proper nouns
document_tokens = []
for document_word in document_word_list:
if document_word not in self.palabras_vacias:
document_token = [document_word,
self.get_token_type(document_word)]
document_tokens.append(document_token)
document.set_tokens(document_tokens)
frequencies = self.increment_frequency(
frequencies, document_tokens)
return frequencies
def get_token_type(self, document_word):
regular_expressions = [
["([a-zA-Z0-9]+@[a-z.]+)", "email"],
["(https?://[a-zA-Z./0-9-_?=]+)", "url"],
["([A-Z][a-z]+\.)", "abbreviation"], # Dr. Lic.
["([A-Z]\.[A-Z]\.)", "abbreviation"], # S.A. # This one must come before the "etc." pattern
["([a-z]+\.)", "abbreviation"], # etc.
["([A-Z]{4})", "abbreviation"], # NASA
["([a-zA-Z]+[0-9]*[^A-Za-z0-9]*)", "general"], # Match anything that is not a number
["([0-9]+)", "number"], # This one did not work well: ( [0-9]+ ) only matches with spaces before and after
["([0-9]+\.[0-9]+)", "number"], # Decimals "(\b[0-9]+\.[0-9]+\b)"
#([a-zA-Z0-9$&+,:;=?@#|'<>.^*()%!-/]+)
]
for regular_expression, token_type in regular_expressions:
m = re.search(regular_expression, document_word)
if m != None:
return token_type
return "general"
def increment_frequency(self, frequencies, document_tokens):
unique_document_tokens = []
for token, token_type in document_tokens:
if token not in unique_document_tokens:
unique_document_tokens.append(token)
if token in frequencies.keys():
# Increment the frequency across the collection
frequencies[token][0] += 1
# Increment the frequency within the document
frequencies[token][1] += 1
else:
frequencies[token] = [1, 1] # Initialize both counters
else:
if token in frequencies.keys():
# Increment the frequency across the collection
frequencies[token][0] += 1
else:
print("Warning: Entró a un if que no debería")
return frequencies
def increment_document_frequency(frequencies, tokens_list):
document_words = []
for token in tokens_list:
if token not in document_words:
document_words.append(token)
if token in frequencies.keys():
frequencies[token] += 1
else:
frequencies[token] = 1
return frequencies
def export_frequencies(self):
sort_frequencies = sorted(self.frequencies.items(), key=lambda x: x[0])
with open(self.term_file_path, "w") as f:
for sort_frequency in sort_frequencies:
f.write("{} {} {}".format(
sort_frequency[0], sort_frequency[1][0], sort_frequency[1][1]))
f.write("\r\n")
print("Archivo {} exportado.".format(self.term_file_path))
def generate_statistics_file(self):
with open(self.statistics_file_path, "w") as f:
f.write(str(self.collection.get_document_count())+"\r\n")
f.write(str(len(self.frequencies))+"\r\n")
f.write(str(len(self.frequencies) /
self.collection.get_document_count())+"\r\n")
f.write(str(self.calculate_average_len_term())+"\r\n")
f.write(
str(self.collection.get_longest_document().get_token_count())+"\r\n")
f.write(
str(self.collection.get_shortest_document().get_token_count())+"\r\n")
f.write(str(self.get_count_once_tokens())+"\r\n")
print("Archivo {} exportado.".format(self.statistics_file_path))
print("\r\n")
print("----------------------------------------------------------------")
print("Estadisticas de consola para debug")
print("Document count {}.".format(self.collection.get_document_count()))
print("Cantidad de tokens {}".format(len(self.frequencies)))
print("Promedio de tokens de los docummentos {}.".format(len(self.frequencies) / self.collection.get_document_count()))
print("Largo promedio de un termino {}.".format(self.calculate_average_len_term()))
print("Cantidad de tokens del documento mas largo {}.".format(self.collection.get_longest_document().get_token_count()))
print("Cantidad de tokens del documento mas corto {}.".format(self.collection.get_shortest_document().get_token_count()))
print("Cantidad de terminos que aparecen solo 1 vez {}.".format(self.get_count_once_tokens()))
print("Cantidad de tokens de tipo {}: {}".format("abbreviation", self.collection.get_token_count("abbreviation")))
print("Cantidad de tokens de tipo {}: {}".format("email", self.collection.get_token_count("email")))
print("Cantidad de tokens de tipo {}: {}".format("url", self.collection.get_token_count("url")))
print("Cantidad de tokens de tipo {}: {}".format("number", self.collection.get_token_count("number")))
print("Imprimiendo archivos de tokens")
token_types = ["abbreviation", "email", "url", "number"]
for token_type in token_types:
with open("{}.txt".format(token_type), "w") as f:
tokens = self.collection.get_tokens() # TODO
#self.collection.save_token_files()
#print(self.collection.get_token_count("url"))
#print(self.collection.get_email_token_count())
#self.collection.check_url_consistency()
print("----------------------------------------------------------------")
print("\r\n")
def calculate_average_len_term(self):
total_len = 0
for frequency in self.frequencies:
total_len += len(frequency)
return total_len / len(self.frequencies)
def get_count_once_tokens(self):
counter = 0
for frequency in self.frequencies:
if self.frequencies[frequency][0] == 1:
counter += 1
return counter
def generate_frequencies_file(self):
sort_frequencies = sorted(
self.frequencies.items(), key=lambda x: x[1][0])
with open(self.frequencies_file_path, "w") as f:
for frequent_frequency in sorted(sort_frequencies[-10:], key=lambda x: x[1][0], reverse=True):
f.write(
str("{} {}".format(frequent_frequency[0], frequent_frequency[1][0]))+"\r\n")
f.write("\r\n")
for non_frequent_frecuency in sort_frequencies[:10]:
f.write(str("{} {}".format(
non_frequent_frecuency[0], non_frequent_frecuency[1][0]))+"\r\n")
print("Archivo {} exportado.".format(self.frequencies_file_path))
if __name__ == '__main__':
if len(sys.argv) < 3:
print('Es necesario pasar los siguientes argumentos:')
print('Path a un directorio')
print('True or False eliminar palabras vacias')
sys.exit(0)
empty_words_path = None
if (sys.argv[2] == 'True'):
if len(sys.argv) < 4:
print('Indicar el Path al archivo de palabras vacias')
sys.exit(0)
else:
empty_words_path = sys.argv[3]
dirpath = sys.argv[1]
delete_empty_words = sys.argv[2]
ta = Text_analyzer(dirpath, delete_empty_words, empty_words_path)
ta.generate_term_file()
ta.generate_statistics_file()
ta.generate_frequencies_file()
| 34.066092 | 147 | 0.691016 |
d49c2099bae1165e755512fd2ea8f950a153e09f | 8,321 | py | Python | salt/modules/splunk.py | casselt/salt | d8a2ef4e0cd544656489d23d161928879b1fc1c0 | [
"Apache-2.0"
] | 12 | 2015-01-21T00:18:25.000Z | 2021-07-11T07:35:26.000Z | salt/modules/splunk.py | casselt/salt | d8a2ef4e0cd544656489d23d161928879b1fc1c0 | [
"Apache-2.0"
] | 86 | 2017-01-27T11:54:46.000Z | 2020-05-20T06:25:26.000Z | salt/modules/splunk.py | casselt/salt | d8a2ef4e0cd544656489d23d161928879b1fc1c0 | [
"Apache-2.0"
] | 12 | 2015-01-05T09:50:42.000Z | 2019-08-19T01:43:40.000Z | # -*- coding: utf-8 -*-
'''
Module for interop with the Splunk API
.. versionadded:: 2016.3.0.
:depends: - splunk-sdk python module
:configuration: Configure this module by specifying the name of a configuration
profile in the minion config, minion pillar, or master config. The module
will use the 'splunk' key by default, if defined.
For example:
.. code-block:: yaml
splunk:
username: alice
password: abc123
host: example.splunkcloud.com
port: 8080
'''
from __future__ import absolute_import, unicode_literals, print_function
# Import python libs
import logging
import hmac
import base64
import subprocess
# Import 3rd-party libs
from salt.ext import six
HAS_LIBS = False
try:
import splunklib.client
from splunklib.client import AuthenticationError
from splunklib.binding import HTTPError
HAS_LIBS = True
except ImportError:
pass
log = logging.getLogger(__name__)
__virtualname__ = 'splunk'
SERVICE_NAME = "splunk"
ALLOWED_FIELDS_FOR_MODIFICATION = [
'realname',
'roles',
'defaultApp',
'tz',
#'capabilities',
'name'
]
REQUIRED_FIELDS_FOR_CREATE = [
'realname',
'name',
'roles'
]
def __virtual__():
'''
Only load this module if splunk is installed on this minion.
'''
if HAS_LIBS:
return __virtualname__
return (False, 'The splunk execution module failed to load: '
'requires splunk python library to be installed.')
def _get_secret_key(profile):
config = __salt__['config.option'](profile)
return config.get('password_secret_key')
def _generate_password(email):
m = hmac.new(base64.b64decode(_get_secret_key('splunk')), six.text_type([email, SERVICE_NAME]))
return base64.urlsafe_b64encode(m.digest()).strip().replace('=', '')
def _send_email(name, email):
"send a email to inform user of account creation"
config = __salt__['config.option']('splunk')
email_object = config.get('email')
if email_object:
cc = email_object.get('cc')
subject = email_object.get('subject')
message = email_object.get('message').format(name, name, _generate_password(email), name)
try:
mail_process = subprocess.Popen(['mail', '-s', subject, '-c', cc, email], stdin=subprocess.PIPE)
except Exception as e:
log.error("unable to send email to %s: %s", email, e)
mail_process.communicate(message)
log.info("sent account creation email to %s", email)
def _populate_cache(profile="splunk"):
config = __salt__['config.option'](profile)
key = "splunk.users.{0}".format(
config.get('host')
)
if key not in __context__:
client = _get_splunk(profile)
kwargs = {'sort_key': 'realname', 'sort_dir': 'asc'}
users = client.users.list(count=-1, **kwargs)
result = {}
for user in users:
result[user.email.lower()] = user
__context__[key] = result
return True
def _get_splunk(profile):
'''
Return the splunk client, cached into __context__ for performance
'''
config = __salt__['config.option'](profile)
key = "splunk.{0}:{1}:{2}:{3}".format(
config.get('host'),
config.get('port'),
config.get('username'),
config.get('password')
)
if key not in __context__:
__context__[key] = splunklib.client.connect(
host=config.get('host'),
port=config.get('port'),
username=config.get('username'),
password=config.get('password'))
return __context__[key]
def list_users(profile="splunk"):
'''
List all users in the splunk DB
CLI Example:
salt myminion splunk.list_users
'''
config = __salt__['config.option'](profile)
key = "splunk.users.{0}".format(
config.get('host')
)
if key not in __context__:
_populate_cache(profile)
return __context__[key]
def get_user(email, profile="splunk", **kwargs):
'''
Get a splunk user by name/email
CLI Example:
salt myminion splunk.get_user 'user@example.com' user_details=false
salt myminion splunk.get_user 'user@example.com' user_details=true
'''
user_map = list_users(profile)
user_found = email.lower() in user_map.keys()
if not kwargs.get('user_details', False) and user_found:
# The user is in splunk group, just return
return True
elif kwargs.get('user_details', False) and user_found:
user = user_map[email.lower()]
response = {}
for field in ['defaultApp', 'realname', 'name', 'email']:
response[field] = user[field]
response['roles'] = []
for role in user.role_entities:
response['roles'].append(role.name)
return response
return False
def create_user(email, profile="splunk", **kwargs):
'''
create a splunk user by name/email
CLI Example:
salt myminion splunk.create_user user@example.com roles=['user'] realname="Test User" name=testuser
'''
client = _get_splunk(profile)
email = email.lower()
user = list_users(profile).get(email)
if user:
log.error("User is already present %s", email)
return False
property_map = {}
for field in ALLOWED_FIELDS_FOR_MODIFICATION:
if kwargs.get(field):
property_map[field] = kwargs.get(field)
try:
# create
for req_field in REQUIRED_FIELDS_FOR_CREATE:
if not property_map.get(req_field):
log.error("Missing required params %s",
', '.join([six.text_type(k) for k in REQUIRED_FIELDS_FOR_CREATE]))
return False
newuser = client.users.create(username=property_map['name'],
password=_generate_password(email),
roles=property_map['roles'],
email=email,
realname=property_map['realname'])
_send_email(newuser.name, newuser.email)
response = {}
for field in ['email', 'password', 'realname', 'roles']:
response[field] = newuser[field]
except Exception as e:
log.error("Caught exception %s", e)
return False
def update_user(email, profile="splunk", **kwargs):
'''
Update a splunk user by email
CLI Example:
salt myminion splunk.update_user example@domain.com roles=['user'] realname="Test User"
'''
client = _get_splunk(profile)
email = email.lower()
user = list_users(profile).get(email)
if not user:
log.error("Failed to retrieve user {0}".format(email))
return False
property_map = {}
for field in ALLOWED_FIELDS_FOR_MODIFICATION:
if kwargs.get(field):
property_map[field] = kwargs.get(field)
# update
kwargs = {}
roles = [role.name for role in user.role_entities]
for k, v in property_map.items():
resource_value = user[k]
if resource_value is not None:
# you can't update the username in update api call
if k.lower() == 'name':
continue
if k.lower() == 'roles':
if isinstance(v, six.string_types):
v = v.split(',')
if set(roles) != set(v):
kwargs['roles'] = list(set(v))
elif resource_value != v:
kwargs[k] = v
if len(kwargs) > 0:
user.update(**kwargs).refresh()
fields_modified = {}
for field in ALLOWED_FIELDS_FOR_MODIFICATION:
fields_modified[field] = user[field]
else:
#succeeded, no change
return True
def delete_user(email, profile="splunk"):
'''
Delete a splunk user by email
CLI Example:
salt myminion splunk_user.delete 'user@example.com'
'''
client = _get_splunk(profile)
user = list_users(profile).get(email)
if user:
try:
client.users.delete(user.name)
except (AuthenticationError, HTTPError) as e:
log.info('Exception: %s', e)
return False
else:
return False
return user.name not in client.users
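# Illustrative sketch: the execution functions above are normally reached through the
# salt CLI (see the CLI Example strings); calling them directly would look like this,
# assuming a configured 'splunk' profile on the minion. The email and roles are
# placeholder values.
def _example_sync(email='user@example.com'):
    if not get_user(email):
        return create_user(email, realname='Example User', name='example', roles=['user'])
    return update_user(email, roles=['user', 'admin'])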
| 25.446483 | 108 | 0.60786 |
de01366d32b9936f9c798b4b557febdabffd0293 | 1,300 | py | Python | cirq/google/engine/env_config_test.py | matpompili/Cirq | b9ce387a7fc1f571b3d6e903c46543c3578677cb | [
"Apache-2.0"
] | 1 | 2021-02-16T13:14:16.000Z | 2021-02-16T13:14:16.000Z | cirq/google/engine/env_config_test.py | matpompili/Cirq | b9ce387a7fc1f571b3d6e903c46543c3578677cb | [
"Apache-2.0"
] | null | null | null | cirq/google/engine/env_config_test.py | matpompili/Cirq | b9ce387a7fc1f571b3d6e903c46543c3578677cb | [
"Apache-2.0"
] | 1 | 2020-03-12T07:06:14.000Z | 2020-03-12T07:06:14.000Z | # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from unittest import mock
import pytest
import cirq
@mock.patch('cirq.google.engine.client.quantum.QuantumEngineServiceClient')
def test_engine_from_environment(build):
# Default project id present.
with mock.patch.dict(os.environ, {
'CIRQ_QUANTUM_ENGINE_DEFAULT_PROJECT_ID': 'project!',
},
clear=True):
eng = cirq.google.engine_from_environment()
assert eng.project_id == 'project!'
# Nothing present.
with mock.patch.dict(os.environ, {}, clear=True):
with pytest.raises(EnvironmentError,
match='CIRQ_QUANTUM_ENGINE_DEFAULT_PROJECT_ID'):
_ = cirq.google.engine_from_environment()
| 35.135135 | 75 | 0.709231 |
2140d289dc09681671315a500262f9ed2ddc6e47 | 1,668 | py | Python | gisutils/tests/test_validate.py | phobson/gisutils | 260a1f32bcd057239b3bc4fefb878487a362cb6c | [
"BSD-3-Clause"
] | null | null | null | gisutils/tests/test_validate.py | phobson/gisutils | 260a1f32bcd057239b3bc4fefb878487a362cb6c | [
"BSD-3-Clause"
] | null | null | null | gisutils/tests/test_validate.py | phobson/gisutils | 260a1f32bcd057239b3bc4fefb878487a362cb6c | [
"BSD-3-Clause"
] | null | null | null | import numpy
from matplotlib import pyplot
import pytest
import numpy.testing as nptest
from gisutils import validate
from .helpers import raises
@pytest.mark.parametrize(('value', 'expected'), [
(1, numpy.array([1])),
(1., numpy.array([1.])),
(None, numpy.array([None])),
('test', numpy.array(['test'])),
([1, 2, 3], numpy.array([1, 2, 3])),
(numpy.array([1, 2, 3]), numpy.array([1, 2, 3])),
(numpy.array([[1, 2, 3], [4, 5, 6]]), numpy.array([1, 2, 3, 4, 5, 6])),
])
def test_is_vector(value, expected):
result = validate.is_vector(value)
nptest.assert_array_equal(result, expected)
def test_mpl_axes_invalid():
with pytest.raises(ValueError):
validate.mpl_axes('junk')
def test_mpl_axes_with_ax():
fig, ax = pyplot.subplots()
fig1, ax1 = validate.mpl_axes(ax)
assert isinstance(ax1, pyplot.Axes)
assert isinstance(fig1, pyplot.Figure)
assert ax1 is ax
assert fig1 is fig
def test_mpl_axes_with_None():
fig1, ax1 = validate.mpl_axes(None)
assert isinstance(ax1, pyplot.Axes)
assert isinstance(fig1, pyplot.Figure)
@pytest.mark.parametrize(('obj', 'expected', 'err'), [
([1, 2, 3], [1, 2, 3], None),
(1, [1], None),
(None, None, ValueError),
([], None, ValueError),
])
def test_non_empty_list_default(obj, expected, err):
with raises(err):
result = validate.non_empty_list(obj)
assert result == expected
@pytest.mark.parametrize('obj', [None, []])
@pytest.mark.parametrize('on_fail', ['empty', 'create'])
def test_non_empty_list_create(obj, on_fail):
result = validate.non_empty_list(obj, on_fail=on_fail)
assert result == []
| 26.903226 | 75 | 0.649281 |
6bffbcc3e542e2ad96f183e4f2e5472fda60e846 | 845 | py | Python | coex/utils_coex.py | ibaiGorordo/ONNX-CoEx-Stereo-Depth-estimation | ac6890a9369bea47e49dda0c7f016a438859f346 | [
"MIT"
] | 5 | 2021-09-08T07:24:34.000Z | 2022-01-10T05:34:57.000Z | coex/utils_coex.py | ibaiGorordo/ONNX-CoEx-Stereo-Depth-estimation | ac6890a9369bea47e49dda0c7f016a438859f346 | [
"MIT"
] | null | null | null | coex/utils_coex.py | ibaiGorordo/ONNX-CoEx-Stereo-Depth-estimation | ac6890a9369bea47e49dda0c7f016a438859f346 | [
"MIT"
] | 1 | 2021-09-09T00:13:22.000Z | 2021-09-09T00:13:22.000Z | import numpy as np
import cv2
import urllib
from dataclasses import dataclass
@dataclass
class CameraConfig:
baseline: float
f: float
def load_img(url):
req = urllib.request.urlopen(url)
arr = np.asarray(bytearray(req.read()), dtype=np.uint8)
return cv2.imdecode(arr, -1) # 'Load it as it is'
def draw_disparity(disparity_map):
disparity_map = disparity_map.astype(np.uint8)
norm_disparity_map = (255*((disparity_map-np.min(disparity_map))/(np.max(disparity_map) - np.min(disparity_map))))
return cv2.applyColorMap(cv2.convertScaleAbs(norm_disparity_map,1), cv2.COLORMAP_MAGMA)
def draw_depth(depth_map, max_dist):
norm_depth_map = 255*(1-depth_map/max_dist)
norm_depth_map[norm_depth_map < 0] =0
norm_depth_map[depth_map == 0] =0
return cv2.applyColorMap(cv2.convertScaleAbs(norm_depth_map,1), cv2.COLORMAP_MAGMA)
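# Illustrative sketch: how CameraConfig is normally combined with a disparity map to
# recover metric depth (depth = baseline * f / disparity). The baseline and focal
# length below are placeholder values, not calibration data from this project.
def example_depth_from_disparity(disparity_map, cam=CameraConfig(baseline=0.12, f=700.0)):
    depth_map = np.zeros_like(disparity_map, dtype=np.float32)
    valid = disparity_map > 0
    depth_map[valid] = cam.baseline * cam.f / disparity_map[valid]
    return depth_map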
| 25.606061 | 115 | 0.768047 |
4f7c13dc49790317f3f92b75f56e127a8ac7c39d | 9,260 | py | Python | LatLongUTMconversion.py | blockchainhelppro/Segwit-Development- | e4dfe990a69a2a1fcdd902b82121c2ecb1aa332d | [
"MIT"
] | null | null | null | LatLongUTMconversion.py | blockchainhelppro/Segwit-Development- | e4dfe990a69a2a1fcdd902b82121c2ecb1aa332d | [
"MIT"
] | null | null | null | LatLongUTMconversion.py | blockchainhelppro/Segwit-Development- | e4dfe990a69a2a1fcdd902b82121c2ecb1aa332d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# this file is from http://pygps.org/
# Lat Long - UTM, UTM - Lat Long conversions
from math import pi, sin, cos, tan, sqrt
# LatLong- UTM conversion..h
# definitions for lat/long to UTM and UTM to lat/lng conversions
# include <string.h>
_deg2rad = pi / 180.0
_rad2deg = 180.0 / pi
_EquatorialRadius = 2
_eccentricitySquared = 3
_ellipsoid = [
# id, Ellipsoid name, Equatorial Radius, square of eccentricity
# the first one is a placeholder only, to allow array indices to match id numbers
[-1, "Placeholder", 0, 0],
[1, "Airy", 6377563, 0.00667054],
[2, "Australian National", 6378160, 0.006694542],
[3, "Bessel 1841", 6377397, 0.006674372],
[4, "Bessel 1841 (Nambia] ", 6377484, 0.006674372],
[5, "Clarke 1866", 6378206, 0.006768658],
[6, "Clarke 1880", 6378249, 0.006803511],
[7, "Everest", 6377276, 0.006637847],
[8, "Fischer 1960 (Mercury] ", 6378166, 0.006693422],
[9, "Fischer 1968", 6378150, 0.006693422],
[10, "GRS 1967", 6378160, 0.006694605],
[11, "GRS 1980", 6378137, 0.00669438],
[12, "Helmert 1906", 6378200, 0.006693422],
[13, "Hough", 6378270, 0.00672267],
[14, "International", 6378388, 0.00672267],
[15, "Krassovsky", 6378245, 0.006693422],
[16, "Modified Airy", 6377340, 0.00667054],
[17, "Modified Everest", 6377304, 0.006637847],
[18, "Modified Fischer 1960", 6378155, 0.006693422],
[19, "South American 1969", 6378160, 0.006694542],
[20, "WGS 60", 6378165, 0.006693422],
[21, "WGS 66", 6378145, 0.006694542],
[22, "WGS-72", 6378135, 0.006694318],
[23, "WGS-84", 6378137, 0.00669438]
]
# Reference ellipsoids derived from Peter H. Dana's website-
# http://www.utexas.edu/depts/grg/gcraft/notes/datum/elist.html
# Department of Geography, University of Texas at Austin
# Internet: pdana@mail.utexas.edu
# 3/22/95
# Source
# Defense Mapping Agency. 1987b. DMA Technical Report: Supplement to Department of Defense World Geodetic System
# 1984 Technical Report. Part I and II. Washington, DC: Defense Mapping Agency
# def LLtoUTM(int ReferenceEllipsoid, const double Lat, const double Long,
# double &UTMNorthing, double &UTMEasting, char* UTMZone)
def LLtoUTM(ReferenceEllipsoid, Lat, Long, zone=None):
"""converts lat/long to UTM coords. Equations from USGS Bulletin 1532
East Longitudes are positive, West longitudes are negative.
North latitudes are positive, South latitudes are negative
Lat and Long are in decimal degrees
Written by Chuck Gantz- chuck.gantz@globalstar.com"""
a = _ellipsoid[ReferenceEllipsoid][_EquatorialRadius]
eccSquared = _ellipsoid[ReferenceEllipsoid][_eccentricitySquared]
k0 = 0.9996
# Make sure the longitude is between -180.00 .. 179.9
LongTemp = (Long + 180) - int((Long + 180) / 360) * 360 - 180 # -180.00 .. 179.9
LatRad = Lat * _deg2rad
LongRad = LongTemp * _deg2rad
if zone is None:
ZoneNumber = int((LongTemp + 180) / 6) + 1
else:
ZoneNumber = zone
if Lat >= 56.0 and Lat < 64.0 and LongTemp >= 3.0 and LongTemp < 12.0:
ZoneNumber = 32
# Special zones for Svalbard
if Lat >= 72.0 and Lat < 84.0:
if LongTemp >= 0.0 and LongTemp < 9.0:
ZoneNumber = 31
elif LongTemp >= 9.0 and LongTemp < 21.0:
ZoneNumber = 33
elif LongTemp >= 21.0 and LongTemp < 33.0:
ZoneNumber = 35
elif LongTemp >= 33.0 and LongTemp < 42.0:
ZoneNumber = 37
LongOrigin = (ZoneNumber - 1) * 6 - 180 + 3 # +3 puts origin in middle of zone
LongOriginRad = LongOrigin * _deg2rad
# compute the UTM Zone from the latitude and longitude
UTMZone = "%d%c" % (ZoneNumber, _UTMLetterDesignator(Lat))
eccPrimeSquared = (eccSquared) / (1 - eccSquared)
N = a / sqrt(1 - eccSquared * sin(LatRad) * sin(LatRad))
T = tan(LatRad) * tan(LatRad)
C = eccPrimeSquared * cos(LatRad) * cos(LatRad)
A = cos(LatRad) * (LongRad - LongOriginRad)
M = a * ((1
- eccSquared / 4
- 3 * eccSquared * eccSquared / 64
- 5 * eccSquared * eccSquared * eccSquared / 256) * LatRad
- (3 * eccSquared / 8
+ 3 * eccSquared * eccSquared / 32
+ 45 * eccSquared * eccSquared * eccSquared / 1024) * sin(2 * LatRad)
+ (15 * eccSquared * eccSquared / 256 + 45 * eccSquared * eccSquared * eccSquared / 1024) * sin(4 * LatRad)
- (35 * eccSquared * eccSquared * eccSquared / 3072) * sin(6 * LatRad))
UTMEasting = (k0 * N * (A + (1 - T + C) * A * A * A / 6
+ (5 - 18 * T + T * T + 72 * C - 58 * eccPrimeSquared) * A * A * A * A * A / 120)
+ 500000.0)
UTMNorthing = (k0 * (M + N * tan(LatRad) * (A * A / 2 + (5 - T + 9 * C + 4 * C * C) * A * A * A * A / 24
+ (61
- 58 * T
+ T * T
+ 600 * C
- 330 * eccPrimeSquared) * A * A * A * A * A * A / 720)))
if Lat < 0:
UTMNorthing = UTMNorthing + 10000000.0; # 10000000 meter offset for southern hemisphere
return (UTMZone, UTMEasting, UTMNorthing)
def _UTMLetterDesignator(Lat):
"""This routine determines the correct UTM letter designator for the given
latitude returns 'Z' if latitude is outside the UTM limits of 84N to 80S
Written by Chuck Gantz- chuck.gantz@globalstar.com"""
if 84 >= Lat >= 72:
return 'X'
elif 72 > Lat >= 64:
return 'W'
elif 64 > Lat >= 56:
return 'V'
elif 56 > Lat >= 48:
return 'U'
elif 48 > Lat >= 40:
return 'T'
elif 40 > Lat >= 32:
return 'S'
elif 32 > Lat >= 24:
return 'R'
elif 24 > Lat >= 16:
return 'Q'
elif 16 > Lat >= 8:
return 'P'
elif 8 > Lat >= 0:
return 'N'
elif 0 > Lat >= -8:
return 'M'
elif -8 > Lat >= -16:
return 'L'
elif -16 > Lat >= -24:
return 'K'
elif -24 > Lat >= -32:
return 'J'
elif -32 > Lat >= -40:
return 'H'
elif -40 > Lat >= -48:
return 'G'
elif -48 > Lat >= -56:
return 'F'
elif -56 > Lat >= -64:
return 'E'
elif -64 > Lat >= -72:
return 'D'
elif -72 > Lat >= -80:
return 'C'
else:
return 'Z' # if the Latitude is outside the UTM limits
# void UTMtoLL(int ReferenceEllipsoid, const double UTMNorthing, const double UTMEasting, const char* UTMZone,
# double& Lat, double& Long )
def UTMtoLL(ReferenceEllipsoid, northing, easting, zone):
"""converts UTM coords to lat/long. Equations from USGS Bulletin 1532
East Longitudes are positive, West longitudes are negative.
North latitudes are positive, South latitudes are negative
Lat and Long are in decimal degrees.
Written by Chuck Gantz- chuck.gantz@globalstar.com
Converted to Python by Russ Nelson <nelson@crynwr.com>"""
k0 = 0.9996
a = _ellipsoid[ReferenceEllipsoid][_EquatorialRadius]
eccSquared = _ellipsoid[ReferenceEllipsoid][_eccentricitySquared]
e1 = (1 - sqrt(1 - eccSquared)) / (1 + sqrt(1 - eccSquared))
# NorthernHemisphere: 1 for northern hemisphere, 0 for southern
x = easting - 500000.0 # remove 500,000 meter offset for longitude
y = northing
ZoneLetter = zone[-1]
ZoneNumber = int(zone[:-1])
if ZoneLetter >= 'N':
NorthernHemisphere = 1 # point is in northern hemisphere
else:
NorthernHemisphere = 0 # point is in southern hemisphere
y -= 10000000.0 # remove 10,000,000 meter offset used for southern hemisphere
LongOrigin = (ZoneNumber - 1) * 6 - 180 + 3 # +3 puts origin in middle of zone
eccPrimeSquared = (eccSquared) / (1 - eccSquared)
M = y / k0
mu = M / (
a * (1 - eccSquared / 4 - 3 * eccSquared * eccSquared / 64 - 5 * eccSquared * eccSquared * eccSquared / 256))
phi1Rad = (mu + (3 * e1 / 2 - 27 * e1 * e1 * e1 / 32) * sin(2 * mu)
+ (21 * e1 * e1 / 16 - 55 * e1 * e1 * e1 * e1 / 32) * sin(4 * mu)
+ (151 * e1 * e1 * e1 / 96) * sin(6 * mu))
phi1 = phi1Rad * _rad2deg;
N1 = a / sqrt(1 - eccSquared * sin(phi1Rad) * sin(phi1Rad))
T1 = tan(phi1Rad) * tan(phi1Rad)
C1 = eccPrimeSquared * cos(phi1Rad) * cos(phi1Rad)
R1 = a * (1 - eccSquared) / pow(1 - eccSquared * sin(phi1Rad) * sin(phi1Rad), 1.5)
D = x / (N1 * k0)
Lat = phi1Rad - (N1 * tan(phi1Rad) / R1) * (
D * D / 2 - (5 + 3 * T1 + 10 * C1 - 4 * C1 * C1 - 9 * eccPrimeSquared) * D * D * D * D / 24
+ (61 + 90 * T1 + 298 * C1 + 45 * T1 * T1 - 252 * eccPrimeSquared - 3 * C1 * C1) * D * D * D * D * D * D / 720)
Lat = Lat * _rad2deg
Long = (D - (1 + 2 * T1 + C1) * D * D * D / 6 + (
5 - 2 * C1 + 28 * T1 - 3 * C1 * C1 + 8 * eccPrimeSquared + 24 * T1 * T1)
* D * D * D * D * D / 120) / cos(phi1Rad)
Long = LongOrigin + Long * _rad2deg
return (Lat, Long)
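# Illustrative sketch: batch-converting several WGS-84 points (ellipsoid id 23, the
# last entry in _ellipsoid). The coordinates are arbitrary example values.
def _example_batch(points=((45.00, -75.00), (60.00, 10.00))):
    return [LLtoUTM(23, lat, lon) for lat, lon in points]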
if __name__ == '__main__':
(z, e, n) = LLtoUTM(23, 45.00, -75.00)
print z, e, n
print UTMtoLL(23, n, e, z)
| 37.489879 | 120 | 0.574622 |
e4c04f2661d913d886fe31bc70cae1f861fb0e34 | 794 | py | Python | apps/backend/healthz/tasks.py | ZhuoZhuoCrayon/bk-nodeman | 76cb71fcc971c2a0c2be161fcbd6b019d4a7a8ab | [
"MIT"
] | 31 | 2021-07-28T13:06:11.000Z | 2022-03-10T12:16:44.000Z | apps/backend/healthz/tasks.py | ZhuoZhuoCrayon/bk-nodeman | 76cb71fcc971c2a0c2be161fcbd6b019d4a7a8ab | [
"MIT"
] | 483 | 2021-07-29T03:17:44.000Z | 2022-03-31T13:03:04.000Z | apps/backend/healthz/tasks.py | ZhuoZhuoCrayon/bk-nodeman | 76cb71fcc971c2a0c2be161fcbd6b019d4a7a8ab | [
"MIT"
] | 29 | 2021-07-28T13:06:21.000Z | 2022-03-25T06:18:18.000Z | # coding: utf-8
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-节点管理(BlueKing-BK-NODEMAN) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at https://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from apps.backend.celery import app
@app.task
def healthz(n):
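    """Trivial echo task (presumably used as a worker health check): returns its argument unchanged."""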
return n
| 44.111111 | 115 | 0.787154 |
a8dff34d18ab5e9f66696012156fb71ebb9dddb3 | 1,870 | py | Python | packages/mbed-greentea/setup.py | vmedcy/mbed-os-tools | 5d63e5a2914f79511e98d41adc5ffd6e3c935173 | [
"Apache-2.0"
] | null | null | null | packages/mbed-greentea/setup.py | vmedcy/mbed-os-tools | 5d63e5a2914f79511e98d41adc5ffd6e3c935173 | [
"Apache-2.0"
] | null | null | null | packages/mbed-greentea/setup.py | vmedcy/mbed-os-tools | 5d63e5a2914f79511e98d41adc5ffd6e3c935173 | [
"Apache-2.0"
] | null | null | null | """
This module defines the attributes of the
PyPI package for the mbed SDK test suite
"""
"""
mbed SDK
Copyright (c) 2011-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author: Przemyslaw Wirkus <Przemyslaw.Wirkus@arm.com>
"""
import os
from io import open
from distutils.core import setup
from setuptools import find_packages
DESCRIPTION = "mbed 3.0 onwards test suite, codename Greentea. The test suite is a collection of tools that enable automated testing on mbed-enabled platforms"
OWNER_NAMES = 'Anna Bridge, Azim Khan'
OWNER_EMAILS = 'Anna.Bridge@arm.com, Azim.Khan@arm.com'
# Utility function to cat in a file (used for the README)
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname), encoding="utf-8").read()
setup(name='mbed-greentea',
version='1.6.0',
description=DESCRIPTION,
long_description=read('README.md'),
author=OWNER_NAMES,
author_email=OWNER_EMAILS,
maintainer=OWNER_NAMES,
maintainer_email=OWNER_EMAILS,
url='https://github.com/ARMmbed/mbed-os-tools',
packages=find_packages(),
license="Apache-2.0",
test_suite = 'test',
entry_points={
"console_scripts": ["mbedgt=mbed_greentea.mbed_greentea_cli:main",],
},
install_requires=[
"mbed-os-tools==0.0.1"
],
tests_require = [
"mock>=2"
]
)
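# For example (hypothetical invocation): after `pip install .`, the console_scripts entry
# point above exposes the `mbedgt` command, so a quick smoke test is `mbedgt --help`.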
| 30.655738 | 159 | 0.714439 |
f06f8d1ebb98b0c0e9d0d21bda3dafe24ead857a | 16,872 | py | Python | dionaeaSqliteToJson.py | eval2A/dionaeaToJSON | b900163b29c23aecd253091f332f06a6332655a6 | [
"MIT"
] | 1 | 2021-12-25T17:13:39.000Z | 2021-12-25T17:13:39.000Z | dionaeaSqliteToJson.py | eval2A/dionaeaToJSON | b900163b29c23aecd253091f332f06a6332655a6 | [
"MIT"
] | null | null | null | dionaeaSqliteToJson.py | eval2A/dionaeaToJSON | b900163b29c23aecd253091f332f06a6332655a6 | [
"MIT"
] | 5 | 2018-12-06T15:21:46.000Z | 2021-03-25T01:36:05.000Z | #!/usr/bin/python3
# Version: 1.2
# Source: https://github.com/eval2A/dionaeaToJSON
# Scripted for Dionaea 0.6.0, but should also work for Dionaea 0.8.0
# Description:
# Converts the SQLite database produced by Dionaea to a JSON format suitable for the ELK stack.
# The JSON log files includes details about connections, downloads, logins, SQL commands, etc.
# Requirements for running the script:
# Python 3
# SQLite logging enabled in Dionaea
# This script is meant to run every minute as a cronjob. However, it may be a little heavy
# to run this script the first time, so it is advised that this is done manually.
# This is what you should put in your crontab, it will make the script run every minute:
# */1 * * * * /usr/bin/python3 /path/to/dionaeaSqliteToJson.py
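# For the first (potentially heavy) run, the same command can simply be invoked by hand, e.g.:
# /usr/bin/python3 /path/to/dionaeaSqliteToJson.py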
import sqlite3
import json
from datetime import datetime
import time
import re
import os
import subprocess
# Path of the Dionaea SQLite database file (make sure this is the correct path)
dionaeaSQLite = '/opt/dionaea/var/dionaea/dionaea.sqlite'
# Path to where to store the json log files (optional path)
dionaeaLogPath = '/opt/dionaea/var/dionaea/json'
# Path to binaries captured by Dionaea
dionaeaBinariesPath = '/opt/dionaea/var/dionaea/binaries'
# Configure the SQL database tables to extract information from.
# The configuration is set up for a defaul installation of Dionaea 0.6.0, with Virus Total enabled.
sqlTables = {
0:{
'table':'dcerpcbinds',
'index':'dcerpcbind',
'joins':{
'dcerpcservices':{
'joinTable':'dcerpcservices',
'parentIndex':'dcerpcbind_uuid',
'joinIndex':'dcerpcservice_uuid',
'joins':{
'dcerpcserviceops':{
'joinTable':'dcerpcserviceops',
'parentIndex':'dcerpcservice',
'joinIndex':'dcerpcservice'
}
}
}
}
},
1:{
'table':'dcerpcrequests',
'index':'dcerpcrequest',
'joins':{
'dcerpcservices':{
'joinTable':'dcerpcservices',
'parentIndex':'dcerpcrequest_uuid',
'joinIndex':'dcerpcservice_uuid',
'joins':{
'dcerpcserviceops':{
'joinTable':'dcerpcserviceops',
'parentIndex':'dcerpcservice',
'joinIndex':'dcerpcservice'
}
}
}
}
},
2:{
'table':'downloads',
'index':'download',
'removeHTMLFiles':True,
'virusTotal':True
},
3:{
'table':'emu_profiles',
'index':'emu_profile'
},
4:{
'table':'emu_services',
# 'index':'emu_service' There is a typo in the Dionaea SQLite DB. Bug report: https://github.com/DinoTools/dionaea/issues/139
'index':'emu_serivce'
},
5:{
'table':'logins',
'index':'login'
},
6:{
'table':'mqtt_fingerprints',
'index':'mqtt_fingerprint'
},
7:{
'table':'mqtt_publish_commands',
'index':'mqtt_publish_command'
},
8:{
'table':'mqtt_subscribe_commands',
'index':'mqtt_subscribe_command'
},
9:{
'table':'mssql_commands',
'index':'mssql_command'
},
10:{
'table':'mssql_fingerprints',
'index':'mssql_fingerprint'
},
11:{
'table':'mysql_commands',
'index':'mysql_command',
'joins':{
'mysql_command_args':{
'joinTable':'mysql_command_args',
'parentIndex':'mysql_command',
'joinIndex':'mysql_command'
},
'mysql_command_ops':{
'joinTable':'mysql_command_ops',
'parentIndex':'mysql_command_cmd',
'joinIndex':'mysql_command_cmd'
}
}
},
12:{
'table':'offers',
'index':'offer'
},
13:{
'table':'p0fs',
'index':'p0f'
},
14:{
'table':'resolves',
'index':'resolve'
},
15:{
'table':'sip_commands',
'index':'sip_command',
'joins':{
'sip_addrs':{
'joinTable':'sip_addrs',
'parentIndex':'sip_command',
'joinIndex':'sip_command'
},
'sip_sdp_connectiondatas':{
'joinTable':'sip_sdp_connectiondatas',
'parentIndex':'sip_command',
'joinIndex':'sip_command'
},
'sip_sdp_medias':{
'joinTable':'sip_sdp_medias',
'parentIndex':'sip_command',
'joinIndex':'sip_command'
},
'sip_sdp_origins':{
'joinTable':'sip_sdp_origins',
'parentIndex':'sip_command',
'joinIndex':'sip_command'
},
'sip_vias':{
'joinTable':'sip_vias',
'parentIndex':'sip_command',
'joinIndex':'sip_command'
}
}
},
# Custom config for other services/ihandlers.
# x:{
# 'table':'',
# 'index':''
# },
#
# Custom config for other services/ihandlers that require data from other tables as well.
# x:{
# 'table':'',
# 'index':'',
# 'joins':{
# '':{
# 'joinTable':'',
# 'parentIndex':'',
# 'joinIndex':'',
# 'joins':{
# '':{
# 'joinTable':'',
# 'parentIndex':'',
# 'joinIndex':'',
# 'joins':{
# '':{
# 'joinTable':'',
# 'parentIndex':'',
# 'joinIndex':''
# },
# '':{
# 'joinTable':'',
# 'parentIndex':'',
# 'joinIndex':''
# },
# '':{
# 'joinTable':'',
# 'parentIndex':'',
# 'joinIndex':''
# }
# }
# },
# '':{
# 'joinTable':'',
# 'parentIndex':'',
# 'joinIndex':''
# }
# }
# },
# '':{
# 'joinTable':'',
# 'parentIndex':'',
# 'joinIndex':''
# }
# }
# },
# Connections must be the last table in order to avoid duplicate entries.
100:{
'table':'connections',
'index':'connection'
}
}
# Check if the SQLite database file is located
if os.path.isfile(dionaeaSQLite):
# Assure that the log directory exists
if not os.path.isdir(dionaeaLogPath):
os.makedirs(dionaeaLogPath)
    # To avoid reprocessing data from the sqlite db file that has already been handled,
    # this script keeps a registry of where it last extracted data from the configured SQL tables.
    # To start this indexing process, we prepare a dict for this session's index checkup.
currentIndex = {}
    # Check if the database of the previous indexes exists
registryExists = os.path.isfile(dionaeaLogPath + '/dionaea.registry.json')
# Import the index database created by the script, or prepare to create one.
if registryExists:
previousIndex = json.load(open(dionaeaLogPath + '/dionaea.registry.json'))
else:
previousIndex = {}
    # Create a temporary list of the connection IDs already used, so that we can avoid including those twice.
usedConnections = []
# Function to turn the results into a dictionary
def dictFactory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
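    # Illustrative note: once this row_factory is installed on the connection below,
    # cursor.fetchall() returns dicts keyed by column name (e.g. roughly
    # {'connection': 1, 'connection_type': 'accept', ...}) instead of plain tuples.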
    # Initiate a connection to the database
connection = sqlite3.connect(dionaeaSQLite)
connection.row_factory = dictFactory
cursor = connection.cursor()
# Function to loop through joins from config and generate the required sql join commands
def joinsLoop(parentTableIdentifier, joinsConfig):
sql = ''
for joinIdentifier, joinConfig in joinsConfig.items():
sql += 'LEFT JOIN '+joinConfig['joinTable']+' '+joinIdentifier+' ON'+' '+parentTableIdentifier+'.'+joinConfig['parentIndex']+'='+joinIdentifier+'.'+joinConfig['joinIndex']+' '
if 'joins' in joinConfig:
sql += (joinsLoop(joinIdentifier, joinConfig['joins']))
return (sql)
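    # Illustrative example derived from the config above: for the 'dcerpcbinds' entry this
    # builds something like
    # "LEFT JOIN dcerpcservices dcerpcservices ON dcerpcbinds.dcerpcbind_uuid=dcerpcservices.dcerpcservice_uuid "
    # "LEFT JOIN dcerpcserviceops dcerpcserviceops ON dcerpcservices.dcerpcservice=dcerpcserviceops.dcerpcservice "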
# A counter to count total entries
totalEntries = 0
totalNewEntries = 0
# Build the sql commands and execute them, then log the results
for sqlTableIdentifier, sqlTableConfig in sqlTables.items():
        # Convert the sqlTableIdentifier to a string, as it is automatically converted from int when stored in the registry file
sqlTableIdentifier = str(sqlTableIdentifier)
print ('=========================================')
print ('# Processing ' + sqlTableConfig['index'] + ':')
print ('# ---------------------------------------')
# Get the current index of the table
cursor.execute('SELECT COALESCE(MAX(' + sqlTableConfig['index'] + ')+0, 0) FROM ' + sqlTableConfig['table'])
results = cursor.fetchone()
        # Convert the result into a string (probably not the best way to do this, but hey..)
coalMaxString = str(results)
# Extract all integers from that string
coalMaxIntegers = (re.findall('\d+', coalMaxString ))
# Store the correct integer in the currentIndex
currentIndex[sqlTableIdentifier] = coalMaxIntegers[2]
# Update the total entries counter
totalEntries += int(currentIndex[sqlTableIdentifier])
        # Print the total number of entries
print ('# Total entries: ' + currentIndex[sqlTableIdentifier])
# Build the SQL query
# Check if there are any new inputs in the database and limit the results
if sqlTableIdentifier in previousIndex:
newResults = int(currentIndex[sqlTableIdentifier]) - int(previousIndex[sqlTableIdentifier])
sqlLimit = 'LIMIT ' + previousIndex[sqlTableIdentifier] + ',' + str(newResults)
else:
newResults = currentIndex[sqlTableIdentifier]
sqlLimit = False
# Update the total new entries counter
totalNewEntries += int(newResults)
# Print status about new results
print ('# New entries to process: ' + str(newResults))
# Building the SQL queries
if sqlTableConfig['index'] == 'connection':
usedConnectionsString = ', '.join(str(e) for e in usedConnections)
sql = 'SELECT * FROM connections WHERE NOT connection in (' + usedConnectionsString + ') '
else:
sql = 'SELECT * FROM ' + sqlTableConfig['table'] + ' '
if 'joins' in sqlTableConfig:
sql += joinsLoop(sqlTableConfig['table'], sqlTableConfig['joins'])
# Include data from connections
sql += 'LEFT JOIN connections USING (connection) '
# Set a limit to the query
if sqlLimit:
sql += sqlLimit
# Execute and fetch results
cursor.execute(sql)
results = cursor.fetchall()
# Iterate the results and correct a few things
for result in results:
# Add result event id
result['eventid'] = sqlTableConfig['index']
            # Create a time stamp in ISO 8601 format, in accordance with what ELK expects
result['timestamp'] = (datetime.fromtimestamp(result['connection_timestamp']).isoformat() + 'Z')
# Create a date-format to used for the log names
timestampYMD = time.strftime('%Y-%m-%d', time.localtime(result['connection_timestamp']))
result.pop('connection_timestamp', None)
ignoreDownload = False
if sqlTableConfig['index'] == 'download':
# Check download for filetype
if os.path.isfile(dionaeaBinariesPath + '/' + result['download_md5_hash']):
result['download_filetype'] = subprocess.check_output('file -b ' + dionaeaBinariesPath + '/' + result['download_md5_hash'], shell=True)
if isinstance(result['download_filetype'], bytes):
result['download_filetype'] = result['download_filetype'].decode('utf-8', 'replace')
# Remove HTML files
if 'removeHTMLFiles' in sqlTableConfig and sqlTableConfig['removeHTMLFiles']:
if result['download_filetype'].startswith('HTML'):
os.system('rm ' + dionaeaBinariesPath + '/' + result['download_md5_hash'])
ignoreDownload = True
# Check if VT is enabled
if 'virusTotal' in sqlTableConfig and sqlTableConfig['virusTotal']:
result['virustotal'] = {}
cursor.execute('SELECT * FROM virustotals WHERE virustotal_md5_hash="' + result['download_md5_hash'] + '" ORDER BY virustotal_timestamp DESC')
virusTotalsResult = cursor.fetchone()
if virusTotalsResult:
result['virustotal_total_scanners'] = 0
result['virustotal_total_positive_results'] = 0
virusTotalsResult['virustotal_timestamp'] = (datetime.fromtimestamp(virusTotalsResult['virustotal_timestamp']).isoformat() + 'Z')
result.update(virusTotalsResult)
cursor.execute('SELECT virustotalscan_scanner, virustotalscan_result FROM virustotalscans WHERE virustotal=' + str(virusTotalsResult['virustotal']))
virusTotalsScanResults = cursor.fetchall()
for virusTotalsScanResult in virusTotalsScanResults:
result['virustotal_total_scanners'] += 1
if virusTotalsScanResult['virustotalscan_result']:
result['virustotal_total_positive_results'] += 1
result.update({virusTotalsScanResult['virustotalscan_scanner']:virusTotalsScanResult['virustotalscan_result']})
# Clean the results for byte string issues
for i in result:
if isinstance(result[i], bytes):
result[i] = result[i].decode('utf-8', 'replace')
# Generate empty log files if they don't exist
if not os.path.isfile(dionaeaLogPath + '/dionaea.json.' + timestampYMD):
open(dionaeaLogPath + '/dionaea.json.' + timestampYMD,'a').close()
print ('# Created log file:')
print ('# ' + dionaeaLogPath + '/dionaea.json.' + timestampYMD)
# Append the result to the log file
if not ignoreDownload:
with open(dionaeaLogPath + '/dionaea.json.' + timestampYMD, 'a') as file:
file.write(json.dumps(result) + '\n')
            # Append the connection ID that was used to the list of used connections
if not sqlTableIdentifier == '100':
usedConnections.append(result['connection'])
# Close the sqlite3 connection
connection.close()
# Register the current indexes
with open(dionaeaLogPath + "/dionaea.registry.json", "w") as file:
file.write(json.dumps(currentIndex))
# End of code
print ('=========================================')
print ('# Finished! ')
print ('# ---------------------------------------')
print ('# Total entries: ' + str(totalEntries))
print ('# Total processed entries: ' + str(totalNewEntries))
print ('# ---------------------------------------')
print ('# The logs are located in:')
print ('# ' + dionaeaLogPath)
print ('=========================================')
# SQLite database file not located
else:
print ('=========================================')
print ('# Unable to locate the sqlite db file:')
print ('# ' + dionaeaSQLite)
print ('# ---------------------------------------')
print ('# Make sure that:')
print ('# - Dionaea is installed')
print ('# - SQLite is logging enabled')
print ('# - The script uses the correct path')
print ('=========================================')
| 38.965358 | 187 | 0.534732 |
bbf821e10ca8a4474985ec24322442aa758cb969 | 11,327 | py | Python | taskapp/views.py | Fa67/djing | 6e84640d7294be9f2f17f73e796713e93e43bb68 | [
"Unlicense"
] | 1 | 2020-12-05T01:55:56.000Z | 2020-12-05T01:55:56.000Z | taskapp/views.py | Fa67/djing | 6e84640d7294be9f2f17f73e796713e93e43bb68 | [
"Unlicense"
] | null | null | null | taskapp/views.py | Fa67/djing | 6e84640d7294be9f2f17f73e796713e93e43bb68 | [
"Unlicense"
] | 2 | 2020-03-07T12:00:17.000Z | 2020-12-05T01:56:01.000Z | from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.db.models import Count
from django.shortcuts import redirect, get_object_or_404, resolve_url
from django.contrib import messages
from django.utils.decorators import method_decorator
from django.views.generic import ListView, CreateView
from django.utils.translation import ugettext as _
from django.conf import settings
from datetime import datetime
from django.views.generic.edit import FormMixin, DeleteView, UpdateView
from guardian.decorators import permission_required_or_403 as permission_required
from chatbot.models import MessageQueue
from abonapp.models import Abon
from djing import httpresponse_to_referrer
from djing.lib import safe_int, MultipleException, RuTimedelta
from djing.lib.decorators import only_admins, json_view
from .handle import TaskException
from .models import Task, ExtraComment
from .forms import TaskFrm, ExtraCommentForm
login_decs = login_required, only_admins
@method_decorator(login_decs, name='dispatch')
@method_decorator(permission_required('taskapp.view_task'), name='dispatch')
class NewTasksView(ListView):
"""
Show new tasks
"""
http_method_names = ('get',)
paginate_by = getattr(settings, 'PAGINATION_ITEMS_PER_PAGE', 10)
template_name = 'taskapp/tasklist.html'
context_object_name = 'tasks'
def get_queryset(self):
return Task.objects.filter(recipients=self.request.user, state='S') \
.annotate(comment_count=Count('extracomment')) \
.select_related('abon', 'abon__street', 'abon__group', 'author')
@method_decorator(login_decs, name='dispatch')
@method_decorator(permission_required('taskapp.view_task'), name='dispatch')
class FailedTasksView(NewTasksView):
"""
Show crashed tasks
"""
template_name = 'taskapp/tasklist_failed.html'
context_object_name = 'tasks'
def get_queryset(self):
return Task.objects.filter(recipients=self.request.user, state='C') \
.select_related('abon', 'abon__street', 'abon__group', 'author')
@method_decorator(login_decs, name='dispatch')
@method_decorator(permission_required('taskapp.view_task'), name='dispatch')
class FinishedTaskListView(NewTasksView):
template_name = 'taskapp/tasklist_finish.html'
def get_queryset(self):
return Task.objects.filter(recipients=self.request.user, state='F') \
.select_related('abon', 'abon__street', 'abon__group', 'author')
@method_decorator(login_decs, name='dispatch')
@method_decorator(permission_required('taskapp.view_task'), name='dispatch')
class OwnTaskListView(NewTasksView):
template_name = 'taskapp/tasklist_own.html'
def get_queryset(self):
        # Tasks created by me that are not yet finished
return Task.objects.filter(author=self.request.user) \
.exclude(state='F') \
.select_related('abon', 'abon__street', 'abon__group')
@method_decorator(login_decs, name='dispatch')
@method_decorator(permission_required('taskapp.view_task'), name='dispatch')
class MyTaskListView(NewTasksView):
template_name = 'taskapp/tasklist.html'
def get_queryset(self):
# Tasks in which I participated
return Task.objects.filter(recipients=self.request.user) \
.select_related('abon', 'abon__street', 'abon__group', 'author')
@method_decorator(login_decs, name='dispatch')
@method_decorator(permission_required('taskapp.can_viewall'), name='dispatch')
class AllTasksListView(ListView):
http_method_names = ('get',)
paginate_by = getattr(settings, 'PAGINATION_ITEMS_PER_PAGE', 10)
template_name = 'taskapp/tasklist_all.html'
context_object_name = 'tasks'
def get_queryset(self):
return Task.objects.annotate(comment_count=Count('extracomment')) \
.select_related('abon', 'abon__street', 'abon__group', 'author')
@method_decorator(login_decs, name='dispatch')
@method_decorator(permission_required('taskapp.view_task'), name='dispatch')
class EmptyTasksListView(NewTasksView):
template_name = 'taskapp/tasklist_empty.html'
def get_queryset(self):
return Task.objects.annotate(reccount=Count('recipients')).filter(reccount__lt=1)
@login_required
@only_admins
@permission_required('taskapp.delete_task')
def task_delete(request, task_id):
task = get_object_or_404(Task, id=task_id)
    # prevent deleting a task that is assigned to me
if request.user.is_superuser or request.user not in task.recipients.all():
task.delete()
else:
messages.warning(request, _('You cannot delete task that assigned to you'))
return redirect('taskapp:home')
@method_decorator(login_decs, name='dispatch')
class TaskUpdateView(UpdateView):
http_method_names = ('get', 'post')
template_name = 'taskapp/add_edit_task.html'
form_class = TaskFrm
context_object_name = 'task'
def get_object(self, queryset=None):
task_id = safe_int(self.kwargs.get('task_id'))
if task_id == 0:
uname = self.request.GET.get('uname')
if uname:
self.selected_abon = Abon.objects.get(username=uname)
return
else:
task = get_object_or_404(Task, pk=task_id)
self.selected_abon = task.abon
return task
def dispatch(self, request, *args, **kwargs):
task_id = safe_int(self.kwargs.get('task_id', 0))
if task_id == 0:
if not request.user.has_perm('taskapp.add_task'):
raise PermissionDenied
else:
if not request.user.has_perm('taskapp.change_task'):
raise PermissionDenied
try:
return super(TaskUpdateView, self).dispatch(request, *args, **kwargs)
except TaskException as e:
messages.error(request, e)
return httpresponse_to_referrer(request)
def get_form_kwargs(self):
kwargs = super(TaskUpdateView, self).get_form_kwargs()
if hasattr(self, 'selected_abon'):
kwargs.update({'initial_abon': self.selected_abon})
return kwargs
def form_valid(self, form):
try:
self.object = form.save()
if self.object.author is None:
self.object.author = self.request.user
self.object.save(update_fields=('author',))
task_id = safe_int(self.kwargs.get('task_id', 0))
if task_id == 0:
log_text = _('Task has successfully created')
else:
log_text = _('Task has changed successfully')
messages.add_message(self.request, messages.SUCCESS, log_text)
self.object.send_notification()
except MultipleException as e:
for err in e.err_list:
messages.add_message(self.request, messages.WARNING, err)
except TaskException as e:
messages.add_message(self.request, messages.ERROR, e)
return FormMixin.form_valid(self, form)
def get_context_data(self, **kwargs):
if hasattr(self, 'selected_abon'):
selected_abon = self.selected_abon
else:
selected_abon = None
now_date = datetime.now().date()
task = self.object
if task:
if task.out_date > now_date:
time_diff = "%s: %s" % (_('time left'), RuTimedelta(task.out_date - now_date))
else:
time_diff = _("Expired timeout -%(time_left)s") % {'time_left': RuTimedelta(now_date - task.out_date)}
else:
time_diff = None
context = {
'selected_abon': selected_abon,
'time_diff': time_diff,
'comments': ExtraComment.objects.filter(task=task),
'comment_form': ExtraCommentForm()
}
context.update(kwargs)
return super(TaskUpdateView, self).get_context_data(**context)
def get_success_url(self):
task_id = safe_int(self.kwargs.get('task_id'))
if task_id == 0:
return resolve_url('taskapp:own_tasks')
else:
return resolve_url('taskapp:edit', task_id)
def form_invalid(self, form):
messages.add_message(self.request, messages.ERROR, _('fix form errors'))
return super(TaskUpdateView, self).form_invalid(form)
@login_required
@only_admins
def task_finish(request, task_id):
try:
task = get_object_or_404(Task, id=task_id)
task.finish(request.user)
task.send_notification()
except MultipleException as errs:
for err in errs.err_list:
messages.add_message(request, messages.constants.ERROR, err)
except TaskException as e:
messages.error(request, e)
return redirect('taskapp:home')
@login_required
@only_admins
def task_failed(request, task_id):
try:
task = get_object_or_404(Task, id=task_id)
task.do_fail(request.user)
task.send_notification()
except TaskException as e:
messages.error(request, e)
return redirect('taskapp:home')
@login_required
@only_admins
@permission_required('taskapp.can_remind')
def remind(request, task_id):
try:
task = get_object_or_404(Task, id=task_id)
task.save(update_fields=('state',))
task.send_notification()
messages.success(request, _('Task has been reminded'))
except MultipleException as errs:
for err in errs.err_list:
messages.add_message(request, messages.constants.ERROR, err)
except TaskException as e:
messages.error(request, e)
return redirect('taskapp:home')
@json_view
def check_news(request):
if request.user.is_authenticated and request.user.is_admin:
msg = MessageQueue.objects.pop(user=request.user, tag='taskap')
if msg is not None:
r = {
'auth': True,
'exist': True,
'content': msg,
'title': _('Task')
}
else:
r = {'auth': True, 'exist': False}
else:
r = {'auth': False}
return r
@method_decorator(login_decs, name='dispatch')
@method_decorator(permission_required('taskapp.add_extracomment'), name='dispatch')
class NewCommentView(CreateView):
form_class = ExtraCommentForm
model = ExtraComment
http_method_names = ('get', 'post')
def form_valid(self, form):
self.task = get_object_or_404(Task, pk=self.kwargs.get('task_id'))
self.object = form.make_save(
author=self.request.user,
task=self.task
)
return FormMixin.form_valid(self, form)
@method_decorator(login_decs, name='dispatch')
@method_decorator(permission_required('taskapp.delete_extracomment'), name='dispatch')
class DeleteCommentView(DeleteView):
model = ExtraComment
pk_url_kwarg = 'comment_id'
http_method_names = ('get', 'post')
template_name = 'taskapp/comments/extracomment_confirm_delete.html'
def get_context_data(self, **kwargs):
context = {
'task_id': self.kwargs.get('task_id')
}
context.update(kwargs)
return super(DeleteCommentView, self).get_context_data(**context)
def get_success_url(self):
task_id = self.kwargs.get('task_id')
return resolve_url('taskapp:edit', task_id)
| 35.396875 | 118 | 0.675113 |
50244908106988bd41f1fca40f8c1d89a9a55908 | 432 | py | Python | functions/distance-fare.py | Aditya0804-dot/python-class-11 | cfb118d01cc1872153ceffa562fcd042b87235e3 | [
"MIT"
] | null | null | null | functions/distance-fare.py | Aditya0804-dot/python-class-11 | cfb118d01cc1872153ceffa562fcd042b87235e3 | [
"MIT"
] | null | null | null | functions/distance-fare.py | Aditya0804-dot/python-class-11 | cfb118d01cc1872153ceffa562fcd042b87235e3 | [
"MIT"
] | null | null | null | def AssignFare(c,a,d):
if d<500:
t_fare=(a*200)+(c*(200/2))
elif (d<1000) and (d>=500):
t_fare=(a*300)+(c*(300/2))
else:
t_fare=(a*500)+(c*(500/2))
return t_fare
child=int(input("enter the number of children: "))
adult=int(input("enter the number of adults: "))
distance=float(input("enter the distance travelled: "))
trip_fare=AssignFare(child,adult,distance)
print(trip_fare)
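# Worked example (assumed input): 2 adults, 1 child, 750 km -> the 500-999 band applies,
# so trip_fare = 2*300 + 1*(300/2) = 750.0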
| 28.8 | 55 | 0.611111 |
04ab027db40cb6ff050d1ed98b062acbe8907b98 | 9,310 | py | Python | rootfs/usr/lib/python3/dist-packages/serial/loopback_connection.py | kappaIO-Dev/kappaIO-sdk-armhf-crosscompile | 66fc5fc21e6235f7a3be72a7ccac68e2224b7fb2 | [
"MIT"
] | null | null | null | rootfs/usr/lib/python3/dist-packages/serial/loopback_connection.py | kappaIO-Dev/kappaIO-sdk-armhf-crosscompile | 66fc5fc21e6235f7a3be72a7ccac68e2224b7fb2 | [
"MIT"
] | 1 | 2018-04-15T22:59:15.000Z | 2018-04-15T22:59:15.000Z | rootfs/usr/lib/python3/dist-packages/serial/loopback_connection.py | kappaIO-Dev/kappaIO-sdk-armhf-crosscompile | 66fc5fc21e6235f7a3be72a7ccac68e2224b7fb2 | [
"MIT"
] | null | null | null | #! python
#
# Python Serial Port Extension for Win32, Linux, BSD, Jython
# see __init__.py
#
# This module implements a loop back connection receiving itself what it sent.
#
# The purpose of this module is.. well... You can run the unit tests with it.
# and it was so easy to implement ;-)
#
# (C) 2001-2009 Chris Liechti <cliechti@gmx.net>
# this is distributed under a free software license, see license.txt
#
# URL format: loop://[option[/option...]]
# options:
# - "debug" print diagnostic messages
from .serialutil import *
import threading
import time
import logging
# map log level names to constants. used in fromURL()
LOGGER_LEVELS = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
}
class LoopbackSerial(SerialBase):
"""Serial port implementation for plain sockets."""
BAUDRATES = (50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
9600, 19200, 38400, 57600, 115200)
def open(self):
"""Open port with current settings. This may throw a SerialException
if the port cannot be opened."""
self.logger = None
self.buffer_lock = threading.Lock()
self.loop_buffer = bytearray()
self.cts = False
self.dsr = False
if self._port is None:
raise SerialException("Port must be configured before it can be used.")
# not that there is anything to open, but the function applies the
# options found in the URL
self.fromURL(self.port)
        # not that there is anything to configure...
self._reconfigurePort()
# all things set up get, now a clean start
self._isOpen = True
if not self._rtscts:
self.setRTS(True)
self.setDTR(True)
self.flushInput()
self.flushOutput()
def _reconfigurePort(self):
"""Set communication parameters on opened port. for the loop://
protocol all settings are ignored!"""
# not that's it of any real use, but it helps in the unit tests
if not isinstance(self._baudrate, int) or not 0 < self._baudrate < 2**32:
raise ValueError("invalid baudrate: %r" % (self._baudrate))
if self.logger:
self.logger.info('_reconfigurePort()')
def close(self):
"""Close port"""
if self._isOpen:
self._isOpen = False
# in case of quick reconnects, give the server some time
time.sleep(0.3)
def makeDeviceName(self, port):
raise SerialException("there is no sensible way to turn numbers into URLs")
def fromURL(self, url):
"""extract host and port from an URL string"""
if url.lower().startswith("loop://"): url = url[7:]
try:
# process options now, directly altering self
for option in url.split('/'):
if '=' in option:
option, value = option.split('=', 1)
else:
value = None
if not option:
pass
elif option == 'logging':
logging.basicConfig() # XXX is that good to call it here?
self.logger = logging.getLogger('pySerial.loop')
self.logger.setLevel(LOGGER_LEVELS[value])
self.logger.debug('enabled logging')
else:
raise ValueError('unknown option: %r' % (option,))
except ValueError as e:
raise SerialException('expected a string in the form "[loop://][option[/option...]]": %s' % e)
# - - - - - - - - - - - - - - - - - - - - - - - -
def inWaiting(self):
"""Return the number of characters currently in the input buffer."""
if not self._isOpen: raise portNotOpenError
if self.logger:
# attention the logged value can differ from return value in
# threaded environments...
self.logger.debug('inWaiting() -> %d' % (len(self.loop_buffer),))
return len(self.loop_buffer)
def read(self, size=1):
"""Read size bytes from the serial port. If a timeout is set it may
return less characters as requested. With no timeout it will block
until the requested number of bytes is read."""
if not self._isOpen: raise portNotOpenError
if self._timeout is not None:
timeout = time.time() + self._timeout
else:
timeout = None
data = bytearray()
while len(data) < size:
self.buffer_lock.acquire()
try:
block = to_bytes(self.loop_buffer[:size])
del self.loop_buffer[:size]
finally:
self.buffer_lock.release()
data += block
# check for timeout now, after data has been read.
# useful for timeout = 0 (non blocking) read
if timeout and time.time() > timeout:
break
return bytes(data)
def write(self, data):
"""Output the given string over the serial port. Can block if the
connection is blocked. May raise SerialException if the connection is
closed."""
if not self._isOpen: raise portNotOpenError
# calculate aprox time that would be used to send the data
time_used_to_send = 10.0*len(data) / self._baudrate
# when a write timeout is configured check if we would be successful
# (not sending anything, not even the part that would have time)
if self._writeTimeout is not None and time_used_to_send > self._writeTimeout:
time.sleep(self._writeTimeout) # must wait so that unit test succeeds
raise writeTimeoutError
self.buffer_lock.acquire()
try:
self.loop_buffer += bytes(data)
finally:
self.buffer_lock.release()
return len(data)
def flushInput(self):
"""Clear input buffer, discarding all that is in the buffer."""
if not self._isOpen: raise portNotOpenError
if self.logger:
self.logger.info('flushInput()')
self.buffer_lock.acquire()
try:
del self.loop_buffer[:]
finally:
self.buffer_lock.release()
def flushOutput(self):
"""Clear output buffer, aborting the current output and
discarding all that is in the buffer."""
if not self._isOpen: raise portNotOpenError
if self.logger:
self.logger.info('flushOutput()')
def sendBreak(self, duration=0.25):
"""Send break condition. Timed, returns to idle state after given
duration."""
if not self._isOpen: raise portNotOpenError
def setBreak(self, level=True):
"""Set break: Controls TXD. When active, to transmitting is
possible."""
if not self._isOpen: raise portNotOpenError
if self.logger:
self.logger.info('setBreak(%r)' % (level,))
def setRTS(self, level=True):
"""Set terminal status line: Request To Send"""
if not self._isOpen: raise portNotOpenError
if self.logger:
self.logger.info('setRTS(%r) -> state of CTS' % (level,))
self.cts = level
def setDTR(self, level=True):
"""Set terminal status line: Data Terminal Ready"""
if not self._isOpen: raise portNotOpenError
if self.logger:
self.logger.info('setDTR(%r) -> state of DSR' % (level,))
self.dsr = level
def getCTS(self):
"""Read terminal status line: Clear To Send"""
if not self._isOpen: raise portNotOpenError
if self.logger:
self.logger.info('getCTS() -> state of RTS (%r)' % (self.cts,))
return self.cts
def getDSR(self):
"""Read terminal status line: Data Set Ready"""
if not self._isOpen: raise portNotOpenError
if self.logger:
self.logger.info('getDSR() -> state of DTR (%r)' % (self.dsr,))
return self.dsr
def getRI(self):
"""Read terminal status line: Ring Indicator"""
if not self._isOpen: raise portNotOpenError
if self.logger:
self.logger.info('returning dummy for getRI()')
return False
def getCD(self):
"""Read terminal status line: Carrier Detect"""
if not self._isOpen: raise portNotOpenError
if self.logger:
self.logger.info('returning dummy for getCD()')
return True
# - - - platform specific - - -
# None so far
# assemble Serial class with the platform specific implementation and the base
# for file-like behavior. for Python 2.6 and newer, that provide the new I/O
# library, derive from io.RawIOBase
try:
import io
except ImportError:
# classic version with our own file-like emulation
class Serial(LoopbackSerial, FileLike):
pass
else:
# io library present
class Serial(LoopbackSerial, io.RawIOBase):
pass
# simple client test
if __name__ == '__main__':
import sys
    s = Serial('loop://')
sys.stdout.write('%s\n' % s)
sys.stdout.write("write...\n")
s.write("hello\n")
s.flush()
sys.stdout.write("read: %s\n" % s.read(5))
s.close()
| 35.670498 | 106 | 0.5971 |
0ed89f71bdb3aee1b0f569051800853bdf89b8ba | 6,977 | py | Python | reports/old_scripts/191015_APAM_dPCA_eg.py | Mateo-Lopez-Espejo/context_probe_analysis | 55461057fd01f00124aa46682b335313af9cc0f8 | [
"RSA-MD"
] | null | null | null | reports/old_scripts/191015_APAM_dPCA_eg.py | Mateo-Lopez-Espejo/context_probe_analysis | 55461057fd01f00124aa46682b335313af9cc0f8 | [
"RSA-MD"
] | null | null | null | reports/old_scripts/191015_APAM_dPCA_eg.py | Mateo-Lopez-Espejo/context_probe_analysis | 55461057fd01f00124aa46682b335313af9cc0f8 | [
"RSA-MD"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
import src.visualization.fancy_plots
from src.data.load import load
from src.metrics.reliability import signal_reliability
from src.data import dPCA as cdPCA
from src.visualization import fancy_plots as cplt
"""
plots an example dPCA analysis with variance explained, context and probe marginalization projections
and hybrid raster/PSTH plot for the most weigted cell in the context marginalization
does this for two example site/probes
"""
plt.rcParams['svg.fonttype'] = 'none'
CB_color_cycle = ['#377eb8', '#ff7f00', '#4daf4a', '#a65628', # blue, orange, green, brow,
'#984ea3', '#999999', '#e41a1c', '#dede00'] # purple, gray, scarlet, lime
trans_color_map = {'silence': '#377eb8', # blue
'continuous': '#ff7f00', # orange
'similar': '#4daf4a', # green
'sharp': '#a65628'} # brown
# meta parameter
meta = {'reliability': 0.1, # r value
'smoothing_window': 0, # ms
'raster_fs': 30,
'transitions': ['silence', 'continuous', 'similar', 'sharp'],
'probes_to_plot': [2, 3, 5, 6],
'significance': False,
'zscore': False}
transitions = {'P2': {'silence': 0,
'continuous': 1,
'similar': 3,
'sharp': 6},
'P3': {'silence': 0,
'continuous': 2,
'similar': 1,
'sharp': 5},
'P5': {'silence': 0,
'continuous': 4,
'similar': 6,
'sharp': 3},
'P6': {'silence': 0,
'continuous': 5,
'similar': 4,
'sharp': 2}}
code_to_name = {'t': 'Probe', 'ct': 'Context'}
for site, probe in zip(['AMT029a', 'ley070a'], [5, 2]):
# load and format triplets from a site
# site = 'AMT030a' # low responses, Ok but not as good
recs = load(site)
rec = recs['trip0']
sig = rec['resp']
# calculates response realiability and select only good cells to improve analysis
r_vals, goodcells = signal_reliability(sig.rasterize(), r'\ASTIM_*', threshold=meta['reliability'])
goodcells = goodcells.tolist()
# get a specific probe after a set of different transitions
Z, trialZ, significance_masks, dpca = cdPCA.tran_dpca(sig, probe, channels=goodcells,
transitions=meta['transitions'],
smooth_window=meta['smoothing_window'],
significance=meta['significance'],
raster_fs=meta['raster_fs'],
part='probe',
zscore=meta['zscore'])
expl_var = dpca.explained_variance_ratio_
# plots the first PC projection of each context, for each marginalization
fig = plt.figure()
T = trialZ['ct'].shape[-1]
time = np.linspace(0, T / meta['raster_fs'], T, endpoint=False)
for vv, (marginalization, arr) in enumerate(Z.items()):
for pc in range(1): # first 3 principal components
PC_ax = plt.subplot2grid((2, 2), (vv, pc + 1), 1, 1, fig=fig)
for c in range(arr.shape[1]): # for each context
toplot = arr[pc, c, :] * meta['raster_fs'] # FixMe hardcoded firing rate
PC_ax.plot(time, toplot, label=meta['transitions'][c], color=CB_color_cycle[c], linewidth=2)
PC_ax.tick_params(labelsize='15')
if meta['significance']:
if marginalization in significance_masks:
left, right = PC_ax.get_xlim()
bottom, top = PC_ax.get_ylim()
Ychunk = (top - bottom) / 10
PC_ax.set_ylim(bottom - Ychunk, top)
PC_ax.imshow(significance_masks[marginalization][pc][None, :],
extent=[0, 1, bottom - Ychunk, bottom], aspect='auto', )
## Hide the right and top spines
PC_ax.spines['right'].set_visible(False)
PC_ax.spines['top'].set_visible(False)
# formats axes labels and ticks
if pc == 0: # y labels
PC_ax.set_ylabel(f'{code_to_name[marginalization]} dependent\nnormalized firing rate (Hz)', fontsize=20)
else:
PC_ax.axes.get_yaxis().set_visible(False)
if vv == len(Z) - 1:
PC_ax.set_xlabel('time (s)', fontsize=20)
if pc == 2: # bottom right coner subplt:
PC_ax.legend()
elif vv == 0:
PC_ax.set_title(f'{pc + 1}th principal component', fontsize=20)
else:
PC_ax.axes.get_xaxis().set_visible(False)
# plots variance explained
var_ax = plt.subplot2grid((2, 4), (0, 0), 1, 1, fig=fig)
src.visualization.fancy_plots.variance_explained(dpca, ax=var_ax, names=['probe', 'context'], colors=['gray', 'green'])
var_ax.set_title('variance explained')
var_ax.spines['right'].set_visible(False)
var_ax.spines['top'].set_visible(False)
var_ax.tick_params(labelsize=15)
var_ax.title.set_size(20)
var_ax.xaxis.label.set_size(20)
var_ax.yaxis.label.set_size(20)
# plots example raster
epoch_names = [f"C{transitions[f'P{probe}'][trans]}_P{probe}" for trans in meta['transitions']]
topcell = goodcells[np.argmax(np.abs(dpca.D['ct'][:, 0]))]
colors = [trans_color_map[trans] for trans in meta['transitions']]
raster_ax = plt.subplot2grid((2, 2), (1, 0), 1, 1, fig=fig)
cplt.hybrid(sig, epoch_names=epoch_names, channels=topcell, time_strech=[1, 2], colors=colors, axes=[raster_ax])
raster_ax = raster_ax
raster_ax.spines['right'].set_visible(False)
raster_ax.spines['top'].set_visible(False)
raster_ax.tick_params(labelsize=15)
raster_ax.title.set_size(20)
raster_ax.xaxis.label.set_size(20)
raster_ax.yaxis.label.set_size(20)
suptitle = f"{site} probe {probe} dPCA zscore-{meta['zscore']}"
fig.suptitle(suptitle, fontsize=20)
analysis = f"dPCA_examples_{meta['raster_fs']}Hz_zscore-{meta['zscore']}"
# set figure to full size in tenrec screen
fig.set_size_inches(9, 7)
# root = pl.Path(f'/home/mateo/Pictures/APAM/{analysis}')
# if not root.exists(): root.mkdir(parents=True, exist_ok=True)
# png = root.joinpath(suptitle).with_suffix('.png')
# fig.savefig(png, transparent=True, dpi=100)
# svg = png = root.joinpath(suptitle).with_suffix('.svg')
# fig.savefig(svg, transparent=True)
| 42.542683 | 124 | 0.55339 |
6b9a0636a7474ac0101b23cae37daa1dccb3c3da | 1,105 | py | Python | neuralnet/fine-tuning_transfer-learning/src/mylib/validation_utils.py | hsmtknj/ml | ad8e050cd754e5a1c73ed5df3bc223a1f6dc4148 | [
"MIT"
] | null | null | null | neuralnet/fine-tuning_transfer-learning/src/mylib/validation_utils.py | hsmtknj/ml | ad8e050cd754e5a1c73ed5df3bc223a1f6dc4148 | [
"MIT"
] | null | null | null | neuralnet/fine-tuning_transfer-learning/src/mylib/validation_utils.py | hsmtknj/ml | ad8e050cd754e5a1c73ed5df3bc223a1f6dc4148 | [
"MIT"
] | null | null | null | """
Validate functions of utils.py
"""
# register src directory path to PYTHONPATH
import sys
from os import path, pardir
current_dir = path.abspath(path.dirname(__file__))
parent_dir = path.abspath(path.join(current_dir, pardir))
parent_parent_dir = path.abspath(path.join(parent_dir, pardir))
sys.path.append(parent_dir)
import numpy as np
import pandas as pd
def validate_vec_length_is_larger(in_len, lowwer_bound_len, OPT):
"""
    validate that the input length is larger than the lower bound length
    (ideally, in_len should exceed lowwer_bound_len)
:param in_len : int, input length
:param lowwer_bound_len : int, lowwer bound length
:param OPT : '>' or '>='
"""
if (OPT == '>'):
assert (in_len > lowwer_bound_len), (
'Input length is smaller than or equal to lowwer bound length.'
)
elif (OPT == '>='):
assert (in_len >= lowwer_bound_len), (
'Input length is smaller than lowwer bound length.'
)
else:
print('Please set variable "OPT" correctly. OPT must be ">" or ">=".')
| 29.864865 | 78 | 0.652489 |
6e5c90a9ad0ef07d5efd9c39f2eabb2ea135a891 | 910 | py | Python | mmdet/models/detectors/__init__.py | Hanawh/CondInst_mmdetection | 16c718f7d17de96d7def85102394beee67cda4b4 | [
"Apache-2.0"
] | 16 | 2020-08-04T04:44:16.000Z | 2022-03-31T11:51:37.000Z | mmdet/models/detectors/__init__.py | Hanawh/CondInst_mmdetection | 16c718f7d17de96d7def85102394beee67cda4b4 | [
"Apache-2.0"
] | null | null | null | mmdet/models/detectors/__init__.py | Hanawh/CondInst_mmdetection | 16c718f7d17de96d7def85102394beee67cda4b4 | [
"Apache-2.0"
] | null | null | null | from .atss import ATSS
from .base import BaseDetector
from .cascade_rcnn import CascadeRCNN
from .double_head_rcnn import DoubleHeadRCNN
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .fcos import FCOS
from .fovea import FOVEA
from .grid_rcnn import GridRCNN
from .htc import HybridTaskCascade
from .mask_rcnn import MaskRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
from .reppoints_detector import RepPointsDetector
from .retinanet import RetinaNet
from .rpn import RPN
from .single_stage import SingleStageDetector
from .two_stage import TwoStageDetector
from .condinst import CondInst
__all__ = [
'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'FastRCNN', 'FasterRCNN', 'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade',
'DoubleHeadRCNN', 'RetinaNet', 'FCOS', 'GridRCNN', 'MaskScoringRCNN',
'RepPointsDetector', 'FOVEA', 'CondInst'
]
| 35 | 77 | 0.796703 |
3b3f07bc9b879e0ba12c2a15856300a36dc714b8 | 5,592 | py | Python | agent.py | glennmatlin/cwcf | 7ecfce1e017584ad717a59c39e1ede0b04a40def | [
"MIT"
] | null | null | null | agent.py | glennmatlin/cwcf | 7ecfce1e017584ad717a59c39e1ede0b04a40def | [
"MIT"
] | null | null | null | agent.py | glennmatlin/cwcf | 7ecfce1e017584ad717a59c39e1ede0b04a40def | [
"MIT"
] | null | null | null | import numpy as np
import scipy.special
from config import config
all_agents = np.arange(config.AGENTS)
class Agent:
def __init__(self, env, pool, brain):
self.env = env
self.pool = pool
self.brain = brain
self.epsilon = config.EPSILON_START
self.idx = np.zeros(config.AGENTS, dtype=np.int32)
self.S = np.zeros(
(config.AGENTS, config.FEATURE_DIM + 1, 2, config.FEATURE_DIM),
dtype=np.float32,
)
self.A = np.zeros((config.AGENTS, config.FEATURE_DIM + 1), dtype=np.int64)
self.R = np.zeros((config.AGENTS, config.FEATURE_DIM + 1), dtype=np.float32)
self.U = np.zeros((config.AGENTS, config.FEATURE_DIM + 1), dtype=np.float32)
self.NA = np.zeros(
(config.AGENTS, config.FEATURE_DIM + 1, config.ACTION_DIM), dtype=np.bool
)
s, na = self.env.reset()
self.S[all_agents, self.idx] = s
self.NA[all_agents, self.idx] = na
def act(self, s, na):
q = self.brain.predict_np(s)
p = (
q - config.MAX_MASK_CONST * na
) # select an action not considering those already performed
a = np.argmax(p, axis=1)
rand_agents = np.random.rand(config.AGENTS) < self.epsilon
rand_number = np.random.rand(
config.AGENTS
) # rand() call is expensive, better to do it at once
possible_actions_count = config.ACTION_DIM - np.sum(na, axis=1)
u = (1 - self.epsilon) + (self.epsilon / possible_actions_count)
for i in range(config.AGENTS):
if rand_agents[i]: # random action
possible_actions = np.where(na[i] == False)[
0
] # select a random action, don't repeat an action
w = int(rand_number[i] * possible_actions_count[i])
a_ = possible_actions[w]
if a[i] == a_:
u[i] = (1 - self.epsilon) + (
self.epsilon / possible_actions_count[i]
) # randomly selected the maximizing action
else:
a[i] = a_
u[i] = (
self.epsilon / possible_actions_count[i]
) # probability of taking a random action
return a, u
def step(self):
s = self.S[all_agents, self.idx]
na = self.NA[all_agents, self.idx]
a, u = self.act(s, na)
s_, r, na_, done, info = self.env.step(a)
self.A[all_agents, self.idx] = a
self.R[all_agents, self.idx] = r
self.U[all_agents, self.idx] = u
for i in np.where(done)[0]: # truncate & store the finished episode i
idx = self.idx[i] + 1
_s = self.S[i, :idx].copy()
_a = self.A[i, :idx].copy()
_r = self.R[i, :idx].copy()
_u = self.U[i, :idx].copy()
_na = self.NA[i, :idx].copy()
# extract the true state
_x = np.broadcast_to(self.env.x[i].copy(), (idx, config.FEATURE_DIM))
_y = np.repeat(self.env.y[i], idx)
self.pool.put((_s, _a, _r, _u, _na, _x, _y))
self.idx = (done == 0) * (
self.idx + 1
) # advance idx by 1 and reset to 0 for finished episodes
self.NA[all_agents, self.idx] = na_ # unavailable actions
self.S[all_agents, self.idx] = s_
return s, a, r, s_, done, info
def update_epsilon(self, epoch):
if epoch >= config.EPSILON_EPOCHS:
self.epsilon = config.EPSILON_END
else:
self.epsilon = (
config.EPSILON_START
+ epoch
* (config.EPSILON_END - config.EPSILON_START)
/ config.EPSILON_EPOCHS
)
class PerfAgent(Agent):
def __init__(self, env, brain):
self.env = env
self.brain = brain
self.idx = np.zeros(config.AGENTS, dtype=np.int32)
self.S = np.zeros(
(config.AGENTS, config.FEATURE_DIM + 1, 2, config.FEATURE_DIM),
dtype=np.float32,
)
self.NA = np.zeros(
(config.AGENTS, config.FEATURE_DIM + 1, config.ACTION_DIM), dtype=np.bool
)
s, na = self.env.reset()
self.S[all_agents, self.idx] = s
self.NA[all_agents, self.idx] = na
def act(self, s, na):
q = self.brain.predict_np(s)
p = (
q - config.MAX_MASK_CONST * na
) # select an action not considering those already performed
# print('p.shape(): ', p.shape)
# print('len(p): ', len(p))
# print('p: ', p)
a = np.argmax(p, axis=1)
softmax = scipy.special.softmax(p, axis=1)
# print('softmax 0', softmax[0], 'softmax 1', softmax[1], 'softmax sum', softmax[0]+softmax[1])
# print('a: ', a)
p_yes = softmax[:, 1]
p_no = softmax[:, 0]
p1 = p_yes/(p_no + p_yes)
# print('p no', p_no[:10])
# print('p yes', p_yes[:10])
print('len of p1', len(p1))
print('p1', p1)
return a, 1.0
def step(self):
s = self.S[all_agents, self.idx]
na = self.NA[all_agents, self.idx]
a, u = self.act(s, na)
s_, r, na_, done, info = self.env.step(a)
self.idx = (done == 0) * (
self.idx + 1
) # advance idx by 1 and reset to 0 for finished episodes
self.NA[all_agents, self.idx] = na_ # unavailable actions
self.S[all_agents, self.idx] = s_
return s, a, r, s_, done, info
| 32.701754 | 103 | 0.528612 |
b0deed4e9a8d05e2f062faa6b7748cde73153722 | 5,755 | py | Python | pypyr/steps/tar.py | FooBarQuaxx/pypyr | ebe56b2200a53e2f38c78bbb42d466bb1556c37c | [
"Apache-2.0"
] | null | null | null | pypyr/steps/tar.py | FooBarQuaxx/pypyr | ebe56b2200a53e2f38c78bbb42d466bb1556c37c | [
"Apache-2.0"
] | null | null | null | pypyr/steps/tar.py | FooBarQuaxx/pypyr | ebe56b2200a53e2f38c78bbb42d466bb1556c37c | [
"Apache-2.0"
] | null | null | null | """Archive and extract tars."""
import logging
import tarfile
from pypyr.errors import KeyNotInContextError
# logger means the log level will be set correctly
logger = logging.getLogger(__name__)
def run_step(context):
"""Archive and/or extract tars with or without compression.
Args:
context: dictionary-like. Mandatory.
Expects the following context:
tar:
extract:
- in: /path/my.tar
out: /out/path
archive:
- in: /dir/to/archive
out: /out/destination.tar
format: ''
tar['format'] - if not specified, defaults to lzma/xz
Available options:
- '' - no compression
- gz (gzip)
- bz2 (bzip2)
- xz (lzma)
This step will run whatever combination of Extract and Archive you specify.
Regardless of combination, execution order is Extract, Archive.
Source and destination paths support {key} string interpolation.
Never extract archives from untrusted sources without prior inspection.
It is possible that files are created outside of path, e.g. members that
have absolute filenames starting with "/" or filenames with two dots "..".
"""
logger.debug("started")
assert context, f"context must have value for {__name__}"
found_at_least_one = False
context.assert_key_has_value('tar', __name__)
tar = context['tar']
if tar.get('extract', None):
found_at_least_one = True
tar_extract(context)
if tar.get('archive', None):
found_at_least_one = True
tar_archive(context)
if not found_at_least_one:
# This will raise exception on first item with a problem.
raise KeyNotInContextError('pypyr.steps.tar must have either extract '
'or archive specified under the tar key. '
'Or both of these. It has neither.')
logger.debug("done")
def get_file_mode_for_reading(context):
"""Get file mode for reading from tar['format'].
This should return r:*, r:gz, r:bz2 or r:xz. If user specified something
wacky in tar.Format, that's their business.
In theory r:* will auto-deduce the correct format.
"""
format = context['tar'].get('format', None)
if format or format == '':
mode = f"r:{context.get_formatted_string(format)}"
else:
mode = 'r:*'
return mode
def get_file_mode_for_writing(context):
"""Get file mode for writing from tar['format'].
This should return w:, w:gz, w:bz2 or w:xz. If user specified something
wacky in tar.Format, that's their business.
"""
format = context['tar'].get('format', None)
# slightly weird double-check because falsy format could mean either format
# doesn't exist in input, OR that it exists and is empty. Exists-but-empty
# has special meaning - default to no compression.
if format or format == '':
mode = f"w:{context.get_formatted_string(format)}"
else:
mode = 'w:xz'
return mode
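# Illustrative mapping (derived from the two helpers above): tar['format'] of '' -> 'w:' / 'r:',
# 'gz' -> 'w:gz' / 'r:gz', 'xz' -> 'w:xz' / 'r:xz'; when the key is absent entirely the
# defaults are 'w:xz' for writing and 'r:*' (auto-detect) for reading.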
def tar_archive(context):
"""Archive specified path to a tar archive.
Args:
context: dictionary-like. context is mandatory.
context['tar']['archive'] must exist. It's a dictionary.
keys are the paths to archive.
values are the destination output paths.
Example:
tar:
archive:
- in: path/to/dir
out: path/to/destination.tar.xs
- in: another/my.file
out: ./my.tar.xs
This will archive directory path/to/dir to path/to/destination.tar.xs,
and also archive file another/my.file to ./my.tar.xs
"""
logger.debug("start")
mode = get_file_mode_for_writing(context)
for item in context['tar']['archive']:
# value is the destination tar. Allow string interpolation.
destination = context.get_formatted_string(item['out'])
# key is the source to archive
source = context.get_formatted_string(item['in'])
with tarfile.open(destination, mode) as archive_me:
logger.debug("Archiving '%s' to '%s'", source, destination)
archive_me.add(source, arcname='.')
logger.info("Archived '%s' to '%s'", source, destination)
logger.debug("end")
def tar_extract(context):
"""Extract all members of tar archive to specified path.
Args:
context: dictionary-like. context is mandatory.
context['tar']['extract'] must exist. It's a dictionary.
keys are the path to the tar to extract.
values are the destination paths.
Example:
tar:
extract:
- in: path/to/my.tar.xs
out: /path/extract/here
- in: another/tar.xs
out: .
This will extract path/to/my.tar.xs to /path/extract/here, and also
extract another/tar.xs to $PWD.
"""
logger.debug("start")
mode = get_file_mode_for_reading(context)
for item in context['tar']['extract']:
# in is the path to the tar to extract. Allows string interpolation.
source = context.get_formatted_string(item['in'])
# out is the outdir, dhur. Allows string interpolation.
destination = context.get_formatted_string(item['out'])
with tarfile.open(source, mode) as extract_me:
logger.debug("Extracting '%s' to '%s'", source, destination)
extract_me.extractall(destination)
logger.info("Extracted '%s' to '%s'", source, destination)
logger.debug("end")
| 32.331461 | 79 | 0.608341 |
755691b57647e1365065175864a44dc0f61c0a2a | 1,341 | py | Python | test/same-target-name-different-directory/gyptest-all.py | chlorm-forks/gyp | a8921fcaab1a18c8cf7e4ab09ceb940e336918ec | [
"BSD-3-Clause"
] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | test/same-target-name-different-directory/gyptest-all.py | chlorm-forks/gyp | a8921fcaab1a18c8cf7e4ab09ceb940e336918ec | [
"BSD-3-Clause"
] | 1,432 | 2017-06-21T04:08:48.000Z | 2020-08-25T16:21:15.000Z | test/same-target-name-different-directory/gyptest-all.py | chlorm-forks/gyp | a8921fcaab1a18c8cf7e4ab09ceb940e336918ec | [
"BSD-3-Clause"
] | 338 | 2020-04-18T08:03:10.000Z | 2022-03-29T12:33:22.000Z | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Test cases when multiple targets in different directories have the same name.
"""
import TestGyp
test = TestGyp.TestGyp(formats=['ninja', 'make'])
# xcode-ninja fails to generate a project due to id collisions
# cf. https://code.google.com/p/gyp/issues/detail?id=461
if test.format == 'xcode-ninja':
test.skip_test()
test.run_gyp('subdirs.gyp', chdir='src')
test.relocate('src', 'relocate/src')
# Test that we build all targets.
test.build('subdirs.gyp', 'target', chdir='relocate/src')
test.must_exist('relocate/src/subdir1/action1.txt')
test.must_exist('relocate/src/subdir2/action2.txt')
# Test that we build all targets using the correct actions, even if they have
# the same names.
test.build('subdirs.gyp', 'target_same_action_name', chdir='relocate/src')
test.must_exist('relocate/src/subdir1/action.txt')
test.must_exist('relocate/src/subdir2/action.txt')
# Test that we build all targets using the correct rules, even if they have
# the same names.
test.build('subdirs.gyp', 'target_same_rule_name', chdir='relocate/src')
test.must_exist('relocate/src/subdir1/rule.txt')
test.must_exist('relocate/src/subdir2/rule.txt')
test.pass_test()
| 31.928571 | 77 | 0.750186 |
7c6d469537588d8899553ccac2a749bce92d5219 | 4,942 | py | Python | doc/source/enforcer.py | spielkind/python-otcextensions | 47ba917df2d85db6cb347f2038fd7f79a8a806b7 | [
"Apache-2.0"
] | null | null | null | doc/source/enforcer.py | spielkind/python-otcextensions | 47ba917df2d85db6cb347f2038fd7f79a8a806b7 | [
"Apache-2.0"
] | null | null | null | doc/source/enforcer.py | spielkind/python-otcextensions | 47ba917df2d85db6cb347f2038fd7f79a8a806b7 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import importlib
import os
from bs4 import BeautifulSoup
from sphinx import errors
from sphinx.util import logging
LOG = logging.getLogger(__name__)
# NOTE: We do this because I can't find any way to pass "-v"
# into sphinx-build through pbr ...
if os.getenv("ENFORCER_DEBUG"):
DEBUG = True
LOG.info("ENFORCER: Debugging is on.")
else:
DEBUG = False
WRITTEN_METHODS = set()
class EnforcementError(errors.SphinxError):
"""A mismatch between what exists and what's documented"""
category = "Enforcer"
def get_proxy_methods():
"""Return a set of public names on all proxies"""
names = ["otcextensions.sdk.anti_ddos.v1._proxy",
"otcextensions.sdk.auto_scaling.v1._proxy",
"otcextensions.sdk.cce.v1._proxy",
"otcextensions.sdk.cts.v1._proxy",
"otcextensions.sdk.dcs.v1._proxy",
"otcextensions.sdk.dms.v1._proxy",
"otcextensions.sdk.dns.v2._proxy",
"otcextensions.sdk.kms.v1._proxy",
"otcextensions.sdk.obs.v1._proxy",
"otcextensions.sdk.rds.v1._proxy",
"otcextensions.sdk.rds.v3._proxy",
"otcextensions.sdk.volume_backup.v2._proxy"
]
modules = (importlib.import_module(name) for name in names)
methods = set()
for module in modules:
# We're not going to use the Proxy for anything other than a `dir`
# so just pass a dummy value so we can create the instance.
instance = module.Proxy("")
# We only document public names
names = [name for name in dir(instance) if not name.startswith("_")]
good_names = [module.__name__ + ".Proxy." + name for name in names]
methods.update(good_names)
return methods
def page_context(app, pagename, templatename, context, doctree):
"""Handle html-page-context-event
This event is emitted once the builder has the contents to create
an HTML page, but before the template is rendered. This is the point
where we'll know what documentation is going to be written, so
gather all of the method names that are about to be included
so we can check which ones were or were not processed earlier
by autodoc.
"""
if "users/proxies" in pagename:
soup = BeautifulSoup(context["body"], "html.parser")
dts = soup.find_all("dt")
ids = [dt.get("id") for dt in dts]
written = 0
for id in ids:
if id is not None and "_proxy.Proxy" in id:
WRITTEN_METHODS.add(id)
written += 1
if DEBUG:
LOG.info("ENFORCER: Wrote %d proxy methods for %s" % (
written, pagename))
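# Added commentary (not part of the original module): the ids harvested above
# have the form "<proxy module path>.Proxy.<public method name>", which is the
# same form produced by get_proxy_methods(), e.g. a hypothetical
# "otcextensions.sdk.cce.v1._proxy.Proxy.get_cluster". That is what makes the
# set comparison in build_finished() below meaningful.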
def build_finished(app, exception):
"""Handle build-finished event
This event is emitted once the builder has written all of the output.
At this point we just compare what we know was written to what we know
exists within the modules and share the results.
When enforcer_warnings_as_errors=True in conf.py, this method
will raise EnforcementError on any failures in order to signal failure.
"""
all_methods = get_proxy_methods()
LOG.info("ENFORCER: %d proxy methods exist" % len(all_methods))
LOG.info("ENFORCER: %d proxy methods written" % len(WRITTEN_METHODS))
missing = all_methods - WRITTEN_METHODS
missing_count = len(missing)
LOG.info("ENFORCER: Found %d missing proxy methods "
"in the output" % missing_count)
    # TODO(shade) This is spewing a bunch of content for missing things that
# are not actually missing. Leave it as info rather than warn so that the
# gate doesn't break ... but we should figure out why this is broken and
# fix it.
# We also need to deal with Proxy subclassing keystoneauth.adapter.Adapter
# now - some of the warnings come from Adapter elements.
for name in sorted(missing):
if DEBUG:
LOG.info("ENFORCER: %s was not included in the output" % name)
if app.config.enforcer_warnings_as_errors and missing_count > 0:
raise EnforcementError(
"There are %d undocumented proxy methods" % missing_count)
def setup(app):
app.add_config_value("enforcer_warnings_as_errors", False, "env")
app.connect("html-page-context", page_context)
app.connect("build-finished", build_finished)
| 35.811594 | 78 | 0.669365 |
a7d91d0432248cbf541553a4cc466e0a78449c25 | 8,708 | py | Python | test/functional/p2p_invalid_messages.py | KaSt/emircoin | 5d05003dfde81eb6cacc8505f55b2e6b816e698a | [
"MIT"
] | null | null | null | test/functional/p2p_invalid_messages.py | KaSt/emircoin | 5d05003dfde81eb6cacc8505f55b2e6b816e698a | [
"MIT"
] | null | null | null | test/functional/p2p_invalid_messages.py | KaSt/emircoin | 5d05003dfde81eb6cacc8505f55b2e6b816e698a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test node responses to invalid network messages."""
import asyncio
import os
import struct
from test_framework import messages
from test_framework.mininode import P2PDataStore, NetworkThread
from test_framework.test_framework import BitcoinTestFramework
class msg_unrecognized:
"""Nonsensical message. Modeled after similar types in test_framework.messages."""
command = b'badmsg'
def __init__(self, *, str_data):
self.str_data = str_data.encode() if not isinstance(str_data, bytes) else str_data
def serialize(self):
return messages.ser_string(self.str_data)
def __repr__(self):
return "{}(data={})".format(self.command, self.str_data)
class InvalidMessagesTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def run_test(self):
"""
. Test msg header
0. Send a bunch of large (4MB) messages of an unrecognized type. Check to see
that it isn't an effective DoS against the node.
1. Send an oversized (4MB+) message and check that we're disconnected.
2. Send a few messages with an incorrect data size in the header, ensure the
messages are ignored.
"""
self.test_magic_bytes()
self.test_checksum()
self.test_size()
self.test_command()
node = self.nodes[0]
self.node = node
node.add_p2p_connection(P2PDataStore())
conn2 = node.add_p2p_connection(P2PDataStore())
msg_limit = 4 * 1000 * 1000 # 4MB, per MAX_PROTOCOL_MESSAGE_LENGTH
valid_data_limit = msg_limit - 5 # Account for the 4-byte length prefix
#
# 0.
#
# Send as large a message as is valid, ensure we aren't disconnected but
# also can't exhaust resources.
#
msg_at_size = msg_unrecognized(str_data="b" * valid_data_limit)
assert len(msg_at_size.serialize()) == msg_limit
increase_allowed = 0.5
if [s for s in os.environ.get("EMIRCOIN_CONFIG", "").split(" ") if "--with-sanitizers" in s and "address" in s]:
increase_allowed = 3.5
with node.assert_memory_usage_stable(increase_allowed=increase_allowed):
self.log.info(
"Sending a bunch of large, junk messages to test "
"memory exhaustion. May take a bit...")
# Run a bunch of times to test for memory exhaustion.
for _ in range(80):
node.p2p.send_message(msg_at_size)
# Check that, even though the node is being hammered by nonsense from one
# connection, it can still service other peers in a timely way.
for _ in range(20):
conn2.sync_with_ping(timeout=2)
# Peer 1, despite serving up a bunch of nonsense, should still be connected.
self.log.info("Waiting for node to drop junk messages.")
node.p2p.sync_with_ping(timeout=120)
assert node.p2p.is_connected
#
# 1.
#
# Send an oversized message, ensure we're disconnected.
#
msg_over_size = msg_unrecognized(str_data="b" * (valid_data_limit + 1))
assert len(msg_over_size.serialize()) == (msg_limit + 1)
with node.assert_debug_log(["Oversized message from peer=4, disconnecting"]):
# An unknown message type (or *any* message type) over
# MAX_PROTOCOL_MESSAGE_LENGTH should result in a disconnect.
node.p2p.send_message(msg_over_size)
node.p2p.wait_for_disconnect(timeout=4)
node.disconnect_p2ps()
conn = node.add_p2p_connection(P2PDataStore())
conn.wait_for_verack()
#
# 2.
#
# Send messages with an incorrect data size in the header.
#
actual_size = 100
msg = msg_unrecognized(str_data="b" * actual_size)
# TODO: handle larger-than cases. I haven't been able to pin down what behavior to expect.
for wrong_size in (2, 77, 78, 79):
self.log.info("Sending a message with incorrect size of {}".format(wrong_size))
# Unmodified message should submit okay.
node.p2p.send_and_ping(msg)
# A message lying about its data size results in a disconnect when the incorrect
# data size is less than the actual size.
#
# TODO: why does behavior change at 78 bytes?
#
node.p2p.send_raw_message(self._tweak_msg_data_size(msg, wrong_size))
# For some reason unknown to me, we sometimes have to push additional data to the
# peer in order for it to realize a disconnect.
try:
node.p2p.send_message(messages.msg_ping(nonce=123123))
except IOError:
pass
node.p2p.wait_for_disconnect(timeout=10)
node.disconnect_p2ps()
node.add_p2p_connection(P2PDataStore())
# Node is still up.
conn = node.add_p2p_connection(P2PDataStore())
conn.sync_with_ping()
def test_magic_bytes(self):
conn = self.nodes[0].add_p2p_connection(P2PDataStore())
def swap_magic_bytes():
conn._on_data = lambda: None # Need to ignore all incoming messages from now, since they come with "invalid" magic bytes
conn.magic_bytes = b'\x00\x11\x22\x32'
# Call .result() to block until the atomic swap is complete, otherwise
# we might run into races later on
asyncio.run_coroutine_threadsafe(asyncio.coroutine(swap_magic_bytes)(), NetworkThread.network_event_loop).result()
with self.nodes[0].assert_debug_log(['PROCESSMESSAGE: INVALID MESSAGESTART ping']):
conn.send_message(messages.msg_ping(nonce=0xff))
conn.wait_for_disconnect(timeout=1)
self.nodes[0].disconnect_p2ps()
def test_checksum(self):
conn = self.nodes[0].add_p2p_connection(P2PDataStore())
with self.nodes[0].assert_debug_log(['ProcessMessages(badmsg, 2 bytes): CHECKSUM ERROR expected 78df0a04 was ffffffff']):
msg = conn.build_message(msg_unrecognized(str_data="d"))
cut_len = (
4 + # magic
12 + # command
                4  # len
)
# modify checksum
msg = msg[:cut_len] + b'\xff' * 4 + msg[cut_len + 4:]
self.nodes[0].p2p.send_raw_message(msg)
conn.sync_with_ping(timeout=1)
self.nodes[0].disconnect_p2ps()
def test_size(self):
conn = self.nodes[0].add_p2p_connection(P2PDataStore())
with self.nodes[0].assert_debug_log(['']):
msg = conn.build_message(msg_unrecognized(str_data="d"))
cut_len = (
4 + # magic
12 # command
)
# modify len to MAX_SIZE + 1
msg = msg[:cut_len] + struct.pack("<I", 0x02000000 + 1) + msg[cut_len + 4:]
self.nodes[0].p2p.send_raw_message(msg)
conn.wait_for_disconnect(timeout=1)
self.nodes[0].disconnect_p2ps()
def test_command(self):
conn = self.nodes[0].add_p2p_connection(P2PDataStore())
with self.nodes[0].assert_debug_log(['PROCESSMESSAGE: ERRORS IN HEADER']):
msg = msg_unrecognized(str_data="d")
msg.command = b'\xff' * 12
msg = conn.build_message(msg)
# Modify command
msg = msg[:7] + b'\x00' + msg[7 + 1:]
self.nodes[0].p2p.send_raw_message(msg)
conn.sync_with_ping(timeout=1)
self.nodes[0].disconnect_p2ps()
def _tweak_msg_data_size(self, message, wrong_size):
"""
Return a raw message based on another message but with an incorrect data size in
the message header.
"""
raw_msg = self.node.p2p.build_message(message)
bad_size_bytes = struct.pack("<I", wrong_size)
num_header_bytes_before_size = 4 + 12
# Replace the correct data size in the message with an incorrect one.
raw_msg_with_wrong_size = (
raw_msg[:num_header_bytes_before_size] +
bad_size_bytes +
raw_msg[(num_header_bytes_before_size + len(bad_size_bytes)):]
)
assert len(raw_msg) == len(raw_msg_with_wrong_size)
return raw_msg_with_wrong_size
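# Added commentary: the header offsets used above assume the standard
# Bitcoin-style P2P message framing:
#   bytes  0-3   network magic / message start
#   bytes  4-15  command name (12 bytes, NUL padded)
#   bytes 16-19  payload length (little-endian uint32)
#   bytes 20-23  payload checksum
# which is why cut_len is built from 4 + 12 (+ 4) in the tests above.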
if __name__ == '__main__':
InvalidMessagesTest().main()
| 38.702222 | 133 | 0.62345 |
a27414a98d0b52f624db10d35525a10442601628 | 849 | py | Python | imagepy/menus/Process/repair_plg.py | Pad0y/imagepy | 23f41b64ade02f94b566b0d23a4b6459c1a1578d | [
"BSD-4-Clause"
] | null | null | null | imagepy/menus/Process/repair_plg.py | Pad0y/imagepy | 23f41b64ade02f94b566b0d23a4b6459c1a1578d | [
"BSD-4-Clause"
] | null | null | null | imagepy/menus/Process/repair_plg.py | Pad0y/imagepy | 23f41b64ade02f94b566b0d23a4b6459c1a1578d | [
"BSD-4-Clause"
] | null | null | null | from sciapp.action import Filter
import numpy as np
import scipy.ndimage as ndimg
from imagepy.ipyalg import distance_transform_edt
class Plugin(Filter):
title = "Fragment Repair"
note = ["all", "req_roi", "auto_msk", "auto_snap", "preview"]
para = {"mode": "nearest"}
view = [(list, "mode", ["nearest", "mean"], str, "replace by", "pix")]
def run(self, ips, snap, img, para=None):
msk = ips.mask()
if self.para["mode"] == "nearest":
rr, cc = ndimg.distance_transform_edt(
msk, return_distances=False, return_indices=True
)
img[:] = snap[rr, cc]
else:
lab1, n = ndimg.label(msk)
lab2 = ndimg.maximum_filter(lab1, 3)
idx = ndimg.mean(img, lab2 - lab1, np.arange(1, n + 1))
img[msk] = idx[lab1[msk] - 1]
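# Added commentary on the two repair modes above (not part of the original
# plugin):
#  - "nearest": the distance transform with return_indices=True yields, for
#    every pixel inside the mask, the indices (rr, cc) of the nearest pixel
#    outside it, so img[:] = snap[rr, cc] copies the closest valid value into
#    each hole.
#  - "mean": ndimg.label numbers the holes, maximum_filter dilates the labels
#    by one pixel, and lab2 - lab1 therefore selects the rim just outside each
#    hole; every hole is then filled with the mean of img over its own rim.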
| 33.96 | 74 | 0.570082 |
205434d7361cf11e0d43c2c733e89c65b92b8bce | 493 | py | Python | curseword.py | janakhpon/LearnPyDacity | b096f9a50496a0c01353bf953209e766de312187 | [
"MIT"
] | null | null | null | curseword.py | janakhpon/LearnPyDacity | b096f9a50496a0c01353bf953209e766de312187 | [
"MIT"
] | null | null | null | curseword.py | janakhpon/LearnPyDacity | b096f9a50496a0c01353bf953209e766de312187 | [
"MIT"
] | null | null | null | import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
http = urllib3.PoolManager()
def read_text():
quotes = open("./Folder/curse.txt")
contents = quotes.read()
print(contents)
quotes.close()
check_profanity(contents)
def check_profanity(txt_data):
    # Pass the text as a query field so urllib3 URL-encodes it properly.
    connection = http.request('GET', "https://www.purgomalum.com/service/containsprofanity", fields={'text': txt_data})
    output = connection.data
    print(output)
    connection.close()
read_text() | 25.947368 | 107 | 0.730223 |
7b14ba7ecbefa8a6f25b475a2dec2367309e43a6 | 31,441 | py | Python | generate.py | rjlohan/cfn-tf-custom-types | 7bd19adc2459814b5eaa181b2645fafc57c1fc95 | [
"MIT"
] | null | null | null | generate.py | rjlohan/cfn-tf-custom-types | 7bd19adc2459814b5eaa181b2645fafc57c1fc95 | [
"MIT"
] | null | null | null | generate.py | rjlohan/cfn-tf-custom-types | 7bd19adc2459814b5eaa181b2645fafc57c1fc95 | [
"MIT"
] | null | null | null | import requests
import subprocess
import os
import pprint
import json
import re
import tempfile
import time
import sys, traceback
import multiprocessing
from pathlib import Path
PROVIDERS_MAP = {
'random': ['Random','Random'],
'digitalocean': ['DigitalOcean','DigitalOcean'],
'oci': ['OCI','Oracle Cloud Infrastructure'],
'aws': ['AWS','AWS'],
'opsgenie': ['OpsGenie','OpsGenie'],
'dnsimple': ['DNSimple','DNSimple'],
'vsphere': ['VSphere','VMware vSphere'],
'consul': ['Consul','Consul'],
'cloudstack': ['CloudStack','CloudStack'],
'tls': ['TLS','TLS'],
'cobbler': ['Cobbler','Cobbler'],
'azurerm': ['AzureRM','Azure'],
'nomad': ['Nomad','Nomad'],
'ovh': ['OVH','OVH'],
'scaleway': ['Scaleway','Scaleway'],
'bitbucket': ['Bitbucket','Bitbucket'],
'logentries': ['Logentries','Logentries'],
'datadog': ['Datadog','Datadog'],
'pagerduty': ['PagerDuty','PagerDuty'],
'oneandone': ['OneAndOne','1&1'],
'chef': ['Chef','Chef'],
'ultradns': ['UltraDNS','UltraDNS'],
'profitbricks': ['ProfitBricks','ProfitBricks'],
'postgresql': ['PostgreSQL','PostgreSQL'],
'google': ['Google','Google Cloud'],
'dme': ['DME','DNSMadeEasy'],
'triton': ['Triton','Triton'],
'circonus': ['Circonus','Circonus'],
'dyn': ['Dyn','Dyn'],
'mailgun': ['Mailgun','Mailgun'],
'influxdb': ['InfluxDB','InfluxDB'],
'alicloud': ['Alicloud','Alicloud'],
'rundeck': ['Rundeck','Rundeck'],
'grafana': ['Grafana','Grafana'],
'rabbitmq': ['RabbitMQ','RabbitMQ'],
'arukas': ['Arukas','Arukas'],
'vcd': ['VCD','VMware vCloud Director'],
'powerdns': ['PowerDNS','PowerDNS'],
'atlas': ['Atlas','Atlas'],
'dns': ['DNS','DNS'],
'newrelic': ['NewRelic','NewRelic'],
'github': ['GitHub','GitHub'],
'librato': ['Librato','Librato'],
'openstack': ['OpenStack','OpenStack'],
'heroku': ['Heroku','Heroku'],
'packet': ['Packet','Packet'],
'clc': ['CLC','CenturyLinkCloud'],
'template': ['Template','Template'],
'icinga2': ['Icinga2','Icinga2'],
'softlayer': ['SoftLayer','SoftLayer'],
'spotinst': ['Spotinst','Spotinst'],
'cloudflare': ['Cloudflare','Cloudflare'],
'mysql': ['MySQL','MySQL'],
'kubernetes': ['Kubernetes','Kubernetes'],
'opc': ['OPC','Oracle Public Cloud'],
'vault': ['Vault','Vault'],
'gitlab': ['Gitlab','Gitlab'],
'statuscake': ['StatusCake','StatusCake'],
'local': ['Local','Local'],
'ns1': ['NS1','NS1'],
'fastly': ['Fastly','Fastly'],
'docker': ['Docker','Docker'],
'rancher': ['Rancher','Rancher'],
'logicmonitor': ['LogicMonitor','LogicMonitor'],
'cloudscale': ['CloudScale','CloudScale'],
'netlify': ['Netlify','Netlify'],
'opentelekomcloud': ['OpenTelekomCloud','OpenTelekomCloud'],
'panos': ['Panos','Palo Alto Networks'],
'oraclepaas': ['OraclePaaS','Oracle Cloud Platform'],
'nsxt': ['NSXT','VMware NSX-T'],
'runscope': ['RunScope','RunScope'],
'flexibleengine': ['FlexibleEngine','FlexibleEngine'],
'hcloud': ['HCloud','Hetzner Cloud'],
'azurestack': ['AzureStack','Azure Stack'],
'telefonicaopencloud': ['TelefonicaOpenCloud','TelefonicaOpenCloud'],
'huaweicloud': ['HuaweiCloud','HuaweiCloud'],
'brightbox': ['Brightbox','Brightbox'],
'tfe': ['Tfe','Terraform Enterprise'],
'acme': ['ACME','ACME'],
'rightscale': ['RightScale','RightScale'],
'bigip': ['BIGIP','F5 BIG-IP'],
'tencentcloud': ['TencentCloud','TencentCloud'],
'nutanix': ['Nutanix','Nutanix'],
'linode': ['Linode','Linode'],
'selvpc': ['SelVPC','Selectel'],
'skytap': ['Skytap','Skytap'],
'hedvig': ['Hedvig','Hedvig'],
'ucloud': ['UCloud','UCloud'],
'azuread': ['AzureAD','Azure Active Directory']
}
def tf_to_cfn_str(obj):
return re.sub(r'(?:^|_)(\w)', lambda x: x.group(1).upper(), obj)
def tf_type_to_cfn_type(tf_name, provider_name):
split_provider_name = tf_name.split("_")
split_provider_name.pop(0)
cfn_provider_name = PROVIDERS_MAP[provider_name][0]
return "Terraform::" + cfn_provider_name + "::" + tf_to_cfn_str("_".join(split_provider_name))
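# Added commentary: illustrative results of the two helpers above (the values
# follow from the regex and the PROVIDERS_MAP entries, e.g. for the aws
# provider):
#   tf_to_cfn_str("security_group_rule")         -> "SecurityGroupRule"
#   tf_type_to_cfn_type("aws_s3_bucket", "aws")  -> "Terraform::AWS::S3Bucket"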
def check_call(args, cwd, inputstr):
proc = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=cwd)
if inputstr:
proc.stdin.write(inputstr)
stdout, stderr = proc.communicate()
if proc.returncode != 0:
raise subprocess.CalledProcessError(
returncode=proc.returncode,
cmd=args)
return stdout
def jsonschema_type(attrtype):
if attrtype == "string":
return {
'type': 'string'
}
elif attrtype == "number":
return {
'type': 'number'
}
elif attrtype == "bool":
return {
'type': 'boolean'
}
elif len(attrtype) == 2 and attrtype[0] == "list":
return {
'type': 'array',
'insertionOrder': False,
'items': jsonschema_type(attrtype[1])
}
elif len(attrtype) == 2 and attrtype[0] == "set":
return {
'type': 'array',
'insertionOrder': True,
'items': jsonschema_type(attrtype[1])
}
elif len(attrtype) == 2 and attrtype[0] == "object":
properties = {}
for k,v in attrtype[1].items():
cfnattrname = tf_to_cfn_str(k)
properties[cfnattrname] = jsonschema_type(v)
return {
'type': 'object',
'additionalProperties': False,
'properties': properties
}
elif len(attrtype) == 2 and attrtype[0] == "map":
return {
'type': 'array',
'insertionOrder': True,
'items': {
'type': 'object',
'additionalProperties': False,
'properties': {
'MapKey': {
'type': 'string'
},
'MapValue': jsonschema_type(attrtype[1])
},
'required': [
'MapKey',
'MapValue'
]
}
} # TODO: Handle this in the handlers
else:
print("ERROR: Unknown attribute type")
print(attrtype)
return {
'type': 'string'
}
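# Added commentary: example of the Terraform-type-to-JSON-schema mapping
# implemented above:
#   jsonschema_type(["list", "string"])
#   -> {'type': 'array', 'insertionOrder': False,
#       'items': {'type': 'string'}}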
def checkout(url, provider_name):
try:
check_call(['git', 'clone', url, '/tmp/' + provider_name + '/'], '/tmp', None)
except:
check_call(['git', 'pull', url], '/tmp/' + provider_name, None)
def process_provider(provider_type):
tmpdir = tempfile.TemporaryDirectory()
tempdir = Path(tmpdir.name)
with open(tempdir / "base.tf", "w") as f:
f.write('''
provider "{provider}" {{}}
'''.format(provider=provider_type))
print("Downloading latest provider version...")
check_call(['terraform', 'init'], tempdir.absolute(), None)
tfschema = json.loads(check_call(['terraform', 'providers', 'schema', '-json'], tempdir.absolute(), None))
check_call(['git', 'clone', 'https://github.com/terraform-providers/terraform-provider-{}.git'.format(provider_type), provider_type], tempdir.absolute(), None)
outstandingblocks = {}
schema = {}
doc_resources = generate_docs(tempdir, provider_type, tfschema)
for k,v in tfschema['provider_schemas'][provider_type]['resource_schemas'].items():
endnaming = tf_to_cfn_str(k)
if k.startswith(provider_type + "_"):
endnaming = tf_to_cfn_str(k[(len(provider_type)+1):])
cfntypename = "Terraform::" + PROVIDERS_MAP[provider_type][0] + "::" + endnaming
cfndirname = "Terraform-" + PROVIDERS_MAP[provider_type][0] + "-" + endnaming
try:
providerdir = Path('.') / 'resources' / provider_type / cfndirname
if not providerdir.exists():
providerdir.mkdir(parents=True, exist_ok=True)
check_call(['cfn', 'init'], providerdir.absolute(), "{}\n4\nY\n".format(cfntypename).encode('utf-8'))
schema = {
"typeName": cfntypename,
"description": "CloudFormation equivalent of {}".format(k),
"sourceUrl": "https://github.com/iann0036/cfn-tf-custom-types.git",
"definitions": {},
"properties": {
"tfcfnid": {
"description": "Internal identifier for tracking resource changes. Do not use.",
"type": "string"
}
},
"additionalProperties": False,
"required": [],
"readOnlyProperties": [
"/properties/tfcfnid"
],
"primaryIdentifier": [
"/properties/tfcfnid"
],
"handlers": {
"create": {
"permissions": [
"s3:PutObject",
"secretsmanager:GetSecretValue"
]
},
"read": {
"permissions": []
},
"update": {
"permissions": [
"s3:GetObject",
"s3:PutObject",
"secretsmanager:GetSecretValue"
]
},
"delete": {
"permissions": [
"s3:GetObject",
"s3:DeleteObject",
"secretsmanager:GetSecretValue"
]
},
"list": {
"permissions": []
}
}
}
if k in doc_resources and len(doc_resources[k]['description']) > 10:
schema['description'] = doc_resources[k]['description']
if provider_type == "aws":
schema['handlers'] = {
"create": {"permissions": ["*"]},
"read": {"permissions": ["*"]},
"update": {"permissions": ["*"]},
"delete": {"permissions": ["*"]},
"list": {"permissions": ["*"]}
}
if 'attributes' in v['block']:
for attrname,attr in v['block']['attributes'].items():
cfnattrname = tf_to_cfn_str(attrname)
attrtype = attr['type']
computed = False
optional = None
if attrname == "id":
computed = True
#schema['primaryIdentifier'] = ["/properties/Id"]
schema['readOnlyProperties'].append("/properties/Id")
else:
if 'optional' in attr:
if not attr['optional']:
schema['required'].append(cfnattrname)
optional = False
else:
optional = True
elif 'required' in attr:
if attr['required']:
schema['required'].append(cfnattrname)
if 'computed' in attr:
if attr['computed']:
computed = True
if not optional:
schema['readOnlyProperties'].append("/properties/" + cfnattrname)
schema['properties'][cfnattrname] = jsonschema_type(attrtype)
if k in doc_resources:
for docarg in doc_resources[k]['arguments']:
if docarg['name'] == attrname and docarg['property_of'] is None:
schema['properties'][cfnattrname]['description'] = docarg['description']
if 'block_types' in v['block']:
outstandingblocks.update(v['block']['block_types'])
while len(outstandingblocks):
blockname = next(iter(outstandingblocks))
block = outstandingblocks.pop(blockname)
cfnblockname = tf_to_cfn_str(blockname)
schema['definitions'][cfnblockname] = {
'type': 'object',
'additionalProperties': False,
'properties': {},
'required': []
}
if 'attributes' in block['block']:
for attrname,attr in block['block']['attributes'].items():
cfnattrname = tf_to_cfn_str(attrname)
attrtype = attr['type']
computed = False
optional = None
if 'optional' in attr:
if not attr['optional']:
schema['definitions'][cfnblockname]['required'].append(cfnattrname)
optional = False
else:
optional = True
elif 'required' in attr:
if attr['required']:
schema['definitions'][cfnblockname]['required'].append(cfnattrname)
if 'computed' in attr:
if attr['computed']:
computed = True
if not optional:
continue # read-only props in subdefs are skipped from model
schema['definitions'][cfnblockname]['properties'][cfnattrname] = jsonschema_type(attrtype)
if k in doc_resources:
for docarg in doc_resources[k]['arguments']:
if docarg['name'] == attrname and docarg['property_of'] == blockname:
schema['definitions'][cfnblockname]['properties'][cfnattrname]['description'] = docarg['description']
if 'block_types' in block['block']:
outstandingblocks.update(block['block']['block_types'])
for subblockname,subblock in block['block']['block_types'].items():
cfnsubblockname = tf_to_cfn_str(subblockname)
if subblock['nesting_mode'] == "list":
schema['definitions'][cfnblockname]['properties'][cfnsubblockname] = {
'type': 'array',
'insertionOrder': True,
'items': {
'$ref': '#/definitions/' + cfnsubblockname
}
}
elif subblock['nesting_mode'] == "set":
schema['definitions'][cfnblockname]['properties'][cfnsubblockname] = {
'type': 'array',
'insertionOrder': False,
'items': {
'$ref': '#/definitions/' + cfnsubblockname
}
}
elif subblock['nesting_mode'] == "single":
schema['definitions'][cfnblockname]['properties'][cfnsubblockname] = {
'$ref': '#/definitions/' + cfnsubblockname
}
else:
print("Unknown subblock nesting_mode: " + subblock['nesting_mode'])
if 'max_items' in subblock:
schema['definitions'][cfnblockname]['properties'][cfnsubblockname]['maxItems'] = subblock['max_items']
if 'min_items' in subblock:
schema['definitions'][cfnblockname]['properties'][cfnsubblockname]['minItems'] = subblock['min_items']
if not bool(schema['definitions'][cfnblockname]['properties']):
if bool(block['block']):
del schema['definitions'][cfnblockname] # no properties found
print("Skipped propertyless block: " + cfnblockname)
continue
else:
schema['definitions'][cfnblockname]['properties']['IsPropertyDefined'] = {
'type': 'boolean'
} # TODO: Handle this in handlers
print("Retained propertyless block: " + cfnblockname)
if block['nesting_mode'] == "list":
schema['properties'][cfnblockname] = {
'type': 'array',
'insertionOrder': False,
'items': {
'$ref': '#/definitions/' + cfnblockname
}
}
elif block['nesting_mode'] == "set":
schema['properties'][cfnblockname] = {
'type': 'array',
'insertionOrder': True,
'items': {
'$ref': '#/definitions/' + cfnblockname
}
}
elif block['nesting_mode'] == "single":
schema['properties'][cfnblockname] = {
'$ref': '#/definitions/' + cfnblockname
}
else:
print("Unknown nesting_mode: " + block['nesting_mode'])
if 'max_items' in block:
schema['properties'][cfnblockname]['maxItems'] = block['max_items']
if 'min_items' in block:
schema['properties'][cfnblockname]['minItems'] = block['min_items']
# TODO: Block descriptions
with open(providerdir / (cfndirname.lower() + ".json"), "w") as f:
f.write(json.dumps(schema, indent=4))
check_call(['cfn', 'generate'], providerdir.absolute(), None)
# update handlers.py
with open("handlers.py.template", "r") as handlerstemplate:
with open(providerdir / "src" / cfndirname.lower().replace("-","_") / "handlers.py", "w") as f:
template = handlerstemplate.read().replace("###CFNTYPENAME###",cfntypename).replace("###TFTYPENAME###",k).replace("###PROVIDERTYPENAME###",provider_type)
f.write(template)
print("Generated " + cfntypename)
except KeyboardInterrupt:
quit()
except:
traceback.print_exc(file=sys.stdout)
print("Failed to generate " + cfntypename)
# Docs
def process_resource_docs(provider_name, file_contents, provider_readme_items):
section = ""
resource_type = ""
description = ""
example = ""
arguments = []
argument_lines = []
attributes = {}
lines = file_contents.split("\n")
for line in lines:
if line.startswith("# " + provider_name):
resource_type = line[2:].replace("\\", "")
section = "description"
elif line.startswith("# Resource: " + provider_name): # aws docs differences
resource_type = line[len("# Resource: "):].replace("\\", "")
section = "description"
elif line == "## Example Usage":
section = "example"
elif line == "## Argument Reference":
section = "arguments"
elif line == "## Attributes Reference":
section = "attributes"
elif line.startswith("##"):
section = ""
elif section == "description":
description += line + "\n"
elif section == "example":
example += line + "\n"
elif section == "arguments":
argument_lines.append(line)
elif section == "attributes":
if line.strip().startswith("* "):
startpos = line.strip().find("`")
endpos = line.strip().find("`", startpos+1)
if startpos != -1 and endpos != -1:
attribute_name = line.strip()[startpos+1:endpos]
if line.strip()[endpos+1:].strip().startswith("- ") or line.strip()[endpos+1:].strip().startswith("= "):
attribute_description = line.strip()[endpos+1:].strip()[2:]
if attribute_description[-1] != ".":
attribute_description += "."
attributes[attribute_name] = attribute_description
# process arguments
argument_names = []
argument_block = None
for line_number, line in enumerate(argument_lines):
if line.strip().startswith("* ") or line.strip().startswith("- "):
startpos = line.strip().find("`")
endpos = line.strip().find("`", startpos+1)
if startpos != -1 and endpos != -1:
argument_name = line.strip()[startpos+1:endpos]
argument_names.append(argument_name)
if line.strip()[endpos+1:].strip().startswith("- ") or line.strip()[endpos+1:].strip().startswith("= "):
argument_description = line.strip()[endpos+1:].strip()[2:]
# concat lines in newlines for description of attribute
line_num_iterator = 1
while len(argument_lines) > line_number+line_num_iterator and (argument_lines[line_number+line_num_iterator].strip() != "" and not argument_lines[line_number+line_num_iterator].startswith("* ") and not argument_lines[line_number+line_num_iterator].startswith("#")):
argument_description += "\n" + argument_lines[line_number+line_num_iterator].strip()
line_num_iterator += 1
argument_attributes = []
argument_description = argument_description.strip()
if argument_description[0] == "(":
endbracked_index = argument_description.find(')')
argument_attributes = map(str.strip, argument_description[1:endbracked_index].split(","))
argument_description = argument_description[endbracked_index+1:].strip()
if argument_description and len(argument_description) > 2:
if argument_description[-1] != ".":
argument_description += "."
else:
argument_description = None
arguments.append({
'name': argument_name,
'description': argument_description,
'property_of': argument_block,
'attributes': argument_attributes
})
if line.strip().endswith(":") and argument_lines[line_number+1].strip() == "":
for argument_name in argument_names:
if "`{}`".format(argument_name) in line:
argument_block = argument_name
if resource_type != "":
if provider_name not in PROVIDERS_MAP:
return
description = description.strip()
return {
'resource_type': resource_type,
'description': description,
'example': example,
'arguments': arguments,
'attributes': attributes
}
return None
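# Added commentary: shape of the documentation fragments the parser above
# expects (hypothetical excerpt of a website/docs/r/*.markdown file; resource
# and argument names are made up for illustration):
#   # aws_example_thing
#   Provides an example thing resource.
#   ## Argument Reference
#   * `name` - (Required) The name of the thing.
#   ## Attributes Reference
#   * `id` - The ID of the thing.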
def generate_docs(tempdir, provider_type, tfschema):
resources_path = (tempdir / provider_type / "website" / "docs" / "r").absolute()
index_path = (tempdir / provider_type / "website" / "docs" / "index.html.markdown").absolute()
provider_reference_path = (tempdir / provider_type / "website" / "docs" / "provider_reference.html.markdown").absolute()
provider_readme_items = []
ret = {}
if os.path.isdir(resources_path) and provider_type in PROVIDERS_MAP:
with open(Path("docs") / "{}.md".format(provider_type), 'w') as provider_readme:
readable_provider_name = PROVIDERS_MAP[provider_type][1]
# provider info
with open(index_path, 'r') as f:
section = ""
first_argument_found = False
arguments = []
index_file_contents = f.read()
lines = index_file_contents.split("\n")
for line in lines:
if line.startswith("*") and section == "arguments":
first_argument_found = True
if line.startswith("## Argument Reference") or line.startswith("## Arguments Reference") or line.startswith("## Configuration Reference") or "the following arguments:" in line or "provide the following credentials:" in line:
section = "arguments"
elif line.startswith("#"):
section = ""
elif section == "arguments" and first_argument_found:
arguments.append(line)
# try provider reference (eg. google)
if len(arguments) == 0:
try:
with open(provider_reference_path, 'r') as f:
section = ""
first_argument_found = False
arguments = []
index_file_contents = f.read()
lines = index_file_contents.split("\n")
for line in lines:
if (line.startswith("*") or line.startswith("-")) and section == "arguments":
first_argument_found = True
if line.startswith("## Argument Reference") or line.startswith("## Arguments Reference") or line.startswith("## Configuration Reference") or "the following arguments:" in line or "provide the following credentials:" in line:
section = "arguments"
elif line.startswith("#"):
section = ""
elif section == "arguments" and first_argument_found and not "navigation to the left" in line:
if line.startswith("-"):
                                    line = "*" + line[1:]
arguments.append(line)
except:
pass
# remove environmental variable references
argument_text = "\n".join(arguments)
if provider_type not in ['digitalocean', 'fastly', 'flexibleengine', 'google', 'oneandone', 'profitbricks']:
sentences = argument_text.split(".")
i = 0
while len(sentences) > i:
if ("environment variable" in sentences[i] or "environmental variable" in sentences[i] or "Can be sourced from" in sentences[i]):
del sentences[i]
else:
i+=1
argument_text = ".".join(sentences)
# replace tf references
if provider_type in ['aws']:
argument_text = re.sub(r"(\`%s\_.+\`)" % provider_type, lambda x: "`" + tf_type_to_cfn_type(x.group(1), provider_type), argument_text) # TODO - why only one backtick used?!?
has_required_arguments = False
if "required" in argument_text.lower() and provider_type not in ['aws']:
has_required_arguments = True
provider_readme.write("# {} Provider\n\n".format(readable_provider_name))
if provider_type == "aws":
provider_readme.write("> For the AWS provider, credentials will be inherited from the executor role, meaning you are not required to provide credentials in a configuration secret.\n\n")
provider_readme.write("## Configuration\n\n")
if len(arguments) == 0:
provider_readme.write("No configuration is required for this provider.\n\n")
elif not has_required_arguments:
provider_readme.write("To configure this resource, you may optionally create an AWS Secrets Manager secret with the name **terraform/{}**. The below arguments may be included as the key/value or JSON properties in the secret or metadata object:\n\n".format(provider_type))
provider_readme.write(argument_text + "\n\n")
else:
provider_readme.write("To configure this resource, you must create an AWS Secrets Manager secret with the name **terraform/{}**. The below arguments may be included as the key/value or JSON properties in the secret or metadata object:\n\n".format(provider_type))
provider_readme.write(argument_text + "\n\n")
# iterate provider resources
provider_readme.write("## Supported Resources\n\n")
provider_readme_items = []
files = [f for f in os.listdir(resources_path) if os.path.isfile(os.path.join(resources_path, f))]
for filename in files:
with open(os.path.join(resources_path, filename), 'r') as f:
#print(filename)
resource_file_contents = f.read()
resource_properties = process_resource_docs(provider_type, resource_file_contents, provider_readme_items)
if resource_properties:
ret[resource_properties['resource_type']] = resource_properties
# provider index
for k,v in tfschema['provider_schemas'][provider_type]['resource_schemas'].items():
split_provider_name = k.split("_")
split_provider_name.pop(0)
endnaming = tf_to_cfn_str(k)
if k.startswith(provider_type + "_"):
endnaming = tf_to_cfn_str(k[(len(provider_type)+1):])
cfn_type = "Terraform::" + PROVIDERS_MAP[provider_type][0] + "::" + endnaming
provider_readme_items.append("* [{cfn_type}](../resources/{provider_name}/{type_stub}/docs/README.md)".format(
cfn_type=cfn_type,
provider_name=provider_type,
type_stub=tf_type_to_cfn_type(provider_type + "_" + "_".join(split_provider_name), provider_type).replace("::","-")
))
provider_readme_items = list(set(provider_readme_items))
provider_readme_items.sort()
provider_readme.write("\n".join(provider_readme_items))
return ret
def main():
if sys.argv[1] == "all":
provider_list = PROVIDERS_MAP.keys()
with multiprocessing.Pool(multiprocessing.cpu_count()) as p: # CPU warmer :S
list(p.imap_unordered(process_provider, provider_list))
else:
process_provider(sys.argv[1])
if __name__ == "__main__":
main() | 44.408192 | 288 | 0.506504 |
eda983b601ef06407ed87f117e7446a27fba9b63 | 4,795 | py | Python | lib/googlecloudsdk/core/survey/survey_check.py | kustodian/google-cloud-sdk | b6bae4137d4b58030adb3dcb1271216dfb19f96d | [
"Apache-2.0"
] | null | null | null | lib/googlecloudsdk/core/survey/survey_check.py | kustodian/google-cloud-sdk | b6bae4137d4b58030adb3dcb1271216dfb19f96d | [
"Apache-2.0"
] | 11 | 2020-02-29T02:51:12.000Z | 2022-03-30T23:20:08.000Z | lib/googlecloudsdk/core/survey/survey_check.py | kustodian/google-cloud-sdk | b6bae4137d4b58030adb3dcb1271216dfb19f96d | [
"Apache-2.0"
] | 1 | 2020-07-24T18:47:35.000Z | 2020-07-24T18:47:35.000Z | # -*- coding: utf-8 -*- #
# Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module manages the survey prompting."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
import time
from googlecloudsdk.core import config
from googlecloudsdk.core import log
from googlecloudsdk.core import yaml
from googlecloudsdk.core.util import files as file_utils
SURVEY_PROMPT_INTERVAL = 86400 * 14 # 14 days
SURVEY_PROMPT_INTERVAL_AFTER_ANSWERED = 86400 * 30 * 3 # 90 days
class PromptRecord(object):
"""The survey prompt record.
Attributes:
_cache_file_path: cache file path.
last_answer_survey_time: the time user most recently answered the survey
(epoch time).
last_prompt_time: the time when user is most recently prompted (epoch time).
dirty: bool, True if record in the cache file should be updated. Otherwise,
False.
"""
def __init__(self):
self._cache_file_path = config.Paths().survey_prompting_cache_path
self._last_prompt_time, self._last_answer_survey_time = (
self.ReadPromptRecordFromFile())
self._dirty = False
def ReadPromptRecordFromFile(self):
"""Loads the prompt record from the cache file.
Returns:
Two-value tuple (last_prompt_time, last_answer_survey_time)
"""
if not os.path.isfile(self._cache_file_path):
return None, None
try:
with file_utils.FileReader(self._cache_file_path) as f:
data = yaml.load(f)
return (data.get('last_prompt_time', None),
data.get('last_answer_survey_time', None))
except Exception: # pylint:disable=broad-except
log.debug('Failed to parse survey prompt cache. '
'Using empty cache instead.')
return None, None
def SavePromptRecordToFile(self):
"""Serializes data to the cache file."""
if not self._dirty:
return
with file_utils.FileWriter(self._cache_file_path) as f:
yaml.dump(self._ToDictionary(), stream=f)
self._dirty = False
def _ToDictionary(self):
res = {}
if self._last_prompt_time is not None:
res['last_prompt_time'] = self._last_prompt_time
if self._last_answer_survey_time is not None:
res['last_answer_survey_time'] = self._last_answer_survey_time
return res
@property
def last_answer_survey_time(self):
return self._last_answer_survey_time
@last_answer_survey_time.setter
def last_answer_survey_time(self, value):
self._last_answer_survey_time = value
self._dirty = True
@property
def last_prompt_time(self):
return self._last_prompt_time
@last_prompt_time.setter
def last_prompt_time(self, value):
self._last_prompt_time = value
self._dirty = True
@property
def dirty(self):
return self._dirty
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.SavePromptRecordToFile()
class SurveyPrompter(object):
"""Manages prompting user for survey.
Attributes:
_prompt_record: PromptRecord, the record of the survey prompt history.
_prompt_message: str, the prompting message.
"""
_DEFAULT_SURVEY_PROMPT_MSG = ('To take a quick anonymous survey, run:\n'
' $ gcloud alpha survey')
def __init__(self, msg=_DEFAULT_SURVEY_PROMPT_MSG):
self._prompt_record = PromptRecord()
self._prompt_message = msg
def PrintPromptMsg(self):
log.status.write('\n\n' + self._prompt_message + '\n\n')
def ShouldPrompt(self):
"""Check if the user should be prompted."""
if not (log.out.isatty() and log.err.isatty()):
return False
last_prompt_time = self._prompt_record.last_prompt_time
last_answer_survey_time = self._prompt_record.last_answer_survey_time
now = time.time()
if last_prompt_time and (now - last_prompt_time) < SURVEY_PROMPT_INTERVAL:
return False
if last_answer_survey_time and (now - last_answer_survey_time <
SURVEY_PROMPT_INTERVAL_AFTER_ANSWERED):
return False
return True
def PromptForSurvey(self):
if self.ShouldPrompt():
self.PrintPromptMsg()
with self._prompt_record as pr:
pr.last_prompt_time = time.time()
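# Added commentary (hypothetical usage sketch): callers construct a
# SurveyPrompter and call PromptForSurvey(); the prompt is shown at most once
# per SURVEY_PROMPT_INTERVAL, and the timestamp is persisted through
# PromptRecord's context-manager protocol:
#   prompter = SurveyPrompter()
#   prompter.PromptForSurvey()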
| 31.546053 | 80 | 0.717205 |
e0469bc76358acfccd5b6aa237235c395e7369d4 | 1,804 | py | Python | emily/emily_modules/yes_no_parser.py | ngmcfarland/emily | 381847feb8b3746bd80100dfe4ad396d8946630b | [
"Apache-2.0"
] | 3 | 2017-03-25T01:08:37.000Z | 2018-06-28T18:06:07.000Z | emily/emily_modules/yes_no_parser.py | ngmcfarland/emily | 381847feb8b3746bd80100dfe4ad396d8946630b | [
"Apache-2.0"
] | null | null | null | emily/emily_modules/yes_no_parser.py | ngmcfarland/emily | 381847feb8b3746bd80100dfe4ad396d8946630b | [
"Apache-2.0"
] | null | null | null | from fuzzywuzzy import fuzz
from . import utils
import string
import json
import sys
import re
import os
curdir = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(curdir, 'data/yesno.json')) as json_yesno_data:
yesno_data = json.load(json_yesno_data)
with open(os.path.join(curdir, 'data/verb.json')) as json_verb_data:
verb_data = json.load(json_verb_data)
def check_input(user_input):
user_input = utils.remove_punctuation(user_input).upper()
user_input += " "
cutoff_ratio = 100
for yesno in sorted(yesno_data['yesno'], key = lambda yesno: len(yesno['alias']), reverse = True):
match_ratio = fuzz.partial_ratio(yesno['alias'] + " ", user_input)
if len(user_input) < 2:
return {'result': None}
if match_ratio >= cutoff_ratio:
if user_input.find(yesno['alias']) <= 1:
user_input = user_input.replace(yesno['alias'] + " ", '', 1)
user_input = user_input.rstrip()
if user_input == "" or user_input == " ":
if yesno['meaning'] == 'YES':
return {'result': 'yes'}
else:
return {'result': 'no'}
for verb in verb_data['verb']:
match_r = fuzz.partial_ratio(verb, user_input)
if match_r >= cutoff_ratio:
if yesno['meaning'] == 'YES':
return {'result': 'yes_prime', 'user_input': user_input}
else:
return {'result': 'no_prime', 'user_input': user_input}
if yesno['meaning'] == 'YES':
return {'result': 'yes', 'user_input': user_input}
else:
return {'result': 'no', 'user_input': user_input}
return {'result': None}
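# Added commentary on the return contract of check_input: 'result' is one of
# 'yes', 'no', 'yes_prime', 'no_prime' or None. When a yes/no alias is found at
# the start of the input, the text remaining after the alias is echoed back as
# 'user_input'; the *_prime variants are used when that remainder begins with a
# verb listed in data/verb.json. Hypothetical example, assuming "YES" is one of
# the YES aliases in data/yesno.json:
#   check_input("Yes") -> {'result': 'yes'}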
| 39.217391 | 102 | 0.566519 |
d2d3d2061ace346c129d7e77c840d691c21c4ed4 | 2,655 | py | Python | pasee/storage_interface.py | FGuillet/pasee | c4918444ae20971deb3a0581f88ef5366284ad91 | [
"MIT"
] | 1 | 2019-07-22T12:15:26.000Z | 2019-07-22T12:15:26.000Z | pasee/storage_interface.py | FGuillet/pasee | c4918444ae20971deb3a0581f88ef5366284ad91 | [
"MIT"
] | 4 | 2019-11-26T14:22:25.000Z | 2021-07-05T13:49:14.000Z | pasee/storage_interface.py | FGuillet/pasee | c4918444ae20971deb3a0581f88ef5366284ad91 | [
"MIT"
] | 1 | 2021-04-12T13:35:42.000Z | 2021-04-12T13:35:42.000Z | """Abstract class representing Storage backend
"""
from abc import abstractmethod
from typing import AsyncContextManager, List, Any
class StorageBackend(AsyncContextManager): # pylint: disable=inherit-non-class
# (see https://github.com/PyCQA/pylint/issues/2472)
"""Abstract class for representing an Storage backend"""
def __init__(self, options: dict, **kwargs: Any) -> None:
self.options = options
super().__init__(**kwargs) # type: ignore # mypy issue 4335
@abstractmethod
    async def get_authorizations_for_user(self, user) -> List[str]:
        """Get the list of groups a user identity belongs to"""
@abstractmethod
async def create_group(self, group_name):
"""Add group"""
@abstractmethod
async def get_groups(self, last_element: str = "") -> List[str]:
"""Get groups paginated by group name in alphabetical order
        The list of groups is returned in pages of 20
        last_element is the last known element returned in the previous page
So passing the last element to this function will retrieve the next page
"""
@abstractmethod
async def get_groups_of_user(self, user: str, last_element: str = "") -> List[str]:
"""Get groups of user"""
@abstractmethod
async def delete_group(self, group: str):
"""Delete group"""
@abstractmethod
async def get_users(self, last_element: str = ""):
"""Get users"""
@abstractmethod
async def get_user(self, username: str = ""):
"""Get user"""
@abstractmethod
async def get_members_of_group(self, group) -> List[str]:
"""Get members of group"""
@abstractmethod
async def group_exists(self, group) -> bool:
"""Assert group exists"""
@abstractmethod
async def user_exists(self, user) -> bool:
"""Assert user exists"""
@abstractmethod
async def create_user(self, username):
"""Create user"""
@abstractmethod
async def delete_user(self, username):
"""Delete user"""
@abstractmethod
async def is_user_in_group(self, user, group) -> bool:
"""Verify that user is in group"""
@abstractmethod
async def add_member_to_group(self, member, group) -> bool:
"""
staff adds member to group
"""
@abstractmethod
async def delete_member_in_group(self, member, group):
"""Delete member in group"""
@abstractmethod
async def delete_members_in_group(self, group):
"""Delete all members of group"""
@abstractmethod
async def ban_user(self, username: str, ban: bool = True):
"""Ban user"""
| 29.5 | 87 | 0.645198 |
06d405fac459e547b41dcc3de802f054a1742eba | 7,940 | py | Python | autofaiss/external/build.py | Evaia/autofaiss | ad164b7be30ddf9ce45ab616d31cb4365fe7f5ab | [
"Apache-2.0"
] | null | null | null | autofaiss/external/build.py | Evaia/autofaiss | ad164b7be30ddf9ce45ab616d31cb4365fe7f5ab | [
"Apache-2.0"
] | null | null | null | autofaiss/external/build.py | Evaia/autofaiss | ad164b7be30ddf9ce45ab616d31cb4365fe7f5ab | [
"Apache-2.0"
] | null | null | null | """ gather functions necessary to build an index """
import re
from typing import Optional, Tuple, Union
import faiss
from autofaiss.external.metadata import IndexMetadata
from autofaiss.datasets.readers.local_iterators import read_embeddings_local, read_shapes_local
from autofaiss.datasets.readers.remote_iterators import read_embeddings_remote, read_filenames
from autofaiss.external.optimize import (
get_optimal_batch_size,
get_optimal_index_keys_v2,
get_optimal_train_size,
set_search_hyperparameters,
)
from autofaiss.indices.index_factory import index_factory
from autofaiss.utils.cast import (
cast_bytes_to_memory_string,
cast_memory_to_bytes,
to_faiss_metric_type,
to_readable_time,
)
from autofaiss.utils.decorators import Timeit
def estimate_memory_required_for_index_creation(
nb_vectors: int, vec_dim: int, index_key: Optional[str] = None, max_index_memory_usage: Optional[str] = None
) -> Tuple[int, str]:
"""
    Estimates the RAM necessary to create the index.
    Returns a tuple (memory_in_bytes, index_key); the memory value is in bytes.
"""
if index_key is None:
if max_index_memory_usage is not None:
index_key = get_optimal_index_keys_v2(nb_vectors, vec_dim, max_index_memory_usage)[0]
else:
raise ValueError("you should give max_index_memory_usage value if no index_key is given")
metadata = IndexMetadata(index_key, nb_vectors, vec_dim)
index_memory = metadata.estimated_index_size_in_bytes()
needed_for_adding = min(index_memory * 0.1, 10 ** 9)
index_overhead = index_memory * 0.1
# Compute the smallest number of vectors required to train the index given
# the maximal memory constraint
nb_vectors_train = get_optimal_train_size(nb_vectors, index_key, "1K", vec_dim)
memory_for_training = 4 * nb_vectors_train * vec_dim + index_memory * 0.5
return (int(index_overhead + max(index_memory + needed_for_adding, memory_for_training))), index_key
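# Added commentary (hypothetical usage sketch; the exact memory-string format,
# e.g. "4G", is an assumption): estimate the build-time RAM for one million
# 512-dimensional vectors under a 4GB index budget, letting the index key be
# chosen automatically:
#   needed_bytes, index_key = estimate_memory_required_for_index_creation(
#       1000000, 512, max_index_memory_usage="4G")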
def get_estimated_download_time_infos(
embeddings_hdfs_path: str, bandwidth_gbytes_per_sec: float = 1.0, indent: int = 0
) -> Tuple[str, Tuple[int, int]]:
"""
Gives a general approximation of the download time (and preprocessing time) of embeddings
"""
nb_vectors_approx, vec_dim = get_nb_vectors_approx_and_dim_from_hdfs(embeddings_hdfs_path)
size = 4 * nb_vectors_approx * vec_dim
download = 1.1 * size / (bandwidth_gbytes_per_sec * 1024 ** 3) # seconds
preprocess = 1.6 * download # seconds
infos = (
f"-> Download: {to_readable_time(download, rounding=True)}\n"
f"-> Preprocess: {to_readable_time(preprocess, rounding=True)}\n"
f"Total: {to_readable_time(download + preprocess, rounding=True)}"
" (< 1 minute if files are already cached)"
)
tab = "\t" * indent
infos = tab + infos.replace("\n", "\n" + tab)
return infos, (nb_vectors_approx, vec_dim)
def get_estimated_construction_time_infos(nb_vectors: int, vec_dim: int, indent: int = 0) -> str:
"""
Gives a general approximation of the construction time of the index
"""
size = 4 * nb_vectors * vec_dim
train = 1000 # seconds, depends on the number of points for training
add = 450 * size / (150 * 1024 ** 3) # seconds, Linear approx (450s for 150GB in classic conditions)
infos = (
f"-> Train: {to_readable_time(train, rounding=True)}\n"
f"-> Add: {to_readable_time(add, rounding=True)}\n"
f"Total: {to_readable_time(train + add, rounding=True)}"
)
tab = "\t" * indent
infos = tab + infos.replace("\n", "\n" + tab)
return infos
def get_nb_vectors_approx_and_dim_from_hdfs(parquet_embeddings_path: str) -> Tuple[int, int]:
    """Legacy function that gives the dimensions of a parquet file.
    Still useful for tests."""
# Get information for one partition
avg_batch_length, vec_dim = next(read_embeddings_remote(parquet_embeddings_path, verbose=False)).shape
# Count the number of files
nb_files = len(read_filenames(parquet_embeddings_path))
nb_vectors_approx = nb_files * avg_batch_length
return nb_vectors_approx, vec_dim
def get_nb_vectors_and_dim(embeddings_path: str) -> Tuple[int, int]:
"""
Function that gives the total shape of the embeddings array
"""
tot_vec = 0
vec_dim = -1
for shape in read_shapes_local(embeddings_path):
batch_length, dim = shape
tot_vec += batch_length
vec_dim = dim
return tot_vec, vec_dim
def build_index(
embeddings_path: str,
index_key: str,
metric_type: Union[str, int],
nb_vectors: int,
current_memory_available: str,
use_gpu: bool = False,
):
"""
Function that returns an index on the numpy arrays stored on disk in the embeddings_path path.
"""
    # Instantiate the index
    with Timeit(f"-> Instantiate the index {index_key}", indent=2):
# Convert metric_type to faiss type
metric_type = to_faiss_metric_type(metric_type)
# Get information for one partition
_, vec_dim = next(read_shapes_local(embeddings_path))
        # Instantiate the index
index = index_factory(vec_dim, index_key, metric_type)
metadata = IndexMetadata(index_key, nb_vectors, vec_dim)
print(
f"The index size will be approximately {cast_bytes_to_memory_string(metadata.estimated_index_size_in_bytes())}"
)
# Extract training vectors
with Timeit("-> Extract training vectors", indent=2):
memory_available_for_training = cast_bytes_to_memory_string(
cast_memory_to_bytes(current_memory_available) - metadata.estimated_index_size_in_bytes() * 0.5
)
# Determine the number of vectors necessary to train the index
train_size = get_optimal_train_size(nb_vectors, index_key, memory_available_for_training, vec_dim)
print(
f"Will use {train_size} vectors to train the index, "
f"{cast_bytes_to_memory_string(train_size*vec_dim*4)} of memory"
)
# Extract training vectors
train_vectors = next(read_embeddings_local(embeddings_path, batch_size=train_size, verbose=True))
    # Instantiate the index and train it
# pylint: disable=no-member
if use_gpu:
        # if this fails, it means that the GPU version was not compiled.
assert (
faiss.StandardGpuResources
), "FAISS was not compiled with GPU support, or loading _swigfaiss_gpu.so failed"
res = faiss.StandardGpuResources()
dev_no = 0
# transfer to GPU (may be partial).
index = faiss.index_cpu_to_gpu(res, dev_no, index)
with Timeit(
f"-> Training the index with {train_vectors.shape[0]} vectors of dim {train_vectors.shape[1]}", indent=2
):
index.train(train_vectors)
del train_vectors
memory_available_for_adding = cast_bytes_to_memory_string(
cast_memory_to_bytes(current_memory_available) - metadata.estimated_index_size_in_bytes()
)
    print(
        f"The memory available for adding the vectors is {memory_available_for_adding}"
        " (total available - used by the index)"
)
print("Will be using at most 1GB of ram for adding")
# Add the vectors to the index.
with Timeit("-> Adding the vectors to the index", indent=2):
batch_size = get_optimal_batch_size(vec_dim, memory_available_for_adding)
print(
f"Using a batch size of {batch_size} (memory overhead {cast_bytes_to_memory_string(batch_size*vec_dim*4)})"
)
for vec_batch in read_embeddings_local(embeddings_path, batch_size=batch_size, verbose=True):
index.add(vec_batch)
# Give standard values for index hyperparameters if possible.
if any(re.findall(r"OPQ\d+_\d+,IVF\d+_HNSW\d+,PQ\d+", index_key)):
set_search_hyperparameters(index, f"nprobe={64},efSearch={128},ht={2048}", use_gpu)
# return the index.
return index
| 35.605381 | 119 | 0.704282 |
266ed3d1e405b5c6721c25b7d331d531b3d4504a | 1,756 | py | Python | fuzzinator/ui/cli/cli_listener.py | elecro/fuzzinator | 2ed30127c364d50af960ad9f5cecbbae5cde2381 | [
"BSD-3-Clause"
] | null | null | null | fuzzinator/ui/cli/cli_listener.py | elecro/fuzzinator | 2ed30127c364d50af960ad9f5cecbbae5cde2381 | [
"BSD-3-Clause"
] | null | null | null | fuzzinator/ui/cli/cli_listener.py | elecro/fuzzinator | 2ed30127c364d50af960ad9f5cecbbae5cde2381 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2016-2018 Renata Hodovan, Akos Kiss.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.
import logging
from ...listener import EventListener
logger = logging.getLogger(__name__)
class CliListener(EventListener):
# Override the ancestor's constructor to avoid the need for passing an unused config parameter.
def __init__(self):
pass
def new_fuzz_job(self, ident, fuzzer, sut, cost, batch):
        logger.debug('[{sut}] New fuzzer job added: {fuzzer} [{batch}]'.format(sut=sut, fuzzer=fuzzer, batch=batch))
def new_update_job(self, ident, sut):
logger.debug('[{sut}] New update job added.'.format(sut=sut))
def new_reduce_job(self, ident, sut, cost, issue_id, size):
logger.debug('[{sut}] New reduce job added: {issue} [{size} bytes].'.format(sut=sut, issue=issue_id, size=size))
def new_validate_job(self, ident, sut, issue_id):
logger.debug('[{sut}] New validate job added: {issue}.'.format(sut=sut, issue=issue_id))
def remove_job(self, ident):
logger.debug('[{ident}] Remove job.'.format(ident=ident))
def warning(self, msg):
logger.warning(msg)
def new_issue(self, issue):
logger.info('New issue: {msg}'.format(msg=issue['id']))
def invalid_issue(self, issue):
logger.debug('{ident} issue is invalid.'.format(ident=issue['id'].decode('utf-8', errors='ignore')))
def activate_job(self, ident):
logger.debug('Activate job: {ident}'.format(ident=ident))
def update_issue(self, issue):
logger.info('Issue updated: {id}.'.format(id=issue['id']))
| 35.12 | 120 | 0.674829 |
b07aa09ca9ae20f01a6257b7d73108e5e9675625 | 9,704 | py | Python | docs/conf.py | mvanlonden/graphql-core-next | 4b5d4823ff629421c09a5a303e970d973cee6f5c | [
"MIT"
] | null | null | null | docs/conf.py | mvanlonden/graphql-core-next | 4b5d4823ff629421c09a5a303e970d973cee6f5c | [
"MIT"
] | null | null | null | docs/conf.py | mvanlonden/graphql-core-next | 4b5d4823ff629421c09a5a303e970d973cee6f5c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# GraphQL-core 3 documentation build configuration file, created by
# sphinx-quickstart on Thu Jun 21 16:28:30 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'GraphQL-core 3'
copyright = u'2019, Christoph Zwerschke'
author = u'Christoph Zwerschke'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# version = u'3.0'
# The full version, including alpha/beta/rc tags.
version = release = u'3.0.0a1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'GraphQL-core v3.0.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'GraphQL-core-3-doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'GraphQL-core-3.tex', u'GraphQL-core 3 Documentation',
u'Christoph Zwerschke', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'graphql-core', u'GraphQL-core 3 Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'GraphQL-core', u'GraphQL-core 3 Documentation',
author, 'GraphQL-core 3', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
| 29.053892 | 80 | 0.703112 |
1af59b3f611f6b5018e087886a57d7e9ba0f0c28 | 1,165 | py | Python | test/test_authenticate_api.py | go-vela/sdk-python | ca4425995bee43cb517e78fcd6702fec6f758222 | [
"Apache-2.0"
] | 1 | 2020-11-18T13:31:05.000Z | 2020-11-18T13:31:05.000Z | test/test_authenticate_api.py | go-vela/sdk-python | ca4425995bee43cb517e78fcd6702fec6f758222 | [
"Apache-2.0"
] | 57 | 2020-04-30T19:02:47.000Z | 2022-03-28T07:39:58.000Z | test/test_authenticate_api.py | go-vela/sdk-python | 304b2c8645dc6332fd69398c8c849a3961619c29 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright (c) 2021 Target Brands, Inc. All rights reserved.
"""
Vela server
API for the Vela server # noqa: E501
OpenAPI spec version: 0.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import vela
from vela.vela.authenticate_api import AuthenticateApi # noqa: E501
from vela.rest import ApiException
class TestAuthenticateApi(unittest.TestCase):
"""AuthenticateApi unit test stubs"""
def setUp(self):
self.api = AuthenticateApi() # noqa: E501
def tearDown(self):
pass
def test_get_authenticate(self):
"""Test case for get_authenticate
"""
pass
def test_get_login(self):
"""Test case for get_login
"""
pass
def test_logout(self):
"""Test case for logout
"""
pass
def test_post_authenticate(self):
"""Test case for post_authenticate
"""
pass
def test_post_login(self):
"""Test case for post_login
"""
pass
if __name__ == '__main__':
unittest.main()
| 17.651515 | 68 | 0.619742 |
7adece2ecf5e7cf45303e03594c677cb3ab800df | 681 | py | Python | tests/cmip6_zarr/test_retry.py | jonseddon/cmip6-object-store | cc045d482b2ae1aadef2108267590a0bdf7713d7 | [
"BSD-3-Clause"
] | 3 | 2020-11-13T16:59:39.000Z | 2021-03-16T20:39:47.000Z | tests/cmip6_zarr/test_retry.py | jonseddon/cmip6-object-store | cc045d482b2ae1aadef2108267590a0bdf7713d7 | [
"BSD-3-Clause"
] | 71 | 2020-09-11T10:46:43.000Z | 2022-02-11T20:57:13.000Z | tests/cmip6_zarr/test_retry.py | jonseddon/cmip6-object-store | cc045d482b2ae1aadef2108267590a0bdf7713d7 | [
"BSD-3-Clause"
] | 3 | 2021-02-17T17:10:57.000Z | 2021-05-26T10:40:47.000Z | import datetime
from retry import retry
ZERO_OR_ONE = 1
COUNTER = 0
def now():
return datetime.datetime.now()
def get_next():
global ZERO_OR_ONE
if ZERO_OR_ONE == 0:
ZERO_OR_ONE = 1
else:
ZERO_OR_ONE = 0
return ZERO_OR_ONE
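# Note added for clarity: the retry decorator re-invokes the wrapped callable
# whenever it raises, up to `tries` attempts (a negative value means unlimited),
# sleeping `delay` seconds between attempts; passing an exception class as the
# first argument restricts retrying to that exception type.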
@retry(delay=1)
def test_retry_until_seconds_is_div_10():
assert now().second % 10 == 0
@retry(ZeroDivisionError, tries=2, delay=1)
def test_retry_until_not_zero_division_error():
i = get_next()
result = 20 / i
assert result == 20
@retry(tries=10)
def test_retry_10_times():
global COUNTER
COUNTER += 1
if COUNTER != 9:
raise Exception(f"COUNTER not there yet: {COUNTER}")
| 16.609756 | 60 | 0.66373 |
9ef8b055dff237d044e4e602b23f6b1ff6fce982 | 210 | py | Python | 2017/day-17/part2.py | amochtar/adventofcode | 292e7f00a1e19d2149d00246b0a77fedfcd3bd08 | [
"MIT"
] | 1 | 2019-12-27T22:36:30.000Z | 2019-12-27T22:36:30.000Z | 2017/day-17/part2.py | amochtar/adventofcode | 292e7f00a1e19d2149d00246b0a77fedfcd3bd08 | [
"MIT"
] | null | null | null | 2017/day-17/part2.py | amochtar/adventofcode | 292e7f00a1e19d2149d00246b0a77fedfcd3bd08 | [
"MIT"
] | null | null | null | def solve(inp):
pos = 0
val_after_0 = 0
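    # Spinlock insight (AoC 2017 day 17, part 2): value 0 never moves from
    # index 0, so only the current insert position needs tracking; whenever an
    # insert lands at index 1 it becomes the new value right after 0.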
for i in range(1, 50000001):
pos = (pos+inp) % i + 1
if pos == 1:
val_after_0 = i
print("Part 2:", val_after_0)
solve(386)
| 16.153846 | 33 | 0.5 |
efdcae0f7a94f4fe2e118ca9a0be7a7102c17bc2 | 15,317 | py | Python | plaso/engine/knowledge_base.py | nflexfo/plaso | 5da7aa51c39b593773687fdf20a93ba35fc492b4 | [
"Apache-2.0"
] | 1 | 2020-12-04T10:26:34.000Z | 2020-12-04T10:26:34.000Z | plaso/engine/knowledge_base.py | nflexfo/plaso | 5da7aa51c39b593773687fdf20a93ba35fc492b4 | [
"Apache-2.0"
] | null | null | null | plaso/engine/knowledge_base.py | nflexfo/plaso | 5da7aa51c39b593773687fdf20a93ba35fc492b4 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""The artifact knowledge base object.
The knowledge base is filled by user provided input and the pre-processing
phase. It is intended to provide successive phases, like the parsing and
analysis phases, with essential information like the timezone and codepage
of the source data.
"""
from __future__ import unicode_literals
import codecs
import os
from plaso.containers import artifacts
from plaso.engine import logger
import pytz # pylint: disable=wrong-import-order
class KnowledgeBase(object):
"""The knowledge base."""
_DEFAULT_ACTIVE_SESSION = '00000000000000000000000000000000'
def __init__(self):
"""Initializes a knowledge base."""
super(KnowledgeBase, self).__init__()
self._active_session = self._DEFAULT_ACTIVE_SESSION
self._available_time_zones = {}
self._codepage = 'cp1252'
self._environment_variables = {}
self._hostnames = {}
self._mount_path = None
self._text_prepend = None
self._time_zone = pytz.UTC
self._user_accounts = {}
self._values = {}
@property
def available_time_zones(self):
"""list[TimeZone]: available time zones of the current session."""
return self._available_time_zones.get(self._active_session, {}).values()
@property
def codepage(self):
"""str: codepage of the current session."""
return self.GetValue('codepage', default_value=self._codepage)
@property
def hostname(self):
"""str: hostname of the current session."""
hostname_artifact = self._hostnames.get(self._active_session, None)
if not hostname_artifact:
return ''
return hostname_artifact.name or ''
@property
def timezone(self):
"""datetime.tzinfo: timezone of the current session."""
return self._time_zone
@property
def user_accounts(self):
"""list[UserAccountArtifact]: user accounts of the current session."""
return self._user_accounts.get(self._active_session, {}).values()
@property
def year(self):
"""int: year of the current session."""
return self.GetValue('year', default_value=0)
def AddAvailableTimeZone(self, time_zone, session_identifier=None):
"""Adds an available time zone.
Args:
time_zone (TimeZoneArtifact): time zone artifact.
      session_identifier (Optional[str]): session identifier, where
None represents the active session.
Raises:
KeyError: if the time zone already exists.
"""
session_identifier = session_identifier or self._active_session
if session_identifier not in self._available_time_zones:
self._available_time_zones[session_identifier] = {}
available_time_zones = self._available_time_zones[session_identifier]
if time_zone.name in available_time_zones:
raise KeyError('Time zone: {0:s} already exists.'.format(time_zone.name))
available_time_zones[time_zone.name] = time_zone
def AddUserAccount(self, user_account, session_identifier=None):
"""Adds an user account.
Args:
user_account (UserAccountArtifact): user account artifact.
      session_identifier (Optional[str]): session identifier, where
None represents the active session.
Raises:
KeyError: if the user account already exists.
"""
session_identifier = session_identifier or self._active_session
if session_identifier not in self._user_accounts:
self._user_accounts[session_identifier] = {}
user_accounts = self._user_accounts[session_identifier]
if user_account.identifier in user_accounts:
raise KeyError('User account: {0:s} already exists.'.format(
user_account.identifier))
user_accounts[user_account.identifier] = user_account
def AddEnvironmentVariable(self, environment_variable):
"""Adds an environment variable.
Args:
environment_variable (EnvironmentVariableArtifact): environment variable
artifact.
Raises:
KeyError: if the environment variable already exists.
"""
name = environment_variable.name.upper()
if name in self._environment_variables:
raise KeyError('Environment variable: {0:s} already exists.'.format(
environment_variable.name))
self._environment_variables[name] = environment_variable
def GetEnvironmentVariable(self, name):
"""Retrieves an environment variable.
Args:
name (str): name of the environment variable.
Returns:
EnvironmentVariableArtifact: environment variable artifact or None
if there was no value set for the given name.
"""
name = name.upper()
return self._environment_variables.get(name, None)
def GetEnvironmentVariables(self):
"""Retrieves the environment variables.
Returns:
list[EnvironmentVariableArtifact]: environment variable artifacts.
"""
return self._environment_variables.values()
def GetHostname(self, session_identifier=None):
"""Retrieves the hostname related to the event.
If the hostname is not stored in the event it is determined based
on the preprocessing information that is stored inside the storage file.
Args:
      session_identifier (Optional[str]): session identifier, where
None represents the active session.
Returns:
str: hostname.
"""
session_identifier = session_identifier or self._active_session
hostname_artifact = self._hostnames.get(session_identifier, None)
if not hostname_artifact:
return ''
return hostname_artifact.name or ''
def GetMountPath(self):
"""Retrieves the mount path of the source.
Returns:
str: mount path of the source or None if not set.
"""
return self._mount_path
def GetSourceConfigurationArtifacts(self, session_identifier=None):
"""Retrieves the knowledge base as a source configuration artifacts.
Args:
      session_identifier (Optional[str]): session identifier, where
None represents the active session.
Returns:
list[SourceConfigurationArtifact]: source configuration artifacts.
"""
source_configuration = artifacts.SourceConfigurationArtifact()
# TODO: set path_spec
source_configuration.system_configuration = (
self._GetSystemConfigurationArtifact(
session_identifier=session_identifier))
return [source_configuration]
def _GetSystemConfigurationArtifact(self, session_identifier=None):
"""Retrieves the knowledge base as a system configuration artifact.
Args:
      session_identifier (Optional[str]): session identifier, where
None represents the active session.
Returns:
SystemConfigurationArtifact: system configuration artifact.
"""
session_identifier = session_identifier or self._active_session
system_configuration = artifacts.SystemConfigurationArtifact()
system_configuration.code_page = self.GetValue(
'codepage', default_value=self._codepage)
system_configuration.hostname = self._hostnames.get(
session_identifier, None)
system_configuration.keyboard_layout = self.GetValue('keyboard_layout')
system_configuration.operating_system = self.GetValue('operating_system')
system_configuration.operating_system_product = self.GetValue(
'operating_system_product')
system_configuration.operating_system_version = self.GetValue(
'operating_system_version')
time_zone = self._time_zone.zone
if isinstance(time_zone, bytes):
time_zone = time_zone.decode('ascii')
system_configuration.time_zone = time_zone
available_time_zones = self._available_time_zones.get(
session_identifier, {})
# In Python 3 dict.values() returns a type dict_values, which will cause
# the JSON serializer to raise a TypeError.
system_configuration.available_time_zones = list(
available_time_zones.values())
user_accounts = self._user_accounts.get(session_identifier, {})
# In Python 3 dict.values() returns a type dict_values, which will cause
# the JSON serializer to raise a TypeError.
system_configuration.user_accounts = list(user_accounts.values())
return system_configuration
def GetTextPrepend(self):
"""Retrieves the text to prepend to the display name.
Returns:
str: text to prepend to the display name or None if not set.
"""
return self._text_prepend
def GetUsernameByIdentifier(
self, user_identifier, session_identifier=None):
"""Retrieves the username based on an user identifier.
Args:
user_identifier (str): user identifier, either a UID or SID.
      session_identifier (Optional[str]): session identifier, where
None represents the active session.
Returns:
str: username.
"""
session_identifier = session_identifier or self._active_session
user_accounts = self._user_accounts.get(session_identifier, {})
user_account = user_accounts.get(user_identifier, None)
if not user_account:
return ''
return user_account.username or ''
def GetUsernameForPath(self, path):
"""Retrieves a username for a specific path.
This is determining if a specific path is within a user's directory and
returning the username of the user if so.
Args:
path (str): path.
Returns:
str: username or None if the path does not appear to be within a user's
directory.
"""
path = path.lower()
user_accounts = self._user_accounts.get(self._active_session, {})
for user_account in user_accounts.values():
if not user_account.user_directory:
continue
user_directory = user_account.user_directory.lower()
if path.startswith(user_directory):
return user_account.username
return None
def GetValue(self, identifier, default_value=None):
"""Retrieves a value by identifier.
Args:
identifier (str): case insensitive unique identifier for the value.
default_value (object): default value.
Returns:
object: value or default value if not available.
Raises:
TypeError: if the identifier is not a string type.
"""
if not isinstance(identifier, str):
raise TypeError('Identifier not a string type.')
identifier = identifier.lower()
return self._values.get(identifier, default_value)
def HasUserAccounts(self):
"""Determines if the knowledge base contains user accounts.
Returns:
bool: True if the knowledge base contains user accounts.
"""
return self._user_accounts.get(self._active_session, {}) != {}
def ReadSystemConfigurationArtifact(
self, system_configuration, session_identifier=None):
"""Reads the knowledge base values from a system configuration artifact.
Note that this overwrites existing values in the knowledge base.
Args:
system_configuration (SystemConfigurationArtifact): system configuration
artifact.
      session_identifier (Optional[str]): session identifier, where
None represents the active session.
"""
session_identifier = session_identifier or self._active_session
if system_configuration.code_page:
try:
self.SetCodepage(system_configuration.code_page)
except ValueError:
logger.warning(
'Unsupported codepage: {0:s}, defaulting to {1:s}'.format(
system_configuration.code_page, self._codepage))
self._hostnames[session_identifier] = system_configuration.hostname
self.SetValue('keyboard_layout', system_configuration.keyboard_layout)
self.SetValue('operating_system', system_configuration.operating_system)
self.SetValue(
'operating_system_product',
system_configuration.operating_system_product)
self.SetValue(
'operating_system_version',
system_configuration.operating_system_version)
if system_configuration.time_zone:
try:
self.SetTimeZone(system_configuration.time_zone)
except ValueError:
logger.warning(
'Unsupported time zone: {0:s}, defaulting to {1:s}'.format(
system_configuration.time_zone, self.timezone.zone))
self._available_time_zones[session_identifier] = {
time_zone.name: time_zone
for time_zone in system_configuration.available_time_zones}
self._user_accounts[session_identifier] = {
user_account.identifier: user_account
for user_account in system_configuration.user_accounts}
def SetActiveSession(self, session_identifier):
"""Sets the active session.
Args:
session_identifier (str): session identifier where None represents
the default active session.
"""
self._active_session = session_identifier or self._DEFAULT_ACTIVE_SESSION
def SetCodepage(self, codepage):
"""Sets the codepage.
Args:
codepage (str): codepage.
Raises:
ValueError: if the codepage is not supported.
"""
try:
codecs.getencoder(codepage)
self._codepage = codepage
except LookupError:
raise ValueError('Unsupported codepage: {0:s}'.format(codepage))
def SetEnvironmentVariable(self, environment_variable):
"""Sets an environment variable.
Args:
environment_variable (EnvironmentVariableArtifact): environment variable
artifact.
"""
name = environment_variable.name.upper()
self._environment_variables[name] = environment_variable
def SetHostname(self, hostname, session_identifier=None):
"""Sets a hostname.
Args:
hostname (HostnameArtifact): hostname artifact.
      session_identifier (Optional[str]): session identifier, where
None represents the active session.
"""
session_identifier = session_identifier or self._active_session
self._hostnames[session_identifier] = hostname
def SetMountPath(self, mount_path):
"""Sets the text to prepend to the display name.
Args:
mount_path (str): mount path of the source or None if the source is
          not mounted onto a directory.
"""
# Remove a trailing path separator from the mount path so the relative
# paths will start with a path separator.
if mount_path and mount_path.endswith(os.sep):
mount_path = mount_path[:-1]
self._mount_path = mount_path
def SetTextPrepend(self, text_prepend):
"""Sets the text to prepend to the display name.
Args:
text_prepend (str): text to prepend to the display name or None if no
text should be prepended.
"""
self._text_prepend = text_prepend
def SetTimeZone(self, time_zone):
"""Sets the time zone.
Args:
time_zone (str): time zone.
Raises:
ValueError: if the timezone is not supported.
"""
try:
self._time_zone = pytz.timezone(time_zone)
except (AttributeError, pytz.UnknownTimeZoneError):
raise ValueError('Unsupported timezone: {0!s}'.format(time_zone))
def SetValue(self, identifier, value):
"""Sets a value by identifier.
Args:
identifier (str): case insensitive unique identifier for the value.
value (object): value.
Raises:
TypeError: if the identifier is not a string type.
"""
if not isinstance(identifier, str):
raise TypeError('Identifier not a string type.')
identifier = identifier.lower()
self._values[identifier] = value
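# Hedged usage sketch (added for illustration, not part of plaso): a minimal
# exercise of the knowledge base API described in the module docstring; all
# values used here are made up.
if __name__ == '__main__':
  knowledge_base_object = KnowledgeBase()
  knowledge_base_object.SetValue('operating_system', 'Windows')
  knowledge_base_object.SetCodepage('cp1252')
  knowledge_base_object.SetTimeZone('Europe/Amsterdam')
  print(knowledge_base_object.GetValue('operating_system'))
  print(knowledge_base_object.timezone)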
| 31.778008 | 79 | 0.713913 |
53b5a0fcf250efcac42449e630e22f079e1f8e6c | 10,979 | py | Python | rlberry/agents/kernel_based/rs_ucbvi.py | akrouriad/rlberry | dde4e2cbafca05fdef1df07646bb6368059eeadf | [
"MIT"
] | null | null | null | rlberry/agents/kernel_based/rs_ucbvi.py | akrouriad/rlberry | dde4e2cbafca05fdef1df07646bb6368059eeadf | [
"MIT"
] | null | null | null | rlberry/agents/kernel_based/rs_ucbvi.py | akrouriad/rlberry | dde4e2cbafca05fdef1df07646bb6368059eeadf | [
"MIT"
] | null | null | null | import logging
from rlberry.agents.agent import AgentWithSimplePolicy
import numpy as np
import gym.spaces as spaces
from rlberry.agents.dynprog.utils import backward_induction
from rlberry.agents.dynprog.utils import backward_induction_in_place
from rlberry.agents.kernel_based.common import map_to_representative
logger = logging.getLogger(__name__)
class RSUCBVIAgent(AgentWithSimplePolicy):
"""
Value iteration with exploration bonuses for continuous-state environments,
using a online discretization strategy.
The strategy:
- Build (online) a set of representative states
- Estimate transtions an rewards on the finite set of representative states
and actions.
Criterion: finite-horizon with discount factor gamma.
If the discount is not 1, only the Q function at h=0 is used.
The recommended policy after all the episodes is computed without
exploration bonuses.
Parameters
----------
env : Model
Online model with continuous (Box) state space and discrete actions
gamma : double
Discount factor in [0, 1]. If gamma is 1.0, the problem is set to
be finite-horizon.
horizon : int
Horizon of the objective function. If None and gamma<1, set to
1/(1-gamma).
lp_metric: int
The metric on the state space is the one induced by the p-norm,
where p = lp_metric. Default = 2, for the Euclidean metric.
scaling: numpy.ndarray
Must have the same size as state array, used to scale the states
before computing the metric.
If None, set to:
- (env.observation_space.high - env.observation_space.low) if high
and low are bounded
- np.ones(env.observation_space.shape[0]) if high or low are
unbounded
min_dist: double
Minimum distance between two representative states
max_repr: int
Maximum number of representative states.
If None, it is set to (sqrt(d)/min_dist)**d, where d
is the dimension of the state space
bonus_scale_factor : double
Constant by which to multiply the exploration bonus, controls
the level of exploration.
bonus_type : string
Type of exploration bonus. Currently, only "simplified_bernstein"
is implemented. If `reward_free` is true, this parameter is ignored
and the algorithm uses 1/n bonuses.
reward_free : bool
If true, ignores rewards and uses only 1/n bonuses.
References
----------
.. [1] Azar, Mohammad Gheshlaghi, Ian Osband, and Rémi Munos.
"Minimax regret bounds for reinforcement learning."
Proceedings of the 34th ICML, 2017.
.. [2] Strehl, Alexander L., and Michael L. Littman.
"An analysis of model-based interval estimation for Markov decision
processes."
Journal of Computer and System Sciences 74.8 (2008): 1309-1331.
.. [3] Kveton, Branislav, and Georgios Theocharous.
"Kernel-Based Reinforcement Learning on Representative States."
AAAI, 2012.
.. [4] Domingues, O. D., Ménard, P., Pirotta, M., Kaufmann, E., & Valko, M.(2020).
A kernel-based approach to non-stationary reinforcement learning in metric
spaces.
arXiv preprint arXiv:2007.05078.
"""
name = "RSUCBVI"
def __init__(
self,
env,
gamma=0.99,
horizon=100,
lp_metric=2,
scaling=None,
min_dist=0.1,
max_repr=1000,
bonus_scale_factor=1.0,
bonus_type="simplified_bernstein",
reward_free=False,
**kwargs
):
# init base class
AgentWithSimplePolicy.__init__(self, env, **kwargs)
self.gamma = gamma
self.horizon = horizon
self.lp_metric = lp_metric
self.min_dist = min_dist
self.bonus_scale_factor = bonus_scale_factor
self.bonus_type = bonus_type
self.reward_free = reward_free
# check environment
assert isinstance(self.env.observation_space, spaces.Box)
assert isinstance(self.env.action_space, spaces.Discrete)
# other checks
assert gamma >= 0 and gamma <= 1.0
if self.horizon is None:
assert gamma < 1.0, "If no horizon is given, gamma must be smaller than 1."
self.horizon = int(np.ceil(1.0 / (1.0 - gamma)))
# state dimension
self.state_dim = self.env.observation_space.shape[0]
# compute scaling, if it is None
if scaling is None:
# if high and low are bounded
if (self.env.observation_space.high == np.inf).sum() == 0 and (
self.env.observation_space.low == -np.inf
).sum() == 0:
scaling = (
self.env.observation_space.high - self.env.observation_space.low
)
# if high or low are unbounded
else:
scaling = np.ones(self.state_dim)
else:
assert scaling.ndim == 1
assert scaling.shape[0] == self.state_dim
self.scaling = scaling
# maximum value
r_range = self.env.reward_range[1] - self.env.reward_range[0]
if r_range == np.inf or r_range == 0.0:
logger.warning(
"{}: Reward range is zero or infinity. ".format(self.name)
+ "Setting it to 1."
)
r_range = 1.0
if self.gamma == 1.0:
self.v_max = r_range * horizon
else:
self.v_max = (
r_range
* (1.0 - np.power(self.gamma, self.horizon))
/ (1.0 - self.gamma)
)
# number of representative states and number of actions
if max_repr is None:
max_repr = int(
np.ceil(
(1.0 * np.sqrt(self.state_dim) / self.min_dist) ** self.state_dim
)
)
self.max_repr = max_repr
# current number of representative states
self.M = None
self.A = self.env.action_space.n
# declaring variables
self.episode = None # current episode
self.representative_states = None # coordinates of all repr states
self.N_sa = None # visits to (s, a)
self.N_sas = None # visits to (s, a, s')
self.S_sa = None # sum of rewards at (s, a)
self.B_sa = None # bonus at (s, a)
self.Q = None # Q function
self.V = None # V function
self.Q_policy = None # Q function for recommended policy
# initialize
self.reset()
def reset(self, **kwargs):
self.M = 0
self.representative_states = np.zeros((self.max_repr, self.state_dim))
self.N_sa = np.zeros((self.max_repr, self.A))
self.N_sas = np.zeros((self.max_repr, self.A, self.max_repr))
self.S_sa = np.zeros((self.max_repr, self.A))
self.B_sa = self.v_max * np.ones((self.max_repr, self.A))
self.R_hat = np.zeros((self.max_repr, self.A))
self.P_hat = np.zeros((self.max_repr, self.A, self.max_repr))
self.V = np.zeros((self.horizon, self.max_repr))
self.Q = np.zeros((self.horizon, self.max_repr, self.A))
self.Q_policy = None
self.episode = 0
def policy(self, observation):
state = observation
assert self.Q_policy is not None
repr_state = self._map_to_repr(state, False)
return self.Q_policy[0, repr_state, :].argmax()
def fit(self, budget: int, **kwargs):
del kwargs
n_episodes_to_run = budget
count = 0
while count < n_episodes_to_run:
self._run_episode()
count += 1
# compute Q function for the recommended policy
self.Q_policy, _ = backward_induction(
self.R_hat[: self.M, :],
self.P_hat[: self.M, :, : self.M],
self.horizon,
self.gamma,
)
def _map_to_repr(self, state, accept_new_repr=True):
repr_state = map_to_representative(
state,
self.lp_metric,
self.representative_states,
self.M,
self.min_dist,
self.scaling,
accept_new_repr,
)
# check if new representative state
if repr_state == self.M:
self.M += 1
return repr_state
def _update(self, state, action, next_state, reward):
repr_state = self._map_to_repr(state)
repr_next_state = self._map_to_repr(next_state)
self.N_sa[repr_state, action] += 1
self.N_sas[repr_state, action, repr_next_state] += 1
self.S_sa[repr_state, action] += reward
self.R_hat[repr_state, action] = (
self.S_sa[repr_state, action] / self.N_sa[repr_state, action]
)
self.P_hat[repr_state, action, :] = (
self.N_sas[repr_state, action, :] / self.N_sa[repr_state, action]
)
self.B_sa[repr_state, action] = self._compute_bonus(
self.N_sa[repr_state, action]
)
def _compute_bonus(self, n):
# reward-free
if self.reward_free:
bonus = 1.0 / n
return bonus
# not reward-free
if self.bonus_type == "simplified_bernstein":
bonus = self.bonus_scale_factor * np.sqrt(1.0 / n) + self.v_max / n
bonus = min(bonus, self.v_max)
return bonus
else:
raise NotImplementedError(
"Error: bonus type {} not implemented".format(self.bonus_type)
)
def _get_action(self, state, hh=0):
assert self.Q is not None
repr_state = self._map_to_repr(state, False)
return self.Q[hh, repr_state, :].argmax()
def _run_episode(self):
# interact for H steps
episode_rewards = 0
state = self.env.reset()
for hh in range(self.horizon):
action = self._get_action(state, hh)
next_state, reward, done, _ = self.env.step(action)
episode_rewards += reward # used for logging only
if self.reward_free:
reward = 0.0 # set to zero before update if reward_free
self._update(state, action, next_state, reward)
state = next_state
if done:
break
# run backward induction
backward_induction_in_place(
self.Q[:, : self.M, :],
self.V[:, : self.M],
self.R_hat[: self.M, :] + self.B_sa[: self.M, :],
self.P_hat[: self.M, :, : self.M],
self.horizon,
self.gamma,
self.v_max,
)
self.episode += 1
#
if self.writer is not None:
self.writer.add_scalar("episode_rewards", episode_rewards, self.episode)
self.writer.add_scalar("representative states", self.M, self.episode)
# return sum of rewards collected in the episode
return episode_rewards
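# Hedged usage sketch (added for illustration, not part of rlberry): assumes a
# continuous-state benchmark such as rlberry's PBall2D is importable; any
# gym-style environment with a Box observation space and Discrete actions
# would work the same way. Parameter values below are illustrative only.
if __name__ == "__main__":
    from rlberry.envs.benchmarks.ball_exploration import PBall2D
    env = PBall2D()
    agent = RSUCBVIAgent(env, gamma=0.99, horizon=30, min_dist=0.2)
    agent.fit(budget=10)
    observation = env.reset()
    print("greedy action:", agent.policy(observation))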
| 34.309375 | 87 | 0.595318 |
0e82cc244879797c52bb0b164ce89073cbf90895 | 4,463 | py | Python | tests/test_app.py | kushal-kumaran/xlwings | 36ea1ba91ecb1c37d36d87dfa7ed987c06bca142 | [
"BSD-3-Clause"
] | null | null | null | tests/test_app.py | kushal-kumaran/xlwings | 36ea1ba91ecb1c37d36d87dfa7ed987c06bca142 | [
"BSD-3-Clause"
] | null | null | null | tests/test_app.py | kushal-kumaran/xlwings | 36ea1ba91ecb1c37d36d87dfa7ed987c06bca142 | [
"BSD-3-Clause"
] | null | null | null | import os
import sys
import time
import unittest
import xlwings as xw
from xlwings.tests.common import TestBase, this_dir, SPEC
class TestApps(TestBase):
def test_active(self):
self.assertTrue(xw.apps.active in [self.app1, self.app2])
def test_len(self):
n_original = len(xw.apps)
app = xw.App(spec=SPEC)
wb = app.books.add()
self.assertEqual(n_original + 1, len(xw.apps))
app.quit()
def test_count(self):
self.assertEqual(xw.apps.count, len(xw.apps))
def test_iter(self):
for app in xw.apps:
if app == (self.app1 or self.app2):
self.assertEqual(len(app.books), 2)
def test_keys(self):
k = xw.apps.keys()[0]
self.assertEqual(xw.apps[k], xw.apps(k))
class TestApp(TestBase):
def test_activate(self):
if sys.platform.startswith('win') and self.app1.version.major > 14:
# Excel >= 2013 on Win has issues with activating hidden apps correctly
# over two instances
with self.assertRaises(Exception):
self.app1.activate()
else:
self.assertEqual(self.app2, xw.apps.active)
self.app1.activate()
self.assertEqual(self.app1, xw.apps.active)
def test_visible(self):
# Can't successfully test for False on Mac...?
self.app1.visible = True
self.assertTrue(self.app1.visible)
def test_quit(self):
app = xw.App()
n_apps = len(xw.apps)
app.quit()
time.sleep(1) # needed for Mac Excel 2011
self.assertEqual(n_apps - 1, len(xw.apps))
def test_kill(self):
app = xw.App(spec=SPEC)
n_apps = len(xw.apps)
app.kill()
import time
time.sleep(0.5)
self.assertEqual(n_apps - 1, len(xw.apps))
def test_screen_updating(self):
self.app1.screen_updating = False
self.assertEqual(self.app1.screen_updating, False)
self.app1.screen_updating = True
self.assertTrue(self.app1.screen_updating)
def test_display_alerts(self):
self.app1.display_alerts = False
self.assertEqual(self.app1.display_alerts, False)
self.app1.display_alerts = True
self.assertTrue(self.app1.display_alerts)
def test_calculation_calculate(self):
sht = self.wb1.sheets[0]
sht.range('A1').value = 2
sht.range('B1').formula = '=A1 * 2'
self.app1.calculation = 'manual'
sht.range('A1').value = 4
self.assertEqual(sht.range('B1').value, 4)
self.app1.calculation = 'automatic'
self.app1.calculate() # This is needed on Mac Excel 2016 but not on Mac Excel 2011 (changed behaviour)
self.assertEqual(sht.range('B1').value, 8)
sht.range('A1').value = 2
self.assertEqual(sht.range('B1').value, 4)
def test_calculation(self):
self.app1.calculation = 'automatic'
self.assertEqual(self.app1.calculation, 'automatic')
self.app1.calculation = 'manual'
self.assertEqual(self.app1.calculation, 'manual')
self.app1.calculation = 'semiautomatic'
self.assertEqual(self.app1.calculation, 'semiautomatic')
def test_version(self):
self.assertTrue(self.app1.version.major > 0)
def test_wb_across_instances(self):
app1_wb_count = len(self.app1.books)
app2_wb_count = len(self.app2.books)
wb2 = self.app1.books.add()
wb3 = self.app2.books.add()
wb4 = self.app2.books.add()
wb5 = self.app2.books.add()
self.assertEqual(len(self.app1.books), app1_wb_count + 1)
self.assertEqual(len(self.app2.books), app2_wb_count + 3)
wb2.close()
wb3.close()
wb4.close()
wb5.close()
def test_selection(self):
self.assertEqual(self.app1.selection.address, '$A$1')
def test_books(self):
self.assertEqual(len(self.app2.books), 2)
def test_pid(self):
self.assertTrue(self.app1.pid > 0)
def test_len(self):
n_books = len(self.app1.books)
self.app1.books.add()
self.assertEqual(len(self.app1.books), n_books + 1)
def test_macro(self):
wb = self.app1.books.open(os.path.join(this_dir, 'macro book.xlsm'))
test1 = self.app1.macro('Module1.Test1')
res1 = test1('Test1a', 'Test1b')
self.assertEqual(res1, 1)
if __name__ == '__main__':
unittest.main()
| 29.95302 | 111 | 0.615953 |
653650045623ca0b568bc213b10750d42c887ec3 | 141,177 | py | Python | QUANTAXIS/QAUtil/QADate_trade.py | lkaiser/QUANTAXIS | 8703d241f8f11fbab89a75b79495b2f9ff036812 | [
"MIT"
] | null | null | null | QUANTAXIS/QAUtil/QADate_trade.py | lkaiser/QUANTAXIS | 8703d241f8f11fbab89a75b79495b2f9ff036812 | [
"MIT"
] | 1 | 2019-01-23T04:46:52.000Z | 2019-01-23T04:46:52.000Z | QUANTAXIS/QAUtil/QADate_trade.py | lkaiser/QUANTAXIS | 8703d241f8f11fbab89a75b79495b2f9ff036812 | [
"MIT"
] | null | null | null | # coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2018 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import datetime
from QUANTAXIS.QAUtil.QAParameter import MARKET_TYPE
# TODO 🛠 record only the non-trading days; iterate over calendar days in code to generate the trading days
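# Hedged sketch of the TODO above (added for illustration, not wired into
# QUANTAXIS): given only the recorded non-trading days, the trading calendar
# can be rebuilt by iterating over calendar days and keeping every weekday that
# is not a holiday. All names below are illustrative.
def _generate_trade_dates(start_date, end_date, holidays):
    """Yield 'YYYY-MM-DD' strings for each weekday in [start_date, end_date] not in holidays."""
    one_day = datetime.timedelta(days=1)
    current = start_date
    while current <= end_date:
        if current.weekday() < 5 and current.strftime('%Y-%m-%d') not in holidays:
            yield current.strftime('%Y-%m-%d')
        current += one_day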
trade_date_sse = [
'1990-12-19',
'1990-12-20',
'1990-12-21',
'1990-12-24',
'1990-12-25',
'1990-12-26',
'1990-12-27',
'1990-12-28',
'1990-12-31',
'1991-01-02',
'1991-01-03',
'1991-01-04',
'1991-01-07',
'1991-01-08',
'1991-01-09',
'1991-01-10',
'1991-01-11',
'1991-01-14',
'1991-01-15',
'1991-01-16',
'1991-01-17',
'1991-01-18',
'1991-01-21',
'1991-01-22',
'1991-01-23',
'1991-01-24',
'1991-01-25',
'1991-01-28',
'1991-01-29',
'1991-01-30',
'1991-01-31',
'1991-02-01',
'1991-02-04',
'1991-02-05',
'1991-02-06',
'1991-02-07',
'1991-02-08',
'1991-02-11',
'1991-02-12',
'1991-02-13',
'1991-02-14',
'1991-02-19',
'1991-02-20',
'1991-02-21',
'1991-02-22',
'1991-02-25',
'1991-02-26',
'1991-02-27',
'1991-02-28',
'1991-03-01',
'1991-03-04',
'1991-03-05',
'1991-03-06',
'1991-03-07',
'1991-03-08',
'1991-03-11',
'1991-03-12',
'1991-03-13',
'1991-03-14',
'1991-03-15',
'1991-03-18',
'1991-03-19',
'1991-03-20',
'1991-03-21',
'1991-03-22',
'1991-03-25',
'1991-03-26',
'1991-03-27',
'1991-03-28',
'1991-03-29',
'1991-04-01',
'1991-04-02',
'1991-04-03',
'1991-04-04',
'1991-04-05',
'1991-04-08',
'1991-04-09',
'1991-04-10',
'1991-04-11',
'1991-04-12',
'1991-04-15',
'1991-04-16',
'1991-04-17',
'1991-04-18',
'1991-04-19',
'1991-04-22',
'1991-04-23',
'1991-04-24',
'1991-04-25',
'1991-04-26',
'1991-04-29',
'1991-04-30',
'1991-05-02',
'1991-05-03',
'1991-05-06',
'1991-05-07',
'1991-05-08',
'1991-05-09',
'1991-05-10',
'1991-05-13',
'1991-05-14',
'1991-05-15',
'1991-05-16',
'1991-05-17',
'1991-05-20',
'1991-05-21',
'1991-05-22',
'1991-05-23',
'1991-05-24',
'1991-05-27',
'1991-05-28',
'1991-05-29',
'1991-05-30',
'1991-05-31',
'1991-06-03',
'1991-06-04',
'1991-06-05',
'1991-06-06',
'1991-06-07',
'1991-06-10',
'1991-06-11',
'1991-06-12',
'1991-06-13',
'1991-06-14',
'1991-06-17',
'1991-06-18',
'1991-06-19',
'1991-06-20',
'1991-06-21',
'1991-06-24',
'1991-06-25',
'1991-06-26',
'1991-06-27',
'1991-06-28',
'1991-07-01',
'1991-07-02',
'1991-07-03',
'1991-07-04',
'1991-07-05',
'1991-07-08',
'1991-07-09',
'1991-07-10',
'1991-07-11',
'1991-07-12',
'1991-07-15',
'1991-07-16',
'1991-07-17',
'1991-07-18',
'1991-07-19',
'1991-07-22',
'1991-07-23',
'1991-07-24',
'1991-07-25',
'1991-07-26',
'1991-07-29',
'1991-07-30',
'1991-07-31',
'1991-08-01',
'1991-08-02',
'1991-08-05',
'1991-08-06',
'1991-08-07',
'1991-08-08',
'1991-08-09',
'1991-08-12',
'1991-08-13',
'1991-08-14',
'1991-08-15',
'1991-08-16',
'1991-08-19',
'1991-08-20',
'1991-08-21',
'1991-08-22',
'1991-08-23',
'1991-08-26',
'1991-08-27',
'1991-08-28',
'1991-08-29',
'1991-08-30',
'1991-09-02',
'1991-09-03',
'1991-09-04',
'1991-09-05',
'1991-09-06',
'1991-09-09',
'1991-09-10',
'1991-09-11',
'1991-09-12',
'1991-09-13',
'1991-09-16',
'1991-09-17',
'1991-09-18',
'1991-09-19',
'1991-09-20',
'1991-09-23',
'1991-09-24',
'1991-09-25',
'1991-09-26',
'1991-09-27',
'1991-09-30',
'1991-10-03',
'1991-10-04',
'1991-10-07',
'1991-10-08',
'1991-10-09',
'1991-10-10',
'1991-10-11',
'1991-10-14',
'1991-10-15',
'1991-10-16',
'1991-10-17',
'1991-10-18',
'1991-10-21',
'1991-10-22',
'1991-10-23',
'1991-10-24',
'1991-10-25',
'1991-10-28',
'1991-10-29',
'1991-10-30',
'1991-10-31',
'1991-11-01',
'1991-11-04',
'1991-11-05',
'1991-11-06',
'1991-11-07',
'1991-11-08',
'1991-11-11',
'1991-11-12',
'1991-11-13',
'1991-11-14',
'1991-11-15',
'1991-11-18',
'1991-11-19',
'1991-11-20',
'1991-11-21',
'1991-11-22',
'1991-11-25',
'1991-11-26',
'1991-11-27',
'1991-11-28',
'1991-11-29',
'1991-12-02',
'1991-12-03',
'1991-12-04',
'1991-12-05',
'1991-12-06',
'1991-12-09',
'1991-12-10',
'1991-12-11',
'1991-12-12',
'1991-12-13',
'1991-12-16',
'1991-12-17',
'1991-12-18',
'1991-12-19',
'1991-12-20',
'1991-12-23',
'1991-12-24',
'1991-12-25',
'1991-12-26',
'1991-12-27',
'1991-12-30',
'1991-12-31',
'1992-01-02',
'1992-01-03',
'1992-01-06',
'1992-01-07',
'1992-01-08',
'1992-01-09',
'1992-01-10',
'1992-01-13',
'1992-01-14',
'1992-01-15',
'1992-01-16',
'1992-01-17',
'1992-01-20',
'1992-01-21',
'1992-01-22',
'1992-01-23',
'1992-01-24',
'1992-01-27',
'1992-01-28',
'1992-01-29',
'1992-01-30',
'1992-01-31',
'1992-02-03',
'1992-02-07',
'1992-02-10',
'1992-02-11',
'1992-02-12',
'1992-02-13',
'1992-02-14',
'1992-02-17',
'1992-02-18',
'1992-02-19',
'1992-02-20',
'1992-02-21',
'1992-02-24',
'1992-02-25',
'1992-02-26',
'1992-02-27',
'1992-02-28',
'1992-03-02',
'1992-03-03',
'1992-03-04',
'1992-03-05',
'1992-03-06',
'1992-03-09',
'1992-03-10',
'1992-03-11',
'1992-03-12',
'1992-03-13',
'1992-03-16',
'1992-03-17',
'1992-03-18',
'1992-03-19',
'1992-03-20',
'1992-03-23',
'1992-03-24',
'1992-03-25',
'1992-03-26',
'1992-03-27',
'1992-03-30',
'1992-03-31',
'1992-04-01',
'1992-04-02',
'1992-04-03',
'1992-04-06',
'1992-04-07',
'1992-04-08',
'1992-04-09',
'1992-04-10',
'1992-04-13',
'1992-04-14',
'1992-04-15',
'1992-04-16',
'1992-04-17',
'1992-04-20',
'1992-04-21',
'1992-04-22',
'1992-04-23',
'1992-04-24',
'1992-04-27',
'1992-04-28',
'1992-04-29',
'1992-04-30',
'1992-05-04',
'1992-05-05',
'1992-05-06',
'1992-05-07',
'1992-05-08',
'1992-05-11',
'1992-05-12',
'1992-05-13',
'1992-05-14',
'1992-05-15',
'1992-05-18',
'1992-05-19',
'1992-05-20',
'1992-05-21',
'1992-05-22',
'1992-05-25',
'1992-05-26',
'1992-05-27',
'1992-05-28',
'1992-05-29',
'1992-06-01',
'1992-06-02',
'1992-06-03',
'1992-06-04',
'1992-06-05',
'1992-06-08',
'1992-06-09',
'1992-06-10',
'1992-06-11',
'1992-06-12',
'1992-06-15',
'1992-06-16',
'1992-06-17',
'1992-06-18',
'1992-06-19',
'1992-06-22',
'1992-06-23',
'1992-06-24',
'1992-06-25',
'1992-06-26',
'1992-06-29',
'1992-06-30',
'1992-07-01',
'1992-07-02',
'1992-07-03',
'1992-07-06',
'1992-07-07',
'1992-07-08',
'1992-07-09',
'1992-07-10',
'1992-07-13',
'1992-07-14',
'1992-07-15',
'1992-07-16',
'1992-07-17',
'1992-07-20',
'1992-07-21',
'1992-07-22',
'1992-07-23',
'1992-07-24',
'1992-07-27',
'1992-07-28',
'1992-07-29',
'1992-07-30',
'1992-07-31',
'1992-08-03',
'1992-08-04',
'1992-08-05',
'1992-08-06',
'1992-08-07',
'1992-08-10',
'1992-08-11',
'1992-08-12',
'1992-08-13',
'1992-08-14',
'1992-08-17',
'1992-08-18',
'1992-08-19',
'1992-08-20',
'1992-08-21',
'1992-08-24',
'1992-08-25',
'1992-08-26',
'1992-08-27',
'1992-08-28',
'1992-08-31',
'1992-09-01',
'1992-09-02',
'1992-09-03',
'1992-09-04',
'1992-09-07',
'1992-09-08',
'1992-09-09',
'1992-09-10',
'1992-09-11',
'1992-09-14',
'1992-09-15',
'1992-09-16',
'1992-09-17',
'1992-09-18',
'1992-09-21',
'1992-09-22',
'1992-09-23',
'1992-09-24',
'1992-09-25',
'1992-09-28',
'1992-09-29',
'1992-09-30',
'1992-10-05',
'1992-10-06',
'1992-10-07',
'1992-10-08',
'1992-10-09',
'1992-10-12',
'1992-10-13',
'1992-10-14',
'1992-10-15',
'1992-10-16',
'1992-10-19',
'1992-10-20',
'1992-10-21',
'1992-10-22',
'1992-10-23',
'1992-10-26',
'1992-10-27',
'1992-10-28',
'1992-10-29',
'1992-10-30',
'1992-11-02',
'1992-11-03',
'1992-11-04',
'1992-11-05',
'1992-11-06',
'1992-11-09',
'1992-11-10',
'1992-11-11',
'1992-11-12',
'1992-11-13',
'1992-11-16',
'1992-11-17',
'1992-11-18',
'1992-11-19',
'1992-11-20',
'1992-11-23',
'1992-11-24',
'1992-11-25',
'1992-11-26',
'1992-11-27',
'1992-11-30',
'1992-12-01',
'1992-12-02',
'1992-12-03',
'1992-12-04',
'1992-12-07',
'1992-12-08',
'1992-12-09',
'1992-12-10',
'1992-12-11',
'1992-12-14',
'1992-12-15',
'1992-12-16',
'1992-12-17',
'1992-12-18',
'1992-12-21',
'1992-12-22',
'1992-12-23',
'1992-12-24',
'1992-12-25',
'1992-12-28',
'1992-12-29',
'1992-12-30',
'1992-12-31',
'1993-01-04',
'1993-01-05',
'1993-01-06',
'1993-01-07',
'1993-01-08',
'1993-01-11',
'1993-01-12',
'1993-01-13',
'1993-01-14',
'1993-01-15',
'1993-01-18',
'1993-01-19',
'1993-01-20',
'1993-01-21',
'1993-01-22',
'1993-01-27',
'1993-01-28',
'1993-01-29',
'1993-02-01',
'1993-02-02',
'1993-02-03',
'1993-02-04',
'1993-02-05',
'1993-02-08',
'1993-02-09',
'1993-02-10',
'1993-02-11',
'1993-02-12',
'1993-02-15',
'1993-02-16',
'1993-02-17',
'1993-02-18',
'1993-02-19',
'1993-02-22',
'1993-02-23',
'1993-02-24',
'1993-02-25',
'1993-02-26',
'1993-03-01',
'1993-03-02',
'1993-03-03',
'1993-03-04',
'1993-03-05',
'1993-03-08',
'1993-03-09',
'1993-03-10',
'1993-03-11',
'1993-03-12',
'1993-03-15',
'1993-03-16',
'1993-03-17',
'1993-03-18',
'1993-03-19',
'1993-03-22',
'1993-03-23',
'1993-03-24',
'1993-03-25',
'1993-03-26',
'1993-03-29',
'1993-03-30',
'1993-03-31',
'1993-04-01',
'1993-04-02',
'1993-04-05',
'1993-04-06',
'1993-04-07',
'1993-04-08',
'1993-04-09',
'1993-04-12',
'1993-04-13',
'1993-04-14',
'1993-04-15',
'1993-04-16',
'1993-04-19',
'1993-04-20',
'1993-04-21',
'1993-04-22',
'1993-04-23',
'1993-04-26',
'1993-04-27',
'1993-04-28',
'1993-04-29',
'1993-04-30',
'1993-05-03',
'1993-05-04',
'1993-05-05',
'1993-05-06',
'1993-05-07',
'1993-05-10',
'1993-05-11',
'1993-05-12',
'1993-05-13',
'1993-05-14',
'1993-05-17',
'1993-05-18',
'1993-05-19',
'1993-05-20',
'1993-05-21',
'1993-05-24',
'1993-05-25',
'1993-05-26',
'1993-05-27',
'1993-05-28',
'1993-05-31',
'1993-06-01',
'1993-06-02',
'1993-06-03',
'1993-06-04',
'1993-06-07',
'1993-06-08',
'1993-06-09',
'1993-06-10',
'1993-06-11',
'1993-06-14',
'1993-06-15',
'1993-06-16',
'1993-06-17',
'1993-06-18',
'1993-06-21',
'1993-06-22',
'1993-06-23',
'1993-06-24',
'1993-06-25',
'1993-06-28',
'1993-06-29',
'1993-06-30',
'1993-07-01',
'1993-07-02',
'1993-07-05',
'1993-07-06',
'1993-07-07',
'1993-07-08',
'1993-07-09',
'1993-07-12',
'1993-07-13',
'1993-07-14',
'1993-07-15',
'1993-07-16',
'1993-07-19',
'1993-07-20',
'1993-07-21',
'1993-07-22',
'1993-07-23',
'1993-07-26',
'1993-07-27',
'1993-07-28',
'1993-07-29',
'1993-07-30',
'1993-08-02',
'1993-08-03',
'1993-08-04',
'1993-08-05',
'1993-08-06',
'1993-08-09',
'1993-08-10',
'1993-08-11',
'1993-08-12',
'1993-08-13',
'1993-08-16',
'1993-08-17',
'1993-08-18',
'1993-08-19',
'1993-08-20',
'1993-08-23',
'1993-08-24',
'1993-08-25',
'1993-08-26',
'1993-08-27',
'1993-08-30',
'1993-08-31',
'1993-09-01',
'1993-09-02',
'1993-09-03',
'1993-09-06',
'1993-09-07',
'1993-09-08',
'1993-09-09',
'1993-09-10',
'1993-09-13',
'1993-09-14',
'1993-09-15',
'1993-09-16',
'1993-09-17',
'1993-09-20',
'1993-09-21',
'1993-09-22',
'1993-09-23',
'1993-09-24',
'1993-09-27',
'1993-09-28',
'1993-09-29',
'1993-09-30',
'1993-10-04',
'1993-10-05',
'1993-10-06',
'1993-10-07',
'1993-10-08',
'1993-10-11',
'1993-10-12',
'1993-10-13',
'1993-10-14',
'1993-10-15',
'1993-10-18',
'1993-10-19',
'1993-10-20',
'1993-10-21',
'1993-10-22',
'1993-10-25',
'1993-10-26',
'1993-10-27',
'1993-10-28',
'1993-10-29',
'1993-11-01',
'1993-11-02',
'1993-11-03',
'1993-11-04',
'1993-11-05',
'1993-11-08',
'1993-11-09',
'1993-11-10',
'1993-11-11',
'1993-11-12',
'1993-11-15',
'1993-11-16',
'1993-11-17',
'1993-11-18',
'1993-11-19',
'1993-11-22',
'1993-11-23',
'1993-11-24',
'1993-11-25',
'1993-11-26',
'1993-11-29',
'1993-11-30',
'1993-12-01',
'1993-12-02',
'1993-12-03',
'1993-12-06',
'1993-12-07',
'1993-12-08',
'1993-12-09',
'1993-12-10',
'1993-12-13',
'1993-12-14',
'1993-12-15',
'1993-12-16',
'1993-12-17',
'1993-12-20',
'1993-12-21',
'1993-12-22',
'1993-12-23',
'1993-12-24',
'1993-12-27',
'1993-12-28',
'1993-12-29',
'1993-12-30',
'1993-12-31',
'1994-01-03',
'1994-01-04',
'1994-01-05',
'1994-01-06',
'1994-01-07',
'1994-01-10',
'1994-01-11',
'1994-01-12',
'1994-01-13',
'1994-01-14',
'1994-01-17',
'1994-01-18',
'1994-01-19',
'1994-01-20',
'1994-01-21',
'1994-01-24',
'1994-01-25',
'1994-01-26',
'1994-01-27',
'1994-01-28',
'1994-01-31',
'1994-02-01',
'1994-02-02',
'1994-02-03',
'1994-02-04',
'1994-02-14',
'1994-02-15',
'1994-02-16',
'1994-02-17',
'1994-02-18',
'1994-02-21',
'1994-02-22',
'1994-02-23',
'1994-02-24',
'1994-02-25',
'1994-02-28',
'1994-03-01',
'1994-03-02',
'1994-03-03',
'1994-03-04',
'1994-03-07',
'1994-03-08',
'1994-03-09',
'1994-03-10',
'1994-03-11',
'1994-03-14',
'1994-03-15',
'1994-03-16',
'1994-03-17',
'1994-03-18',
'1994-03-21',
'1994-03-22',
'1994-03-23',
'1994-03-24',
'1994-03-25',
'1994-03-28',
'1994-03-29',
'1994-03-30',
'1994-03-31',
'1994-04-01',
'1994-04-04',
'1994-04-05',
'1994-04-06',
'1994-04-07',
'1994-04-08',
'1994-04-11',
'1994-04-12',
'1994-04-13',
'1994-04-14',
'1994-04-15',
'1994-04-18',
'1994-04-19',
'1994-04-20',
'1994-04-21',
'1994-04-22',
'1994-04-25',
'1994-04-26',
'1994-04-27',
'1994-04-28',
'1994-04-29',
'1994-05-03',
'1994-05-04',
'1994-05-05',
'1994-05-06',
'1994-05-09',
'1994-05-10',
'1994-05-11',
'1994-05-12',
'1994-05-13',
'1994-05-16',
'1994-05-17',
'1994-05-18',
'1994-05-19',
'1994-05-20',
'1994-05-23',
'1994-05-24',
'1994-05-25',
'1994-05-26',
'1994-05-27',
'1994-05-30',
'1994-05-31',
'1994-06-01',
'1994-06-02',
'1994-06-03',
'1994-06-06',
'1994-06-07',
'1994-06-08',
'1994-06-09',
'1994-06-10',
'1994-06-13',
'1994-06-14',
'1994-06-15',
'1994-06-16',
'1994-06-17',
'1994-06-20',
'1994-06-21',
'1994-06-22',
'1994-06-23',
'1994-06-24',
'1994-06-27',
'1994-06-28',
'1994-06-29',
'1994-06-30',
'1994-07-01',
'1994-07-04',
'1994-07-05',
'1994-07-06',
'1994-07-07',
'1994-07-08',
'1994-07-11',
'1994-07-12',
'1994-07-13',
'1994-07-14',
'1994-07-15',
'1994-07-18',
'1994-07-19',
'1994-07-20',
'1994-07-21',
'1994-07-22',
'1994-07-25',
'1994-07-26',
'1994-07-27',
'1994-07-28',
'1994-07-29',
'1994-08-01',
'1994-08-02',
'1994-08-03',
'1994-08-04',
'1994-08-05',
'1994-08-08',
'1994-08-09',
'1994-08-10',
'1994-08-11',
'1994-08-12',
'1994-08-15',
'1994-08-16',
'1994-08-17',
'1994-08-18',
'1994-08-19',
'1994-08-22',
'1994-08-23',
'1994-08-24',
'1994-08-25',
'1994-08-26',
'1994-08-29',
'1994-08-30',
'1994-08-31',
'1994-09-01',
'1994-09-02',
'1994-09-05',
'1994-09-06',
'1994-09-07',
'1994-09-08',
'1994-09-09',
'1994-09-12',
'1994-09-13',
'1994-09-14',
'1994-09-15',
'1994-09-16',
'1994-09-19',
'1994-09-20',
'1994-09-21',
'1994-09-22',
'1994-09-23',
'1994-09-26',
'1994-09-27',
'1994-09-28',
'1994-09-29',
'1994-09-30',
'1994-10-05',
'1994-10-06',
'1994-10-07',
'1994-10-10',
'1994-10-11',
'1994-10-12',
'1994-10-13',
'1994-10-14',
'1994-10-17',
'1994-10-18',
'1994-10-19',
'1994-10-20',
'1994-10-21',
'1994-10-24',
'1994-10-25',
'1994-10-26',
'1994-10-27',
'1994-10-28',
'1994-10-31',
'1994-11-01',
'1994-11-02',
'1994-11-03',
'1994-11-04',
'1994-11-07',
'1994-11-08',
'1994-11-09',
'1994-11-10',
'1994-11-11',
'1994-11-14',
'1994-11-15',
'1994-11-16',
'1994-11-17',
'1994-11-18',
'1994-11-21',
'1994-11-22',
'1994-11-23',
'1994-11-24',
'1994-11-25',
'1994-11-28',
'1994-11-29',
'1994-11-30',
'1994-12-01',
'1994-12-02',
'1994-12-05',
'1994-12-06',
'1994-12-07',
'1994-12-08',
'1994-12-09',
'1994-12-12',
'1994-12-13',
'1994-12-14',
'1994-12-15',
'1994-12-16',
'1994-12-19',
'1994-12-20',
'1994-12-21',
'1994-12-22',
'1994-12-23',
'1994-12-26',
'1994-12-27',
'1994-12-28',
'1994-12-29',
'1994-12-30',
'1995-01-03',
'1995-01-04',
'1995-01-05',
'1995-01-06',
'1995-01-09',
'1995-01-10',
'1995-01-11',
'1995-01-12',
'1995-01-13',
'1995-01-16',
'1995-01-17',
'1995-01-18',
'1995-01-19',
'1995-01-20',
'1995-01-23',
'1995-01-24',
'1995-01-25',
'1995-01-26',
'1995-01-27',
'1995-02-06',
'1995-02-07',
'1995-02-08',
'1995-02-09',
'1995-02-10',
'1995-02-13',
'1995-02-14',
'1995-02-15',
'1995-02-16',
'1995-02-17',
'1995-02-20',
'1995-02-21',
'1995-02-22',
'1995-02-23',
'1995-02-24',
'1995-02-27',
'1995-02-28',
'1995-03-01',
'1995-03-02',
'1995-03-03',
'1995-03-06',
'1995-03-07',
'1995-03-08',
'1995-03-09',
'1995-03-10',
'1995-03-13',
'1995-03-14',
'1995-03-15',
'1995-03-16',
'1995-03-17',
'1995-03-20',
'1995-03-21',
'1995-03-22',
'1995-03-23',
'1995-03-24',
'1995-03-27',
'1995-03-28',
'1995-03-29',
'1995-03-30',
'1995-03-31',
'1995-04-03',
'1995-04-04',
'1995-04-05',
'1995-04-06',
'1995-04-07',
'1995-04-10',
'1995-04-11',
'1995-04-12',
'1995-04-13',
'1995-04-14',
'1995-04-17',
'1995-04-18',
'1995-04-19',
'1995-04-20',
'1995-04-21',
'1995-04-24',
'1995-04-25',
'1995-04-26',
'1995-04-27',
'1995-04-28',
'1995-05-02',
'1995-05-03',
'1995-05-04',
'1995-05-05',
'1995-05-08',
'1995-05-09',
'1995-05-10',
'1995-05-11',
'1995-05-12',
'1995-05-15',
'1995-05-16',
'1995-05-17',
'1995-05-18',
'1995-05-19',
'1995-05-22',
'1995-05-23',
'1995-05-24',
'1995-05-25',
'1995-05-26',
'1995-05-29',
'1995-05-30',
'1995-05-31',
'1995-06-01',
'1995-06-02',
'1995-06-05',
'1995-06-06',
'1995-06-07',
'1995-06-08',
'1995-06-09',
'1995-06-12',
'1995-06-13',
'1995-06-14',
'1995-06-15',
'1995-06-16',
'1995-06-19',
'1995-06-20',
'1995-06-21',
'1995-06-22',
'1995-06-23',
'1995-06-26',
'1995-06-27',
'1995-06-28',
'1995-06-29',
'1995-06-30',
'1995-07-03',
'1995-07-04',
'1995-07-05',
'1995-07-06',
'1995-07-07',
'1995-07-10',
'1995-07-11',
'1995-07-12',
'1995-07-13',
'1995-07-14',
'1995-07-17',
'1995-07-18',
'1995-07-19',
'1995-07-20',
'1995-07-21',
'1995-07-24',
'1995-07-25',
'1995-07-26',
'1995-07-27',
'1995-07-28',
'1995-07-31',
'1995-08-01',
'1995-08-02',
'1995-08-03',
'1995-08-04',
'1995-08-07',
'1995-08-08',
'1995-08-09',
'1995-08-10',
'1995-08-11',
'1995-08-14',
'1995-08-15',
'1995-08-16',
'1995-08-17',
'1995-08-18',
'1995-08-21',
'1995-08-22',
'1995-08-23',
'1995-08-24',
'1995-08-25',
'1995-08-28',
'1995-08-29',
'1995-08-30',
'1995-08-31',
'1995-09-01',
'1995-09-04',
'1995-09-05',
'1995-09-06',
'1995-09-07',
'1995-09-08',
'1995-09-11',
'1995-09-12',
'1995-09-13',
'1995-09-14',
'1995-09-15',
'1995-09-18',
'1995-09-19',
'1995-09-20',
'1995-09-21',
'1995-09-22',
'1995-09-25',
'1995-09-26',
'1995-09-27',
'1995-09-28',
'1995-09-29',
'1995-10-04',
'1995-10-05',
'1995-10-06',
'1995-10-09',
'1995-10-10',
'1995-10-11',
'1995-10-12',
'1995-10-13',
'1995-10-16',
'1995-10-17',
'1995-10-18',
'1995-10-19',
'1995-10-20',
'1995-10-23',
'1995-10-24',
'1995-10-25',
'1995-10-26',
'1995-10-27',
'1995-10-30',
'1995-10-31',
'1995-11-01',
'1995-11-02',
'1995-11-03',
'1995-11-06',
'1995-11-07',
'1995-11-08',
'1995-11-09',
'1995-11-10',
'1995-11-13',
'1995-11-14',
'1995-11-15',
'1995-11-16',
'1995-11-17',
'1995-11-20',
'1995-11-21',
'1995-11-22',
'1995-11-23',
'1995-11-24',
'1995-11-27',
'1995-11-28',
'1995-11-29',
'1995-11-30',
'1995-12-01',
'1995-12-04',
'1995-12-05',
'1995-12-06',
'1995-12-07',
'1995-12-08',
'1995-12-11',
'1995-12-12',
'1995-12-13',
'1995-12-14',
'1995-12-15',
'1995-12-18',
'1995-12-19',
'1995-12-20',
'1995-12-21',
'1995-12-22',
'1995-12-25',
'1995-12-26',
'1995-12-27',
'1995-12-28',
'1995-12-29',
'1996-01-02',
'1996-01-03',
'1996-01-04',
'1996-01-05',
'1996-01-08',
'1996-01-09',
'1996-01-10',
'1996-01-11',
'1996-01-12',
'1996-01-15',
'1996-01-16',
'1996-01-17',
'1996-01-18',
'1996-01-19',
'1996-01-22',
'1996-01-23',
'1996-01-24',
'1996-01-25',
'1996-01-26',
'1996-01-29',
'1996-01-30',
'1996-01-31',
'1996-02-01',
'1996-02-02',
'1996-02-05',
'1996-02-06',
'1996-02-07',
'1996-02-08',
'1996-02-09',
'1996-02-12',
'1996-02-13',
'1996-02-14',
'1996-02-15',
'1996-02-16',
'1996-03-04',
'1996-03-05',
'1996-03-06',
'1996-03-07',
'1996-03-08',
'1996-03-11',
'1996-03-12',
'1996-03-13',
'1996-03-14',
'1996-03-15',
'1996-03-18',
'1996-03-19',
'1996-03-20',
'1996-03-21',
'1996-03-22',
'1996-03-25',
'1996-03-26',
'1996-03-27',
'1996-03-28',
'1996-03-29',
'1996-04-01',
'1996-04-02',
'1996-04-03',
'1996-04-04',
'1996-04-05',
'1996-04-08',
'1996-04-09',
'1996-04-10',
'1996-04-11',
'1996-04-12',
'1996-04-15',
'1996-04-16',
'1996-04-17',
'1996-04-18',
'1996-04-19',
'1996-04-22',
'1996-04-23',
'1996-04-24',
'1996-04-25',
'1996-04-26',
'1996-04-29',
'1996-04-30',
'1996-05-02',
'1996-05-03',
'1996-05-06',
'1996-05-07',
'1996-05-08',
'1996-05-09',
'1996-05-10',
'1996-05-13',
'1996-05-14',
'1996-05-15',
'1996-05-16',
'1996-05-17',
'1996-05-20',
'1996-05-21',
'1996-05-22',
'1996-05-23',
'1996-05-24',
'1996-05-27',
'1996-05-28',
'1996-05-29',
'1996-05-30',
'1996-05-31',
'1996-06-03',
'1996-06-04',
'1996-06-05',
'1996-06-06',
'1996-06-07',
'1996-06-10',
'1996-06-11',
'1996-06-12',
'1996-06-13',
'1996-06-14',
'1996-06-17',
'1996-06-18',
'1996-06-19',
'1996-06-20',
'1996-06-21',
'1996-06-24',
'1996-06-25',
'1996-06-26',
'1996-06-27',
'1996-06-28',
'1996-07-01',
'1996-07-02',
'1996-07-03',
'1996-07-04',
'1996-07-05',
'1996-07-08',
'1996-07-09',
'1996-07-10',
'1996-07-11',
'1996-07-12',
'1996-07-15',
'1996-07-16',
'1996-07-17',
'1996-07-18',
'1996-07-19',
'1996-07-22',
'1996-07-23',
'1996-07-24',
'1996-07-25',
'1996-07-26',
'1996-07-29',
'1996-07-30',
'1996-07-31',
'1996-08-01',
'1996-08-02',
'1996-08-05',
'1996-08-06',
'1996-08-07',
'1996-08-08',
'1996-08-09',
'1996-08-12',
'1996-08-13',
'1996-08-14',
'1996-08-15',
'1996-08-16',
'1996-08-19',
'1996-08-20',
'1996-08-21',
'1996-08-22',
'1996-08-23',
'1996-08-26',
'1996-08-27',
'1996-08-28',
'1996-08-29',
'1996-08-30',
'1996-09-02',
'1996-09-03',
'1996-09-04',
'1996-09-05',
'1996-09-06',
'1996-09-09',
'1996-09-10',
'1996-09-11',
'1996-09-12',
'1996-09-13',
'1996-09-16',
'1996-09-17',
'1996-09-18',
'1996-09-19',
'1996-09-20',
'1996-09-23',
'1996-09-24',
'1996-09-25',
'1996-09-26',
'1996-09-27',
'1996-10-03',
'1996-10-04',
'1996-10-07',
'1996-10-08',
'1996-10-09',
'1996-10-10',
'1996-10-11',
'1996-10-14',
'1996-10-15',
'1996-10-16',
'1996-10-17',
'1996-10-18',
'1996-10-21',
'1996-10-22',
'1996-10-23',
'1996-10-24',
'1996-10-25',
'1996-10-28',
'1996-10-29',
'1996-10-30',
'1996-10-31',
'1996-11-01',
'1996-11-04',
'1996-11-05',
'1996-11-06',
'1996-11-07',
'1996-11-08',
'1996-11-11',
'1996-11-12',
'1996-11-13',
'1996-11-14',
'1996-11-15',
'1996-11-18',
'1996-11-19',
'1996-11-20',
'1996-11-21',
'1996-11-22',
'1996-11-25',
'1996-11-26',
'1996-11-27',
'1996-11-28',
'1996-11-29',
'1996-12-02',
'1996-12-03',
'1996-12-04',
'1996-12-05',
'1996-12-06',
'1996-12-09',
'1996-12-10',
'1996-12-11',
'1996-12-12',
'1996-12-13',
'1996-12-16',
'1996-12-17',
'1996-12-18',
'1996-12-19',
'1996-12-20',
'1996-12-23',
'1996-12-24',
'1996-12-25',
'1996-12-26',
'1996-12-27',
'1996-12-30',
'1996-12-31',
'1997-01-02',
'1997-01-03',
'1997-01-06',
'1997-01-07',
'1997-01-08',
'1997-01-09',
'1997-01-10',
'1997-01-13',
'1997-01-14',
'1997-01-15',
'1997-01-16',
'1997-01-17',
'1997-01-20',
'1997-01-21',
'1997-01-22',
'1997-01-23',
'1997-01-24',
'1997-01-27',
'1997-01-28',
'1997-01-29',
'1997-01-30',
'1997-01-31',
'1997-02-17',
'1997-02-18',
'1997-02-19',
'1997-02-20',
'1997-02-21',
'1997-02-24',
'1997-02-25',
'1997-02-26',
'1997-02-27',
'1997-02-28',
'1997-03-03',
'1997-03-04',
'1997-03-05',
'1997-03-06',
'1997-03-07',
'1997-03-10',
'1997-03-11',
'1997-03-12',
'1997-03-13',
'1997-03-14',
'1997-03-17',
'1997-03-18',
'1997-03-19',
'1997-03-20',
'1997-03-21',
'1997-03-24',
'1997-03-25',
'1997-03-26',
'1997-03-27',
'1997-03-28',
'1997-03-31',
'1997-04-01',
'1997-04-02',
'1997-04-03',
'1997-04-04',
'1997-04-07',
'1997-04-08',
'1997-04-09',
'1997-04-10',
'1997-04-11',
'1997-04-14',
'1997-04-15',
'1997-04-16',
'1997-04-17',
'1997-04-18',
'1997-04-21',
'1997-04-22',
'1997-04-23',
'1997-04-24',
'1997-04-25',
'1997-04-28',
'1997-04-29',
'1997-04-30',
'1997-05-05',
'1997-05-06',
'1997-05-07',
'1997-05-08',
'1997-05-09',
'1997-05-12',
'1997-05-13',
'1997-05-14',
'1997-05-15',
'1997-05-16',
'1997-05-19',
'1997-05-20',
'1997-05-21',
'1997-05-22',
'1997-05-23',
'1997-05-26',
'1997-05-27',
'1997-05-28',
'1997-05-29',
'1997-05-30',
'1997-06-02',
'1997-06-03',
'1997-06-04',
'1997-06-05',
'1997-06-06',
'1997-06-09',
'1997-06-10',
'1997-06-11',
'1997-06-12',
'1997-06-13',
'1997-06-16',
'1997-06-17',
'1997-06-18',
'1997-06-19',
'1997-06-20',
'1997-06-23',
'1997-06-24',
'1997-06-25',
'1997-06-26',
'1997-06-27',
'1997-07-02',
'1997-07-03',
'1997-07-04',
'1997-07-07',
'1997-07-08',
'1997-07-09',
'1997-07-10',
'1997-07-11',
'1997-07-14',
'1997-07-15',
'1997-07-16',
'1997-07-17',
'1997-07-18',
'1997-07-21',
'1997-07-22',
'1997-07-23',
'1997-07-24',
'1997-07-25',
'1997-07-28',
'1997-07-29',
'1997-07-30',
'1997-07-31',
'1997-08-01',
'1997-08-04',
'1997-08-05',
'1997-08-06',
'1997-08-07',
'1997-08-08',
'1997-08-11',
'1997-08-12',
'1997-08-13',
'1997-08-14',
'1997-08-15',
'1997-08-18',
'1997-08-19',
'1997-08-20',
'1997-08-21',
'1997-08-22',
'1997-08-25',
'1997-08-26',
'1997-08-27',
'1997-08-28',
'1997-08-29',
'1997-09-01',
'1997-09-02',
'1997-09-03',
'1997-09-04',
'1997-09-05',
'1997-09-08',
'1997-09-09',
'1997-09-10',
'1997-09-11',
'1997-09-12',
'1997-09-15',
'1997-09-16',
'1997-09-17',
'1997-09-18',
'1997-09-19',
'1997-09-22',
'1997-09-23',
'1997-09-24',
'1997-09-25',
'1997-09-26',
'1997-09-29',
'1997-09-30',
'1997-10-06',
'1997-10-07',
'1997-10-08',
'1997-10-09',
'1997-10-10',
'1997-10-13',
'1997-10-14',
'1997-10-15',
'1997-10-16',
'1997-10-17',
'1997-10-20',
'1997-10-21',
'1997-10-22',
'1997-10-23',
'1997-10-24',
'1997-10-27',
'1997-10-28',
'1997-10-29',
'1997-10-30',
'1997-10-31',
'1997-11-03',
'1997-11-04',
'1997-11-05',
'1997-11-06',
'1997-11-07',
'1997-11-10',
'1997-11-11',
'1997-11-12',
'1997-11-13',
'1997-11-14',
'1997-11-17',
'1997-11-18',
'1997-11-19',
'1997-11-20',
'1997-11-21',
'1997-11-24',
'1997-11-25',
'1997-11-26',
'1997-11-27',
'1997-11-28',
'1997-12-01',
'1997-12-02',
'1997-12-03',
'1997-12-04',
'1997-12-05',
'1997-12-08',
'1997-12-09',
'1997-12-10',
'1997-12-11',
'1997-12-12',
'1997-12-15',
'1997-12-16',
'1997-12-17',
'1997-12-18',
'1997-12-19',
'1997-12-22',
'1997-12-23',
'1997-12-24',
'1997-12-25',
'1997-12-26',
'1997-12-29',
'1997-12-30',
'1997-12-31',
'1998-01-05',
'1998-01-06',
'1998-01-07',
'1998-01-08',
'1998-01-09',
'1998-01-12',
'1998-01-13',
'1998-01-14',
'1998-01-15',
'1998-01-16',
'1998-01-19',
'1998-01-20',
'1998-01-21',
'1998-01-22',
'1998-01-23',
'1998-02-09',
'1998-02-10',
'1998-02-11',
'1998-02-12',
'1998-02-13',
'1998-02-16',
'1998-02-17',
'1998-02-18',
'1998-02-19',
'1998-02-20',
'1998-02-23',
'1998-02-24',
'1998-02-25',
'1998-02-26',
'1998-02-27',
'1998-03-02',
'1998-03-03',
'1998-03-04',
'1998-03-05',
'1998-03-06',
'1998-03-09',
'1998-03-10',
'1998-03-11',
'1998-03-12',
'1998-03-13',
'1998-03-16',
'1998-03-17',
'1998-03-18',
'1998-03-19',
'1998-03-20',
'1998-03-23',
'1998-03-24',
'1998-03-25',
'1998-03-26',
'1998-03-27',
'1998-03-30',
'1998-03-31',
'1998-04-01',
'1998-04-02',
'1998-04-03',
'1998-04-06',
'1998-04-07',
'1998-04-08',
'1998-04-09',
'1998-04-10',
'1998-04-13',
'1998-04-14',
'1998-04-15',
'1998-04-16',
'1998-04-17',
'1998-04-20',
'1998-04-21',
'1998-04-22',
'1998-04-23',
'1998-04-24',
'1998-04-27',
'1998-04-28',
'1998-04-29',
'1998-04-30',
'1998-05-04',
'1998-05-05',
'1998-05-06',
'1998-05-07',
'1998-05-08',
'1998-05-11',
'1998-05-12',
'1998-05-13',
'1998-05-14',
'1998-05-15',
'1998-05-18',
'1998-05-19',
'1998-05-20',
'1998-05-21',
'1998-05-22',
'1998-05-25',
'1998-05-26',
'1998-05-27',
'1998-05-28',
'1998-05-29',
'1998-06-01',
'1998-06-02',
'1998-06-03',
'1998-06-04',
'1998-06-05',
'1998-06-08',
'1998-06-09',
'1998-06-10',
'1998-06-11',
'1998-06-12',
'1998-06-15',
'1998-06-16',
'1998-06-17',
'1998-06-18',
'1998-06-19',
'1998-06-22',
'1998-06-23',
'1998-06-24',
'1998-06-25',
'1998-06-26',
'1998-06-29',
'1998-06-30',
'1998-07-01',
'1998-07-02',
'1998-07-03',
'1998-07-06',
'1998-07-07',
'1998-07-08',
'1998-07-09',
'1998-07-10',
'1998-07-13',
'1998-07-14',
'1998-07-15',
'1998-07-16',
'1998-07-17',
'1998-07-20',
'1998-07-21',
'1998-07-22',
'1998-07-23',
'1998-07-24',
'1998-07-27',
'1998-07-28',
'1998-07-29',
'1998-07-30',
'1998-07-31',
'1998-08-03',
'1998-08-04',
'1998-08-05',
'1998-08-06',
'1998-08-07',
'1998-08-10',
'1998-08-11',
'1998-08-12',
'1998-08-13',
'1998-08-14',
'1998-08-17',
'1998-08-18',
'1998-08-19',
'1998-08-20',
'1998-08-21',
'1998-08-24',
'1998-08-25',
'1998-08-26',
'1998-08-27',
'1998-08-28',
'1998-08-31',
'1998-09-01',
'1998-09-02',
'1998-09-03',
'1998-09-04',
'1998-09-07',
'1998-09-08',
'1998-09-09',
'1998-09-10',
'1998-09-11',
'1998-09-14',
'1998-09-15',
'1998-09-16',
'1998-09-17',
'1998-09-18',
'1998-09-21',
'1998-09-22',
'1998-09-23',
'1998-09-24',
'1998-09-25',
'1998-09-28',
'1998-09-29',
'1998-09-30',
'1998-10-05',
'1998-10-06',
'1998-10-07',
'1998-10-08',
'1998-10-09',
'1998-10-12',
'1998-10-13',
'1998-10-14',
'1998-10-15',
'1998-10-16',
'1998-10-19',
'1998-10-20',
'1998-10-21',
'1998-10-22',
'1998-10-23',
'1998-10-26',
'1998-10-27',
'1998-10-28',
'1998-10-29',
'1998-10-30',
'1998-11-02',
'1998-11-03',
'1998-11-04',
'1998-11-05',
'1998-11-06',
'1998-11-09',
'1998-11-10',
'1998-11-11',
'1998-11-12',
'1998-11-13',
'1998-11-16',
'1998-11-17',
'1998-11-18',
'1998-11-19',
'1998-11-20',
'1998-11-23',
'1998-11-24',
'1998-11-25',
'1998-11-26',
'1998-11-27',
'1998-11-30',
'1998-12-01',
'1998-12-02',
'1998-12-03',
'1998-12-04',
'1998-12-07',
'1998-12-08',
'1998-12-09',
'1998-12-10',
'1998-12-11',
'1998-12-14',
'1998-12-15',
'1998-12-16',
'1998-12-17',
'1998-12-18',
'1998-12-21',
'1998-12-22',
'1998-12-23',
'1998-12-24',
'1998-12-25',
'1998-12-28',
'1998-12-29',
'1998-12-30',
'1998-12-31',
'1999-01-04',
'1999-01-05',
'1999-01-06',
'1999-01-07',
'1999-01-08',
'1999-01-11',
'1999-01-12',
'1999-01-13',
'1999-01-14',
'1999-01-15',
'1999-01-18',
'1999-01-19',
'1999-01-20',
'1999-01-21',
'1999-01-22',
'1999-01-25',
'1999-01-26',
'1999-01-27',
'1999-01-28',
'1999-01-29',
'1999-02-01',
'1999-02-02',
'1999-02-03',
'1999-02-04',
'1999-02-05',
'1999-02-08',
'1999-02-09',
'1999-03-01',
'1999-03-02',
'1999-03-03',
'1999-03-04',
'1999-03-05',
'1999-03-08',
'1999-03-09',
'1999-03-10',
'1999-03-11',
'1999-03-12',
'1999-03-15',
'1999-03-16',
'1999-03-17',
'1999-03-18',
'1999-03-19',
'1999-03-22',
'1999-03-23',
'1999-03-24',
'1999-03-25',
'1999-03-26',
'1999-03-29',
'1999-03-30',
'1999-03-31',
'1999-04-01',
'1999-04-02',
'1999-04-05',
'1999-04-06',
'1999-04-07',
'1999-04-08',
'1999-04-09',
'1999-04-12',
'1999-04-13',
'1999-04-14',
'1999-04-15',
'1999-04-16',
'1999-04-19',
'1999-04-20',
'1999-04-21',
'1999-04-22',
'1999-04-23',
'1999-04-26',
'1999-04-27',
'1999-04-28',
'1999-04-29',
'1999-04-30',
'1999-05-04',
'1999-05-05',
'1999-05-06',
'1999-05-07',
'1999-05-10',
'1999-05-11',
'1999-05-12',
'1999-05-13',
'1999-05-14',
'1999-05-17',
'1999-05-18',
'1999-05-19',
'1999-05-20',
'1999-05-21',
'1999-05-24',
'1999-05-25',
'1999-05-26',
'1999-05-27',
'1999-05-28',
'1999-05-31',
'1999-06-01',
'1999-06-02',
'1999-06-03',
'1999-06-04',
'1999-06-07',
'1999-06-08',
'1999-06-09',
'1999-06-10',
'1999-06-11',
'1999-06-14',
'1999-06-15',
'1999-06-16',
'1999-06-17',
'1999-06-18',
'1999-06-21',
'1999-06-22',
'1999-06-23',
'1999-06-24',
'1999-06-25',
'1999-06-28',
'1999-06-29',
'1999-06-30',
'1999-07-01',
'1999-07-02',
'1999-07-05',
'1999-07-06',
'1999-07-07',
'1999-07-08',
'1999-07-09',
'1999-07-12',
'1999-07-13',
'1999-07-14',
'1999-07-15',
'1999-07-16',
'1999-07-19',
'1999-07-20',
'1999-07-21',
'1999-07-22',
'1999-07-23',
'1999-07-26',
'1999-07-27',
'1999-07-28',
'1999-07-29',
'1999-07-30',
'1999-08-02',
'1999-08-03',
'1999-08-04',
'1999-08-05',
'1999-08-06',
'1999-08-09',
'1999-08-10',
'1999-08-11',
'1999-08-12',
'1999-08-13',
'1999-08-16',
'1999-08-17',
'1999-08-18',
'1999-08-19',
'1999-08-20',
'1999-08-23',
'1999-08-24',
'1999-08-25',
'1999-08-26',
'1999-08-27',
'1999-08-30',
'1999-08-31',
'1999-09-01',
'1999-09-02',
'1999-09-03',
'1999-09-06',
'1999-09-07',
'1999-09-08',
'1999-09-09',
'1999-09-10',
'1999-09-13',
'1999-09-14',
'1999-09-15',
'1999-09-16',
'1999-09-17',
'1999-09-20',
'1999-09-21',
'1999-09-22',
'1999-09-23',
'1999-09-24',
'1999-09-27',
'1999-09-28',
'1999-09-29',
'1999-09-30',
'1999-10-08',
'1999-10-11',
'1999-10-12',
'1999-10-13',
'1999-10-14',
'1999-10-15',
'1999-10-18',
'1999-10-19',
'1999-10-20',
'1999-10-21',
'1999-10-22',
'1999-10-25',
'1999-10-26',
'1999-10-27',
'1999-10-28',
'1999-10-29',
'1999-11-01',
'1999-11-02',
'1999-11-03',
'1999-11-04',
'1999-11-05',
'1999-11-08',
'1999-11-09',
'1999-11-10',
'1999-11-11',
'1999-11-12',
'1999-11-15',
'1999-11-16',
'1999-11-17',
'1999-11-18',
'1999-11-19',
'1999-11-22',
'1999-11-23',
'1999-11-24',
'1999-11-25',
'1999-11-26',
'1999-11-29',
'1999-11-30',
'1999-12-01',
'1999-12-02',
'1999-12-03',
'1999-12-06',
'1999-12-07',
'1999-12-08',
'1999-12-09',
'1999-12-10',
'1999-12-13',
'1999-12-14',
'1999-12-15',
'1999-12-16',
'1999-12-17',
'1999-12-21',
'1999-12-22',
'1999-12-23',
'1999-12-24',
'1999-12-27',
'1999-12-28',
'1999-12-29',
'1999-12-30',
'2000-01-04',
'2000-01-05',
'2000-01-06',
'2000-01-07',
'2000-01-10',
'2000-01-11',
'2000-01-12',
'2000-01-13',
'2000-01-14',
'2000-01-17',
'2000-01-18',
'2000-01-19',
'2000-01-20',
'2000-01-21',
'2000-01-24',
'2000-01-25',
'2000-01-26',
'2000-01-27',
'2000-01-28',
'2000-02-14',
'2000-02-15',
'2000-02-16',
'2000-02-17',
'2000-02-18',
'2000-02-21',
'2000-02-22',
'2000-02-23',
'2000-02-24',
'2000-02-25',
'2000-02-28',
'2000-02-29',
'2000-03-01',
'2000-03-02',
'2000-03-03',
'2000-03-06',
'2000-03-07',
'2000-03-08',
'2000-03-09',
'2000-03-10',
'2000-03-13',
'2000-03-14',
'2000-03-15',
'2000-03-16',
'2000-03-17',
'2000-03-20',
'2000-03-21',
'2000-03-22',
'2000-03-23',
'2000-03-24',
'2000-03-27',
'2000-03-28',
'2000-03-29',
'2000-03-30',
'2000-03-31',
'2000-04-03',
'2000-04-04',
'2000-04-05',
'2000-04-06',
'2000-04-07',
'2000-04-10',
'2000-04-11',
'2000-04-12',
'2000-04-13',
'2000-04-14',
'2000-04-17',
'2000-04-18',
'2000-04-19',
'2000-04-20',
'2000-04-21',
'2000-04-24',
'2000-04-25',
'2000-04-26',
'2000-04-27',
'2000-04-28',
'2000-05-08',
'2000-05-09',
'2000-05-10',
'2000-05-11',
'2000-05-12',
'2000-05-15',
'2000-05-16',
'2000-05-17',
'2000-05-18',
'2000-05-19',
'2000-05-22',
'2000-05-23',
'2000-05-24',
'2000-05-25',
'2000-05-26',
'2000-05-29',
'2000-05-30',
'2000-05-31',
'2000-06-01',
'2000-06-02',
'2000-06-05',
'2000-06-06',
'2000-06-07',
'2000-06-08',
'2000-06-09',
'2000-06-12',
'2000-06-13',
'2000-06-14',
'2000-06-15',
'2000-06-16',
'2000-06-19',
'2000-06-20',
'2000-06-21',
'2000-06-22',
'2000-06-23',
'2000-06-26',
'2000-06-27',
'2000-06-28',
'2000-06-29',
'2000-06-30',
'2000-07-03',
'2000-07-04',
'2000-07-05',
'2000-07-06',
'2000-07-07',
'2000-07-10',
'2000-07-11',
'2000-07-12',
'2000-07-13',
'2000-07-14',
'2000-07-17',
'2000-07-18',
'2000-07-19',
'2000-07-20',
'2000-07-21',
'2000-07-24',
'2000-07-25',
'2000-07-26',
'2000-07-27',
'2000-07-28',
'2000-07-31',
'2000-08-01',
'2000-08-02',
'2000-08-03',
'2000-08-04',
'2000-08-07',
'2000-08-08',
'2000-08-09',
'2000-08-10',
'2000-08-11',
'2000-08-14',
'2000-08-15',
'2000-08-16',
'2000-08-17',
'2000-08-18',
'2000-08-21',
'2000-08-22',
'2000-08-23',
'2000-08-24',
'2000-08-25',
'2000-08-28',
'2000-08-29',
'2000-08-30',
'2000-08-31',
'2000-09-01',
'2000-09-04',
'2000-09-05',
'2000-09-06',
'2000-09-07',
'2000-09-08',
'2000-09-11',
'2000-09-12',
'2000-09-13',
'2000-09-14',
'2000-09-15',
'2000-09-18',
'2000-09-19',
'2000-09-20',
'2000-09-21',
'2000-09-22',
'2000-09-25',
'2000-09-26',
'2000-09-27',
'2000-09-28',
'2000-09-29',
'2000-10-09',
'2000-10-10',
'2000-10-11',
'2000-10-12',
'2000-10-13',
'2000-10-16',
'2000-10-17',
'2000-10-18',
'2000-10-19',
'2000-10-20',
'2000-10-23',
'2000-10-24',
'2000-10-25',
'2000-10-26',
'2000-10-27',
'2000-10-30',
'2000-10-31',
'2000-11-01',
'2000-11-02',
'2000-11-03',
'2000-11-06',
'2000-11-07',
'2000-11-08',
'2000-11-09',
'2000-11-10',
'2000-11-13',
'2000-11-14',
'2000-11-15',
'2000-11-16',
'2000-11-17',
'2000-11-20',
'2000-11-21',
'2000-11-22',
'2000-11-23',
'2000-11-24',
'2000-11-27',
'2000-11-28',
'2000-11-29',
'2000-11-30',
'2000-12-01',
'2000-12-04',
'2000-12-05',
'2000-12-06',
'2000-12-07',
'2000-12-08',
'2000-12-11',
'2000-12-12',
'2000-12-13',
'2000-12-14',
'2000-12-15',
'2000-12-18',
'2000-12-19',
'2000-12-20',
'2000-12-21',
'2000-12-22',
'2000-12-25',
'2000-12-26',
'2000-12-27',
'2000-12-28',
'2000-12-29',
'2001-01-02',
'2001-01-03',
'2001-01-04',
'2001-01-05',
'2001-01-08',
'2001-01-09',
'2001-01-10',
'2001-01-11',
'2001-01-12',
'2001-01-15',
'2001-01-16',
'2001-01-17',
'2001-01-18',
'2001-01-19',
'2001-02-05',
'2001-02-06',
'2001-02-07',
'2001-02-08',
'2001-02-09',
'2001-02-12',
'2001-02-13',
'2001-02-14',
'2001-02-15',
'2001-02-16',
'2001-02-19',
'2001-02-20',
'2001-02-21',
'2001-02-22',
'2001-02-23',
'2001-02-26',
'2001-02-27',
'2001-02-28',
'2001-03-01',
'2001-03-02',
'2001-03-05',
'2001-03-06',
'2001-03-07',
'2001-03-08',
'2001-03-09',
'2001-03-12',
'2001-03-13',
'2001-03-14',
'2001-03-15',
'2001-03-16',
'2001-03-19',
'2001-03-20',
'2001-03-21',
'2001-03-22',
'2001-03-23',
'2001-03-26',
'2001-03-27',
'2001-03-28',
'2001-03-29',
'2001-03-30',
'2001-04-02',
'2001-04-03',
'2001-04-04',
'2001-04-05',
'2001-04-06',
'2001-04-09',
'2001-04-10',
'2001-04-11',
'2001-04-12',
'2001-04-13',
'2001-04-16',
'2001-04-17',
'2001-04-18',
'2001-04-19',
'2001-04-20',
'2001-04-23',
'2001-04-24',
'2001-04-25',
'2001-04-26',
'2001-04-27',
'2001-04-30',
'2001-05-08',
'2001-05-09',
'2001-05-10',
'2001-05-11',
'2001-05-14',
'2001-05-15',
'2001-05-16',
'2001-05-17',
'2001-05-18',
'2001-05-21',
'2001-05-22',
'2001-05-23',
'2001-05-24',
'2001-05-25',
'2001-05-28',
'2001-05-29',
'2001-05-30',
'2001-05-31',
'2001-06-01',
'2001-06-04',
'2001-06-05',
'2001-06-06',
'2001-06-07',
'2001-06-08',
'2001-06-11',
'2001-06-12',
'2001-06-13',
'2001-06-14',
'2001-06-15',
'2001-06-18',
'2001-06-19',
'2001-06-20',
'2001-06-21',
'2001-06-22',
'2001-06-25',
'2001-06-26',
'2001-06-27',
'2001-06-28',
'2001-06-29',
'2001-07-02',
'2001-07-03',
'2001-07-04',
'2001-07-05',
'2001-07-06',
'2001-07-09',
'2001-07-10',
'2001-07-11',
'2001-07-12',
'2001-07-13',
'2001-07-16',
'2001-07-17',
'2001-07-18',
'2001-07-19',
'2001-07-20',
'2001-07-23',
'2001-07-24',
'2001-07-25',
'2001-07-26',
'2001-07-27',
'2001-07-30',
'2001-07-31',
'2001-08-01',
'2001-08-02',
'2001-08-03',
'2001-08-06',
'2001-08-07',
'2001-08-08',
'2001-08-09',
'2001-08-10',
'2001-08-13',
'2001-08-14',
'2001-08-15',
'2001-08-16',
'2001-08-17',
'2001-08-20',
'2001-08-21',
'2001-08-22',
'2001-08-23',
'2001-08-24',
'2001-08-27',
'2001-08-28',
'2001-08-29',
'2001-08-30',
'2001-08-31',
'2001-09-03',
'2001-09-04',
'2001-09-05',
'2001-09-06',
'2001-09-07',
'2001-09-10',
'2001-09-11',
'2001-09-12',
'2001-09-13',
'2001-09-14',
'2001-09-17',
'2001-09-18',
'2001-09-19',
'2001-09-20',
'2001-09-21',
'2001-09-24',
'2001-09-25',
'2001-09-26',
'2001-09-27',
'2001-09-28',
'2001-10-08',
'2001-10-09',
'2001-10-10',
'2001-10-11',
'2001-10-12',
'2001-10-15',
'2001-10-16',
'2001-10-17',
'2001-10-18',
'2001-10-19',
'2001-10-22',
'2001-10-23',
'2001-10-24',
'2001-10-25',
'2001-10-26',
'2001-10-29',
'2001-10-30',
'2001-10-31',
'2001-11-01',
'2001-11-02',
'2001-11-05',
'2001-11-06',
'2001-11-07',
'2001-11-08',
'2001-11-09',
'2001-11-12',
'2001-11-13',
'2001-11-14',
'2001-11-15',
'2001-11-16',
'2001-11-19',
'2001-11-20',
'2001-11-21',
'2001-11-22',
'2001-11-23',
'2001-11-26',
'2001-11-27',
'2001-11-28',
'2001-11-29',
'2001-11-30',
'2001-12-03',
'2001-12-04',
'2001-12-05',
'2001-12-06',
'2001-12-07',
'2001-12-10',
'2001-12-11',
'2001-12-12',
'2001-12-13',
'2001-12-14',
'2001-12-17',
'2001-12-18',
'2001-12-19',
'2001-12-20',
'2001-12-21',
'2001-12-24',
'2001-12-25',
'2001-12-26',
'2001-12-27',
'2001-12-28',
'2001-12-31',
'2002-01-04',
'2002-01-07',
'2002-01-08',
'2002-01-09',
'2002-01-10',
'2002-01-11',
'2002-01-14',
'2002-01-15',
'2002-01-16',
'2002-01-17',
'2002-01-18',
'2002-01-21',
'2002-01-22',
'2002-01-23',
'2002-01-24',
'2002-01-25',
'2002-01-28',
'2002-01-29',
'2002-01-30',
'2002-01-31',
'2002-02-01',
'2002-02-04',
'2002-02-05',
'2002-02-06',
'2002-02-07',
'2002-02-08',
'2002-02-25',
'2002-02-26',
'2002-02-27',
'2002-02-28',
'2002-03-01',
'2002-03-04',
'2002-03-05',
'2002-03-06',
'2002-03-07',
'2002-03-08',
'2002-03-11',
'2002-03-12',
'2002-03-13',
'2002-03-14',
'2002-03-15',
'2002-03-18',
'2002-03-19',
'2002-03-20',
'2002-03-21',
'2002-03-22',
'2002-03-25',
'2002-03-26',
'2002-03-27',
'2002-03-28',
'2002-03-29',
'2002-04-01',
'2002-04-02',
'2002-04-03',
'2002-04-04',
'2002-04-05',
'2002-04-08',
'2002-04-09',
'2002-04-10',
'2002-04-11',
'2002-04-12',
'2002-04-15',
'2002-04-16',
'2002-04-17',
'2002-04-18',
'2002-04-19',
'2002-04-22',
'2002-04-23',
'2002-04-24',
'2002-04-25',
'2002-04-26',
'2002-04-29',
'2002-04-30',
'2002-05-08',
'2002-05-09',
'2002-05-10',
'2002-05-13',
'2002-05-14',
'2002-05-15',
'2002-05-16',
'2002-05-17',
'2002-05-20',
'2002-05-21',
'2002-05-22',
'2002-05-23',
'2002-05-24',
'2002-05-27',
'2002-05-28',
'2002-05-29',
'2002-05-30',
'2002-05-31',
'2002-06-03',
'2002-06-04',
'2002-06-05',
'2002-06-06',
'2002-06-07',
'2002-06-10',
'2002-06-11',
'2002-06-12',
'2002-06-13',
'2002-06-14',
'2002-06-17',
'2002-06-18',
'2002-06-19',
'2002-06-20',
'2002-06-21',
'2002-06-24',
'2002-06-25',
'2002-06-26',
'2002-06-27',
'2002-06-28',
'2002-07-01',
'2002-07-02',
'2002-07-03',
'2002-07-04',
'2002-07-05',
'2002-07-08',
'2002-07-09',
'2002-07-10',
'2002-07-11',
'2002-07-12',
'2002-07-15',
'2002-07-16',
'2002-07-17',
'2002-07-18',
'2002-07-19',
'2002-07-22',
'2002-07-23',
'2002-07-24',
'2002-07-25',
'2002-07-26',
'2002-07-29',
'2002-07-30',
'2002-07-31',
'2002-08-01',
'2002-08-02',
'2002-08-05',
'2002-08-06',
'2002-08-07',
'2002-08-08',
'2002-08-09',
'2002-08-12',
'2002-08-13',
'2002-08-14',
'2002-08-15',
'2002-08-16',
'2002-08-19',
'2002-08-20',
'2002-08-21',
'2002-08-22',
'2002-08-23',
'2002-08-26',
'2002-08-27',
'2002-08-28',
'2002-08-29',
'2002-08-30',
'2002-09-02',
'2002-09-03',
'2002-09-04',
'2002-09-05',
'2002-09-06',
'2002-09-09',
'2002-09-10',
'2002-09-11',
'2002-09-12',
'2002-09-13',
'2002-09-16',
'2002-09-17',
'2002-09-18',
'2002-09-19',
'2002-09-20',
'2002-09-23',
'2002-09-24',
'2002-09-25',
'2002-09-26',
'2002-09-27',
'2002-10-08',
'2002-10-09',
'2002-10-10',
'2002-10-11',
'2002-10-14',
'2002-10-15',
'2002-10-16',
'2002-10-17',
'2002-10-18',
'2002-10-21',
'2002-10-22',
'2002-10-23',
'2002-10-24',
'2002-10-25',
'2002-10-28',
'2002-10-29',
'2002-10-30',
'2002-10-31',
'2002-11-01',
'2002-11-04',
'2002-11-05',
'2002-11-06',
'2002-11-07',
'2002-11-08',
'2002-11-11',
'2002-11-12',
'2002-11-13',
'2002-11-14',
'2002-11-15',
'2002-11-18',
'2002-11-19',
'2002-11-20',
'2002-11-21',
'2002-11-22',
'2002-11-25',
'2002-11-26',
'2002-11-27',
'2002-11-28',
'2002-11-29',
'2002-12-02',
'2002-12-03',
'2002-12-04',
'2002-12-05',
'2002-12-06',
'2002-12-09',
'2002-12-10',
'2002-12-11',
'2002-12-12',
'2002-12-13',
'2002-12-16',
'2002-12-17',
'2002-12-18',
'2002-12-19',
'2002-12-20',
'2002-12-23',
'2002-12-24',
'2002-12-25',
'2002-12-26',
'2002-12-27',
'2002-12-30',
'2002-12-31',
'2003-01-02',
'2003-01-03',
'2003-01-06',
'2003-01-07',
'2003-01-08',
'2003-01-09',
'2003-01-10',
'2003-01-13',
'2003-01-14',
'2003-01-15',
'2003-01-16',
'2003-01-17',
'2003-01-20',
'2003-01-21',
'2003-01-22',
'2003-01-23',
'2003-01-24',
'2003-01-27',
'2003-01-28',
'2003-01-29',
'2003-02-10',
'2003-02-11',
'2003-02-12',
'2003-02-13',
'2003-02-14',
'2003-02-17',
'2003-02-18',
'2003-02-19',
'2003-02-20',
'2003-02-21',
'2003-02-24',
'2003-02-25',
'2003-02-26',
'2003-02-27',
'2003-02-28',
'2003-03-03',
'2003-03-04',
'2003-03-05',
'2003-03-06',
'2003-03-07',
'2003-03-10',
'2003-03-11',
'2003-03-12',
'2003-03-13',
'2003-03-14',
'2003-03-17',
'2003-03-18',
'2003-03-19',
'2003-03-20',
'2003-03-21',
'2003-03-24',
'2003-03-25',
'2003-03-26',
'2003-03-27',
'2003-03-28',
'2003-03-31',
'2003-04-01',
'2003-04-02',
'2003-04-03',
'2003-04-04',
'2003-04-07',
'2003-04-08',
'2003-04-09',
'2003-04-10',
'2003-04-11',
'2003-04-14',
'2003-04-15',
'2003-04-16',
'2003-04-17',
'2003-04-18',
'2003-04-21',
'2003-04-22',
'2003-04-23',
'2003-04-24',
'2003-04-25',
'2003-04-28',
'2003-04-29',
'2003-04-30',
'2003-05-12',
'2003-05-13',
'2003-05-14',
'2003-05-15',
'2003-05-16',
'2003-05-19',
'2003-05-20',
'2003-05-21',
'2003-05-22',
'2003-05-23',
'2003-05-26',
'2003-05-27',
'2003-05-28',
'2003-05-29',
'2003-05-30',
'2003-06-02',
'2003-06-03',
'2003-06-04',
'2003-06-05',
'2003-06-06',
'2003-06-09',
'2003-06-10',
'2003-06-11',
'2003-06-12',
'2003-06-13',
'2003-06-16',
'2003-06-17',
'2003-06-18',
'2003-06-19',
'2003-06-20',
'2003-06-23',
'2003-06-24',
'2003-06-25',
'2003-06-26',
'2003-06-27',
'2003-06-30',
'2003-07-01',
'2003-07-02',
'2003-07-03',
'2003-07-04',
'2003-07-07',
'2003-07-08',
'2003-07-09',
'2003-07-10',
'2003-07-11',
'2003-07-14',
'2003-07-15',
'2003-07-16',
'2003-07-17',
'2003-07-18',
'2003-07-21',
'2003-07-22',
'2003-07-23',
'2003-07-24',
'2003-07-25',
'2003-07-28',
'2003-07-29',
'2003-07-30',
'2003-07-31',
'2003-08-01',
'2003-08-04',
'2003-08-05',
'2003-08-06',
'2003-08-07',
'2003-08-08',
'2003-08-11',
'2003-08-12',
'2003-08-13',
'2003-08-14',
'2003-08-15',
'2003-08-18',
'2003-08-19',
'2003-08-20',
'2003-08-21',
'2003-08-22',
'2003-08-25',
'2003-08-26',
'2003-08-27',
'2003-08-28',
'2003-08-29',
'2003-09-01',
'2003-09-02',
'2003-09-03',
'2003-09-04',
'2003-09-05',
'2003-09-08',
'2003-09-09',
'2003-09-10',
'2003-09-11',
'2003-09-12',
'2003-09-15',
'2003-09-16',
'2003-09-17',
'2003-09-18',
'2003-09-19',
'2003-09-22',
'2003-09-23',
'2003-09-24',
'2003-09-25',
'2003-09-26',
'2003-09-29',
'2003-09-30',
'2003-10-08',
'2003-10-09',
'2003-10-10',
'2003-10-13',
'2003-10-14',
'2003-10-15',
'2003-10-16',
'2003-10-17',
'2003-10-20',
'2003-10-21',
'2003-10-22',
'2003-10-23',
'2003-10-24',
'2003-10-27',
'2003-10-28',
'2003-10-29',
'2003-10-30',
'2003-10-31',
'2003-11-03',
'2003-11-04',
'2003-11-05',
'2003-11-06',
'2003-11-07',
'2003-11-10',
'2003-11-11',
'2003-11-12',
'2003-11-13',
'2003-11-14',
'2003-11-17',
'2003-11-18',
'2003-11-19',
'2003-11-20',
'2003-11-21',
'2003-11-24',
'2003-11-25',
'2003-11-26',
'2003-11-27',
'2003-11-28',
'2003-12-01',
'2003-12-02',
'2003-12-03',
'2003-12-04',
'2003-12-05',
'2003-12-08',
'2003-12-09',
'2003-12-10',
'2003-12-11',
'2003-12-12',
'2003-12-15',
'2003-12-16',
'2003-12-17',
'2003-12-18',
'2003-12-19',
'2003-12-22',
'2003-12-23',
'2003-12-24',
'2003-12-25',
'2003-12-26',
'2003-12-29',
'2003-12-30',
'2003-12-31',
'2004-01-02',
'2004-01-05',
'2004-01-06',
'2004-01-07',
'2004-01-08',
'2004-01-09',
'2004-01-12',
'2004-01-13',
'2004-01-14',
'2004-01-15',
'2004-01-16',
'2004-01-29',
'2004-01-30',
'2004-02-02',
'2004-02-03',
'2004-02-04',
'2004-02-05',
'2004-02-06',
'2004-02-09',
'2004-02-10',
'2004-02-11',
'2004-02-12',
'2004-02-13',
'2004-02-16',
'2004-02-17',
'2004-02-18',
'2004-02-19',
'2004-02-20',
'2004-02-23',
'2004-02-24',
'2004-02-25',
'2004-02-26',
'2004-02-27',
'2004-03-01',
'2004-03-02',
'2004-03-03',
'2004-03-04',
'2004-03-05',
'2004-03-08',
'2004-03-09',
'2004-03-10',
'2004-03-11',
'2004-03-12',
'2004-03-15',
'2004-03-16',
'2004-03-17',
'2004-03-18',
'2004-03-19',
'2004-03-22',
'2004-03-23',
'2004-03-24',
'2004-03-25',
'2004-03-26',
'2004-03-29',
'2004-03-30',
'2004-03-31',
'2004-04-01',
'2004-04-02',
'2004-04-05',
'2004-04-06',
'2004-04-07',
'2004-04-08',
'2004-04-09',
'2004-04-12',
'2004-04-13',
'2004-04-14',
'2004-04-15',
'2004-04-16',
'2004-04-19',
'2004-04-20',
'2004-04-21',
'2004-04-22',
'2004-04-23',
'2004-04-26',
'2004-04-27',
'2004-04-28',
'2004-04-29',
'2004-04-30',
'2004-05-10',
'2004-05-11',
'2004-05-12',
'2004-05-13',
'2004-05-14',
'2004-05-17',
'2004-05-18',
'2004-05-19',
'2004-05-20',
'2004-05-21',
'2004-05-24',
'2004-05-25',
'2004-05-26',
'2004-05-27',
'2004-05-28',
'2004-05-31',
'2004-06-01',
'2004-06-02',
'2004-06-03',
'2004-06-04',
'2004-06-07',
'2004-06-08',
'2004-06-09',
'2004-06-10',
'2004-06-11',
'2004-06-14',
'2004-06-15',
'2004-06-16',
'2004-06-17',
'2004-06-18',
'2004-06-21',
'2004-06-22',
'2004-06-23',
'2004-06-24',
'2004-06-25',
'2004-06-28',
'2004-06-29',
'2004-06-30',
'2004-07-01',
'2004-07-02',
'2004-07-05',
'2004-07-06',
'2004-07-07',
'2004-07-08',
'2004-07-09',
'2004-07-12',
'2004-07-13',
'2004-07-14',
'2004-07-15',
'2004-07-16',
'2004-07-19',
'2004-07-20',
'2004-07-21',
'2004-07-22',
'2004-07-23',
'2004-07-26',
'2004-07-27',
'2004-07-28',
'2004-07-29',
'2004-07-30',
'2004-08-02',
'2004-08-03',
'2004-08-04',
'2004-08-05',
'2004-08-06',
'2004-08-09',
'2004-08-10',
'2004-08-11',
'2004-08-12',
'2004-08-13',
'2004-08-16',
'2004-08-17',
'2004-08-18',
'2004-08-19',
'2004-08-20',
'2004-08-23',
'2004-08-24',
'2004-08-25',
'2004-08-26',
'2004-08-27',
'2004-08-30',
'2004-08-31',
'2004-09-01',
'2004-09-02',
'2004-09-03',
'2004-09-06',
'2004-09-07',
'2004-09-08',
'2004-09-09',
'2004-09-10',
'2004-09-13',
'2004-09-14',
'2004-09-15',
'2004-09-16',
'2004-09-17',
'2004-09-20',
'2004-09-21',
'2004-09-22',
'2004-09-23',
'2004-09-24',
'2004-09-27',
'2004-09-28',
'2004-09-29',
'2004-09-30',
'2004-10-08',
'2004-10-11',
'2004-10-12',
'2004-10-13',
'2004-10-14',
'2004-10-15',
'2004-10-18',
'2004-10-19',
'2004-10-20',
'2004-10-21',
'2004-10-22',
'2004-10-25',
'2004-10-26',
'2004-10-27',
'2004-10-28',
'2004-10-29',
'2004-11-01',
'2004-11-02',
'2004-11-03',
'2004-11-04',
'2004-11-05',
'2004-11-08',
'2004-11-09',
'2004-11-10',
'2004-11-11',
'2004-11-12',
'2004-11-15',
'2004-11-16',
'2004-11-17',
'2004-11-18',
'2004-11-19',
'2004-11-22',
'2004-11-23',
'2004-11-24',
'2004-11-25',
'2004-11-26',
'2004-11-29',
'2004-11-30',
'2004-12-01',
'2004-12-02',
'2004-12-03',
'2004-12-06',
'2004-12-07',
'2004-12-08',
'2004-12-09',
'2004-12-10',
'2004-12-13',
'2004-12-14',
'2004-12-15',
'2004-12-16',
'2004-12-17',
'2004-12-20',
'2004-12-21',
'2004-12-22',
'2004-12-23',
'2004-12-24',
'2004-12-27',
'2004-12-28',
'2004-12-29',
'2004-12-30',
'2004-12-31',
'2005-01-04',
'2005-01-05',
'2005-01-06',
'2005-01-07',
'2005-01-10',
'2005-01-11',
'2005-01-12',
'2005-01-13',
'2005-01-14',
'2005-01-17',
'2005-01-18',
'2005-01-19',
'2005-01-20',
'2005-01-21',
'2005-01-24',
'2005-01-25',
'2005-01-26',
'2005-01-27',
'2005-01-28',
'2005-01-31',
'2005-02-01',
'2005-02-02',
'2005-02-03',
'2005-02-04',
'2005-02-16',
'2005-02-17',
'2005-02-18',
'2005-02-21',
'2005-02-22',
'2005-02-23',
'2005-02-24',
'2005-02-25',
'2005-02-28',
'2005-03-01',
'2005-03-02',
'2005-03-03',
'2005-03-04',
'2005-03-07',
'2005-03-08',
'2005-03-09',
'2005-03-10',
'2005-03-11',
'2005-03-14',
'2005-03-15',
'2005-03-16',
'2005-03-17',
'2005-03-18',
'2005-03-21',
'2005-03-22',
'2005-03-23',
'2005-03-24',
'2005-03-25',
'2005-03-28',
'2005-03-29',
'2005-03-30',
'2005-03-31',
'2005-04-01',
'2005-04-04',
'2005-04-05',
'2005-04-06',
'2005-04-07',
'2005-04-08',
'2005-04-11',
'2005-04-12',
'2005-04-13',
'2005-04-14',
'2005-04-15',
'2005-04-18',
'2005-04-19',
'2005-04-20',
'2005-04-21',
'2005-04-22',
'2005-04-25',
'2005-04-26',
'2005-04-27',
'2005-04-28',
'2005-04-29',
'2005-05-09',
'2005-05-10',
'2005-05-11',
'2005-05-12',
'2005-05-13',
'2005-05-16',
'2005-05-17',
'2005-05-18',
'2005-05-19',
'2005-05-20',
'2005-05-23',
'2005-05-24',
'2005-05-25',
'2005-05-26',
'2005-05-27',
'2005-05-30',
'2005-05-31',
'2005-06-01',
'2005-06-02',
'2005-06-03',
'2005-06-06',
'2005-06-07',
'2005-06-08',
'2005-06-09',
'2005-06-10',
'2005-06-13',
'2005-06-14',
'2005-06-15',
'2005-06-16',
'2005-06-17',
'2005-06-20',
'2005-06-21',
'2005-06-22',
'2005-06-23',
'2005-06-24',
'2005-06-27',
'2005-06-28',
'2005-06-29',
'2005-06-30',
'2005-07-01',
'2005-07-04',
'2005-07-05',
'2005-07-06',
'2005-07-07',
'2005-07-08',
'2005-07-11',
'2005-07-12',
'2005-07-13',
'2005-07-14',
'2005-07-15',
'2005-07-18',
'2005-07-19',
'2005-07-20',
'2005-07-21',
'2005-07-22',
'2005-07-25',
'2005-07-26',
'2005-07-27',
'2005-07-28',
'2005-07-29',
'2005-08-01',
'2005-08-02',
'2005-08-03',
'2005-08-04',
'2005-08-05',
'2005-08-08',
'2005-08-09',
'2005-08-10',
'2005-08-11',
'2005-08-12',
'2005-08-15',
'2005-08-16',
'2005-08-17',
'2005-08-18',
'2005-08-19',
'2005-08-22',
'2005-08-23',
'2005-08-24',
'2005-08-25',
'2005-08-26',
'2005-08-29',
'2005-08-30',
'2005-08-31',
'2005-09-01',
'2005-09-02',
'2005-09-05',
'2005-09-06',
'2005-09-07',
'2005-09-08',
'2005-09-09',
'2005-09-12',
'2005-09-13',
'2005-09-14',
'2005-09-15',
'2005-09-16',
'2005-09-19',
'2005-09-20',
'2005-09-21',
'2005-09-22',
'2005-09-23',
'2005-09-26',
'2005-09-27',
'2005-09-28',
'2005-09-29',
'2005-09-30',
'2005-10-10',
'2005-10-11',
'2005-10-12',
'2005-10-13',
'2005-10-14',
'2005-10-17',
'2005-10-18',
'2005-10-19',
'2005-10-20',
'2005-10-21',
'2005-10-24',
'2005-10-25',
'2005-10-26',
'2005-10-27',
'2005-10-28',
'2005-10-31',
'2005-11-01',
'2005-11-02',
'2005-11-03',
'2005-11-04',
'2005-11-07',
'2005-11-08',
'2005-11-09',
'2005-11-10',
'2005-11-11',
'2005-11-14',
'2005-11-15',
'2005-11-16',
'2005-11-17',
'2005-11-18',
'2005-11-21',
'2005-11-22',
'2005-11-23',
'2005-11-24',
'2005-11-25',
'2005-11-28',
'2005-11-29',
'2005-11-30',
'2005-12-01',
'2005-12-02',
'2005-12-05',
'2005-12-06',
'2005-12-07',
'2005-12-08',
'2005-12-09',
'2005-12-12',
'2005-12-13',
'2005-12-14',
'2005-12-15',
'2005-12-16',
'2005-12-19',
'2005-12-20',
'2005-12-21',
'2005-12-22',
'2005-12-23',
'2005-12-26',
'2005-12-27',
'2005-12-28',
'2005-12-29',
'2005-12-30',
'2006-01-04',
'2006-01-05',
'2006-01-06',
'2006-01-09',
'2006-01-10',
'2006-01-11',
'2006-01-12',
'2006-01-13',
'2006-01-16',
'2006-01-17',
'2006-01-18',
'2006-01-19',
'2006-01-20',
'2006-01-23',
'2006-01-24',
'2006-01-25',
'2006-02-06',
'2006-02-07',
'2006-02-08',
'2006-02-09',
'2006-02-10',
'2006-02-13',
'2006-02-14',
'2006-02-15',
'2006-02-16',
'2006-02-17',
'2006-02-20',
'2006-02-21',
'2006-02-22',
'2006-02-23',
'2006-02-24',
'2006-02-27',
'2006-02-28',
'2006-03-01',
'2006-03-02',
'2006-03-03',
'2006-03-06',
'2006-03-07',
'2006-03-08',
'2006-03-09',
'2006-03-10',
'2006-03-13',
'2006-03-14',
'2006-03-15',
'2006-03-16',
'2006-03-17',
'2006-03-20',
'2006-03-21',
'2006-03-22',
'2006-03-23',
'2006-03-24',
'2006-03-27',
'2006-03-28',
'2006-03-29',
'2006-03-30',
'2006-03-31',
'2006-04-03',
'2006-04-04',
'2006-04-05',
'2006-04-06',
'2006-04-07',
'2006-04-10',
'2006-04-11',
'2006-04-12',
'2006-04-13',
'2006-04-14',
'2006-04-17',
'2006-04-18',
'2006-04-19',
'2006-04-20',
'2006-04-21',
'2006-04-24',
'2006-04-25',
'2006-04-26',
'2006-04-27',
'2006-04-28',
'2006-05-08',
'2006-05-09',
'2006-05-10',
'2006-05-11',
'2006-05-12',
'2006-05-15',
'2006-05-16',
'2006-05-17',
'2006-05-18',
'2006-05-19',
'2006-05-22',
'2006-05-23',
'2006-05-24',
'2006-05-25',
'2006-05-26',
'2006-05-29',
'2006-05-30',
'2006-05-31',
'2006-06-01',
'2006-06-02',
'2006-06-05',
'2006-06-06',
'2006-06-07',
'2006-06-08',
'2006-06-09',
'2006-06-12',
'2006-06-13',
'2006-06-14',
'2006-06-15',
'2006-06-16',
'2006-06-19',
'2006-06-20',
'2006-06-21',
'2006-06-22',
'2006-06-23',
'2006-06-26',
'2006-06-27',
'2006-06-28',
'2006-06-29',
'2006-06-30',
'2006-07-03',
'2006-07-04',
'2006-07-05',
'2006-07-06',
'2006-07-07',
'2006-07-10',
'2006-07-11',
'2006-07-12',
'2006-07-13',
'2006-07-14',
'2006-07-17',
'2006-07-18',
'2006-07-19',
'2006-07-20',
'2006-07-21',
'2006-07-24',
'2006-07-25',
'2006-07-26',
'2006-07-27',
'2006-07-28',
'2006-07-31',
'2006-08-01',
'2006-08-02',
'2006-08-03',
'2006-08-04',
'2006-08-07',
'2006-08-08',
'2006-08-09',
'2006-08-10',
'2006-08-11',
'2006-08-14',
'2006-08-15',
'2006-08-16',
'2006-08-17',
'2006-08-18',
'2006-08-21',
'2006-08-22',
'2006-08-23',
'2006-08-24',
'2006-08-25',
'2006-08-28',
'2006-08-29',
'2006-08-30',
'2006-08-31',
'2006-09-01',
'2006-09-04',
'2006-09-05',
'2006-09-06',
'2006-09-07',
'2006-09-08',
'2006-09-11',
'2006-09-12',
'2006-09-13',
'2006-09-14',
'2006-09-15',
'2006-09-18',
'2006-09-19',
'2006-09-20',
'2006-09-21',
'2006-09-22',
'2006-09-25',
'2006-09-26',
'2006-09-27',
'2006-09-28',
'2006-09-29',
'2006-10-09',
'2006-10-10',
'2006-10-11',
'2006-10-12',
'2006-10-13',
'2006-10-16',
'2006-10-17',
'2006-10-18',
'2006-10-19',
'2006-10-20',
'2006-10-23',
'2006-10-24',
'2006-10-25',
'2006-10-26',
'2006-10-27',
'2006-10-30',
'2006-10-31',
'2006-11-01',
'2006-11-02',
'2006-11-03',
'2006-11-06',
'2006-11-07',
'2006-11-08',
'2006-11-09',
'2006-11-10',
'2006-11-13',
'2006-11-14',
'2006-11-15',
'2006-11-16',
'2006-11-17',
'2006-11-20',
'2006-11-21',
'2006-11-22',
'2006-11-23',
'2006-11-24',
'2006-11-27',
'2006-11-28',
'2006-11-29',
'2006-11-30',
'2006-12-01',
'2006-12-04',
'2006-12-05',
'2006-12-06',
'2006-12-07',
'2006-12-08',
'2006-12-11',
'2006-12-12',
'2006-12-13',
'2006-12-14',
'2006-12-15',
'2006-12-18',
'2006-12-19',
'2006-12-20',
'2006-12-21',
'2006-12-22',
'2006-12-25',
'2006-12-26',
'2006-12-27',
'2006-12-28',
'2006-12-29',
'2007-01-04',
'2007-01-05',
'2007-01-08',
'2007-01-09',
'2007-01-10',
'2007-01-11',
'2007-01-12',
'2007-01-15',
'2007-01-16',
'2007-01-17',
'2007-01-18',
'2007-01-19',
'2007-01-22',
'2007-01-23',
'2007-01-24',
'2007-01-25',
'2007-01-26',
'2007-01-29',
'2007-01-30',
'2007-01-31',
'2007-02-01',
'2007-02-02',
'2007-02-05',
'2007-02-06',
'2007-02-07',
'2007-02-08',
'2007-02-09',
'2007-02-12',
'2007-02-13',
'2007-02-14',
'2007-02-15',
'2007-02-16',
'2007-02-26',
'2007-02-27',
'2007-02-28',
'2007-03-01',
'2007-03-02',
'2007-03-05',
'2007-03-06',
'2007-03-07',
'2007-03-08',
'2007-03-09',
'2007-03-12',
'2007-03-13',
'2007-03-14',
'2007-03-15',
'2007-03-16',
'2007-03-19',
'2007-03-20',
'2007-03-21',
'2007-03-22',
'2007-03-23',
'2007-03-26',
'2007-03-27',
'2007-03-28',
'2007-03-29',
'2007-03-30',
'2007-04-02',
'2007-04-03',
'2007-04-04',
'2007-04-05',
'2007-04-06',
'2007-04-09',
'2007-04-10',
'2007-04-11',
'2007-04-12',
'2007-04-13',
'2007-04-16',
'2007-04-17',
'2007-04-18',
'2007-04-19',
'2007-04-20',
'2007-04-23',
'2007-04-24',
'2007-04-25',
'2007-04-26',
'2007-04-27',
'2007-04-30',
'2007-05-08',
'2007-05-09',
'2007-05-10',
'2007-05-11',
'2007-05-14',
'2007-05-15',
'2007-05-16',
'2007-05-17',
'2007-05-18',
'2007-05-21',
'2007-05-22',
'2007-05-23',
'2007-05-24',
'2007-05-25',
'2007-05-28',
'2007-05-29',
'2007-05-30',
'2007-05-31',
'2007-06-01',
'2007-06-04',
'2007-06-05',
'2007-06-06',
'2007-06-07',
'2007-06-08',
'2007-06-11',
'2007-06-12',
'2007-06-13',
'2007-06-14',
'2007-06-15',
'2007-06-18',
'2007-06-19',
'2007-06-20',
'2007-06-21',
'2007-06-22',
'2007-06-25',
'2007-06-26',
'2007-06-27',
'2007-06-28',
'2007-06-29',
'2007-07-02',
'2007-07-03',
'2007-07-04',
'2007-07-05',
'2007-07-06',
'2007-07-09',
'2007-07-10',
'2007-07-11',
'2007-07-12',
'2007-07-13',
'2007-07-16',
'2007-07-17',
'2007-07-18',
'2007-07-19',
'2007-07-20',
'2007-07-23',
'2007-07-24',
'2007-07-25',
'2007-07-26',
'2007-07-27',
'2007-07-30',
'2007-07-31',
'2007-08-01',
'2007-08-02',
'2007-08-03',
'2007-08-06',
'2007-08-07',
'2007-08-08',
'2007-08-09',
'2007-08-10',
'2007-08-13',
'2007-08-14',
'2007-08-15',
'2007-08-16',
'2007-08-17',
'2007-08-20',
'2007-08-21',
'2007-08-22',
'2007-08-23',
'2007-08-24',
'2007-08-27',
'2007-08-28',
'2007-08-29',
'2007-08-30',
'2007-08-31',
'2007-09-03',
'2007-09-04',
'2007-09-05',
'2007-09-06',
'2007-09-07',
'2007-09-10',
'2007-09-11',
'2007-09-12',
'2007-09-13',
'2007-09-14',
'2007-09-17',
'2007-09-18',
'2007-09-19',
'2007-09-20',
'2007-09-21',
'2007-09-24',
'2007-09-25',
'2007-09-26',
'2007-09-27',
'2007-09-28',
'2007-10-08',
'2007-10-09',
'2007-10-10',
'2007-10-11',
'2007-10-12',
'2007-10-15',
'2007-10-16',
'2007-10-17',
'2007-10-18',
'2007-10-19',
'2007-10-22',
'2007-10-23',
'2007-10-24',
'2007-10-25',
'2007-10-26',
'2007-10-29',
'2007-10-30',
'2007-10-31',
'2007-11-01',
'2007-11-02',
'2007-11-05',
'2007-11-06',
'2007-11-07',
'2007-11-08',
'2007-11-09',
'2007-11-12',
'2007-11-13',
'2007-11-14',
'2007-11-15',
'2007-11-16',
'2007-11-19',
'2007-11-20',
'2007-11-21',
'2007-11-22',
'2007-11-23',
'2007-11-26',
'2007-11-27',
'2007-11-28',
'2007-11-29',
'2007-11-30',
'2007-12-03',
'2007-12-04',
'2007-12-05',
'2007-12-06',
'2007-12-07',
'2007-12-10',
'2007-12-11',
'2007-12-12',
'2007-12-13',
'2007-12-14',
'2007-12-17',
'2007-12-18',
'2007-12-19',
'2007-12-20',
'2007-12-21',
'2007-12-24',
'2007-12-25',
'2007-12-26',
'2007-12-27',
'2007-12-28',
'2008-01-02',
'2008-01-03',
'2008-01-04',
'2008-01-07',
'2008-01-08',
'2008-01-09',
'2008-01-10',
'2008-01-11',
'2008-01-14',
'2008-01-15',
'2008-01-16',
'2008-01-17',
'2008-01-18',
'2008-01-21',
'2008-01-22',
'2008-01-23',
'2008-01-24',
'2008-01-25',
'2008-01-28',
'2008-01-29',
'2008-01-30',
'2008-01-31',
'2008-02-01',
'2008-02-04',
'2008-02-05',
'2008-02-13',
'2008-02-14',
'2008-02-15',
'2008-02-18',
'2008-02-19',
'2008-02-20',
'2008-02-21',
'2008-02-22',
'2008-02-25',
'2008-02-26',
'2008-02-27',
'2008-02-28',
'2008-02-29',
'2008-03-03',
'2008-03-04',
'2008-03-05',
'2008-03-06',
'2008-03-07',
'2008-03-10',
'2008-03-11',
'2008-03-12',
'2008-03-13',
'2008-03-14',
'2008-03-17',
'2008-03-18',
'2008-03-19',
'2008-03-20',
'2008-03-21',
'2008-03-24',
'2008-03-25',
'2008-03-26',
'2008-03-27',
'2008-03-28',
'2008-03-31',
'2008-04-01',
'2008-04-02',
'2008-04-03',
'2008-04-07',
'2008-04-08',
'2008-04-09',
'2008-04-10',
'2008-04-11',
'2008-04-14',
'2008-04-15',
'2008-04-16',
'2008-04-17',
'2008-04-18',
'2008-04-21',
'2008-04-22',
'2008-04-23',
'2008-04-24',
'2008-04-25',
'2008-04-28',
'2008-04-29',
'2008-04-30',
'2008-05-05',
'2008-05-06',
'2008-05-07',
'2008-05-08',
'2008-05-09',
'2008-05-12',
'2008-05-13',
'2008-05-14',
'2008-05-15',
'2008-05-16',
'2008-05-19',
'2008-05-20',
'2008-05-21',
'2008-05-22',
'2008-05-23',
'2008-05-26',
'2008-05-27',
'2008-05-28',
'2008-05-29',
'2008-05-30',
'2008-06-02',
'2008-06-03',
'2008-06-04',
'2008-06-05',
'2008-06-06',
'2008-06-10',
'2008-06-11',
'2008-06-12',
'2008-06-13',
'2008-06-16',
'2008-06-17',
'2008-06-18',
'2008-06-19',
'2008-06-20',
'2008-06-23',
'2008-06-24',
'2008-06-25',
'2008-06-26',
'2008-06-27',
'2008-06-30',
'2008-07-01',
'2008-07-02',
'2008-07-03',
'2008-07-04',
'2008-07-07',
'2008-07-08',
'2008-07-09',
'2008-07-10',
'2008-07-11',
'2008-07-14',
'2008-07-15',
'2008-07-16',
'2008-07-17',
'2008-07-18',
'2008-07-21',
'2008-07-22',
'2008-07-23',
'2008-07-24',
'2008-07-25',
'2008-07-28',
'2008-07-29',
'2008-07-30',
'2008-07-31',
'2008-08-01',
'2008-08-04',
'2008-08-05',
'2008-08-06',
'2008-08-07',
'2008-08-08',
'2008-08-11',
'2008-08-12',
'2008-08-13',
'2008-08-14',
'2008-08-15',
'2008-08-18',
'2008-08-19',
'2008-08-20',
'2008-08-21',
'2008-08-22',
'2008-08-25',
'2008-08-26',
'2008-08-27',
'2008-08-28',
'2008-08-29',
'2008-09-01',
'2008-09-02',
'2008-09-03',
'2008-09-04',
'2008-09-05',
'2008-09-08',
'2008-09-09',
'2008-09-10',
'2008-09-11',
'2008-09-12',
'2008-09-16',
'2008-09-17',
'2008-09-18',
'2008-09-19',
'2008-09-22',
'2008-09-23',
'2008-09-24',
'2008-09-25',
'2008-09-26',
'2008-10-06',
'2008-10-07',
'2008-10-08',
'2008-10-09',
'2008-10-10',
'2008-10-13',
'2008-10-14',
'2008-10-15',
'2008-10-16',
'2008-10-17',
'2008-10-20',
'2008-10-21',
'2008-10-22',
'2008-10-23',
'2008-10-24',
'2008-10-27',
'2008-10-28',
'2008-10-29',
'2008-10-30',
'2008-10-31',
'2008-11-03',
'2008-11-04',
'2008-11-05',
'2008-11-06',
'2008-11-07',
'2008-11-10',
'2008-11-11',
'2008-11-12',
'2008-11-13',
'2008-11-14',
'2008-11-17',
'2008-11-18',
'2008-11-19',
'2008-11-20',
'2008-11-21',
'2008-11-24',
'2008-11-25',
'2008-11-26',
'2008-11-27',
'2008-11-28',
'2008-12-01',
'2008-12-02',
'2008-12-03',
'2008-12-04',
'2008-12-05',
'2008-12-08',
'2008-12-09',
'2008-12-10',
'2008-12-11',
'2008-12-12',
'2008-12-15',
'2008-12-16',
'2008-12-17',
'2008-12-18',
'2008-12-19',
'2008-12-22',
'2008-12-23',
'2008-12-24',
'2008-12-25',
'2008-12-26',
'2008-12-29',
'2008-12-30',
'2008-12-31',
'2009-01-05',
'2009-01-06',
'2009-01-07',
'2009-01-08',
'2009-01-09',
'2009-01-12',
'2009-01-13',
'2009-01-14',
'2009-01-15',
'2009-01-16',
'2009-01-19',
'2009-01-20',
'2009-01-21',
'2009-01-22',
'2009-01-23',
'2009-02-02',
'2009-02-03',
'2009-02-04',
'2009-02-05',
'2009-02-06',
'2009-02-09',
'2009-02-10',
'2009-02-11',
'2009-02-12',
'2009-02-13',
'2009-02-16',
'2009-02-17',
'2009-02-18',
'2009-02-19',
'2009-02-20',
'2009-02-23',
'2009-02-24',
'2009-02-25',
'2009-02-26',
'2009-02-27',
'2009-03-02',
'2009-03-03',
'2009-03-04',
'2009-03-05',
'2009-03-06',
'2009-03-09',
'2009-03-10',
'2009-03-11',
'2009-03-12',
'2009-03-13',
'2009-03-16',
'2009-03-17',
'2009-03-18',
'2009-03-19',
'2009-03-20',
'2009-03-23',
'2009-03-24',
'2009-03-25',
'2009-03-26',
'2009-03-27',
'2009-03-30',
'2009-03-31',
'2009-04-01',
'2009-04-02',
'2009-04-03',
'2009-04-07',
'2009-04-08',
'2009-04-09',
'2009-04-10',
'2009-04-13',
'2009-04-14',
'2009-04-15',
'2009-04-16',
'2009-04-17',
'2009-04-20',
'2009-04-21',
'2009-04-22',
'2009-04-23',
'2009-04-24',
'2009-04-27',
'2009-04-28',
'2009-04-29',
'2009-04-30',
'2009-05-04',
'2009-05-05',
'2009-05-06',
'2009-05-07',
'2009-05-08',
'2009-05-11',
'2009-05-12',
'2009-05-13',
'2009-05-14',
'2009-05-15',
'2009-05-18',
'2009-05-19',
'2009-05-20',
'2009-05-21',
'2009-05-22',
'2009-05-25',
'2009-05-26',
'2009-05-27',
'2009-06-01',
'2009-06-02',
'2009-06-03',
'2009-06-04',
'2009-06-05',
'2009-06-08',
'2009-06-09',
'2009-06-10',
'2009-06-11',
'2009-06-12',
'2009-06-15',
'2009-06-16',
'2009-06-17',
'2009-06-18',
'2009-06-19',
'2009-06-22',
'2009-06-23',
'2009-06-24',
'2009-06-25',
'2009-06-26',
'2009-06-29',
'2009-06-30',
'2009-07-01',
'2009-07-02',
'2009-07-03',
'2009-07-06',
'2009-07-07',
'2009-07-08',
'2009-07-09',
'2009-07-10',
'2009-07-13',
'2009-07-14',
'2009-07-15',
'2009-07-16',
'2009-07-17',
'2009-07-20',
'2009-07-21',
'2009-07-22',
'2009-07-23',
'2009-07-24',
'2009-07-27',
'2009-07-28',
'2009-07-29',
'2009-07-30',
'2009-07-31',
'2009-08-03',
'2009-08-04',
'2009-08-05',
'2009-08-06',
'2009-08-07',
'2009-08-10',
'2009-08-11',
'2009-08-12',
'2009-08-13',
'2009-08-14',
'2009-08-17',
'2009-08-18',
'2009-08-19',
'2009-08-20',
'2009-08-21',
'2009-08-24',
'2009-08-25',
'2009-08-26',
'2009-08-27',
'2009-08-28',
'2009-08-31',
'2009-09-01',
'2009-09-02',
'2009-09-03',
'2009-09-04',
'2009-09-07',
'2009-09-08',
'2009-09-09',
'2009-09-10',
'2009-09-11',
'2009-09-14',
'2009-09-15',
'2009-09-16',
'2009-09-17',
'2009-09-18',
'2009-09-21',
'2009-09-22',
'2009-09-23',
'2009-09-24',
'2009-09-25',
'2009-09-28',
'2009-09-29',
'2009-09-30',
'2009-10-09',
'2009-10-12',
'2009-10-13',
'2009-10-14',
'2009-10-15',
'2009-10-16',
'2009-10-19',
'2009-10-20',
'2009-10-21',
'2009-10-22',
'2009-10-23',
'2009-10-26',
'2009-10-27',
'2009-10-28',
'2009-10-29',
'2009-10-30',
'2009-11-02',
'2009-11-03',
'2009-11-04',
'2009-11-05',
'2009-11-06',
'2009-11-09',
'2009-11-10',
'2009-11-11',
'2009-11-12',
'2009-11-13',
'2009-11-16',
'2009-11-17',
'2009-11-18',
'2009-11-19',
'2009-11-20',
'2009-11-23',
'2009-11-24',
'2009-11-25',
'2009-11-26',
'2009-11-27',
'2009-11-30',
'2009-12-01',
'2009-12-02',
'2009-12-03',
'2009-12-04',
'2009-12-07',
'2009-12-08',
'2009-12-09',
'2009-12-10',
'2009-12-11',
'2009-12-14',
'2009-12-15',
'2009-12-16',
'2009-12-17',
'2009-12-18',
'2009-12-21',
'2009-12-22',
'2009-12-23',
'2009-12-24',
'2009-12-25',
'2009-12-28',
'2009-12-29',
'2009-12-30',
'2009-12-31',
'2010-01-04',
'2010-01-05',
'2010-01-06',
'2010-01-07',
'2010-01-08',
'2010-01-11',
'2010-01-12',
'2010-01-13',
'2010-01-14',
'2010-01-15',
'2010-01-18',
'2010-01-19',
'2010-01-20',
'2010-01-21',
'2010-01-22',
'2010-01-25',
'2010-01-26',
'2010-01-27',
'2010-01-28',
'2010-01-29',
'2010-02-01',
'2010-02-02',
'2010-02-03',
'2010-02-04',
'2010-02-05',
'2010-02-08',
'2010-02-09',
'2010-02-10',
'2010-02-11',
'2010-02-12',
'2010-02-22',
'2010-02-23',
'2010-02-24',
'2010-02-25',
'2010-02-26',
'2010-03-01',
'2010-03-02',
'2010-03-03',
'2010-03-04',
'2010-03-05',
'2010-03-08',
'2010-03-09',
'2010-03-10',
'2010-03-11',
'2010-03-12',
'2010-03-15',
'2010-03-16',
'2010-03-17',
'2010-03-18',
'2010-03-19',
'2010-03-22',
'2010-03-23',
'2010-03-24',
'2010-03-25',
'2010-03-26',
'2010-03-29',
'2010-03-30',
'2010-03-31',
'2010-04-01',
'2010-04-02',
'2010-04-06',
'2010-04-07',
'2010-04-08',
'2010-04-09',
'2010-04-12',
'2010-04-13',
'2010-04-14',
'2010-04-15',
'2010-04-16',
'2010-04-19',
'2010-04-20',
'2010-04-21',
'2010-04-22',
'2010-04-23',
'2010-04-26',
'2010-04-27',
'2010-04-28',
'2010-04-29',
'2010-04-30',
'2010-05-04',
'2010-05-05',
'2010-05-06',
'2010-05-07',
'2010-05-10',
'2010-05-11',
'2010-05-12',
'2010-05-13',
'2010-05-14',
'2010-05-17',
'2010-05-18',
'2010-05-19',
'2010-05-20',
'2010-05-21',
'2010-05-24',
'2010-05-25',
'2010-05-26',
'2010-05-27',
'2010-05-28',
'2010-05-31',
'2010-06-01',
'2010-06-02',
'2010-06-03',
'2010-06-04',
'2010-06-07',
'2010-06-08',
'2010-06-09',
'2010-06-10',
'2010-06-11',
'2010-06-17',
'2010-06-18',
'2010-06-21',
'2010-06-22',
'2010-06-23',
'2010-06-24',
'2010-06-25',
'2010-06-28',
'2010-06-29',
'2010-06-30',
'2010-07-01',
'2010-07-02',
'2010-07-05',
'2010-07-06',
'2010-07-07',
'2010-07-08',
'2010-07-09',
'2010-07-12',
'2010-07-13',
'2010-07-14',
'2010-07-15',
'2010-07-16',
'2010-07-19',
'2010-07-20',
'2010-07-21',
'2010-07-22',
'2010-07-23',
'2010-07-26',
'2010-07-27',
'2010-07-28',
'2010-07-29',
'2010-07-30',
'2010-08-02',
'2010-08-03',
'2010-08-04',
'2010-08-05',
'2010-08-06',
'2010-08-09',
'2010-08-10',
'2010-08-11',
'2010-08-12',
'2010-08-13',
'2010-08-16',
'2010-08-17',
'2010-08-18',
'2010-08-19',
'2010-08-20',
'2010-08-23',
'2010-08-24',
'2010-08-25',
'2010-08-26',
'2010-08-27',
'2010-08-30',
'2010-08-31',
'2010-09-01',
'2010-09-02',
'2010-09-03',
'2010-09-06',
'2010-09-07',
'2010-09-08',
'2010-09-09',
'2010-09-10',
'2010-09-13',
'2010-09-14',
'2010-09-15',
'2010-09-16',
'2010-09-17',
'2010-09-20',
'2010-09-21',
'2010-09-27',
'2010-09-28',
'2010-09-29',
'2010-09-30',
'2010-10-08',
'2010-10-11',
'2010-10-12',
'2010-10-13',
'2010-10-14',
'2010-10-15',
'2010-10-18',
'2010-10-19',
'2010-10-20',
'2010-10-21',
'2010-10-22',
'2010-10-25',
'2010-10-26',
'2010-10-27',
'2010-10-28',
'2010-10-29',
'2010-11-01',
'2010-11-02',
'2010-11-03',
'2010-11-04',
'2010-11-05',
'2010-11-08',
'2010-11-09',
'2010-11-10',
'2010-11-11',
'2010-11-12',
'2010-11-15',
'2010-11-16',
'2010-11-17',
'2010-11-18',
'2010-11-19',
'2010-11-22',
'2010-11-23',
'2010-11-24',
'2010-11-25',
'2010-11-26',
'2010-11-29',
'2010-11-30',
'2010-12-01',
'2010-12-02',
'2010-12-03',
'2010-12-06',
'2010-12-07',
'2010-12-08',
'2010-12-09',
'2010-12-10',
'2010-12-13',
'2010-12-14',
'2010-12-15',
'2010-12-16',
'2010-12-17',
'2010-12-20',
'2010-12-21',
'2010-12-22',
'2010-12-23',
'2010-12-24',
'2010-12-27',
'2010-12-28',
'2010-12-29',
'2010-12-30',
'2010-12-31',
'2011-01-04',
'2011-01-05',
'2011-01-06',
'2011-01-07',
'2011-01-10',
'2011-01-11',
'2011-01-12',
'2011-01-13',
'2011-01-14',
'2011-01-17',
'2011-01-18',
'2011-01-19',
'2011-01-20',
'2011-01-21',
'2011-01-24',
'2011-01-25',
'2011-01-26',
'2011-01-27',
'2011-01-28',
'2011-01-31',
'2011-02-01',
'2011-02-09',
'2011-02-10',
'2011-02-11',
'2011-02-14',
'2011-02-15',
'2011-02-16',
'2011-02-17',
'2011-02-18',
'2011-02-21',
'2011-02-22',
'2011-02-23',
'2011-02-24',
'2011-02-25',
'2011-02-28',
'2011-03-01',
'2011-03-02',
'2011-03-03',
'2011-03-04',
'2011-03-07',
'2011-03-08',
'2011-03-09',
'2011-03-10',
'2011-03-11',
'2011-03-14',
'2011-03-15',
'2011-03-16',
'2011-03-17',
'2011-03-18',
'2011-03-21',
'2011-03-22',
'2011-03-23',
'2011-03-24',
'2011-03-25',
'2011-03-28',
'2011-03-29',
'2011-03-30',
'2011-03-31',
'2011-04-01',
'2011-04-06',
'2011-04-07',
'2011-04-08',
'2011-04-11',
'2011-04-12',
'2011-04-13',
'2011-04-14',
'2011-04-15',
'2011-04-18',
'2011-04-19',
'2011-04-20',
'2011-04-21',
'2011-04-22',
'2011-04-25',
'2011-04-26',
'2011-04-27',
'2011-04-28',
'2011-04-29',
'2011-05-03',
'2011-05-04',
'2011-05-05',
'2011-05-06',
'2011-05-09',
'2011-05-10',
'2011-05-11',
'2011-05-12',
'2011-05-13',
'2011-05-16',
'2011-05-17',
'2011-05-18',
'2011-05-19',
'2011-05-20',
'2011-05-23',
'2011-05-24',
'2011-05-25',
'2011-05-26',
'2011-05-27',
'2011-05-30',
'2011-05-31',
'2011-06-01',
'2011-06-02',
'2011-06-03',
'2011-06-07',
'2011-06-08',
'2011-06-09',
'2011-06-10',
'2011-06-13',
'2011-06-14',
'2011-06-15',
'2011-06-16',
'2011-06-17',
'2011-06-20',
'2011-06-21',
'2011-06-22',
'2011-06-23',
'2011-06-24',
'2011-06-27',
'2011-06-28',
'2011-06-29',
'2011-06-30',
'2011-07-01',
'2011-07-04',
'2011-07-05',
'2011-07-06',
'2011-07-07',
'2011-07-08',
'2011-07-11',
'2011-07-12',
'2011-07-13',
'2011-07-14',
'2011-07-15',
'2011-07-18',
'2011-07-19',
'2011-07-20',
'2011-07-21',
'2011-07-22',
'2011-07-25',
'2011-07-26',
'2011-07-27',
'2011-07-28',
'2011-07-29',
'2011-08-01',
'2011-08-02',
'2011-08-03',
'2011-08-04',
'2011-08-05',
'2011-08-08',
'2011-08-09',
'2011-08-10',
'2011-08-11',
'2011-08-12',
'2011-08-15',
'2011-08-16',
'2011-08-17',
'2011-08-18',
'2011-08-19',
'2011-08-22',
'2011-08-23',
'2011-08-24',
'2011-08-25',
'2011-08-26',
'2011-08-29',
'2011-08-30',
'2011-08-31',
'2011-09-01',
'2011-09-02',
'2011-09-05',
'2011-09-06',
'2011-09-07',
'2011-09-08',
'2011-09-09',
'2011-09-13',
'2011-09-14',
'2011-09-15',
'2011-09-16',
'2011-09-19',
'2011-09-20',
'2011-09-21',
'2011-09-22',
'2011-09-23',
'2011-09-26',
'2011-09-27',
'2011-09-28',
'2011-09-29',
'2011-09-30',
'2011-10-10',
'2011-10-11',
'2011-10-12',
'2011-10-13',
'2011-10-14',
'2011-10-17',
'2011-10-18',
'2011-10-19',
'2011-10-20',
'2011-10-21',
'2011-10-24',
'2011-10-25',
'2011-10-26',
'2011-10-27',
'2011-10-28',
'2011-10-31',
'2011-11-01',
'2011-11-02',
'2011-11-03',
'2011-11-04',
'2011-11-07',
'2011-11-08',
'2011-11-09',
'2011-11-10',
'2011-11-11',
'2011-11-14',
'2011-11-15',
'2011-11-16',
'2011-11-17',
'2011-11-18',
'2011-11-21',
'2011-11-22',
'2011-11-23',
'2011-11-24',
'2011-11-25',
'2011-11-28',
'2011-11-29',
'2011-11-30',
'2011-12-01',
'2011-12-02',
'2011-12-05',
'2011-12-06',
'2011-12-07',
'2011-12-08',
'2011-12-09',
'2011-12-12',
'2011-12-13',
'2011-12-14',
'2011-12-15',
'2011-12-16',
'2011-12-19',
'2011-12-20',
'2011-12-21',
'2011-12-22',
'2011-12-23',
'2011-12-26',
'2011-12-27',
'2011-12-28',
'2011-12-29',
'2011-12-30',
'2012-01-04',
'2012-01-05',
'2012-01-06',
'2012-01-09',
'2012-01-10',
'2012-01-11',
'2012-01-12',
'2012-01-13',
'2012-01-16',
'2012-01-17',
'2012-01-18',
'2012-01-19',
'2012-01-20',
'2012-01-30',
'2012-01-31',
'2012-02-01',
'2012-02-02',
'2012-02-03',
'2012-02-06',
'2012-02-07',
'2012-02-08',
'2012-02-09',
'2012-02-10',
'2012-02-13',
'2012-02-14',
'2012-02-15',
'2012-02-16',
'2012-02-17',
'2012-02-20',
'2012-02-21',
'2012-02-22',
'2012-02-23',
'2012-02-24',
'2012-02-27',
'2012-02-28',
'2012-02-29',
'2012-03-01',
'2012-03-02',
'2012-03-05',
'2012-03-06',
'2012-03-07',
'2012-03-08',
'2012-03-09',
'2012-03-12',
'2012-03-13',
'2012-03-14',
'2012-03-15',
'2012-03-16',
'2012-03-19',
'2012-03-20',
'2012-03-21',
'2012-03-22',
'2012-03-23',
'2012-03-26',
'2012-03-27',
'2012-03-28',
'2012-03-29',
'2012-03-30',
'2012-04-05',
'2012-04-06',
'2012-04-09',
'2012-04-10',
'2012-04-11',
'2012-04-12',
'2012-04-13',
'2012-04-16',
'2012-04-17',
'2012-04-18',
'2012-04-19',
'2012-04-20',
'2012-04-23',
'2012-04-24',
'2012-04-25',
'2012-04-26',
'2012-04-27',
'2012-05-02',
'2012-05-03',
'2012-05-04',
'2012-05-07',
'2012-05-08',
'2012-05-09',
'2012-05-10',
'2012-05-11',
'2012-05-14',
'2012-05-15',
'2012-05-16',
'2012-05-17',
'2012-05-18',
'2012-05-21',
'2012-05-22',
'2012-05-23',
'2012-05-24',
'2012-05-25',
'2012-05-28',
'2012-05-29',
'2012-05-30',
'2012-05-31',
'2012-06-01',
'2012-06-04',
'2012-06-05',
'2012-06-06',
'2012-06-07',
'2012-06-08',
'2012-06-11',
'2012-06-12',
'2012-06-13',
'2012-06-14',
'2012-06-15',
'2012-06-18',
'2012-06-19',
'2012-06-20',
'2012-06-21',
'2012-06-25',
'2012-06-26',
'2012-06-27',
'2012-06-28',
'2012-06-29',
'2012-07-02',
'2012-07-03',
'2012-07-04',
'2012-07-05',
'2012-07-06',
'2012-07-09',
'2012-07-10',
'2012-07-11',
'2012-07-12',
'2012-07-13',
'2012-07-16',
'2012-07-17',
'2012-07-18',
'2012-07-19',
'2012-07-20',
'2012-07-23',
'2012-07-24',
'2012-07-25',
'2012-07-26',
'2012-07-27',
'2012-07-30',
'2012-07-31',
'2012-08-01',
'2012-08-02',
'2012-08-03',
'2012-08-06',
'2012-08-07',
'2012-08-08',
'2012-08-09',
'2012-08-10',
'2012-08-13',
'2012-08-14',
'2012-08-15',
'2012-08-16',
'2012-08-17',
'2012-08-20',
'2012-08-21',
'2012-08-22',
'2012-08-23',
'2012-08-24',
'2012-08-27',
'2012-08-28',
'2012-08-29',
'2012-08-30',
'2012-08-31',
'2012-09-03',
'2012-09-04',
'2012-09-05',
'2012-09-06',
'2012-09-07',
'2012-09-10',
'2012-09-11',
'2012-09-12',
'2012-09-13',
'2012-09-14',
'2012-09-17',
'2012-09-18',
'2012-09-19',
'2012-09-20',
'2012-09-21',
'2012-09-24',
'2012-09-25',
'2012-09-26',
'2012-09-27',
'2012-09-28',
'2012-10-08',
'2012-10-09',
'2012-10-10',
'2012-10-11',
'2012-10-12',
'2012-10-15',
'2012-10-16',
'2012-10-17',
'2012-10-18',
'2012-10-19',
'2012-10-22',
'2012-10-23',
'2012-10-24',
'2012-10-25',
'2012-10-26',
'2012-10-29',
'2012-10-30',
'2012-10-31',
'2012-11-01',
'2012-11-02',
'2012-11-05',
'2012-11-06',
'2012-11-07',
'2012-11-08',
'2012-11-09',
'2012-11-12',
'2012-11-13',
'2012-11-14',
'2012-11-15',
'2012-11-16',
'2012-11-19',
'2012-11-20',
'2012-11-21',
'2012-11-22',
'2012-11-23',
'2012-11-26',
'2012-11-27',
'2012-11-28',
'2012-11-29',
'2012-11-30',
'2012-12-03',
'2012-12-04',
'2012-12-05',
'2012-12-06',
'2012-12-07',
'2012-12-10',
'2012-12-11',
'2012-12-12',
'2012-12-13',
'2012-12-14',
'2012-12-17',
'2012-12-18',
'2012-12-19',
'2012-12-20',
'2012-12-21',
'2012-12-24',
'2012-12-25',
'2012-12-26',
'2012-12-27',
'2012-12-28',
'2012-12-31',
'2013-01-04',
'2013-01-07',
'2013-01-08',
'2013-01-09',
'2013-01-10',
'2013-01-11',
'2013-01-14',
'2013-01-15',
'2013-01-16',
'2013-01-17',
'2013-01-18',
'2013-01-21',
'2013-01-22',
'2013-01-23',
'2013-01-24',
'2013-01-25',
'2013-01-28',
'2013-01-29',
'2013-01-30',
'2013-01-31',
'2013-02-01',
'2013-02-04',
'2013-02-05',
'2013-02-06',
'2013-02-07',
'2013-02-08',
'2013-02-18',
'2013-02-19',
'2013-02-20',
'2013-02-21',
'2013-02-22',
'2013-02-25',
'2013-02-26',
'2013-02-27',
'2013-02-28',
'2013-03-01',
'2013-03-04',
'2013-03-05',
'2013-03-06',
'2013-03-07',
'2013-03-08',
'2013-03-11',
'2013-03-12',
'2013-03-13',
'2013-03-14',
'2013-03-15',
'2013-03-18',
'2013-03-19',
'2013-03-20',
'2013-03-21',
'2013-03-22',
'2013-03-25',
'2013-03-26',
'2013-03-27',
'2013-03-28',
'2013-03-29',
'2013-04-01',
'2013-04-02',
'2013-04-03',
'2013-04-08',
'2013-04-09',
'2013-04-10',
'2013-04-11',
'2013-04-12',
'2013-04-15',
'2013-04-16',
'2013-04-17',
'2013-04-18',
'2013-04-19',
'2013-04-22',
'2013-04-23',
'2013-04-24',
'2013-04-25',
'2013-04-26',
'2013-05-02',
'2013-05-03',
'2013-05-06',
'2013-05-07',
'2013-05-08',
'2013-05-09',
'2013-05-10',
'2013-05-13',
'2013-05-14',
'2013-05-15',
'2013-05-16',
'2013-05-17',
'2013-05-20',
'2013-05-21',
'2013-05-22',
'2013-05-23',
'2013-05-24',
'2013-05-27',
'2013-05-28',
'2013-05-29',
'2013-05-30',
'2013-05-31',
'2013-06-03',
'2013-06-04',
'2013-06-05',
'2013-06-06',
'2013-06-07',
'2013-06-13',
'2013-06-14',
'2013-06-17',
'2013-06-18',
'2013-06-19',
'2013-06-20',
'2013-06-21',
'2013-06-24',
'2013-06-25',
'2013-06-26',
'2013-06-27',
'2013-06-28',
'2013-07-01',
'2013-07-02',
'2013-07-03',
'2013-07-04',
'2013-07-05',
'2013-07-08',
'2013-07-09',
'2013-07-10',
'2013-07-11',
'2013-07-12',
'2013-07-15',
'2013-07-16',
'2013-07-17',
'2013-07-18',
'2013-07-19',
'2013-07-22',
'2013-07-23',
'2013-07-24',
'2013-07-25',
'2013-07-26',
'2013-07-29',
'2013-07-30',
'2013-07-31',
'2013-08-01',
'2013-08-02',
'2013-08-05',
'2013-08-06',
'2013-08-07',
'2013-08-08',
'2013-08-09',
'2013-08-12',
'2013-08-13',
'2013-08-14',
'2013-08-15',
'2013-08-16',
'2013-08-19',
'2013-08-20',
'2013-08-21',
'2013-08-22',
'2013-08-23',
'2013-08-26',
'2013-08-27',
'2013-08-28',
'2013-08-29',
'2013-08-30',
'2013-09-02',
'2013-09-03',
'2013-09-04',
'2013-09-05',
'2013-09-06',
'2013-09-09',
'2013-09-10',
'2013-09-11',
'2013-09-12',
'2013-09-13',
'2013-09-16',
'2013-09-17',
'2013-09-18',
'2013-09-23',
'2013-09-24',
'2013-09-25',
'2013-09-26',
'2013-09-27',
'2013-09-30',
'2013-10-08',
'2013-10-09',
'2013-10-10',
'2013-10-11',
'2013-10-14',
'2013-10-15',
'2013-10-16',
'2013-10-17',
'2013-10-18',
'2013-10-21',
'2013-10-22',
'2013-10-23',
'2013-10-24',
'2013-10-25',
'2013-10-28',
'2013-10-29',
'2013-10-30',
'2013-10-31',
'2013-11-01',
'2013-11-04',
'2013-11-05',
'2013-11-06',
'2013-11-07',
'2013-11-08',
'2013-11-11',
'2013-11-12',
'2013-11-13',
'2013-11-14',
'2013-11-15',
'2013-11-18',
'2013-11-19',
'2013-11-20',
'2013-11-21',
'2013-11-22',
'2013-11-25',
'2013-11-26',
'2013-11-27',
'2013-11-28',
'2013-11-29',
'2013-12-02',
'2013-12-03',
'2013-12-04',
'2013-12-05',
'2013-12-06',
'2013-12-09',
'2013-12-10',
'2013-12-11',
'2013-12-12',
'2013-12-13',
'2013-12-16',
'2013-12-17',
'2013-12-18',
'2013-12-19',
'2013-12-20',
'2013-12-23',
'2013-12-24',
'2013-12-25',
'2013-12-26',
'2013-12-27',
'2013-12-30',
'2013-12-31',
'2014-01-02',
'2014-01-03',
'2014-01-06',
'2014-01-07',
'2014-01-08',
'2014-01-09',
'2014-01-10',
'2014-01-13',
'2014-01-14',
'2014-01-15',
'2014-01-16',
'2014-01-17',
'2014-01-20',
'2014-01-21',
'2014-01-22',
'2014-01-23',
'2014-01-24',
'2014-01-27',
'2014-01-28',
'2014-01-29',
'2014-01-30',
'2014-02-07',
'2014-02-10',
'2014-02-11',
'2014-02-12',
'2014-02-13',
'2014-02-14',
'2014-02-17',
'2014-02-18',
'2014-02-19',
'2014-02-20',
'2014-02-21',
'2014-02-24',
'2014-02-25',
'2014-02-26',
'2014-02-27',
'2014-02-28',
'2014-03-03',
'2014-03-04',
'2014-03-05',
'2014-03-06',
'2014-03-07',
'2014-03-10',
'2014-03-11',
'2014-03-12',
'2014-03-13',
'2014-03-14',
'2014-03-17',
'2014-03-18',
'2014-03-19',
'2014-03-20',
'2014-03-21',
'2014-03-24',
'2014-03-25',
'2014-03-26',
'2014-03-27',
'2014-03-28',
'2014-03-31',
'2014-04-01',
'2014-04-02',
'2014-04-03',
'2014-04-04',
'2014-04-08',
'2014-04-09',
'2014-04-10',
'2014-04-11',
'2014-04-14',
'2014-04-15',
'2014-04-16',
'2014-04-17',
'2014-04-18',
'2014-04-21',
'2014-04-22',
'2014-04-23',
'2014-04-24',
'2014-04-25',
'2014-04-28',
'2014-04-29',
'2014-04-30',
'2014-05-05',
'2014-05-06',
'2014-05-07',
'2014-05-08',
'2014-05-09',
'2014-05-12',
'2014-05-13',
'2014-05-14',
'2014-05-15',
'2014-05-16',
'2014-05-19',
'2014-05-20',
'2014-05-21',
'2014-05-22',
'2014-05-23',
'2014-05-26',
'2014-05-27',
'2014-05-28',
'2014-05-29',
'2014-05-30',
'2014-06-03',
'2014-06-04',
'2014-06-05',
'2014-06-06',
'2014-06-09',
'2014-06-10',
'2014-06-11',
'2014-06-12',
'2014-06-13',
'2014-06-16',
'2014-06-17',
'2014-06-18',
'2014-06-19',
'2014-06-20',
'2014-06-23',
'2014-06-24',
'2014-06-25',
'2014-06-26',
'2014-06-27',
'2014-06-30',
'2014-07-01',
'2014-07-02',
'2014-07-03',
'2014-07-04',
'2014-07-07',
'2014-07-08',
'2014-07-09',
'2014-07-10',
'2014-07-11',
'2014-07-14',
'2014-07-15',
'2014-07-16',
'2014-07-17',
'2014-07-18',
'2014-07-21',
'2014-07-22',
'2014-07-23',
'2014-07-24',
'2014-07-25',
'2014-07-28',
'2014-07-29',
'2014-07-30',
'2014-07-31',
'2014-08-01',
'2014-08-04',
'2014-08-05',
'2014-08-06',
'2014-08-07',
'2014-08-08',
'2014-08-11',
'2014-08-12',
'2014-08-13',
'2014-08-14',
'2014-08-15',
'2014-08-18',
'2014-08-19',
'2014-08-20',
'2014-08-21',
'2014-08-22',
'2014-08-25',
'2014-08-26',
'2014-08-27',
'2014-08-28',
'2014-08-29',
'2014-09-01',
'2014-09-02',
'2014-09-03',
'2014-09-04',
'2014-09-05',
'2014-09-09',
'2014-09-10',
'2014-09-11',
'2014-09-12',
'2014-09-15',
'2014-09-16',
'2014-09-17',
'2014-09-18',
'2014-09-19',
'2014-09-22',
'2014-09-23',
'2014-09-24',
'2014-09-25',
'2014-09-26',
'2014-09-29',
'2014-09-30',
'2014-10-08',
'2014-10-09',
'2014-10-10',
'2014-10-13',
'2014-10-14',
'2014-10-15',
'2014-10-16',
'2014-10-17',
'2014-10-20',
'2014-10-21',
'2014-10-22',
'2014-10-23',
'2014-10-24',
'2014-10-27',
'2014-10-28',
'2014-10-29',
'2014-10-30',
'2014-10-31',
'2014-11-03',
'2014-11-04',
'2014-11-05',
'2014-11-06',
'2014-11-07',
'2014-11-10',
'2014-11-11',
'2014-11-12',
'2014-11-13',
'2014-11-14',
'2014-11-17',
'2014-11-18',
'2014-11-19',
'2014-11-20',
'2014-11-21',
'2014-11-24',
'2014-11-25',
'2014-11-26',
'2014-11-27',
'2014-11-28',
'2014-12-01',
'2014-12-02',
'2014-12-03',
'2014-12-04',
'2014-12-05',
'2014-12-08',
'2014-12-09',
'2014-12-10',
'2014-12-11',
'2014-12-12',
'2014-12-15',
'2014-12-16',
'2014-12-17',
'2014-12-18',
'2014-12-19',
'2014-12-22',
'2014-12-23',
'2014-12-24',
'2014-12-25',
'2014-12-26',
'2014-12-29',
'2014-12-30',
'2014-12-31',
'2015-01-05',
'2015-01-06',
'2015-01-07',
'2015-01-08',
'2015-01-09',
'2015-01-12',
'2015-01-13',
'2015-01-14',
'2015-01-15',
'2015-01-16',
'2015-01-19',
'2015-01-20',
'2015-01-21',
'2015-01-22',
'2015-01-23',
'2015-01-26',
'2015-01-27',
'2015-01-28',
'2015-01-29',
'2015-01-30',
'2015-02-02',
'2015-02-03',
'2015-02-04',
'2015-02-05',
'2015-02-06',
'2015-02-09',
'2015-02-10',
'2015-02-11',
'2015-02-12',
'2015-02-13',
'2015-02-16',
'2015-02-17',
'2015-02-25',
'2015-02-26',
'2015-02-27',
'2015-03-02',
'2015-03-03',
'2015-03-04',
'2015-03-05',
'2015-03-06',
'2015-03-09',
'2015-03-10',
'2015-03-11',
'2015-03-12',
'2015-03-13',
'2015-03-16',
'2015-03-17',
'2015-03-18',
'2015-03-19',
'2015-03-20',
'2015-03-23',
'2015-03-24',
'2015-03-25',
'2015-03-26',
'2015-03-27',
'2015-03-30',
'2015-03-31',
'2015-04-01',
'2015-04-02',
'2015-04-03',
'2015-04-07',
'2015-04-08',
'2015-04-09',
'2015-04-10',
'2015-04-13',
'2015-04-14',
'2015-04-15',
'2015-04-16',
'2015-04-17',
'2015-04-20',
'2015-04-21',
'2015-04-22',
'2015-04-23',
'2015-04-24',
'2015-04-27',
'2015-04-28',
'2015-04-29',
'2015-04-30',
'2015-05-04',
'2015-05-05',
'2015-05-06',
'2015-05-07',
'2015-05-08',
'2015-05-11',
'2015-05-12',
'2015-05-13',
'2015-05-14',
'2015-05-15',
'2015-05-18',
'2015-05-19',
'2015-05-20',
'2015-05-21',
'2015-05-22',
'2015-05-25',
'2015-05-26',
'2015-05-27',
'2015-05-28',
'2015-05-29',
'2015-06-01',
'2015-06-02',
'2015-06-03',
'2015-06-04',
'2015-06-05',
'2015-06-08',
'2015-06-09',
'2015-06-10',
'2015-06-11',
'2015-06-12',
'2015-06-15',
'2015-06-16',
'2015-06-17',
'2015-06-18',
'2015-06-19',
'2015-06-23',
'2015-06-24',
'2015-06-25',
'2015-06-26',
'2015-06-29',
'2015-06-30',
'2015-07-01',
'2015-07-02',
'2015-07-03',
'2015-07-06',
'2015-07-07',
'2015-07-08',
'2015-07-09',
'2015-07-10',
'2015-07-13',
'2015-07-14',
'2015-07-15',
'2015-07-16',
'2015-07-17',
'2015-07-20',
'2015-07-21',
'2015-07-22',
'2015-07-23',
'2015-07-24',
'2015-07-27',
'2015-07-28',
'2015-07-29',
'2015-07-30',
'2015-07-31',
'2015-08-03',
'2015-08-04',
'2015-08-05',
'2015-08-06',
'2015-08-07',
'2015-08-10',
'2015-08-11',
'2015-08-12',
'2015-08-13',
'2015-08-14',
'2015-08-17',
'2015-08-18',
'2015-08-19',
'2015-08-20',
'2015-08-21',
'2015-08-24',
'2015-08-25',
'2015-08-26',
'2015-08-27',
'2015-08-28',
'2015-08-31',
'2015-09-01',
'2015-09-02',
'2015-09-07',
'2015-09-08',
'2015-09-09',
'2015-09-10',
'2015-09-11',
'2015-09-14',
'2015-09-15',
'2015-09-16',
'2015-09-17',
'2015-09-18',
'2015-09-21',
'2015-09-22',
'2015-09-23',
'2015-09-24',
'2015-09-25',
'2015-09-28',
'2015-09-29',
'2015-09-30',
'2015-10-08',
'2015-10-09',
'2015-10-12',
'2015-10-13',
'2015-10-14',
'2015-10-15',
'2015-10-16',
'2015-10-19',
'2015-10-20',
'2015-10-21',
'2015-10-22',
'2015-10-23',
'2015-10-26',
'2015-10-27',
'2015-10-28',
'2015-10-29',
'2015-10-30',
'2015-11-02',
'2015-11-03',
'2015-11-04',
'2015-11-05',
'2015-11-06',
'2015-11-09',
'2015-11-10',
'2015-11-11',
'2015-11-12',
'2015-11-13',
'2015-11-16',
'2015-11-17',
'2015-11-18',
'2015-11-19',
'2015-11-20',
'2015-11-23',
'2015-11-24',
'2015-11-25',
'2015-11-26',
'2015-11-27',
'2015-11-30',
'2015-12-01',
'2015-12-02',
'2015-12-03',
'2015-12-04',
'2015-12-07',
'2015-12-08',
'2015-12-09',
'2015-12-10',
'2015-12-11',
'2015-12-14',
'2015-12-15',
'2015-12-16',
'2015-12-17',
'2015-12-18',
'2015-12-21',
'2015-12-22',
'2015-12-23',
'2015-12-24',
'2015-12-25',
'2015-12-28',
'2015-12-29',
'2015-12-30',
'2015-12-31',
'2016-01-04',
'2016-01-05',
'2016-01-06',
'2016-01-07',
'2016-01-08',
'2016-01-11',
'2016-01-12',
'2016-01-13',
'2016-01-14',
'2016-01-15',
'2016-01-18',
'2016-01-19',
'2016-01-20',
'2016-01-21',
'2016-01-22',
'2016-01-25',
'2016-01-26',
'2016-01-27',
'2016-01-28',
'2016-01-29',
'2016-02-01',
'2016-02-02',
'2016-02-03',
'2016-02-04',
'2016-02-05',
'2016-02-15',
'2016-02-16',
'2016-02-17',
'2016-02-18',
'2016-02-19',
'2016-02-22',
'2016-02-23',
'2016-02-24',
'2016-02-25',
'2016-02-26',
'2016-02-29',
'2016-03-01',
'2016-03-02',
'2016-03-03',
'2016-03-04',
'2016-03-07',
'2016-03-08',
'2016-03-09',
'2016-03-10',
'2016-03-11',
'2016-03-14',
'2016-03-15',
'2016-03-16',
'2016-03-17',
'2016-03-18',
'2016-03-21',
'2016-03-22',
'2016-03-23',
'2016-03-24',
'2016-03-25',
'2016-03-28',
'2016-03-29',
'2016-03-30',
'2016-03-31',
'2016-04-01',
'2016-04-05',
'2016-04-06',
'2016-04-07',
'2016-04-08',
'2016-04-11',
'2016-04-12',
'2016-04-13',
'2016-04-14',
'2016-04-15',
'2016-04-18',
'2016-04-19',
'2016-04-20',
'2016-04-21',
'2016-04-22',
'2016-04-25',
'2016-04-26',
'2016-04-27',
'2016-04-28',
'2016-04-29',
'2016-05-03',
'2016-05-04',
'2016-05-05',
'2016-05-06',
'2016-05-09',
'2016-05-10',
'2016-05-11',
'2016-05-12',
'2016-05-13',
'2016-05-16',
'2016-05-17',
'2016-05-18',
'2016-05-19',
'2016-05-20',
'2016-05-23',
'2016-05-24',
'2016-05-25',
'2016-05-26',
'2016-05-27',
'2016-05-30',
'2016-05-31',
'2016-06-01',
'2016-06-02',
'2016-06-03',
'2016-06-06',
'2016-06-07',
'2016-06-08',
'2016-06-13',
'2016-06-14',
'2016-06-15',
'2016-06-16',
'2016-06-17',
'2016-06-20',
'2016-06-21',
'2016-06-22',
'2016-06-23',
'2016-06-24',
'2016-06-27',
'2016-06-28',
'2016-06-29',
'2016-06-30',
'2016-07-01',
'2016-07-04',
'2016-07-05',
'2016-07-06',
'2016-07-07',
'2016-07-08',
'2016-07-11',
'2016-07-12',
'2016-07-13',
'2016-07-14',
'2016-07-15',
'2016-07-18',
'2016-07-19',
'2016-07-20',
'2016-07-21',
'2016-07-22',
'2016-07-25',
'2016-07-26',
'2016-07-27',
'2016-07-28',
'2016-07-29',
'2016-08-01',
'2016-08-02',
'2016-08-03',
'2016-08-04',
'2016-08-05',
'2016-08-08',
'2016-08-09',
'2016-08-10',
'2016-08-11',
'2016-08-12',
'2016-08-15',
'2016-08-16',
'2016-08-17',
'2016-08-18',
'2016-08-19',
'2016-08-22',
'2016-08-23',
'2016-08-24',
'2016-08-25',
'2016-08-26',
'2016-08-29',
'2016-08-30',
'2016-08-31',
'2016-09-01',
'2016-09-02',
'2016-09-05',
'2016-09-06',
'2016-09-07',
'2016-09-08',
'2016-09-09',
'2016-09-12',
'2016-09-13',
'2016-09-14',
'2016-09-19',
'2016-09-20',
'2016-09-21',
'2016-09-22',
'2016-09-23',
'2016-09-26',
'2016-09-27',
'2016-09-28',
'2016-09-29',
'2016-09-30',
'2016-10-10',
'2016-10-11',
'2016-10-12',
'2016-10-13',
'2016-10-14',
'2016-10-17',
'2016-10-18',
'2016-10-19',
'2016-10-20',
'2016-10-21',
'2016-10-24',
'2016-10-25',
'2016-10-26',
'2016-10-27',
'2016-10-28',
'2016-10-31',
'2016-11-01',
'2016-11-02',
'2016-11-03',
'2016-11-04',
'2016-11-07',
'2016-11-08',
'2016-11-09',
'2016-11-10',
'2016-11-11',
'2016-11-14',
'2016-11-15',
'2016-11-16',
'2016-11-17',
'2016-11-18',
'2016-11-21',
'2016-11-22',
'2016-11-23',
'2016-11-24',
'2016-11-25',
'2016-11-28',
'2016-11-29',
'2016-11-30',
'2016-12-01',
'2016-12-02',
'2016-12-05',
'2016-12-06',
'2016-12-07',
'2016-12-08',
'2016-12-09',
'2016-12-12',
'2016-12-13',
'2016-12-14',
'2016-12-15',
'2016-12-16',
'2016-12-19',
'2016-12-20',
'2016-12-21',
'2016-12-22',
'2016-12-23',
'2016-12-26',
'2016-12-27',
'2016-12-28',
'2016-12-29',
'2016-12-30',
'2017-01-03',
'2017-01-04',
'2017-01-05',
'2017-01-06',
'2017-01-09',
'2017-01-10',
'2017-01-11',
'2017-01-12',
'2017-01-13',
'2017-01-16',
'2017-01-17',
'2017-01-18',
'2017-01-19',
'2017-01-20',
'2017-01-23',
'2017-01-24',
'2017-01-25',
'2017-01-26',
'2017-02-03',
'2017-02-06',
'2017-02-07',
'2017-02-08',
'2017-02-09',
'2017-02-10',
'2017-02-13',
'2017-02-14',
'2017-02-15',
'2017-02-16',
'2017-02-17',
'2017-02-20',
'2017-02-21',
'2017-02-22',
'2017-02-23',
'2017-02-24',
'2017-02-27',
'2017-02-28',
'2017-03-01',
'2017-03-02',
'2017-03-03',
'2017-03-06',
'2017-03-07',
'2017-03-08',
'2017-03-09',
'2017-03-10',
'2017-03-13',
'2017-03-14',
'2017-03-15',
'2017-03-16',
'2017-03-17',
'2017-03-20',
'2017-03-21',
'2017-03-22',
'2017-03-23',
'2017-03-24',
'2017-03-27',
'2017-03-28',
'2017-03-29',
'2017-03-30',
'2017-03-31',
'2017-04-05',
'2017-04-06',
'2017-04-07',
'2017-04-10',
'2017-04-11',
'2017-04-12',
'2017-04-13',
'2017-04-14',
'2017-04-17',
'2017-04-18',
'2017-04-19',
'2017-04-20',
'2017-04-21',
'2017-04-24',
'2017-04-25',
'2017-04-26',
'2017-04-27',
'2017-04-28',
'2017-05-02',
'2017-05-03',
'2017-05-04',
'2017-05-05',
'2017-05-08',
'2017-05-09',
'2017-05-10',
'2017-05-11',
'2017-05-12',
'2017-05-15',
'2017-05-16',
'2017-05-17',
'2017-05-18',
'2017-05-19',
'2017-05-22',
'2017-05-23',
'2017-05-24',
'2017-05-25',
'2017-05-26',
'2017-05-31',
'2017-06-01',
'2017-06-02',
'2017-06-05',
'2017-06-06',
'2017-06-07',
'2017-06-08',
'2017-06-09',
'2017-06-12',
'2017-06-13',
'2017-06-14',
'2017-06-15',
'2017-06-16',
'2017-06-19',
'2017-06-20',
'2017-06-21',
'2017-06-22',
'2017-06-23',
'2017-06-26',
'2017-06-27',
'2017-06-28',
'2017-06-29',
'2017-06-30',
'2017-07-03',
'2017-07-04',
'2017-07-05',
'2017-07-06',
'2017-07-07',
'2017-07-10',
'2017-07-11',
'2017-07-12',
'2017-07-13',
'2017-07-14',
'2017-07-17',
'2017-07-18',
'2017-07-19',
'2017-07-20',
'2017-07-21',
'2017-07-24',
'2017-07-25',
'2017-07-26',
'2017-07-27',
'2017-07-28',
'2017-07-31',
'2017-08-01',
'2017-08-02',
'2017-08-03',
'2017-08-04',
'2017-08-07',
'2017-08-08',
'2017-08-09',
'2017-08-10',
'2017-08-11',
'2017-08-14',
'2017-08-15',
'2017-08-16',
'2017-08-17',
'2017-08-18',
'2017-08-21',
'2017-08-22',
'2017-08-23',
'2017-08-24',
'2017-08-25',
'2017-08-28',
'2017-08-29',
'2017-08-30',
'2017-08-31',
'2017-09-01',
'2017-09-04',
'2017-09-05',
'2017-09-06',
'2017-09-07',
'2017-09-08',
'2017-09-11',
'2017-09-12',
'2017-09-13',
'2017-09-14',
'2017-09-15',
'2017-09-18',
'2017-09-19',
'2017-09-20',
'2017-09-21',
'2017-09-22',
'2017-09-25',
'2017-09-26',
'2017-09-27',
'2017-09-28',
'2017-09-29',
'2017-10-09',
'2017-10-10',
'2017-10-11',
'2017-10-12',
'2017-10-13',
'2017-10-16',
'2017-10-17',
'2017-10-18',
'2017-10-19',
'2017-10-20',
'2017-10-23',
'2017-10-24',
'2017-10-25',
'2017-10-26',
'2017-10-27',
'2017-10-30',
'2017-10-31',
'2017-11-01',
'2017-11-02',
'2017-11-03',
'2017-11-06',
'2017-11-07',
'2017-11-08',
'2017-11-09',
'2017-11-10',
'2017-11-13',
'2017-11-14',
'2017-11-15',
'2017-11-16',
'2017-11-17',
'2017-11-20',
'2017-11-21',
'2017-11-22',
'2017-11-23',
'2017-11-24',
'2017-11-27',
'2017-11-28',
'2017-11-29',
'2017-11-30',
'2017-12-01',
'2017-12-04',
'2017-12-05',
'2017-12-06',
'2017-12-07',
'2017-12-08',
'2017-12-11',
'2017-12-12',
'2017-12-13',
'2017-12-14',
'2017-12-15',
'2017-12-18',
'2017-12-19',
'2017-12-20',
'2017-12-21',
'2017-12-22',
'2017-12-25',
'2017-12-26',
'2017-12-27',
'2017-12-28',
'2017-12-29',
'2018-01-02',
'2018-01-03',
'2018-01-04',
'2018-01-05',
'2018-01-08',
'2018-01-09',
'2018-01-10',
'2018-01-11',
'2018-01-12',
'2018-01-15',
'2018-01-16',
'2018-01-17',
'2018-01-18',
'2018-01-19',
'2018-01-22',
'2018-01-23',
'2018-01-24',
'2018-01-25',
'2018-01-26',
'2018-01-29',
'2018-01-30',
'2018-01-31',
'2018-02-01',
'2018-02-02',
'2018-02-05',
'2018-02-06',
'2018-02-07',
'2018-02-08',
'2018-02-09',
'2018-02-12',
'2018-02-13',
'2018-02-14',
'2018-02-22',
'2018-02-23',
'2018-02-26',
'2018-02-27',
'2018-02-28',
'2018-03-01',
'2018-03-02',
'2018-03-05',
'2018-03-06',
'2018-03-07',
'2018-03-08',
'2018-03-09',
'2018-03-12',
'2018-03-13',
'2018-03-14',
'2018-03-15',
'2018-03-16',
'2018-03-19',
'2018-03-20',
'2018-03-21',
'2018-03-22',
'2018-03-23',
'2018-03-26',
'2018-03-27',
'2018-03-28',
'2018-03-29',
'2018-03-30',
'2018-04-02',
'2018-04-03',
'2018-04-04',
'2018-04-09',
'2018-04-10',
'2018-04-11',
'2018-04-12',
'2018-04-13',
'2018-04-16',
'2018-04-17',
'2018-04-18',
'2018-04-19',
'2018-04-20',
'2018-04-23',
'2018-04-24',
'2018-04-25',
'2018-04-26',
'2018-04-27',
'2018-05-02',
'2018-05-03',
'2018-05-04',
'2018-05-07',
'2018-05-08',
'2018-05-09',
'2018-05-10',
'2018-05-11',
'2018-05-14',
'2018-05-15',
'2018-05-16',
'2018-05-17',
'2018-05-18',
'2018-05-21',
'2018-05-22',
'2018-05-23',
'2018-05-24',
'2018-05-25',
'2018-05-28',
'2018-05-29',
'2018-05-30',
'2018-05-31',
'2018-06-01',
'2018-06-04',
'2018-06-05',
'2018-06-06',
'2018-06-07',
'2018-06-08',
'2018-06-11',
'2018-06-12',
'2018-06-13',
'2018-06-14',
'2018-06-15',
'2018-06-19',
'2018-06-20',
'2018-06-21',
'2018-06-22',
'2018-06-25',
'2018-06-26',
'2018-06-27',
'2018-06-28',
'2018-06-29',
'2018-07-02',
'2018-07-03',
'2018-07-04',
'2018-07-05',
'2018-07-06',
'2018-07-09',
'2018-07-10',
'2018-07-11',
'2018-07-12',
'2018-07-13',
'2018-07-16',
'2018-07-17',
'2018-07-18',
'2018-07-19',
'2018-07-20',
'2018-07-23',
'2018-07-24',
'2018-07-25',
'2018-07-26',
'2018-07-27',
'2018-07-30',
'2018-07-31',
'2018-08-01',
'2018-08-02',
'2018-08-03',
'2018-08-06',
'2018-08-07',
'2018-08-08',
'2018-08-09',
'2018-08-10',
'2018-08-13',
'2018-08-14',
'2018-08-15',
'2018-08-16',
'2018-08-17',
'2018-08-20',
'2018-08-21',
'2018-08-22',
'2018-08-23',
'2018-08-24',
'2018-08-27',
'2018-08-28',
'2018-08-29',
'2018-08-30',
'2018-08-31',
'2018-09-03',
'2018-09-04',
'2018-09-05',
'2018-09-06',
'2018-09-07',
'2018-09-10',
'2018-09-11',
'2018-09-12',
'2018-09-13',
'2018-09-14',
'2018-09-17',
'2018-09-18',
'2018-09-19',
'2018-09-20',
'2018-09-21',
'2018-09-25',
'2018-09-26',
'2018-09-27',
'2018-09-28',
'2018-10-08',
'2018-10-09',
'2018-10-10',
'2018-10-11',
'2018-10-12',
'2018-10-15',
'2018-10-16',
'2018-10-17',
'2018-10-18',
'2018-10-19',
'2018-10-22',
'2018-10-23',
'2018-10-24',
'2018-10-25',
'2018-10-26',
'2018-10-29',
'2018-10-30',
'2018-10-31',
'2018-11-01',
'2018-11-02',
'2018-11-05',
'2018-11-06',
'2018-11-07',
'2018-11-08',
'2018-11-09',
'2018-11-12',
'2018-11-13',
'2018-11-14',
'2018-11-15',
'2018-11-16',
'2018-11-19',
'2018-11-20',
'2018-11-21',
'2018-11-22',
'2018-11-23',
'2018-11-26',
'2018-11-27',
'2018-11-28',
'2018-11-29',
'2018-11-30',
'2018-12-03',
'2018-12-04',
'2018-12-05',
'2018-12-06',
'2018-12-07',
'2018-12-10',
'2018-12-11',
'2018-12-12',
'2018-12-13',
'2018-12-14',
'2018-12-17',
'2018-12-18',
'2018-12-19',
'2018-12-20',
'2018-12-21',
'2018-12-24',
'2018-12-25',
'2018-12-26',
'2018-12-27',
'2018-12-28',
'2019-01-02',
'2019-01-03',
'2019-01-04',
'2019-01-07',
'2019-01-08',
'2019-01-09',
'2019-01-10',
'2019-01-11',
'2019-01-14',
'2019-01-15',
'2019-01-16',
'2019-01-17',
'2019-01-18',
'2019-01-21',
'2019-01-22',
'2019-01-23',
'2019-01-24',
'2019-01-25',
'2019-01-28',
'2019-01-29',
'2019-01-30',
'2019-01-31',
'2019-02-01',
'2019-02-11',
'2019-02-12',
'2019-02-13',
'2019-02-14',
'2019-02-15',
'2019-02-18',
'2019-02-19',
'2019-02-20',
'2019-02-21',
'2019-02-22',
'2019-02-25',
'2019-02-26',
'2019-02-27',
'2019-02-28',
'2019-03-01',
'2019-03-04',
'2019-03-05',
'2019-03-06',
'2019-03-07',
'2019-03-08',
'2019-03-11',
'2019-03-12',
'2019-03-13',
'2019-03-14',
'2019-03-15',
'2019-03-18',
'2019-03-19',
'2019-03-20',
'2019-03-21',
'2019-03-22',
'2019-03-25',
'2019-03-26',
'2019-03-27',
'2019-03-28',
'2019-03-29',
'2019-04-01',
'2019-04-02',
'2019-04-03',
'2019-04-04',
'2019-04-08',
'2019-04-09',
'2019-04-10',
'2019-04-11',
'2019-04-12',
'2019-04-15',
'2019-04-16',
'2019-04-17',
'2019-04-18',
'2019-04-19',
'2019-04-22',
'2019-04-23',
'2019-04-24',
'2019-04-25',
'2019-04-26',
'2019-04-29',
'2019-04-30',
'2019-05-02',
'2019-05-03',
'2019-05-06',
'2019-05-07',
'2019-05-08',
'2019-05-09',
'2019-05-10',
'2019-05-13',
'2019-05-14',
'2019-05-15',
'2019-05-16',
'2019-05-17',
'2019-05-20',
'2019-05-21',
'2019-05-22',
'2019-05-23',
'2019-05-24',
'2019-05-27',
'2019-05-28',
'2019-05-29',
'2019-05-30',
'2019-05-31',
'2019-06-03',
'2019-06-04',
'2019-06-05',
'2019-06-06',
'2019-06-10',
'2019-06-11',
'2019-06-12',
'2019-06-13',
'2019-06-14',
'2019-06-17',
'2019-06-18',
'2019-06-19',
'2019-06-20',
'2019-06-21',
'2019-06-24',
'2019-06-25',
'2019-06-26',
'2019-06-27',
'2019-06-28',
'2019-07-01',
'2019-07-02',
'2019-07-03',
'2019-07-04',
'2019-07-05',
'2019-07-08',
'2019-07-09',
'2019-07-10',
'2019-07-11',
'2019-07-12',
'2019-07-15',
'2019-07-16',
'2019-07-17',
'2019-07-18',
'2019-07-19',
'2019-07-22',
'2019-07-23',
'2019-07-24',
'2019-07-25',
'2019-07-26',
'2019-07-29',
'2019-07-30',
'2019-07-31',
'2019-08-01',
'2019-08-02',
'2019-08-05',
'2019-08-06',
'2019-08-07',
'2019-08-08',
'2019-08-09',
'2019-08-12',
'2019-08-13',
'2019-08-14',
'2019-08-15',
'2019-08-16',
'2019-08-19',
'2019-08-20',
'2019-08-21',
'2019-08-22',
'2019-08-23',
'2019-08-26',
'2019-08-27',
'2019-08-28',
'2019-08-29',
'2019-08-30',
'2019-09-02',
'2019-09-03',
'2019-09-04',
'2019-09-05',
'2019-09-06',
'2019-09-09',
'2019-09-10',
'2019-09-11',
'2019-09-12',
'2019-09-16',
'2019-09-17',
'2019-09-18',
'2019-09-19',
'2019-09-20',
'2019-09-23',
'2019-09-24',
'2019-09-25',
'2019-09-26',
'2019-09-27',
'2019-09-30',
'2019-10-08',
'2019-10-09',
'2019-10-10',
'2019-10-11',
'2019-10-14',
'2019-10-15',
'2019-10-16',
'2019-10-17',
'2019-10-18',
'2019-10-21',
'2019-10-22',
'2019-10-23',
'2019-10-24',
'2019-10-25',
'2019-10-28',
'2019-10-29',
'2019-10-30',
'2019-10-31',
'2019-11-01',
'2019-11-04',
'2019-11-05',
'2019-11-06',
'2019-11-07',
'2019-11-08',
'2019-11-11',
'2019-11-12',
'2019-11-13',
'2019-11-14',
'2019-11-15',
'2019-11-18',
'2019-11-19',
'2019-11-20',
'2019-11-21',
'2019-11-22',
'2019-11-25',
'2019-11-26',
'2019-11-27',
'2019-11-28',
'2019-11-29',
'2019-12-02',
'2019-12-03',
'2019-12-04',
'2019-12-05',
'2019-12-06',
'2019-12-09',
'2019-12-10',
'2019-12-11',
'2019-12-12',
'2019-12-13',
'2019-12-16',
'2019-12-17',
'2019-12-18',
'2019-12-19',
'2019-12-20',
'2019-12-23',
'2019-12-24',
'2019-12-25',
'2019-12-26',
'2019-12-27',
'2019-12-30',
'2019-12-31',
'2020-01-02',
'2020-01-03',
'2020-01-06',
'2020-01-07',
'2020-01-08',
'2020-01-09',
'2020-01-10',
'2020-01-13',
'2020-01-14',
'2020-01-15',
'2020-01-16',
'2020-01-17',
'2020-01-20',
'2020-01-21',
'2020-01-22',
'2020-01-23',
'2020-02-03',
'2020-02-04',
'2020-02-05',
'2020-02-06',
'2020-02-07',
'2020-02-10',
'2020-02-11',
'2020-02-12',
'2020-02-13',
'2020-02-14',
'2020-02-17',
'2020-02-18',
'2020-02-19',
'2020-02-20',
'2020-02-21',
'2020-02-24',
'2020-02-25',
'2020-02-26',
'2020-02-27',
'2020-02-28',
'2020-03-02',
'2020-03-03',
'2020-03-04',
'2020-03-05',
'2020-03-06',
'2020-03-09',
'2020-03-10',
'2020-03-11',
'2020-03-12',
'2020-03-13',
'2020-03-16',
'2020-03-17',
'2020-03-18',
'2020-03-19',
'2020-03-20',
'2020-03-23',
'2020-03-24',
'2020-03-25',
'2020-03-26',
'2020-03-27',
'2020-03-30',
'2020-03-31',
'2020-04-01',
'2020-04-02',
'2020-04-03',
'2020-04-07',
'2020-04-08',
'2020-04-09',
'2020-04-10',
'2020-04-13',
'2020-04-14',
'2020-04-15',
'2020-04-16',
'2020-04-17',
'2020-04-20',
'2020-04-21',
'2020-04-22',
'2020-04-23',
'2020-04-24',
'2020-04-27',
'2020-04-28',
'2020-04-29',
'2020-04-30',
'2020-05-06',
'2020-05-07',
'2020-05-08',
'2020-05-11',
'2020-05-12',
'2020-05-13',
'2020-05-14',
'2020-05-15',
'2020-05-18',
'2020-05-19',
'2020-05-20',
'2020-05-21',
'2020-05-22',
'2020-05-25',
'2020-05-26',
'2020-05-27',
'2020-05-28',
'2020-05-29',
'2020-06-01',
'2020-06-02',
'2020-06-03',
'2020-06-04',
'2020-06-05',
'2020-06-08',
'2020-06-09',
'2020-06-10',
'2020-06-11',
'2020-06-12',
'2020-06-15',
'2020-06-16',
'2020-06-17',
'2020-06-18',
'2020-06-19',
'2020-06-22',
'2020-06-23',
'2020-06-24',
'2020-06-29',
'2020-06-30',
'2020-07-01',
'2020-07-02',
'2020-07-03',
'2020-07-06',
'2020-07-07',
'2020-07-08',
'2020-07-09',
'2020-07-10',
'2020-07-13',
'2020-07-14',
'2020-07-15',
'2020-07-16',
'2020-07-17',
'2020-07-20',
'2020-07-21',
'2020-07-22',
'2020-07-23',
'2020-07-24',
'2020-07-27',
'2020-07-28',
'2020-07-29',
'2020-07-30',
'2020-07-31',
'2020-08-03',
'2020-08-04',
'2020-08-05',
'2020-08-06',
'2020-08-07',
'2020-08-10',
'2020-08-11',
'2020-08-12',
'2020-08-13',
'2020-08-14',
'2020-08-17',
'2020-08-18',
'2020-08-19',
'2020-08-20',
'2020-08-21',
'2020-08-24',
'2020-08-25',
'2020-08-26',
'2020-08-27',
'2020-08-28',
'2020-08-31',
'2020-09-01',
'2020-09-02',
'2020-09-03',
'2020-09-04',
'2020-09-07',
'2020-09-08',
'2020-09-09',
'2020-09-10',
'2020-09-11',
'2020-09-14',
'2020-09-15',
'2020-09-16',
'2020-09-17',
'2020-09-18',
'2020-09-21',
'2020-09-22',
'2020-09-23',
'2020-09-24',
'2020-09-25',
'2020-09-28',
'2020-09-29',
'2020-09-30',
'2020-10-09',
'2020-10-12',
'2020-10-13',
'2020-10-14',
'2020-10-15',
'2020-10-16',
'2020-10-19',
'2020-10-20',
'2020-10-21',
'2020-10-22',
'2020-10-23',
'2020-10-26',
'2020-10-27',
'2020-10-28',
'2020-10-29',
'2020-10-30',
'2020-11-02',
'2020-11-03',
'2020-11-04',
'2020-11-05',
'2020-11-06',
'2020-11-09',
'2020-11-10',
'2020-11-11',
'2020-11-12',
'2020-11-13',
'2020-11-16',
'2020-11-17',
'2020-11-18',
'2020-11-19',
'2020-11-20',
'2020-11-23',
'2020-11-24',
'2020-11-25',
'2020-11-26',
'2020-11-27',
'2020-11-30',
'2020-12-01',
'2020-12-02',
'2020-12-03',
'2020-12-04',
'2020-12-07',
'2020-12-08',
'2020-12-09',
'2020-12-10',
'2020-12-11',
'2020-12-14',
'2020-12-15',
'2020-12-16',
'2020-12-17',
'2020-12-18',
'2020-12-21',
'2020-12-22',
'2020-12-23',
'2020-12-24',
'2020-12-25',
'2020-12-28',
'2020-12-29',
'2020-12-30',
'2020-12-31'
]
def QA_util_if_trade(day):
    '''
    Whether the given day is a trading day.
    Looks the date up in the trade_date_sse list above.
    :param day: str, e.g. '2018-11-11'
    :return: bool
    '''
if day in trade_date_sse:
return True
else:
return False
def QA_util_if_tradetime(
        _time=None,
        market=MARKET_TYPE.STOCK_CN,
        code=None
):
    'Whether the given time falls inside trading hours'
    # Default to the call time; a datetime.datetime.now() default placed in the
    # signature would be evaluated only once, at import time.
    if _time is None:
        _time = datetime.datetime.now()
    _time = datetime.datetime.strptime(str(_time)[0:19], '%Y-%m-%d %H:%M:%S')
    if market is MARKET_TYPE.STOCK_CN:
        if QA_util_if_trade(str(_time.date())[0:10]):
            if _time.hour in [10, 13, 14]:
                return True
            elif _time.hour in [
                    9
            ] and _time.minute >= 15:  # start at 9:15 to include the 9:15-9:30 pre-market call auction
return True
elif _time.hour in [11] and _time.minute <= 30:
return True
else:
return False
else:
return False
elif market is MARKET_TYPE.FUTURE_CN:
        # only rebar (rb) contracts are handled for now
if code[0:2] in ['rb', 'RB']:
if QA_util_if_trade(str(_time.date())[0:10]):
if _time.hour in [9, 10, 14, 21, 22]:
return True
elif _time.hour in [13] and _time.minute >= 30:
return True
elif _time.hour in [11] and _time.minute <= 30:
return True
else:
return False
else:
return False
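# Illustrative usage (added sketch, not part of the original module): how the two
# predicates above are typically combined. The sample dates are assumptions taken
# from the trade_date_sse list ('2018-11-12' is a trading Monday, '2018-11-11' is a Sunday).
def _example_trade_checks():
    assert QA_util_if_trade('2018-11-12') is True
    assert QA_util_if_trade('2018-11-11') is False
    # 10:30 on a trading day falls inside the continuous A-share session.
    print(QA_util_if_tradetime(datetime.datetime(2018, 11, 12, 10, 30)))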
def QA_util_get_next_day(date, n=1):
    '''
    Get the next (n-th) trading day after the given date.
    :param date: str, e.g. '2018-11-11'
    :param n: int
    :return: str, e.g. '2018-11-12'
    '''
date = str(date)[0:10]
return QA_util_date_gap(date, n, 'gt')
def QA_util_get_last_day(date, n=1):
    '''
    Get the previous (n-th) trading day before the given date.
    :param date: str, e.g. '2018-11-11'
    :param n: int
    :return: str, e.g. '2018-11-10'
    '''
date = str(date)[0:10]
return QA_util_date_gap(date, n, 'lt')
def QA_util_get_last_datetime(datetime, day=1):
date = str(datetime)[0:10]
return '{} {}'.format(QA_util_date_gap(date, day, 'lt'), str(datetime)[11:])
def QA_util_get_next_datetime(datetime, day=1):
date = str(datetime)[0:10]
return '{} {}'.format(QA_util_date_gap(date, day, 'gt'), str(datetime)[11:])
def QA_util_get_real_date(date, trade_list=trade_date_sse, towards=-1):
"""
获取真实的交易日期,其中,第三个参数towards是表示向前/向后推
towards=1 日期向后迭代
towards=-1 日期向前迭代
@ yutiansut
"""
date = str(date)[0:10]
if towards == 1:
while date not in trade_list:
date = str(
datetime.datetime.strptime(str(date)[0:10],
'%Y-%m-%d') +
datetime.timedelta(days=1)
)[0:10]
else:
return str(date)[0:10]
elif towards == -1:
while date not in trade_list:
date = str(
datetime.datetime.strptime(str(date)[0:10],
'%Y-%m-%d') -
datetime.timedelta(days=1)
)[0:10]
else:
return str(date)[0:10]
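# Illustrative usage (added sketch, not part of the original module): '2018-10-01'
# falls inside the National Day holiday and is absent from trade_date_sse, so the
# lookup walks to the nearest real trading day in the requested direction.
def _example_real_date():
    print(QA_util_get_real_date('2018-10-01', towards=-1))  # '2018-09-28'
    print(QA_util_get_real_date('2018-10-01', towards=1))   # '2018-10-08'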
def QA_util_get_real_datelist(start, end):
"""
取数据的真实区间,返回的时候用 start,end=QA_util_get_real_datelist
@yutiansut
2017/8/10
当start end中间没有交易日 返回None, None
@yutiansut/ 2017-12-19
"""
real_start = QA_util_get_real_date(start, trade_date_sse, 1)
real_end = QA_util_get_real_date(end, trade_date_sse, -1)
if trade_date_sse.index(real_start) > trade_date_sse.index(real_end):
return None, None
else:
return (real_start, real_end)
def QA_util_get_trade_range(start, end):
    'Return the concrete list of trading dates inside the given range'
start, end = QA_util_get_real_datelist(start, end)
if start is not None:
return trade_date_sse[trade_date_sse
.index(start):trade_date_sse.index(end) + 1:1]
else:
return None
def QA_util_get_trade_gap(start, end):
    'Return the number of trading days between start_day and end_day, counting both endpoints'
start, end = QA_util_get_real_datelist(start, end)
if start is not None:
return trade_date_sse.index(end) + 1 - trade_date_sse.index(start)
else:
return 0
def QA_util_date_gap(date, gap, methods):
    '''
    :param date: starting date string, e.g. '2018-11-11'
    :param gap: int, how many trading days to step
    :param methods: 'gt' greater than, 'gte' greater or equal, 'lt' less than,
                    'lte' less or equal, 'eq'/'=='/'=' equal
    :return: str, e.g. '2000-01-01'
    '''
try:
if methods in ['>', 'gt']:
return trade_date_sse[trade_date_sse.index(date) + gap]
elif methods in ['>=', 'gte']:
return trade_date_sse[trade_date_sse.index(date) + gap - 1]
elif methods in ['<', 'lt']:
return trade_date_sse[trade_date_sse.index(date) - gap]
elif methods in ['<=', 'lte']:
return trade_date_sse[trade_date_sse.index(date) - gap + 1]
elif methods in ['==', '=', 'eq']:
return date
    except (ValueError, IndexError):
        # date is not in trade_date_sse, or the gap runs off the end of the list
        return 'wrong date'
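# Illustrative usage (added sketch, not part of the original module): stepping one
# trading day back from Monday '2018-11-12' skips the weekend and lands on Friday
# '2018-11-09', while stepping forward gives the next listed session.
def _example_date_gap():
    print(QA_util_date_gap('2018-11-12', 1, 'gt'))  # '2018-11-13'
    print(QA_util_date_gap('2018-11-12', 1, 'lt'))  # '2018-11-09'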
def QA_util_get_trade_datetime(dt=None):
    """Real trade date for the given datetime.
    Returns:
        str -- the trade date this datetime belongs to
    """
    # Default to the call time; a datetime.datetime.now() default in the signature
    # would only be evaluated once, at import time.
    if dt is None:
        dt = datetime.datetime.now()
if QA_util_if_trade(str(dt.date())) and dt.time() < datetime.time(15, 0, 0):
return str(dt.date())
else:
return QA_util_get_real_date(str(dt.date()), trade_date_sse, 1)
def QA_util_get_order_datetime(dt):
"""委托的真实日期
Returns:
[type] -- [description]
"""
#dt= datetime.datetime.now()
dt = datetime.datetime.strptime(str(dt)[0:19], '%Y-%m-%d %H:%M:%S')
if QA_util_if_trade(str(dt.date())) and dt.time() < datetime.time(15, 0, 0):
return str(dt)
else:
# print('before')
# print(QA_util_date_gap(str(dt.date()),1,'lt'))
return '{} {}'.format(
QA_util_date_gap(str(dt.date()),
1,
'lt'),
dt.time()
)
def QA_util_future_to_tradedatetime(real_datetime):
"""输入是真实交易时间,返回按期货交易所规定的时间* 适用于tb/文华/博弈的转换
Arguments:
real_datetime {[type]} -- [description]
Returns:
[type] -- [description]
"""
if len(str(real_datetime)) >= 19:
dt = datetime.datetime.strptime(
str(real_datetime)[0:19],
'%Y-%m-%d %H:%M:%S'
)
return dt if dt.time(
) < datetime.time(21,
0) else QA_util_get_next_datetime(dt,
1)
elif len(str(real_datetime)) == 16:
dt = datetime.datetime.strptime(
str(real_datetime)[0:16],
'%Y-%m-%d %H:%M'
)
return dt if dt.time(
) < datetime.time(21,
0) else QA_util_get_next_datetime(dt,
1)
def QA_util_future_to_realdatetime(trade_datetime):
"""输入是交易所规定的时间,返回真实时间*适用于通达信的时间转换
Arguments:
trade_datetime {[type]} -- [description]
Returns:
[type] -- [description]
"""
if len(str(trade_datetime)) == 19:
dt = datetime.datetime.strptime(
str(trade_datetime)[0:19],
'%Y-%m-%d %H:%M:%S'
)
return dt if dt.time(
) < datetime.time(21,
0) else QA_util_get_last_datetime(dt,
1)
elif len(str(trade_datetime)) == 16:
dt = datetime.datetime.strptime(
str(trade_datetime)[0:16],
'%Y-%m-%d %H:%M'
)
return dt if dt.time(
) < datetime.time(21,
0) else QA_util_get_last_datetime(dt,
1)
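# Illustrative usage (added sketch, not part of the original module): night-session
# ticks at or after 21:00 are attributed to the next trading day under the exchange
# convention, while daytime ticks pass through unchanged.
def _example_future_datetime_mapping():
    print(QA_util_future_to_tradedatetime('2018-11-12 21:05:00'))  # 2018-11-13 21:05:00
    print(QA_util_future_to_tradedatetime('2018-11-12 14:30:00'))  # 2018-11-12 14:30:00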
| 18.466579 | 80 | 0.451922 |
f0156f170be155e4079edf22910bfa3a0bcb131c | 13,758 | py | Python | output_report.py | Lumpkins/output_report | f4b190c468765dcc058af13966b43f031122add6 | ["MIT"] | null | null | null | output_report.py | Lumpkins/output_report | f4b190c468765dcc058af13966b43f031122add6 | ["MIT"] | null | null | null | output_report.py | Lumpkins/output_report | f4b190c468765dcc058af13966b43f031122add6 | ["MIT"] | null | null | null |
import sys
import os
import matplotlib.pyplot as plt
import docx
from docx.enum.text import WD_ALIGN_PARAGRAPH
from plot_params import ePlot_type
import pandas as pd
import pdb
class OutputReport():
def __init__(self,**kwargs):
self.loc=kwargs.get("loc",r"C:\test.docx")
self.debug_mode=kwargs.get("debug_mode",False)
self.file=docx.Document()
self.file.save(self.loc)
def add_plot(self,plot):#just takes a single plot() object
if not plot.generated:
plot.generate_plot()
plot.figure.savefig("temp.png",bbox_inches='tight')
for section in self.file.sections:
width=8.5-section.left_margin.inches-section.right_margin.inches
self.file.add_picture("temp.png",width=docx.shared.Inches(width))
os.remove("temp.png")
if self.debug_mode:
plot.figure.show()
def add_table(self,data,**kwargs):
merges = kwargs.get("merges", [])
if isinstance(data,pd.DataFrame):
data=[data.columns.values.tolist()]+ data.values.tolist()
table=self.file.add_table(rows=len(data),cols=len(data[0]))
for i,_ in enumerate(data):
for j,_ in enumerate(data[i]):
cell=table.cell(i,j)
cell.text=str(data[i][j])
for merge in merges:
a=table.cell(merge[0][0],merge[0][1])
b=table.cell(merge[1][0],merge[1][1])
A=a.merge(b)
table.style = 'LightShading-Accent1'
return table
def add_text(self,text,**kwargs):
        the_type=kwargs.get("type","body")  # body, title, heading
default=WD_ALIGN_PARAGRAPH.LEFT
if the_type=="title":
default=WD_ALIGN_PARAGRAPH.CENTER
alignment=kwargs.get("align",default)
if the_type=="title":
print("adding title")
t=self.file.add_heading(text,level=0)
elif the_type=="heading":
t=self.file.add_heading(text,level=1)
else:#body
t=self.file.add_paragraph(text)
t.alignment=alignment
return t
def Save(self):
self.file.save(self.loc)
os.startfile(self.loc,"open")
def SaveAsPDF(self):
pass
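# Illustrative usage (added sketch, not part of the original module): assembling a
# simple report; the output path and table contents are assumptions.
def _example_report():
    report = OutputReport(loc=r"C:\example_report.docx")
    report.add_text("Example Report", type="title")
    report.add_text("Summary", type="heading")
    report.add_table([["metric", "value"], ["rows", 3], ["columns", 2]])
    report.Save()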
class plot():
"""
Data : list (x and y), Series or Dataframe objects
kwargs:
start_date
end_date
title=""
ylabel=False, can be set to True to display labels associated with a Series object input or set to text directly
xlabel=False, can be set to True to display labels associated with a Series object input or set to text directly
width=1
sharex=False, share x axis with previous plot
sharey=False, share y axis with previous plot
new_graph, default of True, set to False if you want the current Data to be
plotted on the current subplot
new_line, default = True, set to False if you want the current addition to
    be added horizontally on the report, i.e. for the graphs to be side by side
instead of below the previous graph
https://www.python-course.eu/matplotlib_multiple_figures.php
@author: jlumpkin
"""
def __init__(self,**kwargs):
'''
kwargs:
title
figsize
debug_mode
'''
plt.rcParams.update({'figure.max_open_warning': 0})
self._initialize_variables()
self.title=kwargs.get("title",False)
self.figsize=kwargs.get("figsize",None)#(8.5,11)
self.debug_mode=kwargs.get("debug_mode",False)
self.figure=None
self.generated=False
def generate_plot(self):
plt.ioff()
plt.style.use('seaborn')
#plt.style.use('default')
#plt.rcParams['figure.constrained_layout.use'] = True
self.nrow=self.current_row+self._height_save
prev_ax=None
tolerance = 10 # points
connected=False
figure, ax=plt.subplots(figsize=self.figsize)
self.figure=figure
gridsize = ( self.nrow,self.ncol)
plt.rcParams['axes.axisbelow'] = True
for plot in self.plots:
if plot["new_graph"]:
if plot["sharex"]:
prev_ax=plt.subplot2grid(gridsize,(plot["Row"]-1,plot["Col"]-1),rowspan=plot["height"],colspan=plot["width"],sharex=prev_ax)
plt.setp(prev_ax.get_xticklabels(), visible=False)
elif plot["sharey"]:
prev_ax=plt.subplot2grid(gridsize,(plot["Row"]-1,plot["Col"]-1),rowspan=plot["height"],colspan=plot["width"],sharey=prev_ax)
plt.setp(prev_ax.get_yticklabels(), visible=False)
else:
prev_ax=plt.subplot2grid(gridsize,(plot["Row"]-1,plot["Col"]-1),rowspan=plot["height"],colspan=plot["width"])
plt.grid(True, linewidth=1)
if plot["title"]:
prev_ax.set_title(plot["title"])
if plot["xlabel"]:
if isinstance(plot["xlabel"],str):
prev_ax.set_xlabel(plot["xlabel"])
else:
prev_ax.set_xlabel(plot["Data"].index.name)
if plot["ylabel"]:
if isinstance(plot["ylabel"],str):
prev_ax.set_ylabel(plot["ylabel"])
else:
prev_ax.set_ylabel(plot["Data"].index.name)
#pdb.set_trace()
if plot["Type"]==ePlot_type.line:
prev_ax.plot(plot["Data"], picker=tolerance,label=plot["name"],**plot["kwargs"])
elif plot["Type"]==ePlot_type.scatter:
prev_ax.scatter(plot["Data"].index,plot["Data"].values, picker=tolerance,label=plot["name"],**plot["kwargs"])
elif plot["Type"]==ePlot_type.histogram:
prev_ax.hist(plot["Data"],bins=plot["bin"], picker=tolerance,label=plot["name"],**plot["kwargs"])
elif plot["Type"]==ePlot_type.span:
for span in plot["Data"]:
prev_ax.axvspan(span[0],span[1],alpha=.5,**plot["kwargs"])
elif plot["Type"]==ePlot_type.bar:
prev_ax.bar(plot["Data"].index,plot["Data"].values,**plot["kwargs"])
if plot["show_legend"]:
plt.legend(loc='best')
if plot["func"] and not connected:
figure.func=plot["func"]
connected=True
if self.debug_mode:
prev_ax.set_facecolor("r")
prev_ax.axis("on")
#plt.tight_layout()
#plt.subplots_adjust(hspace=1)
if self.title:
figure.suptitle(self.title)
self._initialize_variables()
self.generated=True
def plot_dict(self,**kwargs):
plot={"Data":kwargs.get("Data",None),
"new_graph":kwargs.get("new_graph",True),
"Type":kwargs.get("Type",None),
"Row":self.current_row,
"Col":self.current_col,
"title":kwargs.get("title",False),
"name":kwargs.get("name",None),#if you pass in a list or ndarray, you can specify a name which will show up in a lengend
"width":kwargs.get("width",1),
"height":kwargs.get("height",1),
"sharex":kwargs.get("sharex",False),
"sharey":kwargs.get("sharey",False),
"xlabel":kwargs.get("xlabel",None),
"ylabel":kwargs.get("ylabel",None),
"show_legend":kwargs.get("show_legend",False),
"func":kwargs.get("func",None)}
keys=plot.keys()
for key in keys:#any kwarg that is not above get repackaged as kwargs in the plot_dict
if key in kwargs:del kwargs[key]
plot["kwargs"]=kwargs
return plot
def add_spans(self,Data,**kwargs):
#Data must be a list of tuples with (start_date, end_date)
self._adjust_plot_pos(**kwargs)
plot=self.plot_dict(Data=Data,Type=ePlot_type.span,**kwargs)
#plot["color"]=kwargs.get("color","green")
if plot["height"]>self._height_save:
self._height_save=plot["height"]
self.plots.append(plot)
def add_line_graph(self,*Data,**kwargs):
Data=self._santize_data(*Data,**kwargs)
self._adjust_plot_pos(**kwargs)
plot=self.plot_dict(Data=Data,Type=ePlot_type.line,**kwargs)
#plot["color"]=kwargs.get("color","red")
if plot["height"]>self._height_save:
self._height_save=plot["height"]
self.plots.append(plot)
def add_scatter_graph(self,*Data,**kwargs):
Data=self._santize_data(*Data,**kwargs)
self._adjust_plot_pos(**kwargs)
plot=self.plot_dict(Data=Data,Type=ePlot_type.scatter,**kwargs)
#plot["color"]=kwargs.get("color","red")
#plot["s"]=kwargs.get("s",3)
#plot["marker"]=kwargs.get("marker","o")#https://matplotlib.org/api/markers_api.html
if plot["height"]>self._height_save:
self._height_save=plot["height"]
self.plots.append(plot)
def add_bar_graph(self,*Data,**kwargs):
Data=self._santize_data(*Data,**kwargs)
self._adjust_plot_pos(**kwargs)
plot=self.plot_dict(Data=Data,Type=ePlot_type.bar,**kwargs)
#plot["color"]=kwargs.get("color","red")
if plot["height"]>self._height_save:
self._height_save=plot["height"]
self.plots.append(plot)
def add_histogram_graph(self,*Data,**kwargs):
Data=self._santize_data(*Data,**kwargs)
self._adjust_plot_pos(**kwargs)
kwargs["bin"]=kwargs.get("bin",6)#this keyword applies to histograms only
plot=self.plot_dict(Data=Data,Type=ePlot_type.histogram,**kwargs)
if plot["height"]>self._height_save:
self._height_save=plot["height"]
self.plots.append(plot)
def _initialize_variables(self):
self.nrow, self.ncol, self.current_row, self.current_col =1,1,1,1
self._height_save=1
self.plots=list()#list of dicts
self.first_plot=True
def _adjust_plot_pos(self,**kwargs):
if self.first_plot:
self.first_plot=False
self.nrow+=kwargs.get("height",1)-1
return
new_graph=kwargs.get("new_graph",True)
if new_graph:
new_line=kwargs.get("new_line",True)
if new_line:#if this addition is stacking vertically
self.current_col=1
self.current_row+=self._height_save
self.nrow+=self._height_save
self._height_save=1
else:#if this addition if stacking horizontally
self.current_col+=1
if self.current_col>self.ncol:
self.ncol+=1
    def _santize_data(self,*Data,**kwargs):  # ensures the data becomes a Series object
if len(Data)==1 :
if isinstance(Data[0],pd.Series) or isinstance(Data[0],pd.DataFrame):
return Data[0]
elif isinstance(Data[0],list):
ret=pd.Series(data=Data[0],index=list(range(1,len(Data[0])+1)))
ret.name=kwargs.get("name","")
ret.index.name=""
return ret
else:
raise Exception("Error, can only accept Series or Dataframe objects")
elif len(Data)==2 :
x=None
y=None
name=kwargs.get("name","")
index_name=""
if isinstance(Data[0],pd.Series):
x=Data[0].values
index_name=Data[0].name
if isinstance(Data[1],pd.Series):
y=Data[1].values
name=Data[1].name
if isinstance(Data[0],list):
x=Data[0]
elif isinstance(Data[0],range):
x=list(Data[0])
if isinstance(Data[1],list):
y=Data[1]
            elif isinstance(Data[1],range):
                y=list(Data[1])  # the second argument is the y series, not x
if (isinstance(Data[0],pd.Series) or isinstance(Data[0],list)or isinstance(Data[0],range)) and (isinstance(Data[1],pd.Series) or isinstance(Data[1],list)or isinstance(Data[1],range)):
ret=pd.Series(data=y,index=x)
ret.name=name
ret.index.name=index_name
return ret
else:
raise Exception("Error, when two arguments are passed in, they must both be Series, list, or range objects")
else:
raise Exception("Error, can only accept 1 or 2 arguments")
def _get_index(self,row,col,width):
if width==1:
return (row-1)*self.ncol+col
else:
return((row-1)*self.ncol+col,(row-1)*self.ncol+col+width-1)
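# Illustrative usage (added sketch, not part of the original module): build a small
# two-panel figure with plot() and embed it in a report; the series values and the
# output path are made up for the example.
def _example_plot_report():
    p = plot(title="Demo")
    p.add_line_graph([1, 2, 3, 2, 5], name="series A", show_legend=True)
    p.add_scatter_graph(list(range(1, 6)), [5, 3, 4, 2, 1], name="series B")
    report = OutputReport(loc=r"C:\example_plots.docx")
    report.add_plot(p)
    report.Save()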
| 30.505543 | 195 | 0.533799 |
cc21eeee315ae0036bbef771643a4e54b8891dff | 7,818 | py | Python | core/manager/mysqlManager.py | umknow/python | 6fc4ad43bf0886c49b807c203ca485a15056c97c | ["BSD-2-Clause"] | null | null | null | core/manager/mysqlManager.py | umknow/python | 6fc4ad43bf0886c49b807c203ca485a15056c97c | ["BSD-2-Clause"] | null | null | null | core/manager/mysqlManager.py | umknow/python | 6fc4ad43bf0886c49b807c203ca485a15056c97c | ["BSD-2-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
# @Date: 2019-04-25
# @Author:zoubiao
import decimal
from collections import OrderedDict
import pymysql
import sqlalchemy
from DBUtils.PooledDB import PooledDB
from sqlalchemy import create_engine, and_
from sqlalchemy.orm import scoped_session, sessionmaker
from contextlib import contextmanager
from quotations.conf import config
import pandas as pd
class MysqlPool:
    '''
    Singleton MySQL connection pool.
    '''
    __intance = {}  ## singleton instances, keyed by db_type
    __pool = {}  ## one pool per db_type
def __new__(cls, db_type):
if db_type not in cls.__intance:
cls.__intance[db_type] = super(MysqlPool, cls).__new__(cls)
return cls.__intance[db_type]
def __init__(self, db_type):
if db_type not in self.__pool:
MysqlPool.init_pool(db_type)
self.pool = self.__pool[db_type]
@staticmethod
def init_pool(db_type):
        conf = config('mysql').get(db_type)  ## read the database settings from the config
MysqlPool.__pool[db_type] = PooledDB(**conf)
class ORMBase:
    '''
    Singleton SQLAlchemy ORM engine/session factory.
    '''
__intance = {}
__engine_obj = {}
def __new__(cls, db_type):
if db_type not in cls.__intance:
cls.__intance[db_type] = super(ORMBase, cls).__new__(cls)
return cls.__intance[db_type]
def __init__(self, db_type):
if db_type not in self.__engine_obj:
ORMBase.init_engine(db_type)
self.engine = self.__engine_obj[db_type]
self.session = scoped_session(sessionmaker(bind=self.engine, autoflush=False, autocommit=False))
@staticmethod
def init_engine(db_type):
MYSQL_PATH = config('mysql').get(db_type, uri=True)
engine = create_engine(MYSQL_PATH, pool_recycle=10, pool_size=30, max_overflow=0, pool_timeout=60)
ORMBase.__engine_obj[db_type] = engine
class MysqlManager:
    '''
    MySQL wrapper: a singleton connection pool built on PooledDB plus a
    SQLAlchemy ORM engine/session.
    ## To fetch results directly from a SQL statement use read_sql; the
    ## to_DataFrame parameter defaults to False and returns a list of dicts,
    ## while True returns a pandas DataFrame.
    >>> sql = "SELECT * FROM `macd_daily_bfq` limit 1;"
    >>> result_list = MysqlManager('quant').read_sql(sql)
    >>> print(isinstance(result_list, list))
    True
    >>> result_pd = MysqlManager('quant').read_sql(sql, to_DataFrame=True)##to_DataFrame
    >>> print(isinstance(result_pd, pd.DataFrame))
    True
    >>> with MysqlManager('quant') as session:
    ...     result = session.fetchall(sql)
    >>> print(isinstance(result_list, list))
    True
    >>> with MysqlManager('quant').Session as session:
    ...     print(isinstance(session, sqlalchemy.orm.session.Session))
    True
    '''
__intance = {}
def __init__(self, db_type=None):
        self.__pool = MysqlPool(db_type).pool  ## singleton connection pool
self.__session = ORMBase(db_type).session
self.engine = ORMBase(db_type).engine
@property
def __init_conn(self):
        self.__conn = self.__pool.connection()  ## acquire a connection from the pool
self.__cursor = self.__conn.cursor(pymysql.cursors.DictCursor)
@property
@contextmanager
def Session(self):
session = self.__session()
try:
yield session
session.commit()
except Exception as e:
session.rollback()
raise e
finally:
session.close()
def execute_many(self, sql, params=None):
sql_list = sql.strip().split(';')
sql_list.remove('')
try:
self.__init_conn
for sql in sql_list:
self.__cursor.execute(sql, params)
self.__conn.commit()
return True
except Exception as e:
self.__conn.rollback()
raise e
def execute(self, sqls, params=None):
        '''
        Equivalent to MySQL's execute: run a single SQL statement.
        :param sqls: SQL statement string
        :param params: optional statement parameters
        :return: True on success
        '''
try:
self.__init_conn
self.__cursor.execute(sqls, params)
self.__conn.commit()
return True
except Exception as e:
self.__conn.rollback()
raise e
def read_sql(self, sql, to_DataFrame=False):
"""
执行查询sql,返回结果。
:param sql:
:param to_DataFrame:是否返回panda结构数据。
:return:
"""
return self.__read_main(sql=sql, to_DataFrame=to_DataFrame)
def read_safe_sql(self, sql, params, to_DataFrame=False):
"""
安全执行查询sql,添加params,防止sql注入
:param sql:
:param params:
:param to_DataFrame:
:return:
"""
return self.__read_main(sql, params, to_DataFrame)
def __read_main(self, sql, params=None, to_DataFrame=False):
"""
执行sql查询
:param sql:
:param params:
:param to_DataFrame:
:return:
"""
try:
result = self.fetchall(sql, params, to_DataFrame)
return result
except Exception as e:
print(e)
raise e
def __change_type(self, result):
for info_dict in result:
for k, v in info_dict.items():
if isinstance(v, decimal.Decimal):
info_dict[k] = float(v)
return result
def fetchall(self, sql, params=None, to_DataFrame=False):
        '''
        Fetch every row returned by the query; by default the result is a list of dicts.
        :param sql: query string
        :param params: optional parameters
        :return: list of dicts, or a DataFrame when to_DataFrame is True
        '''
try:
self.execute(sql, params)
result = self.__cursor.fetchall()
if result:
                result = self.__change_type(result)  ## convert decimal.Decimal values to float
if to_DataFrame:
# Create DataFrame Preserving Order of the columns: noqa
result_fix = list(map(lambda x: OrderedDict(x), result))
result = pd.DataFrame(list(result_fix))
return result
except Exception as e:
print('sql error %s' % str(e))
raise e
finally:
self.close()
def insert_many(self, sql, values=[]):
        '''
        Bulk insert; values is a list of row tuples.
        :param sql: insert into tablename (id,name) values (%s,%s)
        :param values: [(1,'test'), (2,'new')]
        :return:
        '''
try:
self.__init_conn
self.__cursor.executemany(sql, values)
self.__conn.commit()
except Exception as e:
self.__conn.rollback()
raise e
finally:
self.close()
def __enter__(self):
        '''
        Context-manager entry: return this manager instance.
        :return: self
        '''
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def close(self):
self.__cursor.close()
self.__conn.close()
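# Illustrative usage (added sketch, not part of the original module): bulk insert
# with insert_many plus a parameterised statement; the table name and the 'quant'
# database alias are assumptions.
def _example_bulk_insert():
    manager = MysqlManager('quant')
    insert_sql = 'INSERT INTO demo_table (id, name) VALUES (%s, %s)'
    manager.insert_many(insert_sql, values=[(1, 'foo'), (2, 'bar')])
    manager.execute('DELETE FROM demo_table WHERE id = %s', (2,))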
if __name__ == '__main__':
sql = 'select code,date,MA5,MA10,MA20,MA60,resample_date,fact,close from ma_daily_bfq where code = %s and frequency=%s and id>=0 order by `date` desc limit %s '
params = ['000698.SZ', 'mothly', 60]
#
result = MysqlManager('quant')
data = result.read_safe_sql(sql, params=params)
print(data)
# import doctest
#
# doctest.testmod(verbose=True)
# @retry(tries=3, delay=0.5)
# def deal_sql(sql):
# try:
# print(sql)
# with MysqlManager('quant') as session:
# session.fetchall(sql)
# except Exception as e:
# print('----------------------------')
# raise e
# deal_sql(sql)
# import time
# start = time.time()
# from multiprocessing.pool import ThreadPool
#
# start = time.time()
# pool = ThreadPool(4)
#
# for i in range(10000):
# pool.apply_async(deal_sql, (i,))
# pool.close()
# pool.join()
# print(time.time()-start)
# for i in range(10000):
# with MysqlManager('python') as session:
# session.fetchall(sql)
# print(time.time() - start)
| 27.723404 | 164 | 0.578153 |
54b319b9ea76f3df6bf7324d4ec02fa2c9ebf55b | 2,734 | py | Python | oswin_tempest_plugin/tests/scenario/test_secure_boot.py | openstack/oswin-tempest-plugin | 59e6a14d01dda304c7d11fda1d35198f25799d6c | ["Apache-2.0"] | 6 | 2017-10-31T10:40:24.000Z | 2019-01-28T22:08:15.000Z | oswin_tempest_plugin/tests/scenario/test_secure_boot.py | openstack/oswin-tempest-plugin | 59e6a14d01dda304c7d11fda1d35198f25799d6c | ["Apache-2.0"] | null | null | null | oswin_tempest_plugin/tests/scenario/test_secure_boot.py | openstack/oswin-tempest-plugin | 59e6a14d01dda304c7d11fda1d35198f25799d6c | ["Apache-2.0"] | null | null | null |
# Copyright 2017 Cloudbase Solutions SRL
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oswin_tempest_plugin import config
from oswin_tempest_plugin.tests._mixins import optional_feature
from oswin_tempest_plugin.tests import test_base
CONF = config.CONF
class SecureBootTestCase(optional_feature._OptionalFeatureMixin,
test_base.TestBase):
"""Secure boot test suite.
This test suite will spawn instances requiring secure boot to be
enabled.
This test suite will require a Generation 2 VHDX image, with a
Linux guest OS (it tests connectivity via SSH).
The configured image must contain the following properties:
* os_type=linux
* hw_machine_type=hyperv-gen2
Hyper-V Secure Boot was first introduced in Windows / Hyper-V Server 2012
R2, but support for Linux guests was introduced in Windows / Hyper-V
Server 2016, which is why this test suite will require compute nodes
with the OS version 10.0 or newer.
"""
_MIN_HYPERV_VERSION = 10000
# NOTE(amuresan):Images supporting secure boot usually require more disk
# space. We're trying to use the largest of the configured
# flavors.
_FLAVOR_REF = CONF.compute.flavor_ref_alt
_IMAGE_REF = CONF.hyperv.secure_boot_image_ref
_IMAGE_SSH_USER = CONF.hyperv.secure_boot_image_ssh_user
_FEATURE_FLAVOR = {'extra_specs': {'os:secure_boot': 'required'}}
# TODO(amuresan): the secure_boot_image_ref should be reused in
# more than one test case so we don't have to add a different
# image for every test.
@classmethod
def skip_checks(cls):
super(SecureBootTestCase, cls).skip_checks()
# check if the needed image ref has been configured.
if not cls._IMAGE_REF:
msg = ('The config option "hyperv.secure_boot_image_ref" '
'has not been set. Skipping secure boot tests.')
raise cls.skipException(msg)
if not cls._IMAGE_SSH_USER:
msg = ('The config option "hyperv.secure_boot_image_ssh_user" '
'has not been set. Skipping.')
raise cls.skipException(msg)
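# Illustrative note (added, not part of the original test): the image referenced by
# the "hyperv.secure_boot_image_ref" option is expected to be a Generation 2 VHDX
# whose Glance properties look roughly like the following (CLI shown only as an
# example of setting them):
#
#     openstack image set <image-id> \
#         --property os_type=linux \
#         --property hw_machine_type=hyperv-gen2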
| 38.507042 | 78 | 0.701902 |
02b098da092b8cd3d04cdaea8fbe63208ad18529 | 556 | py | Python | test_plus/runner.py | kadamsagar039/python-sonar | 3f73425c85d3a98bd4ce321e2f5fb33a36be1150 | ["BSD-3-Clause"] | 530 | 2015-05-23T18:25:39.000Z | 2022-03-20T14:30:10.000Z | test_plus/runner.py | kadamsagar039/python-sonar | 3f73425c85d3a98bd4ce321e2f5fb33a36be1150 | ["BSD-3-Clause"] | 144 | 2015-05-27T04:09:15.000Z | 2021-11-24T15:32:08.000Z | test_plus/runner.py | kadamsagar039/python-sonar | 3f73425c85d3a98bd4ce321e2f5fb33a36be1150 | ["BSD-3-Clause"] | 62 | 2015-05-27T02:47:19.000Z | 2022-02-11T21:01:36.000Z |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from django.test.runner import DiscoverRunner as DefaultRunner
class NoLoggingRunner(DefaultRunner):
def run_tests(self, test_labels, extra_tests=None, **kwargs):
# Disable logging below CRITICAL while running the tests
logging.disable(logging.CRITICAL)
return super(NoLoggingRunner, self).run_tests(test_labels,
extra_tests,
**kwargs)
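# Illustrative usage (added, not part of the original module): select this runner
# from a Django settings module to silence sub-CRITICAL log output during test runs.
#
#     TEST_RUNNER = "test_plus.runner.NoLoggingRunner"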
| 32.705882 | 66 | 0.607914 |
4cf73f99fd48da88c83e8f5affe9c2c1e159c370 | 140,794 | py | Python | tensorflow/python/ops/array_ops.py | yxd886/tensorflow | 2eebcc63a6cd3aad483bf7c1cb25df2b8780ef67 | ["Apache-2.0"] | 1 | 2019-03-28T19:21:24.000Z | 2019-03-28T19:21:24.000Z | tensorflow/python/ops/array_ops.py | yxd886/tensorflow | 2eebcc63a6cd3aad483bf7c1cb25df2b8780ef67 | ["Apache-2.0"] | null | null | null | tensorflow/python/ops/array_ops.py | yxd886/tensorflow | 2eebcc63a6cd3aad483bf7c1cb25df2b8780ef67 | ["Apache-2.0"] | null | null | null |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Tests for this file live in python/kernel_tests/array_ops_test.py
"""Support for manipulating tensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
import six
from tensorflow.python.eager import context
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
# 'Constant' gets imported in the module 'array_ops'.
from tensorflow.python.framework.constant_op import constant
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_array_ops import *
from tensorflow.python.ops.gen_array_ops import reverse_v2 as reverse # pylint: disable=unused-import
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
# pylint: enable=wildcard-import
# Used for slicing to specify a new 1 size dimension
newaxis = None
tf_export("newaxis").export_constant(__name__, "newaxis")
# We override the 'slice' for the "slice" op, so we keep python's
# existing 'slice' for later use in this module.
_BaseSlice = slice
@tf_export("identity")
@dispatch.add_dispatch_support
def identity(input, name=None): # pylint: disable=redefined-builtin
r"""Return a tensor with the same shape and contents as input.
Args:
input: A `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
if context.executing_eagerly() and not hasattr(input, "graph"):
input = ops.convert_to_tensor(input)
in_device = input.backing_device
# TODO(ashankar): Does 'identity' need to invoke execution callbacks?
context_device = context.context().device_name
if not context_device:
context_device = "/job:localhost/replica:0/task:0/device:CPU:0"
if context_device == in_device:
return input
else:
copied = input._copy() # pylint: disable=protected-access
if hasattr(copied, "_handle_data"):
copied._handle_data = input._handle_data # pylint: disable=protected-access
return copied
else:
ret = gen_array_ops.identity(input, name=name)
# Propagate handle data for happier shape inference for resource variables.
if hasattr(input, "_handle_data"):
ret._handle_data = input._handle_data # pylint: disable=protected-access
return ret
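# A minimal sketch of `identity` (the `_identity_example` helper below is
# illustrative only and not part of the module's API): the result has the same
# shape and contents as the input, which is occasionally useful for naming a
# tensor or forcing a device copy in eager mode.
def _identity_example():
  x = constant([1, 2, 3])
  return identity(x)  # same values and shape as `x`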
# pylint: disable=redefined-builtin,protected-access
@tf_export(v1=["expand_dims"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None, "Use the `axis` argument instead", "dim")
def expand_dims(input, axis=None, name=None, dim=None):
"""Inserts a dimension of 1 into a tensor's shape.
Given a tensor `input`, this operation inserts a dimension of 1 at the
dimension index `axis` of `input`'s shape. The dimension index `axis` starts
at zero; if you specify a negative number for `axis` it is counted backward
from the end.
This operation is useful if you want to add a batch dimension to a single
element. For example, if you have a single image of shape `[height, width,
channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`,
which will make the shape `[1, height, width, channels]`.
Other examples:
```python
# 't' is a tensor of shape [2]
tf.shape(tf.expand_dims(t, 0)) # [1, 2]
tf.shape(tf.expand_dims(t, 1)) # [2, 1]
tf.shape(tf.expand_dims(t, -1)) # [2, 1]
# 't2' is a tensor of shape [2, 3, 5]
tf.shape(tf.expand_dims(t2, 0)) # [1, 2, 3, 5]
tf.shape(tf.expand_dims(t2, 2)) # [2, 3, 1, 5]
tf.shape(tf.expand_dims(t2, 3)) # [2, 3, 5, 1]
```
This operation requires that:
`-1-input.dims() <= dim <= input.dims()`
This operation is related to `squeeze()`, which removes dimensions of
size 1.
Args:
input: A `Tensor`.
axis: 0-D (scalar). Specifies the dimension index at which to
expand the shape of `input`. Must be in the range
`[-rank(input) - 1, rank(input)]`.
name: The name of the output `Tensor` (optional).
dim: 0-D (scalar). Equivalent to `axis`, to be deprecated.
Returns:
A `Tensor` with the same data as `input`, but its shape has an additional
dimension of size 1 added.
Raises:
ValueError: if either both or neither of `dim` and `axis` are specified.
"""
axis = deprecation.deprecated_argument_lookup("axis", axis, "dim", dim)
if axis is None:
raise ValueError("Must specify an axis argument to tf.expand_dims()")
return expand_dims_v2(input, axis, name)
@tf_export("expand_dims", v1=[])
@dispatch.add_dispatch_support
def expand_dims_v2(input, axis, name=None):
"""Inserts a dimension of 1 into a tensor's shape.
Given a tensor `input`, this operation inserts a dimension of 1 at the
dimension index `axis` of `input`'s shape. The dimension index `axis` starts
at zero; if you specify a negative number for `axis` it is counted backward
from the end.
This operation is useful if you want to add a batch dimension to a single
element. For example, if you have a single image of shape `[height, width,
channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`,
which will make the shape `[1, height, width, channels]`.
Other examples:
```python
# 't' is a tensor of shape [2]
tf.shape(tf.expand_dims(t, 0)) # [1, 2]
tf.shape(tf.expand_dims(t, 1)) # [2, 1]
tf.shape(tf.expand_dims(t, -1)) # [2, 1]
# 't2' is a tensor of shape [2, 3, 5]
tf.shape(tf.expand_dims(t2, 0)) # [1, 2, 3, 5]
tf.shape(tf.expand_dims(t2, 2)) # [2, 3, 1, 5]
tf.shape(tf.expand_dims(t2, 3)) # [2, 3, 5, 1]
```
This operation requires that:
`-1-input.dims() <= dim <= input.dims()`
This operation is related to `squeeze()`, which removes dimensions of
size 1.
Args:
input: A `Tensor`.
axis: 0-D (scalar). Specifies the dimension index at which to
expand the shape of `input`. Must be in the range
`[-rank(input) - 1, rank(input)]`.
name: The name of the output `Tensor` (optional).
Returns:
A `Tensor` with the same data as `input`, but its shape has an additional
dimension of size 1 added.
"""
return gen_array_ops.expand_dims(input, axis, name)
# pylint: enable=redefined-builtin,protected-access
# Aliases for some automatically-generated names.
# pylint: disable=protected-access
@deprecation.deprecated(
"2016-11-30",
"This op will be removed after the deprecation date. "
"Please switch to tf.setdiff1d().")
def listdiff(x, y, out_idx=None, name=None):
return gen_array_ops.list_diff(x, y, out_idx, name)
listdiff.__doc__ = gen_array_ops.list_diff.__doc__ + "\n" + listdiff.__doc__
# pylint: enable=protected-access
# pylint: disable=undefined-variable
@deprecation.deprecated(
"2018-11-30",
"This op will be removed after the deprecation date. "
"Please switch to tf.sets.difference().")
@tf_export(v1=["setdiff1d"])
def setdiff1d(x, y, index_dtype=dtypes.int32, name=None):
return gen_array_ops.list_diff(x, y, index_dtype, name)
setdiff1d.__doc__ = gen_array_ops.list_diff.__doc__
@tf_export("broadcast_dynamic_shape")
def broadcast_dynamic_shape(shape_x, shape_y):
"""Computes the shape of a broadcast given symbolic shapes.
When shape_x and shape_y are Tensors representing shapes (i.e. the result of
calling tf.shape on another Tensor) this computes a Tensor which is the shape
of the result of a broadcasting op applied in tensors of shapes shape_x and
shape_y.
For example, if shape_x is [1, 2, 3] and shape_y is [5, 1, 3], the result is a
Tensor whose value is [5, 2, 3].
This is useful when validating the result of a broadcasting operation when the
tensors do not have statically known shapes.
Args:
shape_x: A rank 1 integer `Tensor`, representing the shape of x.
shape_y: A rank 1 integer `Tensor`, representing the shape of y.
Returns:
A rank 1 integer `Tensor` representing the broadcasted shape.
"""
return gen_array_ops.broadcast_args(shape_x, shape_y)
@tf_export("broadcast_static_shape")
def broadcast_static_shape(shape_x, shape_y):
"""Computes the shape of a broadcast given known shapes.
When shape_x and shape_y are fully known TensorShapes this computes a
  TensorShape which is the shape of the result of a broadcasting op applied to
tensors of shapes shape_x and shape_y.
For example, if shape_x is [1, 2, 3] and shape_y is [5, 1, 3], the result is a
TensorShape whose value is [5, 2, 3].
This is useful when validating the result of a broadcasting operation when the
tensors have statically known shapes.
Args:
shape_x: A `TensorShape`
shape_y: A `TensorShape`
Returns:
A `TensorShape` representing the broadcasted shape.
Raises:
ValueError: If the two shapes can not be broadcasted.
"""
return common_shapes.broadcast_shape(shape_x, shape_y)
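# A small sketch contrasting the two broadcast-shape helpers above (the
# `_broadcast_shape_example` helper is illustrative only and not part of the
# module's API): the static variant works on `TensorShape` objects known at
# graph-construction time, while the dynamic variant works on shape tensors.
def _broadcast_shape_example():
  static = broadcast_static_shape(tensor_shape.TensorShape([1, 2, 3]),
                                  tensor_shape.TensorShape([5, 1, 3]))  # [5, 2, 3]
  dynamic = broadcast_dynamic_shape(constant([1, 2, 3]),
                                    constant([5, 1, 3]))  # tensor [5, 2, 3]
  return static, dynamic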
@tf_export("shape", v1=[])
def shape_v2(input, out_type=dtypes.int32, name=None):
# pylint: disable=redefined-builtin
return shape(input, name, out_type)
@tf_export(v1=["shape"])
def shape(input, name=None, out_type=dtypes.int32):
# pylint: disable=redefined-builtin
"""Returns the shape of a tensor.
This operation returns a 1-D integer tensor representing the shape of `input`.
For example:
```python
t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
tf.shape(t) # [2, 2, 3]
```
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
out_type: (Optional) The specified output type of the operation
(`int32` or `int64`). Defaults to `tf.int32`.
Returns:
A `Tensor` of type `out_type`.
"""
return shape_internal(input, name, optimize=True, out_type=out_type)
def shape_internal(input, name=None, optimize=True, out_type=dtypes.int32):
# pylint: disable=redefined-builtin
"""Returns the shape of a tensor.
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
optimize: if true, encode the shape as a constant when possible.
out_type: (Optional) The specified output type of the operation
(`int32` or `int64`). Defaults to tf.int32.
Returns:
A `Tensor` of type `out_type`.
"""
with ops.name_scope(name, "Shape", [input]) as name:
if isinstance(input, (sparse_tensor.SparseTensor,
sparse_tensor.SparseTensorValue)):
return gen_math_ops.cast(input.dense_shape, out_type)
else:
if not context.executing_eagerly():
input_tensor = ops.convert_to_tensor(input)
input_shape = input_tensor.get_shape()
if optimize and input_shape.is_fully_defined():
return constant(input_shape.as_list(), out_type, name=name)
return gen_array_ops.shape(input, name=name, out_type=out_type)
@tf_export("shape_n")
def shape_n(input, out_type=dtypes.int32, name=None):
# pylint: disable=redefined-builtin
"""Returns shape of tensors.
Args:
input: A list of at least 1 `Tensor` object with the same type.
out_type: The specified output type of the operation
(`int32` or `int64`). Defaults to `tf.int32`(optional).
name: A name for the operation (optional).
Returns:
A list with the same length as `input` of `Tensor` objects with
type `out_type`.
"""
return gen_array_ops.shape_n(input, out_type=out_type, name=name)
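# A minimal sketch of `shape_n` (the `_shape_n_example` helper is illustrative
# only and not part of the module's API): it returns one shape tensor per input
# in a single op.
def _shape_n_example():
  a = constant([[1, 2, 3]])   # shape [1, 3]
  b = constant([1, 2, 3, 4])  # shape [4]
  return shape_n([a, b])      # [[1, 3], [4]]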
@tf_export("size", v1=[])
@dispatch.add_dispatch_support
def size_v2(input, out_type=dtypes.int32, name=None):
# pylint: disable=redefined-builtin
return size(input, name, out_type)
@tf_export(v1=["size"])
@dispatch.add_dispatch_support
def size(input, name=None, out_type=dtypes.int32):
# pylint: disable=redefined-builtin
"""Returns the size of a tensor.
Returns a 0-D `Tensor` representing the number of elements in `input`
of type `out_type`. Defaults to tf.int32.
For example:
```python
t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
tf.size(t) # 12
```
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
out_type: (Optional) The specified non-quantized numeric output type
of the operation. Defaults to `tf.int32`.
Returns:
A `Tensor` of type `out_type`. Defaults to `tf.int32`.
@compatibility(numpy)
Equivalent to np.size()
@end_compatibility
"""
return size_internal(input, name, optimize=True, out_type=out_type)
def size_internal(input, name=None, optimize=True, out_type=dtypes.int32):
# pylint: disable=redefined-builtin,protected-access
"""Returns the size of a tensor.
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
optimize: if true, encode the size as a constant when possible.
out_type: (Optional) The specified non-quantized numeric output type
of the operation. Defaults to `tf.int32`.
Returns:
A `Tensor` of type `out_type`. Defaults to `tf.int32`.
"""
if context.executing_eagerly() and not isinstance(
input, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
input = ops.convert_to_tensor(input)
np_out_type = out_type.as_numpy_dtype
num_elements = np.prod(input._shape_tuple(), dtype=np_out_type) # pylint: disable=protected-access
return ops.convert_to_tensor(num_elements, dtype=out_type)
with ops.name_scope(name, "Size", [input]) as name:
if isinstance(input, (sparse_tensor.SparseTensor,
sparse_tensor.SparseTensorValue)):
return gen_math_ops.prod(
gen_math_ops.cast(input.dense_shape, out_type), 0, name=name)
else:
input_tensor = ops.convert_to_tensor(input)
input_shape = input_tensor.get_shape()
if optimize:
if input_shape.is_fully_defined():
return constant(input_shape.num_elements(), out_type, name=name)
if input_shape.dims and any(dim == 0 for dim in input_shape.dims):
return constant(0, out_type, name=name)
return gen_array_ops.size(input, name=name, out_type=out_type)
@tf_export("rank")
@dispatch.add_dispatch_support
def rank(input, name=None):
# pylint: disable=redefined-builtin
"""Returns the rank of a tensor.
Returns a 0-D `int32` `Tensor` representing the rank of `input`.
For example:
```python
# shape of tensor 't' is [2, 2, 3]
t = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
tf.rank(t) # 3
```
**Note**: The rank of a tensor is not the same as the rank of a matrix. The
rank of a tensor is the number of indices required to uniquely select each
element of the tensor. Rank is also known as "order", "degree", or "ndims."
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int32`.
@compatibility(numpy)
Equivalent to np.ndim
@end_compatibility
"""
return rank_internal(input, name, optimize=True)
def rank_internal(input, name=None, optimize=True):
# pylint: disable=redefined-builtin
"""Returns the rank of a tensor.
Args:
input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
optimize: if true, encode the rank as a constant when possible.
Returns:
A `Tensor` of type `int32`.
"""
with ops.name_scope(name, "Rank", [input]) as name:
if isinstance(input, (sparse_tensor.SparseTensor,
sparse_tensor.SparseTensorValue)):
return gen_array_ops.size(input.dense_shape, name=name)
else:
input_tensor = ops.convert_to_tensor(input)
input_shape = input_tensor.get_shape()
if optimize and input_shape.ndims is not None:
return constant(input_shape.ndims, dtypes.int32, name=name)
return gen_array_ops.rank(input, name=name)
_SLICE_TYPE_ERROR = (
"Only integers, slices (`:`), ellipsis (`...`), "
"tf.newaxis (`None`) and scalar tf.int32/tf.int64 tensors are valid "
"indices")
_SUPPORTED_SLICE_DTYPES = (
dtypes.int32,
dtypes.int32_ref,
dtypes.int64,
dtypes.int64_ref
)
def _check_index(idx):
"""Check if a given value is a valid index into a tensor."""
if isinstance(idx, (six.integer_types, tensor_shape.Dimension)):
return
# Optimistic check. Assumptions:
# * any object with a dtype is supported
# * any object with a dtype has a sizeable shape attribute.
dtype = getattr(idx, "dtype", None)
if (dtype is None or
dtypes.as_dtype(dtype) not in _SUPPORTED_SLICE_DTYPES or
idx.shape and len(idx.shape) == 1):
# TODO(slebedev): IndexError seems more appropriate here, but it
# will break `_slice_helper` contract.
raise TypeError(_SLICE_TYPE_ERROR + ", got {!r}".format(idx))
def _slice_helper(tensor, slice_spec, var=None):
"""Overload for Tensor.__getitem__.
This operation extracts the specified region from the tensor.
The notation is similar to NumPy with the restriction that
  currently only basic indexing is supported. That means that
using a non-scalar tensor as input is not currently allowed.
Some useful examples:
```python
# strip leading and trailing 2 elements
foo = tf.constant([1,2,3,4,5,6])
print(foo[2:-2].eval()) # => [3,4]
# skip every row and reverse every column
foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
print(foo[::2,::-1].eval()) # => [[3,2,1], [9,8,7]]
# Use scalar tensors as indices on both dimensions
print(foo[tf.constant(0), tf.constant(2)].eval()) # => 3
# Insert another dimension
foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
print(foo[tf.newaxis, :, :].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]
print(foo[:, tf.newaxis, :].eval()) # => [[[1,2,3]], [[4,5,6]], [[7,8,9]]]
print(foo[:, :, tf.newaxis].eval()) # => [[[1],[2],[3]], [[4],[5],[6]],
[[7],[8],[9]]]
# Ellipses (3 equivalent operations)
foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
print(foo[tf.newaxis, :, :].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]
print(foo[tf.newaxis, ...].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]
print(foo[tf.newaxis].eval()) # => [[[1,2,3], [4,5,6], [7,8,9]]]
# masks
foo = tf.constant([[1,2,3], [4,5,6], [7,8,9]])
print(foo[foo > 2].eval()) # => [3, 4, 5, 6, 7, 8, 9]
```
Notes:
- `tf.newaxis` is `None` as in NumPy.
- An implicit ellipsis is placed at the end of the `slice_spec`
- NumPy advanced indexing is currently not supported.
Args:
tensor: An ops.Tensor object.
slice_spec: The arguments to Tensor.__getitem__.
var: In the case of variable slice assignment, the Variable
object to slice (i.e. tensor is the read-only view of this
variable).
Returns:
The appropriate slice of "tensor", based on "slice_spec".
Raises:
ValueError: If a slice range is negative size.
TypeError: If the slice indices aren't int, slice, ellipsis,
tf.newaxis or scalar int32/int64 tensors.
"""
if isinstance(slice_spec, bool) or \
(isinstance(slice_spec, ops.Tensor) and slice_spec.dtype == dtypes.bool) or \
(isinstance(slice_spec, np.ndarray) and slice_spec.dtype == bool):
return boolean_mask(tensor=tensor, mask=slice_spec)
if not isinstance(slice_spec, (list, tuple)):
slice_spec = [slice_spec]
begin, end, strides = [], [], []
index = 0
new_axis_mask, shrink_axis_mask = 0, 0
begin_mask, end_mask = 0, 0
ellipsis_mask = 0
for s in slice_spec:
if isinstance(s, _BaseSlice):
# python doesn't always use None when constructing ranges
# for example a[:] gives slice(None,sys.maxsize,None)
# whereas a[::1] gives slice(None,None,None)
if s.start is not None and s.start is not sys.maxsize:
_check_index(s.start)
begin.append(s.start)
else:
begin.append(0)
begin_mask |= (1 << index)
if s.stop is not None and s.stop != sys.maxsize:
_check_index(s.stop)
end.append(s.stop)
else:
end.append(0)
end_mask |= (1 << index)
if s.step is not None:
_check_index(s.step)
strides.append(s.step)
else:
strides.append(1)
elif s is Ellipsis:
begin.append(0)
end.append(0)
strides.append(1)
ellipsis_mask |= (1 << index)
elif s is newaxis:
begin.append(0)
end.append(0)
strides.append(1)
new_axis_mask |= (1 << index)
else:
_check_index(s)
begin.append(s)
end.append(s + 1)
strides.append(1)
shrink_axis_mask |= (1 << index)
index += 1
  # stack possibly involves no tensors, so we must use op_scope to select the correct graph.
with ops.name_scope(None, "strided_slice",
[tensor] + begin + end + strides) as name:
if begin:
packed_begin, packed_end, packed_strides = (stack(begin), stack(end),
stack(strides))
if (packed_begin.dtype == dtypes.int64 or
packed_end.dtype == dtypes.int64 or
packed_strides.dtype == dtypes.int64):
if packed_begin.dtype != dtypes.int64:
packed_begin = gen_math_ops.cast(packed_begin, dtypes.int64)
if packed_end.dtype != dtypes.int64:
packed_end = gen_math_ops.cast(packed_end, dtypes.int64)
if packed_strides.dtype != dtypes.int64:
packed_strides = gen_math_ops.cast(packed_strides, dtypes.int64)
else:
var_empty = constant([], dtype=dtypes.int32)
packed_begin = packed_end = packed_strides = var_empty
return strided_slice(
tensor,
packed_begin,
packed_end,
packed_strides,
begin_mask=begin_mask,
end_mask=end_mask,
shrink_axis_mask=shrink_axis_mask,
new_axis_mask=new_axis_mask,
ellipsis_mask=ellipsis_mask,
var=var,
name=name)
# pylint: disable=undefined-variable,protected-access,redefined-outer-name
@tf_export("slice")
def slice(input_, begin, size, name=None):
# pylint: disable=redefined-builtin
"""Extracts a slice from a tensor.
This operation extracts a slice of size `size` from a tensor `input` starting
at the location specified by `begin`. The slice `size` is represented as a
tensor shape, where `size[i]` is the number of elements of the 'i'th dimension
of `input` that you want to slice. The starting location (`begin`) for the
slice is represented as an offset in each dimension of `input`. In other
words, `begin[i]` is the offset into the 'i'th dimension of `input` that you
want to slice from.
Note that `tf.Tensor.__getitem__` is typically a more pythonic way to
perform slices, as it allows you to write `foo[3:7, :-2]` instead of
`tf.slice(foo, [3, 0], [4, foo.get_shape()[1]-2])`.
`begin` is zero-based; `size` is one-based. If `size[i]` is -1,
all remaining elements in dimension i are included in the
slice. In other words, this is equivalent to setting:
`size[i] = input.dim_size(i) - begin[i]`
This operation requires that:
`0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n]`
For example:
```python
t = tf.constant([[[1, 1, 1], [2, 2, 2]],
[[3, 3, 3], [4, 4, 4]],
[[5, 5, 5], [6, 6, 6]]])
tf.slice(t, [1, 0, 0], [1, 1, 3]) # [[[3, 3, 3]]]
tf.slice(t, [1, 0, 0], [1, 2, 3]) # [[[3, 3, 3],
# [4, 4, 4]]]
tf.slice(t, [1, 0, 0], [2, 1, 3]) # [[[3, 3, 3]],
# [[5, 5, 5]]]
```
Args:
input_: A `Tensor`.
begin: An `int32` or `int64` `Tensor`.
size: An `int32` or `int64` `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` the same type as `input`.
"""
return gen_array_ops._slice(input_, begin, size, name=name)
# pylint: disable=invalid-name
@tf_export("strided_slice")
def strided_slice(input_,
begin,
end,
strides=None,
begin_mask=0,
end_mask=0,
ellipsis_mask=0,
new_axis_mask=0,
shrink_axis_mask=0,
var=None,
name=None):
"""Extracts a strided slice of a tensor (generalized python array indexing).
**Instead of calling this op directly most users will want to use the
NumPy-style slicing syntax (e.g. `tensor[..., 3:4:-1, tf.newaxis, 3]`), which
is supported via `tf.Tensor.__getitem__` and `tf.Variable.__getitem__`.**
The interface of this op is a low-level encoding of the slicing syntax.
Roughly speaking, this op extracts a slice of size `(end-begin)/stride`
from the given `input_` tensor. Starting at the location specified by `begin`
the slice continues by adding `stride` to the index until all dimensions are
not less than `end`.
Note that a stride can be negative, which causes a reverse slice.
Given a Python slice `input[spec0, spec1, ..., specn]`,
this function will be called as follows.
`begin`, `end`, and `strides` will be vectors of length n.
n in general is not equal to the rank of the `input_` tensor.
In each mask field (`begin_mask`, `end_mask`, `ellipsis_mask`,
`new_axis_mask`, `shrink_axis_mask`) the ith bit will correspond to
the ith spec.
If the ith bit of `begin_mask` is set, `begin[i]` is ignored and
the fullest possible range in that dimension is used instead.
`end_mask` works analogously, except with the end range.
`foo[5:,:,:3]` on a 7x8x9 tensor is equivalent to `foo[5:7,0:8,0:3]`.
`foo[::-1]` reverses a tensor with shape 8.
If the ith bit of `ellipsis_mask` is set, as many unspecified dimensions
as needed will be inserted between other dimensions. Only one
non-zero bit is allowed in `ellipsis_mask`.
For example `foo[3:5,...,4:5]` on a shape 10x3x3x10 tensor is
equivalent to `foo[3:5,:,:,4:5]` and
`foo[3:5,...]` is equivalent to `foo[3:5,:,:,:]`.
If the ith bit of `new_axis_mask` is set, then `begin`,
`end`, and `stride` are ignored and a new length 1 dimension is
added at this point in the output tensor.
For example,
`foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor.
If the ith bit of `shrink_axis_mask` is set, it implies that the ith
specification shrinks the dimensionality by 1, taking on the value at index
`begin[i]`. `end[i]` and `strides[i]` are ignored in this case. For example in
Python one might do `foo[:, 3, :]` which would result in `shrink_axis_mask`
equal to 2.
NOTE: `begin` and `end` are zero-indexed.
`strides` entries must be non-zero.
```python
t = tf.constant([[[1, 1, 1], [2, 2, 2]],
[[3, 3, 3], [4, 4, 4]],
[[5, 5, 5], [6, 6, 6]]])
tf.strided_slice(t, [1, 0, 0], [2, 1, 3], [1, 1, 1]) # [[[3, 3, 3]]]
tf.strided_slice(t, [1, 0, 0], [2, 2, 3], [1, 1, 1]) # [[[3, 3, 3],
# [4, 4, 4]]]
tf.strided_slice(t, [1, -1, 0], [2, -3, 3], [1, -1, 1]) # [[[4, 4, 4],
# [3, 3, 3]]]
```
Args:
input_: A `Tensor`.
begin: An `int32` or `int64` `Tensor`.
end: An `int32` or `int64` `Tensor`.
strides: An `int32` or `int64` `Tensor`.
begin_mask: An `int32` mask.
end_mask: An `int32` mask.
ellipsis_mask: An `int32` mask.
new_axis_mask: An `int32` mask.
shrink_axis_mask: An `int32` mask.
var: The variable corresponding to `input_` or None
name: A name for the operation (optional).
Returns:
A `Tensor` the same type as `input`.
"""
if strides is None:
strides = ones_like(begin)
op = gen_array_ops.strided_slice(
input=input_,
begin=begin,
end=end,
strides=strides,
name=name,
begin_mask=begin_mask,
end_mask=end_mask,
ellipsis_mask=ellipsis_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask)
parent_name = name
if not (var is None and isinstance(op, ops.EagerTensor)):
def assign(val, name=None):
"""Closure that holds all the arguments to create an assignment."""
if var is None:
raise ValueError("Sliced assignment is only supported for variables")
if name is None:
name = parent_name + "_assign"
return var._strided_slice_assign(
begin=begin,
end=end,
strides=strides,
value=val,
name=name,
begin_mask=begin_mask,
end_mask=end_mask,
ellipsis_mask=ellipsis_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask)
op.assign = assign
return op
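# A short sketch of how the mask arguments encode Python slicing (the
# `_strided_slice_mask_example` helper is illustrative only and not part of the
# module's API). The call below is the low-level equivalent of `t[1:, 0]`:
# bit 0 of `end_mask` marks an open-ended range in dimension 0, and bit 1 of
# `shrink_axis_mask` drops dimension 1 by selecting index `begin[1]`.
def _strided_slice_mask_example():
  t = constant([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
  return strided_slice(t, [1, 0], [0, 1], [1, 1],
                       end_mask=0b01, shrink_axis_mask=0b10)  # => [4, 7]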
def _SliceHelperVar(var, slice_spec):
"""Creates a slice helper object given a variable.
This allows creating a sub-tensor from part of the current contents
of a variable. See `tf.Tensor.__getitem__` for detailed examples
of slicing.
This function in addition also allows assignment to a sliced range.
This is similar to `__setitem__` functionality in Python. However,
the syntax is different so that the user can capture the assignment
operation for grouping or passing to `sess.run()`.
For example,
```python
import tensorflow as tf
A = tf.Variable([[1,2,3], [4,5,6], [7,8,9]], dtype=tf.float32)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
print(sess.run(A[:2, :2])) # => [[1,2], [4,5]]
op = A[:2,:2].assign(22. * tf.ones((2, 2)))
print(sess.run(op)) # => [[22, 22, 3], [22, 22, 6], [7,8,9]]
```
Note that assignments currently do not support NumPy broadcasting
semantics.
Args:
var: An `ops.Variable` object.
slice_spec: The arguments to `Tensor.__getitem__`.
Returns:
    The appropriate slice of "tensor", based on "slice_spec", as an operator.
    The operator also has an `assign()` method that can be used to generate
    an assignment operator.
Raises:
ValueError: If a slice range is negative size.
    TypeError: If the slice indices aren't int, slice,
ellipsis, tf.newaxis or int32/int64 tensors.
"""
return _slice_helper(var.value(), slice_spec, var)
ops.Tensor._override_operator("__getitem__", _slice_helper)
@tf_export("parallel_stack")
def parallel_stack(values, name="parallel_stack"):
"""Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor in parallel.
Requires that the shape of inputs be known at graph construction time.
Packs the list of tensors in `values` into a tensor with rank one higher than
each tensor in `values`, by packing them along the first dimension.
Given a list of length `N` of tensors of shape `(A, B, C)`; the `output`
tensor will have the shape `(N, A, B, C)`.
For example:
```python
x = tf.constant([1, 4])
y = tf.constant([2, 5])
z = tf.constant([3, 6])
tf.parallel_stack([x, y, z]) # [[1, 4], [2, 5], [3, 6]]
```
The difference between `stack` and `parallel_stack` is that `stack` requires
all the inputs be computed before the operation will begin but doesn't require
that the input shapes be known during graph construction.
`parallel_stack` will copy pieces of the input into the output as they become
  available; in some situations this can provide a performance benefit.
Unlike `stack`, `parallel_stack` does NOT support backpropagation.
This is the opposite of unstack. The numpy equivalent is
tf.parallel_stack([x, y, z]) = np.asarray([x, y, z])
Args:
values: A list of `Tensor` objects with the same shape and type.
name: A name for this operation (optional).
Returns:
output: A stacked `Tensor` with the same type as `values`.
"""
with ops.name_scope(name):
value_t = ops.convert_to_tensor(values[0])
value_shape = ops.convert_to_tensor(value_t).get_shape()
output_shape = tensor_shape.TensorShape([len(values)])
output_shape = output_shape.concatenate(value_shape)
# expand_dims converts concat to stack.
return gen_array_ops.parallel_concat(
[expand_dims(value, 0) for value in values], shape=output_shape)
@tf_export("stack")
@dispatch.add_dispatch_support
def stack(values, axis=0, name="stack"):
"""Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor.
Packs the list of tensors in `values` into a tensor with rank one higher than
each tensor in `values`, by packing them along the `axis` dimension.
Given a list of length `N` of tensors of shape `(A, B, C)`;
if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.
if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.
Etc.
For example:
```python
x = tf.constant([1, 4])
y = tf.constant([2, 5])
z = tf.constant([3, 6])
tf.stack([x, y, z]) # [[1, 4], [2, 5], [3, 6]] (Pack along first dim.)
tf.stack([x, y, z], axis=1) # [[1, 2, 3], [4, 5, 6]]
```
This is the opposite of unstack. The numpy equivalent is
```python
tf.stack([x, y, z]) = np.stack([x, y, z])
```
Args:
values: A list of `Tensor` objects with the same shape and type.
axis: An `int`. The axis to stack along. Defaults to the first dimension.
Negative values wrap around, so the valid range is `[-(R+1), R+1)`.
name: A name for this operation (optional).
Returns:
output: A stacked `Tensor` with the same type as `values`.
Raises:
ValueError: If `axis` is out of the range [-(R+1), R+1).
"""
if axis == 0:
try:
# If the input is a constant list, it can be converted to a constant op
return ops.convert_to_tensor(values, name=name)
except (TypeError, ValueError):
pass # Input list contains non-constant tensors
value_shape = ops.convert_to_tensor(values[0], name=name)._shape_tuple() # pylint: disable=protected-access
if value_shape is not None:
expanded_num_dims = len(value_shape) + 1
if axis < -expanded_num_dims or axis >= expanded_num_dims:
raise ValueError("axis = %d not in [%d, %d)" % (axis, -expanded_num_dims,
expanded_num_dims))
return gen_array_ops.pack(values, axis=axis, name=name)
# pylint: disable=invalid-name
def _autopacking_helper(list_or_tuple, dtype, name):
"""Converts the given list or tuple to a tensor by packing.
Args:
list_or_tuple: A (possibly nested) list or tuple containing a tensor.
dtype: The element type of the returned tensor.
name: A name for the returned tensor.
Returns:
A `tf.Tensor` with value equivalent to `list_or_tuple`.
"""
if context.executing_eagerly():
# NOTE: Fast path when all the items are tensors, this doesn't do any type
# checking.
if all(ops.is_dense_tensor_like(elem) for elem in list_or_tuple):
return gen_array_ops.pack(list_or_tuple, name=name)
must_pack = False
converted_elems = []
with ops.name_scope(name) as scope:
for i, elem in enumerate(list_or_tuple):
if ops.is_dense_tensor_like(elem):
if dtype is not None and elem.dtype.base_dtype != dtype:
raise TypeError("Cannot convert a list containing a tensor of dtype "
"%s to %s (Tensor is: %r)" % (elem.dtype, dtype,
elem))
converted_elems.append(elem)
must_pack = True
elif isinstance(elem, (list, tuple)):
converted_elem = _autopacking_helper(elem, dtype, str(i))
if ops.is_dense_tensor_like(converted_elem):
must_pack = True
converted_elems.append(converted_elem)
else:
converted_elems.append(elem)
if must_pack:
elems_as_tensors = []
for i, elem in enumerate(converted_elems):
if ops.is_dense_tensor_like(elem):
elems_as_tensors.append(elem)
else:
# NOTE(mrry): This is inefficient, but it enables us to
# handle the case where the list arguments are other
# convertible-to-tensor types, such as numpy arrays.
elems_as_tensors.append(
constant_op.constant(elem, dtype=dtype, name=str(i)))
return gen_array_ops.pack(elems_as_tensors, name=scope)
else:
return converted_elems
def _get_dtype_from_nested_lists(list_or_tuple):
"""Returns the dtype of any tensor-like object in `list_or_tuple`, if found.
Args:
list_or_tuple: A list or tuple representing an object that can be
converted to a `tf.Tensor`.
Returns:
The dtype of any tensor-like object in `list_or_tuple`, or `None` if no
such object exists.
"""
for elem in list_or_tuple:
if ops.is_dense_tensor_like(elem):
return elem.dtype.base_dtype
elif isinstance(elem, (list, tuple)):
maybe_dtype = _get_dtype_from_nested_lists(elem)
if maybe_dtype is not None:
return maybe_dtype
return None
def _cast_nested_seqs_to_dtype(dtype):
def _maybe_cast(elem):
if ops.is_dense_tensor_like(elem):
if dtype != elem.dtype.base_dtype:
elem = gen_math_ops.cast(elem, dtype)
return elem
return _maybe_cast
def _autopacking_conversion_function(v, dtype=None, name=None, as_ref=False):
"""Tensor conversion function that automatically packs arguments."""
if as_ref:
return NotImplemented
inferred_dtype = _get_dtype_from_nested_lists(v)
if inferred_dtype is None:
# We did not find any tensor-like objects in the nested lists, so defer to
# other conversion functions.
return NotImplemented
if dtype is None:
dtype = inferred_dtype
elif dtype != inferred_dtype:
v = nest.map_structure(_cast_nested_seqs_to_dtype(dtype), v)
return _autopacking_helper(v, dtype, name or "packed")
# pylint: enable=invalid-name
# NOTE: Register this conversion function to run *before* one that
# assumes every element is a value.
ops.register_tensor_conversion_function((list, tuple),
_autopacking_conversion_function, 99)
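# A small sketch of the autopacking conversion registered above (the
# `_autopacking_example` helper is illustrative only and not part of the module's
# API): a nested Python list that contains a Tensor is converted by packing, so
# it can be passed directly wherever a tensor is expected.
def _autopacking_example():
  x = constant(1.0)
  # The list [x, 2.0, 3.0] is auto-packed into a float32 tensor of shape [3].
  return ops.convert_to_tensor([x, 2.0, 3.0])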
@tf_export("unstack")
def unstack(value, num=None, axis=0, name="unstack"):
"""Unpacks the given dimension of a rank-`R` tensor into rank-`(R-1)` tensors.
Unpacks `num` tensors from `value` by chipping it along the `axis` dimension.
If `num` is not specified (the default), it is inferred from `value`'s shape.
If `value.shape[axis]` is not known, `ValueError` is raised.
For example, given a tensor of shape `(A, B, C, D)`;
If `axis == 0` then the i'th tensor in `output` is the slice
`value[i, :, :, :]` and each tensor in `output` will have shape `(B, C, D)`.
(Note that the dimension unpacked along is gone, unlike `split`).
If `axis == 1` then the i'th tensor in `output` is the slice
`value[:, i, :, :]` and each tensor in `output` will have shape `(A, C, D)`.
Etc.
This is the opposite of stack.
Args:
value: A rank `R > 0` `Tensor` to be unstacked.
num: An `int`. The length of the dimension `axis`. Automatically inferred
if `None` (the default).
axis: An `int`. The axis to unstack along. Defaults to the first
dimension. Negative values wrap around, so the valid range is `[-R, R)`.
name: A name for the operation (optional).
Returns:
The list of `Tensor` objects unstacked from `value`.
Raises:
ValueError: If `num` is unspecified and cannot be inferred.
ValueError: If `axis` is out of the range [-R, R).
"""
if num is None:
value = ops.convert_to_tensor(value)
value_shape = value.get_shape()
if value_shape.ndims is not None:
if axis < -value_shape.ndims or axis >= value_shape.ndims:
raise ValueError("axis = %d not in [%d, %d)" %
(axis, -value_shape.ndims, value_shape.ndims))
num = value_shape.dims[axis].value
if num is None:
raise ValueError("Cannot infer num from shape %s" % value_shape)
return gen_array_ops.unpack(value, num=num, axis=axis, name=name)
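# A minimal sketch of `unstack` (the `_unstack_example` helper is illustrative
# only and not part of the module's API): it is the inverse of `stack` along the
# chosen axis.
def _unstack_example():
  x = constant([[1, 4], [2, 5], [3, 6]])  # shape [3, 2]
  a, b, c = unstack(x)                    # three tensors of shape [2]
  return stack([a, b, c])                 # back to shape [3, 2]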
@tf_export("concat")
@dispatch.add_dispatch_support
def concat(values, axis, name="concat"):
"""Concatenates tensors along one dimension.
Concatenates the list of tensors `values` along dimension `axis`. If
`values[i].shape = [D0, D1, ... Daxis(i), ...Dn]`, the concatenated
result has shape
[D0, D1, ... Raxis, ...Dn]
where
Raxis = sum(Daxis(i))
That is, the data from the input tensors is joined along the `axis`
dimension.
The number of dimensions of the input tensors must match, and all dimensions
except `axis` must be equal.
For example:
```python
t1 = [[1, 2, 3], [4, 5, 6]]
t2 = [[7, 8, 9], [10, 11, 12]]
tf.concat([t1, t2], 0) # [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
tf.concat([t1, t2], 1) # [[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]]
# tensor t3 with shape [2, 3]
# tensor t4 with shape [2, 3]
tf.shape(tf.concat([t3, t4], 0)) # [4, 3]
tf.shape(tf.concat([t3, t4], 1)) # [2, 6]
```
As in Python, the `axis` could also be negative numbers. Negative `axis`
are interpreted as counting from the end of the rank, i.e.,
`axis + rank(values)`-th dimension.
For example:
```python
t1 = [[[1, 2], [2, 3]], [[4, 4], [5, 3]]]
t2 = [[[7, 4], [8, 4]], [[2, 10], [15, 11]]]
tf.concat([t1, t2], -1)
```
would produce:
```python
[[[ 1, 2, 7, 4],
[ 2, 3, 8, 4]],
[[ 4, 4, 2, 10],
[ 5, 3, 15, 11]]]
```
Note: If you are concatenating along a new axis consider using stack.
E.g.
```python
tf.concat([tf.expand_dims(t, axis) for t in tensors], axis)
```
can be rewritten as
```python
tf.stack(tensors, axis=axis)
```
Args:
values: A list of `Tensor` objects or a single `Tensor`.
axis: 0-D `int32` `Tensor`. Dimension along which to concatenate. Must be
in the range `[-rank(values), rank(values))`. As in Python, indexing
      for axis is 0-based. Positive axis in the range of
`[0, rank(values))` refers to `axis`-th dimension. And negative axis
refers to `axis + rank(values)`-th dimension.
name: A name for the operation (optional).
Returns:
A `Tensor` resulting from concatenation of the input tensors.
"""
if not isinstance(values, (list, tuple)):
values = [values]
# TODO(mrry): Change to return values?
if len(values) == 1: # Degenerate case of one tensor.
# Make a throwaway call to convert_to_tensor to make sure
# that axis is of the correct type, and make sure that
# the returned tensor is a scalar.
# TODO(keveman): Implement a standalone type and shape checker.
with ops.name_scope(name) as scope:
ops.convert_to_tensor(
axis, name="concat_dim",
dtype=dtypes.int32).get_shape().assert_is_compatible_with(
tensor_shape.scalar())
return identity(values[0], name=scope)
return gen_array_ops.concat_v2(values=values, axis=axis, name=name)
@tf_export(v1=["boolean_mask"])
def boolean_mask(tensor, mask, name="boolean_mask", axis=None):
"""Apply boolean mask to tensor. Numpy equivalent is `tensor[mask]`.
```python
# 1-D example
tensor = [0, 1, 2, 3]
mask = np.array([True, False, True, False])
boolean_mask(tensor, mask) # [0, 2]
```
In general, `0 < dim(mask) = K <= dim(tensor)`, and `mask`'s shape must match
the first K dimensions of `tensor`'s shape. We then have:
`boolean_mask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]`
where `(i1,...,iK)` is the ith `True` entry of `mask` (row-major order).
The `axis` could be used with `mask` to indicate the axis to mask from.
In that case, `axis + dim(mask) <= dim(tensor)` and `mask`'s shape must match
the first `axis + dim(mask)` dimensions of `tensor`'s shape.
Args:
tensor: N-D tensor.
mask: K-D boolean tensor, K <= N and K must be known statically.
name: A name for this operation (optional).
axis: A 0-D int Tensor representing the axis in `tensor` to mask from.
By default, axis is 0 which will mask from the first dimension. Otherwise
K + axis <= N.
Returns:
(N-K+1)-dimensional tensor populated by entries in `tensor` corresponding
to `True` values in `mask`.
Raises:
ValueError: If shapes do not conform.
Examples:
```python
# 2-D example
tensor = [[1, 2], [3, 4], [5, 6]]
mask = np.array([True, False, True])
boolean_mask(tensor, mask) # [[1, 2], [5, 6]]
```
"""
def _apply_mask_1d(reshaped_tensor, mask, axis=None):
"""Mask tensor along dimension 0 with a 1-D mask."""
indices = squeeze(where(mask), axis=[1])
return gather(reshaped_tensor, indices, axis=axis)
with ops.name_scope(name, values=[tensor, mask]):
tensor = ops.convert_to_tensor(tensor, name="tensor")
mask = ops.convert_to_tensor(mask, name="mask")
shape_mask = mask.get_shape()
ndims_mask = shape_mask.ndims
shape_tensor = tensor.get_shape()
if ndims_mask == 0:
raise ValueError("mask cannot be scalar.")
if ndims_mask is None:
raise ValueError(
"Number of mask dimensions must be specified, even if some dimensions"
" are None. E.g. shape=[None] is ok, but shape=None is not.")
axis = 0 if axis is None else axis
shape_tensor[axis:axis + ndims_mask].assert_is_compatible_with(shape_mask)
leading_size = gen_math_ops.prod(shape(tensor)[axis:axis + ndims_mask], [0])
tensor = reshape(tensor,
concat([
shape(tensor)[:axis], [leading_size],
shape(tensor)[axis + ndims_mask:]
], 0))
first_dim = shape_tensor[axis:axis + ndims_mask].num_elements()
tensor.set_shape(
tensor_shape.as_shape(shape_tensor[:axis]).concatenate([first_dim])
.concatenate(shape_tensor[axis + ndims_mask:]))
mask = reshape(mask, [-1])
return _apply_mask_1d(tensor, mask, axis)
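# A small sketch of the `axis` argument described above (the
# `_boolean_mask_axis_example` helper is illustrative only and not part of the
# module's API): with axis=1 the mask selects along the second dimension
# (columns) instead of the first.
def _boolean_mask_axis_example():
  t = constant([[1, 2, 3], [4, 5, 6]])
  m = constant([True, False, True])
  return boolean_mask(t, m, axis=1)  # [[1, 3], [4, 6]]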
@tf_export("boolean_mask", v1=[])
@dispatch.add_dispatch_support
def boolean_mask_v2(tensor, mask, axis=None, name="boolean_mask"):
"""Apply boolean mask to tensor.
Numpy equivalent is `tensor[mask]`.
```python
# 1-D example
tensor = [0, 1, 2, 3]
mask = np.array([True, False, True, False])
boolean_mask(tensor, mask) # [0, 2]
```
In general, `0 < dim(mask) = K <= dim(tensor)`, and `mask`'s shape must match
the first K dimensions of `tensor`'s shape. We then have:
`boolean_mask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]`
where `(i1,...,iK)` is the ith `True` entry of `mask` (row-major order).
The `axis` could be used with `mask` to indicate the axis to mask from.
In that case, `axis + dim(mask) <= dim(tensor)` and `mask`'s shape must match
the first `axis + dim(mask)` dimensions of `tensor`'s shape.
Args:
tensor: N-D tensor.
mask: K-D boolean tensor, K <= N and K must be known statically.
axis: A 0-D int Tensor representing the axis in `tensor` to mask from. By
default, axis is 0 which will mask from the first dimension. Otherwise K +
axis <= N.
name: A name for this operation (optional).
Returns:
(N-K+1)-dimensional tensor populated by entries in `tensor` corresponding
to `True` values in `mask`.
Raises:
ValueError: If shapes do not conform.
Examples:
```python
# 2-D example
tensor = [[1, 2], [3, 4], [5, 6]]
mask = np.array([True, False, True])
boolean_mask(tensor, mask) # [[1, 2], [5, 6]]
```
"""
return boolean_mask(tensor, mask, name, axis)
@tf_export("sparse.mask", v1=["sparse.mask", "sparse_mask"])
@deprecation.deprecated_endpoints("sparse_mask")
def sparse_mask(a, mask_indices, name=None):
"""Masks elements of `IndexedSlices`.
Given an `IndexedSlices` instance `a`, returns another `IndexedSlices` that
contains a subset of the slices of `a`. Only the slices at indices not
specified in `mask_indices` are returned.
This is useful when you need to extract a subset of slices in an
`IndexedSlices` object.
For example:
```python
# `a` contains slices at indices [12, 26, 37, 45] from a large tensor
# with shape [1000, 10]
a.indices # [12, 26, 37, 45]
tf.shape(a.values) # [4, 10]
# `b` will be the subset of `a` slices at its second and third indices, so
# we want to mask its first and last indices (which are at absolute
# indices 12, 45)
b = tf.sparse.mask(a, [12, 45])
b.indices # [26, 37]
tf.shape(b.values) # [2, 10]
```
Args:
a: An `IndexedSlices` instance.
mask_indices: Indices of elements to mask.
name: A name for the operation (optional).
Returns:
The masked `IndexedSlices` instance.
"""
with ops.name_scope(name, "sparse_mask", [a, mask_indices]) as name:
indices = a.indices
out_indices, to_gather = setdiff1d(indices, mask_indices)
out_values = gather(a.values, to_gather, name=name)
return ops.IndexedSlices(out_values, out_indices, a.dense_shape)
@tf_export("unique")
def unique(x, out_idx=dtypes.int32, name=None):
# TODO(yongtang): switch to v2 once API deprecation
  # period (3 weeks) passes.
# TODO(yongtang): The documentation should also
# be updated when switch to v2.
return gen_array_ops.unique(x, out_idx, name)
unique.__doc__ = gen_array_ops.unique.__doc__
@tf_export("unique_with_counts")
def unique_with_counts(x, out_idx=dtypes.int32, name=None):
# TODO(yongtang): switch to v2 once API deprecation
  # period (3 weeks) passes.
# TODO(yongtang): The documentation should also
# be updated when switch to v2.
return gen_array_ops.unique_with_counts(x, out_idx, name)
unique_with_counts.__doc__ = gen_array_ops.unique_with_counts.__doc__
@tf_export("split")
def split(value, num_or_size_splits, axis=0, num=None, name="split"):
"""Splits a tensor into sub tensors.
If `num_or_size_splits` is an integer, then `value` is split along dimension
`axis` into `num_split` smaller tensors. This requires that `num_split` evenly
divides `value.shape[axis]`.
If `num_or_size_splits` is a 1-D Tensor (or list), we call it `size_splits`
and `value` is split into `len(size_splits)` elements. The shape of the `i`-th
element has the same size as the `value` except along dimension `axis` where
the size is `size_splits[i]`.
For example:
```python
# 'value' is a tensor with shape [5, 30]
# Split 'value' into 3 tensors with sizes [4, 15, 11] along dimension 1
split0, split1, split2 = tf.split(value, [4, 15, 11], 1)
tf.shape(split0) # [5, 4]
tf.shape(split1) # [5, 15]
tf.shape(split2) # [5, 11]
# Split 'value' into 3 tensors along dimension 1
split0, split1, split2 = tf.split(value, num_or_size_splits=3, axis=1)
tf.shape(split0) # [5, 10]
```
Args:
value: The `Tensor` to split.
num_or_size_splits: Either an integer indicating the number of
splits along split_dim or a 1-D integer `Tensor` or Python list containing
the sizes of each output tensor along split_dim. If a scalar then it must
evenly divide `value.shape[axis]`; otherwise the sum of sizes along the
split dimension must match that of the `value`.
axis: An integer or scalar `int32` `Tensor`. The dimension along which to
split. Must be in the range `[-rank(value), rank(value))`. Defaults to 0.
num: Optional, used to specify the number of outputs when it cannot be
inferred from the shape of `size_splits`.
name: A name for the operation (optional).
Returns:
if `num_or_size_splits` is a scalar returns `num_or_size_splits` `Tensor`
objects; if `num_or_size_splits` is a 1-D Tensor returns
    `num_or_size_splits.get_shape()[0]` `Tensor` objects resulting from splitting
`value`.
Raises:
ValueError: If `num` is unspecified and cannot be inferred.
"""
size_splits = ops.convert_to_tensor(num_or_size_splits)
if isinstance(num_or_size_splits,
six.integer_types + (tensor_shape.Dimension,)):
return gen_array_ops.split(
axis=axis, num_split=num_or_size_splits, value=value, name=name)
if size_splits._rank() == 0:
raise ValueError(
"Rank-0 tensors are not supported as the num_or_size_splits argument "
"to split. Argument provided: %s" % (num_or_size_splits,))
if num is None:
size_splits_shape = size_splits._shape_tuple()
if size_splits_shape:
num = size_splits_shape[0]
if num is None:
raise ValueError("Cannot infer num from shape %s" % num_or_size_splits)
return gen_array_ops.split_v(
value=value, size_splits=size_splits, axis=axis, num_split=num, name=name)
@tf_export("transpose", v1=[])
def transpose_v2(a, perm=None, conjugate=False, name="transpose"):
"""Transposes `a`. Permutes the dimensions according to `perm`.
The returned tensor's dimension i will correspond to the input dimension
`perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is
the rank of the input tensor. Hence by default, this operation performs a
regular matrix transpose on 2-D input Tensors. If conjugate is True and
`a.dtype` is either `complex64` or `complex128` then the values of `a`
are conjugated and transposed.
@compatibility(numpy)
In `numpy` transposes are memory-efficient constant time operations as they
simply return a new view of the same data with adjusted `strides`.
TensorFlow does not support strides, so `transpose` returns a new tensor with
the items permuted.
@end_compatibility
For example:
```python
x = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.transpose(x) # [[1, 4]
# [2, 5]
# [3, 6]]
# Equivalently
tf.transpose(x, perm=[1, 0]) # [[1, 4]
# [2, 5]
# [3, 6]]
# If x is complex, setting conjugate=True gives the conjugate transpose
x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],
[4 + 4j, 5 + 5j, 6 + 6j]])
tf.transpose(x, conjugate=True) # [[1 - 1j, 4 - 4j],
# [2 - 2j, 5 - 5j],
# [3 - 3j, 6 - 6j]]
# 'perm' is more useful for n-dimensional tensors, for n > 2
x = tf.constant([[[ 1, 2, 3],
[ 4, 5, 6]],
[[ 7, 8, 9],
[10, 11, 12]]])
# Take the transpose of the matrices in dimension-0
# (this common operation has a shorthand `linalg.transpose`)
tf.transpose(x, perm=[0, 2, 1]) # [[[1, 4],
# [2, 5],
# [3, 6]],
# [[7, 10],
# [8, 11],
# [9, 12]]]
```
Args:
a: A `Tensor`.
perm: A permutation of the dimensions of `a`.
conjugate: Optional bool. Setting it to `True` is mathematically equivalent
to tf.conj(tf.transpose(input)).
name: A name for the operation (optional).
Returns:
A transposed `Tensor`.
"""
return transpose(a=a, perm=perm, name=name, conjugate=conjugate)
@tf_export(v1=["transpose"])
def transpose(a, perm=None, name="transpose", conjugate=False):
"""Transposes `a`. Permutes the dimensions according to `perm`.
The returned tensor's dimension i will correspond to the input dimension
`perm[i]`. If `perm` is not given, it is set to (n-1...0), where n is
the rank of the input tensor. Hence by default, this operation performs a
regular matrix transpose on 2-D input Tensors. If conjugate is True and
`a.dtype` is either `complex64` or `complex128` then the values of `a`
are conjugated and transposed.
@compatibility(numpy)
In `numpy` transposes are memory-efficient constant time operations as they
simply return a new view of the same data with adjusted `strides`.
TensorFlow does not support strides, so `transpose` returns a new tensor with
the items permuted.
@end_compatibility
For example:
```python
x = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.transpose(x) # [[1, 4]
# [2, 5]
# [3, 6]]
# Equivalently
tf.transpose(x, perm=[1, 0]) # [[1, 4]
# [2, 5]
# [3, 6]]
# If x is complex, setting conjugate=True gives the conjugate transpose
x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],
[4 + 4j, 5 + 5j, 6 + 6j]])
tf.transpose(x, conjugate=True) # [[1 - 1j, 4 - 4j],
# [2 - 2j, 5 - 5j],
# [3 - 3j, 6 - 6j]]
# 'perm' is more useful for n-dimensional tensors, for n > 2
x = tf.constant([[[ 1, 2, 3],
[ 4, 5, 6]],
[[ 7, 8, 9],
[10, 11, 12]]])
# Take the transpose of the matrices in dimension-0
# (this common operation has a shorthand `linalg.transpose`)
tf.transpose(x, perm=[0, 2, 1]) # [[[1, 4],
# [2, 5],
# [3, 6]],
# [[7, 10],
# [8, 11],
# [9, 12]]]
```
Args:
a: A `Tensor`.
perm: A permutation of the dimensions of `a`.
name: A name for the operation (optional).
conjugate: Optional bool. Setting it to `True` is mathematically equivalent
to tf.conj(tf.transpose(input)).
Returns:
A transposed `Tensor`.
"""
with ops.name_scope(name, "transpose", [a]) as name:
transpose_fn = (
gen_array_ops.conjugate_transpose
if (conjugate and a.dtype.is_complex) else gen_array_ops.transpose)
if perm is None:
a = ops.convert_to_tensor(a, name="a")
if not a.get_shape().ndims:
rank = gen_array_ops.rank(a)
perm = (rank - 1) - gen_math_ops._range(0, rank, 1)
else:
rank = a.get_shape().ndims
perm = (rank - 1) - np.arange(rank)
ret = transpose_fn(a, perm, name=name)
# NOTE(mrry): Setting the shape explicitly because
# reverse is not handled by the shape function.
if not context.executing_eagerly():
input_shape = ret.op.inputs[0].get_shape().dims
if input_shape is not None:
ret.set_shape(input_shape[::-1])
else:
ret = transpose_fn(a, perm, name=name)
return ret
# pylint: disable=invalid-name
@tf_export("linalg.transpose", v1=["linalg.transpose", "matrix_transpose"])
@deprecation.deprecated_endpoints("matrix_transpose")
def matrix_transpose(a, name="matrix_transpose", conjugate=False):
"""Transposes last two dimensions of tensor `a`.
For example:
```python
x = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.linalg.transpose(x) # [[1, 4],
# [2, 5],
# [3, 6]]
x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],
[4 + 4j, 5 + 5j, 6 + 6j]])
tf.linalg.transpose(x, conjugate=True) # [[1 - 1j, 4 - 4j],
# [2 - 2j, 5 - 5j],
# [3 - 3j, 6 - 6j]]
# Matrix with two batch dimensions.
# x.shape is [1, 2, 3, 4]
# tf.linalg.transpose(x) is shape [1, 2, 4, 3]
```
Note that `tf.matmul` provides kwargs allowing for transpose of arguments.
This is done with minimal cost, and is preferable to using this function. E.g.
```python
# Good! Transpose is taken at minimal additional cost.
tf.matmul(matrix, b, transpose_b=True)
# Inefficient!
tf.matmul(matrix, tf.linalg.transpose(b))
```
@compatibility(numpy)
In `numpy` transposes are memory-efficient constant time operations as they
simply return a new view of the same data with adjusted `strides`.
  TensorFlow does not support strides, so `linalg.transpose` returns a new tensor
with the items permuted.
@end_compatibility
Args:
a: A `Tensor` with `rank >= 2`.
name: A name for the operation (optional).
conjugate: Optional bool. Setting it to `True` is mathematically equivalent
to tf.conj(tf.linalg.transpose(input)).
Returns:
A transposed batch matrix `Tensor`.
Raises:
ValueError: If `a` is determined statically to have `rank < 2`.
"""
with ops.name_scope(name, values=[a]):
a = ops.convert_to_tensor(a, name="a")
# If we know the number of dimensions (statically), we can do two things:
# 1. Check that `a` is a (batch) matrix.
# 2. Use a python list for perm. This preserves static shape information
# and avoids extra computations.
a_shape = a.get_shape()
ndims = a_shape.ndims
if ndims is not None:
if ndims < 2:
raise ValueError(
"Argument 'a' should be a (batch) matrix, with rank >= 2. Found: "
"%s" % a_shape)
perm = list(range(ndims - 2)) + [ndims - 1] + [ndims - 2]
else:
a_rank = rank(a)
perm = concat((gen_math_ops._range(0, a_rank - 2, 1),
[a_rank - 1, a_rank - 2]), 0)
return transpose(a, perm=perm, conjugate=conjugate)
# pylint: enable=invalid-name
def _constant_if_small(value, shape, dtype, name):
try:
if np.prod(shape) < 1000:
return constant(value, shape=shape, dtype=dtype, name=name)
except TypeError:
# Happens when shape is a Tensor, list with Tensor elements, etc.
pass
return None
@tf_export("zeros")
def zeros(shape, dtype=dtypes.float32, name=None):
"""Creates a tensor with all elements set to zero.
This operation returns a tensor of type `dtype` with shape `shape` and
all elements set to zero.
For example:
```python
tf.zeros([3, 4], tf.int32) # [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
```
Args:
shape: A list of integers, a tuple of integers, or a 1-D `Tensor` of type
`int32`.
dtype: The type of an element in the resulting `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` with all elements set to zero.
"""
dtype = dtypes.as_dtype(dtype).base_dtype
with ops.name_scope(name, "zeros", [shape]) as name:
if dtype == dtypes.bool:
zero = False
elif dtype == dtypes.string:
zero = ""
else:
zero = 0
if not isinstance(shape, ops.Tensor):
try:
# Create a constant if it won't be very big. Otherwise create a fill op
# to prevent serialized GraphDefs from becoming too large.
output = _constant_if_small(zero, shape, dtype, name)
if output is not None:
return output
# Go through tensor shapes to get int64-if-needed semantics
shape = constant_op._tensor_shape_tensor_conversion_function(
tensor_shape.TensorShape(shape))
except (TypeError, ValueError):
# Happens when shape is a list with tensor elements
shape = ops.convert_to_tensor(shape, dtype=dtypes.int32)
if not shape._shape_tuple():
shape = reshape(shape, [-1]) # Ensure it's a vector
output = fill(shape, constant(zero, dtype=dtype), name=name)
assert output.dtype.base_dtype == dtype
return output
@tf_export(v1=["zeros_like"])
@dispatch.add_dispatch_support
def zeros_like(tensor, dtype=None, name=None, optimize=True):
"""Creates a tensor with all elements set to zero.
Given a single tensor (`tensor`), this operation returns a tensor of the
same type and shape as `tensor` with all elements set to zero. Optionally,
you can use `dtype` to specify a new type for the returned tensor.
For example:
```python
tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.zeros_like(tensor) # [[0, 0, 0], [0, 0, 0]]
```
Args:
tensor: A `Tensor`.
dtype: A type for the returned `Tensor`. Must be `float16`, `float32`,
`float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`,
`complex64`, `complex128`, `bool` or `string`.
name: A name for the operation (optional).
optimize: if true, attempt to statically determine the shape of 'tensor'
and encode it as a constant.
Returns:
A `Tensor` with all elements set to zero.
"""
return zeros_like_impl(tensor, dtype, name, optimize)
@tf_export("zeros_like", v1=[])
@dispatch.add_dispatch_support
def zeros_like_v2(
input, # pylint: disable=redefined-builtin
dtype=None,
name=None):
"""Creates a tensor with all elements set to zero.
Given a single tensor (`tensor`), this operation returns a tensor of the
same type and shape as `tensor` with all elements set to zero. Optionally,
you can use `dtype` to specify a new type for the returned tensor.
For example:
```python
tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.zeros_like(tensor) # [[0, 0, 0], [0, 0, 0]]
```
Args:
input: A `Tensor`.
dtype: A type for the returned `Tensor`. Must be `float16`, `float32`,
`float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`,
`complex64`, `complex128`, `bool` or `string`.
name: A name for the operation (optional).
Returns:
A `Tensor` with all elements set to zero.
"""
return zeros_like_impl(input, dtype, name, optimize=True)
def zeros_like_impl(tensor, dtype, name, optimize=True):
"""Internal implementation for the v1/v2 zeros_like API calls."""
with ops.name_scope(name, "zeros_like", [tensor]) as name:
tensor = ops.convert_to_tensor(tensor, name="tensor")
if context.executing_eagerly():
if dtype is not None and dtype != tensor.dtype:
return zeros(
shape_internal(tensor, optimize=optimize), dtype=dtype, name=name)
with ops.device(tensor.device):
return gen_array_ops.zeros_like(tensor, name=name)
# For now, variant types must be created via zeros_like; as we need to
# pass the input variant object to the proper zeros callback.
if (optimize and tensor.shape.is_fully_defined() and
tensor.dtype != dtypes.variant):
# We can produce a zeros tensor independent of the value of 'tensor',
# since the shape is known statically.
return zeros(tensor.shape, dtype=dtype or tensor.dtype, name=name)
if dtype is not None and dtype != tensor.dtype and dtype != dtypes.variant:
return zeros(
shape_internal(tensor, optimize=optimize), dtype=dtype, name=name)
else:
return gen_array_ops.zeros_like(tensor, name=name)
@tf_export(v1=["ones_like"])
@dispatch.add_dispatch_support
def ones_like(tensor, dtype=None, name=None, optimize=True):
"""Creates a tensor with all elements set to 1.
Given a single tensor (`tensor`), this operation returns a tensor of the same
type and shape as `tensor` with all elements set to 1. Optionally, you can
specify a new type (`dtype`) for the returned tensor.
For example:
```python
tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.ones_like(tensor) # [[1, 1, 1], [1, 1, 1]]
```
Args:
tensor: A `Tensor`.
dtype: A type for the returned `Tensor`. Must be `float32`, `float64`,
`int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`,
`complex64`, `complex128` or `bool`.
name: A name for the operation (optional).
optimize: if true, attempt to statically determine the shape of 'tensor'
and encode it as a constant.
Returns:
A `Tensor` with all elements set to 1.
"""
return ones_like_impl(tensor, dtype, name, optimize)
@tf_export("ones_like", v1=[])
@dispatch.add_dispatch_support
def ones_like_v2(
input, # pylint: disable=redefined-builtin
dtype=None,
name=None):
"""Creates a tensor with all elements set to zero.
Given a single tensor (`tensor`), this operation returns a tensor of the
same type and shape as `tensor` with all elements set to zero. Optionally,
you can use `dtype` to specify a new type for the returned tensor.
For example:
```python
tensor = tf.constant([[1, 2, 3], [4, 5, 6]])
tf.ones_like(tensor) # [[1, 1, 1], [1, 1, 1]]
```
Args:
input: A `Tensor`.
dtype: A type for the returned `Tensor`. Must be `float16`, `float32`,
`float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`,
`complex64`, `complex128`, `bool` or `string`.
name: A name for the operation (optional).
Returns:
A `Tensor` with all elements set to 1.
"""
return ones_like_impl(input, dtype, name, optimize=True)
def ones_like_impl(tensor, dtype, name, optimize=True):
"""Internal implementation for the v1/v2 ones_like API calls."""
with ops.name_scope(name, "ones_like", [tensor]) as name:
tensor = ops.convert_to_tensor(tensor, name="tensor")
ones_shape = shape_internal(tensor, optimize=optimize)
if dtype is None:
dtype = tensor.dtype
ret = ones(ones_shape, dtype=dtype, name=name)
if not context.executing_eagerly():
ret.set_shape(tensor.get_shape())
return ret
@tf_export("ones")
def ones(shape, dtype=dtypes.float32, name=None):
"""Creates a tensor with all elements set to 1.
This operation returns a tensor of type `dtype` with shape `shape` and all
elements set to 1.
For example:
```python
tf.ones([2, 3], tf.int32) # [[1, 1, 1], [1, 1, 1]]
```
Args:
shape: A list of integers, a tuple of integers, or a 1-D `Tensor` of type
`int32`.
dtype: The type of an element in the resulting `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` with all elements set to 1.
"""
dtype = dtypes.as_dtype(dtype).base_dtype
with ops.name_scope(name, "ones", [shape]) as name:
one = True if dtype == dtypes.bool else 1
if not isinstance(shape, ops.Tensor):
try:
# Create a constant if it won't be very big. Otherwise create a fill op
# to prevent serialized GraphDefs from becoming too large.
output = _constant_if_small(one, shape, dtype, name)
if output is not None:
return output
# Go through tensor shapes to get int64-if-needed semantics
shape = constant_op._tensor_shape_tensor_conversion_function(
tensor_shape.TensorShape(shape))
except (TypeError, ValueError):
# Happens when shape is a list with tensor elements
shape = ops.convert_to_tensor(shape, dtype=dtypes.int32)
if not shape._shape_tuple():
shape = reshape(shape, [-1]) # Ensure it's a vector
output = fill(shape, constant(one, dtype=dtype), name=name)
assert output.dtype.base_dtype == dtype
return output
@tf_export(v1=["placeholder"])
def placeholder(dtype, shape=None, name=None):
"""Inserts a placeholder for a tensor that will be always fed.
**Important**: This tensor will produce an error if evaluated. Its value must
be fed using the `feed_dict` optional argument to `Session.run()`,
`Tensor.eval()`, or `Operation.run()`.
For example:
```python
x = tf.placeholder(tf.float32, shape=(1024, 1024))
y = tf.matmul(x, x)
with tf.Session() as sess:
print(sess.run(y)) # ERROR: will fail because x was not fed.
rand_array = np.random.rand(1024, 1024)
print(sess.run(y, feed_dict={x: rand_array})) # Will succeed.
```
@compatibility(eager)
Placeholders are not compatible with eager execution.
@end_compatibility
Args:
dtype: The type of elements in the tensor to be fed.
shape: The shape of the tensor to be fed (optional). If the shape is not
specified, you can feed a tensor of any shape.
name: A name for the operation (optional).
Returns:
A `Tensor` that may be used as a handle for feeding a value, but not
evaluated directly.
Raises:
RuntimeError: if eager execution is enabled
"""
if context.executing_eagerly():
raise RuntimeError("tf.placeholder() is not compatible with "
"eager execution.")
return gen_array_ops.placeholder(dtype=dtype, shape=shape, name=name)
@tf_export(v1=["placeholder_with_default"])
def placeholder_with_default(input, shape, name=None): # pylint: disable=redefined-builtin
"""A placeholder op that passes through `input` when its output is not fed.
Args:
input: A `Tensor`. The default value to produce when output is not fed.
shape: A `tf.TensorShape` or list of `int`s. The (possibly partial) shape
of the tensor.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
return gen_array_ops.placeholder_with_default(input, shape, name)
# pylint: disable=redefined-outer-name
def _normalize_sparse_shape(shape, name):
"""Returns a tuple of (Tensor or None, rank or None)."""
if shape is None:
return (None, None)
rank = shape.get_shape()[0] if isinstance(shape, ops.Tensor) else len(shape)
if not isinstance(shape, ops.Tensor) and None in shape:
return (None, rank)
return (ops.convert_to_tensor(shape, dtype=dtypes.int64, name=name), rank)
@tf_export(v1=["sparse.placeholder", "sparse_placeholder"])
@deprecation.deprecated_endpoints("sparse_placeholder")
def sparse_placeholder(dtype, shape=None, name=None):
"""Inserts a placeholder for a sparse tensor that will be always fed.
**Important**: This sparse tensor will produce an error if evaluated.
Its value must be fed using the `feed_dict` optional argument to
`Session.run()`, `Tensor.eval()`, or `Operation.run()`.
For example:
```python
x = tf.sparse.placeholder(tf.float32)
y = tf.sparse.reduce_sum(x)
with tf.Session() as sess:
print(sess.run(y)) # ERROR: will fail because x was not fed.
indices = np.array([[3, 2, 0], [4, 5, 1]], dtype=np.int64)
values = np.array([1.0, 2.0], dtype=np.float32)
shape = np.array([7, 9, 2], dtype=np.int64)
print(sess.run(y, feed_dict={
x: tf.SparseTensorValue(indices, values, shape)})) # Will succeed.
print(sess.run(y, feed_dict={
x: (indices, values, shape)})) # Will succeed.
sp = tf.SparseTensor(indices=indices, values=values, dense_shape=shape)
sp_value = sp.eval(session=sess)
print(sess.run(y, feed_dict={x: sp_value})) # Will succeed.
```
@compatibility(eager)
Placeholders are not compatible with eager execution.
@end_compatibility
Args:
dtype: The type of `values` elements in the tensor to be fed.
shape: The shape of the tensor to be fed (optional). If the shape is not
specified, you can feed a sparse tensor of any shape.
name: A name for prefixing the operations (optional).
Returns:
A `SparseTensor` that may be used as a handle for feeding a value, but not
evaluated directly.
Raises:
RuntimeError: if eager execution is enabled
"""
if context.executing_eagerly():
raise RuntimeError("tf.placeholder() is not compatible with "
"eager execution.")
shape_name = (name + "/shape") if name is not None else None
shape, rank = _normalize_sparse_shape(shape, shape_name)
if shape is None:
shape = placeholder(dtypes.int64, shape=[rank], name=shape_name)
return sparse_tensor.SparseTensor(
values=placeholder(
dtype,
shape=[None],
name=(name + "/values") if name is not None else None),
indices=placeholder(
dtypes.int64, shape=[None, rank],
name=(name + "/indices") if name is not None else None),
dense_shape=shape)
# pylint: enable=redefined-outer-name
@tf_export("pad", v1=[])
def pad_v2(tensor, paddings, mode="CONSTANT", constant_values=0, name=None):
"""Pads a tensor.
This operation pads a `tensor` according to the `paddings` you specify.
`paddings` is an integer tensor with shape `[n, 2]`, where n is the rank of
`tensor`. For each dimension D of `input`, `paddings[D, 0]` indicates how
many values to add before the contents of `tensor` in that dimension, and
`paddings[D, 1]` indicates how many values to add after the contents of
`tensor` in that dimension. If `mode` is "REFLECT" then both `paddings[D, 0]`
and `paddings[D, 1]` must be no greater than `tensor.dim_size(D) - 1`. If
`mode` is "SYMMETRIC" then both `paddings[D, 0]` and `paddings[D, 1]` must be
no greater than `tensor.dim_size(D)`.
The padded size of each dimension D of the output is:
`paddings[D, 0] + tensor.dim_size(D) + paddings[D, 1]`
For example:
```python
t = tf.constant([[1, 2, 3], [4, 5, 6]])
paddings = tf.constant([[1, 1,], [2, 2]])
# 'constant_values' is 0.
# rank of 't' is 2.
tf.pad(t, paddings, "CONSTANT") # [[0, 0, 0, 0, 0, 0, 0],
# [0, 0, 1, 2, 3, 0, 0],
# [0, 0, 4, 5, 6, 0, 0],
# [0, 0, 0, 0, 0, 0, 0]]
tf.pad(t, paddings, "REFLECT") # [[6, 5, 4, 5, 6, 5, 4],
# [3, 2, 1, 2, 3, 2, 1],
# [6, 5, 4, 5, 6, 5, 4],
# [3, 2, 1, 2, 3, 2, 1]]
tf.pad(t, paddings, "SYMMETRIC") # [[2, 1, 1, 2, 3, 3, 2],
# [2, 1, 1, 2, 3, 3, 2],
# [5, 4, 4, 5, 6, 6, 5],
# [5, 4, 4, 5, 6, 6, 5]]
```
Args:
tensor: A `Tensor`.
paddings: A `Tensor` of type `int32`.
mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC" (case-insensitive)
constant_values: In "CONSTANT" mode, the scalar pad value to use. Must be
same type as `tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `tensor`.
Raises:
ValueError: When mode is not one of "CONSTANT", "REFLECT", or "SYMMETRIC".
"""
return pad(tensor, paddings, mode, name, constant_values)
@tf_export(v1=["pad"])
def pad(tensor, paddings, mode="CONSTANT", name=None, constant_values=0): # pylint: disable=invalid-name
"""Pads a tensor.
This operation pads a `tensor` according to the `paddings` you specify.
`paddings` is an integer tensor with shape `[n, 2]`, where n is the rank of
`tensor`. For each dimension D of `input`, `paddings[D, 0]` indicates how
many values to add before the contents of `tensor` in that dimension, and
`paddings[D, 1]` indicates how many values to add after the contents of
`tensor` in that dimension. If `mode` is "REFLECT" then both `paddings[D, 0]`
and `paddings[D, 1]` must be no greater than `tensor.dim_size(D) - 1`. If
`mode` is "SYMMETRIC" then both `paddings[D, 0]` and `paddings[D, 1]` must be
no greater than `tensor.dim_size(D)`.
The padded size of each dimension D of the output is:
`paddings[D, 0] + tensor.dim_size(D) + paddings[D, 1]`
For example:
```python
t = tf.constant([[1, 2, 3], [4, 5, 6]])
paddings = tf.constant([[1, 1,], [2, 2]])
# 'constant_values' is 0.
# rank of 't' is 2.
tf.pad(t, paddings, "CONSTANT") # [[0, 0, 0, 0, 0, 0, 0],
# [0, 0, 1, 2, 3, 0, 0],
# [0, 0, 4, 5, 6, 0, 0],
# [0, 0, 0, 0, 0, 0, 0]]
tf.pad(t, paddings, "REFLECT") # [[6, 5, 4, 5, 6, 5, 4],
# [3, 2, 1, 2, 3, 2, 1],
# [6, 5, 4, 5, 6, 5, 4],
# [3, 2, 1, 2, 3, 2, 1]]
tf.pad(t, paddings, "SYMMETRIC") # [[2, 1, 1, 2, 3, 3, 2],
# [2, 1, 1, 2, 3, 3, 2],
# [5, 4, 4, 5, 6, 6, 5],
# [5, 4, 4, 5, 6, 6, 5]]
```
Args:
tensor: A `Tensor`.
paddings: A `Tensor` of type `int32`.
mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC" (case-insensitive)
name: A name for the operation (optional).
constant_values: In "CONSTANT" mode, the scalar pad value to use. Must be
same type as `tensor`.
Returns:
A `Tensor`. Has the same type as `tensor`.
Raises:
ValueError: When mode is not one of "CONSTANT", "REFLECT", or "SYMMETRIC".
"""
# Convert lower/mixed case to upper for NumPy compatibility
# NumPy uses all lower-case modes.
mode = mode.upper()
if mode == "CONSTANT":
# TODO(rjryan): Once the forward compatibility period (3 weeks) has passed,
# remove the "Pad" fallback here.
if constant_values != 0:
result = gen_array_ops.pad_v2(
tensor, paddings, constant_values, name=name)
else:
result = gen_array_ops.pad(tensor, paddings, name=name)
elif mode == "REFLECT":
result = gen_array_ops.mirror_pad(
tensor, paddings, mode="REFLECT", name=name)
elif mode == "SYMMETRIC":
result = gen_array_ops.mirror_pad(
tensor, paddings, mode="SYMMETRIC", name=name)
else:
raise ValueError("Unknown padding mode: %s" % mode)
# Restore shape information where possible.
if not context.executing_eagerly():
paddings_constant = tensor_util.constant_value(
result.op.inputs[1], partial=True)
input_shape = result.op.inputs[0].shape
if (input_shape.ndims is not None and not result.shape.is_fully_defined()
and paddings_constant is not None):
new_shape = []
for padding, dim in zip(paddings_constant, input_shape.as_list()):
if padding is None or dim is None or any((x is None for x in padding)):
new_shape.append(None)
else:
new_shape.append(sum(padding) + dim)
result.set_shape(new_shape)
return result
@tf_export("meshgrid")
def meshgrid(*args, **kwargs):
"""Broadcasts parameters for evaluation on an N-D grid.
Given N one-dimensional coordinate arrays `*args`, returns a list `outputs`
of N-D coordinate arrays for evaluating expressions on an N-D grid.
Notes:
`meshgrid` supports cartesian ('xy') and matrix ('ij') indexing conventions.
When the `indexing` argument is set to 'xy' (the default), the broadcasting
instructions for the first two dimensions are swapped.
Examples:
Calling `X, Y = meshgrid(x, y)` with the tensors
```python
x = [1, 2, 3]
y = [4, 5, 6]
X, Y = tf.meshgrid(x, y)
# X = [[1, 2, 3],
# [1, 2, 3],
# [1, 2, 3]]
# Y = [[4, 4, 4],
# [5, 5, 5],
# [6, 6, 6]]
```
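With matrix ('ij') indexing instead, the first two outputs are not swapped
(an illustrative sketch using the same `x` and `y`):
```python
X, Y = tf.meshgrid(x, y, indexing='ij')
# X = [[1, 1, 1],
#      [2, 2, 2],
#      [3, 3, 3]]
# Y = [[4, 5, 6],
#      [4, 5, 6],
#      [4, 5, 6]]
```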
Args:
*args: `Tensor`s with rank 1.
**kwargs:
- indexing: Either 'xy' or 'ij' (optional, default: 'xy').
- name: A name for the operation (optional).
Returns:
outputs: A list of N `Tensor`s with rank N.
Raises:
TypeError: When an unsupported keyword argument is passed.
ValueError: When indexing keyword argument is not one of `xy` or `ij`.
"""
indexing = kwargs.pop("indexing", "xy")
name = kwargs.pop("name", "meshgrid")
if kwargs:
key = list(kwargs.keys())[0]
raise TypeError("'{}' is an invalid keyword argument "
"for this function".format(key))
if indexing not in ("xy", "ij"):
raise ValueError("indexing parameter must be either 'xy' or 'ij'")
with ops.name_scope(name, "meshgrid", args) as name:
ndim = len(args)
s0 = (1,) * ndim
# Prepare reshape by inserting dimensions with size 1 where needed
output = []
for i, x in enumerate(args):
output.append(reshape(stack(x), (s0[:i] + (-1,) + s0[i + 1::])))
# Create parameters for broadcasting each tensor to the full size
shapes = [size(x) for x in args]
output_dtype = ops.convert_to_tensor(args[0]).dtype.base_dtype
if indexing == "xy" and ndim > 1:
output[0] = reshape(output[0], (1, -1) + (1,) * (ndim - 2))
output[1] = reshape(output[1], (-1, 1) + (1,) * (ndim - 2))
shapes[0], shapes[1] = shapes[1], shapes[0]
# TODO(nolivia): improve performance with a broadcast
mult_fact = ones(shapes, output_dtype)
return [x * mult_fact for x in output]
NEW_AXIS = -1
SHRINK_AXIS = -2
# PEP-8 naming
# pylint: disable=invalid-name,redefined-outer-name
def _compute_size_of_strided_dim(shrink, spec, size):
"""Computes the size of a single strided slice dimension."""
unknown = None  # Used to signal a statically unknown size or stride.
use_full_range = None  # A None bound in `spec` means "use the full valid range".
# if this is a shrink axis (i.e. a non-range index)
# it either will produce an error or return 1
if shrink:
return 1
if size is unknown or size.value is unknown:
return unknown
size = size.value
stride = spec.step
if stride is not unknown:
if stride == 0:
return unknown
valid_range = [0, size] if stride > 0 else [-1, size - 1]
# PEP-8 naming
# pylint: disable=invalid-name
def canonical(x, c):
if x is use_full_range:
return valid_range[c] if stride > 0 else valid_range[(c + 1) & 1]
else:
x_fwd = size + x if x < 0 else x # make negative indices positive
return max(valid_range[0], min(valid_range[1], x_fwd))
begin = canonical(spec.start, 0)
end = canonical(spec.stop, 1)
interval_length = end - begin
if interval_length == 0 or ((interval_length < 0) != (stride < 0)):
return 0
else:
remainder = 1 if interval_length % stride != 0 else 0
return interval_length // stride + remainder
else:
return unknown # unknown because stride is unknown
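# Illustrative sketch of _compute_size_of_strided_dim (hypothetical inputs):
#   spec = slice(1, 7, 2) applied to a dimension of static size 10 (no shrink)
#   canonical begin = 1, end = 7, stride = 2  ->  sliced size is (7 - 1) // 2 = 3
#   (the kept indices would be 1, 3 and 5).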
def _TileGradShape(op):
"""Shape function for the TileGrad op."""
multiples_shape = op.inputs[1].get_shape().with_rank(1)
input_shape = op.inputs[0].get_shape().with_rank(multiples_shape[0])
# NOTE(mrry): Represent `multiples` as a `TensorShape` because (i)
# it is a vector of non-negative integers, and (ii) doing so allows
# us to handle partially-known multiples.
multiples = tensor_util.constant_value_as_shape(op.inputs[1]).with_rank(
input_shape.ndims)
if multiples.ndims is None:
return [tensor_shape.unknown_shape()]
else:
output_dims = []
for dim, multiple in zip(input_shape.dims, multiples.dims):
output_dims.append(dim // multiple)
return [tensor_shape.TensorShape(output_dims)]
@tf_export("edit_distance")
def edit_distance(hypothesis, truth, normalize=True, name="edit_distance"):
"""Computes the Levenshtein distance between sequences.
This operation takes variable-length sequences (`hypothesis` and `truth`),
each provided as a `SparseTensor`, and computes the Levenshtein distance.
You can normalize the edit distance by length of `truth` by setting
`normalize` to true.
For example, given the following input:
```python
# 'hypothesis' is a tensor of shape `[2, 1]` with variable-length values:
# (0,0) = ["a"]
# (1,0) = ["b"]
hypothesis = tf.SparseTensor(
[[0, 0, 0],
[1, 0, 0]],
["a", "b"],
(2, 1, 1))
# 'truth' is a tensor of shape `[2, 2]` with variable-length values:
# (0,0) = []
# (0,1) = ["a"]
# (1,0) = ["b", "c"]
# (1,1) = ["a"]
truth = tf.SparseTensor(
[[0, 1, 0],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0]],
["a", "b", "c", "a"],
(2, 2, 2))
normalize = True
```
This operation would return the following:
```python
# 'output' is a tensor of shape `[2, 2]` with edit distances normalized
# by 'truth' lengths.
output ==> [[inf, 1.0], # (0,0): no truth, (0,1): no hypothesis
[0.5, 1.0]] # (1,0): addition, (1,1): no hypothesis
```
Args:
hypothesis: A `SparseTensor` containing hypothesis sequences.
truth: A `SparseTensor` containing truth sequences.
normalize: A `bool`. If `True`, normalizes the Levenshtein distance by
length of `truth.`
name: A name for the operation (optional).
Returns:
A dense `Tensor` with rank `R - 1`, where R is the rank of the
`SparseTensor` inputs `hypothesis` and `truth`.
Raises:
TypeError: If either `hypothesis` or `truth` are not a `SparseTensor`.
"""
if not isinstance(hypothesis, (sparse_tensor.SparseTensor,
sparse_tensor.SparseTensorValue)):
raise TypeError("Hypothesis must be a SparseTensor.")
if not isinstance(truth, (sparse_tensor.SparseTensor,
sparse_tensor.SparseTensorValue)):
raise TypeError("Truth must be a SparseTensor.")
return gen_array_ops.edit_distance(
hypothesis.indices,
hypothesis.values,
hypothesis.dense_shape,
truth.indices,
truth.values,
truth.dense_shape,
normalize=normalize,
name=name)
@ops.RegisterGradient("FakeQuantWithMinMaxArgs")
def _FakeQuantWithMinMaxArgsGradient(op, grad):
"""Gradient for FakeQuantWithMinMaxArgs op."""
return fake_quant_with_min_max_args_gradient(
grad,
op.inputs[0],
min=op.get_attr("min"),
max=op.get_attr("max"),
num_bits=op.get_attr("num_bits"),
narrow_range=op.get_attr("narrow_range"))
@ops.RegisterGradient("FakeQuantWithMinMaxVars")
def _FakeQuantWithMinMaxVarsGradient(op, grad):
"""Gradient for FakeQuantWithMinMaxVars op."""
return fake_quant_with_min_max_vars_gradient(
grad,
op.inputs[0],
op.inputs[1],
op.inputs[2],
num_bits=op.get_attr("num_bits"),
narrow_range=op.get_attr("narrow_range"))
@ops.RegisterGradient("FakeQuantWithMinMaxVarsPerChannel")
def _FakeQuantWithMinMaxVarsPerChannelGradient(op, grad):
"""Gradient for FakeQuantWithMinMaxVarsPerChannel op."""
return fake_quant_with_min_max_vars_per_channel_gradient(
grad,
op.inputs[0],
op.inputs[1],
op.inputs[2],
num_bits=op.get_attr("num_bits"),
narrow_range=op.get_attr("narrow_range"))
@tf_export("required_space_to_batch_paddings")
def required_space_to_batch_paddings(input_shape,
block_shape,
base_paddings=None,
name=None):
"""Calculate padding required to make block_shape divide input_shape.
This function can be used to calculate a suitable paddings argument for use
with space_to_batch_nd and batch_to_space_nd.
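For example, an illustrative sketch (shapes chosen for clarity):
```python
paddings, crops = tf.required_space_to_batch_paddings(
    input_shape=[5, 7], block_shape=[2, 3])
# paddings == [[0, 1], [0, 2]]  -> padded shape [6, 9], divisible by [2, 3]
# crops    == [[0, 1], [0, 2]]  -> cropping undoes the extra padding
```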
Args:
input_shape: int32 Tensor of shape [N].
block_shape: int32 Tensor of shape [N].
base_paddings: Optional int32 Tensor of shape [N, 2]. Specifies the minimum
amount of padding to use. All elements must be >= 0. If not specified,
defaults to 0.
name: string. Optional name prefix.
Returns:
(paddings, crops), where:
`paddings` and `crops` are int32 Tensors of rank 2 and shape [N, 2]
satisfying:
paddings[i, 0] = base_paddings[i, 0].
0 <= paddings[i, 1] - base_paddings[i, 1] < block_shape[i]
(input_shape[i] + paddings[i, 0] + paddings[i, 1]) % block_shape[i] == 0
crops[i, 0] = 0
crops[i, 1] = paddings[i, 1] - base_paddings[i, 1]
Raises:
ValueError: if called with incompatible shapes.
"""
with ops.name_scope(name, "required_space_to_batch_paddings",
[input_shape, block_shape]):
input_shape = ops.convert_to_tensor(
input_shape, dtype=dtypes.int32, name="input_shape")
block_shape = ops.convert_to_tensor(
block_shape, dtype=dtypes.int32, name="block_shape")
block_shape.get_shape().assert_is_fully_defined()
block_shape.get_shape().assert_has_rank(1)
num_block_dims = block_shape.get_shape().dims[0].value
if num_block_dims == 0:
return zeros([0, 2], dtypes.int32), zeros([0, 2], dtypes.int32)
input_shape.get_shape().assert_is_compatible_with([num_block_dims])
if base_paddings is not None:
base_paddings = ops.convert_to_tensor(
base_paddings, dtype=dtypes.int32, name="base_paddings")
base_paddings.get_shape().assert_is_compatible_with([num_block_dims, 2])
else:
base_paddings = zeros([num_block_dims, 2], dtypes.int32)
const_block_shape = tensor_util.constant_value(block_shape)
const_input_shape = tensor_util.constant_value(input_shape)
const_base_paddings = tensor_util.constant_value(base_paddings)
if (const_block_shape is not None and const_input_shape is not None and
const_base_paddings is not None):
block_shape = const_block_shape
input_shape = const_input_shape
base_paddings = const_base_paddings
# Use same expression for both constant and non-constant case.
pad_start = base_paddings[:, 0]
orig_pad_end = base_paddings[:, 1]
full_input_shape = input_shape + pad_start + orig_pad_end
pad_end_extra = (block_shape - full_input_shape % block_shape) % block_shape
pad_end = orig_pad_end + pad_end_extra
result_paddings = stack(
[[pad_start[i], pad_end[i]] for i in range(num_block_dims)],
name="paddings")
result_crops = stack(
[[0, pad_end_extra[i]] for i in range(num_block_dims)], name="crops")
return result_paddings, result_crops
@tf_export(v1=["nn.space_to_batch", "space_to_batch"])
@deprecation.deprecated_endpoints("space_to_batch")
def space_to_batch( # pylint: disable=missing-docstring
input, paddings, block_size=None, name=None, block_shape=None): # pylint: disable=redefined-builtin
block_size = deprecation.deprecated_argument_lookup(
"block_shape", block_shape, "block_size", block_size)
result = space_to_batch_nd(
input,
paddings=paddings,
block_shape=np.array([block_size, block_size], dtype=np.int64),
name=name)
result.set_shape(result.get_shape().with_rank(4))
return result
space_to_batch.__doc__ = gen_array_ops.space_to_batch.__doc__
@tf_export("space_to_batch", "nn.space_to_batch", v1=[])
def space_to_batch_v2(input, block_shape, paddings, name=None): # pylint: disable=redefined-builtin
return space_to_batch_nd(input, block_shape, paddings, name)
space_to_batch_v2.__doc__ = gen_array_ops.space_to_batch_nd.__doc__
@tf_export(v1=["nn.space_to_depth", "space_to_depth"])
@deprecation.deprecated_endpoints("space_to_depth")
def space_to_depth(input, block_size, name=None, data_format="NHWC"): # pylint: disable=redefined-builtin
return gen_array_ops.space_to_depth(input, block_size, data_format, name=name)
space_to_depth.__doc__ = gen_array_ops.space_to_depth.__doc__
@tf_export("nn.space_to_depth", v1=[])
def space_to_depth_v2(input, block_size, data_format="NHWC", name=None): # pylint: disable=redefined-builtin
return gen_array_ops.space_to_depth(input, block_size, data_format, name=name)
space_to_depth_v2.__doc__ = gen_array_ops.space_to_depth.__doc__
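# Illustrative sketch of space_to_depth (assumes NHWC layout and block_size=2;
# the tensor values are hypothetical):
#   x = tf.reshape(tf.range(16), [1, 2, 2, 4])   # shape [1, 2, 2, 4]
#   tf.nn.space_to_depth(x, 2)                   # shape [1, 1, 1, 16]
# Each 2x2 spatial block is moved into the depth dimension; depth_to_space
# below performs the inverse rearrangement.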
@tf_export(v1=["nn.depth_to_space", "depth_to_space"])
@deprecation.deprecated_endpoints("depth_to_space")
def depth_to_space(input, block_size, name=None, data_format="NHWC"): # pylint: disable=redefined-builtin
return gen_array_ops.depth_to_space(input, block_size, data_format, name=name)
depth_to_space.__doc__ = gen_array_ops.depth_to_space.__doc__
@tf_export("nn.depth_to_space", v1=[])
def depth_to_space_v2(input, block_size, data_format="NHWC", name=None): # pylint: disable=redefined-builtin
return gen_array_ops.depth_to_space(input, block_size, data_format, name=name)
depth_to_space_v2.__doc__ = gen_array_ops.depth_to_space.__doc__
@tf_export(v1=["batch_to_space"])
def batch_to_space(input, crops, block_size, name=None, block_shape=None): # pylint: disable=redefined-builtin,missing-docstring
block_size = deprecation.deprecated_argument_lookup(
"block_shape", block_shape, "block_size", block_size)
result = batch_to_space_nd(
input,
crops=crops,
block_shape=np.array([block_size, block_size], dtype=np.int64),
name=name)
result.set_shape(result.get_shape().with_rank(4))
return result
batch_to_space.__doc__ = gen_array_ops.batch_to_space.__doc__
@tf_export("batch_to_space", v1=[])
def batch_to_space_v2(input, block_shape, crops, name=None): # pylint: disable=redefined-builtin
"""BatchToSpace for N-D tensors of type T.
This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of
shape `block_shape + [batch]`, interleaves these blocks back into the grid
defined by the spatial dimensions `[1, ..., M]`, to obtain a result with the
same rank as the input. The spatial dimensions of this intermediate result
are then optionally cropped according to `crops` to produce the output. This
is the reverse of SpaceToBatch. See below for a precise description.
Args:
input: A `Tensor`.
N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,
where spatial_shape has M dimensions.
block_shape: A `Tensor`. Must be one of the following types:
`int32`, `int64`. 1-D with shape `[M]`, all values must be >= 1.
For backwards compatibility with TF 1.0, this parameter may be an int, in
which case it is converted to
`numpy.array([block_shape, block_shape], dtype=numpy.int64)`.
crops: A `Tensor`. Must be one of the following types: `int32`, `int64`.
2-D with shape `[M, 2]`, all values must be >= 0.
`crops[i] = [crop_start, crop_end]` specifies the amount to crop from
input dimension `i + 1`, which corresponds to spatial dimension `i`. It
is required that
`crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`.
This operation is equivalent to the following steps:
1. Reshape `input` to `reshaped` of shape:
[block_shape[0], ..., block_shape[M-1],
batch / prod(block_shape),
input_shape[1], ..., input_shape[N-1]]
2. Permute dimensions of `reshaped` to produce `permuted` of shape
[batch / prod(block_shape),
input_shape[1], block_shape[0],
...,
input_shape[M], block_shape[M-1],
input_shape[M+1], ..., input_shape[N-1]]
3. Reshape `permuted` to produce `reshaped_permuted` of shape
[batch / prod(block_shape),
input_shape[1] * block_shape[0],
...,
input_shape[M] * block_shape[M-1],
input_shape[M+1],
...,
input_shape[N-1]]
4. Crop the start and end of dimensions `[1, ..., M]` of
`reshaped_permuted` according to `crops` to produce the
output of shape:
[batch / prod(block_shape),
input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1],
...,
input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1],
input_shape[M+1], ..., input_shape[N-1]]
Some examples:
(1) For the following input of shape `[4, 1, 1, 1]`,
`block_shape = [2, 2]`, and `crops = [[0, 0], [0, 0]]`:
```
[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
```
The output tensor has shape `[1, 2, 2, 1]` and value:
```
x = [[[[1], [2]], [[3], [4]]]]
```
(2) For the following input of shape `[4, 1, 1, 3]`,
`block_shape = [2, 2]`, and `crops = [[0, 0], [0, 0]]`:
```
[[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
```
The output tensor has shape `[1, 2, 2, 3]` and value:
```
x = [[[[1, 2, 3], [4, 5, 6]],
[[7, 8, 9], [10, 11, 12]]]]
```
(3) For the following input of shape `[4, 2, 2, 1]`,
`block_shape = [2, 2]`, and `crops = [[0, 0], [0, 0]]`:
```
x = [[[[1], [3]], [[9], [11]]],
[[[2], [4]], [[10], [12]]],
[[[5], [7]], [[13], [15]]],
[[[6], [8]], [[14], [16]]]]
```
The output tensor has shape `[1, 4, 4, 1]` and value:
```
x = [[[1], [2], [3], [4]],
[[5], [6], [7], [8]],
[[9], [10], [11], [12]],
[[13], [14], [15], [16]]]
```
(4) For the following input of shape `[8, 1, 3, 1]`,
`block_shape = [2, 2]`, and `crops = [[0, 0], [2, 0]]`:
```
x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
[[[0], [2], [4]]], [[[0], [10], [12]]],
[[[0], [5], [7]]], [[[0], [13], [15]]],
[[[0], [6], [8]]], [[[0], [14], [16]]]]
```
The output tensor has shape `[2, 2, 4, 1]` and value:
```
x = [[[[1], [2], [3], [4]],
[[5], [6], [7], [8]]],
[[[9], [10], [11], [12]],
[[13], [14], [15], [16]]]]
```
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
if isinstance(block_shape, int):
block_shape = np.array([block_shape, block_shape], dtype=np.int64)
return batch_to_space_nd(input=input,
block_shape=block_shape,
crops=crops,
name=name)
@tf_export("one_hot")
def one_hot(indices,
depth,
on_value=None,
off_value=None,
axis=None,
dtype=None,
name=None):
"""Returns a one-hot tensor.
The locations represented by indices in `indices` take value `on_value`,
while all other locations take value `off_value`.
`on_value` and `off_value` must have matching data types. If `dtype` is also
provided, they must be the same data type as specified by `dtype`.
If `on_value` is not provided, it will default to the value `1` with type
`dtype`.
If `off_value` is not provided, it will default to the value `0` with type
`dtype`.
If the input `indices` is rank `N`, the output will have rank `N+1`. The
new axis is created at dimension `axis` (default: the new axis is appended
at the end).
If `indices` is a scalar the output shape will be a vector of length `depth`
If `indices` is a vector of length `features`, the output shape will be:
```
features x depth if axis == -1
depth x features if axis == 0
```
If `indices` is a matrix (batch) with shape `[batch, features]`, the output
shape will be:
```
batch x features x depth if axis == -1
batch x depth x features if axis == 1
depth x batch x features if axis == 0
```
If `dtype` is not provided, it will be inferred from the data type of
`on_value` or `off_value`, if one or both are passed in. If none of
`on_value`, `off_value`, or `dtype` are provided, `dtype` will default to the
value `tf.float32`.
Note: If a non-numeric data type output is desired (`tf.string`, `tf.bool`,
etc.), both `on_value` and `off_value` _must_ be provided to `one_hot`.
For example:
```python
indices = [0, 1, 2]
depth = 3
tf.one_hot(indices, depth) # output: [3 x 3]
# [[1., 0., 0.],
# [0., 1., 0.],
# [0., 0., 1.]]
indices = [0, 2, -1, 1]
depth = 3
tf.one_hot(indices, depth,
on_value=5.0, off_value=0.0,
axis=-1) # output: [4 x 3]
# [[5.0, 0.0, 0.0], # one_hot(0)
# [0.0, 0.0, 5.0], # one_hot(2)
# [0.0, 0.0, 0.0], # one_hot(-1)
# [0.0, 5.0, 0.0]] # one_hot(1)
indices = [[0, 2], [1, -1]]
depth = 3
tf.one_hot(indices, depth,
on_value=1.0, off_value=0.0,
axis=-1) # output: [2 x 2 x 3]
# [[[1.0, 0.0, 0.0], # one_hot(0)
# [0.0, 0.0, 1.0]], # one_hot(2)
# [[0.0, 1.0, 0.0], # one_hot(1)
# [0.0, 0.0, 0.0]]] # one_hot(-1)
```
Args:
indices: A `Tensor` of indices.
depth: A scalar defining the depth of the one hot dimension.
on_value: A scalar defining the value to fill in output when `indices[j]
= i`. (default: 1)
off_value: A scalar defining the value to fill in output when `indices[j]
!= i`. (default: 0)
axis: The axis to fill (default: -1, a new inner-most axis).
dtype: The data type of the output tensor.
name: A name for the operation (optional).
Returns:
output: The one-hot tensor.
Raises:
TypeError: If dtype of either `on_value` or `off_value` doesn't match `dtype`
TypeError: If dtype of `on_value` and `off_value` don't match one another
"""
with ops.name_scope(name, "one_hot",
[indices, depth, on_value, off_value, axis,
dtype]) as name:
on_exists = on_value is not None
off_exists = off_value is not None
on_dtype = (ops.convert_to_tensor(on_value).dtype.base_dtype if on_exists
else None)
off_dtype = (ops.convert_to_tensor(off_value).dtype.base_dtype if off_exists
else None)
if on_exists or off_exists:
if dtype is not None:
# Ensure provided on_value and/or off_value match dtype
if on_exists and on_dtype != dtype:
raise TypeError("dtype {0} of on_value does not match "
"dtype parameter {1}".format(on_dtype, dtype))
if off_exists and off_dtype != dtype:
raise TypeError("dtype {0} of off_value does not match "
"dtype parameter {1}".format(off_dtype, dtype))
else:
# dtype not provided: automatically assign it
dtype = on_dtype if on_exists else off_dtype
elif dtype is None:
# None of on_value, off_value, or dtype provided. Default dtype to float32
dtype = dtypes.float32
if not on_exists:
# on_value not provided: assign to value 1 of type dtype
on_value = ops.convert_to_tensor(1, dtype, name="on_value")
on_dtype = dtype
if not off_exists:
# off_value not provided: assign to value 0 of type dtype
off_value = ops.convert_to_tensor(0, dtype, name="off_value")
off_dtype = dtype
if on_dtype != off_dtype:
raise TypeError("dtype {0} of on_value does not match "
"dtype {1} of off_value".format(on_dtype, off_dtype))
return gen_array_ops.one_hot(indices, depth, on_value, off_value, axis,
name)
def _all_dimensions(x):
"""Returns a 1D-tensor listing all dimensions in x."""
# Fast path: avoid creating Rank and Range ops if ndims is known.
if isinstance(x, ops.Tensor) and x.get_shape().ndims is not None:
return constant_op.constant(
np.arange(x.get_shape().ndims), dtype=dtypes.int32)
if (isinstance(x, sparse_tensor.SparseTensor) and
x.dense_shape.get_shape().is_fully_defined()):
r = x.dense_shape.get_shape().dims[0].value # sparse.dense_shape is 1-D.
return constant_op.constant(np.arange(r), dtype=dtypes.int32)
# Otherwise, we rely on `range` and `rank` to do the right thing at runtime.
return gen_math_ops._range(0, rank(x), 1)
@tf_export("sequence_mask")
def sequence_mask(lengths, maxlen=None, dtype=dtypes.bool, name=None):
"""Returns a mask tensor representing the first N positions of each cell.
If `lengths` has shape `[d_1, d_2, ..., d_n]` the resulting tensor `mask` has
dtype `dtype` and shape `[d_1, d_2, ..., d_n, maxlen]`, with
```
mask[i_1, i_2, ..., i_n, j] = (j < lengths[i_1, i_2, ..., i_n])
```
Examples:
```python
tf.sequence_mask([1, 3, 2], 5) # [[True, False, False, False, False],
# [True, True, True, False, False],
# [True, True, False, False, False]]
tf.sequence_mask([[1, 3],[2,0]]) # [[[True, False, False],
# [True, True, True]],
# [[True, True, False],
# [False, False, False]]]
```
Args:
lengths: integer tensor, all its values <= maxlen.
maxlen: scalar integer tensor, size of last dimension of returned tensor.
Default is the maximum value in `lengths`.
dtype: output type of the resulting tensor.
name: name of the op.
Returns:
A mask tensor of shape `lengths.shape + (maxlen,)`, cast to specified dtype.
Raises:
ValueError: if `maxlen` is not a scalar.
"""
with ops.name_scope(name, "SequenceMask", [lengths, maxlen]):
lengths = ops.convert_to_tensor(lengths)
if maxlen is None:
maxlen = gen_math_ops._max(lengths, _all_dimensions(lengths))
maxlen = gen_math_ops.maximum(constant(0, maxlen.dtype), maxlen)
else:
maxlen = ops.convert_to_tensor(maxlen)
if maxlen.get_shape().ndims is not None and maxlen.get_shape().ndims != 0:
raise ValueError("maxlen must be scalar for sequence_mask")
# The basic idea is to compare a range row vector of size maxlen:
# [0, 1, 2, 3, 4]
# to length as a matrix with 1 column: [[1], [3], [2]].
# Because of broadcasting on both arguments this comparison results
# in a matrix of size (len(lengths), maxlen)
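# For example (illustrative), with lengths = [1, 3, 2] and maxlen = 5 the
# comparison is [0, 1, 2, 3, 4] < [[1], [3], [2]], which broadcasts to the
# 3 x 5 boolean mask shown in the docstring above.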
row_vector = gen_math_ops._range(
constant(0, maxlen.dtype), maxlen, constant(1, maxlen.dtype))
# Since maxlen >= max(lengths), it is safe to use maxlen's dtype as the
# authoritative type for the comparison; whenever maxlen fits into tf.int32,
# so do the lengths.
matrix = gen_math_ops.cast(expand_dims(lengths, -1), maxlen.dtype)
result = row_vector < matrix
if dtype is None or result.dtype.base_dtype == dtype.base_dtype:
return result
else:
return gen_math_ops.cast(result, dtype)
@tf_export(v1=["squeeze"])
@deprecation.deprecated_args(None, "Use the `axis` argument instead",
"squeeze_dims")
def squeeze(input, axis=None, name=None, squeeze_dims=None):
# pylint: disable=redefined-builtin
"""Removes dimensions of size 1 from the shape of a tensor.
Given a tensor `input`, this operation returns a tensor of the same type with
all dimensions of size 1 removed. If you don't want to remove all size 1
dimensions, you can remove specific size 1 dimensions by specifying
`axis`.
For example:
```python
# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
tf.shape(tf.squeeze(t)) # [2, 3]
```
Or, to remove specific size 1 dimensions:
```python
# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
tf.shape(tf.squeeze(t, [2, 4])) # [1, 2, 3, 1]
```
Args:
input: A `Tensor`. The `input` to squeeze.
axis: An optional list of `ints`. Defaults to `[]`.
If specified, only squeezes the dimensions listed. The dimension
index starts at 0. It is an error to squeeze a dimension that is not 1.
Must be in the range `[-rank(input), rank(input))`.
name: A name for the operation (optional).
squeeze_dims: Deprecated keyword argument that is now axis.
Returns:
A `Tensor`. Has the same type as `input`.
Contains the same data as `input`, but has one or more dimensions of
size 1 removed.
Raises:
ValueError: When both `squeeze_dims` and `axis` are specified.
"""
axis = deprecation.deprecated_argument_lookup(
"axis", axis, "squeeze_dims", squeeze_dims)
if np.isscalar(axis):
axis = [axis]
return gen_array_ops.squeeze(input, axis, name)
@tf_export("squeeze", v1=[])
def squeeze_v2(input, axis=None, name=None):
# pylint: disable=redefined-builtin
return squeeze(input, axis, name)
@tf_export("where")
@dispatch.add_dispatch_support
def where(condition, x=None, y=None, name=None):
"""Return the elements, either from `x` or `y`, depending on the `condition`.
If both `x` and `y` are None, then this operation returns the coordinates of
true elements of `condition`. The coordinates are returned in a 2-D tensor
where the first dimension (rows) represents the number of true elements, and
the second dimension (columns) represents the coordinates of the true
elements. Keep in mind that the shape of the output tensor can vary depending
on how many true values there are in the input. Indices are output in row-major
order.
If both non-None, `x` and `y` must have the same shape.
The `condition` tensor must be a scalar if `x` and `y` are scalar.
If `x` and `y` are vectors of higher rank, then `condition` must be either a
vector with size matching the first dimension of `x`, or must have the same
shape as `x`.
The `condition` tensor acts as a mask that chooses, based on the value at each
element, whether the corresponding element / row in the output should be taken
from `x` (if true) or `y` (if false).
If `condition` is a vector and `x` and `y` are higher rank matrices, then it
chooses which row (outer dimension) to copy from `x` and `y`. If `condition`
has the same shape as `x` and `y`, then it chooses which element to copy from
`x` and `y`.
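For example, an illustrative sketch:
```python
tf.where([True, False, True])                  # [[0], [2]]
tf.where([True, False], x=[1, 2], y=[-1, -2])  # [1, -2]
```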
Args:
condition: A `Tensor` of type `bool`
x: A Tensor which may have the same shape as `condition`. If `condition` is
rank 1, `x` may have higher rank, but its first dimension must match the
size of `condition`.
y: A `tensor` with the same shape and type as `x`.
name: A name of the operation (optional)
Returns:
A `Tensor` with the same type and shape as `x`, `y` if they are non-None.
Otherwise, a `Tensor` with shape `(num_true, rank(condition))`.
Raises:
ValueError: When exactly one of `x` or `y` is non-None.
"""
if x is None and y is None:
with ops.name_scope(name, "Where", [condition]) as name:
condition = ops.convert_to_tensor(
condition, preferred_dtype=dtypes.bool, name="condition")
return gen_array_ops.where(condition=condition, name=name)
elif x is not None and y is not None:
return gen_math_ops.select(condition=condition, x=x, y=y, name=name)
else:
raise ValueError("x and y must both be non-None or both be None.")
# pylint: disable=redefined-builtin
@tf_export(v1=["reverse_sequence"])
@deprecation.deprecated_args(
None, "seq_dim is deprecated, use seq_axis instead", "seq_dim")
@deprecation.deprecated_args(
None, "batch_dim is deprecated, use batch_axis instead", "batch_dim")
def reverse_sequence(input,
seq_lengths,
seq_axis=None,
batch_axis=None,
name=None,
seq_dim=None,
batch_dim=None):
seq_axis = deprecation.deprecated_argument_lookup("seq_axis", seq_axis,
"seq_dim", seq_dim)
batch_axis = deprecation.deprecated_argument_lookup("batch_axis", batch_axis,
"batch_dim", batch_dim)
return gen_array_ops.reverse_sequence(
input=input,
seq_lengths=seq_lengths,
seq_dim=seq_axis,
batch_dim=batch_axis,
name=name)
reverse_sequence.__doc__ = deprecation.rewrite_argument_docstring(
deprecation.rewrite_argument_docstring(
gen_array_ops.reverse_sequence.__doc__, "batch_dim", "batch_axis"),
"seq_dim", "seq_axis")
@tf_export("reverse_sequence", v1=[])
def reverse_sequence_v2(
input, seq_lengths, seq_axis=None, batch_axis=None, name=None):
return gen_array_ops.reverse_sequence(
input=input,
seq_lengths=seq_lengths,
seq_dim=seq_axis,
batch_dim=batch_axis,
name=name)
reverse_sequence_v2.__doc__ = deprecation.rewrite_argument_docstring(
deprecation.rewrite_argument_docstring(
gen_array_ops.reverse_sequence.__doc__, "batch_dim", "batch_axis"),
"seq_dim", "seq_axis")
# pylint: enable=redefined-builtin
@tf_export(v1=["gather"])
@dispatch.add_dispatch_support
def gather(params,
indices,
validate_indices=None,
name=None,
axis=None,
batch_dims=0):
r"""Gather slices from params axis axis according to indices.
Gather slices from params axis `axis` according to `indices`. `indices` must
be an integer tensor of any dimension (usually 0-D or 1-D).
For 0-D (scalar) `indices`:
> `output`$$[p_0, ..., p_{axis-1}, \hspace{5.1em}
> p_{axis + 1}, ..., p_{N-1}]$$ =\
> `params`$$[p_0, ..., p_{axis-1}, \hspace{1em}
> indices, \hspace{1em}
> p_{axis + 1}, ..., p_{N-1}]$$.
For 1-D (vector) `indices` with `batch_dims=0`:
> `output`$$[p_0, ..., p_{axis-1}, \hspace{2.6em}
> i, \hspace{2.6em}
> p_{axis + 1}, ..., p_{N-1}]$$ =\
> `params`$$[p_0, ..., p_{axis-1}, \hspace{1em}
> indices[i], \hspace{1em}
> p_{axis + 1}, ..., p_{N-1}]$$.
In the general case, produces an output tensor where:
> `output`$$[p_0, ..., p_{axis-1}, \hspace{1.2em}
> i_{batch\_dims}, ..., i_{M-1}, \hspace{1.3em}
> p_{axis + 1}, ..., p_{N-1}]$$ =\
> `params`$$[p_0, ..., p_{axis-1}, \hspace{1em}
> indices[i_0, ..., i_{M-1}], \hspace{1em}
> p_{axis + 1}, ..., p_{N-1}]$$.
Where $$N$$=`ndims(params)` and $$M$$=`ndims(indices)`.
The shape of the output tensor is:
> `output.shape = params.shape[:axis] + indices.shape[batch_dims:] +
> params.shape[axis + 1:]`.
Note that on CPU, if an out of bound index is found, an error is returned.
On GPU, if an out of bound index is found, a 0 is stored in the corresponding
output value.
See also `tf.gather_nd`.
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src="https://www.tensorflow.org/images/Gather.png"
alt>
</div>
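For example, an illustrative sketch:
```python
params = tf.constant([[0, 1], [2, 3], [4, 5]])
tf.gather(params, [2, 0])       # [[4, 5], [0, 1]]
tf.gather(params, [1], axis=1)  # [[1], [3], [5]]
```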
Args:
params: The `Tensor` from which to gather values. Must be at least rank
`axis + 1`.
indices: The index `Tensor`. Must be one of the following types: `int32`,
`int64`. Must be in range `[0, params.shape[axis])`.
validate_indices: Deprecated, does nothing.
name: A name for the operation (optional).
axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. The
`axis` in `params` to gather `indices` from. Must be greater than or equal
to `batch_dims`. Defaults to the first non-batch dimension. Supports
negative indexes.
batch_dims: An `integer`. The number of batch dimensions. Must be less
than `rank(indices)`.
Returns:
A `Tensor`. Has the same type as `params`.
"""
del validate_indices
if batch_dims != 0:
with ops.name_scope(name, "Gather", [params, indices, axis]):
return _batch_gather(params, indices, batch_dims, axis)
if axis is None:
axis = batch_dims
if axis != 0:
# Note that we do a sparse_read here to avoid snapshotting the entire
# resource variable and doing a gather, which can be inefficient and lead to
# subtle race conditions. TODO(apassos) implement axis != 0 on sparse_read
return gen_array_ops.gather_v2(params, indices, axis, name=name)
try:
# TODO(apassos) find a less bad way of detecting resource variables without
# introducing a circular dependency.
return params.sparse_read(indices, name=name)
except AttributeError:
return gen_array_ops.gather_v2(params, indices, axis, name=name)
@tf_export("gather", v1=[])
@dispatch.add_dispatch_support
def gather_v2(params, indices, validate_indices=None, axis=None,
batch_dims=0, name=None):
return gather(params, indices, validate_indices=validate_indices, name=name,
axis=axis, batch_dims=batch_dims)
gather.__doc__ = gather_v2.__doc__ = gen_array_ops.gather_v2.__doc__
@tf_export(v1=["batch_gather"])
@dispatch.add_dispatch_support
@deprecation.deprecated(
"2017-10-25", "`tf.batch_gather` is deprecated, please use `tf.gather` "
"with `batch_dims` instead.") # pylint: disable=missing-docstring
def batch_gather(params, indices, name=None):
"""Gather slices from params according to indices with leading batch dims."""
with ops.name_scope(name, "BatchGather", [params, indices]):
indices = ops.convert_to_tensor(indices, name="indices")
params = ops.convert_to_tensor(params, name="params")
if indices.shape.ndims is None:
raise ValueError(
"batch_gather does not allow indices with unknown shape.")
return _batch_gather(params, indices, batch_dims=indices.shape.ndims - 1)
def _batch_gather(params, indices, batch_dims, axis=None):
r"""Gather slices from params according to indices with leading batch dims.
This operation assumes that the leading `batch_dims` dimensions of `indices`
and `params` are batch dimensions; and performs a `tf.gather` operation within
each batch. (If `batch_dims` is not specified, then it defaults to
`rank(indices)-1`.) In the case in which `batch_dims==0`, this operation
is equivalent to `tf.gather`.
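For example, an illustrative sketch with `batch_dims=1`:
```python
params = [[10, 11, 12], [20, 21, 22]]   # shape [2, 3]
indices = [[2, 0], [1, 1]]              # shape [2, 2]
# _batch_gather(params, indices, batch_dims=1)
# ==> [[12, 10], [21, 21]]
```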
Args:
params: A Tensor. The tensor from which to gather values.
indices: A Tensor. Must be one of the following types: int32, int64. Index
tensor. Must be in range `[0, params.shape[batch_dims]]`.
batch_dims: An integer or none. The number of batch dimensions. Must be
less than `rank(indices)`. Defaults to `rank(indices) - 1` if None.
axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. The
`axis` in `params` to gather `indices` from. Must be greater than or equal
to `batch_dims`. Defaults to the first non-batch dimension. Supports
negative indexes.
Returns:
A Tensor. Has the same type as `params`.
Raises:
ValueError: if `indices` has an unknown shape.
"""
if batch_dims is not None and not isinstance(batch_dims, int):
raise TypeError("batch_dims must be an int; got %r" % batch_dims)
indices = ops.convert_to_tensor(indices, name="indices")
params = ops.convert_to_tensor(params, name="params")
indices_ndims = indices.shape.ndims
if indices_ndims is None:
raise ValueError("tf.gather does not allow indices with unknown "
"rank when batch_dims is specified.")
if batch_dims is None:
batch_dims = indices_ndims - 1
if batch_dims < 0:
batch_dims += indices_ndims
if batch_dims < 0 or batch_dims >= indices_ndims:
raise ValueError("batch_dims = %d must be less than rank(indices) = %d" %
(batch_dims, indices_ndims))
if params.shape.ndims is not None and batch_dims >= params.shape.ndims:
raise ValueError("batch_dims = %d must be less than rank(params) = %d" %
(batch_dims, params.shape.ndims))
# Handle axis by transposing the axis dimension to be the first non-batch
# dimension, recursively calling batch_gather with axis=0, and then
# transposing the result to put the pre-axis dimensions before the indices
# dimensions.
if axis is not None and axis != batch_dims:
# Adjust axis to be positive.
if not isinstance(axis, int):
axis = where(axis < 0, axis + rank(params), axis)
elif axis < 0 and params.shape.ndims is None:
axis = axis + rank(params)
else:
if (axis < -params.shape.ndims) or (axis >= params.shape.ndims):
raise ValueError("axis (%d) out of range [%d, %d)" %
(axis, -params.shape.ndims, params.shape.ndims))
if axis < 0:
axis += params.shape.ndims
if axis < batch_dims:
raise ValueError("batch_dims = %d must be less than or equal to "
"axis = %d" % (batch_dims, axis))
# Move params[axis] up to params[batch_dims].
perm = [
list(range(batch_dims)), [axis],
gen_math_ops._range(batch_dims, axis, 1),
gen_math_ops._range(axis + 1, rank(params), 1)
]
params = transpose(params, concat(perm, axis=0))
result = _batch_gather(params, indices, batch_dims=batch_dims)
# Move the result dimensions corresponding to params[batch_dims:axis]
# to just before the dimensions corresponding to indices[batch_dims:].
params_start = indices_ndims + axis - batch_dims
perm = [
list(range(batch_dims)),
gen_math_ops._range(indices_ndims, params_start, 1),
list(range(batch_dims, indices_ndims)),
gen_math_ops._range(params_start, rank(result), 1)
]
return transpose(result, perm=concat(perm, axis=0))
indices_shape = shape(indices)
params_shape = shape(params)
batch_indices = indices
indices_dtype = indices.dtype.base_dtype
accum_dim_value = ones((), dtype=indices_dtype)
# Use correct type for offset index computation
casted_params_shape = gen_math_ops.cast(params_shape, indices_dtype)
for dim in range(batch_dims, 0, -1):
dim_value = casted_params_shape[dim - 1]
accum_dim_value *= casted_params_shape[dim]
start = zeros((), dtype=indices_dtype)
step = ones((), dtype=indices_dtype)
dim_indices = gen_math_ops._range(start, dim_value, step)
dim_indices *= accum_dim_value
dim_shape = stack(
[1] * (dim - 1) + [dim_value] + [1] * (indices_ndims - dim), axis=0)
batch_indices += reshape(dim_indices, dim_shape)
flat_indices = reshape(batch_indices, [-1])
outer_shape = params_shape[batch_dims + 1:]
flat_inner_shape = gen_math_ops.prod(params_shape[:batch_dims + 1], [0],
False)
flat_params = reshape(params, concat([[flat_inner_shape], outer_shape],
axis=0))
flat_result = gather(flat_params, flat_indices)
result = reshape(flat_result, concat([indices_shape, outer_shape], axis=0))
final_shape = indices.get_shape()[:batch_dims].merge_with(
params.get_shape()[:batch_dims])
final_shape = final_shape.concatenate(indices.get_shape().dims[batch_dims:])
final_shape = final_shape.concatenate(params.get_shape()[batch_dims + 1:])
result.set_shape(final_shape)
return result
@tf_export(v1=["gather_nd", "manip.gather_nd"])
@dispatch.add_dispatch_support
@deprecated_endpoints("manip.gather_nd")
def gather_nd(params, indices, name=None, batch_dims=0):
r"""Gather slices from `params` into a Tensor with shape specified by `indices`.
`indices` is a K-dimensional integer tensor, best thought of as a
(K-1)-dimensional tensor of indices into `params`, where each element defines
a slice of `params`:
output[\\(i_0, ..., i_{K-2}\\)] = params[indices[\\(i_0, ..., i_{K-2}\\)]]
Whereas in `tf.gather` `indices` defines slices into the first
dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the
first `N` dimensions of `params`, where `N = indices.shape[-1]`.
The last dimension of `indices` can be at most the rank of
`params`:
indices.shape[-1] <= params.rank
The last dimension of `indices` corresponds to elements
(if `indices.shape[-1] == params.rank`) or slices
(if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]`
of `params`. The output tensor has shape
indices.shape[:-1] + params.shape[indices.shape[-1]:]
Additionally both 'params' and 'indices' can have M leading batch
dimensions that exactly match. In this case 'batch_dims' must be M.
Note that on CPU, if an out of bound index is found, an error is returned.
On GPU, if an out of bound index is found, a 0 is stored in the
corresponding output value.
Some examples below.
Simple indexing into a matrix:
```python
indices = [[0, 0], [1, 1]]
params = [['a', 'b'], ['c', 'd']]
output = ['a', 'd']
```
Slice indexing into a matrix:
```python
indices = [[1], [0]]
params = [['a', 'b'], ['c', 'd']]
output = [['c', 'd'], ['a', 'b']]
```
Indexing into a 3-tensor:
```python
indices = [[1]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [[['a1', 'b1'], ['c1', 'd1']]]
indices = [[0, 1], [1, 0]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [['c0', 'd0'], ['a1', 'b1']]
indices = [[0, 0, 1], [1, 0, 1]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = ['b0', 'b1']
```
The examples below are for the case when only indices have leading extra
dimensions. If both 'params' and 'indices' have leading batch dimensions, use
the 'batch_dims' parameter to run gather_nd in batch mode.
Batched indexing into a matrix:
```python
indices = [[[0, 0]], [[0, 1]]]
params = [['a', 'b'], ['c', 'd']]
output = [['a'], ['b']]
```
Batched slice indexing into a matrix:
```python
indices = [[[1]], [[0]]]
params = [['a', 'b'], ['c', 'd']]
output = [[['c', 'd']], [['a', 'b']]]
```
Batched indexing into a 3-tensor:
```python
indices = [[[1]], [[0]]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [[[['a1', 'b1'], ['c1', 'd1']]],
[[['a0', 'b0'], ['c0', 'd0']]]]
indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [[['c0', 'd0'], ['a1', 'b1']],
[['a0', 'b0'], ['c1', 'd1']]]
indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [['b0', 'b1'], ['d0', 'c1']]
```
Examples with batched 'params' and 'indices':
```python
batch_dims = 1
indices = [[1], [0]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [['c0', 'd0'], ['a1', 'b1']]
batch_dims = 1
indices = [[[1]], [[0]]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [[['c0', 'd0']], [['a1', 'b1']]]
batch_dims = 1
indices = [[[1, 0]], [[0, 1]]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [['c0'], ['b1']]
```
See also `tf.gather`.
Args:
params: A `Tensor`. The tensor from which to gather values.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
Index tensor.
name: A name for the operation (optional).
batch_dims: An integer or a scalar 'Tensor'. The number of batch dimensions.
Returns:
A `Tensor`. Has the same type as `params`.
"""
batch_dims_ = tensor_util.constant_value(batch_dims)
if batch_dims_ is not None:
batch_dims = int(batch_dims_)
if batch_dims == 0:
return gen_array_ops.gather_nd(params, indices, name=name)
else:
return batch_gather_nd(
params, indices, batch_dims=batch_dims, name=name)
@tf_export("gather_nd", v1=[])
@dispatch.add_dispatch_support
def gather_nd_v2(params, indices, batch_dims=0, name=None):
return gather_nd(params, indices, name=name, batch_dims=batch_dims)
gather_nd_v2.__doc__ = gather_nd.__doc__
def batch_gather_nd(params, indices, batch_dims, name=None):
"""gather_nd implementation with batch support."""
with ops.name_scope(name, "BatchGatherND", [params, indices]):
indices = ops.convert_to_tensor(indices, name="indices")
params = ops.convert_to_tensor(params, name="params")
if not isinstance(batch_dims, int):
raise TypeError("batch_dims must be an int; got %r" % batch_dims)
if batch_dims < 0:
raise ValueError("tf.gather_nd does not allow negative batch_dims.")
params_ndims = params.shape.ndims
indices_ndims = indices.shape.ndims
if indices_ndims is not None and batch_dims >= indices_ndims:
raise ValueError("batch_dims = %d must be less than rank(indices) = %d" %
(batch_dims, indices_ndims))
if params_ndims is not None and batch_dims >= params_ndims:
raise ValueError("batch_dims = %d must be less than rank(params) = %d" %
(batch_dims, params_ndims))
expand = batch_dims == 0
if expand:
# Normally gather_nd will be called when batch_dims == 0.
# But if this function is called with batch_dims = 0, e.g. for testing
# purposes, this adds a dummy batch dimension to make batch_dims = 1.
params = expand_dims(params, axis=0)
indices = expand_dims(indices, axis=0)
batch_dims = 1
params_shape = shape(params)
indices_shape = shape(indices)
batch_shape = params_shape[:batch_dims]
batch_size = gen_math_ops.prod(batch_shape, [0])
index_internal_ndims = rank(indices) - batch_dims - 1
indices_internal_shape = indices_shape[batch_dims:-1]
# Assuming a 'params' with shape [b1, ..., bM, g1, ..., gN] and an 'indices'
# with shape [b1, ..., bM, i1, ..., iK, C], where C <= N, we need to modify
# 'indices' s.t. it has shape [i1, ..., iK, D], where D <= M + N and slices
# to the entire 'params' tensor.
# Assuming we have a batch of shape [B1, B2], we use meshgrid to create a
# grid of size B1 x B2.
batch_dim_list = unstack(batch_shape, axis=0)
dim_ranges = [
gen_math_ops.cast(gen_math_ops._range(0, x, 1), indices.dtype)
for x in batch_dim_list]
mesh_list = meshgrid(*dim_ranges, indexing="ij") if dim_ranges else []
# Then we flatten and stack the tensors to form a (B1.B2) by 2 matrix.
flat_list = [reshape(x, shape=(-1,)) for x in mesh_list]
index_grid = transpose(stack(flat_list, axis=0))
# We need to concatenate these batch coordinates with the internal indices.
# concat -> index_grid [B1.B2, 2] with indices [i1, ..., iK, C]
# So we reshape them both to [(B1.B2), i1, ..., iK, *]
index_grid_shape = shape(index_grid)
index_grid = reshape(index_grid,
concat([index_grid_shape[:1],
ones(index_internal_ndims, dtype=dtypes.int32),
index_grid_shape[1:]], axis=0))
tile_shape = concat(((1,), indices_internal_shape, (1,)), axis=0)
index_grid = tile(index_grid, multiples=tile_shape)
# index_grid now has shape [(B1.B2), i1, ..., iK, 2]
flat_shape = concat(([batch_size], indices_shape[batch_dims:]), axis=0)
flat_indices = reshape(indices, shape=flat_shape)
# flat_indices now has shape [(B1.B2), i1, ..., iK, C]
indices = concat((index_grid, flat_indices), axis=-1)
# indices has shape [(B1.B2), i1, ..., iK, 2+C]
out = gen_array_ops.gather_nd(params, indices)
# out has shape [(B1.B2), i1, ..., iK, N-C]. Now we reshape batch to
# its original form.
out_shape = shape(out)
out = reshape(out, shape=concat((batch_shape, out_shape[1:]), axis=0))
if expand:
out = squeeze(out, axis=0)
return out
# Define quantize_v2 here in order to make name the second-to-last attribute,
# because round_mode was added later.
@tf_export(v1=["quantize_v2"])
@deprecation.deprecated(
"2017-10-25",
"`tf.quantize_v2` is deprecated, please use `tf.quantization.quantize` "
"instead.") # pylint: disable=missing-docstring
def quantize_v2(input, # pylint: disable=redefined-builtin
min_range,
max_range,
T,
mode="MIN_COMBINED",
name=None,
round_mode="HALF_AWAY_FROM_ZERO"):
return gen_array_ops.quantize_v2(input,
min_range,
max_range,
T=T,
mode=mode,
name=name,
round_mode=round_mode)
quantize_v2.__doc__ = """Please use `tf.quantization.quantize` instead."""
# We want to expose tf.quantize instead of tf.quantize_v2; we can deprecate
# tf.quantize_v2 in next version of TensorFlow.
@tf_export("quantization.quantize", v1=["quantization.quantize", "quantize"])
@deprecation.deprecated_endpoints("quantize")
def quantize(input, # pylint: disable=redefined-builtin
min_range,
max_range,
T,
mode="MIN_COMBINED",
round_mode="HALF_AWAY_FROM_ZERO",
name=None):
return gen_array_ops.quantize_v2(
input,
min_range,
max_range,
T,
mode=mode,
round_mode=round_mode,
name=name)
@tf_export("searchsorted")
def searchsorted(sorted_sequence,
values,
side="left",
out_type=dtypes.int32,
name=None):
"""Searches input tensor for values on the innermost dimension.
A 2-D example:
```
sorted_sequence = [[0, 3, 9, 9, 10],
[1, 2, 3, 4, 5]]
values = [[2, 4, 9],
[0, 2, 6]]
result = searchsorted(sorted_sequence, values, side="left")
result == [[1, 2, 2],
[0, 1, 5]]
result = searchsorted(sorted_sequence, values, side="right")
result == [[1, 2, 4],
[0, 2, 5]]
```
Args:
sorted_sequence: N-D `Tensor` containing a sorted sequence.
values: N-D `Tensor` containing the search values.
side: 'left' or 'right'; 'left' corresponds to lower_bound and 'right' to
upper_bound.
out_type: The output type (`int32` or `int64`). Default is `tf.int32`.
name: Optional name for the operation.
Returns:
An N-D `Tensor` the size of values containing the result of applying either
lower_bound or upper_bound (depending on side) to each value. The result
is not a global index to the entire `Tensor`, but the index in the last
dimension.
Raises:
    ValueError: If the last dimension of `sorted_sequence` has `2^31-1` or more elements.
If the total size of values exceeds `2^31 - 1` elements.
If the first `N-1` dimensions of the two tensors don't match.
"""
sequence_size = shape_internal(sorted_sequence)[-1]
values_size = shape_internal(values)[-1]
sorted_sequence_2d = reshape(sorted_sequence, [-1, sequence_size])
values_2d = reshape(values, [-1, values_size])
if side == "right":
output = gen_array_ops.upper_bound(sorted_sequence_2d, values_2d, out_type,
name)
elif side == "left":
output = gen_array_ops.lower_bound(sorted_sequence_2d, values_2d, out_type,
name)
else:
raise ValueError("side must be either 'right' or 'left'. Saw: %s." % side)
return reshape(output, shape_internal(values))
quantize.__doc__ = gen_array_ops.quantize_v2.__doc__
@tf_export("image.extract_image_patches", v1=[])
def extract_image_patches_v2(
images,
sizes,
strides,
rates,
padding,
name=None):
# pylint: disable=line-too-long
r"""Extract `patches` from `images` and put them in the \"depth\" output dimension.
Args:
    images: A 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`.
sizes: The size of the sliding window for each dimension of `images`.
strides: A 1-D Tensor of length 4. How far the centers of two consecutive
patches are in the images. Must be: `[1, stride_rows, stride_cols, 1]`.
rates: A 1-D Tensor of length 4. Must be: `[1, rate_rows, rate_cols, 1]`.
This is the input stride, specifying how far two consecutive patch samples
are in the input. Equivalent to extracting patches with `patch_sizes_eff =
patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by subsampling
them spatially by a factor of `rates`. This is equivalent to `rate` in
dilated (a.k.a. Atrous) convolutions.
padding: The type of padding algorithm to use.
      We specify the size-related attributes as:
      ```python
      ksizes = [1, ksize_rows, ksize_cols, 1]
      strides = [1, strides_rows, strides_cols, 1]
      rates = [1, rates_rows, rates_cols, 1]
      ```
name: A name for the operation (optional).
Returns:
A 4-D Tensor. Has the same type as `images`, and with shape `[batch,
out_rows, out_cols, ksize_rows * ksize_cols * depth]` containing image
patches with size `ksize_rows x ksize_cols x depth` vectorized in the
\"depth\" dimension. Note `out_rows` and `out_cols` are the dimensions of
the output patches.
"""
# pylint: enable=line-too-long
return gen_array_ops.extract_image_patches(
images, sizes, strides, rates, padding, name)
@tf_export(v1=["image.extract_image_patches", "extract_image_patches"])
@deprecation.deprecated_args(
None, "ksizes is deprecated, use sizes instead", "ksizes")
def extract_image_patches( # pylint: disable=missing-docstring
images,
ksizes=None,
strides=None,
rates=None,
padding=None,
name=None,
sizes=None):
ksizes = deprecation.deprecated_argument_lookup(
"sizes", sizes, "ksizes", ksizes)
return gen_array_ops.extract_image_patches(
images, ksizes, strides, rates, padding, name)
extract_image_patches.__doc__ = gen_array_ops.extract_image_patches.__doc__
| 35.734518 | 129 | 0.638522 |
50d6f5e87bd50fa2b707b764a883ebbca48f0362 | 17,616 | py | Python | pydisney/m3u8_formater.py | andrewzhong1122/DSNP-dl | 2587aae1c772c40bc4a17e1e6396ab8886091a3d | [
"MIT"
] | 28 | 2022-01-02T03:39:18.000Z | 2022-03-27T13:49:18.000Z | pydisney/m3u8_formater.py | Machtergreifung/DISNEY-4K-SCRIPT | 337490793b475ee04a4b9eb12c2bd3917415219a | [
"MIT"
] | 1 | 2022-01-24T03:23:13.000Z | 2022-01-24T03:23:13.000Z | pydisney/m3u8_formater.py | Machtergreifung/DISNEY-4K-SCRIPT | 337490793b475ee04a4b9eb12c2bd3917415219a | [
"MIT"
] | 58 | 2022-01-02T01:45:30.000Z | 2022-03-15T06:47:57.000Z | # ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
from __future__ import absolute_import
import binascii
import copy
import os
import random
import re
import time
from datetime import datetime
from datetime import timedelta
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import algorithms
from cryptography.hazmat.primitives.ciphers import Cipher
from cryptography.hazmat.primitives.ciphers import modes
from svtplay_dl.error import ServiceError
from svtplay_dl.error import UIException
from svtplay_dl.fetcher import VideoRetriever
from svtplay_dl.subtitle import subtitle
from svtplay_dl.utils.http import get_full_url
from svtplay_dl.utils.output import ETA
from svtplay_dl.utils.output import output
from svtplay_dl.utils.output import progress_stream
from svtplay_dl.utils.output import progressbar
class HLSException(UIException):
def __init__(self, url, message):
self.url = url
super().__init__(message)
class LiveHLSException(HLSException):
def __init__(self, url):
super().__init__(url, "This is a live HLS stream, and they are not supported.")
def hlsparse(config, res, url, **kwargs):
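    """Parse an m3u8 response into a dict of HLS stream (and subtitle) objects keyed by bitrate."""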
streams = {}
if not res:
return streams
if res.status_code > 400:
streams[0] = ServiceError("Can't read HLS playlist. {}".format(res.status_code))
return streams
m3u8 = M3U8(res.text)
keycookie = kwargs.pop("keycookie", None)
authorization = kwargs.pop("authorization", None)
httpobject = kwargs.pop("httpobject", None)
output = kwargs.pop("output", None)
media = {}
subtitles = {}
segments = None
if m3u8.master_playlist:
for i in m3u8.master_playlist:
audio_url = None
if i["TAG"] == "EXT-X-MEDIA":
if "AUTOSELECT" in i and (i["AUTOSELECT"].upper() == "YES"):
if i["TYPE"] and i["TYPE"] != "SUBTITLES":
if "URI" in i:
if segments is None:
segments = True
if i["GROUP-ID"] not in media:
media[i["GROUP-ID"]] = []
media[i["GROUP-ID"]].append(i["URI"])
else:
segments = False
if i["TYPE"] == "SUBTITLES":
if "URI" in i:
if i["GROUP-ID"] not in subtitles:
subtitles[i["GROUP-ID"]] = []
item = [i["URI"], i["LANGUAGE"]]
if item not in subtitles[i["GROUP-ID"]]:
subtitles[i["GROUP-ID"]].append(item)
continue
elif i["TAG"] == "EXT-X-STREAM-INF":
bit_rate = float(i["BANDWIDTH"]) / 1000
if "AUDIO" in i and (i["AUDIO"] in media):
audio_url = get_full_url(media[i["AUDIO"]][0], url)
urls = get_full_url(i["URI"], url)
else:
continue # Needs to be changed to utilise other tags.
streams[int(bit_rate)] = HLS(
copy.copy(config),
urls,
bit_rate,
cookies=res.cookies,
keycookie=keycookie,
authorization=authorization,
audio=audio_url,
output=output,
segments=bool(segments),
kwargs=kwargs,
)
if subtitles and httpobject:
for sub in list(subtitles.keys()):
for n in subtitles[sub]:
m3u8s = M3U8(httpobject.request("get", get_full_url(n[0], url), cookies=res.cookies).text)
if "cmore" in url:
subtype = "wrstsegment" # this have been seen in tv4play
else:
subtype = "wrst"
streams[int(random.randint(1, 40))] = subtitle(
copy.copy(config),
subtype,
get_full_url(m3u8s.media_segment[0]["URI"], url),
subfix=n[1],
output=copy.copy(output),
m3u8=m3u8s,
)
elif m3u8.media_segment:
config.set("segments", False)
streams[0] = HLS(
copy.copy(config), url, 0, cookies=res.cookies, keycookie=keycookie, authorization=authorization, output=output, segments=False
)
else:
streams[0] = ServiceError("Can't find HLS playlist in m3u8 file.")
return streams
class HLS(VideoRetriever):
@property
def name(self):
return "hls"
def download(self):
self.output_extention = "ts"
if self.segments:
if self.audio:
self._download(self.audio, file_name=(copy.copy(self.output), "audio.ts"))
self._download(self.url, file_name=(self.output, "ts"))
else:
# Ignore audio
self.audio = None
self._download(self.url, file_name=(self.output, "ts"))
def _download(self, url, file_name):
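        """Fetch every media segment in the playlist, decrypting AES-128
        encrypted segments when needed, and append them to the output file."""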
cookies = self.kwargs.get("cookies", None)
start_time = time.time()
m3u8 = M3U8(self.http.request("get", url, cookies=cookies).text)
key = None
def random_iv():
return os.urandom(16)
file_d = output(file_name[0], self.config, file_name[1])
if file_d is None:
return
hls_time_stamp = self.kwargs.pop("hls_time_stamp", False)
decryptor = None
size_media = len(m3u8.media_segment)
eta = ETA(size_media)
total_duration = 0
duration = 0
max_duration = 0
for index, i in enumerate(m3u8.media_segment):
if "duration" in i["EXTINF"]:
duration = i["EXTINF"]["duration"]
max_duration = max(max_duration, duration)
total_duration += duration
item = get_full_url(i["URI"], url)
if not self.config.get("silent"):
if self.config.get("live"):
progressbar(size_media, index + 1, "".join(["DU: ", str(timedelta(seconds=int(total_duration)))]))
else:
eta.increment()
progressbar(size_media, index + 1, "".join(["ETA: ", str(eta)]))
data = self.http.request("get", item, cookies=cookies)
if data.status_code == 404:
break
data = data.content
if m3u8.encrypted:
headers = {}
if self.keycookie:
keycookies = self.keycookie
else:
keycookies = cookies
if self.authorization:
headers["authorization"] = self.authorization
# Update key/decryptor
if "EXT-X-KEY" in i:
keyurl = get_full_url(i["EXT-X-KEY"]["URI"], url)
if keyurl and keyurl[:4] == "skd:":
                        raise HLSException(keyurl, "Can't decrypt because of DRM")
key = self.http.request("get", keyurl, cookies=keycookies, headers=headers).content
iv = binascii.unhexlify(i["EXT-X-KEY"]["IV"][2:].zfill(32)) if "IV" in i["EXT-X-KEY"] else random_iv()
backend = default_backend()
cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=backend)
decryptor = cipher.decryptor()
if decryptor:
data = decryptor.update(data)
else:
raise ValueError("No decryptor found for encrypted hls steam.")
file_d.write(data)
if self.config.get("capture_time") > 0 and total_duration >= self.config.get("capture_time") * 60:
break
if (size_media == (index + 1)) and self.config.get("live"):
sleep_int = (start_time + max_duration * 2) - time.time()
if sleep_int > 0:
time.sleep(sleep_int)
size_media_old = size_media
while size_media_old == size_media:
start_time = time.time()
if hls_time_stamp:
end_time_stamp = (datetime.utcnow() - timedelta(minutes=1, seconds=max_duration * 2)).replace(microsecond=0)
start_time_stamp = end_time_stamp - timedelta(minutes=1)
base_url = url.split(".m3u8")[0]
url = "{}.m3u8?in={}&out={}?".format(base_url, start_time_stamp.isoformat(), end_time_stamp.isoformat())
new_m3u8 = M3U8(self.http.request("get", url, cookies=cookies).text)
for n_m3u in new_m3u8.media_segment:
if not any(d["URI"] == n_m3u["URI"] for d in m3u8.media_segment):
m3u8.media_segment.append(n_m3u)
size_media = len(m3u8.media_segment)
if size_media_old == size_media:
time.sleep(max_duration)
file_d.close()
if not self.config.get("silent"):
progress_stream.write("\n")
self.finished = True
class M3U8:
# Created for hls version <=7
# https://tools.ietf.org/html/rfc8216
MEDIA_SEGMENT_TAGS = ("EXTINF", "EXT-X-BYTERANGE", "EXT-X-DISCONTINUITY", "EXT-X-KEY", "EXT-X-MAP", "EXT-X-PROGRAM-DATE-TIME", "EXT-X-DATERANGE")
MEDIA_PLAYLIST_TAGS = (
"EXT-X-TARGETDURATION",
"EXT-X-MEDIA-SEQUENCE",
"EXT-X-DISCONTINUITY-SEQUENCE",
"EXT-X-ENDLIST",
"EXT-X-PLAYLIST-TYPE",
"EXT-X-I-FRAMES-ONLY",
)
MASTER_PLAYLIST_TAGS = ("EXT-X-MEDIA", "EXT-X-STREAM-INF", "EXT-X-I-FRAME-STREAM-INF", "EXT-X-SESSION-DATA", "EXT-X-SESSION-KEY")
MEDIA_OR_MASTER_PLAYLIST_TAGS = ("EXT-X-INDEPENDENT-SEGMENTS", "EXT-X-START")
TAG_TYPES = {"MEDIA_SEGMENT": 0, "MEDIA_PLAYLIST": 1, "MASTER_PLAYLIST": 2}
def __init__(self, data):
self.version = None
self.media_segment = []
self.media_playlist = {}
self.master_playlist = []
self.encrypted = False
self.independent_segments = False
self.parse_m3u(data)
def __str__(self):
return "Version: {}\nMedia Segment: {}\nMedia Playlist: {}\nMaster Playlist: {}\nEncrypted: {}\tIndependent_segments: {}".format(
self.version, self.media_segment, self.media_playlist, self.master_playlist, self.encrypted, self.independent_segments
)
def parse_m3u(self, data):
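        """Parse m3u8 text into the media_segment, media_playlist and master_playlist structures."""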
if not data.startswith("#EXTM3U"):
raise ValueError("Does not appear to be an 'EXTM3U' file.")
data = data.replace("\r\n", "\n")
lines = data.split("\n")[1:]
last_tag_type = None
tag_type = None
media_segment_info = {}
for index, l in enumerate(lines):
if not l:
continue
elif l.startswith("#EXT"):
info = {}
tag, attr = _get_tag_attribute(l)
if tag == "EXT-X-VERSION":
self.version = int(attr)
# 4.3.2. Media Segment Tags
elif tag in M3U8.MEDIA_SEGMENT_TAGS:
tag_type = M3U8.TAG_TYPES["MEDIA_SEGMENT"]
# 4.3.2.1. EXTINF
if tag == "EXTINF":
if "," in attr:
dur, title = attr.split(",", 1)
else:
dur = attr
title = None
info["duration"] = float(dur)
info["title"] = title
# 4.3.2.2. EXT-X-BYTERANGE
elif tag == "EXT-X-BYTERANGE":
if "@" in attr:
n, o = attr.split("@", 1)
info["n"], info["o"] = (int(n), int(o))
else:
info["n"] = int(attr)
info["o"] = 0
# 4.3.2.3. EXT-X-DISCONTINUITY
elif tag == "EXT-X-DISCONTINUITY":
pass
# 4.3.2.4. EXT-X-KEY
elif tag == "EXT-X-KEY":
self.encrypted = True
info = _get_tuple_attribute(attr)
# 4.3.2.5. EXT-X-MAP
elif tag == "EXT-X-MAP":
info = _get_tuple_attribute(attr)
# 4.3.2.6. EXT-X-PROGRAM-DATE-TIME"
elif tag == "EXT-X-PROGRAM-DATE-TIME":
info = attr
# 4.3.2.7. EXT-X-DATERANGE
elif tag == "EXT-X-DATERANGE":
info = _get_tuple_attribute(attr)
media_segment_info[tag] = info
# 4.3.3. Media Playlist Tags
elif tag in M3U8.MEDIA_PLAYLIST_TAGS:
tag_type = M3U8.TAG_TYPES["MEDIA_PLAYLIST"]
# 4.3.3.1. EXT-X-TARGETDURATION
if tag == "EXT-X-TARGETDURATION":
info = int(attr)
# 4.3.3.2. EXT-X-MEDIA-SEQUENCE
elif tag == "EXT-X-MEDIA-SEQUENCE":
info = int(attr)
# 4.3.3.3. EXT-X-DISCONTINUITY-SEQUENCE
elif tag == "EXT-X-DISCONTINUITY-SEQUENCE":
info = int(attr)
# 4.3.3.4. EXT-X-ENDLIST
elif tag == "EXT-X-ENDLIST":
break
# 4.3.3.5. EXT-X-PLAYLIST-TYPE
elif tag == "EXT-X-PLAYLIST-TYPE":
info = attr
# 4.3.3.6. EXT-X-I-FRAMES-ONLY
elif tag == "EXT-X-I-FRAMES-ONLY":
pass
self.media_playlist[tag] = info
# 4.3.4. Master Playlist Tags
elif tag in M3U8.MASTER_PLAYLIST_TAGS:
tag_type = M3U8.TAG_TYPES["MASTER_PLAYLIST"]
# 4.3.4.1. EXT-X-MEDIA
if tag == "EXT-X-MEDIA":
info = _get_tuple_attribute(attr)
# 4.3.4.2. EXT-X-STREAM-INF
elif tag == "EXT-X-STREAM-INF":
info = _get_tuple_attribute(attr)
if "BANDWIDTH" not in info:
raise ValueError("Can't find 'BANDWIDTH' in 'EXT-X-STREAM-INF'")
info["URI"] = lines[index + 1]
# 4.3.4.3. EXT-X-I-FRAME-STREAM-INF
elif tag == "EXT-X-I-FRAME-STREAM-INF":
info = _get_tuple_attribute(attr)
# 4.3.4.4. EXT-X-SESSION-DATA
elif tag == "EXT-X-SESSION-DATA":
info = _get_tuple_attribute(attr)
# 4.3.4.5. EXT-X-SESSION-KEY
elif tag == "EXT-X-SESSION-KEY":
self.encrypted = True
info = _get_tuple_attribute(attr)
info["TAG"] = tag
self.master_playlist.append(info)
# 4.3.5. Media or Master Playlist Tags
elif tag in M3U8.MEDIA_OR_MASTER_PLAYLIST_TAGS:
tag_type = M3U8.TAG_TYPES["MEDIA_PLAYLIST"]
# 4.3.5.1. EXT-X-INDEPENDENT-SEGMENTS
if tag == "EXT-X-INDEPENDENT-SEGMENTS":
self.independent_segments = True
# 4.3.5.2. EXT-X-START
elif tag == "EXT-X-START":
info = _get_tuple_attribute(attr)
self.media_playlist[tag] = info
# Unused tags
else:
pass
# This is a comment
elif l.startswith("#"):
pass
# This must be a url/uri
else:
tag_type = None
if last_tag_type is M3U8.TAG_TYPES["MEDIA_SEGMENT"]:
media_segment_info["URI"] = l
self.media_segment.append(media_segment_info)
media_segment_info = {}
last_tag_type = tag_type
if self.media_segment and self.master_playlist:
raise ValueError("This 'M3U8' file contains data for both 'Media Segment' and 'Master Playlist'. This is not allowed.")
def _get_tag_attribute(line):
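    """Split an '#EXT...' line into (tag, attribute string); return (tag, None) when there is no ':' separator."""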
line = line[1:]
try:
search_line = re.search(r"^([A-Z\-]*):(.*)", line)
return search_line.group(1), search_line.group(2)
except Exception:
return line, None
def _get_tuple_attribute(attribute):
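    """Parse a comma-separated NAME=VALUE attribute list into a dict, stripping surrounding double quotes."""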
attr_tuple = {}
for art_l in re.split(""",(?=(?:[^'"]|'[^']*'|"[^"]*")*$)""", attribute):
if art_l:
name, value = art_l.split("=", 1)
name = name.strip()
# Checks for attribute name
if not re.match(r"^[A-Z0-9\-]*$", name):
raise ValueError("Not a valid attribute name.")
# Remove extra quotes of string
if value.startswith('"') and value.endswith('"'):
value = value[1:-1]
attr_tuple[name] = value
return attr_tuple | 37.322034 | 149 | 0.499092 |
f46b76f1cdb494ea867c5507021cea82625b4d59 | 2,084 | py | Python | __init__.py | krisgesling/webpage-homescreen-skill | d428057472675e48a00c3ed51e0793dfb2c68a59 | [
"Apache-2.0"
] | null | null | null | __init__.py | krisgesling/webpage-homescreen-skill | d428057472675e48a00c3ed51e0793dfb2c68a59 | [
"Apache-2.0"
] | 1 | 2021-08-23T03:58:26.000Z | 2021-08-23T03:58:26.000Z | __init__.py | krisgesling/webpage-homescreen-skill | d428057472675e48a00c3ed51e0793dfb2c68a59 | [
"Apache-2.0"
] | null | null | null | from mycroft import MycroftSkill, intent_handler
from mycroft.skills import resting_screen_handler
MARK_II = "mycroft_mark_2"
class WebpageHomescreen(MycroftSkill):
def __init__(self):
MycroftSkill.__init__(self)
self.is_active = False
self.platform = self.config_core["enclosure"].get("platform", "unknown")
def initialize(self):
"""Perform final setup of Skill."""
# Disable manual refresh until this Homepage is made active.
self.disable_intent("refresh-homepage.intent")
self.settings_change_callback = self.refresh_homescreen
def get_intro_message(self):
"""Provide instructions on first install."""
self.speak_dialog("setting-url")
if self.platform == MARK_II:
self.speak_dialog("selecting-homescreen")
@resting_screen_handler("Webpage Homescreen")
def handle_request_to_use_homescreen(self, _):
"""Handler for requests from GUI to use this Homescreen."""
self.is_active = True
self.display_homescreen()
self.refresh_homescreen()
self.enable_intent("refresh-homepage.intent")
def display_homescreen(self):
"""Display the selected webpage as the Homescreen."""
default_url = "https://mycroft.ai"
url = self.settings.get("homepage_url", default_url)
self.gui.show_url(url)
@intent_handler("refresh-homepage.intent")
def refresh_homescreen(self):
"""Update refresh rate of homescreen and refresh screen.
Defaults to 600 seconds / 10 minutes.
"""
self.cancel_scheduled_event("refresh-webpage-homescreen")
if self.is_active:
self.schedule_repeating_event(
self.display_homescreen,
0,
self.settings.get("refresh_frequency", 600),
name="refresh-webpage-homescreen",
)
def shutdown(self):
"""Actions to perform when Skill is shutting down."""
self.cancel_all_repeating_events()
def create_skill():
return WebpageHomescreen()
| 33.612903 | 80 | 0.661708 |
1fed9ea41382cb67ee406a378acdf7b118da45de | 1,281 | py | Python | parameters/migrations/0001_initial.py | sgleisner/django-project-template | 1ec8ef798fa0d203fe9d875481a3063c678ed7d6 | [
"MIT"
] | null | null | null | parameters/migrations/0001_initial.py | sgleisner/django-project-template | 1ec8ef798fa0d203fe9d875481a3063c678ed7d6 | [
"MIT"
] | 11 | 2020-06-06T00:50:21.000Z | 2022-02-26T19:41:44.000Z | parameters/migrations/0001_initial.py | sgleisner/django-project-template | 1ec8ef798fa0d203fe9d875481a3063c678ed7d6 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-05-19 15:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Parameter',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, help_text='creation date', verbose_name='created at')),
('updated_at', models.DateTimeField(auto_now=True, help_text='edition date', null=True, verbose_name='updated at')),
('raw_value', models.TextField(verbose_name='value')),
('name', models.CharField(max_length=50, unique=True, verbose_name='name')),
('kind', models.CharField(choices=[('int', 'integer'), ('str', 'text'), ('time', 'time'), ('date', 'date'), ('json', 'json')], max_length=255, verbose_name='kind')),
('cache_seconds', models.PositiveIntegerField(default=3600, verbose_name='cache seconds')),
],
options={
'abstract': False,
},
),
]
| 40.03125 | 181 | 0.595628 |
7fa18d953d102adf17d7fd9af64135427aeee659 | 2,778 | py | Python | summarize.py | DataEngineeringLabs/arrow-string-view | fca2b503ce9a47980b8292b744170fd0aeebece6 | [
"Apache-2.0"
] | 1 | 2021-12-16T10:22:31.000Z | 2021-12-16T10:22:31.000Z | summarize.py | DataEngineeringLabs/arrow-string-view | fca2b503ce9a47980b8292b744170fd0aeebece6 | [
"Apache-2.0"
] | null | null | null | summarize.py | DataEngineeringLabs/arrow-string-view | fca2b503ce9a47980b8292b744170fd0aeebece6 | [
"Apache-2.0"
] | null | null | null | import json
import os
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
def set_size(width, fraction=1):
"""Set figure dimensions to avoid scaling in LaTeX.
Parameters
----------
width: float
Document textwidth or columnwidth in pts
fraction: float, optional
Fraction of the width which you wish the figure to occupy
Returns
-------
fig_dim: tuple
Dimensions of figure in inches
"""
# Width of figure (in pts)
fig_width_pt = width * fraction
# Convert from pt to inches
inches_per_pt = 1 / 72.27
# Golden ratio to set aesthetic figure height
# https://disq.us/p/2940ij3
golden_ratio = (5 ** 0.5 - 1) / 2
# Figure width in inches
fig_width_in = fig_width_pt * inches_per_pt
# Figure height in inches
fig_height_in = fig_width_in * golden_ratio
fig_dim = (fig_width_in, fig_height_in)
return fig_dim
def _read_reports(bench: str):
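    """Collect (task, type, size, mean time) entries from criterion's estimates.json reports under target/criterion/<bench>/."""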
root = f"target/criterion/{bench}/"
result = []
for original_path, dirs, files in os.walk(root):
path = original_path.split(os.sep)
if path[-1] != "new":
continue
path = path[-4:-1]
task = path[0]
type = path[1]
size = int(path[2])
with open(os.path.join(original_path, "estimates.json")) as f:
data = json.load(f)
ms = data["mean"]["point_estimate"] / 1000
result.append(
{
"task": task,
"type": type,
"size": size,
"time": ms,
}
)
return result
def plot(result, choices, title, filename, to_stdout=False):
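    """Plot time versus input size for each series in `choices`, save the figure, and optionally echo the data to stdout."""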
x = [2 ** x["size"] for x in result if x["type"] == choices[0][0]]
x = sorted(x)
fig, ax = plt.subplots(1, 1, figsize=set_size(512))
for (choice, name) in choices:
values = [r for r in result if r["type"] == choice]
values = sorted(values, key=lambda r: int(r["size"]))
values = [r["time"] for r in values]
ax.plot(x, values, "-o", label=name)
if to_stdout:
print(name)
print("size, time (ms)")
for (v1, v2) in zip(x, values):
print(f"{v1}, {v2}")
ax.set(xlabel="size", ylabel="time (ms)", title=title)
ax.xaxis.set_major_formatter(mtick.ScalarFormatter(useMathText=True))
ax.grid()
ax.legend()
fig.savefig(filename)
result = (
_read_reports("take")
)
print(result)
plot(
result,
[
("array", "array"),
("view", "view"),
],
"Take N random elements from an Arrow Utf8Array of\n"
"N vs a \"sequence view\" of N elements, each with \"size\" elements of of 0-20 bytes",
"array_vs_view.png",
True,
)
| 25.027027 | 91 | 0.568035 |
f9bc91ae19acc26cf504f1ca0a240059dd8b5177 | 6,146 | py | Python | geo/db/query.py | hariharshankar/pygeo | f87b5f117dd35e8c6491d1c627cc795e416df6f9 | [
"MIT"
] | 1 | 2017-09-03T14:39:26.000Z | 2017-09-03T14:39:26.000Z | geo/db/query.py | hariharshankar/pygeo | f87b5f117dd35e8c6491d1c627cc795e416df6f9 | [
"MIT"
] | null | null | null | geo/db/query.py | hariharshankar/pygeo | f87b5f117dd35e8c6491d1c627cc795e416df6f9 | [
"MIT"
] | null | null | null | """
Builds and executes all select sql statements.
"""
class Select(object):
"""
Builds and executes all select sql statements.
"""
def __init__(self, db):
"""
Query the database.
:param db: a valid database connection.
"""
self.db_conn = db.session
self.db_conn.get_warnings = True
def __del__(self):
"""
Close the open sessions.
:return:
"""
self.db_conn.close()
def read(self, table_name,
columns=None,
where=None,
order_by=None,
limit=None, dict_cursor=True):
"""
Fetch rows from database.
:param table_name: <string> name of the table
:param columns: <list<string>> column names to fetch
:param where: list<<list<string>>> where clause as a list of strings.
[["field_name", "operator", "value"], ["and|or"],
["field_name", "operator", "value"]]
:param order_by: list<<string>,<string>> order_by clause
["field_name", "asc|desc"]
:param limit: list<<int>,<int>> start stop limits like [0,1]
:returns: {"keys": [keys], "values": [v]}
"""
if table_name == "" or table_name.find(" ") >= 0:
return
if dict_cursor:
db_cur = self.db_conn.cursor(dictionary=True)
else:
db_cur = self.db_conn.cursor(raw=True)
sql = ["SELECT"]
alt_sql = ["SELECT"]
params = {}
if not columns or len(columns) == 0:
sql.append("*")
alt_sql.append("*")
else:
sql.append(",".join([c for c in columns if c.find(" ") < 0]))
alt_sql.append(",".join([c for c in columns if c.find(" ") < 0]))
sql.extend(["FROM", table_name])
alt_sql.extend(["FROM", table_name])
if where and len(where) > 0 and len(where) % 2 == 1:
sql.append("WHERE")
alt_sql.append("WHERE")
for i, whe in enumerate(where):
if i % 2 == 0\
and len(whe) == 3\
and whe[1].lower() in\
['<', '>', '<=', '>=', '=', 'like', 'in']:
if whe[1].lower() == 'in' and len(whe[2]) == 0:
return []
# the prepare stmt throws an error if "in" is used
# with only one value. converting it into "=" instead.
if whe[1].lower() == 'in' and len(whe[2]) == 1:
sql.extend([whe[0], "=", "':wh%s'" % str(i)])
alt_sql.extend([whe[0], "=", "%(wh"+str(i)+")s"])
params["wh"+str(i)] = whe[2][0]
elif whe[1].lower() == 'in':
sql.extend([whe[0], whe[1], "("])
alt_sql.extend([whe[0], whe[1], "("])
vals = []
alt_vals = []
for v, val in enumerate(whe[2]):
vals.append("':wh%s'" % str(v))
alt_vals.append("%(wh" + str(v)+")s")
params["wh"+str(v)] = str(val)
sql.append(",".join(vals))
sql.append(")")
alt_sql.append(",".join(alt_vals))
alt_sql.append(")")
#params["wh"+str(i)] = ",".join([str(val) for val in whe[2]])
else:
sql.extend([whe[0], whe[1], "':wh%s'" % str(i)])
alt_sql.extend([whe[0], whe[1], "%(wh"+str(i)+")s"])
params["wh"+str(i)] = whe[2]
elif i % 2 == 1 and whe[0].lower() in ['and', 'or']:
sql.append(whe[0])
alt_sql.append(whe[0])
else:
sql.pop()
alt_sql.pop()
if order_by and len(order_by) > 0:
sql.append("ORDER BY")
sql.extend(order_by)
alt_sql.append("ORDER BY")
alt_sql.extend(order_by)
if limit and len(limit) > 0:
sql.append("LIMIT")
sql.append(",".join(limit))
alt_sql.append("LIMIT")
alt_sql.append(",".join(limit))
try:
db_cur.execute(" ".join(alt_sql), params)
except Exception:
try:
# may be there is a spl char in the sql stmt
# using connection().execute will not quote the sql stmt
# and some messy hack is needed to avoid param execution
sql_stmt = " ".join(alt_sql)
sql_stmt = sql_stmt.replace("(%)", "(##)")
sql_stmt = sql_stmt % params
sql_stmt = sql_stmt.replace("(##)", "(%)")
db_cur.execute(sql_stmt)
except Exception:
raise
#print(db_cur.statement)
#print(db_cur.fetchwarnings())
return db_cur
def read_column_names(self, table_name, where=None):
"""
Read the columns of a table. Helps build queries dynamically
without knowing the table columns.
"""
db_cur = self.db_conn.cursor(dictionary=True)
sql = "SHOW COLUMNS FROM %s" % table_name
if where:
sql += " LIKE '%s'" % where
db_cur.execute(sql)
cols = [(dic["Field"], dic["Type"]) for dic in db_cur]
return cols
@staticmethod
def process_result_set(result):
"""
Convert the returned values from result obj into lists for
easy json serialization.
"""
if not result:
return [], []
keys = result.column_names
values = []
if not result.with_rows:
return keys, values
res = result.fetchall()
for r in res:
v = []
for k in keys:
v.append(r.get(k))
values.append(v)
#sorted(values)
return keys, values
| 34.723164 | 85 | 0.451839 |
146531d67c6e9d495fe86d3581e87f653d84ebb5 | 528 | py | Python | backend/home/migrations/0001_load_initial_data.py | crowdbotics-apps/new-a-28530 | 99733ac3680dc5e99f6eb92eb039747ca24e5e32 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/home/migrations/0001_load_initial_data.py | crowdbotics-apps/new-a-28530 | 99733ac3680dc5e99f6eb92eb039747ca24e5e32 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/home/migrations/0001_load_initial_data.py | crowdbotics-apps/new-a-28530 | 99733ac3680dc5e99f6eb92eb039747ca24e5e32 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | from django.db import migrations
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "new-a-28530.botics.co"
site_params = {
"name": "new a",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_site),
]
| 20.307692 | 61 | 0.647727 |
a5d24bc4fb5f31d987e24d58206e316b1597636e | 2,565 | py | Python | src/satosa/util.py | misi/SATOSA | 04907b44dbe93eea2f46026e7b6923435accba5a | [
"Apache-2.0"
] | 1 | 2019-07-10T14:07:51.000Z | 2019-07-10T14:07:51.000Z | src/satosa/util.py | misi/SATOSA | 04907b44dbe93eea2f46026e7b6923435accba5a | [
"Apache-2.0"
] | 1 | 2019-01-09T17:48:06.000Z | 2019-01-09T17:48:06.000Z | src/satosa/util.py | misi/SATOSA | 04907b44dbe93eea2f46026e7b6923435accba5a | [
"Apache-2.0"
] | 1 | 2019-10-03T15:50:09.000Z | 2019-10-03T15:50:09.000Z | """
Python package file for util functions.
"""
import hashlib
import logging
import random
import string
from satosa.logging_util import satosa_logging
logger = logging.getLogger(__name__)
def hash_data(salt, value, hash_alg=None):
"""
Hashes a value together with a salt with the given hash algorithm.
:type salt: str
:type hash_alg: str
:type value: str
:param salt: hash salt
:param hash_alg: the hash algorithm to use (default: SHA512)
:param value: value to hash together with the salt
:return: hashed value
"""
hash_alg = hash_alg or 'sha512'
hasher = hashlib.new(hash_alg)
hasher.update(value.encode('utf-8'))
hasher.update(salt.encode('utf-8'))
value_hashed = hasher.hexdigest()
return value_hashed
def check_set_dict_defaults(dic, spec):
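    """Fill missing keys in `dic` (dot-separated paths from `spec`) with defaults and warn when an existing value is not allowed."""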
for path, value in spec.items():
keys = path.split('.')
try:
_val = dict_get_nested(dic, keys)
except KeyError:
if type(value) is list:
value_default = value[0]
else:
value_default = value
dict_set_nested(dic, keys, value_default)
else:
if type(value) is list:
is_value_valid = _val in value
elif type(value) is dict:
# do not validate dict
is_value_valid = bool(_val)
else:
is_value_valid = _val == value
if not is_value_valid:
satosa_logging(
logger, logging.WARNING,
"Incompatible configuration value '{}' for '{}'."
" Value shoud be: {}".format(_val, path, value),
{})
return dic
def dict_set_nested(dic, keys, value):
for key in keys[:-1]:
dic = dic.setdefault(key, {})
dic[keys[-1]] = value
def dict_get_nested(dic, keys):
for key in keys[:-1]:
dic = dic.setdefault(key, {})
return dic[keys[-1]]
def get_dict_defaults(d, *keys):
for key in keys:
d = d.get(key, d.get("", d.get("default", {})))
return d
def rndstr(size=16, alphabet=""):
"""
Returns a string of random ascii characters or digits
:type size: int
:type alphabet: str
:param size: The length of the string
:param alphabet: A string with characters.
:return: string
"""
rng = random.SystemRandom()
if not alphabet:
alphabet = string.ascii_letters[0:52] + string.digits
return type(alphabet)().join(rng.choice(alphabet) for _ in range(size))
| 27.287234 | 75 | 0.592982 |
6e228b20c491dbf5945054e0f280789575e6f9d1 | 1,621 | py | Python | tests/integration/goldens/logging/samples/generated_samples/logging_generated_logging_v2_config_service_v2_delete_view_sync.py | BenRKarl/gapic-generator-python | e4f92bd988a5b955ede88a9a10163010aae825f1 | [
"Apache-2.0"
] | 86 | 2018-09-28T11:46:15.000Z | 2022-03-27T19:25:09.000Z | tests/integration/goldens/logging/samples/generated_samples/logging_generated_logging_v2_config_service_v2_delete_view_sync.py | BenRKarl/gapic-generator-python | e4f92bd988a5b955ede88a9a10163010aae825f1 | [
"Apache-2.0"
] | 1,054 | 2018-04-19T18:35:05.000Z | 2022-03-30T14:12:38.000Z | tests/integration/goldens/logging/samples/generated_samples/logging_generated_logging_v2_config_service_v2_delete_view_sync.py | BenRKarl/gapic-generator-python | e4f92bd988a5b955ede88a9a10163010aae825f1 | [
"Apache-2.0"
] | 47 | 2018-04-26T22:08:56.000Z | 2022-03-22T22:18:00.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for DeleteView
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-logging
# [START logging_generated_logging_v2_ConfigServiceV2_DeleteView_sync]
from google.cloud import logging_v2
def sample_delete_view():
"""Snippet for delete_view"""
# Create a client
client = logging_v2.ConfigServiceV2Client()
# Initialize request argument(s)
project = "my-project-id"
location = "us-central1"
bucket = "bucket_value"
view = "view_value"
name = f"projects/{project}/locations/{location}/buckets/{bucket}/views/{view}"
request = logging_v2.DeleteViewRequest(
name=name,
)
# Make the request
response = client.delete_view(request=request)
# [END logging_generated_logging_v2_ConfigServiceV2_DeleteView_sync]
| 31.173077 | 85 | 0.744602 |
bee9f1975f35bb755414cd958647be199bd62d7a | 5,084 | py | Python | ocd_backend/utils/misc.py | openstate/open-wob-api | cafddece6078e68f0db58ec18b083598247ade8f | [
"CC-BY-4.0"
] | 6 | 2017-08-16T13:14:42.000Z | 2021-11-23T00:41:20.000Z | ocd_backend/utils/misc.py | openstate/open-wob-api | cafddece6078e68f0db58ec18b083598247ade8f | [
"CC-BY-4.0"
] | 21 | 2017-11-06T17:05:08.000Z | 2022-03-11T23:18:12.000Z | ocd_backend/utils/misc.py | openstate/open-wob-api | cafddece6078e68f0db58ec18b083598247ade8f | [
"CC-BY-4.0"
] | 2 | 2017-12-27T13:12:23.000Z | 2019-08-08T07:17:52.000Z | import datetime
import json
import re
import glob
import translitcodec
from elasticsearch.helpers import scan, bulk
def reindex(client, source_index, target_index, target_client=None, chunk_size=500, scroll='5m', transformation_callable=None):
"""
Reindex all documents from one index to another, potentially (if
`target_client` is specified) on a different cluster.
.. note::
This helper doesn't transfer mappings, just the data.
:arg client: instance of :class:`~elasticsearch.Elasticsearch` to use (for
read if `target_client` is specified as well)
:arg source_index: index (or list of indices) to read documents from
:arg target_index: name of the index in the target cluster to populate
:arg target_client: optional, is specified will be used for writing (thus
enabling reindex between clusters)
:arg chunk_size: number of docs in one chunk sent to es (default: 500)
:arg scroll: Specify how long a consistent view of the index should be
maintained for scrolled search
"""
target_client = client if target_client is None else target_client
docs = scan(client, index=source_index, scroll=scroll, _source_include=['*'])
def _change_doc_index(hits, index):
for h in hits:
h['_index'] = index
if transformation_callable is not None:
h = transformation_callable(h)
yield h
return bulk(target_client, _change_doc_index(docs, target_index),
chunk_size=chunk_size, stats_only=True)
def load_sources_config(path):
"""Loads a JSON file(s) containing the configuration of the available
sources.
:param path: the path of the JSON file(s) wildcards * enabled.
:type path: str.
"""
result = []
for filename in glob.glob(path):
try:
with open(filename) as json_file:
for entry in json.load(json_file):
result.append(entry)
except IOError, e:
e.strerror = 'Unable to load sources configuration file (%s)' % (
e.strerror,)
raise
return result
def load_object(path):
"""Load an object given it's absolute object path, and return it.
The object can be a class, function, variable or instance.
:param path: absolute object path (i.e. 'ocd_backend.extractor.BaseExtractor')
:type path: str.
"""
try:
dot = path.rindex('.')
except ValueError:
raise ValueError, "Error loading object '%s': not a full path" % path
module, name = path[:dot], path[dot+1:]
try:
mod = __import__(module, {}, {}, [''])
except ImportError, e:
raise ImportError, "Error loading object '%s': %s" % (path, e)
try:
obj = getattr(mod, name)
except AttributeError:
raise NameError, "Module '%s' doesn't define any object named '%s'" % (
module, name)
return obj
def try_convert(conv, value):
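    """Return conv(value), falling back to the unchanged value if the conversion raises ValueError."""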
try:
return conv(value)
except ValueError:
return value
def parse_date(regexen, date_str):
"""
Parse a messy string into a granular date
`regexen` is of the form [ (regex, (granularity, groups -> datetime)) ]
"""
if date_str:
for reg, (gran, dater) in regexen:
m = re.match(reg, date_str)
if m:
try:
return gran, dater(m.groups())
except ValueError:
return 0, None
return 0, None
def parse_date_span(regexen, date1_str, date2_str):
"""
Parse a start & end date into a (less) granular date
`regexen` is of the form [ (regex, (granularity, groups -> datetime)) ]
"""
date1_gran, date1 = parse_date(regexen, date1_str)
date2_gran, date2 = parse_date(regexen, date2_str)
if date2:
# TODO: integrate both granularities
if (date1_gran, date1) == (date2_gran, date2):
return date1_gran, date1
if (date2 - date1).days < 5*365:
return 4, date1
if (date2 - date1).days < 50*365:
return 3, date1
if (date2 - date1).days >= 50*365:
return 2, date1
else:
return date1_gran, date1
class DatetimeJSONEncoder(json.JSONEncoder):
"""
JSONEncoder that can handle ``datetime.datetime``, ``datetime.date`` and
``datetime.timedelta`` objects.
"""
def default(self, o):
if isinstance(o, datetime.datetime) or isinstance(o, datetime.date):
return o.isoformat()
elif isinstance(o, datetime.timedelta):
return (datetime.datetime.min + o).time().isoformat()
else:
return super(DatetimeJSONEncoder, self).default(o)
_punct_re = re.compile(r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+')
def slugify(text, delim=u'-'):
"""Generates an ASCII-only slug."""
result = []
for word in _punct_re.split(text.lower()):
word = word.encode('translit/long')
if word:
result.append(word)
return unicode(delim.join(result))
| 31.190184 | 127 | 0.616837 |
4a9f6722aea144ba7b014bcb537c921c43205c87 | 1,993 | py | Python | ixtlilton_tools/_private_tools/exceptions.py | uibcdf/Ixtlilton | 1f7e8afe9ca6a2c25646bab0bf1770fe75c8aa5e | [
"MIT"
] | null | null | null | ixtlilton_tools/_private_tools/exceptions.py | uibcdf/Ixtlilton | 1f7e8afe9ca6a2c25646bab0bf1770fe75c8aa5e | [
"MIT"
] | 13 | 2017-08-01T22:31:24.000Z | 2021-12-17T18:56:22.000Z | ixtlilton_tools/_private_tools/exceptions.py | uibcdf/Ixtlilton | 1f7e8afe9ca6a2c25646bab0bf1770fe75c8aa5e | [
"MIT"
] | null | null | null | class BadCallError(ValueError):
def __init__(self, message=None):
if message is None:
message = 'Wrong way of invoking this method. Check the online documentation for more information: http://www.uibcdf.org/MolSysMT'
super().__init__(message)
class NotImplementedError(NotImplementedError):
def __init__(self, message=None):
if message is None:
            message = 'It has not been implemented yet. Write a new issue in https://github.com/uibcdf/MolSysMT/issues asking for it.'
super().__init__(message)
class LibraryNotFound(ValueError):
def __init__(self, library):
message = 'The python library {} was not found.'.format(library)
super().__init__(message)
class NoAdminRights(ValueError):
def __init__(self, message=None):
if message is None:
message = 'This method needs administration rights.'
super().__init__(message)
class DirectoryConflict(ValueError):
def __init__(self, message=None):
if message is None:
message = 'There is a directory conflict'
super().__init__(message)
class NoUIDsAvailable(ValueError):
def __init__(self, message=None):
if message is None:
message = 'All user ids between 2000 and 2999 are already taken.'
super().__init__(message)
class UserDoesNotExist(ValueError):
def __init__(self, username=None, message=None):
if message is None:
if username is None:
                message = 'The user does not exist.'
            else:
                message = f'The user {username} does not exist.'
super().__init__(message)
class GroupDoesNotExist(ValueError):
def __init__(self, groupname=None, message=None):
if message is None:
if groupname is None:
                message = 'The group does not exist.'
            else:
                message = f'The group {groupname} does not exist.'
super().__init__(message)
| 36.236364 | 142 | 0.644757 |
d8d77294762ed00878626bef699bf28a5a0e122e | 505 | py | Python | server/scripts/encontrar_investigacoes.py | danilopcarlotti/scdf | cb89216f6a07da94f765d101390a521861063c76 | [
"MIT"
] | 3 | 2019-11-28T22:58:50.000Z | 2020-08-20T12:23:38.000Z | server/scripts/encontrar_investigacoes.py | danilopcarlotti/scdf | cb89216f6a07da94f765d101390a521861063c76 | [
"MIT"
] | null | null | null | server/scripts/encontrar_investigacoes.py | danilopcarlotti/scdf | cb89216f6a07da94f765d101390a521861063c76 | [
"MIT"
] | 1 | 2019-03-21T20:13:51.000Z | 2019-03-21T20:13:51.000Z | import os
import sys
from dotenv import load_dotenv, find_dotenv
from pymongo import MongoClient
from pathlib import Path
PATH_ROOT = Path().absolute().parent.parent
sys.path.append(str(PATH_ROOT))
load_dotenv(find_dotenv())
mongo_url = os.getenv("mongo_url")
def investigacoes_usuario(id_responsavel):
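    """Return the ids of investigations assigned to `id_responsavel` from the SCDF 'investigacoes' collection."""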
myclient = MongoClient(mongo_url)
mydb_master = myclient["SCDF"]
col = mydb_master["investigacoes"]
return [i["id_investigacao"] for i in col.find({"id_responsavel":id_responsavel})] | 28.055556 | 86 | 0.770297 |
60ce605d1df042d5296c4bf0fdde71ac70a50978 | 256 | py | Python | Day1/day1.2.py | akashvacher/AdventOfCode2021 | 8d1429c0cc33cf67f84097b38fb01f02e69c1717 | [
"MIT"
] | null | null | null | Day1/day1.2.py | akashvacher/AdventOfCode2021 | 8d1429c0cc33cf67f84097b38fb01f02e69c1717 | [
"MIT"
] | null | null | null | Day1/day1.2.py | akashvacher/AdventOfCode2021 | 8d1429c0cc33cf67f84097b38fb01f02e69c1717 | [
"MIT"
] | null | null | null | def part2():
count = 0
numbers = []
for line in open("in.txt").read().splitlines():
numbers.append(int(line))
if len(numbers) >= 4:
if numbers[-1] > numbers[-4]:
count += 1
print(count)
part2()
| 19.692308 | 51 | 0.488281 |
5dcc1bea08b638c1ed29d74dbe8c218e35035b78 | 8,261 | py | Python | realtime_hand_3d/segmentation/criterion.py | NeelayS/realtime_hand | 219c772b9b7df60c390edac7da23f9cdddebca4d | [
"MIT"
] | null | null | null | realtime_hand_3d/segmentation/criterion.py | NeelayS/realtime_hand | 219c772b9b7df60c390edac7da23f9cdddebca4d | [
"MIT"
] | null | null | null | realtime_hand_3d/segmentation/criterion.py | NeelayS/realtime_hand | 219c772b9b7df60c390edac7da23f9cdddebca4d | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
from ..utils import Registry
SEG_CRITERION_REGISTRY = Registry("CRITERION")
@SEG_CRITERION_REGISTRY.register()
class OhemCrossEntropy2dTensor(nn.Module):
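    """Pixel-wise cross-entropy with online hard example mining (OHEM): only
    predictions below a probability threshold (keeping at least `min_kept`
    pixels) contribute to the loss."""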
def __init__(
self,
ignore_label,
reduction="elementwise_mean",
thresh=0.6,
min_kept=256,
down_ratio=1,
use_weight=False,
):
super(OhemCrossEntropy2dTensor, self).__init__()
self.ignore_label = ignore_label
self.thresh = float(thresh)
self.min_kept = int(min_kept)
self.down_ratio = down_ratio
if use_weight:
weight = torch.FloatTensor(
[
0.8373,
0.918,
0.866,
1.0345,
1.0166,
0.9969,
0.9754,
1.0489,
0.8786,
1.0023,
0.9539,
0.9843,
1.1116,
0.9037,
1.0865,
1.0955,
1.0865,
1.1529,
1.0507,
]
)
self.criterion = torch.nn.CrossEntropyLoss(
reduction=reduction, weight=weight, ignore_index=ignore_label
)
else:
self.criterion = torch.nn.CrossEntropyLoss(
reduction=reduction, ignore_index=ignore_label
)
def forward(self, pred, target):
b, c, h, w = pred.size()
target = target.view(-1)
valid_mask = target.ne(self.ignore_label)
target = target * valid_mask.long()
num_valid = valid_mask.sum()
prob = F.softmax(pred, dim=1)
prob = (prob.transpose(0, 1)).reshape(c, -1)
if self.min_kept > num_valid:
print("Labels: {}".format(num_valid))
elif num_valid > 0:
prob = prob.masked_fill_(~valid_mask, 1)
mask_prob = prob[target, torch.arange(len(target), dtype=torch.long)]
threshold = self.thresh
if self.min_kept > 0:
_, index = mask_prob.sort()
threshold_index = index[min(len(index), self.min_kept) - 1]
if mask_prob[threshold_index] > self.thresh:
threshold = mask_prob[threshold_index]
kept_mask = mask_prob.le(threshold)
target = target * kept_mask.long()
valid_mask = valid_mask * kept_mask
target = target.masked_fill_(~valid_mask, self.ignore_label)
target = target.view(b, h, w)
return self.criterion(pred, target)
@SEG_CRITERION_REGISTRY.register()
class CriterionDSN(nn.CrossEntropyLoss):
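    """Cross-entropy over the first and last predictions (deep supervision), weighting the auxiliary loss by 0.4."""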
def __init__(self, ignore_index=255, reduce=True):
super(CriterionDSN, self).__init__()
self.ignore_index = ignore_index
self.reduce = reduce
def forward(self, preds, target):
H, W = target.shape[-2:]
scale_pred = F.interpolate(
input=preds[0], size=(H, W), mode="bilinear", align_corners=True
)
loss1 = super(CriterionDSN, self).forward(scale_pred, target)
scale_pred = F.interpolate(
input=preds[-1], size=(H, W), mode="bilinear", align_corners=True
)
loss2 = super(CriterionDSN, self).forward(scale_pred, target)
return loss1 + loss2 * 0.4
@SEG_CRITERION_REGISTRY.register()
class CriterionOhemDSN(nn.Module):
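    """OHEM cross-entropy on the main prediction plus 0.4 * plain cross-entropy on the auxiliary prediction."""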
def __init__(self, ignore_index=255, thresh=0.7, min_kept=100000, reduce=True):
super(CriterionOhemDSN, self).__init__()
self.ignore_index = ignore_index
self.criterion1 = OhemCrossEntropy2dTensor(
ignore_index, thresh=thresh, min_kept=min_kept
)
self.criterion2 = torch.nn.CrossEntropyLoss(
ignore_index=ignore_index, reduce=reduce
)
def forward(self, preds, target):
H, W = target.shape[-2:]
scale_pred = F.interpolate(
input=preds[0], size=(H, W), mode="bilinear", align_corners=True
)
loss1 = self.criterion1(scale_pred, target)
scale_pred = F.interpolate(
input=preds[1], size=(H, W), mode="bilinear", align_corners=True
)
loss2 = self.criterion2(scale_pred, target)
return loss1 + loss2 * 0.4
@SEG_CRITERION_REGISTRY.register()
class CriterionDFANet(nn.Module):
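    """OHEM cross-entropy over three DFANet outputs: the main loss plus two auxiliary losses weighted by 0.4."""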
def __init__(self, ignore_index=255, thresh=0.7, min_kept=100000, reduce=True):
super(CriterionDFANet, self).__init__()
self.ignore_index = ignore_index
self.criterion1 = OhemCrossEntropy2dTensor(
ignore_index, thresh=thresh, min_kept=min_kept
)
self.criterion2 = torch.nn.CrossEntropyLoss(
ignore_index=ignore_index, reduce=reduce
)
def forward(self, preds, target):
h, w = target.size(1), target.size(2)
scale_pred = F.interpolate(
input=preds[0], size=(h, w), mode="bilinear", align_corners=True
)
loss1 = self.criterion1(scale_pred, target)
scale_pred = F.interpolate(
input=preds[1], size=(h, w), mode="bilinear", align_corners=True
)
loss2 = self.criterion1(scale_pred, target)
scale_pred = F.interpolate(
input=preds[2], size=(h, w), mode="bilinear", align_corners=True
)
loss3 = self.criterion1(scale_pred, target)
return loss1 + 0.4 * loss2 + 0.4 * loss3
@SEG_CRITERION_REGISTRY.register()
class CriterionICNet(nn.Module):
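    """OHEM cross-entropy over four ICNet outputs: the main loss plus three auxiliary losses weighted by 0.4."""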
def __init__(self, ignore_index=255, thresh=0.7, min_kept=100000, reduce=True):
super(CriterionICNet, self).__init__()
self.ignore_index = ignore_index
self.criterion1 = OhemCrossEntropy2dTensor(
ignore_index, thresh=thresh, min_kept=min_kept
)
def forward(self, preds, target):
h, w = target.size(1), target.size(2)
scale_pred = F.interpolate(
input=preds[0], size=(h, w), mode="bilinear", align_corners=True
)
loss1 = self.criterion1(scale_pred, target)
scale_pred = F.interpolate(
input=preds[1], size=(h, w), mode="bilinear", align_corners=True
)
loss2 = self.criterion1(scale_pred, target)
scale_pred = F.interpolate(
input=preds[2], size=(h, w), mode="bilinear", align_corners=True
)
loss3 = self.criterion1(scale_pred, target)
scale_pred = F.interpolate(
input=preds[3], size=(h, w), mode="bilinear", align_corners=True
)
loss4 = self.criterion1(scale_pred, target)
return loss1 + 0.4 * loss2 + 0.4 * loss3 + 0.4 * loss4
@SEG_CRITERION_REGISTRY.register()
class ModCriterionICNet(nn.Module):
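    """Same as CriterionICNet but over three outputs: the main loss plus two 0.4-weighted auxiliary losses."""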
def __init__(self, ignore_index=255, thresh=0.7, min_kept=100000, reduce=True):
super(ModCriterionICNet, self).__init__()
self.ignore_index = ignore_index
self.criterion1 = OhemCrossEntropy2dTensor(
ignore_index, thresh=thresh, min_kept=min_kept
)
def forward(self, preds, target):
h, w = target.size(1), target.size(2)
scale_pred = F.interpolate(
input=preds[0], size=(h, w), mode="bilinear", align_corners=True
)
loss1 = self.criterion1(scale_pred, target)
scale_pred = F.interpolate(
input=preds[1], size=(h, w), mode="bilinear", align_corners=True
)
loss2 = self.criterion1(scale_pred, target)
scale_pred = F.interpolate(
input=preds[2], size=(h, w), mode="bilinear", align_corners=True
)
loss3 = self.criterion1(scale_pred, target)
return loss1 + 0.4 * loss2 + 0.4 * loss3
SEG_MODEL_CRITERIONS = {
"BiSeNet": "CriterionDFANet",
"DFANet": "CriterionDFANet",
"DFSegNet": "CriterionDSN",
"DFSegNetV1": "CriterionDSN",
"DFSegNetV2": "CriterionDSN",
"ESPNet": "CriterionDSN",
"FastSCNN": "CriterionDSN",
"ICNet": "ModCriterionICNet",
"CustomICNet": "CriterionICNet",
"SwiftNetRes18": "CriterionDSN",
"SwiftNetResNet": "CriterionDSN",
}
| 30.596296 | 83 | 0.584554 |
27631e790ce112e044b6b4353ec3732cac0152db | 13735 | py | Python | senlin-7.0.0/senlin/tests/unit/db/test_cluster_policy_api.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | ["Apache-2.0"] | null | null | null | senlin-7.0.0/senlin/tests/unit/db/test_cluster_policy_api.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | ["Apache-2.0"] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | senlin-7.0.0/senlin/tests/unit/db/test_cluster_policy_api.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | ["Apache-2.0"] | 2 | 2020-03-15T01:24:15.000Z | 2020-07-22T20:34:26.000Z |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
from oslo_db.sqlalchemy import utils as sa_utils
from oslo_utils import timeutils as tu
from senlin.common import consts
from senlin.db.sqlalchemy import api as db_api
from senlin.tests.unit.common import base
from senlin.tests.unit.common import utils
from senlin.tests.unit.db import shared


class DBAPIClusterPolicyTest(base.SenlinTestCase):
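    """Tests for the cluster-policy binding DB API."""
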
def setUp(self):
super(DBAPIClusterPolicyTest, self).setUp()
self.ctx = utils.dummy_context()
self.profile = shared.create_profile(self.ctx)
self.cluster = shared.create_cluster(self.ctx, self.profile)

    def create_policy(self, **kwargs):
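        """Create a test policy; kwargs override the default fields."""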
data = {
'name': 'test_policy',
'type': 'ScalingPolicy',
'user': self.ctx.user_id,
'project': self.ctx.project_id,
'domain': self.ctx.domain_id,
'spec': {
'min_size': 1,
'max_size': 10,
                'pause_time': 'PT10M',
},
'data': None,
}
data.update(kwargs)
return db_api.policy_create(self.ctx, data)

    def test_policy_attach_detach(self):
policy = self.create_policy()
fields = {
'enabled': True,
}
db_api.cluster_policy_attach(self.ctx, self.cluster.id, policy.id,
fields)
bindings = db_api.cluster_policy_get_all(self.ctx, self.cluster.id)
self.assertEqual(1, len(bindings))
self.assertTrue(bindings[0].enabled)
# This will succeed
db_api.cluster_policy_detach(self.ctx, self.cluster.id, policy.id)
bindings = db_api.cluster_policy_get_all(self.ctx, self.cluster.id)
self.assertEqual(0, len(bindings))
# This will fail silently
res = db_api.cluster_policy_detach(self.ctx, self.cluster.id, 'BOGUS')
self.assertIsNone(res)

    def test_policy_enable_disable(self):
policy = self.create_policy()
fields = {
'enabled': True,
}
db_api.cluster_policy_attach(self.ctx, self.cluster.id, policy.id,
fields)
bindings = db_api.cluster_policy_get_all(self.ctx, self.cluster.id)
self.assertEqual(1, len(bindings))
self.assertTrue(bindings[0].enabled)
db_api.cluster_policy_update(self.ctx, self.cluster.id, policy.id,
{'enabled': True})
bindings = db_api.cluster_policy_get_all(self.ctx, self.cluster.id)
self.assertEqual(1, len(bindings))
self.assertTrue(bindings[0].enabled)
db_api.cluster_policy_update(self.ctx, self.cluster.id, policy.id,
{'enabled': False})
bindings = db_api.cluster_policy_get_all(self.ctx, self.cluster.id)
self.assertEqual(1, len(bindings))
self.assertFalse(bindings[0].enabled)
db_api.cluster_policy_update(self.ctx, self.cluster.id, policy.id,
{'enabled': True})
bindings = db_api.cluster_policy_get_all(self.ctx, self.cluster.id)
self.assertEqual(1, len(bindings))
self.assertTrue(bindings[0].enabled)
# No policy binding found
res = db_api.cluster_policy_update(self.ctx, self.cluster.id, 'BOGUS',
{})
self.assertIsNone(res)

    def test_policy_update_with_data(self):
policy = self.create_policy()
db_api.cluster_policy_attach(self.ctx, self.cluster.id, policy.id, {})
bindings = db_api.cluster_policy_get_all(self.ctx, self.cluster.id)
self.assertEqual(1, len(bindings))
self.assertIsNone(bindings[0].data)
fields = {'data': {'foo': 'bar'}}
db_api.cluster_policy_update(self.ctx, self.cluster.id, policy.id,
fields)
bindings = db_api.cluster_policy_get_all(self.ctx, self.cluster.id)
self.assertEqual(1, len(bindings))
self.assertEqual({'foo': 'bar'}, bindings[0].data)
fields = {'data': {'foo': 'BAR'}}
db_api.cluster_policy_update(self.ctx, self.cluster.id, policy.id,
fields)
bindings = db_api.cluster_policy_get_all(self.ctx, self.cluster.id)
self.assertEqual(1, len(bindings))
self.assertEqual({'foo': 'BAR'}, bindings[0].data)

    def test_policy_update_last_op(self):
policy = self.create_policy()
db_api.cluster_policy_attach(self.ctx, self.cluster.id, policy.id, {})
bindings = db_api.cluster_policy_get_all(self.ctx, self.cluster.id)
self.assertEqual(1, len(bindings))
self.assertIsNone(bindings[0].last_op)
timestamp = tu.utcnow(True)
fields = {'last_op': timestamp}
db_api.cluster_policy_update(self.ctx, self.cluster.id, policy.id,
fields)
bindings = db_api.cluster_policy_get_all(self.ctx, self.cluster.id)
self.assertEqual(1, len(bindings))
self.assertEqual(timestamp, bindings[0].last_op)

    def test_cluster_policy_get(self):
policy = self.create_policy()
db_api.cluster_policy_attach(self.ctx, self.cluster.id, policy.id, {})
binding = db_api.cluster_policy_get(self.ctx, self.cluster.id,
policy.id)
self.assertIsNotNone(binding)
self.assertEqual(self.cluster.id, binding.cluster_id)
self.assertEqual(policy.id, binding.policy_id)

    def test_policy_get_all_with_empty_filters(self):
for pid in ['policy1', 'policy2']:
self.create_policy(id=pid)
db_api.cluster_policy_attach(self.ctx, self.cluster.id, pid, {})
filters = None
results = db_api.cluster_policy_get_all(self.ctx, self.cluster.id,
filters=filters)
self.assertEqual(2, len(results))

    @mock.patch.object(sa_utils, 'paginate_query')
def test_policy_get_all_with_sort_key_are_used(self, mock_paginate):
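        """Sort keys plus 'id' should be passed through to paginate_query."""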
values = {
'policy1': {'enabled': True},
'policy2': {'enabled': True},
'policy3': {'enabled': True}
}
# prepare
for key in values:
value = values[key]
policy_id = self.create_policy(id=key).id
db_api.cluster_policy_attach(self.ctx, self.cluster.id, policy_id,
value)
sort = consts.CLUSTER_POLICY_SORT_KEYS
db_api.cluster_policy_get_all(self.ctx, self.cluster.id,
sort=','.join(sort))
# Check sort_keys used
args = mock_paginate.call_args[0]
sort.append('id')
self.assertEqual(set(sort), set(args[3]))

    def test_policy_get_all_with_sorting(self):
values = {
'policy1': {'enabled': True},
'policy2': {'enabled': True},
'policy3': {'enabled': False}
}
# prepare
for key in values:
value = values[key]
policy_id = self.create_policy(id=key).id
db_api.cluster_policy_attach(self.ctx, self.cluster.id, policy_id,
value)
# sorted by enabled, the 2nd and 3rd are unpredictable
results = db_api.cluster_policy_get_all(self.ctx, self.cluster.id,
sort='enabled')
self.assertEqual('policy3', results[0].policy_id)

    def test_policy_get_all_by_policy_type(self):
for pid in ['policy1', 'policy2']:
self.create_policy(id=pid)
db_api.cluster_policy_attach(self.ctx, self.cluster.id, pid, {})
results = db_api.cluster_policy_get_by_type(self.ctx, self.cluster.id,
'ScalingPolicy')
self.assertEqual(2, len(results))
results = db_api.cluster_policy_get_by_type(self.ctx, self.cluster.id,
'UnknownPolicy')
self.assertEqual(0, len(results))

    def test_policy_get_all_by_policy_name(self):
for pid in ['policy1', 'policy2']:
self.create_policy(id=pid)
db_api.cluster_policy_attach(self.ctx, self.cluster.id, pid, {})
results = db_api.cluster_policy_get_by_name(self.ctx, self.cluster.id,
'test_policy')
self.assertEqual(2, len(results))
results = db_api.cluster_policy_get_by_name(self.ctx, self.cluster.id,
'unknown_policy')
self.assertEqual(0, len(results))

    def test_policy_get_all_by_policy_type_with_filter(self):
for pid in ['policy1', 'policy2']:
self.create_policy(id=pid)
db_api.cluster_policy_attach(self.ctx, self.cluster.id, pid,
{'enabled': True})
filters = {'enabled': True}
results = db_api.cluster_policy_get_by_type(self.ctx, self.cluster.id,
'ScalingPolicy',
filters=filters)
self.assertEqual(2, len(results))
filters = {'enabled': False}
results = db_api.cluster_policy_get_by_type(self.ctx, self.cluster.id,
'ScalingPolicy',
filters=filters)
self.assertEqual(0, len(results))

    def test_policy_get_all_by_policy_name_with_filter(self):
for pid in ['policy1', 'policy2']:
self.create_policy(id=pid)
db_api.cluster_policy_attach(self.ctx, self.cluster.id, pid,
{'enabled': True})
filters = {'enabled': True}
results = db_api.cluster_policy_get_by_name(self.ctx, self.cluster.id,
'test_policy',
filters=filters)
self.assertEqual(2, len(results))
filters = {'enabled': False}
results = db_api.cluster_policy_get_by_name(self.ctx, self.cluster.id,
'test_policy',
filters=filters)
self.assertEqual(0, len(results))

    def test_policy_get_all_with_all_filters(self):
for pid in ['policy1', 'policy2']:
self.create_policy(id=pid)
db_api.cluster_policy_attach(self.ctx, self.cluster.id, pid,
{'enabled': True})
filters = {'enabled': True,
'policy_name': 'test_policy',
'policy_type': 'ScalingPolicy'}
results = db_api.cluster_policy_get_all(self.ctx, self.cluster.id,
filters=filters)
self.assertEqual(2, len(results))
filters = {'enabled': True,
'policy_type': 'ScalingPolicy'}
results = db_api.cluster_policy_get_all(self.ctx, self.cluster.id,
filters=filters)
self.assertEqual(2, len(results))
filters = {'enabled': True,
'policy_name': 'test_policy'}
results = db_api.cluster_policy_get_all(self.ctx, self.cluster.id,
filters=filters)
self.assertEqual(2, len(results))
filters = {'enabled': True,
'policy_name': 'wrong_name',
'policy_type': 'wrong_type'}
results = db_api.cluster_policy_get_all(self.ctx, self.cluster.id,
filters=filters)
self.assertEqual(0, len(results))
filters = {'policy_name': 'test_policy'}
results = db_api.cluster_policy_get_all(self.ctx, self.cluster.id,
filters=filters)
self.assertEqual(2, len(results))
filters = {'policy_type': 'ScalingPolicy'}
results = db_api.cluster_policy_get_all(self.ctx, self.cluster.id,
filters=filters)
self.assertEqual(2, len(results))
filters = {'enabled': False}
results = db_api.cluster_policy_get_all(self.ctx, self.cluster.id,
filters=filters)
self.assertEqual(0, len(results))

    def test_cluster_policy_ids_by_cluster(self):
# prepare
ids = []
for i in range(3):
policy_id = self.create_policy().id
ids.append(policy_id)
db_api.cluster_policy_attach(self.ctx, self.cluster.id, policy_id,
{'enabled': True})
        # all attached policy IDs should be returned; ordering is not checked
results = db_api.cluster_policy_ids_by_cluster(self.ctx,
self.cluster.id)
self.assertEqual(set(ids), set(results))
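
# Running just this module (hypothetical invocation; the project normally runs
# these tests through its own tox/stestr targets):
#   python -m pytest senlin/tests/unit/db/test_cluster_policy_api.py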
| 41.74772 | 78 | 0.568839 |