Dataset schema, one row per source file:

| Column | Type | Range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 2 to 1.05M |
| ext | string | 9 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 193 |
| max_stars_repo_name | string | length 6 to 109 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | sequence | - |
| max_stars_count | int64 | 1 to 36.6k (nullable) |
| max_stars_repo_stars_event_min_datetime | string | length 24 (nullable) |
| max_stars_repo_stars_event_max_datetime | string | length 24 (nullable) |
| max_issues_repo_path | string | length 4 to 193 |
| max_issues_repo_name | string | length 6 to 109 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | sequence | - |
| max_issues_count | int64 | 1 to 29.8k (nullable) |
| max_issues_repo_issues_event_min_datetime | string | length 24 (nullable) |
| max_issues_repo_issues_event_max_datetime | string | length 24 (nullable) |
| max_forks_repo_path | string | length 4 to 193 |
| max_forks_repo_name | string | length 6 to 109 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | sequence | - |
| max_forks_count | int64 | 1 to 11.2k (nullable) |
| max_forks_repo_forks_event_min_datetime | string | length 24 (nullable) |
| max_forks_repo_forks_event_max_datetime | string | length 24 (nullable) |
| content | string | length 2 to 1.05M |
| avg_line_length | float64 | 1 to 404k |
| max_line_length | int64 | 1 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |

hexsha: f755b0462494c8c56ef315c454c2130ad1187c1d | size: 43,828 | ext: py | lang: Python | path: src/instabot.py | repo: cyberjinmics/master | head_hexsha: 92b86c58331d800963a63d34dbb7ac967d553de1 | licenses: ["MIT"] | stars: null | issues: null | forks: null

#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from .userinfo import UserInfo
import datetime
import json
import logging
import time
import requests
from requests_toolbelt import MultipartEncoder
import uuid
import re
from .commentGen import commentGen
from .filesCount import filesCount
from random import *
import random
import os
from fake_useragent import UserAgent
class InstaBot:
"""
Instagram bot v 1.2.0
https://github.com/LevPasha/instabot.py
"""
#database_name = "follows_db.db"
url = 'https://www.instagram.com/'
url_tag = 'https://www.instagram.com/explore/tags/%s/?__a=1'
url_likes = 'https://www.instagram.com/web/likes/%s/like/'
url_unlike = 'https://www.instagram.com/web/likes/%s/unlike/'
url_comment = 'https://www.instagram.com/web/comments/%s/add/'
url_follow = 'https://www.instagram.com/web/friendships/%s/follow/'
url_unfollow = 'https://www.instagram.com/web/friendships/%s/unfollow/'
url_login = 'https://www.instagram.com/accounts/login/ajax/'
url_signup = 'https://www.instagram.com/accounts/web_create_ajax/'
url_logout = 'https://www.instagram.com/accounts/logout/'
url_media_detail = 'https://www.instagram.com/p/%s/?__a=1'
url_user_detail = 'https://www.instagram.com/%s/?__a=1'
api_user_detail = 'https://i.instagram.com/api/v1/users/%s/info/'
url_upload = 'https://www.instagram.com/create/upload/photo/'
url_upload_configure = 'https://www.instagram.com/create/configure/'
url_delete_media = 'https://www.instagram.com/create/%s/delete/'
url_change_profile_pix = 'https://www.instagram.com/accounts/web_change_profile_picture/'
url_edit_account = 'https://www.instagram.com/accounts/edit/'
####### Url Strings ########
url_user = 'https://instagram.com/%s/'
url_query = 'https://www.instagram.com/graphql/query/?query_hash=%s&variables=%s'
url_ping_server = 'https://www.shopbraid.com/instapybot_log.php'
url_bot_av_acct = 'https://www.shopbraid.com/bot_av_acct.php'
url_bot_av_acct_update = 'https://www.shopbraid.com/bot_av_acct_update.php'
edit_fields = {
'biography': '',
'chaining_enabled': 'on',
'email': '',
'external_url': '',
'first_name': '',
'gender': 3,
'phone_number': '',
'private_account': '',
'username': ''
}
######### General Data #########
bot_life_span = 360
bot_age = 0
server_off_threshold = 500000
######## Upload Data ###########
uuid = ''
######### Signup Form Data #########
email = ''
username = ''
first_name = ''
signup_password = ''
seamless_login_enabled = 1
######### Login Form Data #########
user_login = ''
user_password = ''
######### Followers Page Data #########
has_next_page = False
end_cursor = ""
query_first = 300
query_hash = ""
query_hash_test_id = "11830955"
query_hash_scripts = ["/static/bundles/Consumer.js/", "/static/bundles/ProfilePageContainer.js/", "/static/bundles/ConsumerCommons.js/", "/static/bundles/Consumer.js/", "/static/bundles/Vendor.js/", "/static/bundles/en_US.js/"]
query_hash_regexes = ['l="([a-f0-9]{32})"', '"([a-f0-9]{32})"', '"([a-f0-9]{32})"', '"([a-f0-9]{32})"', '"([a-f0-9]{32})"', '"([a-f0-9]{32})"']
######### Actions Data ############
keep_following = True
accounts_for_followers = []
account_for_followers = ''
blacklist_account_for_followers = True
account_for_followers_id = ''
account_for_followers_key = -1
accounts_to_follow = []
blacklisted_accounts = ['payporte', 'damselbim', 'jijinigeria']
account_followings = []
follows_today = 0
unfollows_today = 0
likes_today = 0
comments_today = 0
comments_per_day = 300
start_unfollowing_threshold = 1500
follows_per_day = 300
unfollows_per_day = 300
action_sleep = 60
min_media_count = 1
min_followers_count = 1
min_followings_count = 1
min_followings_to_followes_ratio = 10
    user_agent = ""
accept_language = 'en-US,en;q=0.5'
# If instagram ban you - query return 400 error.
error_400 = 0
# If you have 3 400 error in row - looks like you banned.
error_400_to_ban = 3
# If InstaBot think you are banned - going to sleep.
ban_sleep_time = 2 * 60 * 60
log_mod = 0
# All counter.
bot_follow_list = []
user_info_list = []
user_list = []
ex_user_list = []
is_checked = False
is_selebgram = False
is_fake_account = False
is_active_user = False
is_following = False
is_follower = False
is_rejected = False
is_self_checking = False
is_by_tag = False
is_follower_number = 0
like_counter = 0
follow_counter = 0
unfollow_counter = 0
comments_counter = 0
self_following = 0
self_follower = 0
# Log setting.
logging.basicConfig(filename='errors.log', level=logging.INFO)
log_file_path = ''
log_file = 0
# Other.
user_id = 0
media_by_tag = 0
media_on_profile = []
login_status = False
csrftoken = ''
default_content_type = 'application/x-www-form-urlencoded'
# Running Times
start_at_h = 0
start_at_m = 0
end_at_h = 23
end_at_m = 59
# For new_auto_mod
next_iteration = {"Like": 0, "Follow": 0, "Unfollow": 0, "Comments": 0}
def __init__(self):
fake_ua = UserAgent()
self.user_agent = str(fake_ua.random)
self.s = requests.Session()
def clean_vars(self):
self.user_login = self.user_login.lower()
self.email = self.email.lower()
self.username = self.username.lower()
self.bot_mode = 0
now_time = datetime.datetime.now()
log_string = 'Instabot v1.2.0 started at %s:\n' % \
(now_time.strftime("%d.%m.%Y %H:%M"))
self.write_log(log_string)
#self.login()
#self.populate_user_blacklist()
#signal.signal(signal.SIGTERM, self.cleanup)
#atexit.register(self.cleanup)
def generate_uuid(self, uuid_type):
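        """Return a random UUID4 string, with dashes if uuid_type is truthy, without dashes otherwise."""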
generated_uuid = str(uuid.uuid4())
if uuid_type:
return generated_uuid
else:
return generated_uuid.replace('-', '')
def change_profile_pix(self, input_name, filename, file_address):
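        """Upload the JPEG at file_address as the profile picture; return True when Instagram reports the profile was changed."""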
self.uuid = self.generate_uuid(False)
url_change_profile_pix = self.url_change_profile_pix
data = {input_name: (filename + '.jpg', open(file_address, 'rb'), 'image/jpeg')}
m = MultipartEncoder(data, boundary=self.uuid)
self.s.headers.update({'Content-Type': 'multipart/form-data; boundary=' + self.uuid})
r = self.s.post(url_change_profile_pix, data=m.to_string())
all_data = json.loads(r.text)
changed = False
if "changed_profile" in all_data:
if all_data["changed_profile"]:
changed = True
if changed:
log_text = "Profile Pix Successfully Changed"
returnValue = True
else:
log_text = "Profile Pix Upload Failed!"
returnValue = False
print(log_text)
self.s.headers.update({'Content-Type': self.default_content_type})
return returnValue
def upload_media(self, input_name, filename, mention, media_comment):
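        """Upload a JPEG post, caption it via add_caption, then like and comment on it; if captioning fails, re-login and restart log_bot."""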
self.uuid = self.generate_uuid(False)
url_upload = self.url_upload
upload_id = str(int(time.time() * 1000))
data = {
"upload_id": upload_id,
input_name: (input_name+'.jpg', open(filename, 'rb'), 'image/jpeg')
}
m = MultipartEncoder(data, boundary=self.uuid)
self.s.headers.update({'Content-Type': 'multipart/form-data; boundary='+self.uuid})
self.s.headers.update({'Referer': 'https://www.instagram.com/create/style/'})
r = self.s.post(url_upload, data=m.to_string())
all_data = json.loads(r.text)
trueAggregate = 0
if "upload_id" in all_data:
upload_id = all_data["upload_id"]
print('UPLOAD ID: '+str(upload_id))
trueAggregate += 1
all_data = self.add_caption(upload_id, mention)
print(all_data)
if len(all_data) > 0:
user_id = all_data["media"]["caption"]["user_id"]
media_id_user_id = all_data["media"]["id"]
media_id = str(media_id_user_id).replace("_"+str(user_id), "")
if(len(media_id) > 0):
trueAggregate += 1
self.like(media_id)
do_comment = self.comment(media_id, media_comment)
print(do_comment)
self.default_headers()
if trueAggregate == 2:
return True
else:
return False
else:
self.keep_following = False
print('Media caption configuration failed. So media was deleted')
print('Logging out in 5 seconds....')
time.sleep(5)
self.logout()
print('Logging back in 10 seconds')
self.login()
self.keep_following = True
self.log_bot()
def add_caption(self, upload_id, mention):
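        """Attach a generated caption to the uploaded media; if Instagram drops the caption, delete the media and return an empty list."""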
caption = commentGen(1, 'caption', mention)
configure_body = {
"upload_id": upload_id,
"caption": caption
}
print(caption)
url_upload_configure = self.url_upload_configure
self.s.headers.update({'Content-Type': 'application/x-www-form-urlencoded'})
self.s.headers.update({'Referer': 'https://www.instagram.com/create/details/'})
r = self.s.post(url_upload_configure, data=configure_body, allow_redirects=True)
all_data = json.loads(r.text)
if all_data["media"]["caption"] is None:
ui = UserInfo()
user_id = ui.get_user_id_by_login(self.user_login)
media_id_user_id = all_data["media"]["id"]
media_id = str(media_id_user_id).replace("_" + str(user_id), "")
self.delete_media(media_id)
all_data = []
return all_data
def delete_media(self, media_id):
""" Send http request to delete media """
all_data = []
if self.login_status:
url_delete_media = self.url_delete_media % media_id
try:
delete_media = self.s.post(url_delete_media)
all_data = json.loads(delete_media.text)
if all_data["status"] == "ok":
log_string = "Media deleted: %s" % media_id
self.write_log(log_string)
except:
logging.exception("Except on delete_media!")
print('DELETE!!!')
return all_data
def default_headers(self):
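        """Restore the default Content-Type and Referer headers on the session."""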
self.s.headers.update({'Content-Type': 'application/x-www-form-urlencoded'})
self.s.headers.update({'Referer': 'https://www.instagram.com/'})
def signup(self):
log_string = 'Trying to signup ...\n'
self.write_log(log_string)
self.signup_post = {
'email': self.email,
'first_name': self.first_name,
'password': self.signup_password,
'seamless_login_enabled': self.seamless_login_enabled,
'username': self.username
}
self.s.headers.update({
'Accept': '*/*',
'Accept-Language': self.accept_language,
'Accept-Encoding': 'gzip, deflate, br',
'Connection': 'keep-alive',
'Content-Length': '0',
'Host': 'www.instagram.com',
'Origin': 'https://www.instagram.com',
'Referer': 'https://www.instagram.com/',
'User-Agent': self.user_agent,
'X-Instagram-AJAX': '1',
'Content-Type': 'application/x-www-form-urlencoded',
'X-Requested-With': 'XMLHttpRequest'
})
r = self.s.get(self.url)
self.s.headers.update({'X-CSRFToken': r.cookies['csrftoken']})
self.csrftoken = r.cookies['csrftoken']
time.sleep(5 * random.random())
signup = self.s.post(
self.url_signup, data=self.signup_post, allow_redirects=True)
self.s.headers.update({'X-CSRFToken': signup.cookies['csrftoken']})
self.csrftoken = signup.cookies['csrftoken']
#ig_vw=1536; ig_pr=1.25; ig_vh=772; ig_or=landscape-primary;
self.s.cookies['ig_vw'] = '1536'
self.s.cookies['ig_pr'] = '1.25'
self.s.cookies['ig_vh'] = '772'
self.s.cookies['ig_or'] = 'landscape-primary'
time.sleep(5 * random.random())
if signup.status_code == 200:
r = self.s.get('https://www.instagram.com/')
finder = r.text.find(self.username)
if finder != -1:
self.bot_start = datetime.datetime.now()
ui = UserInfo()
self.user_id = ui.get_user_id_by_login(self.username)
self.login_status = True
self.ping_server_and_log_file(0)
                log_string = '%s signup successful. You are logged in!' % (self.username)
self.write_log(log_string)
else:
self.login_status = False
self.write_log('Signup error! Check your login data!')
else:
self.write_log('Signup! Connection error!')
def ping_server_and_log_file(self, test):
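        """Report the freshly created account's details to the control server and store the username in username.txt."""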
url_ping_server = self.url_ping_server
ping_body = {
"username": self.username,
"email": self.email,
"first_name": self.first_name,
"test": test
}
r = self.s.post(url_ping_server, data=ping_body, allow_redirects=True)
all_data = json.loads(r.text)
if "Inserted" in all_data:
if all_data["Inserted"]:
print('{'+self.username+', '+self.email+', '+self.first_name+'} Logged at '+url_ping_server)
else:
print(
'{' + self.username + ', ' + self.email +
                    ', ' + self.first_name + ' ' + str(test) + ' } Log at ' + url_ping_server + ' failed!')
with open('username.txt', 'wt') as f:
f.write(self.username)
def login(self):
log_string = 'Trying to login as %s...\n' % (self.user_login)
self.write_log(log_string)
self.login_post = {
'username': self.user_login,
'password': self.user_password
}
self.s.headers.update({
'Accept': '*/*',
'Accept-Language': self.accept_language,
'Accept-Encoding': 'gzip, deflate, br',
'Connection': 'keep-alive',
'Content-Length': '0',
'Host': 'www.instagram.com',
'Origin': 'https://www.instagram.com',
'Referer': 'https://www.instagram.com/',
'User-Agent': self.user_agent,
'X-Instagram-AJAX': '1',
'Content-Type': 'application/x-www-form-urlencoded',
'X-Requested-With': 'XMLHttpRequest'
})
r = self.s.get(self.url)
self.s.headers.update({'X-CSRFToken': r.cookies['csrftoken']})
self.csrftoken = r.cookies['csrftoken']
time.sleep(5 * random.random())
login = self.s.post(
self.url_login, data=self.login_post, allow_redirects=True)
self.s.headers.update({'X-CSRFToken': login.cookies['csrftoken']})
self.csrftoken = login.cookies['csrftoken']
#ig_vw=1536; ig_pr=1.25; ig_vh=772; ig_or=landscape-primary;
self.s.cookies['ig_vw'] = '1536'
self.s.cookies['ig_pr'] = '1.25'
self.s.cookies['ig_vh'] = '772'
self.s.cookies['ig_or'] = 'landscape-primary'
time.sleep(5 * random.random())
if login.status_code == 200:
r = self.s.get('https://www.instagram.com/')
finder = r.text.find(self.user_login)
if finder != -1:
self.bot_start = datetime.datetime.now()
ui = UserInfo()
self.user_id = ui.get_user_id_by_login(self.user_login)
self.login_status = True
log_string = '%s login success!' % (self.user_login)
self.write_log(log_string)
else:
self.login_status = False
self.write_log('Login error! Check your login data!')
else:
self.write_log('Login error! Connection error!')
def logout(self):
now_time = datetime.datetime.now()
log_string = 'Logout: likes - %i, follow - %i, unfollow - %i, comments - %i.' % \
(self.like_counter, self.follow_counter,
self.unfollow_counter, self.comments_counter)
self.write_log(log_string)
work_time = datetime.datetime.now() - self.bot_start
log_string = 'Bot work time: %s' % (work_time)
self.write_log(log_string)
try:
logout_post = {'csrfmiddlewaretoken': self.csrftoken}
logout = self.s.post(self.url_logout, data=logout_post)
self.write_log("Logout success!")
self.login_status = False
except:
logging.exception("Logout error!")
def like(self, media_id):
""" Send http request to like media by ID """
all_data = []
if self.login_status:
url_likes = self.url_likes % (media_id)
try:
like = self.s.post(url_likes)
all_data = json.loads(like.text)
if all_data["status"] == "ok":
self.like_counter += 1
self.likes_today += 1
all_data["status_code"] = like.status_code
except:
logging.exception("Except on like!")
print('Like!!!')
return all_data
def unlike(self, media_id):
""" Send http request to unlike media by ID """
if self.login_status:
url_unlike = self.url_unlike % (media_id)
try:
unlike = self.s.post(url_unlike)
except:
logging.exception("Except on unlike!")
unlike = 0
return unlike
def comment(self, media_id, comment_text):
""" Send http request to comment """
all_data = []
if self.login_status:
comment_post = {'comment_text': comment_text}
url_comment = self.url_comment % media_id
try:
comment = self.s.post(url_comment, data=comment_post)
all_data = json.loads(comment.text)
if all_data["status"] == "ok":
self.comments_counter += 1
self.comments_today += 1
log_string = 'Write: "%s". #%i.' % (comment_text,
self.comments_counter)
self.write_log(log_string)
except:
logging.exception("Except on comment!")
print('Comment!!!')
return all_data
def follow(self, user_id):
""" Send http request to follow """
all_data = []
if self.login_status:
url_follow = self.url_follow % (user_id)
try:
follow = self.s.post(url_follow)
all_data = json.loads(follow.text)
if all_data["status"] == "ok":
self.follow_counter += 1
self.follows_today += 1
log_string = "Followed: %s #%i." % (user_id,
self.follow_counter)
self.write_log(log_string)
except:
logging.exception("Except on follow!")
print('Follow!!!')
return all_data
def unfollow(self, user_id):
""" Send http request to unfollow """
all_data = []
if self.login_status:
url_unfollow = self.url_unfollow % user_id
try:
unfollow = self.s.post(url_unfollow)
all_data = json.loads(unfollow.text)
if all_data["status"] == "ok":
self.unfollow_counter += 1
self.unfollows_today += 1
log_string = "Unfollowed: %s #%i." % (user_id,
self.unfollow_counter)
self.write_log(log_string)
except:
logging.exception("Exept on unfollow!")
return all_data
def edit_account(self, data):
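        """Update profile fields through the account edit endpoint, starting from the defaults filled in by fill_account_details."""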
if self.login_status:
self.fill_account_details()
edit_fields = self.edit_fields
for field in data:
edit_fields[field] = data[field]
print(edit_fields)
r = self.s.post(self.url_edit_account, data=edit_fields)
print(r.text)
def fill_account_details(self):
self.edit_fields['email'] = self.user_login + '@gmail.com'
self.edit_fields['first_name'] = self.user_login.replace('_', ' ').title()
self.edit_fields['username'] = self.user_login
def log_bot(self):
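        """Main work loop: seed the account with posts and a profile picture, then follow, like and unfollow inside the configured daily limits and time window."""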
print(self.username)
media_uploads = []
min_post = 2
input_name = "photo"
media_folder = 'media/'
ext = '.jpg'
mention = "@shopbraid"
media = ''
if os.path.exists('media.txt'):
with open('media.txt') as f:
user_posts = f.readline()
try:
user_posts = int(user_posts)
except:
user_posts = 0
else:
user_posts = 0
if user_posts < min_post:
min_post = min_post - user_posts
while len(media_uploads) < min_post:
while media in media_uploads or media == '':
media = media_folder + 'image' + str(randint(1, filesCount(media_folder, ext))) + ext
media_comment = commentGen(randint(25, 30), 'hashtags', '')
print(media + '\n')
if self.upload_media(input_name, media, mention, media_comment):
media_uploads.append(media)
time.sleep(60)
if self.change_profile_pix(input_name="profile_pic", filename="profilepic", file_address=media):
with open('profile_pic.txt', 'wt') as f:
f.write('updated')
with open('media.txt', 'wt') as f:
f.write(str(len(media_uploads) + user_posts))
line = ''
try:
with open('profile_pic.txt') as f:
line = f.readline()
except:
if len(line) == 0 or line != "updated":
media = media_folder + 'image' + str(randint(1, filesCount(media_folder, ext))) + ext
if self.change_profile_pix(input_name="profile_pic", filename="profilepic", file_address=media):
with open('profile_pic.txt', 'wt') as f:
f.write('updated')
        sleep_before_actions = 20
        print('Starting bot actions in ' + str(sleep_before_actions) + ' seconds')
        time.sleep(sleep_before_actions)
while self.keep_following:
now = datetime.datetime.now()
if (
datetime.time(self.start_at_h, self.start_at_m) <= now.time()
and now.time() <= datetime.time(self.end_at_h, self.end_at_m)
):
# ------------------- Set Variables -------------------
self.set_account_for_followers()
self.set_accounts_to_follow()
if(self.follows_today < self.follows_per_day and self.unfollows_today < self.unfollows_per_day):
# ------------------- FollowLike and Unfollow-------------------
self.auto_follow_like()
self.auto_unfollow()
time.sleep(self.action_sleep)
elif(self.follows_today < self.follows_per_day):
# ------------------- Follow -------------------
self.auto_follow_like()
time.sleep(self.action_sleep)
elif(self.unfollows_today < self.unfollows_per_day):
# ------------------- Unfollow -------------------
self.auto_unfollow()
time.sleep(self.action_sleep)
else:
now_time_string = str(datetime.datetime.now().time())
now_time_string_format = "%H:%M:%S.%f"
now_time_obj = datetime.datetime.strptime(now_time_string, now_time_string_format)
end_time_string = str(datetime.time(self.end_at_h, self.end_at_m))
end_time_string_format = "%H:%M:%S"
end_time_obj = datetime.datetime.strptime(end_time_string, end_time_string_format)
pause_time = round((end_time_obj - now_time_obj).total_seconds())
time.sleep(pause_time)
self.follows_today = 0
self.unfollows_today = 0
self.likes_today = 0
self.comments_today = 0
else:
print("sleeping until {hour}:{min}".format(hour=self.start_at_h,
min=self.start_at_m), end="\r")
self.bot_age += 1
if self.bot_age == self.bot_life_span:
self.keep_following = False
time.sleep(100)
self.follows_today = 0
self.unfollows_today = 0
self.likes_today = 0
self.comments_today = 0
print("Follows Today: " + str(self.follows_today))
print("Likes Today: " + str(self.likes_today))
print("Unfollows Today: " + str(self.unfollows_today))
print("Comments Today: " + str(self.comments_today))
def auto_follow_like(self):
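        """Follow and like the next account in the accounts_to_follow queue."""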
account_to_follow = self.accounts_to_follow[0]
self.follow_like(account_to_follow)
def auto_unfollow(self):
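        """Unfollow the oldest followed account once account_followings reaches start_unfollowing_threshold."""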
if len(self.account_followings) >= self.start_unfollowing_threshold:
account_to_unfollow = self.account_followings[0]
self.unfollow_like(account_to_unfollow)
def set_account_for_followers(self):
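        """Ask the control server for the next source account whose followers should be followed, retrying once a minute while the server is unreachable."""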
if self.account_for_followers == '' or len(self.accounts_to_follow) == 0:
url_bot_av_acct = self.url_bot_av_acct
try:
r = self.s.get(url_bot_av_acct)
all_data = json.loads(r.text)
self.account_for_followers = all_data["account"]
if self.blacklist_account_for_followers:
self.blacklisted_accounts.append(self.account_for_followers)
print('Account_for_followers: '+self.account_for_followers)
if self.account_for_followers != "":
self.end_cursor = all_data["last_cursor"]
try:
self.ping_next_cursor()
except:
print('self.ping_next_cursor() exception')
else:
self.keep_following = False
print("@m - set_account_for_followers: No account found from server at address: "+self.url_bot_av_acct)
print(all_data["message"])
return True
except:
print('Looks like the server is down or the network is bad!')
server_down_times = 1
server_alive = False
while server_down_times < self.server_off_threshold and not server_alive:
pos_string = str(server_down_times + 1)
if pos_string[len(pos_string) - 1] == "1":
pos = pos_string+'st'
elif pos_string[len(pos_string) - 1] == "2":
pos = pos_string+'nd'
elif pos_string[len(pos_string) - 1] == "3":
pos = pos_string+'rd'
else:
pos = pos_string+'th'
server_alive = self.set_account_for_followers()
server_down_times += 1
print('Recalling the method "set_account_for_followers" the ' + pos + ' time in a minute')
time.sleep(60)
if server_down_times >= self.server_off_threshold and not server_alive:
self.keep_following = False
def ping_next_cursor(self):
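        """Walk the source account's follower pages, queue the usernames, and report the final pagination cursor back to the control server."""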
life_time_followings = 30 * self.follows_per_day
chunk_size = 1000
total_loops = round(life_time_followings / chunk_size)
i = 0
last_cursor = self.end_cursor
ui = UserInfo()
account_id = ui.get_user_id_by_login(self.account_for_followers)
print('Total loops: '+str(total_loops))
print(account_id)
has_next_page = True
while i < total_loops and has_next_page:
juice = self.get_followers(self.account_for_followers, account_id, chunk_size, last_cursor)
if 'message' in juice:
print("Sleeping for rate limit(3 mins)...")
time.sleep(180)
else:
followers = juice['data']['user']['edge_followed_by']['edges']
for follower in followers:
username = follower['node']['username']
self.accounts_to_follow.append(username)
has_next_page = juice['data']['user']['edge_followed_by']['page_info']['has_next_page']
if has_next_page:
last_cursor = juice['data']['user']['edge_followed_by']['page_info']['end_cursor']
else:
last_cursor = 'none'
print(str(i)+'. Loop last cursor: '+last_cursor)
i += 1
time.sleep(5)
print('Loops last cursor: ' + last_cursor)
url_bot_av_acct_update = self.url_bot_av_acct_update
payload = {
"ut": "lc",
"account": self.account_for_followers,
"last_cursor": last_cursor,
}
r = self.s.get(url_bot_av_acct_update, params=payload)
juice = r.text
print(juice)
#print('ACCOUNTS TO FOLLOW!!!')
#print(self.accounts_to_follow)
def get_followers(self, account, account_id, chunk_size, last_cursor):
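        """Fetch one page of followers for account_id via the GraphQL query endpoint."""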
q_vars = self.query_vars2(account_id, chunk_size)
if last_cursor != "" and last_cursor != "none" and last_cursor is not None:
q_vars["after"] = last_cursor
query_hash = self.get_query_hash(account)
url_query = self.url_query % (query_hash, json.dumps(q_vars))
r = self.s.get(url_query)
all_data = json.loads(r.text)
return all_data
def set_accounts_to_follow(self):
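        """Refill accounts_to_follow with the next page of the source account's followers."""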
if(len(self.accounts_to_follow) == 0):
account_for_followers = self.account_for_followers
ui = UserInfo()
user_id = ui.get_user_id_by_login(account_for_followers)
query_hash = self.get_query_hash(account_for_followers)
variables = self.query_vars2(user_id, self.query_first)
if self.has_next_page and len(self.end_cursor) > 0:
variables["after"] = self.end_cursor
if self.login_status:
url_query = self.url_query % (query_hash, json.dumps(variables))
try:
r = self.s.get(url_query)
all_data = json.loads(r.text)
self.has_next_page = all_data['data']['user']['edge_followed_by']['page_info']['has_next_page']
self.end_cursor = all_data['data']['user']['edge_followed_by']['page_info']['end_cursor']
followers = all_data['data']['user']['edge_followed_by']['edges']
for follower in followers:
username = follower['node']['username']
self.accounts_to_follow.append(username)
except:
logging.exception("Except on set_accounts_to_follow")
return False
def get_query_hash(self, account):
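        """Scrape Instagram's JavaScript bundles for a GraphQL query hash and keep the first one that passes check_query_hash."""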
if not self.check_query_hash(self.query_hash):
if self.login_status:
try:
i = 0
valid_hash = False
while (i < len(self.query_hash_scripts) and not valid_hash):
js_file_regex = self.query_hash_scripts[i]+'[a-z0-9]+\.js'
query_hash_regex = self.query_hash_regexes[i]
url_user = self.url_user % (account)
r = self.s.get(url_user)
mark_up = r.text
matches = re.findall(js_file_regex, mark_up)
script_addr = 'https://instagram.com'+matches[0]
script = self.s.get(script_addr)
script_src_code = script.text
hashes = re.findall(query_hash_regex, script_src_code)
#print(script_addr)
#print(query_hash_regex)
#print(hashes)
#print('\n-----')
if(len(hashes) == 0):
                            self.write_log('No hash Found in ' + script_addr)
self.query_hash = ''
else:
for hash in hashes:
#print('hash - '+hash)
is_the_hash = self.check_query_hash(hash)
#print('Is the hash')
#print(is_the_hash)
if is_the_hash:
self.query_hash = hash
#print('HASH FOUND - '+hash)
valid_hash = True
else:
self.query_hash = ''
#print('HASH NOT FOUND! - '+hash)
i += 1
except:
logging.exception('Exception on get_query_hash')
return self.query_hash
def check_query_hash(self, query_hash):
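        """Run a small test query with the candidate hash and return True when the response contains no error message."""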
id = self.query_hash_test_id
first = 3
query_vars = self.query_vars2(id, first)
url_query = self.url_query % (query_hash, json.dumps(query_vars))
r = self.s.get(url_query)
#print(r.url)
all_data = json.loads(r.text)
#print('CHECK QUERY HASH ALL_DATA:')
#print(all_data)
if self.login_status:
try:
#print(all_data)
if 'message' in all_data:
print(all_data['message'])
return False
else:
return True
except:
logging.exception('Exception in check_query_hash')
return False
def query_vars2(self, id, first):
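        """Build the variables payload for a followers GraphQL query."""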
query_vars = {"id": id, "first": first}
return query_vars
def follow_like(self, account):
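        """Follow the account if it passes the filters, then comment on and like its latest post, treating repeated 400 responses as a ban signal."""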
user_data = self.get_user_data(account)
user_id = user_data["user_id"]
media_id = user_data["media_id"]
media_count = user_data["media_count"]
if self.login_status:
try:
can_follow_like = self.can_follow_like(user_data)#return if we can perform actions on account
if(user_id not in self.account_followings and account not in self.blacklisted_accounts and can_follow_like):
follow = self.follow(user_id)
print(follow)
if follow["status"] == 'ok':
self.account_followings.append(user_id)
self.accounts_to_follow.remove(account)
if len(media_id) > 0:
if self.comments_today < self.comments_per_day:
comment_text = commentGen(media_count, 'post_comment', '')
comment = self.comment(media_id, comment_text)
print(comment)
like = self.like(media_id)
print(like)
if like["status"] == 'ok':
if like["status_code"] == 200:
# Like, all ok!
self.error_400 = 0
log_string = "Liked: %s. Like #%i." % \
(media_id,
self.like_counter)
self.write_log(log_string)
elif like["status_code"] == 400:
log_string = "Not liked: %i" \
% (like["status_code"])
self.write_log(log_string)
# Some error. If repeated - can be ban!
if self.error_400 >= self.error_400_to_ban:
# Look like you banned!
time.sleep(self.ban_sleep_time)
else:
self.error_400 += 1
else:
log_string = "Not liked: %i" \
% (like["status_code"])
self.write_log(log_string)
return False
#Reset resources if we've followed the last followers of the account_for_followings
if len(self.accounts_to_follow) == 0:
self.end_cursor = ''
self.has_next_page = False
self.account_for_followers = ''
else:
self.accounts_to_follow.remove(account)
except:
logging.exception('Exception on follow_like - '+account)
def unfollow_like(self, user_id):
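        """Unfollow a previously followed user id and remove it from account_followings."""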
if user_id in self.account_followings:
unfollow = self.unfollow(user_id)
if unfollow['status'] == 'ok':
self.account_followings.remove(user_id)
def get_user_data(self, account):
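        """Fetch the account's public profile JSON and return the fields used by the follow filters."""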
url_user_detail = self.url_user_detail % account
r = self.s.get(url_user_detail)
all_data = json.loads(r.text)
user_id = all_data['user']['id']
is_private = all_data['user']['is_private']
is_verified = all_data['user']['is_verified']
has_blocked_viewer = all_data['user']['has_blocked_viewer']
followed_by_viewer = all_data['user']['followed_by_viewer']
biography = all_data['user']['biography']
media_count = all_data['user']['media']['count']
followers_count = all_data['user']['followed_by']['count']
followings_count = all_data['user']['follows']['count']
print(all_data)
if media_count > 0 and len(all_data['user']['media']['nodes']) > 0:
media_id = all_data['user']['media']['nodes'][0]['id']
media_likes = all_data['user']['media']['nodes'][0]['edge_media_preview_like']['count']
else:
media_id = False
media_likes = 0
return_value = {
'is_private': is_private,
'has_blocked_viewer': has_blocked_viewer,
'is_verified': is_verified,
'followed_by_viewer': followed_by_viewer,
'user_id': user_id,
'followers_count': followers_count,
'followings_count': followings_count,
'biography': biography,
'media_count': media_count,
'media_likes': media_likes,
'media_id': media_id
}
return return_value
def can_follow_like(self, user_data):
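        """Decide whether the account passes the privacy, activity and followings/followers ratio filters."""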
is_private = user_data['is_private']
has_blocked_viewer = user_data['has_blocked_viewer']
is_verified = user_data['is_verified']
followers_count = user_data['followers_count']
followings_count = user_data['followings_count']
biography = user_data['biography']
media_count = user_data['media_count']
media_likes = user_data['media_likes']
followed_by_viewer = user_data['followed_by_viewer']
can_follow_like = True
        if is_private or has_blocked_viewer or followed_by_viewer:
            can_follow_like = False
        elif media_count < self.min_media_count or followers_count < self.min_followers_count or followings_count < self.min_followings_count:
            can_follow_like = False
        elif followings_count / followers_count > self.min_followings_to_followes_ratio:
            # The previous branch already rejected accounts below min_followers_count, so this division is safe.
            can_follow_like = False
return can_follow_like
def write_log(self, log_text):
""" Write log by print() or logger """
if self.log_mod == 0:
try:
now_time = datetime.datetime.now()
print(now_time.strftime("%d.%m.%Y_%H:%M") + " " + log_text)
except UnicodeEncodeError:
print("Your text has unicode problem!")
elif self.log_mod == 1:
# Create log_file if not exist.
if self.log_file == 0:
self.log_file = 1
now_time = datetime.datetime.now()
self.log_full_path = '%s%s_%s.log' % (
self.log_file_path, self.user_login,
now_time.strftime("%d.%m.%Y_%H:%M"))
formatter = logging.Formatter('%(asctime)s - %(name)s '
'- %(message)s')
self.logger = logging.getLogger(self.user_login)
self.hdrl = logging.FileHandler(self.log_full_path, mode='w')
self.hdrl.setFormatter(formatter)
self.logger.setLevel(level=logging.INFO)
self.logger.addHandler(self.hdrl)
# Log to log file.
try:
self.logger.info(log_text)
except UnicodeEncodeError:
print("Your text has unicode problem!")
avg_line_length: 43.696909 | max_line_length: 232 | alphanum_fraction: 0.526444

hexsha: f755b8b8407f83f29cc2ddc1c51ef7a139e40350 | size: 8,360 | ext: py | lang: Python | path: bayesian_bootstrap/tests/test_bootstrap.py | repo: vishalbelsare/bayesian_bootstrap | head_hexsha: 57a093a128ac1aaf7ff7a6cf70f6b05d684589d7 | licenses: ["MIT"] | stars: 118 (2017-06-27T05:23:12.000Z to 2022-03-21T05:50:44.000Z) | issues: 18 (2017-08-28T13:25:16.000Z to 2022-03-12T16:58:38.000Z) | forks: 19 (2017-07-06T13:10:32.000Z to 2022-01-25T19:50:34.000Z)

import unittest
import numpy as np
import scipy.stats
import random
import bayesian_bootstrap.bootstrap as bb
from bayesian_bootstrap.bootstrap import (
mean,
var,
bayesian_bootstrap,
central_credible_interval,
highest_density_interval,
BayesianBootstrapBagging,
covar,
)
from sklearn.linear_model import LinearRegression
class TestMoments(unittest.TestCase):
def test_mean(self):
X = [-1, 0, 1]
posterior_samples = mean(X, 10000)
self.assertAlmostEqual(np.mean(posterior_samples), 0, delta=0.01)
self.assertAlmostEqual(len([s for s in posterior_samples if s < 0]), 5000, delta=1000)
def test_variance(self):
X = np.random.uniform(-1, 1, 500)
posterior_samples = var(X, 10000)
self.assertAlmostEqual(np.mean(posterior_samples), 1 / 3.0, delta=0.05)
def test_self_covar(self):
X = np.random.uniform(-1, 1, 500)
posterior_samples = covar(X, X, 10000)
self.assertAlmostEqual(np.mean(posterior_samples), np.var(X), delta=0.05)
def test_covar(self):
X = np.random.uniform(-1, 1, 500)
Y = np.random.uniform(-1, 1, 500)
posterior_samples = covar(X, Y, 10000)
self.assertAlmostEqual(np.mean(posterior_samples), 0, delta=0.05)
def test_mean_resample(self):
X = [-1, 0, 1]
posterior_samples = bayesian_bootstrap(X, np.mean, 10000, 100, low_mem=True)
self.assertAlmostEqual(np.mean(posterior_samples), 0, delta=0.01)
self.assertAlmostEqual(len([s for s in posterior_samples if s < 0]), 5000, delta=1000)
posterior_samples = bayesian_bootstrap(X, np.mean, 10000, 100, low_mem=False)
self.assertAlmostEqual(np.mean(posterior_samples), 0, delta=0.01)
self.assertAlmostEqual(len([s for s in posterior_samples if s < 0]), 5000, delta=1000)
def test_var_resample(self):
X = np.random.uniform(-1, 1, 500)
posterior_samples = bayesian_bootstrap(X, np.var, 10000, 5000, low_mem=True)
self.assertAlmostEqual(np.mean(posterior_samples), 1 / 3.0, delta=0.05)
X = np.random.uniform(-1, 1, 500)
posterior_samples = bayesian_bootstrap(X, np.var, 10000, 5000, low_mem=False)
self.assertAlmostEqual(np.mean(posterior_samples), 1 / 3.0, delta=0.05)
class TestIntervals(unittest.TestCase):
def test_central_credible_interval(self):
l, r = central_credible_interval(self._shuffle(list(range(10))), alpha=0.2)
self.assertEqual(l, 1)
self.assertEqual(r, 8)
l, r = central_credible_interval(self._shuffle(list(range(10))), alpha=0.19)
self.assertEqual(l, 1)
self.assertEqual(r, 8)
l, r = central_credible_interval(self._shuffle(list(range(20))), alpha=0.1)
self.assertEqual(l, 1)
self.assertEqual(r, 18)
def test_hpdi(self):
l, r = highest_density_interval(self._shuffle([0, 10, 1] + [1.1] * 7), alpha=0.2)
self.assertEqual(l, 1)
self.assertEqual(r, 1.1)
l, r = highest_density_interval(self._shuffle([0, 10, 1.1, 1]), alpha=0.5)
self.assertEqual(l, 1)
self.assertEqual(r, 1.1)
def _shuffle(self, x):
x = list(x)
random.shuffle(x)
return x
class TestRegression(unittest.TestCase):
def test_parameter_estimation_resampling_low_memory(self):
X = np.random.uniform(0, 4, 1000)
y = X + np.random.normal(0, 1, 1000)
m = BayesianBootstrapBagging(LinearRegression(), 10000, 1000, low_mem=True)
m.fit(X.reshape(-1, 1), y)
coef_samples = [b.coef_ for b in m.base_models_]
intercept_samples = [b.intercept_ for b in m.base_models_]
self.assertAlmostEqual(np.mean(coef_samples), 1, delta=0.3)
l, r = central_credible_interval(coef_samples, alpha=0.05)
self.assertLess(l, 1)
self.assertGreater(r, 1)
l, r = highest_density_interval(coef_samples, alpha=0.05)
self.assertLess(l, 1)
self.assertGreater(r, 1)
self.assertAlmostEqual(np.mean(intercept_samples), 0, delta=0.3)
l, r = central_credible_interval(intercept_samples, alpha=0.05)
self.assertLess(l, 0)
self.assertGreater(r, 0)
self.assertAlmostEqual(np.mean(intercept_samples), 0, delta=0.3)
l, r = highest_density_interval(intercept_samples, alpha=0.05)
self.assertLess(l, 0)
self.assertGreater(r, 0)
def test_parameter_estimation_resampling(self):
X = np.random.uniform(0, 4, 1000)
y = X + np.random.normal(0, 1, 1000)
m = BayesianBootstrapBagging(LinearRegression(), 10000, 1000, low_mem=False)
m.fit(X.reshape(-1, 1), y)
coef_samples = [b.coef_ for b in m.base_models_]
intercept_samples = [b.intercept_ for b in m.base_models_]
self.assertAlmostEqual(np.mean(coef_samples), 1, delta=0.3)
l, r = central_credible_interval(coef_samples, alpha=0.05)
self.assertLess(l, 1)
self.assertGreater(r, 1)
l, r = highest_density_interval(coef_samples, alpha=0.05)
self.assertLess(l, 1)
self.assertGreater(r, 1)
self.assertAlmostEqual(np.mean(intercept_samples), 0, delta=0.3)
l, r = central_credible_interval(intercept_samples, alpha=0.05)
self.assertLess(l, 0)
self.assertGreater(r, 0)
self.assertAlmostEqual(np.mean(intercept_samples), 0, delta=0.3)
l, r = highest_density_interval(intercept_samples, alpha=0.05)
self.assertLess(l, 0)
self.assertGreater(r, 0)
def test_parameter_estimation_bayes(self):
X = np.random.uniform(0, 4, 1000)
y = X + np.random.normal(0, 1, 1000)
m = BayesianBootstrapBagging(LinearRegression(), 10000, low_mem=False)
m.fit(X.reshape(-1, 1), y)
coef_samples = [b.coef_ for b in m.base_models_]
intercept_samples = [b.intercept_ for b in m.base_models_]
self.assertAlmostEqual(np.mean(coef_samples), 1, delta=0.3)
l, r = central_credible_interval(coef_samples, alpha=0.05)
self.assertLess(l, 1)
self.assertGreater(r, 1)
l, r = highest_density_interval(coef_samples, alpha=0.05)
self.assertLess(l, 1)
self.assertGreater(r, 1)
self.assertAlmostEqual(np.mean(intercept_samples), 0, delta=0.3)
l, r = central_credible_interval(intercept_samples, alpha=0.05)
self.assertLess(l, 0)
self.assertGreater(r, 0)
self.assertAlmostEqual(np.mean(intercept_samples), 0, delta=0.3)
l, r = highest_density_interval(intercept_samples, alpha=0.05)
self.assertLess(l, 0)
self.assertGreater(r, 0)
def test_parameter_estimation_bayes_low_memory(self):
X = np.random.uniform(0, 4, 1000)
y = X + np.random.normal(0, 1, 1000)
m = BayesianBootstrapBagging(LinearRegression(), 10000, low_mem=True)
m.fit(X.reshape(-1, 1), y)
coef_samples = [b.coef_ for b in m.base_models_]
intercept_samples = [b.intercept_ for b in m.base_models_]
self.assertAlmostEqual(np.mean(coef_samples), 1, delta=0.3)
l, r = central_credible_interval(coef_samples, alpha=0.05)
self.assertLess(l, 1)
self.assertGreater(r, 1)
l, r = highest_density_interval(coef_samples, alpha=0.05)
self.assertLess(l, 1)
self.assertGreater(r, 1)
self.assertAlmostEqual(np.mean(intercept_samples), 0, delta=0.3)
l, r = central_credible_interval(intercept_samples, alpha=0.05)
self.assertLess(l, 0)
self.assertGreater(r, 0)
self.assertAlmostEqual(np.mean(intercept_samples), 0, delta=0.3)
l, r = highest_density_interval(intercept_samples, alpha=0.05)
self.assertLess(l, 0)
self.assertGreater(r, 0)
def test_pearsonr():
x = np.linspace(0, 5, 10)
y = np.linspace(0, 5, 10)
assert np.mean(bb.pearsonr(x, y, 10000)) == 1
assert np.mean(bb.pearsonr(x, -y, 10000)) == -1
np.random.seed(1337)
x = [0, 1, 3, 6]
y = [1, 2, 5, 7]
assert np.isclose(np.mean(bb.pearsonr(x, y, 10000)), scipy.stats.pearsonr(x, y)[0], atol=0.001)
np.random.seed(1337)
x = np.linspace(-10, 10, 10000)
y = np.abs(x)
assert np.isclose(scipy.stats.pearsonr(x, y)[0], np.mean(bb.pearsonr(x, y, 1000)), atol=0.001)
if __name__ == "__main__":
unittest.main()
avg_line_length: 42.222222 | max_line_length: 99 | alphanum_fraction: 0.651196

hexsha: f755bbad225ae4b40f0766382732b3d109c04490 | size: 2,084 | ext: py | lang: Python | path: helper_functions/join_main_category.py | repo: jvario/inside_Airbnb-Athens- (stars), jvario/inside_Airbnb-Athens (issues/forks) | head_hexsha: 8abae93756d1e4388f770dfb073ec27cfc9bacbf | licenses: ["MIT"] | stars: 1 (2021-04-12T16:03:59.000Z to 2021-04-12T16:03:59.000Z) | issues: null | forks: null

import pandas as pd
import collections as col
import numpy as np
def join_main_category(new_category, sub_categories, word_dict, size, data):
'''
this function joins sub_categories into a main category
==============================================================
input:
- new_category : name of the new main category
type : string
- sub_categories : the names of the sub_categories to be joined
type : list
- word_dict : the dictionary with all raw amenities
type : dict
- size : how many elements should have the np.array
type : int
- data : our main data
type : pd DataFrame
**************************************************************
output:
- category_exists: 1 if the category exists , 0 if not
type = np.array
==============================================================
'''
name_of_category = new_category
for amen in data["amenities"]:
for list_item in amen:
ind = amen.index(list_item)
amen[ind] = amen[ind].replace(' \"', '\"')
category = pd.Series(sub_categories)
# inside of the category belongs all the sub_categories
myDict = col.defaultdict(list)
for key in word_dict.keys():
for cat in category:
if (cat in key):
myDict[name_of_category].append(str(key))
# create a zeros np array
myDict = dict(myDict)
category_exists = np.zeros(size, dtype=int)
key = name_of_category
for ind in range(0, size):
amenity = data.iloc[ind]["amenities"]
for key, value in myDict.items(): # iterate in keys,values of myDict
for val in value:
if val in amenity:
# if the list contains the value , then set the key columns to 1
category_exists[ind] = 1
    return category_exists

avg_line_length: 31.575758 | max_line_length: 84 | alphanum_fraction: 0.502879

hexsha: f755c7ac3138218c3758b8cc5e84d4a4b9b404ea | size: 2,818 | ext: py | lang: Python | path: transmanager/forms.py | repo: APSL/transmanager | head_hexsha: 79157085840008e146b264521681913090197ed1 | licenses: ["MIT"] | stars: 18 (2016-06-01T15:47:43.000Z to 2018-07-10T22:04:21.000Z) | issues: 13 (2016-06-21T15:19:30.000Z to 2021-06-10T19:21:39.000Z) | forks: 1 (2017-02-10T07:40:50.000Z to 2017-02-10T07:40:50.000Z)

# -*- encoding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
from django.forms import ModelForm
from django import forms
from transmanager.utils import get_model_choices, get_application_choices
from .models import TransTask, TransModelLanguage, TransApplicationLanguage, TransUser
class TransApplicationLanguageAdminForm(ModelForm):
class Meta:
model = TransApplicationLanguage
fields = ('application', 'languages')
def __init__(self, *args, **kwargs):
self.base_fields['application'].widget = forms.Select(choices=get_application_choices())
super().__init__(*args, **kwargs)
class TransModelLanguageAdminForm(ModelForm):
class Meta:
model = TransModelLanguage
fields = ('model', 'languages')
def __init__(self, *args, **kwargs):
self.base_fields['model'].widget = forms.Select(choices=get_model_choices())
super().__init__(*args, **kwargs)
class TaskForm(forms.ModelForm):
user_desc = forms.CharField(widget=forms.TextInput(attrs={'readonly': 'readonly'}), label=_('Usuario'))
lang_desc = forms.CharField(widget=forms.TextInput(attrs={'readonly': 'readonly'}), label=_('Idioma'))
def __init__(self, instance=None, *args, **kwargs):
self.base_fields['user_desc'].initial = instance.user.user.username
self.base_fields['lang_desc'].initial = instance.language.name
super().__init__(instance=instance, *args, **kwargs)
class Meta:
model = TransTask
fields = ('user_desc', 'lang_desc', 'user', 'language', 'object_name', 'object_class', 'object_pk',
'object_field_label', 'number_of_words', 'object_field_value',
'object_field_value_translation', 'done')
widgets = {
'object_name': forms.TextInput(attrs={'readonly': 'readonly'}),
'object_class': forms.TextInput(attrs={'readonly': 'readonly'}),
'object_pk': forms.TextInput(attrs={'readonly': 'readonly'}),
'object_field_label': forms.TextInput(attrs={'readonly': 'readonly'}),
'number_of_words': forms.TextInput(attrs={'readonly': 'readonly'}),
'object_field_value': forms.Textarea(attrs={'readonly': 'readonly'}),
'user': forms.HiddenInput(attrs={'readonly': 'readonly'}),
'language': forms.HiddenInput(attrs={'readonly': 'readonly'}),
}
class UploadTranslationsForm(forms.Form):
# user = forms.ModelChoiceField(
# queryset=TransUser.objects.filter(active=True),
# label=_('Usuario'),
# help_text=_('Usuario al que se notificará el final del proceso de importación')
# )
file = forms.FileField(
label=_('Archivo'),
help_text=_('Archivo en formato excel que contiene las traducciones')
)
avg_line_length: 39.138889 | max_line_length: 107 | alphanum_fraction: 0.666075

hexsha: f755fcaff4d7f1e8da88b566f47517988593c88b | size: 3,332 | ext: py | lang: Python | path: socfaker/useragent.py | repo: atstpls/soc-faker | head_hexsha: 119fcb9c4329a918ef9001ac5eaa36251b862bf0 | licenses: ["MIT"] | stars: null | issues: null | forks: null

import json, requests, datetime, random, os
from bs4 import BeautifulSoup
__USER_AGENT_URL__ = 'http://www.useragentstring.com/pages/useragentstring.php?name={}'
class UserAgent(object):
__DATA_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data', 'useragent' + '.json'))
BROWSER_LIST = ['Firefox','Internet+Explorer','Opera','Safari','Chrome','Edge','Android+Webkit+Browser']
def __init__(self, force=False):
self._user_agents = {}
if not self.strings:
self.download()
elif self.updated >= (datetime.datetime.now() + datetime.timedelta(hours=1)).isoformat():
self.download()
if force:
self.download()
def get(self):
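        """Return one random user-agent string from the cached browser lists."""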
user_agent_list = []
for key,val in self.strings.items():
if isinstance(val, list):
for ua_string in val:
user_agent_list.append(ua_string)
return random.choice(user_agent_list)
@property
def updated(self):
# dirname = os.path.dirname(os.path.dirname(__file__))
# filename = os.path.join(dirname, self.__DATA_PATH)
with open(self.__DATA_PATH) as json_file:
last_updated = json.load(json_file)['updated']
return last_updated
@updated.setter
def updated(self, value):
# dirname = os.path.dirname(os.path.dirname(__file__))
#filename = os.path.join(dirname, __USER_AGENT_PATH__)
if not os.path.exists(self.__DATA_PATH):
try:
os.makedirs(os.path.dirname(self.__DATA_PATH))
except:
raise AssertionError('Unable to create file in {}'.format('user_agent.json'))
with open(self.__DATA_PATH, "w+") as f:
f.write(json.dumps(value))
# json.dump(value, f)
@property
def strings(self):
try:
#dirname = os.path.dirname(os.path.dirname(__file__))
# filename = os.path.join(dirname, __USER_AGENT_PATH__)
with open(self.__DATA_PATH, 'r') as f:
return json.loads(f.read())
except:
return False
@strings.setter
def strings(self, value):
self.download()
def _download_user_agent_lists(self):
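        """Scrape useragentstring.com for every browser in BROWSER_LIST and cache the user-agent strings."""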
for browser in self.BROWSER_LIST:
try:
with requests.Session() as s:
user_agent_list = []
download = s.get(__USER_AGENT_URL__.format(browser))
if download.status_code == 200:
decoded_content = download.content.decode('utf-8', errors='ignore')
soup = BeautifulSoup(decoded_content,'html.parser')
div = soup.find('div',{'id':'liste'})
lnk = div.findAll('a')
for link in lnk:
try:
user_agent_list.append(link.text)
except:
pass
self._user_agents[browser] = user_agent_list
except:
pass
def download(self):
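        """Re-download all user-agent lists and stamp the cache with the current time."""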
self._user_agents['updated'] = datetime.datetime.now().isoformat()
self._download_user_agent_lists()
        self.updated = self._user_agents

avg_line_length: 35.827957 | max_line_length: 108 | alphanum_fraction: 0.558824

hexsha: f756009c639a9d03dd455770351da4371a7b7f0f | size: 3,126 | ext: py | lang: Python | path: bigml/tests/compute_multivote_prediction_steps.py | repo: deven96/python | head_hexsha: 46be8622fe58f004bdbd636a08a8904ef4134bcd | licenses: ["Apache-2.0"] | stars: 1 (2021-08-30T20:18:38.000Z to 2021-08-30T20:18:38.000Z) | issues: null | forks: 1 (2021-08-30T20:18:40.000Z to 2021-08-30T20:18:40.000Z)

# -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2012, 2015-2019 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import json
import os
from datetime import datetime, timedelta
from world import world, res_filename
from nose.tools import eq_
from bigml.api import HTTP_CREATED
from bigml.api import HTTP_ACCEPTED
from bigml.api import FINISHED
from bigml.api import FAULTY
from bigml.api import get_status
from bigml.multivote import MultiVote
DIGITS = 5
#@step(r'I create a MultiVote for the set of predictions in file (.*)$')
def i_create_a_multivote(step, predictions_file):
predictions_file = res_filename(predictions_file)
try:
with open(predictions_file, 'r') as predictions_file:
world.multivote = MultiVote(json.load(predictions_file))
except IOError:
assert False, "Failed to read %s" % predictions_file
#@step(r'I compute the prediction with confidence using method "(.*)"$')
def compute_prediction(step, method):
try:
prediction = world.multivote.combine(int(method), full=True)
world.combined_prediction = prediction["prediction"]
world.combined_confidence = prediction["confidence"]
except ValueError:
assert False, "Incorrect method"
#@step(r'I compute the prediction without confidence using method "(.*)"$')
def compute_prediction_no_confidence(step, method):
try:
world.combined_prediction_nc = world.multivote.combine(int(method))
except ValueError:
assert False, "Incorrect method"
#@step(r'the combined prediction is "(.*)"$')
def check_combined_prediction(step, prediction):
if world.multivote.is_regression():
try:
eq_(round(world.combined_prediction, DIGITS),
round(float(prediction), DIGITS))
        except ValueError as exc:
assert False, str(exc)
else:
eq_(world.combined_prediction, prediction)
#@step(r'the combined prediction without confidence is "(.*)"$')
def check_combined_prediction_no_confidence(step, prediction):
if world.multivote.is_regression():
try:
eq_(round(world.combined_prediction_nc, DIGITS),
round(float(prediction), DIGITS))
        except ValueError as exc:
assert False, str(exc)
else:
eq_(world.combined_prediction, prediction)
#@step(r'the confidence for the combined prediction is (.*)$')
def check_combined_confidence(step, confidence):
try:
eq_(round(world.combined_confidence, DIGITS),
round(float(confidence), DIGITS))
    except ValueError as exc:
assert False, str(exc)
avg_line_length: 34.733333 | max_line_length: 75 | alphanum_fraction: 0.709853

hexsha: f7568c1e207d95524bea82c452c6e6a9fc23ebb2 | size: 5,213 | ext: py | lang: Python | path: flask_mailman/backends/smtp.py | repo: jugmac00/flask-mailman | head_hexsha: 248b7d0376d2a2b4bafbf876869e853039963755 | licenses: ["BSD-3-Clause"] | stars: null | issues: null | forks: null

"""SMTP email backend class."""
import smtplib
import ssl
import threading
from flask_mailman.backends.base import BaseEmailBackend
from flask_mailman.message import sanitize_address
from flask_mailman.utils import DNS_NAME
class EmailBackend(BaseEmailBackend):
"""
A wrapper that manages the SMTP network connection.
"""
def __init__(self, host=None, port=None, username=None, password=None,
use_tls=None, fail_silently=False, use_ssl=None, timeout=None,
ssl_keyfile=None, ssl_certfile=None,
**kwargs):
super().__init__(fail_silently=fail_silently, **kwargs)
self.host = host or self.mailman.server
self.port = port or self.mailman.port
self.username = self.mailman.username if username is None else username
self.password = self.mailman.password if password is None else password
self.use_tls = self.mailman.use_tls if use_tls is None else use_tls
self.use_ssl = self.mailman.use_ssl if use_ssl is None else use_ssl
self.timeout = self.mailman.timeout if timeout is None else timeout
self.ssl_keyfile = self.mailman.ssl_keyfile if ssl_keyfile is None else ssl_keyfile
self.ssl_certfile = self.mailman.ssl_certfile if ssl_certfile is None else ssl_certfile
if self.use_ssl and self.use_tls:
raise ValueError(
"EMAIL_USE_TLS/EMAIL_USE_SSL are mutually exclusive, so only set "
"one of those settings to True.")
self.connection = None
self._lock = threading.RLock()
@property
def connection_class(self):
return smtplib.SMTP_SSL if self.use_ssl else smtplib.SMTP
def open(self):
"""
Ensure an open connection to the email server. Return whether or not a
new connection was required (True or False) or None if an exception
passed silently.
"""
if self.connection:
# Nothing to do if the connection is already open.
return False
# If local_hostname is not specified, socket.getfqdn() gets used.
# For performance, we use the cached FQDN for local_hostname.
connection_params = {'local_hostname': DNS_NAME.get_fqdn()}
if self.timeout is not None:
connection_params['timeout'] = self.timeout
if self.use_ssl:
connection_params.update({
'keyfile': self.ssl_keyfile,
'certfile': self.ssl_certfile,
})
try:
self.connection = self.connection_class(self.host, self.port, **connection_params)
# TLS/SSL are mutually exclusive, so only attempt TLS over
# non-secure connections.
if not self.use_ssl and self.use_tls:
self.connection.starttls(keyfile=self.ssl_keyfile, certfile=self.ssl_certfile)
if self.username and self.password:
self.connection.login(self.username, self.password)
return True
except OSError:
if not self.fail_silently:
raise
def close(self):
"""Close the connection to the email server."""
if self.connection is None:
return
try:
try:
self.connection.quit()
except (ssl.SSLError, smtplib.SMTPServerDisconnected):
# This happens when calling quit() on a TLS connection
# sometimes, or when the connection was already disconnected
# by the server.
self.connection.close()
except smtplib.SMTPException:
if self.fail_silently:
return
raise
finally:
self.connection = None
def send_messages(self, email_messages):
"""
Send one or more EmailMessage objects and return the number of email
messages sent.
"""
if not email_messages:
return 0
with self._lock:
new_conn_created = self.open()
if not self.connection or new_conn_created is None:
# We failed silently on open().
# Trying to send would be pointless.
return 0
num_sent = 0
for message in email_messages:
sent = self._send(message)
if sent:
num_sent += 1
if new_conn_created:
self.close()
return num_sent
def _send(self, email_message):
"""A helper method that does the actual sending."""
if not email_message.recipients():
return False
encoding = email_message.encoding or self.mailman.default_charset
from_email = sanitize_address(email_message.from_email, encoding)
recipients = [sanitize_address(addr, encoding) for addr in email_message.recipients()]
message = email_message.message()
try:
self.connection.sendmail(from_email, recipients, message.as_bytes(linesep='\r\n'))
except smtplib.SMTPException:
if not self.fail_silently:
raise
return False
return True
avg_line_length: 39.793893 | max_line_length: 95 | alphanum_fraction: 0.615576

hexsha: f756d4d4313a30f55d891f7959d655cbc188e8c8 | size: 1,808 | ext: py | lang: Python | path: pandaserver/test/testEvgen17.py | repo: virthead/panda-server | head_hexsha: d2b65f788c4539dc103641ca2a8052cb18729d44 | licenses: ["Apache-2.0"] | stars: 7 (2015-03-30T14:15:35.000Z to 2021-12-22T06:48:22.000Z) | issues: 16 (2015-06-01T13:48:01.000Z to 2022-02-08T15:03:32.000Z) | forks: 15 (2015-03-02T08:57:35.000Z to 2022-03-01T09:48:45.000Z)

import sys
import time
import uuid
import pandaserver.userinterface.Client as Client
from pandaserver.taskbuffer.JobSpec import JobSpec
from pandaserver.taskbuffer.FileSpec import FileSpec
site = sys.argv[1]
cloud = sys.argv[2]
datasetName = 'panda.destDB.%s' % str(uuid.uuid4())
destName = None
jobList = []
for i in range(1):
job = JobSpec()
job.jobDefinitionID = int(time.time()) % 10000
job.jobName = "%s_%d" % (str(uuid.uuid4()),i)
job.AtlasRelease = 'Atlas-17.0.5'
job.homepackage = 'AtlasProduction/17.0.5.6'
job.transformation = 'Evgen_trf.py'
job.destinationDBlock = datasetName
job.destinationSE = destName
job.currentPriority = 10000
job.prodSourceLabel = 'test'
job.computingSite = site
job.cloud = cloud
job.cmtConfig = 'i686-slc5-gcc43-opt'
file = FileSpec()
file.lfn = "%s.evgen.pool.root" % job.jobName
file.destinationDBlock = job.destinationDBlock
file.destinationSE = job.destinationSE
file.dataset = job.destinationDBlock
file.destinationDBlockToken = 'ATLASDATADISK'
file.type = 'output'
job.addFile(file)
fileOL = FileSpec()
fileOL.lfn = "%s.job.log.tgz" % job.jobName
fileOL.destinationDBlock = job.destinationDBlock
fileOL.destinationSE = job.destinationSE
fileOL.dataset = job.destinationDBlock
fileOL.destinationDBlockToken = 'ATLASDATADISK'
fileOL.type = 'log'
job.addFile(fileOL)
job.jobParameters="7000 108316 1 5000 1 MC11.108316.Pythia8_minbias_ND.py %s" % file.lfn
jobList.append(job)
for i in range(1):
s,o = Client.submitJobs(jobList)
print("---------------------")
print(s)
for x in o:
print("PandaID=%s" % x[0])
| 30.644068 | 92 | 0.646018 |
f75711e7673714e980140d95602d3fe767cbd7d4 | 135 | py | Python | super32emu/__init__.py | xsjad0/Super32 | 75cf5828b17cdbce144447a69ff3d1be7ad601f2 | ["BSD-3-Clause"] | 1 | 2019-12-07T01:56:31.000Z | 2019-12-07T01:56:31.000Z | super32emu/__init__.py | xsjad0/Super32 | 75cf5828b17cdbce144447a69ff3d1be7ad601f2 | ["BSD-3-Clause"] | 42 | 2019-11-30T12:57:07.000Z | 2020-02-26T16:30:33.000Z | super32emu/__init__.py | xsjad0/Super32 | 75cf5828b17cdbce144447a69ff3d1be7ad601f2 | ["BSD-3-Clause"] | 4 | 2019-11-27T15:05:33.000Z | 2020-05-13T06:51:21.000Z |
"""
Super32 Emulator
"""
import logging
from logging import NullHandler
logging.getLogger(__name__).addHandler(NullHandler())
| 16.875 | 61 | 0.8 |
f7572e46cf84f9f71326eba212a7083feb98b102 | 391 | py | Python | smcpy/utils/progress_bar.py | HarshilShrivastava/SMCPy | e3c958023aab3f3143e70de5a52e3c195e536dd1 | ["NASA-1.3"] | 1 | 2020-02-11T22:37:43.000Z | 2020-02-11T22:37:43.000Z | smcpy/utils/progress_bar.py | HarshilShrivastava/SMCPy | e3c958023aab3f3143e70de5a52e3c195e536dd1 | ["NASA-1.3"] | null | null | null | smcpy/utils/progress_bar.py | HarshilShrivastava/SMCPy | e3c958023aab3f3143e70de5a52e3c195e536dd1 | ["NASA-1.3"] | null | null | null |
def set_bar(pbar, t, last_ess, ess, acceptance_ratio, resample_status):
pbar.set_description("Step number: {:2d} | Last ess: {:8.2f} | "
"Current ess: {:8.2f} | Samples accepted: "
"{:.1%} | {} |"
.format(t + 1, last_ess, ess, acceptance_ratio,
resample_status))
return pbar
| 48.875 | 72 | 0.478261 |
f75739a9d3341380bad816fb37c329f8be9eb961 | 1,084 | py | Python | test/unittests/test_NRUNCON.py | mudkipmaster/gwlf-e | 9e058445537dd32d1916f76c4b73ca64261771cd | ["Apache-2.0"] | null | null | null | test/unittests/test_NRUNCON.py | mudkipmaster/gwlf-e | 9e058445537dd32d1916f76c4b73ca64261771cd | ["Apache-2.0"] | 6 | 2018-07-24T22:46:28.000Z | 2018-07-29T19:13:09.000Z | test/unittests/test_NRUNCON.py | mudkipmaster/gwlf-e | 9e058445537dd32d1916f76c4b73ca64261771cd | ["Apache-2.0"] | 1 | 2018-07-24T18:22:01.000Z | 2018-07-24T18:22:01.000Z |
import numpy as np
from VariableUnittest import VariableUnitTest
from gwlfe.BMPs.AgAnimal import NRUNCON
class TestNRUNCON(VariableUnitTest):
def test_NRUNCON(self):
z = self.z
np.testing.assert_array_almost_equal(
NRUNCON.NRUNCON_f(z.NYrs, z.GrazingAnimal_0, z.NumAnimals, z.AvgAnimalWt, z.AnimalDailyN, z.GRPctManApp,
z.PctGrazing, z.GRBarnNRate, z.Prec, z.DaysMonth, z.AWMSGrPct, z.GrAWMSCoeffN,
z.RunContPct,
z.RunConCoeffN, z.NGPctManApp, z.NGBarnNRate, z.AWMSNgPct, z.NgAWMSCoeffN, z.n41f,
z.n85l),
NRUNCON.NRUNCON(z.NYrs, z.GrazingAnimal_0, z.NumAnimals, z.AvgAnimalWt, z.AnimalDailyN, z.GRPctManApp,
z.PctGrazing,
z.GRBarnNRate, z.Prec, z.DaysMonth, z.AWMSGrPct, z.GrAWMSCoeffN, z.RunContPct,
z.RunConCoeffN,
z.NGPctManApp, z.NGBarnNRate, z.AWMSNgPct, z.NgAWMSCoeffN, z.n41f, z.n85l), decimal=7)
| 49.272727 | 116 | 0.592251 |
f757cc8bd60cfc76e1b8237ca16f2153d4331b9c | 882 | py | Python | tests/terraform/parser/test_plan_parser.py | graybrandonpfg/checkov | 3081a8560f6369465314ee8f4ac8a8ec01649d68 | ["Apache-2.0"] | null | null | null | tests/terraform/parser/test_plan_parser.py | graybrandonpfg/checkov | 3081a8560f6369465314ee8f4ac8a8ec01649d68 | ["Apache-2.0"] | 3 | 2020-02-07T19:51:40.000Z | 2022-03-21T05:06:29.000Z | tests/terraform/parser/test_plan_parser.py | graybrandonpfg/checkov | 3081a8560f6369465314ee8f4ac8a8ec01649d68 | ["Apache-2.0"] | null | null | null |
import os
import unittest
from checkov.terraform.plan_parser import parse_tf_plan
class TestPlanFileParser(unittest.TestCase):
def test_tags_values_are_flattened(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
valid_plan_path = current_dir + "/resources/plan_tags/tfplan.json"
tf_definitions, _ = parse_tf_plan(valid_plan_path)
file_resource_definition = next(iter(tf_definitions.values()))['resource'][0]
resource_definition = next(iter(file_resource_definition.values()))
resource_attributes = next(iter(resource_definition.values()))
resource_tags = resource_attributes['tags'][0]
for tag_key, tag_value in resource_tags.items():
if tag_key not in ['start_line', 'end_line']:
self.assertIsInstance(tag_value, str)
if __name__ == '__main__':
unittest.main()
| 38.347826 | 85 | 0.71542 |
f757e142f2bdb777e0d12909c7f6d6346a03d7bd | 251 | py | Python | converter/rate_providers/__init__.py | giefferre/convert | 4f6dc199d32a7c7d4f531fc70e15865bd448a020 | ["MIT"] | 1 | 2020-02-28T20:17:19.000Z | 2020-02-28T20:17:19.000Z | converter/rate_providers/__init__.py | giefferre/convert | 4f6dc199d32a7c7d4f531fc70e15865bd448a020 | ["MIT"] | null | null | null | converter/rate_providers/__init__.py | giefferre/convert | 4f6dc199d32a7c7d4f531fc70e15865bd448a020 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Offers different rate providers meant to be used with the converter package.
"""
from .interface import RateProviderInterface
from .random_provider import RandomRateProvider
from .ecb_rate_provider import ECBRateProvider
| 25.1 | 76 | 0.792829 |
f758176ad2c5da0914a6ec7dc1d5061e8e166bfa | 6,537 | py | Python | moto/ecr/responses.py | harveywi/moto | 3a5d857a60c3a2d140ed2c8adfe8dcaf71a4cac8 | ["Apache-2.0"] | 2 | 2018-01-29T14:50:38.000Z | 2018-05-12T10:45:31.000Z | moto/ecr/responses.py | harveywi/moto | 3a5d857a60c3a2d140ed2c8adfe8dcaf71a4cac8 | ["Apache-2.0"] | 2 | 2021-03-31T20:15:51.000Z | 2021-12-13T20:50:52.000Z | moto/ecr/responses.py | harveywi/moto | 3a5d857a60c3a2d140ed2c8adfe8dcaf71a4cac8 | ["Apache-2.0"] | 12 | 2017-09-06T22:11:15.000Z | 2021-05-28T17:22:31.000Z |
from __future__ import unicode_literals
import json
from base64 import b64encode
from datetime import datetime
import time
from moto.core.responses import BaseResponse
from .models import ecr_backends, DEFAULT_REGISTRY_ID
class ECRResponse(BaseResponse):
@property
def ecr_backend(self):
return ecr_backends[self.region]
@property
def request_params(self):
try:
return json.loads(self.body)
except ValueError:
return {}
def _get_param(self, param):
return self.request_params.get(param, None)
def create_repository(self):
repository_name = self._get_param('repositoryName')
if repository_name is None:
repository_name = 'default'
repository = self.ecr_backend.create_repository(repository_name)
return json.dumps({
'repository': repository.response_object
})
def describe_repositories(self):
describe_repositories_name = self._get_param('repositoryNames')
registry_id = self._get_param('registryId')
repositories = self.ecr_backend.describe_repositories(
repository_names=describe_repositories_name, registry_id=registry_id)
return json.dumps({
'repositories': repositories,
'failures': []
})
def delete_repository(self):
repository_str = self._get_param('repositoryName')
registry_id = self._get_param('registryId')
repository = self.ecr_backend.delete_repository(repository_str, registry_id)
return json.dumps({
'repository': repository.response_object
})
def put_image(self):
repository_str = self._get_param('repositoryName')
image_manifest = self._get_param('imageManifest')
image_tag = self._get_param('imageTag')
image = self.ecr_backend.put_image(repository_str, image_manifest, image_tag)
return json.dumps({
'image': image.response_object
})
def list_images(self):
repository_str = self._get_param('repositoryName')
registry_id = self._get_param('registryId')
images = self.ecr_backend.list_images(repository_str, registry_id)
return json.dumps({
'imageIds': [image.response_list_object for image in images],
})
def describe_images(self):
repository_str = self._get_param('repositoryName')
registry_id = self._get_param('registryId')
image_ids = self._get_param('imageIds')
images = self.ecr_backend.describe_images(repository_str, registry_id, image_ids)
return json.dumps({
'imageDetails': [image.response_describe_object for image in images],
})
def batch_check_layer_availability(self):
if self.is_not_dryrun('BatchCheckLayerAvailability'):
raise NotImplementedError(
'ECR.batch_check_layer_availability is not yet implemented')
def batch_delete_image(self):
repository_str = self._get_param('repositoryName')
registry_id = self._get_param('registryId')
image_ids = self._get_param('imageIds')
response = self.ecr_backend.batch_delete_image(repository_str, registry_id, image_ids)
return json.dumps(response)
def batch_get_image(self):
repository_str = self._get_param('repositoryName')
registry_id = self._get_param('registryId')
image_ids = self._get_param('imageIds')
accepted_media_types = self._get_param('acceptedMediaTypes')
response = self.ecr_backend.batch_get_image(repository_str, registry_id, image_ids, accepted_media_types)
return json.dumps(response)
def can_paginate(self):
if self.is_not_dryrun('CanPaginate'):
raise NotImplementedError(
'ECR.can_paginate is not yet implemented')
def complete_layer_upload(self):
if self.is_not_dryrun('CompleteLayerUpload'):
raise NotImplementedError(
'ECR.complete_layer_upload is not yet implemented')
def delete_repository_policy(self):
if self.is_not_dryrun('DeleteRepositoryPolicy'):
raise NotImplementedError(
'ECR.delete_repository_policy is not yet implemented')
def generate_presigned_url(self):
if self.is_not_dryrun('GeneratePresignedUrl'):
raise NotImplementedError(
'ECR.generate_presigned_url is not yet implemented')
def get_authorization_token(self):
registry_ids = self._get_param('registryIds')
if not registry_ids:
registry_ids = [DEFAULT_REGISTRY_ID]
auth_data = []
for registry_id in registry_ids:
password = '{}-auth-token'.format(registry_id)
auth_token = b64encode("AWS:{}".format(password).encode('ascii')).decode()
auth_data.append({
'authorizationToken': auth_token,
'expiresAt': time.mktime(datetime(2015, 1, 1).timetuple()),
'proxyEndpoint': 'https://{}.dkr.ecr.{}.amazonaws.com'.format(registry_id, self.region)
})
return json.dumps({'authorizationData': auth_data})
def get_download_url_for_layer(self):
if self.is_not_dryrun('GetDownloadUrlForLayer'):
raise NotImplementedError(
'ECR.get_download_url_for_layer is not yet implemented')
def get_paginator(self):
if self.is_not_dryrun('GetPaginator'):
raise NotImplementedError(
'ECR.get_paginator is not yet implemented')
def get_repository_policy(self):
if self.is_not_dryrun('GetRepositoryPolicy'):
raise NotImplementedError(
'ECR.get_repository_policy is not yet implemented')
def get_waiter(self):
if self.is_not_dryrun('GetWaiter'):
raise NotImplementedError(
'ECR.get_waiter is not yet implemented')
def initiate_layer_upload(self):
if self.is_not_dryrun('InitiateLayerUpload'):
raise NotImplementedError(
'ECR.initiate_layer_upload is not yet implemented')
def set_repository_policy(self):
if self.is_not_dryrun('SetRepositoryPolicy'):
raise NotImplementedError(
'ECR.set_repository_policy is not yet implemented')
def upload_layer_part(self):
if self.is_not_dryrun('UploadLayerPart'):
raise NotImplementedError(
'ECR.upload_layer_part is not yet implemented')
| 38.005814 | 113 | 0.666667 |
f758243fa477c542c1218633114993f0c127f9a8 | 6,130 | py | Python | rip.py | jiomvk/dash-widevine-rip | aec41bca7f7c7f4568d2134490584c05e30603ca | ["MIT"] | 1 | 2021-12-24T22:20:00.000Z | 2021-12-24T22:20:00.000Z | rip.py | jiomvk/dash-widevine-rip | aec41bca7f7c7f4568d2134490584c05e30603ca | ["MIT"] | null | null | null | rip.py | jiomvk/dash-widevine-rip | aec41bca7f7c7f4568d2134490584c05e30603ca | ["MIT"] | null | null | null |
"""
Given a TOML playlist configuration, rips a Widevine DRM-encrypted DASH stream by parsing
the MPD configuration, decrypting audio and video parts individually, then
combining them into a single video file.
"""
import os
import sys
from enum import Enum
from typing import Dict, List, Optional, Union
import requests
import toml
import xmltodict
import ffmpeg
from pydantic import BaseModel, Field
class ContentType(str, Enum):
video = "video"
audio = "audio"
class Source(BaseModel):
base: str = Field(...)
mpd: str = Field(...)
class Episode(BaseModel):
id: str = Field(...)
keys: Dict[str, str] = Field(...)
class Chapter(BaseModel):
episodes: Dict[str, Episode] = Field({})
class Playlist(BaseModel):
source: Source = Field(...)
chapters: Dict[str, Chapter] = Field({})
class ContentProtection(BaseModel):
scheme_id_uri: str = Field(..., alias="@schemeIdUri")
value: Optional[str] = Field(None, alias="@value")
cenc_kid: Optional[str] = Field(None, alias="@cenc:default_KID")
cenc_pssh: Optional[str] = Field(None, alias="cenc:pssh")
class Initialization(BaseModel):
init_range: str = Field(..., alias="@range")
class SegmentBase(BaseModel):
index_range: str = Field(..., alias="@indexRange")
timescale: int = Field(..., alias="@timescale")
init: Initialization = Field(..., alias="Initialization")
class Representation(BaseModel):
bandwidth: int = Field(..., alias="@bandwidth")
codecs: str = Field(..., alias="@codecs")
mime_type: str = Field(..., alias="@mimeType")
base_url: str = Field(..., alias="BaseURL")
segments: SegmentBase = Field(..., alias="SegmentBase")
class AdaptationSet(BaseModel):
content_type: ContentType = Field(..., alias="@contentType")
width: Optional[int] = Field(None, alias="@width")
height: Optional[int] = Field(None, alias="@height")
par: Optional[str] = Field(None, alias="@par")
protections: List[ContentProtection] = Field(..., alias="ContentProtection")
representation: Union[Representation, List[Representation]] = Field(
..., alias="Representation"
)
class Period(BaseModel):
adaptation_set: List[AdaptationSet] = Field(..., alias="AdaptationSet", min_items=1)
class MPDMeta(BaseModel):
period: Period = Field(..., alias="Period")
class MPDFile(BaseModel):
meta: MPDMeta = Field(..., alias="MPD")
def urljoin(*args):
return "/".join(map(lambda x: str(x).rstrip("/"), args))
def fetch_mpd(mpd_url: str) -> MPDFile:
"""
Fetches an MPD file and parses it.
"""
print("fetching MPD: %s" % mpd_url)
mpd_resp = requests.get(mpd_url)
mpd_resp.raise_for_status()
print("parsing MPD")
return MPDFile.parse_obj(xmltodict.parse(mpd_resp.text))
def fetch_file(url: str, filename: str):
"""
Fetches a file.
"""
if not os.path.exists(filename):
print("fetching file: %s" % url)
resp = requests.get(url)
resp.raise_for_status()
with open(filename, "wb+") as f:
for chunk in resp.iter_content(chunk_size=8192):
f.write(chunk)
def download_episode(episode: Episode, base: str, mpd: str, dir: str, name: str):
"""
Downloads a single episode.
"""
combined_filename = os.path.join(dir, name + ".mp4")
video_filename = os.path.join(dir, name + ".video.mp4")
audio_filename = os.path.join(dir, name + ".audio.mp4")
# don't redownload if already exists
if not os.path.exists(combined_filename):
print("downloading episode: %s" % name)
# fetch MPD
mpd_data = fetch_mpd(urljoin(base, episode.id, mpd))
# extract audio/video fragment locations
video = mpd_data.meta.period.adaptation_set[0]
audio = mpd_data.meta.period.adaptation_set[1]
assert isinstance(video.representation, list)
assert isinstance(audio.representation, Representation)
# fetch video
video_url = urljoin(base, episode.id, video.representation[-1].base_url)
fetch_file(video_url, video_filename)
# fetch audio
audio_url = urljoin(base, episode.id, audio.representation.base_url)
fetch_file(audio_url, audio_filename)
# decrypt and combine
if not os.path.exists(combined_filename):
print("decrypting and recombining video/audio files")
assert video.protections[0].cenc_kid is not None
assert audio.protections[0].cenc_kid is not None
video_key_id = video.protections[0].cenc_kid.replace("-", "")
audio_key_id = audio.protections[0].cenc_kid.replace("-", "")
video_key = episode.keys[video_key_id]
audio_key = episode.keys[audio_key_id]
video_input = ffmpeg.input(video_filename, decryption_key=video_key).video
audio_input = ffmpeg.input(audio_filename, decryption_key=audio_key).audio
ffmpeg.output(
video_input,
audio_input,
combined_filename,
acodec="copy",
vcodec="copy",
).overwrite_output().run()
# remove encrypted files
try:
os.remove(video_filename)
except:
pass
try:
os.remove(audio_filename)
except:
pass
def download_playlist(playlist: Playlist):
"""
Downloads an entire playlist.
"""
print("downloading playlist")
for chapter_name, chapter in playlist.chapters.items():
chapter_name = chapter_name.replace("/", "-")
print("creating chapter dir: %s" % chapter_name)
os.makedirs(chapter_name, exist_ok=True)
for episode_name, episode in chapter.episodes.items():
episode_name = episode_name.replace("/", "-")
download_episode(
episode,
base=playlist.source.base,
mpd=playlist.source.mpd,
dir=chapter_name,
name=episode_name,
)
if __name__ == "__main__":
playlist: Playlist = Playlist.parse_obj(toml.load(sys.argv[1]))
download_playlist(playlist)
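# An illustrative playlist TOML for the pydantic models above (every value is a placeholder,
# not a real endpoint, asset id, or key); toml.load() of such a file parses into Playlist:
#
#     [source]
#     base = "https://example.invalid/content"
#     mpd = "manifest.mpd"
#
#     [chapters."Chapter 1".episodes."Episode 01"]
#     id = "episode-asset-id"
#     [chapters."Chapter 1".episodes."Episode 01".keys]
#     "0123456789abcdef0123456789abcdef" = "00112233445566778899aabbccddeeff"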
| 31.761658 | 89 | 0.640457 |
f7586b000eecdff1b43a09575d4a94ba7e456fa0 | 6,061 | py | Python | tests/test_core.py | joshfp/fastai | eb455070adaea072b4771e340d48a371c6c0c4e2 | ["Apache-2.0"] | 1 | 2019-01-29T22:13:29.000Z | 2019-01-29T22:13:29.000Z | tests/test_core.py | jamesrequa/fastai | 794365cd7f734b5e1027d7e19c99e648fbb9a12b | ["Apache-2.0"] | null | null | null | tests/test_core.py | jamesrequa/fastai | 794365cd7f734b5e1027d7e19c99e648fbb9a12b | ["Apache-2.0"] | null | null | null |
import pytest, torch
import numpy as np
from fastai import *
from tempfile import TemporaryDirectory
def test_cpus(): assert num_cpus() >= 1
@pytest.mark.parametrize("p, q, expected", [
(5 , 1 , [5]),
(5 , [1,1], [5, 5]),
([5], 1 , [5]),
([5], [1,1], [5, 5]),
("ab" , "cd" , ["a", "b"]),
("ab" , ["cd", "ef"], ["a", "b"]),
(["ab"], "cd" , ["ab", "ab"]),
(["ab"], ["cd", "ef"], ["ab", "ab"]),
])
def test_listify(p, q, expected):
assert listify(p, q) == expected
def test_ifnone():
assert ifnone(None, 5) == 5
assert ifnone(5, None) == 5
assert ifnone(1, 5) == 1
assert ifnone(0, 5) == 0
def test_uniqueify():
assert uniqueify([1,1,3,3,5]) == [1,3,5]
assert uniqueify([1,3,5]) == [1,3,5]
assert uniqueify([1,1,1,3,5]) == [1,3,5]
def test_listy():
assert is_listy([1,1,3,3,5]) == True
assert is_listy((1,1,3,3,5)) == True
assert is_listy([1,"2",3,3,5]) == True
assert is_listy((1,"2",3,3,5)) == True
assert is_listy(1) == False
assert is_listy("2") == False
assert is_listy({1, 2}) == False
assert is_listy(set([1,1,3,3,5])) == False
def test_tuple():
assert is_tuple((1,1,3,3,5)) == True
assert is_tuple([1]) == False
assert is_tuple(1) == False
def test_noop():
assert noop(1) is 1
def test_to_int():
assert to_int(("1","1","3","3","5")) == [1,1,3,3,5]
assert to_int([1,"2",3.3,3,5]) == [1,2,3,3,5]
assert to_int(1) == 1
assert to_int(1.2) == 1
assert to_int("1") == 1
def test_partition_functionality():
def test_partition(a, sz, ex):
result = partition(a, sz)
assert len(result) == len(ex)
assert all([a == b for a, b in zip(result, ex)])
a = [1,2,3,4,5]
sz = 2
ex = [[1,2],[3,4],[5]]
test_partition(a, sz, ex)
sz = 3
ex = [[1,2,3],[4,5]]
test_partition(a, sz, ex)
sz = 1
ex = [[1],[2],[3],[4],[5]]
test_partition(a, sz, ex)
sz = 6
ex = [[1,2,3,4,5]]
test_partition(a, sz, ex)
sz = 3
a = []
result = partition(a, sz)
assert len(result) == 0
def test_idx_dict():
assert idx_dict(np.array([1,2,3]))=={1: 0, 2: 1, 3: 2}
assert idx_dict([1, 2, 3])=={1: 0, 2: 1, 3: 2}
assert idx_dict((1, 2, 3))=={1: 0, 2: 1, 3: 2}
def test_find_classes():
path = Path('./classes_test').resolve()
os.mkdir(path)
classes = ['class_0', 'class_1', 'class_2']
for class_num in classes:
os.mkdir(path/class_num)
try:
assert find_classes(path)==[Path('./classes_test/class_0').resolve(),Path('./classes_test/class_1').resolve(),Path('./classes_test/class_2').resolve()]
finally:
shutil.rmtree(path)
def test_arrays_split():
a = arrays_split([0,3],[1, 2, 3, 4, 5], ['a', 'b', 'c', 'd', 'e'])
b = [(array([1, 4]),array(['a', 'd'])), (array([5, 2]),(array(['e','b'])))]
np.testing.assert_array_equal(a,b)
c = arrays_split([0,3],[1, 2, 3, 4, 5])
d = [(array([1, 4]),), (array([5, 2]),)]
np.testing.assert_array_equal(c,d)
with pytest.raises(Exception): arrays_split([0,5],[1, 2, 3, 4, 5])
with pytest.raises(Exception): arrays_split([0,3],[1, 2, 3, 4, 5], [1, 2, 3, 4])
def test_random_split():
valid_pct = 0.4
a = [len(arr) for arr in random_split(valid_pct, [1,2,3,4,5], ['a', 'b', 'c', 'd', 'e'])]
b = [2, 2]
assert a == b
with pytest.raises(Exception): random_split(1.1, [1,2,3])
with pytest.raises(Exception): random_split(0.1, [1,2,3], [1,2,3,4])
def test_camel2snake():
a = camel2snake('someString')
b = 'some_string'
assert a == b
c = camel2snake('some2String')
d = 'some2_string'
assert c == d
e = camel2snake('longStringExmpl')
f = 'long_string_exmpl'
assert e == f
def test_even_mults():
a = even_mults(start=1, stop=8, n=4)
b = array([1.,2.,4.,8.])
np.testing.assert_array_equal(a,b)
def test_series2cat():
df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4], 'col3':[5, 6]})
cols = 'col1','col2'
series2cat(df,*cols)
for col in cols:
assert (df[col].dtypes == 'category')
assert (df['col3'].dtypes == 'int64')
def _write_file(path): f = open(path, 'w'); f.write(str(path.name)); f.close()
class TestMaybeCopy(object):
def test_copies_if_does_not_exist(self):
with TemporaryDirectory() as tmpdir:
tmpdir = Path(tmpdir)
_write_file(tmpdir/'src')
maybe_copy([str(tmpdir/'src')], [str(tmpdir/'dst')]) # works with strings
assert os.path.exists(tmpdir/'dst')
with TemporaryDirectory() as tmpdir:
tmpdir = Path(tmpdir)
_write_file(tmpdir/'src')
maybe_copy([tmpdir/'src'], [tmpdir/'dst']) # works with Paths
assert os.path.exists(tmpdir/'dst')
def test_copies_if_older(self):
with TemporaryDirectory() as tmpdir:
tmpdir = Path(tmpdir)
_write_file(tmpdir/'first')
_write_file(tmpdir/'second')
os.utime(tmpdir/'first', (1,1))
os.utime(tmpdir/'second', (2,2))
maybe_copy([tmpdir/'second'], [tmpdir/'first'])
assert open(tmpdir/'first').read() == 'second'
def test_does_not_copy_if_newer(self):
with TemporaryDirectory() as tmpdir:
tmpdir = Path(tmpdir)
_write_file(tmpdir/'first')
_write_file(tmpdir/'second')
os.utime(tmpdir/'first', (1,1))
os.utime(tmpdir/'second', (2,2))
maybe_copy([tmpdir/'first'], [tmpdir/'second'])
assert open(tmpdir/'second').read() == 'second'
def test_creates_dst_dir_if_does_not_exist(self):
with TemporaryDirectory() as tmpdir:
tmpdir = Path(tmpdir)
_write_file(tmpdir/'file')
maybe_copy([tmpdir/'file'], [tmpdir/'dir'/'file'])
assert os.path.exists(tmpdir/'dir'/'file')
| 31.9 | 159 | 0.538855 |
f7586e47a874b10e20826983ab6c81b9596a93f7 | 59,284 | py | Python | pyseqlab/utilities.py | bratao/-PySeqLab | fea1c4bd4d43565b1bb20a789d78946e1022d0ff | ["MIT"] | 6 | 2019-05-03T22:14:11.000Z | 2022-03-04T10:36:31.000Z | pyseqlab/utilities.py | bratao/-PySeqLab | fea1c4bd4d43565b1bb20a789d78946e1022d0ff | ["MIT"] | 1 | 2020-11-12T02:46:15.000Z | 2020-11-12T03:12:45.000Z | pyseqlab/utilities.py | bratao/-PySeqLab | fea1c4bd4d43565b1bb20a789d78946e1022d0ff | ["MIT"] | 3 | 2019-05-04T03:50:49.000Z | 2020-06-27T00:43:29.000Z |
"""
@author: ahmed allam <ahmed.allam@yale.edu>
"""
import os
import pickle
import shutil
from datetime import datetime
from copy import deepcopy
from itertools import combinations
import heapq
import numpy
class SequenceStruct(object):
r"""class for representing each sequence/segment
Args:
Y: list containing the sequence of states/labels (i.e. ['P','O','O','L','L'])
X: list containing dictionary elements of observation sequences and/or features of the input
        seg_other_symbol: string or None (default). If specified, the task is treated as a segmentation
                          problem where the given string represents the non-entity symbol; otherwise
                          (None) the task is treated as a sequence labeling problem
Attributes:
Y: list containing the sequence of states/labels (i.e. ['P','O','O','L','L'])
X: list containing dictionary elements of observation sequences and/or features of the input
        seg_other_symbol: string or None (default). If specified, the task is treated as a segmentation
                          problem where the given string represents the non-entity symbol; otherwise
                          (None) the task is treated as a sequence labeling problem
T: int, length of a sequence (i.e. len(X))
seg_attr: dictionary comprising the extracted attributes per each boundary of a sequence
L: int, longest length of an identified segment in the sequence
flat_y: list of labels/tags
y_sboundaries: sorted list of boundaries of the :attr:`Y` of the sequence
y_range: range of the sequence
"""
def __init__(self, X, Y, seg_other_symbol=None):
self.seg_attr = {}
self.X = X
self.Y = (Y, seg_other_symbol)
@property
def X(self):
return self._X
@X.setter
def X(self, l):
"""setup the observation sequence
Args:
l: a list of elements (i.e. ``X = [{'w':'Michael'}, {'w':'is'}, {'w':'in'}, {'w':'New'}, {'w':'Haven'}]``)
Example::
the output X becomes:
{1:{'w':'Michael'},
2:{'w':'is'},
3:{'w':'in'},
4:{'w':'New'},
5:{'w':'Haven'}
}
"""
self._X = {}
T = len(l)
for i in range(T):
self._X[i + 1] = l[i]
# new assignment clear seg_attr
if self.seg_attr:
self.seg_attr.clear()
self.T = T
@property
def Y(self):
return self._Y
@Y.setter
def Y(self, elmtup):
"""setup the label sequence
Args:
elmtup: tuple consisting of:
- **Y** a list of elements (i.e. ``Y = ['P','O','O','L','L']``)
representing the labels of the elements in X
- **non_entity_symbol** which represents the Other category (i.e. non entity element which is 'O' in above example)
Example:
Y after the transformation becomes ``{(1, 1): 'P', (2,2): 'O', (3, 3): 'O', (4, 5): 'L'}``
"""
try:
Y_ref, non_entity_symb = elmtup
except ValueError:
raise ValueError("tuple containing Y and non-entity symbol must be passed")
else:
self._Y = {}
# length of longest entity in a segment
L = 1
if non_entity_symb:
label_indices = {}
for i in range(len(Y_ref)):
label = Y_ref[i]
if label in label_indices:
label_indices[label].append(i + 1)
else:
label_indices[label] = [i + 1]
for label, indices_list in label_indices.items():
if label == non_entity_symb or len(indices_list) == 1:
for indx in indices_list:
boundary = (indx, indx)
self._Y[boundary] = label
else:
indx_stack = []
for indx in indices_list:
if not indx_stack:
indx_stack.append(indx)
else:
diff = indx - indx_stack[-1]
if diff > 1:
boundary = (indx_stack[0], indx_stack[-1])
self._Y[boundary] = label
l = indx_stack[-1] - indx_stack[0] + 1
if l > L:
L = l
indx_stack = [indx]
else:
indx_stack.append(indx)
if indx_stack:
boundary = (indx_stack[0], indx_stack[-1])
self._Y[boundary] = label
l = indx_stack[-1] - indx_stack[0] + 1
if l > L:
L = l
indx_stack = [indx]
else:
for i in range(len(Y_ref)):
label = Y_ref[i]
boundary = (i + 1, i + 1)
self._Y[boundary] = label
# store the length of longest entity
self.L = L
# keep a copy of Y in as flat list (i.e. ['P','O','O','L','L'])
self.flat_y = Y_ref
# construct a map from the yboundaries to the pos in the list
y_sboundaries = self.get_y_boundaries()
self.y_sboundaries = y_sboundaries
self.y_boundpos_map = {}
pos = 0
for boundary in y_sboundaries:
self.y_boundpos_map[boundary] = pos
pos += 1
self.y_range = set(range(0, pos))
# def update_boundaries(self):
# self.y_boundaries = self.get_y_boundaries()
# self.x_boundaries = self.get_x_boundaries()
def flatten_y(self, Y):
r"""flatten the :attr:`Y` attribute
Args:
Y: dictionary of this form ``{(1, 1): 'P', (2,2): 'O', (3, 3): 'O', (4, 5): 'L'}``
Example:
flattened y becomes ``['P','O','O','L','L']``
"""
s_boundaries = sorted(Y)
flat_y = []
for u, v in s_boundaries:
for _ in range(u, v + 1):
flat_y.append(Y[(u, v)])
return flat_y
def get_y_boundaries(self):
"""return the sorted boundaries of the labels of the sequence"""
return sorted(self.Y.keys())
def get_x_boundaries(self):
"""return the boundaries of the observation sequence"""
boundaries = []
for u in self.X:
boundaries.append((u, u))
return boundaries
def __str__(self):
"""return string representation of the parsed sequence"""
out_str = "Y sequence:\n {}\nX sequence:\n {}\n{}".format(
self.flat_y, self.X, "-" * 40
)
return out_str
class DataFileParser(object):
"""class to parse a data file comprising the training/testing data
Attributes:
        seqs: list of sequences, each an instance of the :class:`SequenceStruct` class
header: list of attribute names read from the file
"""
def __init__(self):
self.header = []
def read_file(
self, file_path, header, y_ref=True, seg_other_symbol=None, column_sep=" "
):
r"""read and parse a file the contains the sequences following a predefined format
the file should contain label and observation tracks each separated in a column
.. note::
label column is the **LAST** column in the file (i.e. X_a X_b Y)
Args:
file_path: string representing the file path to the data file
header: specifies how the header is reported in the file containing the sequences
options include:
- 'main' -> one header in the beginning of the file
- 'per_sequence' -> a header for every sequence
- list of keywords as header (i.e. ['w', 'part_of_speech'])
Keyword Arguments:
            y_ref: boolean specifying whether the reference label column is included in the data file
            seg_other_symbol: string or None (default), if specified then the task is a segmentation problem
                             where `seg_other_symbol` represents the non-entity symbol. In this case semi-CRF models
                             are used. Else (i.e. `seg_other_symbol` is None) it is considered
                             a sequence labeling problem.
column_sep: string, separator used between the columns in the file
"""
if y_ref:
update_seq = self.update_XY
else:
update_seq = self.update_X
with open(file_path) as file_obj:
counter = 0
X = []
Y = []
for line in file_obj:
counter += 1
line = line.rstrip()
# print(line)
if line:
# print(line)
if y_ref:
*x_arg, y = line.split(column_sep)
self._xarg = x_arg
self._y = y
else:
x_arg = line.split(column_sep)
self._xarg = x_arg
# print(x_arg)
# first line of a sequence
if counter == 1:
if header == "main":
if self.header:
update_seq(X, Y)
# X.append(self.parse_line(x_arg))
# Y.append(y)
else:
self.parse_header(x_arg)
elif header == "per_sequence":
if not self.header:
self.parse_header(x_arg)
else:
if self.header:
update_seq(X, Y)
# X.append(self.parse_line(x_arg))
# Y.append(y)
else:
self.parse_header(header)
update_seq(X, Y)
# X.append(self.parse_line(x_arg))
# Y.append(y)
else:
update_seq(X, Y)
# X.append(self.parse_line(x_arg))
# Y.append(y)
else:
seq = SequenceStruct(X, Y, seg_other_symbol)
# reset counter for filling new sequence
counter = 0
X = []
Y = []
self._xarg = None
self._y = None
yield seq
if X and Y:
seq = SequenceStruct(X, Y, seg_other_symbol)
# reset counter for filling new sequence
counter = 0
X = []
Y = []
self._xarg = None
self._y = None
yield seq
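    # A minimal usage sketch (the file name is illustrative; the file is assumed to be
    # whitespace separated with the label in the last column and one header line):
    #
    #     parser = DataFileParser()
    #     seqs = list(parser.read_file('train.txt', header='main',
    #                                  y_ref=True, seg_other_symbol='O'))
    #     print(parser.header)   # attribute names parsed from the header line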
def update_XY(self, X, Y):
"""update sequence observations and corresponding labels"""
X.append(self.parse_line(self._xarg))
Y.append(self._y)
def update_X(self, X, Y):
"""update sequence observations"""
X.append(self.parse_line(self._xarg))
def parse_line(self, x_arg):
"""parse the read line
Args:
x_arg: tuple of observation columns
"""
# fill the sequences X and Y with observations and tags respectively
header = self.header
x = {}
for i in range(len(x_arg)):
x[header[i]] = x_arg[i]
return x
def parse_header(self, x_arg):
"""parse header
Args:
x_arg: tuple of attribute/observation names
"""
seq_header = [input_src for input_src in x_arg]
self.header = seq_header
class ReaderWriter(object):
"""class for dumping, reading and logging data"""
def __init__(self):
pass
@staticmethod
def dump_data(data, file_name, mode="wb"):
"""dump data by pickling
Args:
data: data to be pickled
file_name: file path where data will be dumped
mode: specify writing options i.e. binary or unicode
"""
with open(file_name, mode) as f:
pickle.dump(data, f, protocol=4)
@staticmethod
def read_data(file_name, mode="rb"):
"""read dumped/pickled data
Args:
file_name: file path where data will be dumped
mode: specify writing options i.e. binary or unicode
"""
with open(file_name, mode) as f:
data = pickle.load(f)
return data
@staticmethod
def log_progress(line, outfile, mode="a"):
"""write data to a file
Args:
line: string representing data to be written out
outfile: file path where data will be written/logged
mode: specify writing options i.e. append, write
"""
with open(outfile, mode) as f:
f.write(line)
class AStarNode(object):
"""class representing A* node to be used with A* searcher and viterbi for generating k-decoded list
Args:
cost: float representing the score/unnormalized probability of a sequence up to given position
position: integer representing the current position in the sequence
pi_c: prefix or state code of the label
label: label of the current position in a sequence
frwdlink: a link to :class:`AStarNode` node
Attributes:
cost: float representing the score/unnormalized probability of a sequence up to given position
position: integer representing the current position in the sequence
pi_c: prefix or state code of the label
label: label of the current position in a sequence
frwdlink: a link to :class:`AStarNode` node
"""
def __init__(self, cost, position, pi_c, label, frwdlink):
self.cost = cost
self.position = position
self.pi_c = pi_c
self.label = label
self.frwdlink = frwdlink
def print_node(self):
"""print the info about a node"""
statement = "cost: {}, position: {}, pi_code: {}, label: {}, ".format(
self.cost, self.position, self.pi_c, self.label
)
if self.frwdlink:
statement += "forward_link: {}".format(self.frwdlink)
else:
statement += "forward_link: None"
print(statement)
class AStarAgenda(object):
"""class containing a heap where instances of :class:`AStarNode` class will be pushed
the push operation will use the score matrix (built using viterbi algorithm)
representing the unnormalized probability of the sequences ending at every position
with the different available prefixes/states
Attributes:
qagenda: queue where instances of :class:`AStarNode` are pushed
entry_count: counter that keeps track of the entries and associate each entry(node)
with a unique number. It is useful for resolving nodes with equal costs
"""
def __init__(self):
self.qagenda = []
self.entry_count = 0
def push(self, astar_node, cost):
"""push instance of :class:`AStarNode` with its associated cost to the heap
Args:
astar_node: instance of :class:`AStarNode` class
cost: float representing the score/unnormalized probability of a sequence up to given position
"""
heapq.heappush(self.qagenda, (-cost, self.entry_count, astar_node))
self.entry_count += 1
def pop(self):
"""pop nodes with highest score from the heap
"""
astar_node = heapq.heappop(self.qagenda)[-1]
return astar_node
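# A small usage sketch of the agenda: nodes come back in order of decreasing cost, with the
# entry counter breaking ties between equal costs (the costs/positions below are made up):
#
#     agenda = AStarAgenda()
#     agenda.push(AStarNode(1.5, 3, 0, 'O', None), 1.5)
#     agenda.push(AStarNode(2.0, 3, 1, 'P', None), 2.0)
#     best = agenda.pop()   # the node pushed with cost 2.0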
class FO_AStarSearcher(object):
"""A* star searcher associated with first-order CRF model such as :class:`FirstOrderCRF`
Args:
Y_codebook_rev: a reversed version of dictionary comprising the set of states each assigned a unique code
Attributes:
Y_codebook_rev: a reversed version of dictionary comprising the set of states each assigned a unique code
"""
def __init__(self, Y_codebook_rev):
self.Y_codebook_rev = Y_codebook_rev
def infer_labels(self, top_node, back_track):
"""decode sequence by inferring labels
Args:
top_node: instance of :class:`AStarNode` class
back_track: dictionary containing back pointers built using dynamic programming algorithm
"""
Y_codebook_rev = self.Y_codebook_rev
# decoding the sequence
# print("we are decoding")
# top_node.print_node()
y_c = top_node.pi_c
pos = top_node.position
Y_decoded = []
Y_decoded.append(y_c)
t = pos - 1
while t > 0:
y_c_tplus1 = Y_decoded[-1]
y_c_t = back_track[t + 1, y_c_tplus1]
Y_decoded.append(y_c_t)
t -= 1
Y_decoded.reverse()
Y_decoded = [Y_codebook_rev[y_code] for y_code in Y_decoded]
while top_node.frwdlink:
y = top_node.frwdlink.label
Y_decoded.append(y)
top_node = top_node.frwdlink
# print(Y_decoded)
return Y_decoded
def search(self, alpha, back_track, T, K):
"""A* star searcher uses the score matrix (built using viterbi algorithm) to decode top-K list of sequences
Args:
alpha: score matrix build using the viterbi algorithm
back_track: back_pointers dictionary tracking the best paths to every state
T: last decoded position of a sequence (in this context, it is the alpha.shape[0])
K: number of top decoded sequences to be returned
Returns:
topk_list: top-K list of decoded sequences
"""
# push the best astar nodes to the queue (i.e. the states at time T)
q = AStarAgenda()
r = set()
c = 0
Y_codebook_rev = self.Y_codebook_rev
# create nodes from the states at time T
for y_c in Y_codebook_rev:
cost = alpha[T, y_c]
pos = T
frwdlink = None
label = Y_codebook_rev[y_c]
node = AStarNode(cost, pos, y_c, label, frwdlink)
# node.print_node()
q.push(node, cost)
track = []
topk_list = []
try:
while c < K:
# print("heap size ", len(q.qagenda))
top_node = q.pop()
track.append(top_node)
for i in reversed(range(2, top_node.position + 1)):
# best previous state at pos = i-1
curr_y_c = top_node.pi_c
bestprev_y_c = back_track[i, curr_y_c]
pos = i - 1
for prev_y_c in Y_codebook_rev:
# create a new astar node
if prev_y_c != bestprev_y_c:
label = Y_codebook_rev[prev_y_c]
cost = alpha[pos, prev_y_c]
s = AStarNode(cost, pos, prev_y_c, label, top_node)
q.push(s, cost)
# create the backlink of the previous top_node (i.e. create a node from the best_y_c)
cost = alpha[pos, bestprev_y_c]
label = Y_codebook_rev[bestprev_y_c]
top_node = AStarNode(cost, pos, y_c, label, top_node)
# decode and check if it is not saved already in topk list
y_labels = self.infer_labels(track[-1], back_track)
# print(y_labels)
signature = "".join(y_labels)
if signature not in r:
r.add(signature)
topk_list.append(y_labels)
c += 1
track.pop()
except (KeyError, IndexError) as e:
# consider logging the error
print(e)
finally:
# print('r ', r)
# print('topk ', topk_list)
return topk_list
class HO_AStarSearcher(object):
"""A* star searcher associated with higher-order CRF model such as :class:`HOCRFAD`
Args:
P_codebook_rev: reversed codebook of set of proper prefixes in the `P` set
e.g. ``{0:'', 1:'P', 2:'L', 3:'O', 4:'L|O', ...}``
P_elems: dictionary comprising the composing elements of every prefix in the `P` set
e.g. ``{'':('',), 'P':('P',), 'L':('L',), 'O':('O',), 'L|O':('L','O'), ...}``
Attributes:
P_codebook_rev: reversed codebook of set of proper prefixes in the `P` set
e.g. ``{0:'', 1:'P', 2:'L', 3:'O', 4:'L|O', ...}``
P_elems: dictionary comprising the composing elements of every prefix in the `P` set
e.g. ``{'':('',), 'P':('P',), 'L':('L',), 'O':('O',), 'L|O':('L','O'), ...}``
"""
def __init__(self, P_codebook_rev, P_elems):
self.P_codebook_rev = P_codebook_rev
self.P_elems = P_elems
def get_node_label(self, pi_code):
"""get the the label/state given a prefix code
Args:
pi_code: prefix code which is an element of :attr:`P_codebook_rev`
"""
pi = self.P_codebook_rev[pi_code]
y = self.P_elems[pi][-1]
return y
def infer_labels(self, top_node, back_track):
"""decode sequence by inferring labels
Args:
top_node: instance of :class:`AStarNode` class
back_track: dictionary containing back pointers tracking the best paths to every state
"""
# decoding the sequence
# print("we are decoding")
# top_node.print_node()
y = top_node.label
pi_c = top_node.pi_c
pos = top_node.position
Y_decoded = []
Y_decoded.append((pi_c, y))
# print("t={}, p_T_code={}, p_T={}, y_T ={}".format(T, p_T_code, p_T, y_T))
t = pos - 1
while t > 0:
p_tplus1_c = Y_decoded[-1][0]
p_t_c, y_t = back_track[t + 1, p_tplus1_c]
# print("t={}, (t+1, p_t_code)=({}, {})->({},{})".format(t, t+1, P_codebook[p_tplus1], p_t, y_t))
Y_decoded.append((p_t_c, y_t))
t -= 1
Y_decoded.reverse()
Y_decoded = [y for (__, y) in Y_decoded]
while top_node.frwdlink:
y = top_node.frwdlink.label
Y_decoded.append(y)
top_node = top_node.frwdlink
# print(Y_decoded)
return Y_decoded
def search(self, alpha, back_track, T, K):
"""A* star searcher uses the score matrix (built using viterbi algorithm) to decode top-K list of sequences
Args:
alpha: score matrix build using the viterbi algorithm
back_track: back_pointers dictionary tracking the best paths to every state
T: last decoded position of a sequence (in this context, it is the alpha.shape[0])
K: number of top decoded sequences to be returned
Returns:
topk_list: top-K list of decoded sequences
"""
# push the best astar nodes to the queue (i.e. the pi's at time T)
q = AStarAgenda()
r = set()
c = 0
P_codebook_rev = self.P_codebook_rev
# create nodes from the pi's at time T
for pi_c in P_codebook_rev:
cost = alpha[T, pi_c]
pos = T
frwdlink = None
label = self.get_node_label(pi_c)
node = AStarNode(cost, pos, pi_c, label, frwdlink)
# node.print_node()
q.push(node, cost)
track = []
topk_list = []
try:
while c < K:
# print("heap size ", len(q.qagenda))
top_node = q.pop()
track.append(top_node)
for i in reversed(range(2, top_node.position + 1)):
best_prev_pi_c, best_y = back_track[i, top_node.pi_c]
pos = i - 1
for prev_pi_c in P_codebook_rev:
# create a new astar node
if prev_pi_c != best_prev_pi_c:
label = self.get_node_label(prev_pi_c)
cost = alpha[pos, prev_pi_c]
s = AStarNode(cost, pos, prev_pi_c, label, top_node)
q.push(s, cost)
# create the backlink of the top_node
cost = alpha[pos, best_prev_pi_c]
top_node = AStarNode(cost, pos, best_prev_pi_c, best_y, top_node)
# decode and check if it is not saved already in topk list
y_labels = self.infer_labels(track[-1], back_track)
# print(y_labels)
sig = "".join(y_labels)
if sig not in r:
r.add(sig)
topk_list.append(y_labels)
c += 1
track.pop()
except (KeyError, IndexError) as e:
# consider logging the error
print(e)
finally:
# print('r ', r)
# print('topk ', topk_list)
return topk_list
class HOSemi_AStarSearcher(object):
"""A* star searcher associated with higher-order CRF model such as :class:`HOSemiCRFAD`
Args:
P_codebook_rev: reversed codebook of set of proper prefixes in the `P` set
e.g. ``{0:'', 1:'P', 2:'L', 3:'O', 4:'L|O', ...}``
P_elems: dictionary comprising the composing elements of every prefix in the `P` set
e.g. ``{'':('',), 'P':('P',), 'L':('L',), 'O':('O',), 'L|O':('L','O'), ...}``
Attributes:
P_codebook_rev: reversed codebook of set of proper prefixes in the `P` set
e.g. ``{0:'', 1:'P', 2:'L', 3:'O', 4:'L|O', ...}``
P_elems: dictionary comprising the composing elements of every prefix in the `P` set
e.g. ``{'':('',), 'P':('P',), 'L':('L',), 'O':('O',), 'L|O':('L','O'), ...}``
"""
def __init__(self, P_codebook_rev, pi_elems):
self.P_codebook_rev = P_codebook_rev
self.pi_elems = pi_elems
def get_node_label(self, pi_code):
"""get the the label/state given a prefix code
Args:
pi_code: prefix code which is an element of :attr:`P_codebook_rev`
"""
pi = self.P_codebook_rev[pi_code]
y = self.pi_elems[pi][-1]
return y
def infer_labels(self, top_node, back_track):
"""decode sequence by inferring labels
Args:
top_node: instance of :class:`AStarNode` class
back_track: dictionary containing back pointers tracking the best paths to every state
"""
# decoding the sequence
# print("we are decoding")
# top_node.print_node()
y = top_node.label
pi_c = top_node.pi_c
pos = top_node.position
Y_decoded = []
d, pt_c, yt = back_track[pos, pi_c]
for _ in range(d + 1):
Y_decoded.append(y)
t = pos - d - 1
while t > 0:
new_d, new_pt_c, new_yt = back_track[t, pt_c]
for _ in range(new_d + 1):
Y_decoded.append(yt)
t = t - new_d - 1
pt_c = new_pt_c
yt = new_yt
Y_decoded.reverse()
while top_node.frwdlink:
y = top_node.frwdlink.label
Y_decoded.append(y)
top_node = top_node.frwdlink
# print(Y_decoded)
return Y_decoded
def search(self, alpha, back_track, T, K):
"""A* star searcher uses the score matrix (built using viterbi algorithm) to decode top-K list of sequences
Args:
alpha: score matrix build using the viterbi algorithm
back_track: back_pointers dictionary tracking the best paths to every state
T: last decoded position of a sequence (in this context, it is the alpha.shape[0])
K: number of top decoded sequences to be returned
Returns:
topk_list: top-K list of decoded sequences
"""
# push the best astar nodes to the queue (i.e. the pi's at time T)
q = AStarAgenda()
r = set()
c = 0
P_codebook_rev = self.P_codebook_rev
# create nodes from the pi's at time T
for pi_c in P_codebook_rev:
cost = alpha[T, pi_c]
pos = T
frwdlink = None
label = self.get_node_label(pi_c)
node = AStarNode(cost, pos, pi_c, label, frwdlink)
# node.print_node()
q.push(node, cost)
track = []
topk_list = []
try:
while c < K:
# print("heap size ", len(q.qagenda))
top_node = q.pop()
track.append(top_node)
while True:
curr_pos = top_node.position
if curr_pos == 1:
break
d, best_prev_pi_c, best_prev_y = back_track[curr_pos, top_node.pi_c]
prev_pos = curr_pos - d - 1
for prev_pi_c in P_codebook_rev:
# create a new astar node
if prev_pi_c != best_prev_pi_c:
label = self.get_node_label(prev_pi_c)
cost = alpha[prev_pos, prev_pi_c]
s = AStarNode(cost, prev_pos, prev_pi_c, label, top_node)
q.push(s, cost)
# create the backlink of the top_node
cost = alpha[prev_pos, best_prev_pi_c]
top_node = AStarNode(
cost, prev_pos, best_prev_pi_c, best_prev_y, top_node
)
# decode and check if it is not saved already in topk list
y_labels = self.infer_labels(track[-1], back_track)
# print(y_labels)
sig = "".join(y_labels)
if sig not in r:
r.add(sig)
topk_list.append(y_labels)
c += 1
track.pop()
except (KeyError, IndexError) as e:
# consider logging the error
print(e)
finally:
# print('r ', r)
# print('topk ', topk_list)
return topk_list
class TemplateGenerator(object):
"""template generator class for feature/function template generation
"""
def __init__(self):
pass
def generate_template_XY(self, attr_name, x_spec, y_spec, template):
r"""generate template XY for the feature extraction
Args:
attr_name: string representing the attribute name of the atomic observations/tokens
x_spec: tuple of the form (n-gram, range)
that is we can specify the n-gram features required in a specific range/window
for an observation token ``attr_name``
y_spec: string specifying how to join/combine the features on the X observation level
with labels on the Y level.
Example of passed options would be:
- one state (i.e. current state) by passing ``1-state`` or
- two states (i.e. current and previous state) by passing ``2-states`` or
- one and two states (i.e. mix/combine observation features with one state model and two states models)
by passing ``1-state:2-states``. Higher order models support models with states > 2 such as ``3-states`` and above.
template: dictionary that accumulates the generated feature template for all attributes
Example:
suppose we have `word` attribute referenced by 'w' and we need to use the current word
with the current label (i.e. unigram of words with the current label) in a range of (0,1)
::
templateXY = {}
generate_template_XY('w', ('1-gram', range(0, 1)), '1-state', templateXY)
we can also specify a two states/labels features at the Y level
::
generate_template_XY('w', ('1-gram', range(0, 1)), '1-state:2-states', templateXY)
        .. note::
this can be applied for every attribute name and accumulated in the `template` dictionary
"""
ngram_options, wsize = x_spec
templateX = self._traverse_x(attr_name, ngram_options, wsize)
templateY = self.generate_template_Y(y_spec)
templateXY = self._mix_template_XY(templateX, templateY)
# update the template we are building
self._update_template(template, templateXY)
def _update_template(self, template, templateXY):
"""update the accumulated template with the current generated templateXY
Args:
template: dictionary of the accumulated template for the different offsets
and attribute names
templateXY: dictionary of the form ``{attr_name:{x_offset:(y_offsets)}}``
"""
for attr_name in templateXY:
if attr_name in template:
for x_offset in templateXY[attr_name]:
template[attr_name][x_offset] = templateXY[attr_name][x_offset]
else:
template[attr_name] = templateXY[attr_name]
def _traverse_x(self, attr_name, ngram_options, wsize):
"""generate template on the X observation level only
Args:
attr_name: string representing the attribute name of the atomic observations/tokens
ngram_options: string specifying the n-grams (i.e. ``1-gram``) it also supports multiple
specification such as ``1-gram:2-gram`` where each is separated by a colon
wsize: a range specifying the window size where the template operates
"""
options = ngram_options.split(":")
l = list(wsize)
template = {attr_name: {}}
for option in options:
n = int(option.split("-")[0])
ngram_list = self.generate_ngram(l, n)
for offset in ngram_list:
template[attr_name][offset] = None
return template
def generate_template_Y(self, ngram_options):
"""generate template on the Y labels level
Args:
ngram_options: string specifying the number of states to be use (i.e. ``1-state``).
It also supports multiple specification such as ``1-state:2-states``
where each is separated by a colon
"""
template = {"Y": []}
options = ngram_options.split(":")
for option in options:
max_order = int(option.split("-")[0])
template["Y"] += self._traverse_y(max_order, accumulative=False)["Y"]
return template
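    # Example: generate_template_Y('1-state:2-states') returns {'Y': [(0,), (-1, 0)]},
    # i.e. one-state and two-state label offsets.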
@staticmethod
def _traverse_y(max_order, accumulative=True):
"""generate the y template"""
attr_name = "Y"
template = {attr_name: []}
if accumulative:
for j in range(max_order):
offsets_y = [-i for i in range(j + 1)]
offsets_y = tuple(reversed(offsets_y))
template[attr_name].append(offsets_y)
else:
offsets_y = [-i for i in range(max_order)]
offsets_y = tuple(reversed(offsets_y))
template[attr_name].append(offsets_y)
return template
@staticmethod
def _mix_template_XY(templateX, templateY):
"""mix and join the template on the X observation level with the Y level
Args:
templateX: dictionary of the form ``{attr_name:{x_offset:None}}``
e.g. ``{'w': {(0,): None}}``
templateY: dictionary of the form ``{'Y':[y_offset]}``
e.g. ``{'Y': [(0,), (-1, 0)]}``
.. note::
- x_offset is a tuple of offsets representing the ngram options needed
such as (0,) for unigram and (-1,0) for bigram
- y_offset is a tuple of offsets representing the number of states options needed
such as (0,) for 1-state and (-1,0) for 2-states and (-2,-1,0) for 3-states
"""
template_XY = deepcopy(templateX)
for attr_name in template_XY:
for offset_x in template_XY[attr_name]:
template_XY[attr_name][offset_x] = tuple(templateY["Y"])
return template_XY
@staticmethod
def generate_ngram(l, n):
"""n-gram generator based on the length of the window and the ngram option
Args:
l: list of positions of the range representing the window size (i.e. list(wsize))
n: integer representing the n-gram option (i.e. 1 for unigram, 2 for bigram, etc..)
"""
ngram_list = []
for i in range(0, len(l)):
elem = tuple(l[i : i + n])
if len(elem) != n:
break
ngram_list.append(elem)
return ngram_list
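    # Example: generate_ngram([1, 2, 3], 2) returns [(1, 2), (2, 3)], while
    # generate_ngram([1, 2, 3], 1) returns [(1,), (2,), (3,)].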
@staticmethod
def generate_combinations(n):
"""generates all possible combinations based on the maximum number of ngrams n
Args:
n: integer specifying the maximum/greatest ngram option
"""
option_names = []
start = 1
for i in range(start, n + 1):
option_names.append("{}-gram".format(i))
config = {}
for i in range(start, n + 1):
config[i] = list(combinations(option_names, i))
config_combinations = {}
for c_list in config.values():
for c_tup in c_list:
key_name = ":".join(c_tup)
config_combinations[key_name] = set()
elemkeys = config_combinations.keys()
for option_i in config_combinations:
s = config_combinations[option_i]
for option_j in elemkeys:
s.add(option_j)
config_combinations[option_i] = s
return config_combinations
class BoundNode(object):
"""boundary entity class used when generating all possible partitions within specified constraint
Args:
parent: instance of :class:`BoundNode`
boundary: tuple (u,v) representing the current boundary
"""
def __init__(self, parent, boundary):
self.parent = parent
self.boundary = boundary
self.children = []
def add_child(self, child):
"""add link to the child nodes"""
self.children.append(child)
def get_child(self):
"""retrieve child nodes"""
return self.children.pop()
def get_signature(self):
"""retrieve the id of the node"""
return id(self)
def generate_partitions(
boundary, L, patt_len, bound_node_map, depth_node_map, parent_node, depth=1
):
"""generate all possible partitions within the range of segment length and model order
it transforms the partitions into a tree of nodes starting from the root node
that uses `boundary` argument in its construction
Args:
boundary: tuple (u,v) representing the current boundary in a sequence
L: integer representing the maximum length a segment could be constructed
patt_len: integer representing the maximum model order
bound_node_map: dictionary that keeps track of all possible partitions represented as
instances of :class:`BoundNode`
depth_node_map: dictionary that arranges the generated nodes by their depth in the tree
parent_node: instance of :class:`BoundNode` or None in case of the root node
depth: integer representing the maximum depth of the tree to be reached before stopping
"""
if depth >= patt_len:
return
if parent_node:
if boundary in bound_node_map:
curr_node = bound_node_map[boundary]
else:
curr_node = BoundNode(parent_node, boundary)
bound_node_map[boundary] = curr_node
if depth in depth_node_map:
depth_node_map[depth].append(curr_node)
else:
depth_node_map[depth] = [curr_node]
else:
# setup root node
curr_node = BoundNode(None, boundary)
bound_node_map[boundary] = curr_node
depth_node_map[depth] = [curr_node]
u = boundary[0] - 1
v = u
depth += 1
for d in range(L):
if u - d < 1:
break
upd_boundary = (u - d, v)
if upd_boundary in bound_node_map:
child = bound_node_map[upd_boundary]
else:
child = BoundNode(curr_node, upd_boundary)
bound_node_map[upd_boundary] = child
if depth in depth_node_map:
depth_node_map[depth].append(child)
else:
depth_node_map[depth] = [child]
curr_node.add_child(child)
generate_partitions(
upd_boundary, L, patt_len, bound_node_map, depth_node_map, child, depth
)
def generate_partition_boundaries(depth_node_map):
"""generate partitions of the boundaries generated in :func:`generate_partitions` function
Args:
depth_node_map: dictionary that arranges the generated nodes by their depth in the tree
it is constructed using :func:`generate_partitions` function
"""
g = {}
depths = sorted(depth_node_map, reverse=True)
for depth in depths:
g[depth] = []
nodes = depth_node_map[depth]
for curr_node in nodes:
l = []
l.append(curr_node.boundary)
while True:
curr_node = curr_node.parent
if curr_node:
l.append(curr_node.boundary)
else:
g[depth].append(l)
break
return g
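# A usage sketch tying the two helpers together (the boundary, maximum segment length L and
# maximum pattern length are illustrative values):
#
#     bound_node_map, depth_node_map = {}, {}
#     generate_partitions((4, 4), L=2, patt_len=3, bound_node_map=bound_node_map,
#                         depth_node_map=depth_node_map, parent_node=None)
#     partitions_per_depth = generate_partition_boundaries(depth_node_map)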
def delete_directory(directory):
if os.path.isdir(directory):
shutil.rmtree(directory)
def delete_file(filepath):
check = os.path.isfile(filepath)
if check:
os.remove(filepath)
def create_directory(folder_name, directory="current"):
"""create directory/folder (if it does not exist) and returns the path of the directory
Args:
folder_name: string representing the name of the folder to be created
Keyword Arguments:
directory: string representing the directory where to create the folder
if `current` then the folder will be created in the current directory
"""
if directory == "current":
path_current_dir = os.path.dirname(__file__)
else:
path_current_dir = directory
path_new_dir = os.path.join(path_current_dir, folder_name)
if not os.path.exists(path_new_dir):
os.makedirs(path_new_dir)
return path_new_dir
def generate_datetime_str():
"""generate string composed of the date and time"""
datetime_now = datetime.now()
datetime_str = "{}_{}_{}-{}_{}_{}_{}".format(
datetime_now.year,
datetime_now.month,
datetime_now.day,
datetime_now.hour,
datetime_now.minute,
datetime_now.second,
datetime_now.microsecond,
)
return datetime_str
# def vectorized_logsumexp(vec):
# """vectorized version of log sum exponential operation
#
# Args:
# vec: numpy vector where entries are in the log domain
# """
# with numpy.errstate(invalid='warn'):
# max_a = numpy.max(vec)
# try:
# res = max_a + numpy.log(numpy.sum(numpy.exp(vec - max_a)))
# except Warning:
# res = max_a
# return(res)
def vectorized_logsumexp(vec):
"""vectorized version of log sum exponential operation
Args:
vec: numpy vector where entries are in the log domain
"""
max_a = numpy.max(vec)
if max_a != -numpy.inf:
return max_a + numpy.log(numpy.sum(numpy.exp(vec - max_a)))
# case where max_a == -numpy.inf
return max_a
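# Illustrative sketch of the log-sum-exp above (the values are assumptions); for a proper
# probability vector the result should be log(1) = 0 up to floating point error:
#   vec = numpy.log(numpy.array([0.1, 0.2, 0.7]))
#   vectorized_logsumexp(vec)  # ~0.0
#   vectorized_logsumexp(numpy.array([-numpy.inf, -numpy.inf]))  # -inf, the guarded case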
def generate_updated_model(
modelparts_dir,
modelrepr_class,
model_class,
aextractor_obj,
fextractor_class,
seqrepresenter_class,
ascaler_class=None,
):
"""update/regenerate CRF models using the saved parts/components
Args:
modelparts_dir: string representing the directory where model parts are saved
modelrepr_class: name of the model representation class to be used which has
suffix `ModelRepresentation` such as :class:`HOCRFADModelRepresentation`
model_class: name of the CRF model class such as :class:`HOCRFAD`
        aextractor_obj: instance (or class) of the attribute extractor such as :class:`NERSegmentAttributeExtractor`
fextractor_class: name of the feature extractor class used such as :class:`HOFeatureExtractor`
seqrepresenter_class: name of the sequence representer class such as :class:`SeqsRepresenter`
ascaler_class: name of the attribute scaler class such as :class:`AttributeScaler`
.. note::
This function is equivalent to :func:`generate_trained_model` function. However, this function
uses explicit specification of the arguments (i.e. specifying explicitly the classes to be used)
"""
from pyseqlab.attributes_extraction import GenericAttributeExtractor
ycodebook = ReaderWriter.read_data(os.path.join(modelparts_dir, "MR_Ycodebook"))
mfeatures = ReaderWriter.read_data(os.path.join(modelparts_dir, "MR_modelfeatures"))
mfeatures_codebook = ReaderWriter.read_data(
os.path.join(modelparts_dir, "MR_modelfeaturescodebook")
)
L = ReaderWriter.read_data(os.path.join(modelparts_dir, "MR_L"))
# generate model representation
new_mrepr = modelrepr_class()
new_mrepr.modelfeatures = mfeatures
new_mrepr.modelfeatures_codebook = mfeatures_codebook
new_mrepr.Y_codebook = ycodebook
new_mrepr.L = L
new_mrepr.generate_instance_properties()
# generate attribute extractor
if type(aextractor_obj) == type(GenericAttributeExtractor): # case it is a class
new_attrextractor = aextractor_obj()
else: # case it is an instance of a class
new_attrextractor = aextractor_obj
# generate feature extractor
templateX = ReaderWriter.read_data(os.path.join(modelparts_dir, "FE_templateX"))
templateY = ReaderWriter.read_data(os.path.join(modelparts_dir, "FE_templateY"))
new_fextractor = fextractor_class(templateX, templateY, new_attrextractor.attr_desc)
# generate sequence representer
new_seqrepr = seqrepresenter_class(new_attrextractor, new_fextractor)
# generate attribute scaler if applicable
if ascaler_class:
scaling_info = ReaderWriter.read_data(
os.path.join(modelparts_dir, "AS_scalinginfo")
)
method = ReaderWriter.read_data(os.path.join(modelparts_dir, "AS_method"))
new_attrscaler = ascaler_class(scaling_info, method)
new_seqrepr.attr_scaler = new_attrscaler
# generate crf instance
new_crfmodel = model_class(new_mrepr, new_seqrepr, {})
new_crfmodel.weights = ReaderWriter.read_data(
os.path.join(modelparts_dir, "weights")
)
return new_crfmodel
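# Illustrative sketch (the parts directory and the attribute extractor instance are assumptions);
# the class names match the imports used in generate_trained_model below:
#   from pyseqlab.ho_crf_ad import HOCRFAD, HOCRFADModelRepresentation
#   from pyseqlab.features_extraction import HOFeatureExtractor, SeqsRepresenter
#   crf_model = generate_updated_model("path/to/model_parts", HOCRFADModelRepresentation,
#                                      HOCRFAD, attr_extractor, HOFeatureExtractor,
#                                      SeqsRepresenter)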
def generate_trained_model(modelparts_dir, aextractor_obj):
"""regenerate trained CRF models using the saved trained model parts/components
Args:
modelparts_dir: string representing the directory where model parts are saved
        aextractor_obj: instance (or class) of the attribute extractor such as :class:`NERSegmentAttributeExtractor`
"""
# parse the class description file
class_desc = []
with open(os.path.join(modelparts_dir, "class_desc.txt"), "r") as f:
for line in f:
class_desc.append(line.strip())
from pyseqlab.features_extraction import (
HOFeatureExtractor,
FOFeatureExtractor,
SeqsRepresenter,
)
seqrepresenter_class = SeqsRepresenter
if class_desc[1] == "HOCRFAD":
from pyseqlab.ho_crf_ad import HOCRFAD, HOCRFADModelRepresentation
modelrepr_class = HOCRFADModelRepresentation
model_class = HOCRFAD
fextractor_class = HOFeatureExtractor
elif class_desc[1] == "HOCRF":
from pyseqlab.ho_crf import HOCRF, HOCRFModelRepresentation
modelrepr_class = HOCRFModelRepresentation
model_class = HOCRF
fextractor_class = HOFeatureExtractor
elif class_desc[1] == "HOSemiCRFAD":
from pyseqlab.hosemi_crf_ad import HOSemiCRFAD, HOSemiCRFADModelRepresentation
modelrepr_class = HOSemiCRFADModelRepresentation
model_class = HOSemiCRFAD
fextractor_class = HOFeatureExtractor
elif class_desc[1] == "HOSemiCRF":
from pyseqlab.hosemi_crf import HOSemiCRF, HOSemiCRFModelRepresentation
modelrepr_class = HOSemiCRFModelRepresentation
model_class = HOSemiCRF
fextractor_class = HOFeatureExtractor
elif class_desc[1] == "FirstOrderCRF":
from pyseqlab.fo_crf import FirstOrderCRF, FirstOrderCRFModelRepresentation
modelrepr_class = FirstOrderCRFModelRepresentation
model_class = FirstOrderCRF
fextractor_class = FOFeatureExtractor
# generate attribute scaler if applicable
if class_desc[-1] != "None":
from pyseqlab.attributes_extraction import AttributeScaler
ascaler_class = AttributeScaler
else:
ascaler_class = None
trained_model = generate_updated_model(
modelparts_dir,
modelrepr_class,
model_class,
aextractor_obj,
fextractor_class,
seqrepresenter_class,
ascaler_class,
)
return trained_model
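# Illustrative sketch (the directory and attribute extractor are assumptions); the directory
# is expected to hold the saved model parts, including class_desc.txt:
#   crf_model = generate_trained_model("path/to/model_parts", attr_extractor)
#   crf_model.weights  # learned feature weights restored from disk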
def split_data(seqs_id, options):
r"""utility function for splitting dataset (i.e. training/testing and cross validation)
Args:
seqs_id: list of processed sequence ids
options: dictionary comprising of the options on how to split data
Example:
To perform cross validation, we need to specify
- cross-validation for the `method`
- the number of folds for the `k_fold`
::
options = {'method':'cross_validation',
'k_fold':number
}
To perform random splitting, we need to specify
- random for the `method`
- number of splits for the `num_splits`
- size of the training set in percentage for the `trainset_size`
::
options = {'method':'random',
'num_splits':number,
'trainset_size':percentage
}
"""
N = len(seqs_id)
data_split = {}
method = options.get("method")
if method == None:
method = "cross_validation"
if method == "cross_validation":
k_fold = options.get("k_fold")
if type(k_fold) != int:
# use 10 fold cross validation
k_fold = 10
elif k_fold <= 0:
k_fold = 10
batch_size = int(numpy.ceil(N / k_fold))
test_seqs = seqs_id.copy()
seqs_len = len(test_seqs)
# numpy.random.shuffle(test_seqs)
indx = numpy.arange(0, seqs_len + 1, batch_size)
if indx[-1] < seqs_len:
indx = numpy.append(indx, [seqs_len])
for i in range(len(indx) - 1):
data_split[i] = {}
current_test_seqs = test_seqs[indx[i] : indx[i + 1]]
data_split[i]["test"] = current_test_seqs
data_split[i]["train"] = list(set(seqs_id) - set(current_test_seqs))
elif method == "random":
num_splits = options.get("num_splits")
if type(num_splits) != int:
num_splits = 5
trainset_size = options.get("trainset_size")
if type(trainset_size) != int:
# 80% of the data set is training and 20% for testing
trainset_size = 80
elif trainset_size <= 0 or trainset_size >= 100:
trainset_size = 80
for i in range(num_splits):
data_split[i] = {}
current_train_seqs = numpy.random.choice(
seqs_id, int(N * trainset_size / 100), replace=False
)
data_split[i]["train"] = list(current_train_seqs)
data_split[i]["test"] = list(set(seqs_id) - set(current_train_seqs))
return data_split
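# Illustrative sketch of both splitting modes (the sequence ids are assumptions):
#   folds = split_data(list(range(100)), {'method': 'cross_validation', 'k_fold': 5})
#   folds[0]['train'], folds[0]['test']
#   splits = split_data(list(range(100)), {'method': 'random', 'num_splits': 3, 'trainset_size': 80})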
"""split data based on sequences length
we need to execute the three functions in order:
(1) :func:`group_seqs_by_length`
(2) :func:`weighted_sample`
(3) :func:`aggregate_weightedsample`
"""
def group_seqs_by_length(seqs_info):
"""group sequences by their length
Args:
        seqs_info: dictionary comprising info about the sequences
it has this form {seq_id:{T:length of sequence}}
.. note::
        sequences with a unique sequence length are grouped together as singletons
"""
grouped_seqs = {}
for seq_id, seq_info in seqs_info.items():
T = seq_info["T"]
if T in grouped_seqs:
grouped_seqs[T].append(seq_id)
else:
grouped_seqs[T] = [seq_id]
# loop to regroup single sequences
singelton = [T for T, seqs_id in grouped_seqs.items() if len(seqs_id) == 1]
singelton_seqs = []
for T in singelton:
singelton_seqs += grouped_seqs[T]
del grouped_seqs[T]
grouped_seqs["singleton"] = singelton_seqs
return grouped_seqs
def weighted_sample(grouped_seqs, trainset_size):
"""get a random split of the grouped sequences
Args:
grouped_seqs: dictionary of the grouped sequences based on their length
it is obtained using :func:`group_seqs_by_length` function
trainset_size: integer representing the size of the training set in percentage
"""
options = {"method": "random", "num_splits": 1, "trainset_size": trainset_size}
wsample = {}
for group_var, seqs_id in grouped_seqs.items():
# quota = trainset_size*count_seqs[group_var]/total
data_split = split_data(seqs_id, options)
wsample[group_var] = data_split[0]
return wsample
def aggregate_weightedsample(w_sample):
"""represent the random picked sample for training/testing
Args:
w_sample: dictionary representing a random split of the grouped sequences
by their length. it is obtained using :func:`weighted_sample` function
"""
wdata_split = {"train": [], "test": []}
for grouping_var in w_sample:
for data_cat in w_sample[grouping_var]:
wdata_split[data_cat] += w_sample[grouping_var][data_cat]
return {0: wdata_split}
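# Illustrative sketch of the three-step split by sequence length (seqs_info is an assumption):
#   grouped = group_seqs_by_length({1: {'T': 10}, 2: {'T': 10}, 3: {'T': 7}})
#   wsample = weighted_sample(grouped, trainset_size=80)
#   data_split = aggregate_weightedsample(wsample)  # {0: {'train': [...], 'test': [...]}}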
##################################
def nested_cv(seqs_id, outer_kfold, inner_kfold):
"""generate nested cross-validation division of sequence ids
"""
outer_split = split_data(
seqs_id, {"method": "cross_validation", "k_fold": outer_kfold}
)
cv_hierarchy = {}
for outerfold, outer_datasplit in outer_split.items():
cv_hierarchy["{}_{}".format("outer", outerfold)] = outer_datasplit
curr_train_seqs = outer_datasplit["train"]
inner_split = split_data(
curr_train_seqs, {"method": "cross_validation", "k_fold": inner_kfold}
)
for innerfold, inner_datasplit in inner_split.items():
cv_hierarchy[
"{}_{}_{}_{}".format("outer", outerfold, "inner", innerfold)
] = inner_datasplit
return cv_hierarchy
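# Illustrative sketch (sequence ids and fold counts are assumptions):
#   cv = nested_cv(list(range(20)), outer_kfold=4, inner_kfold=3)
#   cv['outer_0']['train']          # training ids of the first outer fold
#   cv['outer_0_inner_2']['test']   # test ids of an inner fold within it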
def get_conll00():
current_dir = os.path.dirname(os.path.realpath(__file__))
root_dir = os.path.dirname(current_dir)
files_info = {
"train_short_main.txt": ("main", True, " "),
"train_short_none.txt": (("w", "pos"), True, " "),
"train_short_per_sequence.txt": ("per_sequence", True, " "),
}
for file_name in files_info:
parser = DataFileParser()
print(file_name)
file_path = os.path.join(root_dir, "tests", "dataset", "conll00", file_name)
for seq in parser.read_file(
file_path,
header=files_info[file_name][0],
y_ref=files_info[file_name][1],
column_sep=files_info[file_name][2],
):
print(seq)
if __name__ == "__main__":
pass
# get_conll00()
| 37.592898 | 145 | 0.547635 |
f7588f9831b205911cfc6947c1635fc66ee469d0 | 1,879 | py | Python | tests/test_commands.py | justengel/pybk8500 | 6a9748033c783a0081ec391359067dfb9dc83760 | [
"MIT"
] | null | null | null | tests/test_commands.py | justengel/pybk8500 | 6a9748033c783a0081ec391359067dfb9dc83760 | [
"MIT"
] | null | null | null | tests/test_commands.py | justengel/pybk8500 | 6a9748033c783a0081ec391359067dfb9dc83760 | [
"MIT"
] | null | null | null |
def test_names_to_values():
import pybk8500
cmd = pybk8500.CommandStatus(status=0x90)
assert cmd.status == 'Checksum incorrect'
assert cmd[3] == 0x90
cmd = pybk8500.CommandStatus(status='Parameter incorrect')
assert cmd.status == 'Parameter incorrect'
assert cmd[3] == 0xA0
cmd = pybk8500.SetRemoteOperation(operation=0)
assert cmd.operation == 'Front Panel'
assert cmd[3] == 0
cmd = pybk8500.SetRemoteOperation(operation='Remote')
assert cmd.operation == 'Remote'
assert cmd[3] == 1
cmd = pybk8500.LoadSwitch(value=0)
assert cmd.value == 'Off'
assert cmd[3] == 0
cmd = pybk8500.LoadSwitch(value='On')
assert cmd.value == 'On'
assert cmd[3] == 1
cmd = pybk8500.SetMode(value=0)
assert cmd.value == 'CC'
assert cmd[3] == 0
cmd = pybk8500.SetMode(value='CV')
assert cmd.value == 'CV'
assert cmd[3] == 1
cmd = pybk8500.SelectListOperation(operation=0)
assert cmd.value == 'CC'
assert cmd[3] == 0
cmd = pybk8500.SelectListOperation(operation='CV')
assert cmd.value == 'CV'
assert cmd[3] == 1
cmd = pybk8500.SetHowListsRepeat(repeat=0)
assert cmd.value == 'Once'
assert cmd[3] == 0
cmd = pybk8500.SetHowListsRepeat(repeat='Repeat')
assert cmd.value == 'Repeat'
assert cmd[3] == 1
cmd = pybk8500.SetMemoryPartition(scheme=1)
assert cmd.value == '1 file of 1000 list steps'
assert cmd[3] == 1
cmd = pybk8500.SetMemoryPartition(scheme='2 files of 500 list steps')
assert cmd.value == '2 files of 500 list steps'
assert cmd[3] == 2
cmd = pybk8500.SetTimerStateLoadOn(state=0)
assert cmd.value == 'disabled'
assert cmd[3] == 0
cmd = pybk8500.SetTimerStateLoadOn(state='enabled')
assert cmd.value == 'enabled'
assert cmd[3] == 1
if __name__ == '__main__':
test_names_to_values()
| 29.359375 | 73 | 0.646088 |
f75899682710b825a4a2780808cc8edb60bc6d6a | 107 | py | Python | tests/environment.py | linkml/linkml-csv | faab3ce6921d5558b2c552ad09077a0821d15b00 | [
"CC0-1.0"
] | 2 | 2021-08-05T16:00:35.000Z | 2021-08-22T22:47:14.000Z | tests/environment.py | linkml/linkml-csv | faab3ce6921d5558b2c552ad09077a0821d15b00 | [
"CC0-1.0"
] | 1 | 2021-08-09T17:57:15.000Z | 2021-08-22T23:47:08.000Z | tests/environment.py | linkml/linkml-csv | faab3ce6921d5558b2c552ad09077a0821d15b00 | [
"CC0-1.0"
] | null | null | null | from linkml_runtime.tests.support.test_environment import TestEnvironment
env = TestEnvironment(__file__)
| 26.75 | 73 | 0.869159 |
f758a59780503e88ef8d21698f3fe033ef9a5a93 | 7,258 | py | Python | scripts/python/blend_sat/ncvdefs.py | OSADP/Pikalert-Vehicle-Data-Translator- | 295da604408f6f13af0301b55476a81311459386 | [
"Apache-2.0"
] | 2 | 2020-06-03T15:59:50.000Z | 2020-12-21T11:11:57.000Z | scripts/python/blend_sat/ncvdefs.py | OSADP/Pikalert-Vehicle-Data-Translator- | 295da604408f6f13af0301b55476a81311459386 | [
"Apache-2.0"
] | null | null | null | scripts/python/blend_sat/ncvdefs.py | OSADP/Pikalert-Vehicle-Data-Translator- | 295da604408f6f13af0301b55476a81311459386 | [
"Apache-2.0"
] | 2 | 2019-10-02T06:47:23.000Z | 2020-02-02T18:32:23.000Z | #!/usr/bin/env python
#
# File containing standard definitions for various things
#
# File suffixes or extensions
ADDS_SUFFIX = "bin"
ASC_SUFFIX = "asc"
BUFR_SUFFIX = "bufr"
EPL_SUFFIX = "epl"
GINI_SUFFIX = "gini"
GRB_SUFFIX = "grb"
GRB2_SUFFIX = "grb2"
GZIP_SUFFIX = "gz"
HDF_SUFFIX = "h5"
NC_SUFFIX = "nc"
NETCDF_SUFFIX = "nc"
PKL_SUFFIX = "pkl"
PYL_SUFFIX = "pyl"
# File format names - used for naming subdirs
Ascii = "ascii"
Grib = "grib"
Grib2 = "grib2"
Hdf = "hdf"
Mdv = "mdv"
Netcdf = "netcdf"
# Time period names - used for naming subdirs
Daily = "daily"
History = "history"
Hourly = "hourly"
Init = "init"
# Index file information
Index_base = "index"
Index_period = 24
Index_wait = 0
Index_out_base = "index_out"
Index_prior_days = 1
# Forecast time variables
Fcst_hours = [0,1,2,3,4,5,6,7,8,9,10,11,12]
Fcst_int_fcst_tol = 3600
Fcst_int_ver_tol = 3600
Fcst_int_num_days = 2
# Names of things
DM_base = "DM"
Fcst_gen_base = "fcst_gen"
Fcst_listing_base = "fcst_listing"
Hrrr_base = "hrrr"
Lamp_base = "LAMP"
Metar_base = "METAR"
Metar_interp_base = "metar_interp"
Model_base = "model"
Ncv_base = "NCV"
Ncv_fcst_base = "ncvfcst"
Ruc_base = "RUC"
Ruc_base_raw = "ruc"
Ruc_ta_base = "ruc_ta"
Ruc_ta_op_base = "ruc_ta_op"
Rr_base_raw = "WRF-RR"
Rr_base = "wrf-rr"
Gini_base = ""
Gini2nc_out_base = "ch_combine"
Persist_base = "PERSIST"
Projection_out_base = "proj"
Cloud_mask_base = "cloud"
Obs_paths_base = "obs_paths"
Score_fcst_base = "score_fcst"
Score_history_base = "score_history"
Site_cont_file_base = "Site_cont"
Site_fcst_file_base = "Site_fcst"
Site_fcst_int_base = "Site_fcst_int"
Station_name_id = "stn_name" # name of station id array in forecast and contingency files
# Satellite
sat_channels = ["11","3_9"]
blend_time_window = 45*60 # in seconds
Max_sat_files_to_process = 4
# Output field list for Analysis, forecast products
Var_list = ["CEILING", "VISIBILITY", "FLIGHTCAT", "CEILINGCONF", "VISIBILITYCONF", "FLIGHTCATCONF"]
Fcst_var_list = ["CEILING", "VISIBILITY", "FLIGHTCAT"]
# Name schema information. This is a hash table set up so that you can
# get at the schema formats and patterns if you know the file base name
# (the key).
#
Name_schema_formats = {
DM_base : "%B.%D.i%I.f%F.%S",
Fcst_gen_base : "%B.%D.i%I.f%F.%S",
Fcst_listing_base : "%B.%D.i%I.f%F.%S",
Hrrr_base : "%B.%D.i%I.f%F.%S",
Lamp_base : "%B.%D.i%I.f%F.%S",
Metar_base : "%B.%D.i%I.f%F.%S",
Metar_interp_base : "%B.%D.%I%F.%S",
Model_base : "%B.%D.i%I.f%F.%S",
Ncv_base : "%D%I_%B_%F.%S",
Ncv_fcst_base : "%B.%D.i%I.f%F.%S",
Obs_paths_base : "%B.%D.i%I.f%F.%S",
Persist_base : "%B.%D.i%I.f%F.%S",
Rr_base : "%B.%D.i%I.f%F.%S",
Rr_base_raw : "%D_i%I_f%F_%B.%S",
Ruc_base : "%B.%D.i%I.f%F.%S",
Ruc_base_raw : "%B.%D.i%I.f%F.%S",
Ruc_ta_base : "%B.%D.i%I.f%F.%S",
Ruc_ta_op_base : "%B.%D.i%I.f%F.%S",
Score_fcst_base : "%B.%D.i%I.f%F.%S",
Score_history_base : "%B.%D.i%I.f%F.%S",
Site_cont_file_base : "%B.%D.i%I.f%F.%S",
Site_fcst_file_base : "%B.%D.i%I.f%F.%S",
Site_fcst_int_base : "%B.%D.i%I.f%F.%S"
}
Name_schema_patterns = {
DM_base + ASC_SUFFIX : [DM_base, "YYYYMMDD", "HH", "HH", ASC_SUFFIX],
DM_base + HDF_SUFFIX : [DM_base, "YYYYMMDD", "HH", "HH", HDF_SUFFIX],
DM_base + NC_SUFFIX : [DM_base, "YYYYMMDD", "HH", "HH", NC_SUFFIX],
Fcst_gen_base + NC_SUFFIX : [Fcst_gen_base, "YYYYMMDD", "HH", "HH", NC_SUFFIX],
Fcst_listing_base + ASC_SUFFIX : [Fcst_listing_base, "YYYYMMDD", "HH", "HH", ASC_SUFFIX],
Hrrr_base : [Hrrr_base, "YYYYMMDD", "HH", "HH", GRB2_SUFFIX],
Lamp_base + ASC_SUFFIX : [Lamp_base, "YYYYMMDD", "HH", "HH", ASC_SUFFIX],
Lamp_base + HDF_SUFFIX : [Lamp_base, "YYYYMMDD", "HH", "HH", HDF_SUFFIX],
Lamp_base + NC_SUFFIX : [Lamp_base, "YYYYMMDD", "HH", "HH", NC_SUFFIX],
Metar_base + HDF_SUFFIX : [Metar_base, "YYYYMMDD", "HH", "HH", HDF_SUFFIX],
Metar_base + NC_SUFFIX : [Metar_base, "YYYYMMDD", "HH", "HH", NC_SUFFIX],
Metar_base + ASC_SUFFIX : [Metar_base, "YYYYMMDD", "HH", "HH", ASC_SUFFIX],
Metar_interp_base : [Metar_interp_base, "YYYYMMDD", "HHMM", "", NC_SUFFIX],
Model_base : ["", "YYYYMMDD", "HH", "HH", NC_SUFFIX],
Ncv_base : ["YYYYMMDD", "HHMM", Ncv_base, "H", GRB_SUFFIX],
Ncv_fcst_base : [Ncv_base, "YYYYMMDD", "HH", "HH", NC_SUFFIX],
Obs_paths_base : [Obs_paths_base, "YYYYMMDD", "HH", "HH", ASC_SUFFIX],
Persist_base + ASC_SUFFIX : [Persist_base, "YYYYMMDD", "HH", "HH", ASC_SUFFIX],
Persist_base + NC_SUFFIX : [Persist_base, "YYYYMMDD", "HH", "HH", NC_SUFFIX],
Ruc_base + ASC_SUFFIX : [Ruc_base, "YYYYMMDD", "HH", "HH", ASC_SUFFIX],
Ruc_base + HDF_SUFFIX : [Ruc_base, "YYYYMMDD", "HH", "HH", HDF_SUFFIX],
Ruc_base + NC_SUFFIX : [Ruc_base, "YYYYMMDD", "HH", "HH", NC_SUFFIX],
Ruc_base_raw : [Ruc_base_raw, "YYYYMMDD", "HH", "HH", GRB2_SUFFIX],
Ruc_ta_base : [Ruc_ta_base, "YYYYMMDD", "HH", "HH", NC_SUFFIX],
Rr_base : [Rr_base, "YYYYMMDD", "HH", "HH", GRB2_SUFFIX],
Rr_base_raw : ["YYYYMMDD", "HH", "HHH", Rr_base_raw, GRB2_SUFFIX],
Ruc_ta_op_base : [Ruc_ta_op_base, "YYYYMMDD", "HH", "HH", NC_SUFFIX],
Score_fcst_base : [Score_fcst_base, "YYYYMMDD", "HH", "HH", NC_SUFFIX],
Score_history_base + ASC_SUFFIX : [Score_history_base, "YYYYMMDD", "HH", "HH", ASC_SUFFIX],
Site_cont_file_base + ASC_SUFFIX : [Site_cont_file_base, "YYYYMMDD", "HH", "HH", ASC_SUFFIX],
Site_cont_file_base + NC_SUFFIX : [Site_cont_file_base, "YYYYMMDD", "HH", "HH", NC_SUFFIX],
Site_fcst_file_base + ASC_SUFFIX : [Site_fcst_file_base, "YYYYMMDD", "HH", "HH", ASC_SUFFIX],
Site_fcst_file_base + NC_SUFFIX : [Site_fcst_file_base, "YYYYMMDD", "HH", "HH", NC_SUFFIX],
Site_fcst_int_base + NC_SUFFIX : [Site_fcst_int_base, "YYYYMMDD", "HH", "HH", NC_SUFFIX],
Site_fcst_int_base + ASC_SUFFIX : [Site_fcst_int_base, "YYYYMMDD", "HH", "HH", NC_SUFFIX]
}
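# Illustrative lookups into the two tables above:
#   Name_schema_formats[Metar_base]               # "%B.%D.i%I.f%F.%S"
#   Name_schema_patterns[Metar_base + NC_SUFFIX]  # [Metar_base, "YYYYMMDD", "HH", "HH", NC_SUFFIX]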
| 46.229299 | 118 | 0.539267 |
f758c7f6537fbb8fe57f7d960b91163b1684542f | 414 | py | Python | implore/contact.py | skeptycal/implore | 18035c13b94e9277658d07baaaa519b41513f1d5 | [
"MIT",
"Unlicense"
] | null | null | null | implore/contact.py | skeptycal/implore | 18035c13b94e9277658d07baaaa519b41513f1d5 | [
"MIT",
"Unlicense"
] | null | null | null | implore/contact.py | skeptycal/implore | 18035c13b94e9277658d07baaaa519b41513f1d5 | [
"MIT",
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
""" Part of the `AutoSys` package
copyright (c) 2019 Michael Treanor
https://www.github.com/skeptycal/autosys
https://www.twitter.com/skeptycal
`AutoSys` is licensed under the `MIT License
`<https://opensource.org/licenses/MIT>`
"""
__license__ = "MIT"
class Contact_List(list):
pass
# joe = Contact("Joe", "+555555555", "Somewhere Else")
| 25.875 | 58 | 0.625604 |
f758c9c3c59b2323ce863aa66f0ffa57f7ffe669 | 1,709 | py | Python | for_imagenet/make_df_imagenet.py | tak-sakumoto/formatomato | 4713338135b2ac3960cc2f9a6f017199853cdc52 | [
"MIT"
] | null | null | null | for_imagenet/make_df_imagenet.py | tak-sakumoto/formatomato | 4713338135b2ac3960cc2f9a6f017199853cdc52 | [
"MIT"
] | null | null | null | for_imagenet/make_df_imagenet.py | tak-sakumoto/formatomato | 4713338135b2ac3960cc2f9a6f017199853cdc52 | [
"MIT"
] | null | null | null | import pandas as pd
from PIL import Image
from pathlib import Path
def make_df_imagenet(dataset):
"""
    Make Pandas DataFrames (image-to-class mapping, image metadata, and class ids) from an ImageNet-style directory
"""
# Making lists of class columns
classes = list(Path(dataset).iterdir())
classes = [p.stem for p in classes if p.is_dir()]
class_ids = [i for i in range(len(classes))]
class_df_dict = {
'CLASS_ID': class_ids,
'CLASS': classes
}
# Making a Pandas Dataframe
class_df = pd.DataFrame(class_df_dict)
# Set IMAGE_ID as index
class_df = class_df.set_index('CLASS_ID')
image_ids = []
image_names = []
widths = []
heights = []
img_classes = []
# Making lists of image information columns
for _class in classes:
img_path_list = list((Path(dataset) / _class).glob('*.JPEG'))
for img_path in img_path_list:
img = Image.open(img_path)
image_names.append(img_path.name)
widths.append(img.width)
heights.append(img.height)
img_classes.append(_class)
image_ids = [i for i in range(len(image_names))]
image_df_dict = {
'IMAGE_ID': image_ids,
'IMAGE_NAME': image_names,
'WIDTH': widths,
'HEIGHT': heights
}
# Making a Pandas Dataframe
image_df = pd.DataFrame(image_df_dict)
# Set IMAGE_ID as index
image_df = image_df.set_index('IMAGE_ID')
df_dict = {
'IMAGE_ID': image_ids,
'IMAGE_NAME': image_names,
'CLASS': img_classes
}
# Making a Pandas Dataframe
df = pd.DataFrame(df_dict)
# Set IMAGE_ID as index
df = df.set_index('IMAGE_ID')
return df, image_df, class_df
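# Illustrative sketch (the dataset path is an assumption); `dataset` should point at an
# ImageNet-style folder containing one sub-directory of *.JPEG files per class:
#   df, image_df, class_df = make_df_imagenet("/path/to/imagenet/train")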
| 24.768116 | 69 | 0.613224 |
f758f8137e12b5019a86ae0c59dcfac15b4f2754 | 1,133 | py | Python | clients/client/python/test/test_submit_self_service_settings_flow_with_lookup_method_body.py | sproutfi/sdk | 5340b37d7b3e8f3c1b8f4c0c16ede05488498620 | [
"Apache-2.0"
] | null | null | null | clients/client/python/test/test_submit_self_service_settings_flow_with_lookup_method_body.py | sproutfi/sdk | 5340b37d7b3e8f3c1b8f4c0c16ede05488498620 | [
"Apache-2.0"
] | null | null | null | clients/client/python/test/test_submit_self_service_settings_flow_with_lookup_method_body.py | sproutfi/sdk | 5340b37d7b3e8f3c1b8f4c0c16ede05488498620 | [
"Apache-2.0"
] | null | null | null | """
Ory APIs
Documentation for all public and administrative Ory APIs. Administrative APIs can only be accessed with a valid Personal Access Token. Public APIs are mostly used in browsers. # noqa: E501
The version of the OpenAPI document: v0.0.1-alpha.71
Contact: support@ory.sh
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import ory_client
from ory_client.model.submit_self_service_settings_flow_with_lookup_method_body import SubmitSelfServiceSettingsFlowWithLookupMethodBody
class TestSubmitSelfServiceSettingsFlowWithLookupMethodBody(unittest.TestCase):
"""SubmitSelfServiceSettingsFlowWithLookupMethodBody unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testSubmitSelfServiceSettingsFlowWithLookupMethodBody(self):
"""Test SubmitSelfServiceSettingsFlowWithLookupMethodBody"""
# FIXME: construct object with mandatory attributes with example values
# model = SubmitSelfServiceSettingsFlowWithLookupMethodBody() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 30.621622 | 194 | 0.765225 |
f759071c0f22a55808078a5530bb400e6beac4de | 313 | py | Python | python/divide_and_conquer/0241_different_ways_to_add_parentheses.py | linshaoyong/leetcode | ea052fad68a2fe0cbfa5469398508ec2b776654f | [
"MIT"
] | 6 | 2019-07-15T13:23:57.000Z | 2020-01-22T03:12:01.000Z | python/divide_and_conquer/0241_different_ways_to_add_parentheses.py | linshaoyong/leetcode | ea052fad68a2fe0cbfa5469398508ec2b776654f | [
"MIT"
] | null | null | null | python/divide_and_conquer/0241_different_ways_to_add_parentheses.py | linshaoyong/leetcode | ea052fad68a2fe0cbfa5469398508ec2b776654f | [
"MIT"
] | 1 | 2019-07-24T02:15:31.000Z | 2019-07-24T02:15:31.000Z | class Solution(object):
def diffWaysToCompute(self, input):
"""
:type input: str
:rtype: List[int]
"""
def test_diff_ways_to_compute():
s = Solution()
assert [0, 2] == s.diffWaysToCompute("2-1-1")
assert [-34, -14, -10, -10, 10] == s.diffWaysToCompute("2*3-4*5")
| 24.076923 | 69 | 0.5623 |
f7591e2bcfc6581618fc921d0934663aa46466ea | 782 | py | Python | table_test.py | strickyak/aphid | 12469858facdc9d7f110bf6c895e58eae9fb728f | [
"MIT"
] | 2 | 2015-05-25T10:47:30.000Z | 2017-12-12T18:15:00.000Z | table_test.py | strickyak/aphid | 12469858facdc9d7f110bf6c895e58eae9fb728f | [
"MIT"
] | null | null | null | table_test.py | strickyak/aphid | 12469858facdc9d7f110bf6c895e58eae9fb728f | [
"MIT"
] | null | null | null | from go import os
from go import path.filepath as F
import table
D ='/tmp/_aphid_test_skiplist_'
try:
os.RemoveAll(D)
except:
pass
os.Mkdir(D, 0777)
fd = os.Create(F.Join(D, 't.001'))
print >>fd, '''
# comment
+0012345.0{tab}color{tab}red
+0012345.0{tab}flavor{tab}lime
+0012345.0{tab}size{tab}XL
;
# overrides
+0024680.X{tab}color{tab}purple
;
# does not override, too old.
+0012345.!{tab}flavor{tab}durian
;
'''.format(tab='\t')
fd.Close()
t = table.Table(D)
must 'purple' == t.Get('color')
must 'lime' == t.Get('flavor')
must 'XL' == t.Get('size')
must t.Get('bogus') == None
t.Put('color', 'pink')
t.Put('flavor', 'lychee')
t2 = table.Table(D)
must 'pink' == t.Get('color')
must 'lychee' == t.Get('flavor')
must 'XL' == t.Get('size')
must t.Get('bogus') == None
pass
| 17.377778 | 34 | 0.64578 |
f759291a9b889532433b133745164006cd72e6b6 | 28,950 | py | Python | sdk/deviceupdate/azure-mgmt-deviceupdate/azure/mgmt/deviceupdate/operations/_instances_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/deviceupdate/azure-mgmt-deviceupdate/azure/mgmt/deviceupdate/operations/_instances_operations.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/deviceupdate/azure-mgmt-deviceupdate/azure/mgmt/deviceupdate/operations/_instances_operations.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class InstancesOperations(object):
"""InstancesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~device_update.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_account(
self,
resource_group_name, # type: str
account_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.InstanceList"]
"""Returns instances for the given account name.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param account_name: Account name.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either InstanceList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~device_update.models.InstanceList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.InstanceList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_account.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[A-Za-z0-9]+(-[A-Za-z0-9]+)*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('InstanceList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_account.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DeviceUpdate/accounts/{accountName}/instances'} # type: ignore
def get(
self,
resource_group_name, # type: str
account_name, # type: str
instance_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.Instance"
"""Returns instance details for the given instance and account name.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param account_name: Account name.
:type account_name: str
:param instance_name: Instance name.
:type instance_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Instance, or the result of cls(response)
:rtype: ~device_update.models.Instance
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Instance"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[A-Za-z0-9]+(-[A-Za-z0-9]+)*$'),
'instanceName': self._serialize.url("instance_name", instance_name, 'str', max_length=36, min_length=3, pattern=r'^[A-Za-z0-9]+(-[A-Za-z0-9]+)*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Instance', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DeviceUpdate/accounts/{accountName}/instances/{instanceName}'} # type: ignore
def head(
self,
resource_group_name, # type: str
account_name, # type: str
instance_name, # type: str
**kwargs # type: Any
):
# type: (...) -> bool
"""Checks whether instance exists.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param account_name: Account name.
:type account_name: str
:param instance_name: Instance name.
:type instance_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01-preview"
accept = "application/json"
# Construct URL
url = self.head.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[A-Za-z0-9]+(-[A-Za-z0-9]+)*$'),
'instanceName': self._serialize.url("instance_name", instance_name, 'str', max_length=36, min_length=3, pattern=r'^[A-Za-z0-9]+(-[A-Za-z0-9]+)*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.head(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
return 200 <= response.status_code <= 299
head.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DeviceUpdate/accounts/{accountName}/instances/{instanceName}'} # type: ignore
def _create_initial(
self,
resource_group_name, # type: str
account_name, # type: str
instance_name, # type: str
instance, # type: "_models.Instance"
**kwargs # type: Any
):
# type: (...) -> "_models.Instance"
cls = kwargs.pop('cls', None) # type: ClsType["_models.Instance"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[A-Za-z0-9]+(-[A-Za-z0-9]+)*$'),
'instanceName': self._serialize.url("instance_name", instance_name, 'str', max_length=36, min_length=3, pattern=r'^[A-Za-z0-9]+(-[A-Za-z0-9]+)*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(instance, 'Instance')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Instance', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DeviceUpdate/accounts/{accountName}/instances/{instanceName}'} # type: ignore
def begin_create(
self,
resource_group_name, # type: str
account_name, # type: str
instance_name, # type: str
instance, # type: "_models.Instance"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.Instance"]
"""Creates or updates instance.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param account_name: Account name.
:type account_name: str
:param instance_name: Instance name.
:type instance_name: str
:param instance: Instance details.
:type instance: ~device_update.models.Instance
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either Instance or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~device_update.models.Instance]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Instance"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_initial(
resource_group_name=resource_group_name,
account_name=account_name,
instance_name=instance_name,
instance=instance,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Instance', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[A-Za-z0-9]+(-[A-Za-z0-9]+)*$'),
'instanceName': self._serialize.url("instance_name", instance_name, 'str', max_length=36, min_length=3, pattern=r'^[A-Za-z0-9]+(-[A-Za-z0-9]+)*$'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DeviceUpdate/accounts/{accountName}/instances/{instanceName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
account_name, # type: str
instance_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01-preview"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[A-Za-z0-9]+(-[A-Za-z0-9]+)*$'),
'instanceName': self._serialize.url("instance_name", instance_name, 'str', max_length=36, min_length=3, pattern=r'^[A-Za-z0-9]+(-[A-Za-z0-9]+)*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DeviceUpdate/accounts/{accountName}/instances/{instanceName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
account_name, # type: str
instance_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes instance.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param account_name: Account name.
:type account_name: str
:param instance_name: Instance name.
:type instance_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
account_name=account_name,
instance_name=instance_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[A-Za-z0-9]+(-[A-Za-z0-9]+)*$'),
'instanceName': self._serialize.url("instance_name", instance_name, 'str', max_length=36, min_length=3, pattern=r'^[A-Za-z0-9]+(-[A-Za-z0-9]+)*$'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DeviceUpdate/accounts/{accountName}/instances/{instanceName}'} # type: ignore
def update(
self,
resource_group_name, # type: str
account_name, # type: str
instance_name, # type: str
tag_update_payload, # type: "_models.TagUpdate"
**kwargs # type: Any
):
# type: (...) -> "_models.Instance"
"""Updates instance's tags.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param account_name: Account name.
:type account_name: str
:param instance_name: Instance name.
:type instance_name: str
:param tag_update_payload: Updated tags.
:type tag_update_payload: ~device_update.models.TagUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Instance, or the result of cls(response)
:rtype: ~device_update.models.Instance
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Instance"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3, pattern=r'^[A-Za-z0-9]+(-[A-Za-z0-9]+)*$'),
'instanceName': self._serialize.url("instance_name", instance_name, 'str', max_length=36, min_length=3, pattern=r'^[A-Za-z0-9]+(-[A-Za-z0-9]+)*$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(tag_update_payload, 'TagUpdate')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Instance', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DeviceUpdate/accounts/{accountName}/instances/{instanceName}'} # type: ignore
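# Illustrative usage sketch (assumes `client` is the service client instance that exposes
# this operation group as `client.instances`, as described in the class docstring):
#   for instance in client.instances.list_by_account("my-rg", "my-account"):
#       print(instance.name)
#   poller = client.instances.begin_delete("my-rg", "my-account", "my-instance")
#   poller.result()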
| 50.347826 | 205 | 0.656304 |
f759405eae3f0972d492d671bf41c7e59cc6fc60 | 1,965 | py | Python | ch01_arrays_and_strings/1.5_one-away.py | appletreeisyellow/cracking-the-coding-interview | dc04c1c1e75a58bd543a298520f4595425f38104 | [
"MIT"
] | null | null | null | ch01_arrays_and_strings/1.5_one-away.py | appletreeisyellow/cracking-the-coding-interview | dc04c1c1e75a58bd543a298520f4595425f38104 | [
"MIT"
] | null | null | null | ch01_arrays_and_strings/1.5_one-away.py | appletreeisyellow/cracking-the-coding-interview | dc04c1c1e75a58bd543a298520f4595425f38104 | [
"MIT"
] | null | null | null | import unittest
"""
1.5 One Away
There are three types of edits that can be performed on
strings: insert a character, remove a character, or
replace a character. Given two strings, write a function
to check if they are one edit (or zero edits) away.
EXAMPLE
pale, ple -> true
pales, pale -> true
pale, bale -> true
pale, bae -> false
"""
def is_one_away(str1, str2):
len1 = len(str1)
len2 = len(str2)
if len1 == len2:
return is_one_char_replaceable(str1, str2)
elif len1 + 1 == len2:
return is_one_char_insertable(str1, str2)
elif len2 + 1 == len1:
return is_one_char_insertable(str2, str1)
else:
return False
def is_one_char_replaceable(str1, str2):
found_diff = False
for c1, c2 in zip(str1, str2):
if c1 != c2:
if found_diff:
return False
found_diff = True
return True
def is_one_char_insertable(str1, str2):
# assume length of str1 < str2
index1 = 0
index2 = 0
found_diff = False
while index1 < len(str1) and index2 < len(str2):
if str1[index1] != str2[index2]:
if found_diff:
return False
found_diff = True
index2 += 1
else:
index1 += 1
index2 += 1
return True
class Test(unittest.TestCase):
test_cases = [
# add test cases here, ("test-case", True)
("pale", "ple", True),
("pales" ,"pale", True),
("pale" ,"pales", True),
("pale", "bale", True),
("pale", "bae", False),
("pale", "palesd", False),
("pale", "btle", False),
]
test_functions = [
# add testing functions here
is_one_away,
]
def test(self):
for text1, text2, expected in self.test_cases:
for test_func in self.test_functions:
try:
assert (test_func(text1, text2) == expected), "Failed!"
except AssertionError as e:
e.args += (test_func.__name__, text1, text2, "should be " + str(expected))
raise
if __name__ == "__main__":
unittest.main() | 23.392857 | 84 | 0.619338 |
f75946835957a8acf98ace050c01ca03382cc071 | 3,283 | py | Python | run_combinations.py | ManeeshaPerera/forecast-framework | 60a22af4a97aec10c8bbea7f3f833061283382cb | [
"BSD-3-Clause"
] | null | null | null | run_combinations.py | ManeeshaPerera/forecast-framework | 60a22af4a97aec10c8bbea7f3f833061283382cb | [
"BSD-3-Clause"
] | null | null | null | run_combinations.py | ManeeshaPerera/forecast-framework | 60a22af4a97aec10c8bbea7f3f833061283382cb | [
"BSD-3-Clause"
] | 2 | 2022-03-20T10:30:38.000Z | 2022-03-22T06:39:14.000Z | from combinations.equal_weight import EqualWeight
from combinations.pso_model import PSO
from combinations.recursive_method import RecursiveEnsemble
import constants as const
import pandas as pd
import numpy as np
def run_combinations(horizon, forecast, forecast_test, data_train, data_out_sample):
weights = {'weight': [], 'method': [], 'comb_method': []}
horizon_info = const.HORIZON_INFO[horizon]
seasonality = horizon_info['arima_params'][
'seasonal_freq']
methods = forecast.columns.tolist()
pso_initial_options = {'c1': [0, 10],
'c2': [0, 10],
'w': [0, 10],
'k': [1, 20],
'p': 2}
num_pso_particles = 100
# Run equal weight
equal_weight = EqualWeight(forecast)
equal_weight.find_weights()
add_weights(weights, equal_weight.weights, methods, 'average')
eq_fc = equal_weight.get_forecast(forecast)
eq_fc_test = equal_weight.get_forecast(forecast_test)
# Run PSO
dimension = len(forecast.columns)
pso = PSO(forecast, data_train, data_out_sample, dimension, num_pso_particles,
horizon_info['horizon_as_int'],
seasonality, options=pso_initial_options)
pso.hyper_parameter_search()
pso.find_weights()
add_weights(weights, pso.weights, methods, 'pso- unconstrained')
pso_fc = pso.get_forecast(forecast)
pso_fc_test = pso.get_forecast(forecast_test)
# PSO with bounds
pso_b = PSO(forecast, data_train, data_out_sample, dimension, num_pso_particles,
horizon_info['horizon_as_int'],
seasonality, options=pso_initial_options, bounds=(np.array([0, 0, 0, 0, 0]), np.array([1, 1, 1, 1, 1])))
pso_b.hyper_parameter_search()
pso_b.find_weights()
add_weights(weights, pso_b.weights, methods, 'pso [0,1]')
pso_b_fc = pso_b.get_forecast(forecast)
pso_b_fc_test = pso_b.get_forecast(forecast_test)
# Add to Unity
pso_b.weights = pso_b.weights / pso_b.weights.sum()
add_weights(weights, pso_b.weights, methods, 'pso- convex')
pso_b_fc_scaled = pso_b.get_forecast(forecast)
pso_b_fc_test_scaled = pso_b.get_forecast(forecast_test)
# Run recursive ensemble
print("start recursive ensemble")
matrix = np.identity(len(forecast.columns))
re = RecursiveEnsemble(forecast, data_train, data_out_sample, horizon_info['horizon_as_int'], matrix, seasonality,
0.001)
re.find_weights()
add_weights(weights, re.weights, methods, 're')
re_fc = re.get_forecast(forecast)
re_fc_test = re.get_forecast(forecast_test)
train = pd.concat([pso_fc, pso_b_fc, pso_b_fc_scaled, eq_fc, re_fc], axis=1)
train.columns = ['pso- unconstrained', 'pso [0,1]', 'pso- convex', 'average', 're']
test = pd.concat([pso_fc_test, pso_b_fc_test, pso_b_fc_test_scaled, eq_fc_test, re_fc_test], axis=1)
test.columns = ['pso- unconstrained', 'pso [0,1]', 'pso- convex', 'average', 're']
return train, test, pd.DataFrame(weights)
def add_weights(dic, weights, methods, comb_name):
for w in range(0, len(weights)):
dic['weight'].append(weights[w])
dic['method'].append(methods[w])
dic['comb_method'].append(comb_name)
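# Usage sketch (illustrative only -- the file names and the horizon key below are
# assumptions, not part of this module; any key of const.HORIZON_INFO would do):
#
#   forecasts_train = pd.read_csv('forecasts_train.csv', index_col=0)
#   forecasts_test = pd.read_csv('forecasts_test.csv', index_col=0)
#   actuals_train = pd.read_csv('actuals_train.csv', index_col=0)
#   actuals_out = pd.read_csv('actuals_out_sample.csv', index_col=0)
#   train_fc, test_fc, weights = run_combinations('hourly', forecasts_train,
#                                                 forecasts_test, actuals_train,
#                                                 actuals_out)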
| 39.554217 | 120 | 0.672251 |
f7595e283208a3c72d22c1fb4ad67499563ddad1 | 481 | py | Python | orderbook_veinte/orderbook/serializers/user_serializer.py | morwen1/hyperion | f0d77a6cce6a366555e9f0ca0080f3da134862bf | [
"MIT"
] | null | null | null | orderbook_veinte/orderbook/serializers/user_serializer.py | morwen1/hyperion | f0d77a6cce6a366555e9f0ca0080f3da134862bf | [
"MIT"
] | null | null | null | orderbook_veinte/orderbook/serializers/user_serializer.py | morwen1/hyperion | f0d77a6cce6a366555e9f0ca0080f3da134862bf | [
"MIT"
] | null | null | null |
#RESTFRAMEWORK
from rest_framework import serializers
#MODELS
from orderbook_veinte.orderbook.models import Orders , OrderStatus
class OrdersStatusSerializer(serializers.ModelSerializer):
class Meta:
model = OrderStatus
fields = ('status' ,)
class UserOrderSerializer(serializers.ModelSerializer):
status = OrdersStatusSerializer(read_only=True)
class Meta:
        model = Orders
fields = ('Bid' , 'price' , 'qty' , 'close_qty' , 'status') | 28.294118 | 67 | 0.717256 |
f75968bd86296466d947e2ad0a811abf6895a0cc | 2,376 | py | Python | api/preprint_providers/serializers.py | hmoco/osf.io | a02869f9b5c198bafae7cea0c216674bbcba62f7 | [
"Apache-2.0"
] | 1 | 2015-10-02T18:35:53.000Z | 2015-10-02T18:35:53.000Z | api/preprint_providers/serializers.py | hmoco/osf.io | a02869f9b5c198bafae7cea0c216674bbcba62f7 | [
"Apache-2.0"
] | 4 | 2016-05-13T14:24:16.000Z | 2017-03-30T15:28:31.000Z | api/preprint_providers/serializers.py | hmoco/osf.io | a02869f9b5c198bafae7cea0c216674bbcba62f7 | [
"Apache-2.0"
] | null | null | null | from rest_framework import serializers as ser
from api.base.utils import absolute_reverse
from api.base.serializers import JSONAPISerializer, LinksField, RelationshipField
class PreprintProviderSerializer(JSONAPISerializer):
filterable_fields = frozenset([
'name',
'description',
'id'
])
name = ser.CharField(required=True)
description = ser.CharField(required=False)
id = ser.CharField(max_length=200, source='_id')
advisory_board = ser.CharField(required=False, allow_null=True)
email_contact = ser.CharField(required=False, allow_null=True)
email_support = ser.CharField(required=False, allow_null=True)
example = ser.CharField(required=False, allow_null=True)
domain = ser.CharField(required=False, allow_null=False)
domain_redirect_enabled = ser.CharField(required=False, allow_null=False)
social_twitter = ser.CharField(required=False, allow_null=True)
social_facebook = ser.CharField(required=False, allow_null=True)
social_instagram = ser.CharField(required=False, allow_null=True)
header_text = ser.CharField(required=False, allow_null=True)
subjects_acceptable = ser.JSONField(required=False, allow_null=True)
logo_path = ser.CharField(read_only=True)
banner_path = ser.CharField(read_only=True)
preprints = RelationshipField(
related_view='preprint_providers:preprints-list',
related_view_kwargs={'provider_id': '<_id>'}
)
taxonomies = RelationshipField(
related_view='preprint_providers:taxonomy-list',
related_view_kwargs={'provider_id': '<_id>'}
)
licenses_acceptable = RelationshipField(
related_view='preprint_providers:license-list',
related_view_kwargs={'provider_id': '<_id>'}
)
links = LinksField({
'self': 'get_absolute_url',
'preprints': 'get_preprints_url',
'external_url': 'get_external_url'
})
class Meta:
type_ = 'preprint_providers'
def get_absolute_url(self, obj):
return obj.absolute_api_v2_url
def get_preprints_url(self, obj):
return absolute_reverse('preprint_providers:preprints-list', kwargs={
'provider_id': obj._id,
'version': self.context['request'].parser_context['kwargs']['version']
})
def get_external_url(self, obj):
return obj.external_url
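# Illustrative response shape (abridged and hand-written -- not generated from real
# data; field values are placeholders):
#
#   {
#     "id": "osf",
#     "type": "preprint_providers",
#     "attributes": {"name": "Open Science Framework", ...},
#     "links": {"self": "...", "preprints": "...", "external_url": "..."}
#   }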
| 35.462687 | 82 | 0.708754 |
f759773bff22215f99aeb7cd4c02e4f0c313be51 | 3,446 | py | Python | var/spack/repos/builtin/packages/podman/package.py | BenWibking/spack | 49b3b43a4a9375210b578635d9240875a5f3106b | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2,360 | 2017-11-06T08:47:01.000Z | 2022-03-31T14:45:33.000Z | var/spack/repos/builtin/packages/podman/package.py | BenWibking/spack | 49b3b43a4a9375210b578635d9240875a5f3106b | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 13,838 | 2017-11-04T07:49:45.000Z | 2022-03-31T23:38:39.000Z | var/spack/repos/builtin/packages/podman/package.py | flatironinstitute/spack | 71a7b1b5fadbe16bcdb36fb679aa828cd7d83b02 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1,793 | 2017-11-04T07:45:50.000Z | 2022-03-30T14:31:53.000Z | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Podman(Package):
"""An optionally rootless and daemonless container engine: alias docker=podman"""
homepage = 'https://podman.io'
url = 'https://github.com/containers/podman/archive/v3.4.2.tar.gz'
maintainers = ['bernhardkaindl']
version('3.4.2', sha256='b0c4f9a11eb500b1d440d5e51a6c0c632aa4ac458e2dc0362f50f999eb7fbf31')
depends_on('go', type='build')
depends_on('go-md2man', type='build')
depends_on('pkgconfig', type='build')
depends_on('cni-plugins', type='run')
depends_on('conmon', type='run')
depends_on('runc', type='run')
depends_on('slirp4netns', type='run')
depends_on('gpgme')
depends_on('libassuan')
depends_on('libgpg-error')
depends_on('libseccomp')
def patch(self):
defs = FileFilter('vendor/github.com/containers/common/pkg/config/default.go')
# Prepend the provided runc executable to podman's built-in runc search path
defs.filter(
'"runc": {',
'"runc": {' + '"{0}",'.format(self.spec['runc'].prefix.sbin.runc)
)
# Prepend the provided conmon executable to podman's built-in conmon search path
defs.filter(
r'ConmonPath = \[\]string{',
'ConmonPath = []string{' +
'\n "{0}",'.format(self.spec['conmon'].prefix.bin.conmon)
)
# Prepend the provided cni-plugins directory to the cni-plugin search path
defs.filter(
r'DefaultCNIPluginDirs = \[\]string{',
'DefaultCNIPluginDirs = []string{' +
'\n "{0}",'.format(self.spec['cni-plugins'].prefix.bin)
)
# Set the default path for slirp4netns to the provided slirp4netns executable
defs.filter(
'cniConfig := _cniConfigDir',
'cniConfig := _cniConfigDir' +
'\n defaultEngineConfig.NetworkCmdPath = "{0}"'.format(
self.spec['slirp4netns'].prefix.bin.slirp4netns
)
)
# Use the podman install prefix as fallback path for finding container.conf
filter_file(
r'/usr',
self.prefix,
'vendor/github.com/containers/common/pkg/config/config.go',
)
def install(self, spec, prefix):
# Set default policy.json to be located in the install prefix (documented)
env['EXTRA_LDFLAGS'] = (
'-X github.com/containers/image/v5/signature.systemDefaultPolicyPath=' +
prefix + '/etc/containers/policy.json'
)
# Build and installation needs to be in two separate make calls
# The devicemapper and btrfs drivers are (so far) not enabled in this recipe
tags = 'seccomp exclude_graphdriver_devicemapper exclude_graphdriver_btrfs'
make('-e', 'BUILDTAGS=' + tags)
make('install', 'PREFIX=' + prefix)
# Install an initial etc/containers/policy.json (configured in prefix above)
mkdirp(prefix.etc.containers)
install('test/policy.json', prefix.etc.containers)
# Cleanup directory trees which are created as part of the go build process
remove_linked_tree(prefix.src)
remove_linked_tree(prefix.pkg)
| 42.02439 | 95 | 0.624782 |
f759830e4bcb7357e6849657d19d5c253389a8bc | 22,105 | py | Python | mars/web/session.py | deka108/mars | 2cd39847c188bb690dd5e2d612a5cbe9f7b21eca | [
"Apache-2.0"
] | 1 | 2021-11-30T12:07:21.000Z | 2021-11-30T12:07:21.000Z | mars/web/session.py | deka108/mars | 2cd39847c188bb690dd5e2d612a5cbe9f7b21eca | [
"Apache-2.0"
] | null | null | null | mars/web/session.py | deka108/mars | 2cd39847c188bb690dd5e2d612a5cbe9f7b21eca | [
"Apache-2.0"
] | null | null | null | # Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import json
import time
import logging
import pickle
import sys
import uuid
from io import BytesIO
from numbers import Integral
import numpy as np
from ..config import options
from ..core.operand import Fetch
from ..errors import ResponseMalformed, ExecutionInterrupted, ExecutionFailed, \
ExecutionStateUnknown, ExecutionNotStopped
from ..serialize import dataserializer
from ..serialize.dataserializer import pyarrow
from ..tensor.core import Indexes
from ..utils import build_tileable_graph, sort_dataframe_result, \
numpy_dtype_from_descr_json, serialize_graph, serialize_serializable
logger = logging.getLogger(__name__)
class Session(object):
def __init__(self, endpoint, session_id=None, req_session=None, verify_ssl=True,
**session_kwargs):
self._endpoint = endpoint.rstrip('/')
self._session_id = session_id
self._session_kwargs = session_kwargs
# dict structure: {tileable_key -> graph_key, tileable_ids}
# dict value is a tuple object which records graph key and tileable id
self._executed_tileables = dict()
self._serial_type = None
self._pickle_protocol = pickle.HIGHEST_PROTOCOL
if req_session:
self._req_session = req_session
else:
import requests
from requests.adapters import HTTPAdapter
self._req_session = requests.Session()
            self._req_session.mount('http://', HTTPAdapter(max_retries=5))
self._req_session.verify = verify_ssl
if not verify_ssl:
try:
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
except ImportError: # pragma: no cover
pass
self._main()
@property
def session_id(self):
return self._session_id
@property
def endpoint(self):
return self._endpoint
@endpoint.setter
def endpoint(self, url):
self._endpoint = url
def _main(self):
if pyarrow is None:
self._serial_type = dataserializer.SerialType.PICKLE
else:
self._serial_type = dataserializer.SerialType(options.client.serial_type.lower())
session_kw = self._session_kwargs.copy()
session_kw['pyver'] = '.'.join(str(v) for v in sys.version_info[:3])
session_kw['pickle_protocol'] = self._pickle_protocol
if pyarrow is not None:
session_kw['arrow_version'] = pyarrow.__version__
if self._session_id is None:
resp = self._req_session.post(self._endpoint + '/api/session', data=session_kw)
if resp.status_code >= 400:
raise SystemError('Failed to create mars session: ' + resp.reason)
else:
resp = self._req_session.get(
self._endpoint + '/api/session/' + self._session_id, params=session_kw)
if resp.status_code == 404:
raise ValueError(f'The session with id = {self._session_id} doesn\'t exist')
if resp.status_code >= 400:
raise SystemError('Failed to check mars session.')
content = json.loads(resp.text)
self._session_id = content['session_id']
self._pickle_protocol = content.get('pickle_protocol', pickle.HIGHEST_PROTOCOL)
# as pyarrow will use pickle.HIGHEST_PROTOCOL to pickle, we need to use
# SerialType.PICKLE when pickle protocol between client and server
# does not agree with each other
if not content.get('arrow_compatible') or self._pickle_protocol != pickle.HIGHEST_PROTOCOL:
self._serial_type = dataserializer.SerialType.PICKLE
def _get_tileable_graph_key(self, tileable_key):
return self._executed_tileables[tileable_key][0]
def _set_tileable_graph_key(self, tileable, graph_key):
tileable_key = tileable.key
tileable_id = tileable.id
if tileable_key in self._executed_tileables:
self._executed_tileables[tileable_key][1].add(tileable_id)
else:
self._executed_tileables[tileable_key] = graph_key, {tileable_id}
@staticmethod
def _handle_json_response(resp, allow_empty=True, raises=True):
try:
resp_txt = resp.text
if allow_empty:
resp_txt = resp_txt or '{}'
resp_json = json.loads(resp_txt)
except json.JSONDecodeError:
text_part = resp.text if len(resp.text) < 100 else resp.text[:100] + '...'
raise ResponseMalformed(f'Failed to parse server response. Status={resp.status_code} '
f'Response="{text_part}"')
if raises and resp.status_code >= 400:
exc_info = pickle.loads(base64.b64decode(resp_json['exc_info']))
raise exc_info[1].with_traceback(exc_info[2])
return resp_json
def _check_response_finished(self, graph_url, timeout=None):
import requests
try:
resp = self._req_session.get(graph_url, params={'wait_timeout': timeout})
except requests.ConnectionError as ex:
err_msg = str(ex)
if 'ConnectionResetError' in err_msg or 'Connection refused' in err_msg or \
'Connection aborted' in err_msg:
return False
raise
if resp.status_code == 504:
logging.debug('Gateway Time-out, try again')
return False
if resp.status_code >= 400:
raise SystemError(f'Failed to obtain execution status. Code: {resp.status_code}, '
f'Reason: {resp.reason}, Content:\n{resp.text}')
resp_json = self._handle_json_response(resp, raises=False)
if resp_json['state'] == 'succeeded':
return True
elif resp_json['state'] in ('running', 'preparing'):
return False
elif resp_json['state'] in ('cancelled', 'cancelling'):
raise ExecutionInterrupted
elif resp_json['state'] == 'failed':
if 'exc_info' in resp_json:
exc_info = pickle.loads(base64.b64decode(resp_json['exc_info']))
exc = exc_info[1].with_traceback(exc_info[2])
raise ExecutionFailed('Graph execution failed.') from exc
else:
raise ExecutionFailed('Graph execution failed with unknown reason.')
raise ExecutionStateUnknown('Unknown graph execution state ' + resp_json['state'])
def run(self, *tileables, **kw):
timeout = kw.pop('timeout', -1)
compose = kw.pop('compose', True)
fetch = kw.pop('fetch', True)
name = kw.pop('name', None)
if kw:
raise TypeError(f'run got unexpected key arguments {kw!r}')
# those executed tileables should fetch data directly, submit the others
run_tileables = [t for t in tileables if t.key not in self._executed_tileables]
if name is not None:
if not isinstance(name, (list, tuple)):
name = [name]
if len(name) != len(tileables):
raise TypeError('Name must match execute tileables')
name = ','.join(name)
graph = build_tileable_graph(run_tileables, set(self._executed_tileables.keys()))
targets = [t.key for t in run_tileables]
if len(graph) > 0:
targets_join = ','.join(targets)
session_url = self._endpoint + '/api/session/' + self._session_id
serialized_graph = serialize_graph(graph)
resp_json = self._submit_graph(serialized_graph, targets_join, names=name or '', compose=compose)
graph_key = resp_json['graph_key']
graph_url = f'{session_url}/graph/{graph_key}'
exec_start_time = time.time()
time_elapsed = 0
check_interval = options.check_interval
while timeout <= 0 or time_elapsed < timeout:
timeout_val = min(check_interval, timeout - time_elapsed) if timeout > 0 else check_interval
try:
if self._check_response_finished(graph_url, timeout_val):
break
except KeyboardInterrupt:
resp = self._req_session.delete(graph_url)
if resp.status_code >= 400:
raise ExecutionNotStopped(
f'Failed to stop graph execution. Code: {resp.status_code}, '
f'Reason: {resp.reason}, Content:\n{resp.text}')
finally:
time_elapsed = time.time() - exec_start_time
if 0 < timeout < time.time() - exec_start_time:
raise TimeoutError
for t in tileables:
self._set_tileable_graph_key(t, graph_key)
if not fetch:
return
else:
return self.fetch(*tileables)
def _is_executed(self, tileable):
        # if tileable.key in executed tileables
# or it's a fetch already
return tileable.key in self._executed_tileables or \
isinstance(tileable.op, Fetch)
def fetch(self, *tileables, **kw):
from ..tensor.indexing import TensorIndex
from ..dataframe.indexing.iloc import DataFrameIlocGetItem, SeriesIlocGetItem
timeout = kw.pop('timeout', None)
if kw:
raise TypeError(f'fetch got unexpected key arguments {kw!r}')
results = list()
for tileable in tileables:
if tileable.key not in self._executed_tileables and \
isinstance(tileable.op, (TensorIndex, DataFrameIlocGetItem, SeriesIlocGetItem)):
to_fetch_tileable = tileable.inputs[0]
indexes = tileable.op.indexes
if not all(isinstance(ind, (slice, Integral)) for ind in indexes):
raise ValueError('Only support fetch data slices')
else:
to_fetch_tileable = tileable
indexes = []
if not self._is_executed(to_fetch_tileable):
raise ValueError('Cannot fetch the unexecuted tileable')
key = to_fetch_tileable.key
indexes_str = base64.b64encode(
serialize_serializable(Indexes(indexes=indexes))).decode('ascii')
session_url = f'{self._endpoint}/api/session/{self._session_id}'
compression_str = ','.join(v.value for v in dataserializer.get_supported_compressions())
params = dict(compressions=compression_str, slices=indexes_str,
serial_type=self._serial_type.value, pickle_protocol=self._pickle_protocol)
data_url = f'{session_url}/graph/{self._get_tileable_graph_key(key)}/data/{key}'
resp = self._req_session.get(data_url, params=params, timeout=timeout)
if resp.status_code >= 400:
raise ValueError(f'Failed to fetch data from server. Code: {resp.status_code}, '
f'Reason: {resp.reason}, Content:\n{resp.text}')
result_data = dataserializer.loads(resp.content)
results.append(sort_dataframe_result(tileable, result_data))
return results
@classmethod
def _process_int_or_dict_argument(cls, argument, name, params):
if argument is None:
return
if not isinstance(argument, dict):
params[name] = argument
else:
params[name] = ','.join(f'{k}={v}' for k, v in argument.items())
def fetch_tileable_op_logs(self, tileable_op_key, offsets=None, sizes=None):
url = f'{self._endpoint}/api/session/{self._session_id}/op/{tileable_op_key}/log'
params = dict()
self._process_int_or_dict_argument(offsets, 'offsets', params)
self._process_int_or_dict_argument(sizes, 'sizes', params)
resp = self._req_session.get(url, params=params)
if resp.status_code >= 400:
raise ValueError(f'Failed to fetch log from server. Code: {resp.status_code}, '
f'Reason: {resp.reason}, Content:\n{resp.text}')
return json.loads(resp.content)
def fetch_log(self, tileables, offsets=None, sizes=None):
from ..custom_log import fetch
return fetch(tileables, self, offsets=offsets, sizes=sizes)
def get_named_tileable_infos(self, name):
from ..context import TileableInfos
url = f'{self._endpoint}/api/session/{self._session_id}'
params = dict(name=name)
resp = self._req_session.get(url, params=params)
if resp.status_code >= 400: # pragma: no cover
raise ValueError(f'Failed to get tileable key from server. Code: {resp.status_code}, '
f'Reason: {resp.reason}, Content:\n{resp.text}')
tileable_key = self._handle_json_response(resp)['tileable_key']
nsplits, extra_meta = self._get_tileable_meta(tileable_key)
shape = tuple(sum(s) for s in nsplits)
return TileableInfos(tileable_key, shape, extra_meta)
def create_mutable_tensor(self, name, shape, dtype, fill_value=None, chunk_size=None, *_, **__):
from ..tensor.utils import create_mutable_tensor
session_url = f'{self._endpoint}/api/session/{self._session_id}'
tensor_url = f'{session_url}/mutable-tensor/{name}?action=create'
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
# avoid built-in scalar dtypes are made into one-field record type.
if dtype.fields:
dtype_descr = dtype.descr
else:
dtype_descr = str(dtype)
tensor_json = {
'shape': shape,
'dtype': dtype_descr,
'fill_value': fill_value,
'chunk_size': chunk_size,
}
resp = self._req_session.post(tensor_url, json=tensor_json)
shape, dtype, chunk_size, chunk_keys, chunk_eps = self._handle_json_response(resp)
return create_mutable_tensor(name, chunk_size, shape, numpy_dtype_from_descr_json(dtype),
chunk_keys, chunk_eps)
def get_mutable_tensor(self, name):
from ..tensor.utils import create_mutable_tensor
session_url = f'{self._endpoint}/api/session/{self._session_id}'
tensor_url = f'{session_url}/mutable-tensor/{name}'
resp = self._req_session.get(tensor_url)
shape, dtype, chunk_size, chunk_keys, chunk_eps = self._handle_json_response(resp)
return create_mutable_tensor(name, chunk_size, shape, numpy_dtype_from_descr_json(dtype),
chunk_keys, chunk_eps)
def write_mutable_tensor(self, tensor, index, value):
"""
How to serialize index and value:
1. process_index and serialize it as json
2. the payload of POST request:
* a int64 value indicate the size of index json
* ascii-encoded bytes of index json
* pyarrow serialized bytes of `value`
"""
from ..tensor.core import Indexes
from ..serialize import dataserializer
index = Indexes(indexes=index)
index_bytes = base64.b64encode(serialize_serializable(index))
bio = BytesIO()
bio.write(np.int64(len(index_bytes)).tobytes())
bio.write(index_bytes)
dataserializer.dump(value, bio)
session_url = f'{self._endpoint}/api/session/{self._session_id}'
tensor_url = f'{session_url}/mutable-tensor/{tensor.name}'
resp = self._req_session.put(tensor_url, data=bio.getvalue(),
headers={'Content-Type': 'application/octet-stream'})
self._handle_json_response(resp)
def seal(self, tensor):
from ..tensor.utils import create_fetch_tensor
session_url = f'{self._endpoint}/api/session/{self._session_id}'
tensor_url = f'{session_url}/mutable-tensor/{tensor.name}?action=seal'
resp = self._req_session.post(tensor_url)
graph_key_hex, tileable_key, tensor_id, tensor_meta = self._handle_json_response(resp)
self._executed_tileables[tileable_key] = uuid.UUID(graph_key_hex), {tensor_id}
        # Construct Tensor on the fly.
shape, dtype, chunk_size, chunk_keys, _ = tensor_meta
return create_fetch_tensor(chunk_size, shape, numpy_dtype_from_descr_json(dtype),
tensor_key=tileable_key, chunk_keys=chunk_keys)
def _get_tileable_nsplits(self, tileable_key):
session_url = f'{self._endpoint}/api/session/{self._session_id}'
graph_key = self._get_tileable_graph_key(tileable_key)
url = f'{session_url}/graph/{graph_key}/data/{tileable_key}?type=nsplits'
resp = self._req_session.get(url)
new_nsplits = self._handle_json_response(resp)
return new_nsplits
def _get_tileable_meta(self, tileable_key):
session_url = f'{self._endpoint}/api/session/{self._session_id}'
graph_key = self._get_tileable_graph_key(tileable_key)
url = f'{session_url}/graph/{graph_key}/data/{tileable_key}?type=meta'
resp = self._req_session.get(url)
meta = self._handle_json_response(resp)
return pickle.loads(base64.b64decode(meta)) # nosec
def _update_tileable_shape(self, tileable):
tileable_key = tileable.key
new_nsplits = self._get_tileable_nsplits(tileable_key)
tileable._update_shape(tuple(sum(nsplit) for nsplit in new_nsplits))
tileable.nsplits = new_nsplits
def decref(self, *keys):
for tileable_key, tileable_id in keys:
if tileable_key not in self._executed_tileables:
continue
graph_key, ids = self._executed_tileables[tileable_key]
if tileable_id in ids:
ids.remove(tileable_id)
# for those same key tileables, do decref only when all those tileables are garbage collected
if len(ids) != 0:
continue
self.delete_data(tileable_key)
def delete_data(self, tileable_key, wait=False):
if tileable_key not in self._executed_tileables:
return
graph_key, _ids = self._executed_tileables[tileable_key]
data_url = f'{self._endpoint}/api/session/{self._session_id}/graph/{graph_key}' \
f'/data/{tileable_key}?wait={1 if wait else 0}'
self._req_session.delete(data_url)
self._executed_tileables.pop(tileable_key, None)
def stop(self, graph_key):
session_url = f'{self._endpoint}/api/session/{self._session_id}'
graph_url = session_url + '/graph/' + graph_key
resp = self._req_session.delete(graph_url)
if resp.status_code >= 400:
raise SystemError(f'Failed to stop graph execution. Code: {resp.status_code}, '
f'Reason: {resp.reason}, Content:\n{resp.text}')
def _submit_graph(self, serialized_graph, targets, names=None, compose=True):
session_url = f'{self._endpoint}/api/session/{self._session_id}'
resp = self._req_session.post(session_url + '/graph', dict(
graph=base64.b64encode(serialized_graph).decode('ascii'),
target=targets,
names=names,
compose='1' if compose else '0'
))
return self._handle_json_response(resp)
def get_graph_states(self):
resp = self._req_session.get(f'{self._endpoint}/api/session/{self._session_id}/graph')
return self._handle_json_response(resp)
def close(self):
session_url = f'{self._endpoint}/api/session/{self._session_id}'
for key in list(self._executed_tileables.keys()):
self.delete_data(key, wait=True)
resp = self._req_session.delete(session_url)
if resp.status_code >= 400:
raise SystemError('Failed to close mars session.')
def check_service_ready(self, timeout=1):
import requests
try:
resp = self._req_session.get(self._endpoint + '/api', timeout=timeout)
except (requests.ConnectionError, requests.Timeout):
return False
if resp.status_code >= 400:
return False
return True
def count_workers(self):
resp = self._req_session.get(self._endpoint + '/api/worker?action=count', timeout=1)
return self._handle_json_response(resp)
def get_cpu_count(self):
resp = self._req_session.get(self._endpoint + '/api/worker?action=count_cpu', timeout=1)
return self._handle_json_response(resp)
def rescale_workers(self, new_scale, min_workers=None, wait=True, timeout=None):
data = json.dumps(dict(new_scale=new_scale, min_workers=min_workers))
wait_req = 1 if wait else 0
resp = self._req_session.patch(f'{self._endpoint}/api/worker?action=count&wait={wait_req}',
data, timeout=timeout)
return self._handle_json_response(resp)
def get_workers_meta(self):
resp = self._req_session.get(self._endpoint + '/api/worker', timeout=1)
return self._handle_json_response(resp)
def get_task_count(self):
resp = self._req_session.get(f'{self._endpoint}/api/session/{self._session_id}/graph')
return len(self._handle_json_response(resp))
def __enter__(self):
return self
def __exit__(self, *_):
self.close()
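# Minimal usage sketch (illustrative; the endpoint is a placeholder and assumes a
# running Mars web service reachable from the client):
#
#   import mars.tensor as mt
#   with Session('http://localhost:7103') as session:
#       t = mt.ones((100, 100)).sum()
#       result = session.run(t, timeout=120)    # submit the graph, wait and fetch
#       session.fetch(t)                        # fetch again without re-executing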
| 43.599606 | 109 | 0.637412 |
f75984683c30422d5523fd446980cf97f130084c | 2,436 | py | Python | .eggs/boto-2.48.0-py2.7.egg/boto/sdb/db/blob.py | MQQ/git-bigstore | 95f1e37fcda7fdce80502593cec31a44c604cf8a | [
"Apache-2.0"
] | null | null | null | .eggs/boto-2.48.0-py2.7.egg/boto/sdb/db/blob.py | MQQ/git-bigstore | 95f1e37fcda7fdce80502593cec31a44c604cf8a | [
"Apache-2.0"
] | null | null | null | .eggs/boto-2.48.0-py2.7.egg/boto/sdb/db/blob.py | MQQ/git-bigstore | 95f1e37fcda7fdce80502593cec31a44c604cf8a | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.compat import six
class Blob(object):
"""Blob object"""
def __init__(self, value=None, file=None, id=None):
self._file = file
self.id = id
self.value = value
@property
def file(self):
from StringIO import StringIO
if self._file:
f = self._file
else:
f = StringIO(self.value)
return f
def __str__(self):
return six.text_type(self).encode('utf-8')
def __unicode__(self):
if hasattr(self.file, "get_contents_as_string"):
value = self.file.get_contents_as_string()
else:
value = self.file.getvalue()
if isinstance(value, six.text_type):
return value
else:
return value.decode('utf-8')
def read(self):
if hasattr(self.file, "get_contents_as_string"):
return self.file.get_contents_as_string()
else:
return self.file.read()
def readline(self):
return self.file.readline()
def next(self):
return next(self.file)
def __iter__(self):
return iter(self.file)
@property
def size(self):
if self._file:
return self._file.size
elif self.value:
return len(self.value)
else:
return 0
| 31.636364 | 74 | 0.655583 |
f759bb46f3df8999dd717e22a0200a95b4ce2a85 | 3,141 | py | Python | data_utils/hdf5_utils.py | gvalvano/idas | e1b112c8d0cd17b2b8486435dfe9de477bca2221 | [
"Apache-2.0"
] | 29 | 2020-07-04T00:04:28.000Z | 2022-03-18T01:49:34.000Z | idas/data_utils/hdf5_utils.py | gvalvano/unet_crf_as_rnn | 31b79741b77614764dcf3d2690fe0b0fab44934d | [
"Apache-2.0"
] | 2 | 2020-10-31T14:41:02.000Z | 2021-11-21T18:16:19.000Z | data_utils/hdf5_utils.py | gvalvano/idas | e1b112c8d0cd17b2b8486435dfe9de477bca2221 | [
"Apache-2.0"
] | 7 | 2020-10-21T01:02:52.000Z | 2021-11-14T16:52:18.000Z | """
Utilities for hdf5 data
"""
# Copyright 2019 Gabriele Valvano
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import h5py
import os
import logging
def create_hdf5_db(x_train, y_train, x_validation, y_validation, x_test, y_test, db_name='data.h5'):
"""
Creates hdf5 database containing nodes for x_train, x_validation, x_test, y_train, y_validation, y_test.
Args:
x_train: Provide data to initialize the dataset.
y_train: Provide data to initialize the dataset.
x_validation: Provide data to initialize the dataset.
y_validation: Provide data to initialize the dataset.
x_test: Provide data to initialize the dataset.
y_test: Provide data to initialize the dataset.
db_name (str): Name of the dataset.
"""
print("Building database: " + db_name)
# Create a hdf5 dataset
h5f = h5py.File(db_name, 'w')
h5f.create_dataset('x_train', data=x_train)
h5f.create_dataset('y_train', data=y_train)
h5f.create_dataset('x_validation', data=x_validation)
h5f.create_dataset('y_validation', data=y_validation)
h5f.create_dataset('x_test', data=x_test)
h5f.create_dataset('y_test', data=y_test)
h5f.close()
print("Done.")
def get_data(db_name, key):
"""
    Returns the data stored under the node `key` in the HDF5 database named db_name.
Args:
db_name (str): Name of the dataset.
key (str): Name of the key in the dataset.
Returns:
The data under the given key.
"""
# Load hdf5 dataset
hdf5 = h5py.File(db_name, 'r')
data = hdf5[key] # i.e. xt = h5f['x_train']
logging.warning('Remember that the hdf5 dataset is still open.')
return data
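# Usage sketch (illustrative, with synthetic data; remember that get_data leaves
# the underlying HDF5 file open, as warned above):
#
#   import numpy as np
#   x = np.random.rand(10, 4)
#   y = np.random.randint(0, 2, size=(10, 1))
#   create_hdf5_db(x, y, x, y, x, y, db_name='toy.h5')
#   x_train = get_data('toy.h5', 'x_train')[:]   # slicing copies the data into memory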
def add_node(db_name, key, shape=None):
"""
Adds node with name key to hdf5 database.
Args:
db_name (str): Name of the dataset.
key (str): Name of the key in the dataset.
shape (tuple of int): Dataset shape. Use "()" for scalar datasets. Required if "data" isn't provided.
"""
if not os.path.isfile(db_name):
h5f = h5py.File(db_name, 'w')
else:
h5f = h5py.File(db_name, 'r+')
h5f.create_dataset(key, shape, maxshape=(None, 1))
h5f.close()
def update_node(db_name, key):
""" Change the content of a node. """
raise NotImplementedError
def add_elements_to_existing_node(db_name, key,):
""" Add elements below the node. """
# h5f = h5py.File(db_name, 'r+')
# h5f[key].resize((curr_num_samples, dimPatches, dimPatches, n_channel))
# h5f[key][curr_num_samples - 1, :, :, :] = imgMatrix
# h5f.close()
raise NotImplementedError
| 31.41 | 111 | 0.672716 |
f759cda21dbe457e3133af64b52b3118a76109d9 | 1,514 | py | Python | cog/command/beta.py | DrLarck/DiscordBallZ_ | c274e26efce4c5a757d258c54bc285d118618751 | [
"MIT"
] | 4 | 2020-01-19T13:53:43.000Z | 2020-01-20T13:34:17.000Z | cog/command/beta.py | DrLarck/DiscordBallZ_ | c274e26efce4c5a757d258c54bc285d118618751 | [
"MIT"
] | 18 | 2020-01-19T17:52:17.000Z | 2020-02-17T15:06:13.000Z | cog/command/beta.py | DrLarck/DiscordBallZ_ | c274e26efce4c5a757d258c54bc285d118618751 | [
"MIT"
] | 1 | 2020-10-08T19:59:42.000Z | 2020-10-08T19:59:42.000Z | """
BETA test commands
"""
# dependancies
import asyncio
import discord
from discord.ext import commands
# util
from utility.cog.player.player import Player
from utility.cog.combat_system.cpu import CPU
# characters
from utility.cog.character.list import c001_sabimen
from utility.cog.character.list import c002_sabimen
from utility.cog.character.list import c003_sabimen
class Cmd_beta(commands.Cog):
def __init__(self, client):
self.client = client
@commands.group()
async def beta(self, ctx):
return
@beta.command()
async def combat(self, ctx, user : discord.Member = None):
"""
new combat system
"""
# import
from utility.cog.combat_system.combat import Combat
from utility.cog.character.getter import Character_getter
# init
caller = Player(ctx, self.client, ctx.message.author)
if(user == None):
opponent = CPU()
opponent.name = "Test"
opponent.avatar = caller.avatar
else:
opponent = Player(ctx, self.client, user)
teams = [
{
"owner" : caller,
"team" : await caller.team.character()
},
{
"owner" : opponent,
"team" : await opponent.team.character()
}
]
combat = Combat(self.client, ctx, teams)
await combat.run()
def setup(client):
client.add_cog(Cmd_beta(client)) | 23.65625 | 65 | 0.589168 |
f759e814f57fe5ec1b432917ac4d2139e4b2f11d | 5,842 | py | Python | nanonisTCP/Piezo.py | New-Horizons-SPM/nanonisTCP | e810877fce41c4dc3d9ba2b18cbe619ff04c640a | [
"MIT"
] | null | null | null | nanonisTCP/Piezo.py | New-Horizons-SPM/nanonisTCP | e810877fce41c4dc3d9ba2b18cbe619ff04c640a | [
"MIT"
] | null | null | null | nanonisTCP/Piezo.py | New-Horizons-SPM/nanonisTCP | e810877fce41c4dc3d9ba2b18cbe619ff04c640a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Jun 9 22:51:52 2022
@author: jced0001
"""
class Piezo:
"""
Nanonis Piezo Module
"""
def __init__(self,NanonisTCP):
self.NanonisTCP = NanonisTCP
def TiltSet(self,tilt_x=None,tilt_y=None):
"""
Configures the tilt correction parameters. Passing in None for either x
or y tilt keeps the setting as is in nanonis.
Parameters
----------
tilt_x : Sets by which angle to correct the tilt in the X direction
tilt_y : Sets by which angle to correct the tilt in the Y direction
"""
n_tilt_x,n_tilt_y = self.TiltGet()
if(not tilt_x): tilt_x = n_tilt_x
if(not tilt_y): tilt_y = n_tilt_y
hex_rep = self.NanonisTCP.make_header('Piezo.TiltSet', body_size=8)
## Arguments
hex_rep += self.NanonisTCP.float32_to_hex(tilt_x)
hex_rep += self.NanonisTCP.float32_to_hex(tilt_y)
self.NanonisTCP.send_command(hex_rep)
self.NanonisTCP.receive_response(0)
def TiltGet(self):
"""
Returns the tilt correction parameters.
Returns
-------
tilt_x : Sets by which angle to correct the tilt in the X direction
tilt_y : Sets by which angle to correct the tilt in the Y direction
"""
hex_rep = self.NanonisTCP.make_header('Piezo.TiltGet', body_size=0)
self.NanonisTCP.send_command(hex_rep)
response = self.NanonisTCP.receive_response(8)
tilt_x = self.NanonisTCP.hex_to_float32(response[0:4])
tilt_y = self.NanonisTCP.hex_to_float32(response[4:8])
return [tilt_x,tilt_y]
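    # Usage sketch (illustrative; assumes a NanonisTCP connection object from this
    # package -- the host/port values below are placeholders):
    #
    #   NTCP = nanonisTCP('127.0.0.1', 6501)
    #   piezo = Piezo(NTCP)
    #   piezo.TiltSet(tilt_x=0.5)          # only X changes; Y keeps its current value
    #   tilt_x, tilt_y = piezo.TiltGet()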
def RangeSet(self,range_x=None,range_y=None,range_z=None):
"""
        Sets the piezo range (m) values for all 3 axes (X,Y,Z). Leaving a param as
        None means it will remain as is in nanonis.
Parameters
----------
range_x : range of the X piezo (m)
range_y : range of the Y piezo (m)
range_z : range of the Z piezo (m)
"""
n_range_x,n_range_y,n_range_z = self.RangeGet()
if(not range_x): range_x = n_range_x
if(not range_y): range_y = n_range_y
if(not range_z): range_z = n_range_z
hex_rep = self.NanonisTCP.make_header('Piezo.RangeSet', body_size=12)
## Arguments
hex_rep += self.NanonisTCP.float32_to_hex(range_x)
hex_rep += self.NanonisTCP.float32_to_hex(range_y)
hex_rep += self.NanonisTCP.float32_to_hex(range_z)
self.NanonisTCP.send_command(hex_rep)
self.NanonisTCP.receive_response(0)
def RangeGet(self):
"""
Returns the piezo range (m) for all 3 axes (X,Y,Z)
Returns
-------
range_x : range of the X piezo (m)
range_y : range of the Y piezo (m)
range_z : range of the Z piezo (m)
"""
hex_rep = self.NanonisTCP.make_header('Piezo.RangeGet', body_size=0)
self.NanonisTCP.send_command(hex_rep)
response = self.NanonisTCP.receive_response(12)
range_x = self.NanonisTCP.hex_to_float32(response[0:4])
range_y = self.NanonisTCP.hex_to_float32(response[4:8])
range_z = self.NanonisTCP.hex_to_float32(response[8:12])
return [range_x,range_y,range_z]
def DriftCompGet(self):
"""
Returns the drift compensation parameters
Returns
----------
on : True: Turn compensation on
False: Turn compensation off
vx : linear speed applied to the X piezo (m/s)
vy : linear speed applied to the Y piezo (m/s)
vz : linear speed applied to the Z piezo (m/s)
xsat : indicates if the X drift correction reached 10% of piezo range
ysat : indicates if the Y drift correction reached 10% of piezo range
zsat : indicates if the Z drift correction reached 10% of piezo range
"""
hex_rep = self.NanonisTCP.make_header('Piezo.DriftCompGet', body_size=0)
self.NanonisTCP.send_command(hex_rep)
response = self.NanonisTCP.receive_response(28)
status = self.NanonisTCP.hex_to_uint32(response[0:4])
vx = self.NanonisTCP.hex_to_float32(response[4:8])
vy = self.NanonisTCP.hex_to_float32(response[8:12])
vz = self.NanonisTCP.hex_to_float32(response[12:16])
xsat = self.NanonisTCP.hex_to_uint32(response[16:20])
ysat = self.NanonisTCP.hex_to_uint32(response[20:24])
zsat = self.NanonisTCP.hex_to_uint32(response[24:28])
return [status,vx,vy,vz,xsat,ysat,zsat]
def DriftCompSet(self,on,vx=[],vy=[],vz=[]):
"""
Configures the drift compensation parameters
Parameters
----------
on : True: Turn compensation on
False: Turn compensation off
vx : linear speed applied to the X piezo (m/s)
vy : linear speed applied to the Y piezo (m/s)
vz : linear speed applied to the Z piezo (m/s)
"""
_,n_vx,n_vy,n_vz,_,_,_ = self.DriftCompGet()
if(type(vx) == list): vx = n_vx
if(type(vy) == list): vy = n_vy
if(type(vz) == list): vz = n_vz
hex_rep = self.NanonisTCP.make_header('Piezo.DriftCompSet', body_size=16)
## Arguments
hex_rep += self.NanonisTCP.to_hex(on,4)
hex_rep += self.NanonisTCP.float32_to_hex(vx)
hex_rep += self.NanonisTCP.float32_to_hex(vy)
hex_rep += self.NanonisTCP.float32_to_hex(vz)
self.NanonisTCP.send_command(hex_rep)
self.NanonisTCP.receive_response(0)
| 33.768786 | 81 | 0.595515 |
f75a0f4db446801a45c5a3f60a615b5354d66e00 | 929 | py | Python | miniworld/model/network/connections/NodeDictMixin.py | miniworld-project/miniworld_core | c591bad232b78eae99e8f55cb1b907c1e228484b | [
"MIT"
] | 5 | 2019-05-11T14:57:15.000Z | 2021-07-05T00:35:25.000Z | miniworld/model/network/connections/NodeDictMixin.py | miniworld-project/miniworld_core | c591bad232b78eae99e8f55cb1b907c1e228484b | [
"MIT"
] | 27 | 2017-03-17T07:11:02.000Z | 2019-05-26T23:36:56.000Z | miniworld/model/network/connections/NodeDictMixin.py | miniworld-project/miniworld_core | c591bad232b78eae99e8f55cb1b907c1e228484b | [
"MIT"
] | 6 | 2017-05-03T12:11:33.000Z | 2020-04-03T11:44:27.000Z | from collections import UserDict
from miniworld.model.network.connections.JSONEncoder import JSONStrMixin
# TODO: REMOVE
class NodeDictMixin:
"""
"""
#########################################
# Structure Converting
#########################################
def to_ids(self):
"""
Convert all :py:class:`.EmulationNode` to their id.
Returns
-------
UserDict
All instances of EmulationNode replaced by their id.
Examples
--------
        >>> x = NodeDict({(EmulationNode(1), EmulationNode(2)): {'loss': 0.5, 'bandwidth': 500}})
        >>> x.to_ids()
        {('1', '2'): {'loss': 0.5, 'bandwidth': 500}}
"""
converted_to_ids = {(emu_node_x.id, emu_node_y.id): val_inner for (emu_node_x, emu_node_y), val_inner in self.items()}
return self.__class__(converted_to_ids)
class NodeDict(JSONStrMixin, UserDict):
pass
| 25.805556 | 126 | 0.53606 |
f75a16661a87dbecc8cedd5e924beb9f4374fc34 | 2,159 | py | Python | tests/test_restsession.py | nonstdout/dnacentersdk | dbbbc4baa5300aa9e5c9193f2ea71438018095f5 | [
"MIT"
] | null | null | null | tests/test_restsession.py | nonstdout/dnacentersdk | dbbbc4baa5300aa9e5c9193f2ea71438018095f5 | [
"MIT"
] | null | null | null | tests/test_restsession.py | nonstdout/dnacentersdk | dbbbc4baa5300aa9e5c9193f2ea71438018095f5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""dnacentersdk/restsession.py Fixtures & Tests
Copyright (c) 2019-2020 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import logging
import warnings
import pytest
import dnacentersdk
logging.captureWarnings(True)
# Helper Functions
def rate_limit_detected(w):
"""Check to see if a rate-limit warning is in the warnings list."""
while w:
if issubclass(w.pop().category, dnacentersdk.RateLimitWarning):
return True
return False
# Tests
@pytest.mark.ratelimit
def test_rate_limit_retry(api):
# Save state and initialize test setup
original_wait_on_rate_limit = api._session.wait_on_rate_limit
api._session.wait_on_rate_limit = True
with warnings.catch_warnings(record=True) as w:
devices = api.devices.get_device_list()
i = 0
while i < len(devices.response):
# Try and trigger a rate-limit
api.devices.get_device_config_by_id(path_network_device_id=devices.response[i].id)
i += 1
if rate_limit_detected(w):
break
api._session.wait_on_rate_limit = original_wait_on_rate_limit
| 34.269841 | 94 | 0.746642 |
f75a56fe2ad4c6f1e52c4f8ad4a1d1ffdcb129f8 | 675 | py | Python | plugins/maya/publish/avalon_scene_ready.py | davidlatwe/reveries-config | 4a282dd64a32a9b87bd1a070759b6425ff785d68 | [
"MIT"
] | 3 | 2020-04-01T10:51:17.000Z | 2021-08-05T18:35:23.000Z | plugins/maya/publish/avalon_scene_ready.py | davidlatwe/reveries-config | 4a282dd64a32a9b87bd1a070759b6425ff785d68 | [
"MIT"
] | null | null | null | plugins/maya/publish/avalon_scene_ready.py | davidlatwe/reveries-config | 4a282dd64a32a9b87bd1a070759b6425ff785d68 | [
"MIT"
] | 1 | 2020-07-05T12:06:30.000Z | 2020-07-05T12:06:30.000Z |
import pyblish.api
class AvalonSceneReady(pyblish.api.ContextPlugin):
"""標記場景為預備狀態
場景在被標記為預備狀態之後,如果有任何物件或數值的更動,狀態就會失效
"""
"""Define current scene in ready state
Collecte current undo count for later validation.
"""
order = pyblish.api.CollectorOrder + 0.49999
label = "進入預備狀態"
hosts = ["maya"]
def process(self, context):
from maya import cmds
from reveries.maya import capsule
# Ensure undo queue is active
cmds.undoInfo(state=True)
with capsule.OutputDeque() as undo_list:
cmds.undoInfo(query=True, printQueue=True)
context.data["_undoCount"] = len(undo_list)
| 20.454545 | 54 | 0.653333 |
f75a61995b4ae30d0fcab9ff9337eb6e8b7580f7 | 143 | py | Python | asking/types.py | cariad/asking | 482947f70e90928314b90c2db14a86908714125a | [
"MIT"
] | null | null | null | asking/types.py | cariad/asking | 482947f70e90928314b90c2db14a86908714125a | [
"MIT"
] | 4 | 2021-11-28T08:40:52.000Z | 2021-11-30T14:43:58.000Z | asking/types.py | cariad/asking | 482947f70e90928314b90c2db14a86908714125a | [
"MIT"
] | null | null | null | from typing import Any, Dict, List
AnyDict = Dict[Any, Any]
StageKey = str
StageType = List[Dict[str, Any]]
ScriptDict = Dict[str, StageType]
| 20.428571 | 34 | 0.727273 |
f75a72add157e7bf14dc4bda863495e83c235f3a | 239 | py | Python | moto/apigateway/__init__.py | argos83/moto | d3df810065c9c453d40fcc971f9be6b7b2846061 | [
"Apache-2.0"
] | 1 | 2021-03-06T22:01:41.000Z | 2021-03-06T22:01:41.000Z | moto/apigateway/__init__.py | marciogh/moto | d3df810065c9c453d40fcc971f9be6b7b2846061 | [
"Apache-2.0"
] | null | null | null | moto/apigateway/__init__.py | marciogh/moto | d3df810065c9c453d40fcc971f9be6b7b2846061 | [
"Apache-2.0"
] | 1 | 2017-10-19T00:53:28.000Z | 2017-10-19T00:53:28.000Z | from __future__ import unicode_literals
from .models import apigateway_backends
from ..core.models import MockAWS, base_decorator
apigateway_backend = apigateway_backends['us-east-1']
mock_apigateway = base_decorator(apigateway_backends)
| 34.142857 | 53 | 0.853556 |
f75a7d6c221afb66510c31a43b7cf9cbcfd6109e | 1,323 | py | Python | gpt2_model/generate_gpt2_embeddings.py | bclarkson-code/search-query-classification | 8928faad459ef97934a6dbcf38a9347da5662415 | [
"MIT"
] | null | null | null | gpt2_model/generate_gpt2_embeddings.py | bclarkson-code/search-query-classification | 8928faad459ef97934a6dbcf38a9347da5662415 | [
"MIT"
] | null | null | null | gpt2_model/generate_gpt2_embeddings.py | bclarkson-code/search-query-classification | 8928faad459ef97934a6dbcf38a9347da5662415 | [
"MIT"
] | null | null | null | import pickle
import torch
from tqdm.auto import tqdm
from gpt2_predictor import GPT2Predictor, GPT2TestSearchQueryDataModule
if __name__ == '__main__':
encoding = {
'Arts': 0,
'Business': 11,
'Computers': 10,
'Games': 12,
'Health': 9,
'Home': 6,
'News': 14,
'Recreation': 1,
'Reference': 13,
'Regional': 4,
'Science': 8,
'Shopping': 3,
'Society': 2,
'Sports': 5,
'World': 7
}
queries = GPT2TestSearchQueryDataModule(
'open_source.feather',
batch_size=128,
num_workers=0,
tokeniser_string='gpt2',
debug=False,
encoding=encoding,
)
queries.prepare_data()
queries.setup()
model = GPT2Predictor.load_from_checkpoint(
'gpt2-checkpoints/model-epoch=00-valid/loss=1.86.ckpt',
strict=False
)
test_data = queries.test_dataloader()
preds = []
with torch.no_grad():
for batch in tqdm(test_data, desc='Predicting'):
(input_ids, attention_mask), _ = batch
pred = model(
input_ids=input_ids,
attention_mask=attention_mask
)
preds.append(pred)
with open('test_preds.pkl', 'wb') as f:
        pickle.dump(preds, f)
| 24.054545 | 71 | 0.5548 |
f75a867708ece431d3ea8cc74d4ad9cdd51e457e | 1,023 | py | Python | setup.py | Man-who-sold-the-world/gdelt-doc-api | 9e2a5922aba7a56718fc6886e926e351e73597b4 | [
"MIT"
] | null | null | null | setup.py | Man-who-sold-the-world/gdelt-doc-api | 9e2a5922aba7a56718fc6886e926e351e73597b4 | [
"MIT"
] | null | null | null | setup.py | Man-who-sold-the-world/gdelt-doc-api | 9e2a5922aba7a56718fc6886e926e351e73597b4 | [
"MIT"
] | null | null | null | import setuptools
with open("requirements.txt", "r") as f:
requirements = [line.replace("\n", "") for line in f.readlines()]
with open("README.md", "r") as fh:
long_description = fh.read()
with open("gdeltdoc/__init__.py", "r") as g:
version = "1.0.0"
for line in g.readlines():
if "__version__" in line:
version = line.split("=")[1].replace("\n", "").replace('"', "").replace(" ", "")
setuptools.setup(
name="gdeltdoc",
version=version,
author="Alex Smith",
author_email="alex@alexsmith.dev",
description="A client for the GDELT 2.0 Doc API",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/alex9smith/gdelt-doc-api",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
install_requires=requirements,
) | 31.96875 | 92 | 0.632454 |
f75aa9e8630eaccc1fa403b8e1703afc0057c2bf | 29,298 | py | Python | azure-devops/azext_devops/vstsCompressed/member_entitlement_management/v4_0/models/models.py | vijayraavi/azure-devops-cli-extension | 88f1420c5815cb09bea15b050f4c553e0f326dad | [
"MIT"
] | null | null | null | azure-devops/azext_devops/vstsCompressed/member_entitlement_management/v4_0/models/models.py | vijayraavi/azure-devops-cli-extension | 88f1420c5815cb09bea15b050f4c553e0f326dad | [
"MIT"
] | 37 | 2020-04-27T07:45:19.000Z | 2021-04-05T07:27:15.000Z | azure-devops/azext_devops/vstsCompressed/member_entitlement_management/v4_0/models/models.py | vijayraavi/azure-devops-cli-extension | 88f1420c5815cb09bea15b050f4c553e0f326dad | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class AccessLevel(Model):
"""AccessLevel.
:param account_license_type:
:type account_license_type: object
:param assignment_source:
:type assignment_source: object
:param license_display_name:
:type license_display_name: str
:param licensing_source:
:type licensing_source: object
:param msdn_license_type:
:type msdn_license_type: object
:param status:
:type status: object
:param status_message:
:type status_message: str
"""
_attribute_map = {
'account_license_type': {'key': 'accountLicenseType', 'type': 'object'},
'assignment_source': {'key': 'assignmentSource', 'type': 'object'},
'license_display_name': {'key': 'licenseDisplayName', 'type': 'str'},
'licensing_source': {'key': 'licensingSource', 'type': 'object'},
'msdn_license_type': {'key': 'msdnLicenseType', 'type': 'object'},
'status': {'key': 'status', 'type': 'object'},
'status_message': {'key': 'statusMessage', 'type': 'str'}
}
def __init__(self, account_license_type=None, assignment_source=None, license_display_name=None, licensing_source=None, msdn_license_type=None, status=None, status_message=None):
super(AccessLevel, self).__init__()
self.account_license_type = account_license_type
self.assignment_source = assignment_source
self.license_display_name = license_display_name
self.licensing_source = licensing_source
self.msdn_license_type = msdn_license_type
self.status = status
self.status_message = status_message
class BaseOperationResult(Model):
"""BaseOperationResult.
:param errors: List of error codes paired with their corresponding error messages
:type errors: list of { key: int; value: str }
:param is_success: Success status of the operation
:type is_success: bool
"""
_attribute_map = {
'errors': {'key': 'errors', 'type': '[{ key: int; value: str }]'},
'is_success': {'key': 'isSuccess', 'type': 'bool'}
}
def __init__(self, errors=None, is_success=None):
super(BaseOperationResult, self).__init__()
self.errors = errors
self.is_success = is_success
class Extension(Model):
"""Extension.
:param assignment_source: Assignment source for this extension. I.e. explicitly assigned or from a group rule
:type assignment_source: object
:param id: Gallery Id of the Extension
:type id: str
:param name: Friendly name of this extension
:type name: str
:param source: Source of this extension assignment. Ex: msdn, account, none, ect.
:type source: object
"""
_attribute_map = {
'assignment_source': {'key': 'assignmentSource', 'type': 'object'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'source': {'key': 'source', 'type': 'object'}
}
def __init__(self, assignment_source=None, id=None, name=None, source=None):
super(Extension, self).__init__()
self.assignment_source = assignment_source
self.id = id
self.name = name
self.source = source
class GraphSubject(Model):
"""GraphSubject.
:param _links: This field contains zero or more interesting links about the graph subject. These links may be invoked to obtain additional relationships or more detailed information about this graph subject.
:type _links: :class:`ReferenceLinks <microsoft.-visual-studio.-services.-web-api.v4_0.models.ReferenceLinks>`
:param descriptor: The descriptor is the primary way to reference the graph subject while the system is running. This field will uniquely identify the same graph subject across both Accounts and Organizations.
:type descriptor: str
:param display_name: This is the non-unique display name of the graph subject. To change this field, you must alter its value in the source provider.
:type display_name: str
:param origin: The type of source provider for the origin identifier (ex:AD, AAD, MSA)
:type origin: str
    :param origin_id: The unique identifier from the system of origin. Typically a sid, object id or Guid. Linking and unlinking operations can cause this value to change for a user because the user is now backed by a different provider and has a different unique id in the new provider.
:type origin_id: str
:param subject_kind: This field identifies the type of the graph subject (ex: Group, Scope, User).
:type subject_kind: str
:param url: This url is the full route to the source resource of this graph subject.
:type url: str
"""
_attribute_map = {
'_links': {'key': '_links', 'type': 'ReferenceLinks'},
'descriptor': {'key': 'descriptor', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'origin': {'key': 'origin', 'type': 'str'},
'origin_id': {'key': 'originId', 'type': 'str'},
'subject_kind': {'key': 'subjectKind', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, _links=None, descriptor=None, display_name=None, origin=None, origin_id=None, subject_kind=None, url=None):
super(GraphSubject, self).__init__()
self._links = _links
self.descriptor = descriptor
self.display_name = display_name
self.origin = origin
self.origin_id = origin_id
self.subject_kind = subject_kind
self.url = url
class Group(Model):
"""Group.
:param display_name:
:type display_name: str
:param group_type:
:type group_type: object
"""
_attribute_map = {
'display_name': {'key': 'displayName', 'type': 'str'},
'group_type': {'key': 'groupType', 'type': 'object'}
}
def __init__(self, display_name=None, group_type=None):
super(Group, self).__init__()
self.display_name = display_name
self.group_type = group_type
class GroupEntitlement(Model):
"""GroupEntitlement.
:param extension_rules: Extension Rules
:type extension_rules: list of :class:`Extension <member-entitlement-management.v4_0.models.Extension>`
:param group: Member reference
:type group: :class:`GraphGroup <member-entitlement-management.v4_0.models.GraphGroup>`
:param id: The unique identifier which matches the Id of the GraphMember
:type id: str
:param license_rule: License Rule
:type license_rule: :class:`AccessLevel <member-entitlement-management.v4_0.models.AccessLevel>`
:param project_entitlements: Relation between a project and the member's effective permissions in that project
:type project_entitlements: list of :class:`ProjectEntitlement <member-entitlement-management.v4_0.models.ProjectEntitlement>`
:param status:
:type status: object
"""
_attribute_map = {
'extension_rules': {'key': 'extensionRules', 'type': '[Extension]'},
'group': {'key': 'group', 'type': 'GraphGroup'},
'id': {'key': 'id', 'type': 'str'},
'license_rule': {'key': 'licenseRule', 'type': 'AccessLevel'},
'project_entitlements': {'key': 'projectEntitlements', 'type': '[ProjectEntitlement]'},
'status': {'key': 'status', 'type': 'object'}
}
def __init__(self, extension_rules=None, group=None, id=None, license_rule=None, project_entitlements=None, status=None):
super(GroupEntitlement, self).__init__()
self.extension_rules = extension_rules
self.group = group
self.id = id
self.license_rule = license_rule
self.project_entitlements = project_entitlements
self.status = status
class GroupOperationResult(BaseOperationResult):
"""GroupOperationResult.
:param errors: List of error codes paired with their corresponding error messages
:type errors: list of { key: int; value: str }
:param is_success: Success status of the operation
:type is_success: bool
:param group_id: Identifier of the Group being acted upon
:type group_id: str
    :param result: Result of the GroupEntitlement after the operation
:type result: :class:`GroupEntitlement <member-entitlement-management.v4_0.models.GroupEntitlement>`
"""
_attribute_map = {
'errors': {'key': 'errors', 'type': '[{ key: int; value: str }]'},
'is_success': {'key': 'isSuccess', 'type': 'bool'},
'group_id': {'key': 'groupId', 'type': 'str'},
'result': {'key': 'result', 'type': 'GroupEntitlement'}
}
def __init__(self, errors=None, is_success=None, group_id=None, result=None):
super(GroupOperationResult, self).__init__(errors=errors, is_success=is_success)
self.group_id = group_id
self.result = result
class JsonPatchOperation(Model):
"""JsonPatchOperation.
:param from_: The path to copy from for the Move/Copy operation.
:type from_: str
:param op: The patch operation
:type op: object
:param path: The path for the operation
:type path: str
:param value: The value for the operation. This is either a primitive or a JToken.
:type value: object
"""
_attribute_map = {
'from_': {'key': 'from', 'type': 'str'},
'op': {'key': 'op', 'type': 'object'},
'path': {'key': 'path', 'type': 'str'},
'value': {'key': 'value', 'type': 'object'}
}
def __init__(self, from_=None, op=None, path=None, value=None):
super(JsonPatchOperation, self).__init__()
self.from_ = from_
self.op = op
self.path = path
self.value = value
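# Illustrative sketch only: how a JsonPatchOperation might be filled in when
# patching an entitlement. The "/accessLevel" path and the value shape are
# assumptions made for this example, not a contract defined in this module.
def _example_json_patch_operation():
    """Build a single 'replace' patch entry; callers typically send a list
    of such operations in one PATCH request body."""
    return JsonPatchOperation(
        op="replace",
        path="/accessLevel",
        value={"accountLicenseType": "express"},
    )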
class MemberEntitlement(Model):
"""MemberEntitlement.
:param access_level: Member's access level denoted by a license
:type access_level: :class:`AccessLevel <member-entitlement-management.v4_0.models.AccessLevel>`
:param extensions: Member's extensions
:type extensions: list of :class:`Extension <member-entitlement-management.v4_0.models.Extension>`
:param group_assignments: GroupEntitlements that this member belongs to
:type group_assignments: list of :class:`GroupEntitlement <member-entitlement-management.v4_0.models.GroupEntitlement>`
:param id: The unique identifier which matches the Id of the GraphMember
:type id: str
:param last_accessed_date: Date the Member last access the collection
:type last_accessed_date: datetime
:param member: Member reference
:type member: :class:`GraphMember <member-entitlement-management.v4_0.models.GraphMember>`
:param project_entitlements: Relation between a project and the member's effective permissions in that project
:type project_entitlements: list of :class:`ProjectEntitlement <member-entitlement-management.v4_0.models.ProjectEntitlement>`
"""
_attribute_map = {
'access_level': {'key': 'accessLevel', 'type': 'AccessLevel'},
'extensions': {'key': 'extensions', 'type': '[Extension]'},
'group_assignments': {'key': 'groupAssignments', 'type': '[GroupEntitlement]'},
'id': {'key': 'id', 'type': 'str'},
'last_accessed_date': {'key': 'lastAccessedDate', 'type': 'iso-8601'},
'member': {'key': 'member', 'type': 'GraphMember'},
'project_entitlements': {'key': 'projectEntitlements', 'type': '[ProjectEntitlement]'}
}
def __init__(self, access_level=None, extensions=None, group_assignments=None, id=None, last_accessed_date=None, member=None, project_entitlements=None):
super(MemberEntitlement, self).__init__()
self.access_level = access_level
self.extensions = extensions
self.group_assignments = group_assignments
self.id = id
self.last_accessed_date = last_accessed_date
self.member = member
self.project_entitlements = project_entitlements
class MemberEntitlementsResponseBase(Model):
"""MemberEntitlementsResponseBase.
:param is_success: True if all operations were successful
:type is_success: bool
:param member_entitlement: Result of the member entitlement after the operations have been applied
:type member_entitlement: :class:`MemberEntitlement <member-entitlement-management.v4_0.models.MemberEntitlement>`
"""
_attribute_map = {
'is_success': {'key': 'isSuccess', 'type': 'bool'},
'member_entitlement': {'key': 'memberEntitlement', 'type': 'MemberEntitlement'}
}
def __init__(self, is_success=None, member_entitlement=None):
super(MemberEntitlementsResponseBase, self).__init__()
self.is_success = is_success
self.member_entitlement = member_entitlement
class OperationReference(Model):
"""OperationReference.
:param id: The identifier for this operation.
:type id: str
:param status: The current status of the operation.
:type status: object
:param url: Url to get the full object.
:type url: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'status': {'key': 'status', 'type': 'object'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, id=None, status=None, url=None):
super(OperationReference, self).__init__()
self.id = id
self.status = status
self.url = url
class OperationResult(Model):
"""OperationResult.
:param errors: List of error codes paired with their corresponding error messages
:type errors: list of { key: int; value: str }
:param is_success: Success status of the operation
:type is_success: bool
:param member_id: Identifier of the Member being acted upon
:type member_id: str
:param result: Result of the MemberEntitlement after the operation
:type result: :class:`MemberEntitlement <member-entitlement-management.v4_0.models.MemberEntitlement>`
"""
_attribute_map = {
'errors': {'key': 'errors', 'type': '[{ key: int; value: str }]'},
'is_success': {'key': 'isSuccess', 'type': 'bool'},
'member_id': {'key': 'memberId', 'type': 'str'},
'result': {'key': 'result', 'type': 'MemberEntitlement'}
}
def __init__(self, errors=None, is_success=None, member_id=None, result=None):
super(OperationResult, self).__init__()
self.errors = errors
self.is_success = is_success
self.member_id = member_id
self.result = result
class ProjectEntitlement(Model):
"""ProjectEntitlement.
:param assignment_source:
:type assignment_source: object
:param group:
:type group: :class:`Group <member-entitlement-management.v4_0.models.Group>`
:param is_project_permission_inherited:
:type is_project_permission_inherited: bool
:param project_ref:
:type project_ref: :class:`ProjectRef <member-entitlement-management.v4_0.models.ProjectRef>`
:param team_refs:
:type team_refs: list of :class:`TeamRef <member-entitlement-management.v4_0.models.TeamRef>`
"""
_attribute_map = {
'assignment_source': {'key': 'assignmentSource', 'type': 'object'},
'group': {'key': 'group', 'type': 'Group'},
'is_project_permission_inherited': {'key': 'isProjectPermissionInherited', 'type': 'bool'},
'project_ref': {'key': 'projectRef', 'type': 'ProjectRef'},
'team_refs': {'key': 'teamRefs', 'type': '[TeamRef]'}
}
def __init__(self, assignment_source=None, group=None, is_project_permission_inherited=None, project_ref=None, team_refs=None):
super(ProjectEntitlement, self).__init__()
self.assignment_source = assignment_source
self.group = group
self.is_project_permission_inherited = is_project_permission_inherited
self.project_ref = project_ref
self.team_refs = team_refs
class ProjectRef(Model):
"""ProjectRef.
:param id:
:type id: str
:param name:
:type name: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'}
}
def __init__(self, id=None, name=None):
super(ProjectRef, self).__init__()
self.id = id
self.name = name
class ReferenceLinks(Model):
"""ReferenceLinks.
:param links: The readonly view of the links. Because Reference links are readonly, we only want to expose them as read only.
:type links: dict
"""
_attribute_map = {
'links': {'key': 'links', 'type': '{object}'}
}
def __init__(self, links=None):
super(ReferenceLinks, self).__init__()
self.links = links
class TeamRef(Model):
"""TeamRef.
:param id:
:type id: str
:param name:
:type name: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'}
}
def __init__(self, id=None, name=None):
super(TeamRef, self).__init__()
self.id = id
self.name = name
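# Illustrative sketch only: composing the reference types defined above into a
# ProjectEntitlement. The ids, names, assignment source, and group_type strings
# are made-up placeholder values, not defaults used by the service.
def _example_project_entitlement():
    """Build a ProjectEntitlement for one project with a single team."""
    project = ProjectRef(id="00000000-0000-0000-0000-000000000000", name="ExampleProject")
    team = TeamRef(id="11111111-1111-1111-1111-111111111111", name="ExampleProject Team")
    return ProjectEntitlement(
        assignment_source="unknown",
        group=Group(display_name="Contributors", group_type="projectContributor"),
        is_project_permission_inherited=False,
        project_ref=project,
        team_refs=[team],
    )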
class GraphMember(GraphSubject):
"""GraphMember.
:param _links: This field contains zero or more interesting links about the graph subject. These links may be invoked to obtain additional relationships or more detailed information about this graph subject.
:type _links: :class:`ReferenceLinks <microsoft.-visual-studio.-services.-web-api.v4_0.models.ReferenceLinks>`
:param descriptor: The descriptor is the primary way to reference the graph subject while the system is running. This field will uniquely identify the same graph subject across both Accounts and Organizations.
:type descriptor: str
:param display_name: This is the non-unique display name of the graph subject. To change this field, you must alter its value in the source provider.
:type display_name: str
:param origin: The type of source provider for the origin identifier (ex:AD, AAD, MSA)
:type origin: str
    :param origin_id: The unique identifier from the system of origin. Typically a sid, object id or Guid. Linking and unlinking operations can cause this value to change for a user, because the user may now be backed by a different provider and have a different unique id in the new provider.
:type origin_id: str
:param subject_kind: This field identifies the type of the graph subject (ex: Group, Scope, User).
:type subject_kind: str
:param url: This url is the full route to the source resource of this graph subject.
:type url: str
:param domain: This represents the name of the container of origin for a graph member. (For MSA this is "Windows Live ID", for AD the name of the domain, for AAD the name of the directory, for Vsts groups the ScopeId, etc)
:type domain: str
:param mail_address: The email address of record for a given graph member. This may be different than the principal name.
:type mail_address: str
:param principal_name: This is the PrincipalName of this graph member from the source provider. The source provider may change this field over time and it is not guaranteed to be immutable for the life of the graph member by Vsts.
:type principal_name: str
"""
_attribute_map = {
'_links': {'key': '_links', 'type': 'ReferenceLinks'},
'descriptor': {'key': 'descriptor', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'origin': {'key': 'origin', 'type': 'str'},
'origin_id': {'key': 'originId', 'type': 'str'},
'subject_kind': {'key': 'subjectKind', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'domain': {'key': 'domain', 'type': 'str'},
'mail_address': {'key': 'mailAddress', 'type': 'str'},
'principal_name': {'key': 'principalName', 'type': 'str'}
}
def __init__(self, _links=None, descriptor=None, display_name=None, origin=None, origin_id=None, subject_kind=None, url=None, domain=None, mail_address=None, principal_name=None):
super(GraphMember, self).__init__(_links=_links, descriptor=descriptor, display_name=display_name, origin=origin, origin_id=origin_id, subject_kind=subject_kind, url=url)
self.domain = domain
self.mail_address = mail_address
self.principal_name = principal_name
class GroupEntitlementOperationReference(OperationReference):
"""GroupEntitlementOperationReference.
:param id: The identifier for this operation.
:type id: str
:param status: The current status of the operation.
:type status: object
:param url: Url to get the full object.
:type url: str
:param completed: Operation completed with success or failure
:type completed: bool
:param have_results_succeeded: True if all operations were successful
:type have_results_succeeded: bool
:param results: List of results for each operation
:type results: list of :class:`GroupOperationResult <member-entitlement-management.v4_0.models.GroupOperationResult>`
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'status': {'key': 'status', 'type': 'object'},
'url': {'key': 'url', 'type': 'str'},
'completed': {'key': 'completed', 'type': 'bool'},
'have_results_succeeded': {'key': 'haveResultsSucceeded', 'type': 'bool'},
'results': {'key': 'results', 'type': '[GroupOperationResult]'}
}
def __init__(self, id=None, status=None, url=None, completed=None, have_results_succeeded=None, results=None):
super(GroupEntitlementOperationReference, self).__init__(id=id, status=status, url=url)
self.completed = completed
self.have_results_succeeded = have_results_succeeded
self.results = results
class MemberEntitlementsPatchResponse(MemberEntitlementsResponseBase):
"""MemberEntitlementsPatchResponse.
:param is_success: True if all operations were successful
:type is_success: bool
:param member_entitlement: Result of the member entitlement after the operations have been applied
:type member_entitlement: :class:`MemberEntitlement <member-entitlement-management.v4_0.models.MemberEntitlement>`
:param operation_results: List of results for each operation
:type operation_results: list of :class:`OperationResult <member-entitlement-management.v4_0.models.OperationResult>`
"""
_attribute_map = {
'is_success': {'key': 'isSuccess', 'type': 'bool'},
'member_entitlement': {'key': 'memberEntitlement', 'type': 'MemberEntitlement'},
'operation_results': {'key': 'operationResults', 'type': '[OperationResult]'}
}
def __init__(self, is_success=None, member_entitlement=None, operation_results=None):
super(MemberEntitlementsPatchResponse, self).__init__(is_success=is_success, member_entitlement=member_entitlement)
self.operation_results = operation_results
class MemberEntitlementsPostResponse(MemberEntitlementsResponseBase):
"""MemberEntitlementsPostResponse.
:param is_success: True if all operations were successful
:type is_success: bool
:param member_entitlement: Result of the member entitlement after the operations have been applied
:type member_entitlement: :class:`MemberEntitlement <member-entitlement-management.v4_0.models.MemberEntitlement>`
:param operation_result: Operation result
:type operation_result: :class:`OperationResult <member-entitlement-management.v4_0.models.OperationResult>`
"""
_attribute_map = {
'is_success': {'key': 'isSuccess', 'type': 'bool'},
'member_entitlement': {'key': 'memberEntitlement', 'type': 'MemberEntitlement'},
'operation_result': {'key': 'operationResult', 'type': 'OperationResult'}
}
def __init__(self, is_success=None, member_entitlement=None, operation_result=None):
super(MemberEntitlementsPostResponse, self).__init__(is_success=is_success, member_entitlement=member_entitlement)
self.operation_result = operation_result
class MemberEntitlementOperationReference(OperationReference):
"""MemberEntitlementOperationReference.
:param id: The identifier for this operation.
:type id: str
:param status: The current status of the operation.
:type status: object
:param url: Url to get the full object.
:type url: str
:param completed: Operation completed with success or failure
:type completed: bool
:param have_results_succeeded: True if all operations were successful
:type have_results_succeeded: bool
:param results: List of results for each operation
:type results: list of :class:`OperationResult <member-entitlement-management.v4_0.models.OperationResult>`
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'status': {'key': 'status', 'type': 'object'},
'url': {'key': 'url', 'type': 'str'},
'completed': {'key': 'completed', 'type': 'bool'},
'have_results_succeeded': {'key': 'haveResultsSucceeded', 'type': 'bool'},
'results': {'key': 'results', 'type': '[OperationResult]'}
}
def __init__(self, id=None, status=None, url=None, completed=None, have_results_succeeded=None, results=None):
super(MemberEntitlementOperationReference, self).__init__(id=id, status=status, url=url)
self.completed = completed
self.have_results_succeeded = have_results_succeeded
self.results = results
class GraphGroup(GraphMember):
"""GraphGroup.
:param _links: This field contains zero or more interesting links about the graph subject. These links may be invoked to obtain additional relationships or more detailed information about this graph subject.
:type _links: :class:`ReferenceLinks <microsoft.-visual-studio.-services.-web-api.v4_0.models.ReferenceLinks>`
:param descriptor: The descriptor is the primary way to reference the graph subject while the system is running. This field will uniquely identify the same graph subject across both Accounts and Organizations.
:type descriptor: str
:param display_name: This is the non-unique display name of the graph subject. To change this field, you must alter its value in the source provider.
:type display_name: str
:param origin: The type of source provider for the origin identifier (ex:AD, AAD, MSA)
:type origin: str
    :param origin_id: The unique identifier from the system of origin. Typically a sid, object id or Guid. Linking and unlinking operations can cause this value to change for a user, because the user may now be backed by a different provider and have a different unique id in the new provider.
:type origin_id: str
:param subject_kind: This field identifies the type of the graph subject (ex: Group, Scope, User).
:type subject_kind: str
:param url: This url is the full route to the source resource of this graph subject.
:type url: str
:param domain: This represents the name of the container of origin for a graph member. (For MSA this is "Windows Live ID", for AD the name of the domain, for AAD the name of the directory, for Vsts groups the ScopeId, etc)
:type domain: str
:param mail_address: The email address of record for a given graph member. This may be different than the principal name.
:type mail_address: str
:param principal_name: This is the PrincipalName of this graph member from the source provider. The source provider may change this field over time and it is not guaranteed to be immutable for the life of the graph member by Vsts.
:type principal_name: str
:param description: A short phrase to help human readers disambiguate groups with similar names
:type description: str
"""
_attribute_map = {
'_links': {'key': '_links', 'type': 'ReferenceLinks'},
'descriptor': {'key': 'descriptor', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'origin': {'key': 'origin', 'type': 'str'},
'origin_id': {'key': 'originId', 'type': 'str'},
'subject_kind': {'key': 'subjectKind', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'domain': {'key': 'domain', 'type': 'str'},
'mail_address': {'key': 'mailAddress', 'type': 'str'},
'principal_name': {'key': 'principalName', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'}
}
def __init__(self, _links=None, descriptor=None, display_name=None, origin=None, origin_id=None, subject_kind=None, url=None, domain=None, mail_address=None, principal_name=None, description=None):
super(GraphGroup, self).__init__(_links=_links, descriptor=descriptor, display_name=display_name, origin=origin, origin_id=origin_id, subject_kind=subject_kind, url=url, domain=domain, mail_address=mail_address, principal_name=principal_name)
self.description = description
| 43.598214 | 287 | 0.675268 |
f75ab961b3eb99af30897baab7b8c091e6b1d3ec | 929 | py | Python | plugins/holland.backup.random/setup.py | Alibloke/holland | e630b511a95ed8e36205e8300e632018918223ff | [
"BSD-3-Clause"
] | null | null | null | plugins/holland.backup.random/setup.py | Alibloke/holland | e630b511a95ed8e36205e8300e632018918223ff | [
"BSD-3-Clause"
] | null | null | null | plugins/holland.backup.random/setup.py | Alibloke/holland | e630b511a95ed8e36205e8300e632018918223ff | [
"BSD-3-Clause"
] | null | null | null | from setuptools import setup, find_packages
version = "1.2.0"
setup(
name="holland.backup.random",
version=version,
description="Back up data from /dev/random",
long_description="""\
    Uses /dev/random. A bit more of an example than holland.backup.example
""",
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords="random",
author="Rackspace",
author_email="holland-devel@googlegroups.com",
url="http://www.hollandbackup.org/",
license="GPLv2",
packages=find_packages(exclude=["ez_setup", "examples", "tests", "tests.*"]),
include_package_data=True,
zip_safe=True,
test_suite="tests",
install_requires=[
# -*- Extra requirements: -*-
],
entry_points="""
[holland.backup]
random = holland.backup.random:RandomPlugin
""",
namespace_packages=["holland", "holland.backup"],
)
| 29.967742 | 94 | 0.65662 |
f75ae3f598601383a8e40116fe4c0e34b7b54f63 | 36,368 | py | Python | evcouplings/compare/protocol.py | thomashopf/EVcouplings-1 | d3e4947d29b62537bd79215ce72b6eea18134850 | [
"MIT"
] | null | null | null | evcouplings/compare/protocol.py | thomashopf/EVcouplings-1 | d3e4947d29b62537bd79215ce72b6eea18134850 | [
"MIT"
] | null | null | null | evcouplings/compare/protocol.py | thomashopf/EVcouplings-1 | d3e4947d29b62537bd79215ce72b6eea18134850 | [
"MIT"
] | 2 | 2021-04-03T14:19:12.000Z | 2021-04-05T17:34:32.000Z | """
EC to 3D structure comparison protocols/workflows.
Authors:
Thomas A. Hopf
Anna G. Green (complex and _make_complex_contact_maps)
"""
from copy import deepcopy
from math import ceil
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from evcouplings.align.alignment import (
read_fasta, parse_header
)
from evcouplings.utils.config import (
check_required, InvalidParameterError
)
from evcouplings.utils.system import (
create_prefix_folders, insert_dir, verify_resources,
)
from evcouplings.couplings import Segment
from evcouplings.compare.pdb import load_structures
from evcouplings.compare.distances import (
intra_dists, multimer_dists, remap_chains,
inter_dists, remap_complex_chains
)
from evcouplings.compare.sifts import SIFTS, SIFTSResult
from evcouplings.compare.ecs import (
coupling_scores_compared, add_precision
)
from evcouplings.visualize import pairs, misc
def _identify_structures(**kwargs):
"""
Identify set of 3D structures for comparison
Parameters
----------
**kwargs
See check_required in code below
Returns
-------
SIFTSResult
Identified structures and residue index mappings
"""
def _filter_by_id(x, id_list):
x = deepcopy(x)
x.hits = x.hits.loc[
x.hits.pdb_id.isin(id_list)
]
return x
check_required(
kwargs,
[
"prefix", "pdb_ids", "compare_multimer",
"max_num_hits", "max_num_structures",
"pdb_mmtf_dir",
"sifts_mapping_table", "sifts_sequence_db",
"by_alignment", "pdb_alignment_method",
"alignment_min_overlap",
"sequence_id", "sequence_file", "region",
"use_bitscores", "domain_threshold",
"sequence_threshold"
]
)
# get SIFTS mapping object/sequence DB
s = SIFTS(
kwargs["sifts_mapping_table"],
kwargs["sifts_sequence_db"]
)
reduce_chains = not kwargs["compare_multimer"]
# determine if we need to find structures
# by sequence search or just fetching
# based on Uniprot/PDB identifier
if kwargs["by_alignment"]:
# if searching by alignment, verify that
# user selected jackhmmer or hmmsearch
SEARCH_METHODS = ["jackhmmer", "hmmsearch"]
if kwargs["pdb_alignment_method"] not in SEARCH_METHODS:
raise InvalidParameterError(
"Invalid pdb search method: " +
"{}. Valid selections are: {}".format(
", ".join(SEARCH_METHODS.keys())
)
)
sifts_map = s.by_alignment(
reduce_chains=reduce_chains,
min_overlap=kwargs["alignment_min_overlap"],
**kwargs
)
else:
sifts_map = s.by_uniprot_id(
kwargs["sequence_id"], reduce_chains=reduce_chains
)
sifts_map_full = deepcopy(sifts_map)
# filter ID list down to manually selected PDB entries
if kwargs["pdb_ids"] is not None:
pdb_ids = kwargs["pdb_ids"]
# make sure we have a list of PDB IDs
if not isinstance(pdb_ids, list):
pdb_ids = [pdb_ids]
pdb_ids = [x.lower() for x in pdb_ids]
sifts_map = _filter_by_id(sifts_map, pdb_ids)
# limit number of hits and structures
if kwargs["max_num_hits"] is not None:
sifts_map.hits = sifts_map.hits.iloc[:kwargs["max_num_hits"]]
if kwargs["max_num_structures"] is not None:
keep_ids = sifts_map.hits.pdb_id.unique()
keep_ids = keep_ids[:kwargs["max_num_structures"]]
sifts_map = _filter_by_id(sifts_map, keep_ids)
return sifts_map, sifts_map_full
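# Illustrative sketch only (all values are placeholders): selecting structures
# directly by UniProt identifier rather than by alignment. The keyword names
# mirror the check_required list above; the file paths and sequence id are
# invented for this example.
#
# sifts_map, sifts_map_full = _identify_structures(
#     prefix="output/aux/example", pdb_ids=None, compare_multimer=True,
#     max_num_hits=25, max_num_structures=10, pdb_mmtf_dir=None,
#     sifts_mapping_table="databases/sifts/pdb_chain_uniprot.csv",
#     sifts_sequence_db="databases/sifts/pdb_chain_uniprot.fasta",
#     by_alignment=False, pdb_alignment_method="jackhmmer",
#     alignment_min_overlap=20, sequence_id="RASH_HUMAN", sequence_file=None,
#     region=None, use_bitscores=True, domain_threshold=0.5,
#     sequence_threshold=0.5,
# )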
def _make_contact_maps(ec_table, d_intra, d_multimer, **kwargs):
"""
Plot contact maps with all ECs above a certain probability threshold,
or a given count of ECs
Parameters
----------
ec_table : pandas.DataFrame
Full set of evolutionary couplings (all pairs)
d_intra : DistanceMap
Computed residue-residue distances inside chain
d_multimer : DistanceMap
Computed residue-residue distances between homomultimeric
chains
**kwargs
Further plotting parameters, see check_required in code
for necessary values.
Returns
-------
cm_files : list(str)
Paths of generated contact map files
"""
def plot_cm(ecs, output_file=None):
"""
Simple wrapper for contact map plotting
"""
with misc.plot_context("Arial"):
fig = plt.figure(figsize=(8, 8))
if kwargs["scale_sizes"]:
ecs = ecs.copy()
ecs.loc[:, "size"] = ecs.cn.values / ecs.cn.max()
pairs.plot_contact_map(
ecs, d_intra, d_multimer,
distance_cutoff=kwargs["distance_cutoff"],
show_secstruct=kwargs["draw_secondary_structure"],
margin=5,
boundaries=kwargs["boundaries"]
)
plt.suptitle("{} evolutionary couplings".format(len(ecs)), fontsize=14)
if output_file is not None:
plt.savefig(output_file, bbox_inches="tight")
plt.close(fig)
check_required(
kwargs,
[
"prefix", "min_sequence_distance",
"plot_probability_cutoffs",
"boundaries", "plot_lowest_count",
"plot_highest_count", "plot_increase",
"draw_secondary_structure"
]
)
prefix = kwargs["prefix"]
cm_files = []
ecs_longrange = ec_table.query(
"abs(i - j) >= {}".format(kwargs["min_sequence_distance"])
)
# based on significance cutoff
if kwargs["plot_probability_cutoffs"]:
cutoffs = kwargs["plot_probability_cutoffs"]
if not isinstance(cutoffs, list):
cutoffs = [cutoffs]
for c in cutoffs:
ec_set = ecs_longrange.query("probability >= @c")
# only can plot if we have any significant ECs above threshold
if len(ec_set) > 0:
output_file = prefix + "_significant_ECs_{}.pdf".format(c)
plot_cm(ec_set, output_file=output_file)
cm_files.append(output_file)
# based on number of long-range ECs
# identify number of sites in EC model
num_sites = len(
set.union(set(ec_table.i.unique()), set(ec_table.j.unique()))
)
# transform fraction of number of sites into discrete number of ECs
def _discrete_count(x):
if isinstance(x, float):
x = ceil(x * num_sites)
return int(x)
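    # Worked example of the conversion above: with num_sites = 200 and
    # fractional settings plot_lowest_count = 0.05, plot_highest_count = 0.3,
    # plot_increase = 0.05, the loop below draws 10, 20, ..., 60 ECs
    # (ceil(0.05 * 200) = 10 per step); integer settings are used unchanged.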
# range of plots to make
lowest = _discrete_count(kwargs["plot_lowest_count"])
highest = _discrete_count(kwargs["plot_highest_count"])
step = _discrete_count(kwargs["plot_increase"])
# create individual plots
for c in range(lowest, highest + 1, step):
ec_set = ecs_longrange.iloc[:c]
output_file = prefix + "_{}_ECs.pdf".format(c)
plot_cm(ec_set, output_file=output_file)
cm_files.append(output_file)
# give back list of all contact map file names
return cm_files
def _make_complex_contact_maps(ec_table, d_intra_i, d_multimer_i,
d_intra_j, d_multimer_j,
d_inter, first_segment_name,
second_segment_name, **kwargs):
"""
Plot contact maps with all ECs above a certain probability threshold,
or a given count of ECs
Parameters
----------
ec_table : pandas.DataFrame
Full set of evolutionary couplings (all pairs)
d_intra_i, d_intra_j: DistanceMap
Computed residue-residue distances within chains for
monomers i and j
d_multimer_i, d_multimer_j : DistanceMap
Computed residue-residue distances between homomultimeric
chains for monomers i and j
d_inter: DistanceMap
Computed residue-residue distances between heteromultimeric
chains i and j
first_segment_name, second_segment_name: str
Name of segment i and segment j in the ec_table
**kwargs
Further plotting parameters, see check_required in code
for necessary values.
Returns
-------
cm_files : list(str)
Paths of generated contact map files
"""
def plot_complex_cm(ecs_i, ecs_j, ecs_inter,
first_segment_name,
second_segment_name, output_file=None):
"""
Simple wrapper for contact map plotting
"""
with misc.plot_context("Arial"):
if kwargs["scale_sizes"]:
# to scale sizes, combine all ecs to rescale together
ecs = pd.concat([ecs_i, ecs_j, ecs_inter])
ecs.loc[:, "size"] = ecs.cn.values / ecs.cn.max()
# split back into three separate DataFrames
ecs_i = ecs.query("segment_i == segment_j == @first_segment_name")
ecs_j = ecs.query("segment_i == segment_j == @second_segment_name")
ecs_inter = ecs.query("segment_i != segment_j")
            # if any of these groups are empty, replace with None
if len(ecs_i) == 0:
ecs_i = None
if len(ecs_j) == 0:
ecs_j = None
if len(ecs_inter) == 0:
ecs_inter = None
            # Currently, we require each monomer to have at least one of
            # ECs or distances in order to make a plot
            if ((ecs_i is None or ecs_i.empty) and d_intra_i is None and d_multimer_i is None) \
                    or ((ecs_j is None or ecs_j.empty) and d_intra_j is None and d_multimer_j is None):
return False
fig = plt.figure(figsize=(8, 8))
# create the contact map
pairs.complex_contact_map(
ecs_i, ecs_j, ecs_inter,
d_intra_i, d_multimer_i,
d_intra_j, d_multimer_j,
d_inter,
margin=5,
boundaries=kwargs["boundaries"],
scale_sizes=kwargs["scale_sizes"]
)
# Add title to the plot
if ecs_inter is None:
ec_len = '0'
else:
ec_len = len(ecs_inter)
plt.suptitle(
"{} inter-molecule evolutionary couplings".format(ec_len),
fontsize=14
)
# save to output
if output_file is not None:
plt.savefig(output_file, bbox_inches="tight")
plt.close(fig)
return True
check_required(
kwargs,
[
"prefix", "min_sequence_distance",
"plot_probability_cutoffs",
"boundaries",
"draw_secondary_structure", "plot_lowest_count",
"plot_highest_count", "plot_increase",
"scale_sizes"
]
)
prefix = kwargs["prefix"]
cm_files = []
ecs_longrange = ec_table.query(
"abs(i - j) >= {} or segment_i != segment_j".format(kwargs["min_sequence_distance"])
)
# create plots based on significance cutoff
if kwargs["plot_probability_cutoffs"]:
cutoffs = kwargs["plot_probability_cutoffs"]
if not isinstance(cutoffs, list):
cutoffs = [cutoffs]
for c in cutoffs:
ec_set = ecs_longrange.query("probability >= @c")
# only can plot if we have any significant ECs above threshold
if len(ec_set) > 0:
ec_set_i = ec_set.query("segment_i == segment_j == @first_segment_name")
ec_set_j = ec_set.query("segment_i == segment_j == @second_segment_name")
ec_set_inter = ec_set.query("segment_i != segment_j")
output_file = prefix + "_significant_ECs_{}.pdf".format(c)
plot_completed = plot_complex_cm(
ec_set_i, ec_set_j, ec_set_inter,
first_segment_name, second_segment_name,
output_file=output_file
)
if plot_completed:
cm_files.append(output_file)
# transform fraction of number of sites into discrete number of ECs
def _discrete_count(x):
if isinstance(x, float):
num_sites = 0
for seg_name in [first_segment_name, second_segment_name]:
num_sites += len(
set.union(
set(ec_table.query("segment_i == @seg_name").i.unique()),
set(ec_table.query("segment_j == @seg_name").j.unique())
)
)
x = ceil(x * num_sites)
return int(x)
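    # Worked example: if the first segment contributes 150 coupled positions
    # and the second 100, a fractional plot_lowest_count of 0.02 becomes
    # ceil(0.02 * 250) = 5 inter ECs for the lowest-count plot.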
# range of plots to make
lowest = _discrete_count(kwargs["plot_lowest_count"])
highest = _discrete_count(kwargs["plot_highest_count"])
step = _discrete_count(kwargs["plot_increase"])
for c in range(lowest, highest + 1, step):
# get the inter ECs to plot
ec_set_inter = ecs_longrange.query("segment_i != segment_j")[0:c]
# if there are no inter ecs to be plotted, continue
if ec_set_inter.empty:
continue
# get the index of the lowest inter EC
last_inter_index = ec_set_inter.index[-1]
# take all intra-protein ECs that score higher than the lowest plotted inter-protein EC
ec_set_i = ecs_longrange.iloc[0:last_inter_index].query(
"segment_i == segment_j == @first_segment_name"
)
ec_set_j = ecs_longrange.iloc[0:last_inter_index].query(
"segment_i == segment_j == @second_segment_name"
)
output_file = prefix + "_{}_ECs.pdf".format(c)
plot_completed = plot_complex_cm(
ec_set_i, ec_set_j, ec_set_inter,
first_segment_name, second_segment_name,
output_file=output_file
)
if plot_completed:
cm_files.append(output_file)
# give back list of all contact map file names
return cm_files
def standard(**kwargs):
"""
Protocol:
Compare ECs for single proteins (or domains)
to 3D structure information
Parameters
----------
Mandatory kwargs arguments:
See list below in code where calling check_required
Returns
-------
outcfg : dict
Output configuration of the pipeline, including
the following fields:
        * ec_compared_all_file
        * ec_compared_longrange_file
        * pdb_structure_hits_file
* distmap_monomer
* distmap_multimer
* contact_map_files
* remapped_pdb_files
"""
check_required(
kwargs,
[
"prefix", "ec_file", "min_sequence_distance",
"pdb_mmtf_dir", "atom_filter", "compare_multimer",
"distance_cutoff", "target_sequence_file",
"scale_sizes",
]
)
prefix = kwargs["prefix"]
outcfg = {
"ec_compared_all_file": prefix + "_CouplingScoresCompared_all.csv",
"ec_compared_longrange_file": prefix + "_CouplingScoresCompared_longrange.csv",
"pdb_structure_hits_file": prefix + "_structure_hits.csv",
"pdb_structure_hits_unfiltered_file": prefix + "_structure_hits_unfiltered.csv",
# cannot have the distmap files end with "_file" because there are
# two files (.npy and .csv), which would cause problems with automatic
# checking if those files exist
"distmap_monomer": prefix + "_distance_map_monomer",
"distmap_multimer": prefix + "_distance_map_multimer",
}
# make sure EC file exists
verify_resources(
"EC file does not exist",
kwargs["ec_file"]
)
# make sure output directory exists
create_prefix_folders(prefix)
# store auxiliary files here (too much for average user)
aux_prefix = insert_dir(prefix, "aux", rootname_subdir=False)
create_prefix_folders(aux_prefix)
# Step 1: Identify 3D structures for comparison
sifts_map, sifts_map_full = _identify_structures(**{
**kwargs,
"prefix": aux_prefix,
})
# save selected PDB hits
sifts_map.hits.to_csv(
outcfg["pdb_structure_hits_file"], index=False
)
# also save full list of hits
sifts_map_full.hits.to_csv(
outcfg["pdb_structure_hits_unfiltered_file"], index=False
)
# Step 2: Compute distance maps
# load all structures at once
structures = load_structures(
sifts_map.hits.pdb_id,
kwargs["pdb_mmtf_dir"],
raise_missing=False
)
# compute distance maps and save
# (but only if we found some structure)
if len(sifts_map.hits) > 0:
d_intra = intra_dists(
sifts_map, structures, atom_filter=kwargs["atom_filter"],
output_prefix=aux_prefix + "_distmap_intra"
)
d_intra.to_file(outcfg["distmap_monomer"])
# save contacts to separate file
outcfg["monomer_contacts_file"] = prefix + "_contacts_monomer.csv"
d_intra.contacts(
kwargs["distance_cutoff"]
).to_csv(
outcfg["monomer_contacts_file"], index=False
)
# compute multimer distances, if requested;
# note that d_multimer can be None if there
# are no structures with multiple chains
if kwargs["compare_multimer"]:
d_multimer = multimer_dists(
sifts_map, structures, atom_filter=kwargs["atom_filter"],
output_prefix=aux_prefix + "_distmap_multimer"
)
else:
d_multimer = None
        # if we have a multimer contact map in the end, save it
if d_multimer is not None:
d_multimer.to_file(outcfg["distmap_multimer"])
outcfg["multimer_contacts_file"] = prefix + "_contacts_multimer.csv"
# save contacts to separate file
d_multimer.contacts(
kwargs["distance_cutoff"]
).to_csv(
outcfg["multimer_contacts_file"], index=False
)
else:
outcfg["distmap_multimer"] = None
# at this point, also create remapped structures (e.g. for
# later comparison of folding results)
verify_resources(
"Target sequence file does not exist",
kwargs["target_sequence_file"]
)
# create target sequence map for remapping structure
with open(kwargs["target_sequence_file"]) as f:
header, seq = next(read_fasta(f))
seq_id, seq_start, seq_end = parse_header(header)
seqmap = dict(zip(range(seq_start, seq_end + 1), seq))
# remap structures, swap mapping index and filename in
# dictionary so we have a list of files in the dict keys
outcfg["remapped_pdb_files"] = {
filename: mapping_index for mapping_index, filename in
remap_chains(sifts_map, aux_prefix, seqmap).items()
}
else:
# if no structures, can not compute distance maps
d_intra = None
d_multimer = None
outcfg["distmap_monomer"] = None
outcfg["distmap_multimer"] = None
outcfg["remapped_pdb_files"] = None
# Step 3: Compare ECs to distance maps
ec_table = pd.read_csv(kwargs["ec_file"])
# identify number of sites in EC model
num_sites = len(
set.union(set(ec_table.i.unique()), set(ec_table.j.unique()))
)
for out_file, min_seq_dist in [
("ec_compared_longrange_file", kwargs["min_sequence_distance"]),
("ec_compared_all_file", 0),
]:
# compare ECs only if we minimally have intra distance map
if d_intra is not None:
coupling_scores_compared(
ec_table, d_intra, d_multimer,
dist_cutoff=kwargs["distance_cutoff"],
output_file=outcfg[out_file],
min_sequence_dist=min_seq_dist
)
else:
outcfg[out_file] = None
# also create line-drawing script if we made the csv
if outcfg["ec_compared_longrange_file"] is not None:
ecs_longrange = pd.read_csv(outcfg["ec_compared_longrange_file"])
outcfg["ec_lines_compared_pml_file"] = prefix + "_draw_ec_lines_compared.pml"
pairs.ec_lines_pymol_script(
ecs_longrange.iloc[:num_sites, :],
outcfg["ec_lines_compared_pml_file"],
distance_cutoff=kwargs["distance_cutoff"]
)
# Step 4: Make contact map plots
# if no structures available, defaults to EC-only plot
outcfg["contact_map_files"] = _make_contact_maps(
ec_table, d_intra, d_multimer, **kwargs
)
return outcfg
def complex(**kwargs):
"""
Protocol:
Compare ECs for a complex to
3D structure
Parameters
----------
Mandatory kwargs arguments:
See list below in code where calling check_required
Returns
-------
outcfg : dict
Output configuration of the pipeline, including
the following fields:
        * ec_compared_all_file
        * ec_compared_longrange_file
        * ec_compared_inter_file
        * first_pdb_structure_hits_file / second_pdb_structure_hits_file
        * first_distmap_monomer / second_distmap_monomer
        * first_distmap_multimer / second_distmap_multimer
        * distmap_inter
        * contact_map_files
        * first_remapped_pdb_files / second_remapped_pdb_files
"""
check_required(
kwargs,
[
"prefix", "ec_file", "min_sequence_distance",
"pdb_mmtf_dir", "atom_filter",
"first_compare_multimer", "second_compare_multimer",
"distance_cutoff", "segments",
"first_sequence_id", "second_sequence_id",
"first_sequence_file", "second_sequence_file",
"first_target_sequence_file", "second_target_sequence_file",
"scale_sizes"
]
)
prefix = kwargs["prefix"]
outcfg = {
# initialize output EC files
"ec_compared_all_file": prefix + "_CouplingScoresCompared_all.csv",
"ec_compared_longrange_file": prefix + "_CouplingScoresCompared_longrange.csv",
"ec_compared_inter_file": prefix + "_CouplingScoresCompared_inter.csv",
# initialize output inter distancemap files
"distmap_inter": prefix + "_distmap_inter",
"inter_contacts_file": prefix + "_inter_contacts_file"
}
# Add PDB comparison files for first and second monomer
for monomer_prefix in ["first", "second"]:
outcfg = {
**outcfg,
monomer_prefix + "_pdb_structure_hits_file":
"{}_{}_structure_hits.csv".format(prefix, monomer_prefix),
monomer_prefix + "_pdb_structure_hits_unfiltered_file":
"{}_{}_structure_hits_unfitered.csv".format(prefix, monomer_prefix),
monomer_prefix + "_distmap_monomer":
"{}_{}_distance_map_monomer".format(prefix, monomer_prefix),
monomer_prefix + "_distmap_multimer":
"{}_{}_distance_map_multimer".format(prefix, monomer_prefix),
}
# make sure EC file exists
verify_resources(
"EC file does not exist",
kwargs["ec_file"]
)
# make sure output directory exists
create_prefix_folders(prefix)
# store auxiliary files here (too much for average user)
aux_prefix = insert_dir(prefix, "aux", rootname_subdir=False)
create_prefix_folders(aux_prefix)
# store auxiliary files here (too much for average user)
first_aux_prefix = insert_dir(aux_prefix, "first_monomer", rootname_subdir=False)
create_prefix_folders(first_aux_prefix)
# store auxiliary files here (too much for average user)
second_aux_prefix = insert_dir(aux_prefix, "second_monomer", rootname_subdir=False)
create_prefix_folders(second_aux_prefix)
# Step 1: Identify 3D structures for comparison
def _identify_monomer_structures(name_prefix, outcfg, aux_prefix):
# create a dictionary with kwargs for just the current monomer
# remove the "prefix" kwargs so that we can replace with the
# aux prefix when calling _identify_structures
# only replace first occurrence of name_prefix
monomer_kwargs = {
k.replace(name_prefix + "_", "", 1): v for k, v in kwargs.items() if "prefix" not in k
}
# this field needs to be set explicitly else it gets overwritten by concatenated file
monomer_kwargs["alignment_file"] = kwargs[name_prefix + "_alignment_file"]
monomer_kwargs["raw_focus_alignment_file"] = kwargs[name_prefix + "_raw_focus_alignment_file"]
# identify structures for that monomer
sifts_map, sifts_map_full = _identify_structures(
**monomer_kwargs,
prefix=aux_prefix
)
# save selected PDB hits
sifts_map.hits.to_csv(
outcfg[name_prefix + "_pdb_structure_hits_file"], index=False
)
# also save full list of hits
sifts_map_full.hits.to_csv(
outcfg[name_prefix + "_pdb_structure_hits_unfiltered_file"], index=False
)
return outcfg, sifts_map
outcfg, first_sifts_map = _identify_monomer_structures("first", outcfg, first_aux_prefix)
outcfg, second_sifts_map = _identify_monomer_structures("second", outcfg, second_aux_prefix)
# get the segment names from the kwargs
segment_list = kwargs["segments"]
# Make sure user provided exactly two segments
if len(segment_list) != 2:
raise InvalidParameterError(
"Compare stage for protein complexes requires exactly two segments"
)
first_segment_name = Segment.from_list(kwargs["segments"][0]).segment_id
second_segment_name = Segment.from_list(kwargs["segments"][1]).segment_id
first_chain_name = Segment.from_list(kwargs["segments"][0]).default_chain_name()
second_chain_name = Segment.from_list(kwargs["segments"][1]).default_chain_name()
# Step 2: Compute distance maps
def _compute_monomer_distance_maps(sifts_map, name_prefix, chain_name):
# prepare a sequence map to remap the structures we have found
verify_resources(
"Target sequence file does not exist",
kwargs[name_prefix + "_target_sequence_file"]
)
# create target sequence map for remapping structure
with open(kwargs[name_prefix + "_target_sequence_file"]) as f:
header, seq = next(read_fasta(f))
# create target sequence map for remapping structure
seq_id, seq_start, seq_end = parse_header(header)
seqmap = dict(zip(range(seq_start, seq_end + 1), seq))
# compute distance maps and save
# (but only if we found some structure)
if len(sifts_map.hits) > 0:
d_intra = intra_dists(
sifts_map, structures, atom_filter=kwargs["atom_filter"],
output_prefix=aux_prefix + "_" + name_prefix + "_distmap_intra"
)
d_intra.to_file(outcfg[name_prefix + "_distmap_monomer"])
# save contacts to separate file
outcfg[name_prefix + "_monomer_contacts_file"] = prefix + "_" + name_prefix + "_contacts_monomer.csv"
d_intra.contacts(
kwargs["distance_cutoff"]
).to_csv(
outcfg[name_prefix + "_monomer_contacts_file"], index=False
)
# compute multimer distances, if requested;
# note that d_multimer can be None if there
# are no structures with multiple chains
if kwargs[name_prefix + "_compare_multimer"]:
d_multimer = multimer_dists(
sifts_map, structures, atom_filter=kwargs["atom_filter"],
output_prefix=aux_prefix + "_" + name_prefix + "_distmap_multimer"
)
else:
d_multimer = None
# if we have a multimer contact map, save it
if d_multimer is not None:
d_multimer.to_file(outcfg[name_prefix + "_distmap_multimer"])
outcfg[name_prefix + "_multimer_contacts_file"] = prefix + name_prefix + "_contacts_multimer.csv"
# save contacts to separate file
d_multimer.contacts(
kwargs["distance_cutoff"]
).to_csv(
outcfg[name_prefix + "_multimer_contacts_file"], index=False
)
else:
outcfg[name_prefix + "_distmap_multimer"] = None
# create remapped structures (e.g. for
# later comparison of folding results)
# remap structures, swap mapping index and filename in
# dictionary so we have a list of files in the dict keys
outcfg[name_prefix + "_remapped_pdb_files"] = {
filename: mapping_index for mapping_index, filename in
remap_chains(
sifts_map, aux_prefix, seqmap, chain_name=chain_name,
raise_missing=kwargs["raise_missing"]
).items()
}
else:
# if no structures, cannot compute distance maps
d_intra = None
d_multimer = None
outcfg[name_prefix + "_distmap_monomer"] = None
outcfg[name_prefix + "_distmap_multimer"] = None
outcfg[name_prefix + "remapped_pdb_files"] = None
return d_intra, d_multimer, seqmap
# load all structures for both monomers
all_structures = set(first_sifts_map.hits.pdb_id).union(
set(second_sifts_map.hits.pdb_id)
)
structures = load_structures(
all_structures,
kwargs["pdb_mmtf_dir"],
raise_missing=False
)
d_intra_i, d_multimer_i, seqmap_i = _compute_monomer_distance_maps(
first_sifts_map, "first", first_chain_name
)
d_intra_j, d_multimer_j, seqmap_j = _compute_monomer_distance_maps(
second_sifts_map, "second", second_chain_name
)
# compute inter distance map if sifts map for each monomer exists
if len(first_sifts_map.hits) > 0 and len(second_sifts_map.hits) > 0:
d_inter = inter_dists(
first_sifts_map, second_sifts_map,
raise_missing=kwargs["raise_missing"]
)
# if there were overlapping PDBs, save the results
if d_inter is not None:
d_inter.to_file(outcfg["distmap_inter"])
# save contacts to separate file
d_inter.contacts(
kwargs["distance_cutoff"]
).to_csv(
outcfg["inter_contacts_file"], index=False
)
else:
outcfg["inter_contacts_file"] = None
d_inter = None
    # Step 3: Compare ECs to distance maps
ec_table = pd.read_csv(kwargs["ec_file"])
for out_file, min_seq_dist in [
("ec_compared_longrange_file", kwargs["min_sequence_distance"]),
("ec_compared_all_file", 0),
]:
# compare ECs only if we have an intra distance map
# for at least one monomer - inter can't exist unless
# we have both monomers
if (d_intra_i is not None) or (d_intra_j is not None):
# compare distances individually for each segment pair
ecs_intra_i = ec_table.query("segment_i == segment_j == @first_segment_name")
if d_intra_i is not None:
ecs_intra_i_compared = coupling_scores_compared(
ecs_intra_i, d_intra_i, d_multimer_i,
dist_cutoff=kwargs["distance_cutoff"],
output_file=None,
min_sequence_dist=min_seq_dist
)
else:
# If no distance map, the distance is saved as np.nan
ecs_intra_i_compared = ecs_intra_i.assign(dist=np.nan)
ecs_intra_j = ec_table.query("segment_i == segment_j == @second_segment_name")
if d_intra_j is not None:
ecs_intra_j_compared = coupling_scores_compared(
ecs_intra_j, d_intra_j, d_multimer_j,
dist_cutoff=kwargs["distance_cutoff"],
output_file=None,
min_sequence_dist=min_seq_dist
)
else:
ecs_intra_j_compared = ecs_intra_j.assign(dist=np.nan)
ecs_inter = ec_table.query("segment_i != segment_j")
if d_inter is not None:
ecs_inter_compared = coupling_scores_compared(
ecs_inter, d_inter, dist_map_multimer=None,
dist_cutoff=kwargs["distance_cutoff"],
output_file=None,
min_sequence_dist=None # does not apply for inter-protein ECs
)
else:
ecs_inter_compared = ecs_inter.assign(dist=np.nan)
# combine the tables
ec_table_compared = pd.concat([
ecs_inter_compared,
ecs_intra_i_compared,
ecs_intra_j_compared
])
# rename the precision column to "segmentwise_precision"
# because we calculated precision for each segment independently
ec_table_compared = ec_table_compared.rename(
columns={"precision": "segmentwise_precision"}
)
# TODO: change "cn" to "score" eventually
ec_table_compared = ec_table_compared.sort_values("cn", ascending=False)
# add the total precision
# TODO: implement different cutoffs for intra vs inter contacts
ec_table_compared = add_precision(
ec_table_compared,
dist_cutoff=kwargs["distance_cutoff"]
)
# save to file
# all ecs
ec_table_compared.to_csv(outcfg[out_file])
# save the inter ECs to a file
ecs_inter_compared.to_csv(outcfg["ec_compared_inter_file"])
# create the inter-ecs line drawing script
if outcfg["ec_compared_inter_file"] is not None and kwargs["plot_highest_count"] is not None:
inter_ecs = ec_table.query("segment_i != segment_j")
outcfg["ec_lines_compared_pml_file"] = prefix + "_draw_ec_lines_compared.pml"
pairs.ec_lines_pymol_script(
inter_ecs.iloc[:kwargs["plot_highest_count"], :],
outcfg["ec_lines_compared_pml_file"],
distance_cutoff=kwargs["distance_cutoff"],
chain={
first_segment_name: first_chain_name,
second_segment_name: second_chain_name
}
)
# Remap the complex crystal structures, if available
if len(first_sifts_map.hits) > 0 and len(second_sifts_map.hits) > 0:
outcfg["complex_remapped_pdb_files"] = {
filename: mapping_index for mapping_index, filename in
remap_complex_chains(
first_sifts_map, second_sifts_map,
seqmap_i, seqmap_j, output_prefix=aux_prefix,
raise_missing=kwargs["raise_missing"]
).items()
}
# Step 4: Make contact map plots
# if no structures available, defaults to EC-only plot
outcfg["contact_map_files"] = _make_complex_contact_maps(
ec_table, d_intra_i, d_multimer_i,
d_intra_j, d_multimer_j,
d_inter, first_segment_name,
second_segment_name, **kwargs
)
return outcfg
# list of available EC comparison protocols
PROTOCOLS = {
# standard monomer comparison protocol
"standard": standard,
# comparison for protein complexes
"complex": complex
}
def run(**kwargs):
"""
    Run comparison protocol to compare evolutionary couplings
    against 3D structure information.
Parameters
----------
Mandatory kwargs arguments:
protocol: EC protocol to run
prefix: Output prefix for all generated files
Returns
-------
outcfg : dict
Output configuration of stage
(see individual protocol for fields)
"""
check_required(kwargs, ["protocol"])
if kwargs["protocol"] not in PROTOCOLS:
raise InvalidParameterError(
"Invalid protocol selection: " +
"{}. Valid protocols are: {}".format(
kwargs["protocol"], ", ".join(PROTOCOLS.keys())
)
)
return PROTOCOLS[kwargs["protocol"]](**kwargs)
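# Illustrative sketch only: dispatching through the protocol map above. The
# paths are placeholders; in the full pipeline these keyword arguments (plus
# the remaining ones each protocol checks via check_required) come from the
# stage configuration.
#
# outcfg = run(
#     protocol="standard",
#     prefix="output/example",
#     ec_file="output/example_CouplingScores.csv",
#     target_sequence_file="output/example_target.fasta",
#     min_sequence_distance=6,
#     distance_cutoff=5,
# )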
| 34.801914 | 113 | 0.613836 |
f75b0f56e3d38f411144845899ed386194a8fadc | 27,195 | py | Python | main_imagenet.py | VITA-Group/Peek-a-Boo | 9290d4e5e3aee0dff994e1a664ec91bd6ec93176 | [
"MIT"
] | 2 | 2022-01-22T03:57:21.000Z | 2022-01-30T20:44:32.000Z | main_imagenet.py | VITA-Group/Peek-a-Boo | 9290d4e5e3aee0dff994e1a664ec91bd6ec93176 | [
"MIT"
] | null | null | null | main_imagenet.py | VITA-Group/Peek-a-Boo | 9290d4e5e3aee0dff994e1a664ec91bd6ec93176 | [
"MIT"
] | 2 | 2022-01-30T12:26:56.000Z | 2022-03-14T12:42:06.000Z | import argparse
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
# import torchvision.models as models
import logging
from logger import set_logging_config
import models
from bop import Bop
from models.seed_conv import SeedConv2d
from models.masked_psg_seed_conv import PredictiveSeedConv2d
import pruners
from generator import masked_parameters
from prune import prune_loop
print = print
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet18)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N',
help='mini-batch size (default: 256), this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--optimizer', default='SGD', type=str,
help='choose among [`SGD`, `BOP`, `Counter`]')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--savedir', default='results', type=str,
help='root dir to save exp checkpoints and logs')
parser.add_argument('--exp-name', default='SeedNet', type=str,
help='path to location to save logs and checkpoints')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)',
dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=100, type=int,
                    metavar='N', help='print frequency (default: 100)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--world-size', default=-1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed', action='store_true',
help='Use multi-processing distributed training to launch '
'N processes per node, which has N GPUs. This is the '
'fastest way to use PyTorch for either single node or '
'multi node data parallel training')
# SeedNet options
parser.add_argument('--sign-grouped-dim', default="", type=str,
help='dimensions that will be grouped for sign parameters')
parser.add_argument('--init-method', default='standard', type=str,
help='initialization method for conv weights')
parser.add_argument('--hidden-act', type=str, default='standard',
                    help='choose among [`pruning`, `flipping`, `ternary`, `none`]')
parser.add_argument('--scaling-input', action='store_true',
help='whether scale the input in SeedNet models')
# BOP options
parser.add_argument('--ar', type=float,
                    help='initial adaptivity rate in BOP')
parser.add_argument('--tau', type=float,
                    help='threshold in BOP')
parser.add_argument('--ar-decay-freq', type=int, default=100,
                    help='frequency to decay the ar hyperparameter in BOP')
parser.add_argument('--ar-decay-ratio', type=float, default=0.1,
                    help='decay ratio when decaying ar')
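# Illustrative sketch only: the Bop optimizer is selected from the command
# line; the hyperparameter values below are placeholders, not recommended
# defaults.
#   python main_imagenet.py /path/to/imagenet -a resnet18 --optimizer BOP \
#       --ar 1e-4 --tau 1e-6 --ar-decay-freq 45 --ar-decay-ratio 0.1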
# PSG options
parser.add_argument('--psg-no-backward', action='store_true',
help='Do predictive gradient calculation in backward')
parser.add_argument('--msb-bits', type=int, default=4,
help='MSB bits for the input')
parser.add_argument('--msb-bits-weight', type=int, default=4,
help='MSB bits for the weight')
parser.add_argument('--msb-bits-grad', type=int, default=8,
help='MSB bits for the grad')
parser.add_argument('--psg-threshold', type=float, default=0.0,
help='Threshold used in PSG')
parser.add_argument('--psg-sparsify', action='store_true',
help='Sparsify by ignoring small gradients')
parser.add_argument('--psg-no-take-sign', action='store_true',
help='Do not take sign for PSG')
# Pruning options
parser.add_argument('--pruner', type=str, default=None, choices=['Mag', 'SNIP', 'GraSP', 'SynFlow'],
help='pruning strategy')
parser.add_argument('--prune-epoch', type=int, default=0,
help='epoch number to finish sparsifying by')
parser.add_argument('--prune-ratio', type=float, default=1.0,
help='fraction of non-zero parameters after pruning')
parser.add_argument('--prune-iters', type=int, default=1,
help='number of iterations for scoring (should be 1 for Mag, SNIP, and GraSP)')
parser.add_argument('--prune-batch-size', type=int, default=256,
help='size of sample mini-batch for pruning methods')
parser.add_argument('--prune-schedule', type=str, default='exponential', choices=['linear', 'exponential'],
help='scheduling method for iterative pruning (SynFlow)')
parser.add_argument('--prune-scope', type=str, default='global', choices=['global', 'local'],
help='masking scope')
parser.add_argument('--prune-shots', type=int, default=1,
help='number of shots for pruning')
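# Illustrative sketch only: requesting one-shot SNIP pruning before training,
# keeping 20% of the weights; the values are placeholders, not recommended
# settings.
#   python main_imagenet.py /path/to/imagenet -a resnet18 --pruner SNIP \
#       --prune-epoch 0 --prune-ratio 0.2 --prune-iters 1 --prune-shots 1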
best_acc1 = 0
def main():
args = parser.parse_args()
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
def main_worker(gpu, ngpus_per_node, args):
global best_acc1
args.gpu = gpu
args.savedir = os.path.join(args.savedir, args.exp_name)
if not os.path.isdir(args.savedir):
os.makedirs(args.savedir)
args.logger = set_logging_config(args.savedir)
if args.gpu is not None:
args.logger.info("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
# create model
if args.pretrained:
args.logger.info("=> using pre-trained model '{}'".format(args.arch))
model = models.__dict__[args.arch](pretrained=True)
else:
args.logger.info("=> creating model '{}'".format(args.arch))
if args.arch.startswith('seed_resnet'):
pass
if args.arch.startswith('psg'):
model = models.__dict__[args.arch](
init_method=args.init_method,
predictive_backward = not args.psg_no_backward,
msb_bits = args.msb_bits,
msb_bits_weight = args.msb_bits_weight,
msb_bits_grad = args.msb_bits_grad,
threshold = args.psg_threshold,
sparsify = args.psg_sparsify,
sign = not args.psg_no_take_sign
)
temp_arch = args.arch[9:] if 'seed' in args.arch else args.arch[4:]
model_for_pruning = models.__dict__[temp_arch](init_method=args.init_method)
else:
model = models.__dict__[args.arch](init_method=args.init_method)
model_for_pruning = None
if not torch.cuda.is_available():
print('using CPU, this will be slow')
elif args.distributed:
# For multiprocessing distributed, DistributedDataParallel constructor
# should always set the single device scope, otherwise,
# DistributedDataParallel will use all available devices.
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
# When using a single GPU per process and per
# DistributedDataParallel, we need to divide the batch size
# ourselves based on the total number of GPUs we have
args.batch_size = int(args.batch_size / ngpus_per_node)
args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
model_without_ddp = model.module
if model_for_pruning is not None:
model_for_pruning.cuda(args.gpu)
model_for_pruning = torch.nn.parallel.DistributedDataParallel(model_for_pruning, device_ids=[args.gpu])
else:
model.cuda()
# DistributedDataParallel will divide and allocate batch_size to all
# available GPUs if device_ids are not set
model = torch.nn.parallel.DistributedDataParallel(model)
model_without_ddp = model.module
if model_for_pruning is not None:
model_for_pruning.cuda()
model_for_pruning = torch.nn.parallel.DistributedDataParallel(model_for_pruning)
elif args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
else:
# DataParallel will divide and allocate batch_size to all available GPUs
if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
model = torch.nn.DataParallel(model).cuda()
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
if args.optimizer == 'SGD':
parameters = [p for p in model_without_ddp.parameters() if p.requires_grad]
optimizer = torch.optim.SGD(parameters, args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
bop_optimizer = None
elif args.optimizer == 'BOP':
bop_params, non_bop_params = model_without_ddp.get_bop_params(), model_without_ddp.get_non_bop_params()
bop_param_masks = model_without_ddp.get_bop_param_masks()
bop_dict = [{'params': bop_params, 'adaptivity_rate': args.ar, 'threshold': args.tau}]
# optimizer = optim.SGD(non_bop_params, lr=args.lr, momentum=0.9, weight_decay=5e-4)
optimizer = torch.optim.SGD(non_bop_params, args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
# bop_optimizer = Bop(bop_params, None, ar=args.ar, threshold=args.tau)
bop_optimizer = Bop(bop_params, None, bop_param_masks, ar=args.ar, threshold=args.tau, device=args.gpu)
# schedulers = (optim.lr_scheduler.MultiStepLR(non_bop_optimizer, milestones=[80, 120], gamma=0.1),)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
args.logger.info("=> loading checkpoint '{}'".format(args.resume))
if args.gpu is None:
checkpoint = torch.load(args.resume)
else:
# Map model to be loaded to specified single gpu.
loc = 'cuda:{}'.format(args.gpu)
checkpoint = torch.load(args.resume, map_location=loc)
args.start_epoch = checkpoint['epoch']
best_acc1 = checkpoint['best_acc1']
if args.gpu is not None:
# best_acc1 may be from a checkpoint from a different GPU
best_acc1 = best_acc1.to(args.gpu)
model_without_ddp.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
args.logger.info("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
args.logger.info("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# Data loading code
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler)
sample_batch_indices = torch.randperm(len(train_dataset))[:100]
sample_batch = torch.utils.data.Subset(train_dataset, sample_batch_indices)
pruneloader = torch.utils.data.DataLoader(sample_batch, args.prune_batch_size, shuffle=True, num_workers=4)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
if args.evaluate:
validate(val_loader, model, criterion, args)
return
# Create pruner
num_classes = 1000
# if args.pruner:
# pruner = pruners.__dict__[args.pruner](masked_parameters(model, False, False, False), num_classes)
seed_convs = list(filter(lambda m: isinstance(m, (SeedConv2d, PredictiveSeedConv2d,)), model.modules()))
cur_shot = 0
prune_interval = int(args.prune_epoch / args.prune_shots)
best_acc1_acc5 = 0
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
adjust_learning_rate(optimizer, epoch, args)
if args.optimizer == 'BOP' and (epoch + 1) % args.ar_decay_freq == 0:
bop_optimizer.decay_ar(args.ar_decay_ratio)
# Enable gradients for pruning in SeedNet
for seed_conv in seed_convs:
seed_conv.enable_weight_grad()
if args.pruner and epoch == (cur_shot + 1) * prune_interval and cur_shot < args.prune_shots:
target_sparsity = 1 - (1 - args.prune_ratio) * (cur_shot + 1) / args.prune_shots
if args.arch.lower().startswith('psg'):
model_for_pruning.load_state_dict(model.state_dict(), strict=False)
# pruner = pruners.__dict__[args.pruner](masked_parameters(model_for_pruning, False, False, False), num_classes)
# prune_loop(model_for_pruning, criterion, pruner, pruneloader, num_classes, args.gpu, target_sparsity,
# args.prune_schedule, args.prune_scope, args.prune_iters)
prune_loop(model_for_pruning, criterion, args.pruner,
pruneloader, num_classes, args.gpu, target_sparsity,
args.prune_schedule, args.prune_scope, args.prune_iters,
prune_bias=False, prune_batchnorm=False, prune_residual=False,
weight_flips=None, score_threshold=None)
model.load_state_dict(model_for_pruning.state_dict(), strict=False)
else:
# prune_loop(model, criterion, pruner, pruneloader, num_classes, args.gpu, target_sparsity,
# args.prune_schedule, args.prune_scope, args.prune_iters)
prune_loop(model, criterion, args.pruner,
pruneloader, num_classes, args.gpu, target_sparsity,
args.prune_schedule, args.prune_scope, args.prune_iters,
prune_bias=False, prune_batchnorm=False, prune_residual=False,
weight_flips=None, score_threshold=None)
# Really copy the mask to the model
# with torch.no_grad():
# pruned_masks = [m for m, _ in pruner.masked_parameters]
# model_masks = [m for m, _ in masked_parameters(model, False, False, False)]
# for model_mask, pruned_mask in zip(model_masks, pruned_masks):
# model_mask.copy_(pruned_mask.data.detach().clone())
# Disable gradients when resuming training for SeedNet
for seed_conv in seed_convs:
seed_conv.disable_weight_grad()
cur_shot += 1
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch, args, bop_optimizer=bop_optimizer)
# evaluate on validation set
acc1, acc5 = validate(val_loader, model, criterion, args)
if args.gpu == 0:
args.logger.info('epoch {} \t Top-1 acc {} \t Top-5 acc {}'.format(epoch + 1, acc1, acc5))
# remember best acc@1 and save checkpoint
is_best = acc1 > best_acc1
best_acc1 = max(acc1, best_acc1)
args.logger.info(f'Max accuracy: {best_acc1}')
if is_best:
    # keep the Top-5 accuracy from the epoch that achieved the best Top-1
    best_acc1_acc5 = acc5
if not args.multiprocessing_distributed or (args.multiprocessing_distributed
and args.rank % ngpus_per_node == 0):
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model_without_ddp.state_dict(),
'best_acc1': best_acc1,
'acc5': best_acc1_acc5,
'optimizer' : optimizer.state_dict(),
}, is_best)
args.logger.info('best Top-1 acc {} \t corresponding Top-5 acc {}'.format(best_acc1, best_acc1_acc5))
def train(train_loader, model, criterion, optimizer, epoch, args, bop_optimizer=None):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(train_loader),
[batch_time, data_time, losses, top1, top5],
prefix="Epoch: [{}]".format(epoch))
# switch to train mode
model.train()
end = time.time()
for i, (images, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
if torch.cuda.is_available():
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
if bop_optimizer is not None:
bop_optimizer.zero_grad()
loss.backward()
optimizer.step()
if bop_optimizer is not None:
bop_optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if args.gpu == 0 and i % args.print_freq == 0:
progress.display(i)
def validate(val_loader, model, criterion, args):
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(val_loader),
[batch_time, losses, top1, top5],
prefix='Test: ')
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (images, target) in enumerate(val_loader):
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
if torch.cuda.is_available():
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if args.gpu == 0 and i % args.print_freq == 0:
progress.display(i)
# TODO: this should also be done with the ProgressMeter
top1.synchronize()
top5.synchronize()
# args.logger.info(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
# .format(top1=top1, top5=top5))
return top1.avg, top5.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def synchronize(self):
"""
Warning: does not synchronize `val`
"""
t = torch.tensor([self.sum, self.count], dtype=torch.float64, device='cuda')
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.sum = float(t[0])
self.count = int(t[1])
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print('\t'.join(entries))
def _get_batch_fmtstr(self, num_batches):
num_digits = len(str(num_batches // 1))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def adjust_learning_rate(optimizer, epoch, args):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.1 ** (epoch // 30))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
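# Worked example of the schedule above (assuming args.lr = 0.1): epochs 0-29 run at
# lr = 0.1, epochs 30-59 at 0.01, epochs 60-89 at 0.001, and so on.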
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
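# Illustrative call (shapes and numbers are made up): for `output` of shape
# (batch_size, num_classes) and integer `target` of shape (batch_size,),
# accuracy(output, target, topk=(1, 5)) returns [top1, top5], each a one-element
# tensor in percent, e.g. tensor([75.]) when 3 of 4 targets are ranked first.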
if __name__ == '__main__':
main()
| 43.933764 | 128 | 0.618128 |
f75b3557e861d98df3053c83895edbb0b6c8fbc0 | 3,902 | py | Python | src/onevision/data/data_class/detection.py | phlong3105/onevision | 90552b64df7213e7fbe23c80ffd8a89583289433 | [
"MIT"
] | 2 | 2022-03-28T09:46:38.000Z | 2022-03-28T14:12:32.000Z | src/onevision/data/data_class/detection.py | phlong3105/onevision | 90552b64df7213e7fbe23c80ffd8a89583289433 | [
"MIT"
] | null | null | null | src/onevision/data/data_class/detection.py | phlong3105/onevision | 90552b64df7213e7fbe23c80ffd8a89583289433 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Data class to store output from detectors.
"""
from __future__ import annotations
import uuid
from timeit import default_timer as timer
from typing import Optional
from typing import Union
import cv2
import numpy as np
from onevision.cv.imgproc.shape import box_xyxy_to_cxcyrh
from onevision.cv.imgproc.shape import get_box_center
from onevision.type import Color
__all__ = [
"Detection",
]
# MARK: - Detection
class Detection:
"""Detection converted from raw numpy output from detector.
Attributes:
id_ (int, str):
Object unique ID.
roi_id (int, str, optional):
Unique ID of the ROI that the object is in. Else `None`.
Default: `None`.
box (np.ndarray, optional):
Bounding box in (x1, y1, x2, y2) format. Default: `None`.
polygon (np.ndarray, optional):
List of points. Default: `None`.
features (np.ndarray, optional):
Feature vector that describes the object contained in this image.
Default: `None`.
confidence (float, optional):
Confidence score. Default: `None`.
class_label (dict, optional):
Class-label dict. Default: `None`.
frame_index (int, optional):
Index of frame when the Detection is created. Default: `None`.
timestamp (float):
Time when the object is created.
"""
# MARK: Magic Functions
def __init__(
self,
id_ : Optional[Union[int, str]] = None,
roi_id : Optional[Union[int, str]] = None,
box : Optional[np.ndarray] = None,
polygon : Optional[np.ndarray] = None,
features : Optional[np.ndarray] = None,
confidence : Optional[float] = None,
class_label: Optional[dict] = None,
frame_index: Optional[int] = None,
timestamp : Optional[float] = None,
*args, **kwargs
):
super().__init__()
# uuid4()/timer() must not be evaluated in the signature, otherwise every default
# instance would share the single id/timestamp computed once at import time
self.id_ = id_ if id_ is not None else uuid.uuid4().int
self.roi_id = roi_id
self.box = box
self.polygon = polygon
self.features = features
self.confidence = confidence
self.class_label = class_label
self.frame_index = frame_index
self.timestamp = timestamp if timestamp is not None else timer()
# MARK: Properties
@property
def box_cxcyrh(self):
"""Return the box in (cx, cy, r, h) format."""
return box_xyxy_to_cxcyrh(self.box)
@property
def box_center(self):
"""Return the box's center."""
return get_box_center(self.box)
@property
def box_tl(self):
"""Return the box's top left corner."""
return self.box[0:2]
@property
def box_br(self):
"""Return the box's bottom right corner."""
return self.box[2:4]
# MARK: Visualize
def draw(
self,
drawing: np.ndarray,
box : bool = False,
polygon: bool = False,
label : bool = True,
color : Optional[Color] = None
) -> np.ndarray:
"""Draw the road_objects into the `drawing`.
Args:
drawing (np.ndarray):
Drawing canvas.
box (bool):
Should draw the detected boxes? Default: `False`.
polygon (bool):
Should draw polygon? Default: `False`.
label (bool):
Should draw label? Default: `True`.
color (tuple):
Primary color. Default: `None`.
Returns:
drawing (np.ndarray):
Drawing canvas.
"""
color = color if (color is not None) else self.class_label["color"]
if box:
cv2.rectangle(
img = drawing,
pt1 = (self.box[0], self.box[1]),
pt2 = (self.box[2], self.box[3]),
color = color,
thickness = 2
)
if polygon:
pts = self.polygon.reshape((-1, 1, 2))
cv2.polylines(img=drawing, pts=[pts], isClosed=True, color=color, thickness=2)
if label:
font = cv2.FONT_HERSHEY_SIMPLEX
org = (self.box_tl[0] + 5, self.box_tl[1])
cv2.putText(
img = drawing,
text = self.class_label["name"],
fontFace = font,
fontScale = 1.0,
org = org,
color = color,
thickness = 2
)
return drawing
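# Minimal usage sketch (illustrative only; the canvas size, box coordinates and
# class_label values below are made up and this block is not part of the original module).
if __name__ == "__main__":
    canvas = np.zeros((480, 640, 3), dtype=np.uint8)
    det = Detection(
        box=np.array([100, 120, 220, 260]),
        confidence=0.9,
        class_label={"name": "vehicle", "color": (0, 255, 0)},
        frame_index=0,
    )
    canvas = det.draw(canvas, box=True, label=True)
    print(det.box_tl, det.box_br, det.box_center)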
| 24.540881 | 79 | 0.627627 |
f75b422e945f6d8fca51464b8155e36e9ceaf9ab | 3,116 | py | Python | automaton/lib/plugin.py | nemec/Automaton | eea2f89dc10031fba45c80eb63053480dfc3543f | [
"MIT"
] | 6 | 2016-01-05T09:14:57.000Z | 2021-05-17T20:46:46.000Z | automaton/lib/plugin.py | nemec/Automaton | eea2f89dc10031fba45c80eb63053480dfc3543f | [
"MIT"
] | null | null | null | automaton/lib/plugin.py | nemec/Automaton | eea2f89dc10031fba45c80eb63053480dfc3543f | [
"MIT"
] | 1 | 2018-12-18T02:37:24.000Z | 2018-12-18T02:37:24.000Z | try:
import unittest2 as unittest
except ImportError:
import unittest
from automaton.lib import registrar
class UnsuccessfulExecution(Exception):
""" UnsuccessfulExecution errors are not necessarily fatal.
All they represent is a failure on the service's part to
successfully produce normal output. For example, if a
location service is unable to determine an accurate location,
it can raise this error to let the calling thread know that
there is no location available.
"""
pass
class PluginLoadError(Exception):
pass
class PluginInterface(object):
def __init__(self, registrar):
self.registrar = registrar
class RegistrationTestCase(unittest.TestCase):
"""TestCase superclass for plugins.
Define plugin_class in the subclass and the registrar and plugin will
be automatically created and destroyed during each test case setup.
"""
plugin_class = None
def setUp(self):
"""
Bind registrar object to self.registrar and plugin object to self.plugin.
"""
self.registrar = registrar.Registrar()
self.assertIsNotNone(self.plugin_class,
"plugin_type attribute must be defined in subclasses of "
"RegistrationTestCase.")
self.plugin = self.plugin_class(self.registrar)
def test_disable(self):
"""Automated test to ensure disabling a plugin cleans up the registrar."""
self.plugin.disable()
self.assertEqual(len(self.registrar.services), 0,
"Disabling a plugin must remove all registered services.")
def check_interpreter(self, *args):
"""
Convenience function for testing the interpreter with a number of
inputs at once.
Takes in a variable number of (input, output) pairs, with
input being the interpreter's input string and
output being a three-tuple of (service_name, namespace, parsed_arg_dict)
If any of the output members is None, that output is ignored.
"""
for inp, out in args:
out_service, out_namespace, out_args = out
service, namespace, args = self.registrar.find_best_service(inp)
if out_service is not None:
self.assertEquals(service, out_service)
if out_namespace is not None:
self.assertEquals(namespace, out_namespace)
if out_args is not None:
self.assertEquals(args, out_args)
def check_conversation(self, fnc, arg_sequence, output_sequence):
"""
Convenience function for testing conversations.
Ensures the sequence of inputs are provided to the conversation
generator in the same way the server will handle them.
"""
self.assertGreater(len(arg_sequence), 1,
"Must provide at least two responses.")
self.assertEquals(len(arg_sequence), len(output_sequence),
"Argument and output sequences must be same length.")
gen = fnc(**arg_sequence[0])
conversation_output = gen.next()
self.assertEquals(conversation_output, output_sequence[0])
arg_sequence = arg_sequence[1:]
output_sequence = output_sequence[1:]
for kwargs, out in zip(arg_sequence, output_sequence):
self.assertEqual(gen.send(kwargs), out)
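# Illustrative subclass sketch (EchoPlugin and the expected parse below are hypothetical;
# shown only to indicate how plugin_class and the helpers above are meant to be used):
#
#   class EchoPluginTests(RegistrationTestCase):
#       plugin_class = EchoPlugin
#
#       def test_interpreter(self):
#           self.check_interpreter(
#               ("echo hello", ("echo", None, {"text": "hello"})))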
| 32.8 | 78 | 0.723363 |
f75b4b8f8b9c555b2b22290c17f4b7bfaa14c619 | 12,886 | py | Python | hytra/core/divisionfeatures.py | m-novikov/hytra | 0dc28deaa2571fa8bea63ca178f0e53cc1cd7508 | [
"MIT"
] | null | null | null | hytra/core/divisionfeatures.py | m-novikov/hytra | 0dc28deaa2571fa8bea63ca178f0e53cc1cd7508 | [
"MIT"
] | null | null | null | hytra/core/divisionfeatures.py | m-novikov/hytra | 0dc28deaa2571fa8bea63ca178f0e53cc1cd7508 | [
"MIT"
] | null | null | null | import numpy as np
import math
def dotproduct(v1, v2):
return sum((a * b) for a, b in zip(v1, v2))
def length(v):
return math.sqrt(dotproduct(v, v))
def angle(v1, v2):
try:
if length(v1) * length(v2) == 0:
radians = 0
else:
radians = math.acos(dotproduct(v1, v2) / (length(v1) * length(v2)))
except Exception as e:
# print str(e), ': math.acos(', dotproduct(v1, v2) / (length(v1) * length(v2)), '), v1 =', v1, ', v2 =', v2
radians = 0
return (float(radians) * 180.0) / math.pi
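# Quick sanity checks for the helpers above: angle([1, 0], [0, 1]) == 90.0,
# angle([1, 0], [1, 0]) == 0.0, and zero-length vectors fall back to 0 degrees.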
##### Feature base class #######
class Feature(object):
name = "Feature"
plugin = "Tracking Features"
default_value = 0
dimensionality = None
def __init__(
self,
feats_name,
default_value=None,
delim="_",
scales=[1.0, 1.0, 1.0],
ndim=2,
feat_dim=1,
):
self.name += str(delim) + str(feats_name)
self.feats_name = feats_name
if default_value != None:
self.default_value = default_value
self.scales = scales
self.ndim = ndim
self.feat_dim = feat_dim
def compute(self, feats_cur, feats_next, **kwargs):
raise NotImplementedError("Feature not fully implemented yet.")
def getName(self):
return self.name
def getPlugin(self):
return self.plugin
def dim(self):
return self.dimensionality
class ParentChildrenRatio(Feature):
name = "ParentChildrenRatio"
dimensionality = 1
def compute(self, feats_cur, feats_next, **kwargs):
if len(feats_next) < 2:
return np.array(len(feats_cur) * [self.default_value])
result = np.array(feats_cur) / np.array(feats_next[0] + feats_next[1])
for i in range(len(result)):
if math.isnan(result[i]):
result[i] = self.default_value
return result
def dim(self):
return self.dimensionality * self.feat_dim
class ChildrenRatio(Feature):
name = "ChildrenRatio"
dimensionality = 1
def compute(self, feats_cur, feats_next, **kwargs):
if len(feats_next) < 2:
return np.array(len(feats_cur) * [self.default_value])
ratio = np.array(feats_next[0]) / np.array(feats_next[1])
for i in range(len(ratio)):
if math.isnan(ratio[i]):
ratio[i] = self.default_value
if ratio[i] > 1 and ratio[i] != 0:
ratio[i] = 1.0 / ratio[i]
return ratio
def dim(self):
return self.dimensionality * self.feat_dim
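# Example with illustrative numbers: for two children of size 120 and 80 voxels,
# ChildrenRatio on the "Count" feature yields 80/120 ~ 0.67; ratios above 1 are
# inverted so the value stays in (0, 1], and fewer than two children gives the default.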
class SquaredDistances(Feature):
name = "SquaredDistances"
def compute(self, feats_cur, feats_next, **kwargs):
return feats_cur
def dim(self):
return self.ndim
class ParentChildrenAngle(Feature):
name = "ParentChildrenAngle"
dimensionality = 1
def compute(self, feats_cur, feats_next, **kwargs):
angles = []
for idx, com1 in enumerate(feats_next):
v1 = (com1 - feats_cur) * self.scales[0 : com1.shape[0]]
for com2 in feats_next[idx + 1 :]:
v2 = (com2 - feats_cur) * self.scales[0 : com2.shape[0]]
ang = angle(v1, v2)
if ang > 180:
assert ang <= 360.01, "the angle must be smaller than 360 degrees"
ang = 360 - ang
angles.append(ang)
if len(angles) == 0:
angles = [self.default_value]
return max(angles)
class ParentIdentity(Feature):
name = ""
def compute(self, feats_cur, feats_next, **kwargs):
return feats_cur
class FeatureManager(object):
feature_mappings = {
"ParentIdentity": ParentIdentity,
"SquaredDistances": SquaredDistances,
"ChildrenRatio": ChildrenRatio,
"ParentChildrenRatio": ParentChildrenRatio,
"ParentChildrenAngle": ParentChildrenAngle,
}
def __init__(
self,
scales=[1.0, 1.0, 1.0],
n_best=3,
com_name_cur="RegionCenter",
com_name_next="RegionCenter",
size_name="Count",
delim="_",
template_size=50,
ndim=2,
size_filter=4,
squared_distance_default=9999,
):
self.scales = scales[0:ndim]
self.n_best = n_best
self.com_name_cur = com_name_cur
self.com_name_next = com_name_next
self.size_name = size_name
self.delim = delim
self.template_size = template_size
self.ndim = ndim
self.size_filter = size_filter
self.squared_distance_default = squared_distance_default
def _getBestSquaredDistances(
self, com_cur, coms_next, size_filter=None, sizes_next=[], default_value=9999
):
""" returns the squared distances to the objects in the neighborhood of com_curr, optionally with size filter """
squaredDistances = []
for label_next in coms_next.keys():
assert label_next in sizes_next.keys()
if size_filter != None and sizes_next[label_next] >= size_filter:
dist = np.linalg.norm(coms_next[label_next] - com_cur * self.scales)
squaredDistances.append([label_next, dist])
squaredDistances = np.array(squaredDistances)
# sort the array in the second column in ascending order
squaredDistances = np.array(
sorted(squaredDistances, key=lambda a_entry: a_entry[1])
)
# initialize with label -1 and default value
result = np.array(
[[-1, default_value] for x in range(self.n_best)], dtype=np.float32
)
if squaredDistances.shape[0] != 0:
result[
0 : min(squaredDistances.shape[0], result.shape[0]), :
] = squaredDistances[0 : min(squaredDistances.shape[0], result.shape[0]), :]
return result
def computeFeatures_at(
self, feats_cur, feats_next, img_next, feat_names, label_image_filename=None
):
"""
**Parameters:**
* if `label_image_filename` is given, it is used to filter the objects from the feature dictionaries
that belong to that label image only (in the JST setting)
"""
# n_labels = list(feats_cur.values())[0].shape[0]
result = {}
# find available features
vigra_feat_names = set([self.com_name_cur, self.com_name_next, self.size_name])
feat_classes = {}
for name in feat_names:
name_split = name.split(self.delim)
if "SquaredDistances" in name_split:
continue
if len(name_split) != 2:
raise ValueError(
"tracking features consist of an operator and a feature name only, given name={}".format(
name_split
)
)
if len(feats_cur[name_split[1]].shape) > 1:
feat_dim = feats_cur[name_split[1]].shape[1]
else:
feat_dim = 1
feat_classes[name] = self.feature_mappings[name_split[0]](
name_split[1], delim=self.delim, ndim=self.ndim, feat_dim=feat_dim
)
shape = (list(feats_cur.values())[0].shape[0], feat_classes[name].dim())
result[name] = np.ones(shape) * feat_classes[name].default_value
vigra_feat_names.add(name_split[1])
# initialize squared distances
for idx in range(self.n_best):
name = "SquaredDistances_" + str(idx)
result[name] = (
np.ones((list(feats_cur.values())[0].shape[0], 1))
* self.squared_distance_default
)
# construct mapping which we only need if label_image_filename was given and the features 'filename' and 'id' exist
if (
label_image_filename is not None
and "filename" in feats_next
and "id" in feats_next
):
global_indices_current_label_image_only = [
l
for l, f in enumerate(feats_next["filename"])
if f == label_image_filename
]
local_to_global_index_map = dict(
[
(feats_next["id"][l], l)
for l in global_indices_current_label_image_only
]
)
# for every object in this frame, check which objects are in the vicinity in the next frame
valid_indices = [0]
for label_cur, com_cur in enumerate(feats_cur[self.com_name_cur]):
if (
label_image_filename is not None
and "filename" in feats_cur
and feats_cur["filename"][label_cur] != label_image_filename
):
# in the JST context, only look at objects from a given segmentation hypotheses set
continue
if label_cur == 0:
continue
valid_indices.append(label_cur)
feats_next_subset = {}
for k in vigra_feat_names:
feats_next_subset[k] = {}
if feats_next is not None and img_next is not None:
# find roi around the center of the current object
idx_cur = [round(x) for x in com_cur]
roi = []
for idx, coord in enumerate(idx_cur):
start = max(coord - self.template_size / 2, 0)
stop = min(coord + self.template_size / 2, img_next.shape[idx])
roi.append(slice(int(start), int(stop)))
# find all coms in the neighborhood of com_cur by checking the next frame's labelimage in the roi
subimg_next = img_next[roi]
labels_next = np.unique(subimg_next).tolist()
# if 'id' in features, map the labels first -- because labels_next refers image object ids,
# whereas the features are the union of objects from several segmentations
if "id" in feats_next:
labels_next = [
local_to_global_index_map[l] for l in labels_next if l != 0
]
for l in labels_next:
if l != 0:
for n in vigra_feat_names:
feats_next_subset[n][l] = np.array(
[feats_next[n][l]]
).flatten()
sq_dist_label = self._getBestSquaredDistances(
com_cur,
feats_next_subset[self.com_name_next],
self.size_filter,
feats_next_subset[self.size_name],
default_value=self.squared_distance_default,
)
feats_next_subset_best = {}
for n in vigra_feat_names:
feats_next_subset_best[n] = []
for idx, row in enumerate(sq_dist_label):
l = row[0]
if l != -1:
feats_next_subset_best[n].append(feats_next_subset[n][l])
# first add squared distances
for idx in range(self.n_best):
name = "SquaredDistances_" + str(idx)
result[name][label_cur] = sq_dist_label[idx][1]
# add all other features
for name, feat_class in feat_classes.items():
if feat_class.feats_name == "SquaredDistances":
f_next = sq_dist_label[0:2, 1]
f_cur = None
else:
f_cur = np.array(
[feats_cur[feat_class.feats_name][label_cur]]
).flatten()
f_next = np.array(
[feats_next_subset_best[feat_class.feats_name]]
).reshape((-1, f_cur.shape[0]))
result[name][label_cur] = feat_class.compute(f_cur, f_next)
# return only valid labels
for feature_name in result:
result[feature_name] = result[feature_name][valid_indices]
return result
if __name__ == "__main__":
import vigra
import numpy as np
img_cur = vigra.readImage("/home/mschiegg/tmp/segmentImage.tif")
img_next = img_cur
labels_cur = vigra.analysis.labelImage(img_cur)
feats_cur = vigra.analysis.extractRegionFeatures(
labels_cur.astype(np.float32),
labels_cur.astype(np.uint32),
features="all",
ignoreLabel=0,
)
feat_names = [
"ParentChildrenRatio_Count",
"ParentChildrenRatio_Mean",
"ChildrenRatio_Count",
"ChildrenRatio_Mean",
"ParentChildrenAngle_RegionCenter",
"ChildrenRatio_SquaredDistances",
]
fm = FeatureManager()
res = fm.computeFeatures_at(feats_cur, feats_cur, img_cur, feat_names)
| 33.732984 | 123 | 0.565032 |
f75b8d9d39cddc633df141d41917ba4bf30fc46f | 1,906 | py | Python | aldryn_faq/forms.py | liip-forks/aldryn-faq | 1e9d5c8d410f955b8082bd20ea7a4d85d4c5e0f7 | [
"BSD-3-Clause"
] | null | null | null | aldryn_faq/forms.py | liip-forks/aldryn-faq | 1e9d5c8d410f955b8082bd20ea7a4d85d4c5e0f7 | [
"BSD-3-Clause"
] | null | null | null | aldryn_faq/forms.py | liip-forks/aldryn-faq | 1e9d5c8d410f955b8082bd20ea7a4d85d4c5e0f7 | [
"BSD-3-Clause"
] | null | null | null |
from django import forms
from django.utils.translation import ugettext_lazy as _
from aldryn_apphooks_config.utils import setup_config
from app_data import AppDataForm
from parler.forms import TranslatableModelForm
from sortedm2m.forms import SortedMultipleChoiceField
from .models import Category, QuestionListPlugin, Question, FaqConfig
class CategoryAdminForm(TranslatableModelForm):
class Meta:
model = Category
fields = [
'name',
'slug',
'appconfig',
]
# def clean_slug(self):
# slug = self.cleaned_data['slug']
# translations_model = Category._meta.translations_model
# categories_with_slug = translations_model.objects.filter(slug=slug)
# if self.instance.pk:
# # Make sure to exclude references from this master :)
# categories_with_slug = categories_with_slug.exclude(
# master_id=self.instance.pk)
# if categories_with_slug.exists():
# raise forms.ValidationError(
# 'A category with this slug already exists.')
# return slug
class QuestionListPluginForm(forms.ModelForm):
questions = SortedMultipleChoiceField(queryset=Question.objects.none())
class Meta:
model = QuestionListPlugin
fields = [
'questions',
]
def __init__(self, *args, **kwargs):
super(QuestionListPluginForm, self).__init__(*args, **kwargs)
questions_field = self.fields['questions']
questions_field.queryset = Question.objects.language()
class FaqOptionForm(AppDataForm):
show_description = forms.BooleanField(
required=False,
help_text=_(
"This option enables the short description to be available "
"within the list view rendering for all plugins."
)
)
setup_config(FaqOptionForm, FaqConfig)
| 27.623188 | 77 | 0.665268 |
f75b95a029d4024af2c7bbe319810adb0338b3d0 | 8,196 | py | Python | twisted/mail/test/test_options.py | sxamit/twisted | 30f6966329c857c3631c60aeb420d84d7828e01e | [
"MIT",
"Unlicense"
] | 1 | 2017-08-07T14:52:02.000Z | 2017-08-07T14:52:02.000Z | Lib/site-packages/twisted/mail/test/test_options.py | adzhou/Python27 | a7113b69d54a04cc780143241c2f1fe81939ad3a | [
"bzip2-1.0.6"
] | null | null | null | Lib/site-packages/twisted/mail/test/test_options.py | adzhou/Python27 | a7113b69d54a04cc780143241c2f1fe81939ad3a | [
"bzip2-1.0.6"
] | 1 | 2018-11-07T12:52:07.000Z | 2018-11-07T12:52:07.000Z | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.mail.tap}.
"""
from twisted.trial.unittest import TestCase
from twisted.python.usage import UsageError
from twisted.mail import protocols
from twisted.mail.tap import Options, makeService
from twisted.python.filepath import FilePath
from twisted.python.reflect import requireModule
from twisted.internet import endpoints, defer
if requireModule('OpenSSL') is None:
sslSkip = 'Missing OpenSSL package.'
else:
sslSkip = None
class OptionsTests(TestCase):
"""
Tests for the command line option parser used for I{twistd mail}.
"""
def setUp(self):
self.aliasFilename = self.mktemp()
aliasFile = file(self.aliasFilename, 'w')
aliasFile.write('someuser:\tdifferentuser\n')
aliasFile.close()
def testAliasesWithoutDomain(self):
"""
Test that adding an aliases(5) file before adding a domain raises a
UsageError.
"""
self.assertRaises(
UsageError,
Options().parseOptions,
['--aliases', self.aliasFilename])
def testAliases(self):
"""
Test that adding an aliases(5) file to an IAliasableDomain at least
doesn't raise an unhandled exception.
"""
Options().parseOptions([
'--maildirdbmdomain', 'example.com=example.com',
'--aliases', self.aliasFilename])
def test_barePort(self):
"""
A bare port passed to I{--pop3} results in deprecation warning in
addition to a TCP4ServerEndpoint.
"""
options = Options()
options.parseOptions(['--pop3', '8110'])
self.assertEqual(len(options['pop3']), 1)
self.assertIsInstance(
options['pop3'][0], endpoints.TCP4ServerEndpoint)
warnings = self.flushWarnings([options.opt_pop3])
self.assertEqual(len(warnings), 1)
self.assertEqual(warnings[0]['category'], DeprecationWarning)
self.assertEqual(
warnings[0]['message'],
"Specifying plain ports and/or a certificate is deprecated since "
"Twisted 11.0; use endpoint descriptions instead.")
def _endpointTest(self, service):
"""
Use L{Options} to parse a single service configuration parameter and
verify that an endpoint of the correct type is added to the list for
that service.
"""
options = Options()
options.parseOptions(['--' + service, 'tcp:1234'])
self.assertEqual(len(options[service]), 1)
self.assertIsInstance(
options[service][0], endpoints.TCP4ServerEndpoint)
def test_endpointSMTP(self):
"""
When I{--smtp} is given a TCP endpoint description as an argument, a
TCPServerEndpoint is added to the list of SMTP endpoints.
"""
self._endpointTest('smtp')
def test_endpointPOP3(self):
"""
When I{--pop3} is given a TCP endpoint description as an argument, a
TCPServerEndpoint is added to the list of POP3 endpoints.
"""
self._endpointTest('pop3')
def test_protoDefaults(self):
"""
POP3 and SMTP each listen on a TCP4ServerEndpoint by default.
"""
options = Options()
options.parseOptions([])
self.assertEqual(len(options['pop3']), 1)
self.assertIsInstance(
options['pop3'][0], endpoints.TCP4ServerEndpoint)
self.assertEqual(len(options['smtp']), 1)
self.assertIsInstance(
options['smtp'][0], endpoints.TCP4ServerEndpoint)
def test_protoDisable(self):
"""
The I{--no-pop3} and I{--no-smtp} options disable POP3 and SMTP
respectively.
"""
options = Options()
options.parseOptions(['--no-pop3'])
self.assertEqual(options._getEndpoints(None, 'pop3'), [])
self.assertNotEqual(options._getEndpoints(None, 'smtp'), [])
options = Options()
options.parseOptions(['--no-smtp'])
self.assertNotEqual(options._getEndpoints(None, 'pop3'), [])
self.assertEqual(options._getEndpoints(None, 'smtp'), [])
def test_allProtosDisabledError(self):
"""
If all protocols are disabled, L{UsageError} is raised.
"""
options = Options()
self.assertRaises(
UsageError, options.parseOptions, (['--no-pop3', '--no-smtp']))
def test_pop3sBackwardCompatibility(self):
"""
The deprecated I{--pop3s} and I{--certificate} options set up a POP3 SSL
server.
"""
cert = FilePath(__file__).sibling("server.pem")
options = Options()
options.parseOptions(['--pop3s', '8995',
'--certificate', cert.path])
self.assertEqual(len(options['pop3']), 2)
self.assertIsInstance(
options['pop3'][0], endpoints.SSL4ServerEndpoint)
self.assertIsInstance(
options['pop3'][1], endpoints.TCP4ServerEndpoint)
warnings = self.flushWarnings([options.postOptions])
self.assertEqual(len(warnings), 1)
self.assertEqual(warnings[0]['category'], DeprecationWarning)
self.assertEqual(
warnings[0]['message'],
"Specifying plain ports and/or a certificate is deprecated since "
"Twisted 11.0; use endpoint descriptions instead.")
if sslSkip is not None:
test_pop3sBackwardCompatibility.skip = sslSkip
def test_esmtpWithoutHostname(self):
"""
If I{--esmtp} is given without I{--hostname}, L{Options.parseOptions}
raises L{UsageError}.
"""
options = Options()
exc = self.assertRaises(UsageError, options.parseOptions, ['--esmtp'])
self.assertEqual("--esmtp requires --hostname", str(exc))
def test_auth(self):
"""
Tests that the --auth option registers a checker.
"""
options = Options()
options.parseOptions(['--auth', 'memory:admin:admin:bob:password'])
self.assertEqual(len(options['credCheckers']), 1)
checker = options['credCheckers'][0]
interfaces = checker.credentialInterfaces
registered_checkers = options.service.smtpPortal.checkers
for iface in interfaces:
self.assertEqual(checker, registered_checkers[iface])
class SpyEndpoint(object):
"""
SpyEndpoint remembers what factory it is told to listen with.
"""
listeningWith = None
def listen(self, factory):
self.listeningWith = factory
return defer.succeed(None)
class MakeServiceTests(TestCase):
"""
Tests for L{twisted.mail.tap.makeService}
"""
def _endpointServerTest(self, key, factoryClass):
"""
Configure a service with two endpoints for the protocol associated with
C{key} and verify that when the service is started a factory of type
C{factoryClass} is used to listen on each of them.
"""
cleartext = SpyEndpoint()
secure = SpyEndpoint()
config = Options()
config[key] = [cleartext, secure]
service = makeService(config)
service.privilegedStartService()
service.startService()
self.addCleanup(service.stopService)
self.assertIsInstance(cleartext.listeningWith, factoryClass)
self.assertIsInstance(secure.listeningWith, factoryClass)
def test_pop3(self):
"""
If one or more endpoints is included in the configuration passed to
L{makeService} for the C{"pop3"} key, a service for starting a POP3
server is constructed for each of them and attached to the returned
service.
"""
self._endpointServerTest("pop3", protocols.POP3Factory)
def test_smtp(self):
"""
If one or more endpoints is included in the configuration passed to
L{makeService} for the C{"smtp"} key, a service for starting an SMTP
server is constructed for each of them and attached to the returned
service.
"""
self._endpointServerTest("smtp", protocols.SMTPFactory)
| 33.317073 | 80 | 0.627989 |
f75ba8fcc31e18267a800445537075f8e521c3b9 | 32,198 | py | Python | script/MergeBrain.py | lsb-riken/CUBIC-informatics | e7982072bb5d892f55e86cdf671376ab379b9b29 | [
"MIT"
] | 6 | 2019-11-23T18:45:19.000Z | 2021-03-24T16:02:06.000Z | script/MergeBrain.py | lsb-riken/CUBIC-informatics | e7982072bb5d892f55e86cdf671376ab379b9b29 | [
"MIT"
] | 5 | 2020-01-30T18:04:29.000Z | 2021-10-22T01:50:24.000Z | script/MergeBrain.py | lsb-riken/CUBIC-informatics | e7982072bb5d892f55e86cdf671376ab379b9b29 | [
"MIT"
] | 1 | 2019-09-12T07:57:07.000Z | 2019-09-12T07:57:07.000Z | #!/usr/bin/env python
#-*- coding:utf-8 -*-
"""Overview:
Downscale images & cells for atlas mapping
Usage:
MergeBrain.py images PARAM_FILE [-p NUM_CPUS] [--exec <path>]
MergeBrain.py cells PARAM_FILE
MergeBrain.py full PARAM_FILE [-p NUM_CPUS] [--exec <path>]
Options:
-h --help Show this screen.
--version Show version.
-p NUM_CPUS Number of cpus to be used [default: -1](all available).
--exec <path> Location of the executable [default: ./build/ScaleMerge]
"""
import json, glob, os.path, shutil
import tifffile
import functools
from docopt import docopt
import joblib
import subprocess as sp
import pandas as pd
import numpy as np
from HalfBrainCells import HalfBrainCells
from HalfBrainImages import HalfBrainImages
dt_scalemerged = np.dtype([
('scaled_x','f4'), ('scaled_y', 'f4'), ('scaled_z', 'f4'),
('is_valid', 'bool'),
])
def run_ScaleMerge(paramfile, mergedfile, path_exec, logfile=None, print_output=True):
mergedfile_mean,mergedfile_max,mergedfile_min = mergedfile
cmd = " ".join([path_exec, paramfile,
mergedfile_mean,mergedfile_max,mergedfile_min])
print("[*] Executing : {}".format(cmd))
out = sp.check_output([path_exec, paramfile,
mergedfile_mean,mergedfile_max,mergedfile_min])
if logfile:
with open(logfile, "wb") as f:
f.write(out)
else:
if print_output:
print(out.decode())
return
class WholeBrainImages(object):
def __init__(self, paramfile, ):
print("\n[*] Initializing WholeBrain({})".format(paramfile))
with open(paramfile) as f:
self.params = json.load(f)
self.halfbrain_FW = HalfBrainImages(self.params["HDoG_paramfile"]["FW"])
self.halfbrain_RV = HalfBrainImages(self.params["HDoG_paramfile"]["RV"])
# assume scale is equivalent for FW & RV except for direction
assert abs(self.halfbrain_FW.scale_xy) == abs(self.halfbrain_RV.scale_xy)
assert abs(self.halfbrain_FW.scale_z) == abs(self.halfbrain_RV.scale_z)
self.fnames_FW = self.halfbrain_FW.list_fnames_all
self.fnames_RV = self.halfbrain_RV.list_fnames_all
self.zs_FW = self.halfbrain_FW.list_zs_all
self.zs_RV = self.halfbrain_RV.list_zs_all
self.zs_global_FW = self.halfbrain_FW.list_zs_global_all
self.zs_global_RV = self.halfbrain_RV.list_zs_global_all
# boundary position
fname_boundary_FW = self.params["merge_info"]["boundary_fname"]["FW"]
fname_boundary_RV = self.params["merge_info"]["boundary_fname"]["RV"]
if len(self.zs_FW) > 0:
self.iz_FW_boundary = self.zs_FW.index(int(fname_boundary_FW))
else:
self.iz_FW_boundary = None
if len(self.zs_RV) > 0:
self.iz_RV_boundary = self.zs_RV.index(int(fname_boundary_RV))
else:
self.iz_RV_boundary = None
print("\t boundary for FW ({}) at i={}".format(fname_boundary_FW, self.iz_FW_boundary))
print("\t boundary for RV ({}) at i={}".format(fname_boundary_RV, self.iz_RV_boundary))
self.skip_z_FW = 1
self.skip_z_RV = 1
self.param_header_FW = ""
self.param_header_RV = ""
self.precompute_param_header(is_FW=True)
self.precompute_param_header(is_FW=False)
self.bound_z_global_FW = (-np.inf, +np.inf)
self.bound_z_global_RV = (-np.inf, +np.inf)
self.merged_depth = None
self.single_mergedfile_mean = os.path.join(self.params["dst_basedir"], "whole.tif")
self.single_mergedfile_max = os.path.join(self.params["dst_basedir"], "whole_max.tif")
self.single_mergedfile_min = os.path.join(self.params["dst_basedir"], "whole_min.tif")
def precompute_param_header(self, is_FW):
if is_FW:
print("[*] Precomputng param header for FW")
halfbrain = self.halfbrain_FW
flip_rot_before_info = self.params["merge_info"]["flip_rot"]["FW"]
else:
print("[*] Precomputng param header for RV")
halfbrain = self.halfbrain_RV
flip_rot_before_info = self.params["merge_info"]["flip_rot"]["RV"]
input_image_info = halfbrain.params["input_image_info"]
flip_rot_after_info = self.params["scale_info"]["flip_rot"]
# downscale ratio
down_scale_xyz = self.params["scale_info"]["downscale_unit"]
downscale_ratio_xy = float(abs(halfbrain.scale_xy)) / down_scale_xyz # [um / um] = dimensionless
assert down_scale_xyz % halfbrain.scale_z == 0
downscale_ratio_z = float(abs(halfbrain.scale_z)) / down_scale_xyz # [um / um] = dimensionless
skip_z = int(down_scale_xyz / abs(halfbrain.scale_z))
print("\t downscale ratio for xy : {}".format(downscale_ratio_xy))
print("\t downscale ratio for z : {} (skip={})".format(downscale_ratio_z, skip_z))
flip_rot_before = 0
flip_rot_before += 1 if flip_rot_before_info["flipX"] else 0
flip_rot_before += 2 if flip_rot_before_info["flipY"] else 0
flip_rot_before += 4 if flip_rot_before_info["rotCCW"] else 0
flip_rot_before += 8 if flip_rot_before_info["rotCW"] else 0
flip_rot_after = 0
flip_rot_after += 1 if flip_rot_after_info["flipX"] else 0
flip_rot_after += 2 if flip_rot_after_info["flipY"] else 0
if flip_rot_before_info["rotCCW"] or flip_rot_before_info["rotCW"]:
width_loaded = input_image_info["height"]
height_loaded = input_image_info["width"]
else:
width_loaded = input_image_info["width"]
height_loaded = input_image_info["height"]
num_xnames = len(halfbrain.list_xnames)
num_ynames = len(halfbrain.list_ynames)
param_dict = {
"width": width_loaded,
"height": height_loaded,
"num_xnames": num_xnames,
"num_ynames": num_ynames,
"downscale_ratio_xy": downscale_ratio_xy,
"downscale_ratio_z": downscale_ratio_z,
"overlap_left": input_image_info["left_margin"],
"overlap_right": input_image_info["right_margin"],
"overlap_top": input_image_info["top_margin"],
"overlap_bottom": input_image_info["bottom_margin"],
"flip_rot_before": flip_rot_before,
"flip_rot_after": flip_rot_after,
"imgformat": 1, # bin
"showgrid": 0, # no grid
}
# compute ScaleMerged parameters for cell coordinate transformation
# apply transformation as in ScaleMerge
strip_width = input_image_info["width"] - input_image_info["left_margin"] - input_image_info["right_margin"]
strip_height = input_image_info["height"] - input_image_info["top_margin"] - input_image_info["bottom_margin"]
if flip_rot_before_info["rotCCW"] or flip_rot_before_info["rotCW"]:
strip_width,strip_height = strip_height,strip_width
# max int less than or equal strip_width * downscale_ratio_xy
sampled_width = int(strip_width * downscale_ratio_xy)
sampled_height = int(strip_height * downscale_ratio_xy)
actual_downscale_ratio_x = sampled_width / strip_width # [pixel / pixel] = dimensionless
actual_downscale_ratio_y = sampled_height / strip_height # [pixel / pixel] = dimensionless
kernel_width = strip_width / sampled_width
kernel_height = strip_height / sampled_height
merged_width = sampled_width * num_xnames
merged_height = sampled_height * num_ynames
margin_left = input_image_info["left_margin"] * actual_downscale_ratio_x
margin_right = input_image_info["right_margin"] * actual_downscale_ratio_x
margin_top = input_image_info["top_margin"] * actual_downscale_ratio_y
margin_bottom = input_image_info["bottom_margin"] * actual_downscale_ratio_y
if flip_rot_before_info["flipX"]:
margin_left,margin_right = margin_right,margin_left
if flip_rot_before_info["flipY"]:
margin_top,margin_bottom = margin_bottom,margin_top
if flip_rot_before_info["rotCCW"]:
margin_left,margin_top,margin_right,margin_bottom = margin_top,margin_right,margin_bottom,margin_left
if flip_rot_before_info["rotCW"]:
margin_left,margin_top,margin_right,margin_bottom = margin_bottom,margin_left,margin_top,margin_right
if flip_rot_after_info["flipX"]:
margin_left,margin_right = margin_right,margin_left
if flip_rot_after_info["flipY"]:
margin_top,margin_bottom = margin_bottom,margin_top
print("\t original: {} x {} x ({} x {})".format(input_image_info["width"], input_image_info["height"], num_xnames, num_ynames))
print("\t strip: {} x {} x ({} x {})".format(strip_width, strip_height, num_xnames, num_ynames))
print("\t sampled: {} x {} x ({} x {})".format(sampled_width, sampled_height, num_xnames, num_ynames))
print("\t merged: {} x {}".format(merged_width, merged_height))
print("\t actual downscale ratio : {:.7f} x {:.7f}".format(actual_downscale_ratio_x, actual_downscale_ratio_y))
print("\t merged_mergin: L:{:.3f} R:{:.3f} T:{:.3f} B:{:.3f}".format(margin_left,margin_right,margin_top, margin_bottom))
param_dict.update({
"merged_margin_left": margin_left,
"merged_margin_right": margin_right,
"merged_margin_top": margin_top,
"merged_margin_bottom": margin_bottom,
"strip_width": strip_width,
"strip_height": strip_height,
"sampled_width": sampled_width,
"sampled_height": sampled_height,
"actual_downscale_ratio_x": actual_downscale_ratio_x,
"actual_downscale_ratio_y": actual_downscale_ratio_y,
"kernel_width": kernel_width,
"kernel_height": kernel_height,
"merged_width": merged_width,
"merged_height": merged_height,
})
if is_FW:
self.skip_z_FW = skip_z
self.param_scalemerge_FW = param_dict
else:
self.skip_z_RV = skip_z
self.param_scalemerge_RV = param_dict
return
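# Worked example for the quantities printed above (numbers are illustrative): with
# scale_xy = 6.5 um/px, scale_z = 10 um/slice and downscale_unit = 50 um, we get
# downscale_ratio_xy = 6.5/50 = 0.13, skip_z = 50/10 = 5, and a 2048 px strip is
# sampled down to int(2048 * 0.13) = 266 merged pixels per tile.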
def scalemerge(self, num_cpus=-1, dry_run=False, path_exec="./ScaleMerge"):
print("[*] Starting scalemerge...")
# Let's start merging FW & RV using boundary information
scale_z_FW = self.halfbrain_FW.scale_z
scale_z_RV = self.halfbrain_RV.scale_z
if self.params["merge_info"]["use_at_boundary"] == "FW":
use_FW_at_boundary = True
elif self.params["merge_info"]["use_at_boundary"] == "RV":
use_FW_at_boundary = False
else:
raise TypeError
print("\t FW length: {}".format(len(self.fnames_FW)))
print("\t RV length: {}".format(len(self.fnames_RV)))
indices_FW = range(len(self.fnames_FW))
indices_RV = range(len(self.fnames_RV))
zflip = self.params["scale_info"]["flip_rot"]["flipZ"]
print("\t z flip: {}".format("on" if zflip else "off"))
is_halfsize = False
if len(self.zs_global_FW) > 0:
zs_global_FW0 = self.zs_global_FW[0]
zs_global_FW1 = self.zs_global_FW[-1]
else:
zs_global_FW0 = None
zs_global_FW1 = None
is_halfsize = True
if len(self.zs_global_RV) > 0:
zs_global_RV0 = self.zs_global_RV[0]
zs_global_RV1 = self.zs_global_RV[-1]
else:
zs_global_RV0 = None
zs_global_RV1 = None
is_halfsize = True
if scale_z_FW * scale_z_RV > 0:
print("[Case 1-4]")
print("\t scale_z_FW", scale_z_FW)
print("\t zs_global_FW[0]:", zs_global_FW0)
print("\t zs_global_FW[-1]:", zs_global_FW1)
print("\t zs_global_RV[0]:", zs_global_RV0)
print("\t zs_global_RV[-1]:", zs_global_RV1)
# suppose FW & RV are growing in the same direction;
# there are 4 scenarios for merging.
if scale_z_FW > 0 and (is_halfsize or self.zs_global_FW[0] < self.zs_global_RV[0]):
print("->[Case 1]")
# [case 1]
# merged: |-FW-->|--RV-->
# FW: |-FW---->
# RV: |---RV-->
# if halfsize, case2 and case1 comes to the same
indices_FW_strip = indices_FW[:self.iz_FW_boundary+1][::-1][::self.skip_z_FW][::-1]
indices_RV_strip = indices_RV[self.iz_RV_boundary:][::self.skip_z_RV]
if use_FW_at_boundary:
indices_RV_strip = indices_RV_strip[1:]
else:
indices_FW_strip = indices_FW_strip[:-1]
is_FWs = [True for _ in indices_FW_strip] + [False for _ in indices_RV_strip]
merging_fnames = [self.fnames_FW[i] for i in indices_FW_strip] + [self.fnames_RV[i] for i in indices_RV_strip]
elif scale_z_FW > 0 and self.zs_global_RV[0] < self.zs_global_FW[0]:
print("->[Case 2]")
# [case 2]
# merged: |-RV-->|--FW-->
# FW: |---FW-->
# RV: |-RV---->
indices_RV_strip = indices_RV[:self.iz_RV_boundary+1][::-1][::self.skip_z_RV][::-1]
indices_FW_strip = indices_FW[self.iz_FW_boundary:][::self.skip_z_FW]
if use_FW_at_boundary:
indices_RV_strip = indices_RV_strip[:-1]
else:
indices_FW_strip = indices_FW_strip[1:]
is_FWs = [False for _ in indices_RV_strip] + [True for _ in indices_FW_strip]
merging_fnames = [self.fnames_RV[i] for i in indices_RV_strip] + [self.fnames_FW[i] for i in indices_FW_strip]
elif scale_z_FW < 0 and (is_halfsize or self.zs_global_FW[0] < self.zs_global_RV[0]):
print("->[Case 3]")
# [case 3] (reverse case 1)
# merged: |-FW-->|--RV-->
# FW: <-FW----|
# RV: <---RV--|
# if halfsize, case3 and case4 comes to the same
indices_FW_strip = indices_FW[self.iz_FW_boundary:][::self.skip_z_FW][::-1]
indices_RV_strip = indices_RV[:self.iz_RV_boundary+1][::-1][::self.skip_z_RV]
if use_FW_at_boundary:
indices_RV_strip = indices_RV_strip[1:]
else:
indices_FW_strip = indices_FW_strip[:-1]
is_FWs = [True for _ in indices_FW_strip] + [False for _ in indices_RV_strip]
merging_fnames = [self.fnames_FW[i] for i in indices_FW_strip] + [self.fnames_RV[i] for i in indices_RV_strip]
elif scale_z_FW < 0 and self.zs_global_RV[0] < self.zs_global_FW[0]:
print("->[Case 4]")
# [case 4] : reverse case2
# merged: |-RV-->|--FW-->
# FW: <---FW--|
# RV: <-RV----|
indices_RV_strip = indices_RV[self.iz_RV_boundary:][::self.skip_z_RV][::-1]
indices_FW_strip = indices_FW[:self.iz_FW_boundary+1][::-1][::self.skip_z_FW]
if use_FW_at_boundary:
indices_RV_strip = indices_RV_strip[:-1]
else:
indices_FW_strip = indices_FW_strip[1:]
is_FWs = [False for _ in indices_RV_strip] + [True for _ in indices_FW_strip]
merging_fnames = [self.fnames_RV[i] for i in indices_RV_strip] + [self.fnames_FW[i] for i in indices_FW_strip]
else:
raise TypeError
elif scale_z_FW * scale_z_RV < 0:
# suppose FW & RV are growing in opposite directions;
# there are 4 scenarios
print("[Case 5-8]")
print("\t scale_z_FW", scale_z_FW)
print("\t zs_global_FW[0]:", zs_global_FW0)
print("\t zs_global_FW[-1]:", zs_global_FW1)
print("\t zs_global_RV[0]:", zs_global_RV0)
print("\t zs_global_RV[-1]:", zs_global_RV1)
if scale_z_FW < 0 and (is_halfsize or self.zs_global_FW[-1] < self.zs_global_RV[0]):
print("->[Case 5]")
# [case 5]
# merged: |-FW-->|--RV-->
# FW: <-FW----|
# RV: |---RV-->
indices_FW_strip = indices_FW[self.iz_FW_boundary:][::self.skip_z_FW][::-1]
indices_RV_strip = indices_RV[self.iz_RV_boundary:][::self.skip_z_RV]
if use_FW_at_boundary:
indices_RV_strip = indices_RV_strip[1:]
else:
indices_FW_strip = indices_FW_strip[:-1]
is_FWs = [True for _ in indices_FW_strip] + [False for _ in indices_RV_strip]
merging_fnames = [self.fnames_FW[i] for i in indices_FW_strip] + [self.fnames_RV[i] for i in indices_RV_strip]
elif scale_z_FW > 0 and (is_halfsize or self.zs_global_FW[-1] > self.zs_global_RV[0]):
print("->[Case 6]")
# [case 6]
# merged: |-RV-->|--FW-->
# FW: |---FW-->
# RV: <-RV----|
indices_RV_strip = indices_RV[self.iz_RV_boundary:][::self.skip_z_RV][::-1]
indices_FW_strip = indices_FW[self.iz_FW_boundary:][::self.skip_z_FW]
if use_FW_at_boundary:
indices_RV_strip = indices_RV_strip[:-1]
else:
indices_FW_strip = indices_FW_strip[1:]
is_FWs = [False for _ in indices_RV_strip] + [True for _ in indices_FW_strip]
merging_fnames = [self.fnames_RV[i] for i in indices_RV_strip] + [self.fnames_FW[i] for i in indices_FW_strip]
elif scale_z_FW > 0 and self.zs_global_FW[-1] < self.zs_global_RV[0]:
print("->[Case 7]")
# [case 7] : reverse case5
raise NotImplementedError
elif scale_z_FW < 0 and self.zs_global_FW[-1] > self.zs_global_RV[0]:
print("->[Case 8]")
# [case 8] : reverse case6
raise NotImplementedError
else:
raise TypeError
else:
raise TypeError
# save boundary point for picking valid cell candidates
if is_halfsize:
self.bound_z_global_FW = (-np.inf, +np.inf)
self.bound_z_global_RV = (-np.inf, +np.inf)
elif is_FWs[0]:
self.bound_z_global_FW = (-np.inf, self.zs_global_FW[self.iz_FW_boundary])
self.bound_z_global_RV = (self.zs_global_RV[self.iz_RV_boundary], +np.inf)
else:
self.bound_z_global_RV = (-np.inf, self.zs_global_RV[self.iz_RV_boundary])
self.bound_z_global_FW = (self.zs_global_FW[self.iz_FW_boundary], +np.inf)
self.merged_depth = len(merging_fnames)
print("\tmerged depth: {}".format(self.merged_depth))
if is_FWs[0]:
self.new_origin_z_global = self.zs_global_FW[indices_FW_strip[0]]
else:
self.new_origin_z_global = self.zs_global_RV[indices_RV_strip[0]]
print("\tnew z_global origin : {}".format(self.new_origin_z_global))
if zflip:
is_FWs = is_FWs[::-1]
merging_fnames = merging_fnames[::-1]
# write paramfiles for each process of ScaleMerge
total_z_merged = len(merging_fnames)
mergedfile_mean_basedir = os.path.join(self.params["dst_basedir"], "zs_mean")
mergedfile_max_basedir = os.path.join(self.params["dst_basedir"], "zs_max")
mergedfile_min_basedir = os.path.join(self.params["dst_basedir"], "zs_min")
if not os.path.exists(mergedfile_mean_basedir):
os.makedirs(mergedfile_mean_basedir)
if not os.path.exists(mergedfile_max_basedir):
os.makedirs(mergedfile_max_basedir)
if not os.path.exists(mergedfile_min_basedir):
os.makedirs(mergedfile_min_basedir)
mergedfile_mean_basename = os.path.join(mergedfile_mean_basedir, "{i:04d}.tif")
mergedfile_max_basename = os.path.join(mergedfile_max_basedir, "{i:04d}.tif")
mergedfile_min_basename = os.path.join(mergedfile_min_basedir, "{i:04d}.tif")
mergedfiles = [(
mergedfile_mean_basename.format(i=i),
mergedfile_max_basename.format(i=i),
mergedfile_min_basename.format(i=i),
)for i in range(total_z_merged)]
paramfiles = [self.write_paramfile(i,is_FW,merging_fname)
for i,(is_FW,merging_fname) in enumerate(zip(is_FWs, merging_fnames))]
if not dry_run:
joblib.Parallel(n_jobs=num_cpus, verbose=10)([
joblib.delayed(run_ScaleMerge)(paramfile,mergedfile, path_exec, print_output=False)
for paramfile, mergedfile in zip(paramfiles,mergedfiles)
])
print("[*] Concatenating tiff images to single tiff({})".format(self.single_mergedfile_mean))
img_mergedsingle_mean = np.empty((len(mergedfiles), self.param_scalemerge_FW["merged_height"], self.param_scalemerge_FW["merged_width"]), dtype=np.uint16)
img_mergedsingle_max = np.empty_like(img_mergedsingle_mean)
img_mergedsingle_min = np.empty_like(img_mergedsingle_mean)
for i,(mergedfile_mean,mergedfile_max,mergedfile_min) in enumerate(mergedfiles):
img_mergedsingle_mean[i,:,:] = tifffile.imread(mergedfile_mean)
img_mergedsingle_max[i,:,:] = tifffile.imread(mergedfile_max)
img_mergedsingle_min[i,:,:] = tifffile.imread(mergedfile_min)
tifffile.imsave(self.single_mergedfile_mean, img_mergedsingle_mean)
tifffile.imsave(self.single_mergedfile_max, img_mergedsingle_max)
tifffile.imsave(self.single_mergedfile_min, img_mergedsingle_min)
print("[*] Deleting temporary tiff images")
shutil.rmtree(mergedfile_mean_basedir)
shutil.rmtree(mergedfile_max_basedir)
shutil.rmtree(mergedfile_min_basedir)
else:
print("[*] Skipping ScaleMerge for images")
for paramfile in paramfiles:
os.remove(paramfile)
return
def write_paramfile(self, i, is_FW, merging_fname):
paramfile = "/tmp/param_merge_{randomID}_{i:04d}.txt".format(randomID = np.random.randint(2**31), i=i)
if is_FW:
param_dict = self.param_scalemerge_FW
halfbrain = self.halfbrain_FW
else:
param_dict = self.param_scalemerge_RV
halfbrain = self.halfbrain_RV
param_text = "{width}:{height}:{num_xnames}:{num_ynames}:{downscale_ratio_xy}:{overlap_left}:{overlap_right}:{overlap_top}:{overlap_bottom}:{flip_rot_before}:{flip_rot_after}:{imgformat}:{showgrid}\n".format(**param_dict)
for yname in halfbrain.list_ynames:
for xname in halfbrain.list_xnames:
imagestack = halfbrain.get_imagestack_by_xyname(xname,yname)
img = imagestack.get_imagefile_by_fname(merging_fname)
fullpath = img.fullpath if not img.is_dummy else ""
param_text += fullpath + "\n"
with open(paramfile, "w") as f:
f.write(param_text)
return paramfile
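# Paramfile layout written by write_paramfile above (field names follow its format string;
# concrete values depend on the configuration):
#   width:height:num_xnames:num_ynames:downscale_ratio_xy:overlap_left:overlap_right:
#   overlap_top:overlap_bottom:flip_rot_before:flip_rot_after:imgformat:showgrid
# followed by one image path per (yname, xname) tile in row-major order, with an empty
# line standing in for dummy tiles.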
class WholeBrainCells(object):
def __init__(self, paramfile, wholebrain_images=None, clf=None):
if wholebrain_images:
self.wholebrain_images = wholebrain_images
else:
self.wholebrain_images = WholeBrainImages(paramfile)
self.halfbrain_cells_FW = HalfBrainCells(
self.wholebrain_images.params["HDoG_paramfile"]["FW"],
is_FW = True,
halfbrain_images=self.wholebrain_images.halfbrain_FW,
clf=clf
)
self.halfbrain_cells_RV = HalfBrainCells(
self.wholebrain_images.params["HDoG_paramfile"]["RV"],
is_FW = False,
halfbrain_images=self.wholebrain_images.halfbrain_RV,
clf=clf
)
# average mode or not (default: false)
is_ave_FW = self.halfbrain_cells_FW.halfbrain_images.params["HDoG_param"].get("is_ave_mode", False)
is_ave_RV = self.halfbrain_cells_RV.halfbrain_images.params["HDoG_param"].get("is_ave_mode", False)
assert is_ave_FW == is_ave_RV
self.is_ave = is_ave_FW
def scalemerge(self):
# should be called after WholeBrainImages.scalemerge()
print("[*] Starting scalemerge for HDoG result...")
cellstacks_FW = self.halfbrain_cells_FW.dict_stacks
cellstacks_RV = self.halfbrain_cells_RV.dict_stacks
param_scalemerge_FW = self.wholebrain_images.param_scalemerge_FW
param_scalemerge_RV = self.wholebrain_images.param_scalemerge_RV
# scale and merge
org_scale_xy_FW = float(abs(self.wholebrain_images.halfbrain_FW.scale_xy))
org_scale_z_FW = float(abs(self.wholebrain_images.halfbrain_FW.scale_z))
org_scale_xy_RV = float(abs(self.wholebrain_images.halfbrain_RV.scale_xy))
org_scale_z_RV = float(abs(self.wholebrain_images.halfbrain_RV.scale_z))
offset_x_FW = self.wholebrain_images.halfbrain_FW.list_offset_xs[0]
offset_y_FW = self.wholebrain_images.halfbrain_FW.list_offset_ys[0]
offset_x_RV = self.wholebrain_images.halfbrain_RV.list_offset_xs[0]
offset_y_RV = self.wholebrain_images.halfbrain_RV.list_offset_ys[0]
print("\t offset_FW: {},{},{}".format(offset_x_FW,offset_y_FW,self.wholebrain_images.new_origin_z_global))
print("\t offset_RV: {},{},{}".format(offset_x_RV,offset_y_RV,self.wholebrain_images.new_origin_z_global))
# flip rot after
flip_rot_after_info = self.wholebrain_images.params["scale_info"]["flip_rot"]
A_FW = np.zeros((3,3))
A_FW[:2,:2] = np.array(self.wholebrain_images.halfbrain_FW.params["coordinate_info"]["affine_global"])[:2,:2]
A_FW[2,2] = 1.
A_FW[np.nonzero(A_FW)] = 1.
b_FW = np.zeros(3)
A_RV = np.zeros((3,3))
A_RV[:2,:2] = np.array(self.wholebrain_images.halfbrain_RV.params["coordinate_info"]["affine_global"])[:2,:2]
A_RV[2,2] = 1.
A_RV[np.nonzero(A_RV)] = 1.
b_RV = np.zeros(3)
if flip_rot_after_info["flipX"]:
b_FW[0] += param_scalemerge_FW["merged_width"]
A_FW[0,:] *= -1
b_RV[0] += param_scalemerge_RV["merged_width"]
A_RV[0,:] *= -1
if flip_rot_after_info["flipY"]:
b_FW[1] += param_scalemerge_FW["merged_height"]
A_FW[1,:] *= -1
b_RV[1] += param_scalemerge_RV["merged_height"]
A_RV[1,:] *= -1
if flip_rot_after_info["flipZ"]:
b_FW[2] += self.wholebrain_images.merged_depth
A_FW[2,:] *= -1
b_RV[2] += self.wholebrain_images.merged_depth
A_RV[2,:] *= -1
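# The matrices built above are applied to scaled centroids as A.dot(c) + b in
# process_stack below; each enabled flip negates the corresponding row of A and
# adds the merged width/height/depth to b.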
def process_stack(dst_path, cellstack, bound_z, margin_left, margin_top,
offset_x, offset_y, offset_z, coeff_x, coeff_y, coeff_z, A, b):
print("[*] Dumping merged data to {}".format(dst_path))
if bound_z[0] > bound_z[1]:
smallest_z,largest_z = bound_z[1],bound_z[0]
else:
smallest_z,largest_z = bound_z
data_scaled = np.zeros(cellstack.data_global.shape[0], dtype=dt_scalemerged)
data_scaled["is_valid"] = np.bitwise_and(
smallest_z <= cellstack.data_global["merged_z"],
cellstack.data_global["merged_z"] <= largest_z)
#print("\tz_range 1: {} - {}".format(data_valid["centroid_z"].min(), data_valid["centroid_z"].max()))
centroid_scaled = np.zeros((cellstack.data_global.shape[0],3), dtype=np.float32)
centroid_scaled[:,0] = (cellstack.data_global["merged_x"] - offset_x) * coeff_x - margin_left
centroid_scaled[:,1] = (cellstack.data_global["merged_y"] - offset_y) * coeff_y - margin_top
centroid_scaled[:,2] = (cellstack.data_global["merged_z"] - offset_z) * coeff_z
#print("\tz_range 2: {} - {}".format(centroid_scaled[:,2].min(), centroid_scaled[:,2].max()))
centroid_fliprot = A.dot(centroid_scaled.T).T + b
data_scaled["scaled_x"] = centroid_fliprot[:,0]
data_scaled["scaled_y"] = centroid_fliprot[:,1]
data_scaled["scaled_z"] = centroid_fliprot[:,2]
#print("\tz_range 3: {} - {}".format(data_valid["centroid_z"].min(), data_valid["centroid_z"].max()))
joblib.dump(data_scaled, dst_path, compress=3)
return np.count_nonzero(data_scaled["is_valid"])
dst_basedir = self.wholebrain_images.params["dst_basedir"]
dst_basedir_FW = os.path.join(dst_basedir,"FW")
dst_basedir_RV = os.path.join(dst_basedir,"RV")
if not os.path.exists(dst_basedir_FW):
os.makedirs(dst_basedir_FW)
if not os.path.exists(dst_basedir_RV):
os.makedirs(dst_basedir_RV)
# Note: parallelizable loop
dict_num_cells = {}
for xyname,cellstack in cellstacks_FW.items():
if cellstack.is_dummy: continue
dst_path = os.path.join(dst_basedir_FW, "{}_{}.pkl".format(xyname[1],xyname[0]))
num_cells = process_stack(dst_path, cellstack,
self.wholebrain_images.bound_z_global_FW,
param_scalemerge_FW["merged_margin_left"],
param_scalemerge_FW["merged_margin_top"],
offset_x_FW, offset_y_FW, self.wholebrain_images.new_origin_z_global,
param_scalemerge_FW["actual_downscale_ratio_x"] / org_scale_xy_FW,
param_scalemerge_FW["actual_downscale_ratio_y"] / org_scale_xy_FW,
param_scalemerge_FW["downscale_ratio_z"] / org_scale_z_FW,
A_FW, b_FW)
dict_num_cells[dst_path] = num_cells
for xyname,cellstack in cellstacks_RV.items():
if cellstack.is_dummy: continue
dst_path = os.path.join(dst_basedir_RV, "{}_{}.pkl".format(xyname[1],xyname[0]))
num_cells = process_stack(dst_path, cellstack,
self.wholebrain_images.bound_z_global_RV,
param_scalemerge_RV["merged_margin_left"],
param_scalemerge_RV["merged_margin_top"],
offset_x_RV, offset_y_RV, self.wholebrain_images.new_origin_z_global,
param_scalemerge_RV["actual_downscale_ratio_x"] / org_scale_xy_RV,
param_scalemerge_RV["actual_downscale_ratio_y"] / org_scale_xy_RV,
param_scalemerge_RV["downscale_ratio_z"] / org_scale_z_RV,
A_RV, b_RV)
dict_num_cells[dst_path] = num_cells
# saving information
joblib.dump(dict_num_cells, os.path.join(dst_basedir, "info.pkl"), compress=3)
return
def main():
args = docopt(__doc__)
wb_images = WholeBrainImages(args["PARAM_FILE"])
if args["images"]:
wb_images.scalemerge(num_cpus=int(args["-p"]), dry_run=False, path_exec=args["--exec"])
elif args["cells"]:
wb_images.scalemerge(num_cpus=int(args["-p"]), dry_run=True, path_exec=args["--exec"])
wb_cells = WholeBrainCells(args["PARAM_FILE"], wholebrain_images=wb_images)
wb_cells.scalemerge()
elif args["full"]:
wb_images.scalemerge(num_cpus=int(args["-p"]), dry_run=False, path_exec=args["--exec"])
wb_cells = WholeBrainCells(args["PARAM_FILE"], wholebrain_images=wb_images)
wb_cells.scalemerge()
if __name__ == "__main__":
main()
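# Example invocation (hypothetical script and param file names; the subcommands and
# options are the docopt keys used in main() above):
#   python scalemerge.py images PARAM_FILE -p 8 --exec ./ScaleMerge
#   python scalemerge.py cells PARAM_FILE -p 8 --exec ./ScaleMerge
#   python scalemerge.py full PARAM_FILE -p 8 --exec ./ScaleMerge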
| avg_line_length: 50.946203 | max_line_length: 229 | alphanum_fraction: 0.615349 |

hexsha: f75bb65996c0940d32b2f8dbd0c51209be544e04 | size: 6,124 | ext: py | lang: Python
path: components/aws/sagemaker/batch_transform/src/batch_transform.py
repo: cclauss/pipelines @ 2592307cceb72fdb61be2673f67d7b4a4bd12023 | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
from pathlib2 import Path
from common import _utils
def main(argv=None):
parser = argparse.ArgumentParser(description='SageMaker Batch Transformation Job')
parser.add_argument('--region', type=str.strip, required=True, help='The region where the cluster launches.')
parser.add_argument('--job_name', type=str.strip, required=False, help='The name of the transform job.', default='')
parser.add_argument('--model_name', type=str.strip, required=True, help='The name of the model that you want to use for the transform job.')
parser.add_argument('--max_concurrent', type=_utils.str_to_int, required=False, help='The maximum number of parallel requests that can be sent to each instance in a transform job.', default='0')
parser.add_argument('--max_payload', type=_utils.str_to_int, required=False, help='The maximum allowed size of the payload, in MB.', default='6')
parser.add_argument('--batch_strategy', choices=['MultiRecord', 'SingleRecord', ''], type=str.strip, required=False, help='The number of records to include in a mini-batch for an HTTP inference request.', default='')
parser.add_argument('--environment', type=_utils.str_to_json_dict, required=False, help='The dictionary of the environment variables to set in the Docker container. Up to 16 key-value entries in the map.', default='{}')
parser.add_argument('--input_location', type=str.strip, required=True, help='The S3 location of the data source that is associated with a channel.')
parser.add_argument('--data_type', choices=['ManifestFile', 'S3Prefix', 'AugmentedManifestFile', ''], type=str.strip, required=False, help='Data type of the input. Can be ManifestFile, S3Prefix, or AugmentedManifestFile.', default='S3Prefix')
parser.add_argument('--content_type', type=str.strip, required=False, help='The multipurpose internet mail extension (MIME) type of the data.', default='')
parser.add_argument('--split_type', choices=['None', 'Line', 'RecordIO', 'TFRecord', ''], type=str.strip, required=False, help='The method to use to split the transform job data files into smaller batches.', default='None')
parser.add_argument('--compression_type', choices=['None', 'Gzip', ''], type=str.strip, required=False, help='If the transform data is compressed, the specification of the compression type.', default='None')
parser.add_argument('--output_location', type=str.strip, required=True, help='The Amazon S3 path where you want Amazon SageMaker to store the results of the transform job.')
parser.add_argument('--accept', type=str.strip, required=False, help='The MIME type used to specify the output data.')
parser.add_argument('--assemble_with', choices=['None', 'Line', ''], type=str.strip, required=False, help='Defines how to assemble the results of the transform job as a single S3 object. Either None or Line.')
parser.add_argument('--output_encryption_key', type=str.strip, required=False, help='The AWS KMS key that Amazon SageMaker uses to encrypt the model artifacts.', default='')
parser.add_argument('--input_filter', type=str.strip, required=False, help='A JSONPath expression used to select a portion of the input data to pass to the algorithm.', default='')
parser.add_argument('--output_filter', type=str.strip, required=False, help='A JSONPath expression used to select a portion of the joined dataset to save in the output file for a batch transform job.', default='')
parser.add_argument('--join_source', choices=['None', 'Input', ''], type=str.strip, required=False, help='Specifies the source of the data to join with the transformed data.', default='None')
parser.add_argument('--instance_type', choices=['ml.m4.xlarge', 'ml.m4.2xlarge', 'ml.m4.4xlarge', 'ml.m4.10xlarge', 'ml.m4.16xlarge', 'ml.m5.large', 'ml.m5.xlarge', 'ml.m5.2xlarge', 'ml.m5.4xlarge',
'ml.m5.12xlarge', 'ml.m5.24xlarge', 'ml.c4.xlarge', 'ml.c4.2xlarge', 'ml.c4.4xlarge', 'ml.c4.8xlarge', 'ml.p2.xlarge', 'ml.p2.8xlarge', 'ml.p2.16xlarge', 'ml.p3.2xlarge', 'ml.p3.8xlarge', 'ml.p3.16xlarge',
'ml.c5.xlarge', 'ml.c5.2xlarge', 'ml.c5.4xlarge', 'ml.c5.9xlarge', 'ml.c5.18xlarge'], type=str.strip, required=True, help='The ML compute instance type for the transform job.', default='ml.m4.xlarge')
parser.add_argument('--instance_count', type=_utils.str_to_int, required=False, help='The number of ML compute instances to use in the transform job.')
parser.add_argument('--resource_encryption_key', type=str.strip, required=False, help='The AWS KMS key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s).', default='')
parser.add_argument('--tags', type=_utils.str_to_json_dict, required=False, help='An array of key-value pairs, to categorize AWS resources.', default='{}')
parser.add_argument('--output_location_file', type=str.strip, required=True, help='File path where the program will write the Amazon S3 URI of the transform job results.')
args = parser.parse_args()
logging.getLogger().setLevel(logging.INFO)
client = _utils.get_client(args.region)
logging.info('Submitting Batch Transformation request to SageMaker...')
batch_job_name = _utils.create_transform_job(client, vars(args))
logging.info('Batch Job request submitted. Waiting for completion...')
_utils.wait_for_transform_job(client, batch_job_name)
Path(args.output_location_file).parent.mkdir(parents=True, exist_ok=True)
Path(args.output_location_file).write_text(unicode(args.output_location))
logging.info('Batch Transformation creation completed.')
if __name__== "__main__":
main()
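# Example invocation (hypothetical names and S3 paths; the flags match the argparse
# definitions above):
#   python batch_transform.py --region us-west-2 --model_name my-model \
#     --input_location s3://my-bucket/input --output_location s3://my-bucket/output \
#     --instance_type ml.m4.xlarge --instance_count 1 \
#     --output_location_file /tmp/output_location.txt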
| avg_line_length: 94.215385 | max_line_length: 244 | alphanum_fraction: 0.75147 |

hexsha: f75bc9656041281005d2405f217d2685c7e74f87 | size: 21,738 | ext: py | lang: Python
path: chapter_2_collection/diarize.py
repo: fancyerii/voicebook @ def82da8577086d0361643a05fec2463006533a9 | licenses: ["Apache-2.0"]
max_stars_count: 1 (2020-03-05T01:19:17.000Z) | max_issues_count: null | max_forks_count: null
'''
================================================
## VOICEBOOK REPOSITORY ##
================================================
repository name: voicebook
repository version: 1.0
repository link: https://github.com/jim-schwoebel/voicebook
author: Jim Schwoebel
author contact: js@neurolex.co
description: a book and repo to get you started programming voice applications in Python - 10 chapters and 200+ scripts.
license category: opensource
license: Apache 2.0 license
organization name: NeuroLex Laboratories, Inc.
location: Seattle, WA
website: https://neurolex.ai
release date: 2018-09-28
This code (voicebook) is hereby released under a Apache 2.0 license license.
For more information, check out the license terms below.
================================================
## LICENSE TERMS ##
================================================
Copyright 2018 NeuroLex Laboratories, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================================================
## SERVICE STATEMENT ##
================================================
If you are using the code written for a larger project, we are
happy to consult with you and help you with deployment. Our team
has >10 world experts in Kafka distributed architectures, microservices
built on top of Node.js / Python / Docker, and applying machine learning to
model speech and text data.
We have helped a wide variety of enterprises - small businesses,
researchers, enterprises, and/or independent developers.
If you would like to work with us let us know @ js@neurolex.co.
================================================
## DIARIZE.PY ##
================================================
This function takes in a speech sample and diarizes it for 2 speakers.
The output files are stored in a folder structure with Speaker A and Speaker B.
It is assumed to be a 2 speaker diarization problem.
The output .zip file is named filename[0:-4]+'_diarization.zip' and contains:
--->filename[0:-4]+'.json'
--> speaker 1 folder
--> speaker 1 sections (multiple .wav files)
--> speaker 1 stitched together (single .wav file)
--> speaker 2 folder
--> speaker 2 sections (multiple .wav files)
--> speaker 2 stitched together (single .wav file)
Diarization is done with the pyaudioanalysis3 library.
'''
import os, json, importlib, scipy, shutil, ffmpy, time, sys, getpass, zipfile
import speech_recognition as sr_audio
from pydub import AudioSegment
import numpy as np
if 'pyAudioAnalysis3' not in os.listdir():
os.system("git clone git@github.com:NeuroLexDiagnostics/pyAudioAnalysis3.git")
sys.path.append(os.getcwd()+'/pyAudioAnalysis3')
import audioTrainTest as aT
import audioBasicIO
import audioFeatureExtraction as aF
import audioSegmentation as aS
##INITIALIZE FUNCTIONS FOR DIARIZATION
####################################################################################
def exportfile(newAudio,time1,time2,filename,i,speaknum):
#Exports to a wav file in the current path.
newAudio2 = newAudio[time1:time2]
print('making '+filename[0:-4]+'_'+str(speaknum)+'_'+str(i)+'_'+str(time1/1000)+'_'+str(time2/1000)+'.wav')
newAudio2.export(filename[0:-4]+'_'+str(speaknum)+'_'+str(i)+'_'+str(time1/1000)+'_'+str(time2/1000)+'.wav', format="wav")
return filename[0:-4]+'_'+str(speaknum)+'_'+str(i)+'_'+str(time1/1000)+'_'+str(time2/1000)+'.wav'
def stitchtogether(dirlist,dirloc,filename):
try:
#assumes already in proper directory
for i in range(len(dirlist)):
if i ==0:
sound=AudioSegment.from_wav(dirloc+'/'+str(dirlist[i]))
else:
sound=sound+AudioSegment.from_wav(dirloc+'/'+str(dirlist[i]))
sound.export(dirloc+'/'+filename, format="wav")
except:
print('error stitching...')
def stereo2mono(audiodata,filename):
newaudiodata = list()
for i in range(len(audiodata)):
d = audiodata[i][0]/2 + audiodata[i][1]/2
newaudiodata.append(d)
return np.array(newaudiodata, dtype='int16')
#to apply this function, SR=sample rate usually 44100
#wavfile.write(newfilename, sr, newaudiodata)
def convertformat(filename):
newfilename=filename[0:-4]+'.wav'
ff = ffmpy.FFmpeg(
inputs={filename:None},
outputs={newfilename: None}
)
ff.run()
return newfilename
def zipdir(path, ziph):
# ziph is zipfile handle
for root, dirs, files in os.walk(path):
for file in files:
ziph.write(os.path.join(root, file))
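# e.g. (hypothetical paths): zipdir('some_folder/', zipfile.ZipFile('some_folder.zip', 'w'))
# walks the folder tree and writes every file it finds into the open zip handle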
def transcribe_audio_google(filename):
# transcribe the audio (note this is only done if a voice sample)
r=sr_audio.Recognizer()
with sr_audio.AudioFile(filename) as source:
audio = r.record(source)
text=r.recognize_google_cloud(audio)
return text
def transcribe_audio_sphinx(filename):
# transcribe the audio (note this is only done if a voice sample)
r=sr_audio.Recognizer()
with sr_audio.AudioFile(filename) as source:
audio = r.record(source)
text=r.recognize_sphinx(audio)
print('transcript: '+text)
return text
##GO TO HOST DIRECTORY AND BEGIN BULK PROCESSING
####################################################################################
#host directory in app is likely /usr/app/...
hostdir=os.getcwd()
curdir=os.listdir()
#now create some folders if they have not already been created
incoming_dir=hostdir+'/diarize-incoming/'
processed_dir=hostdir+'/diarize-processed/'
try:
os.chdir(incoming_dir)
curdir=os.listdir()
if 'data' not in curdir:
#this is necessary for diarization
shutil.copytree(hostdir+'/pyaudioanalysis3/data/',os.getcwd()+'/data/')
except:
os.mkdir(incoming_dir)
os.chdir(incoming_dir)
curdir=os.listdir()
if 'data' not in curdir:
#this is necessary for diarization
shutil.copytree(hostdir+'/pyaudioanalysis3/data/',os.getcwd()+'/data/')
try:
os.chdir(processed_dir)
except:
os.mkdir(processed_dir)
#change to incoming directory to look for samples
os.chdir(incoming_dir)
#initialize sleep time for worker (default is 1 second)
sleeptime=1
# now initialize process list with files already in the directory
processlist=os.listdir()
convertformat_list=list()
#error counts will help us debug later
errorcount=0
processcount=0
#initialize t for infinite loop
t=1
#infinite loop for worker now begins with while loop...
while t>0:
#go to incoming directory
os.chdir(incoming_dir)
listdir=os.listdir()
print(listdir)
#try statement to avoid errors
try:
if listdir==['.DS_Store'] or listdir == ['data'] or listdir==['data','.DS_Store'] or listdir==[]:
#pass if no files are processible
print('no files found...')
else:
#look for any files that have not been previously in the directory
for i in range(len(listdir)):
if listdir[i]=='.DS_Store' or listdir[i]=='data':
pass
else:
#convert format if not .wav
if listdir[i][-4:] != '.wav':
filename=convertformat(listdir[i])
os.remove(listdir[i])
else:
filename=listdir[i]
#log start time for later
start_time=time.time()
if filename not in processlist:
print('processing '+filename)
processlist.append(listdir[i])
filesize=os.path.getsize(filename)
if filesize > int(500):
#if over 20 minutes of audio collected (10.580MB), assume 2 speakers
shutil.copy(incoming_dir+filename,hostdir+'/pyaudioanalysis3/data/'+filename)
g=aS.speakerDiarization(filename,2,mtSize=2.0,mtStep=0.2,stWin=0.05,LDAdim=35, PLOT=False)
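# g holds one speaker label (0 or 1) per 0.2 s analysis step (mtStep=0.2),
# hence the index/5.0 conversions to seconds below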
s0seg=list()
s1seg=list()
allseg=list()
for i in range(len(g)-1):
if i==0:
start=i/5.0
else:
if g[i]==g[i+1]:
pass
#continue where we left off to find the segment length, in 0.2 second steps
else:
if g[i+1]==0:
end=i/5.0
s1seg.append([start,end])
allseg.append([0,[start,end]])
start=(i+1)/5.0
elif g[i+1]==1:
end=i/5.0
s0seg.append([start,end])
allseg.append([1, [start,end]])
start=(i+1)/5.0
else:
print('error')
#now save this data in individual segments
newAudio = AudioSegment.from_wav(filename)
diarizedir=os.getcwd()+'/'+filename[0:-4]+'_diarization'
try:
os.mkdir(diarizedir)
os.chdir(diarizedir)
except:
os.chdir(diarizedir)
#copy file to this directory and delete from other directory
shutil.move(incoming_dir+filename,os.getcwd()+'/'+filename)
#diarize speaker 1
print('diarizing speaker 1')
curdir=os.getcwd()
newdir1=curdir+'/1'
try:
os.mkdir(newdir1)
os.chdir(newdir1)
except:
os.chdir(newdir1)
for i in range(len(s0seg)):
filename2=filename[0:-4]+'_speaker_1'+str(i)+'.wav'
print(('making file @ %s to %s')%(str(s0seg[i][0]),str(s0seg[i][1])))
exportfile(newAudio,s0seg[i][0]*1000,s0seg[i][1]*1000,filename,i,1)
curdir=os.getcwd()
listdir=os.listdir(curdir)
removedfilelist1=list()
keptfilelist1=list()
for i in range(len(listdir)):
if os.path.getsize(listdir[i]) < 300000:
removedfile=[listdir[i], os.path.getsize(listdir[i])]
removedfilelist1.append(removedfile)
os.remove(listdir[i])
else:
keptfile=[listdir[i],os.path.getsize(listdir[i])]
keptfilelist1.append(keptfile)
#speaker 1 stitched size
s1stitchedsize=0
for i in range(len(keptfilelist1)):
s1stitchedsize=s1stitchedsize+int(keptfilelist1[i][1])
#speaker 2
os.chdir(diarizedir)
curdir=os.getcwd()
newdir2=curdir+'/2'
try:
os.mkdir(newdir2)
os.chdir(newdir2)
except:
os.chdir(newdir2)
print('diarizing speaker 2')
for i in range(len(s1seg)):
filename2=filename[0:-4]+'_speaker_2'+str(i)+'.wav'
print(('making file @ %s to %s')%(str(s1seg[i][0]),str(s1seg[i][1])))
exportfile(newAudio,s1seg[i][0]*1000,s1seg[i][1]*1000,filename,i,2)
curdir=os.getcwd()
listdir=os.listdir(curdir)
removedfilelist2=list()
keptfilelist2=list()
##now delete files that are less than 300 KB
for i in range(len(listdir)):
if os.path.getsize(listdir[i]) < 300000:
removedfile=[listdir[i], os.path.getsize(listdir[i])]
removedfilelist2.append(removedfile)
os.remove(listdir[i])
else:
keptfile=[listdir[i],os.path.getsize(listdir[i])]
keptfilelist2.append(keptfile)
#speaker 2 stitched size
s2stitchedsize=0
for i in range(len(keptfilelist2)):
s2stitchedsize=s2stitchedsize+int(keptfilelist2[i][1])
# all segments
os.chdir(diarizedir)
curdir=os.getcwd()
newdir3=curdir+'/all'
try:
os.mkdir(newdir3)
os.chdir(newdir3)
except:
os.chdir(newdir3)
print('transcribing session')
master_transcript=open('transcript.txt','w')
for i in range(len(allseg)):
print(('making file @ %s to %s')%(str(allseg[i][1][0]),str(allseg[i][1][1])))
filename2=str(i)+'_'+str(allseg[i][0])+'.wav'
filename2=exportfile(newAudio,allseg[i][1][0]*1000,allseg[i][1][1]*1000,filename,i,2)
new_filename=str(i)+'_'+str(allseg[i][0])+'.wav'
os.rename(filename2,new_filename)
os.system('ffmpeg -i %s -ac 1 -acodec pcm_s16le -ar 16000 %s -y'%(new_filename,new_filename))
if i == 0:
speaker='102334'
try:
try:
transcript=transcribe_audio_google(new_filename)
except:
transcript=transcribe_audio_sphinx(new_filename)
if str(allseg[i][0]) != speaker:
speaker=str(allseg[i][0])
master_transcript.write('\n\nspeaker %s: %s '%(str(allseg[i][0]), transcript))
print('\n\nspeaker %s: %s '%(str(allseg[i][0]), transcript))
else:
speaker=str(allseg[i][0])
master_transcript.write('%s'%(transcript))
print(transcript)
except:
print('failed transcript')
master_transcript.close()
transcript=open('transcript.txt').read()
#calculate processing time
end_time=time.time()
processtime=end_time-start_time
#this is the .json serializable diarization
os.chdir(diarizedir)
data={
'filename':filename,
'file location':diarizedir,
'file size':filesize,
'processing time':processtime,
'processcount':processcount,
'errorcount':errorcount,
'data':list(g),
'master transcript': transcript,
'allseg': allseg,
'speaker 1':s0seg,
'speaker 2':s1seg,
'speaker 1 kept segments':keptfilelist1,
'speaker 1 stitched size':s1stitchedsize,
'speaker 1 folder location':newdir1,
'speaker 2 kept segments':keptfilelist2,
'speaker 2 stitched size':s2stitchedsize,
'speaker 2 folder location':newdir2,
'speaker 1 deleted segments':removedfilelist1,
'speaker 2 deleted segments':removedfilelist2,
}
#write to json
os.chdir(diarizedir)
with open(filename[0:-4]+'.json', 'w') as f:
json.dump(data, f)
f.close()
#read the db
g=json.loads(open(filename[0:-4]+'.json').read())
keptlist1=g['speaker 1 kept segments']
keptloc1=g['speaker 1 folder location']
filelist1=list()
for i in range(len(keptlist1)):
filelist1.append(str(keptlist1[i][0]))
keptlist2=g['speaker 2 kept segments']
keptloc2=g['speaker 2 folder location']
filelist2=list()
for i in range(len(keptlist2)):
filelist2.append(str(keptlist2[i][0]))
#save stitch to locations where segments are
os.chdir(keptloc1)
try:
print('stitching to location 1: ' + keptloc1)
print(filelist1)
stitchtogether(filelist1,keptloc1,'stitched_1.wav')
except:
print('error stitching 1')
#save stitch to locations where segments are
os.chdir(keptloc2)
try:
print('stitching to location 2: ' + keptloc2)
print(filelist2)
stitchtogether(filelist2,keptloc2,'stitched_2.wav')
except:
print('error stitching 2')
#go back to the incoming dir folder for further processing
os.chdir(incoming_dir)
#zip the entire directory into a .zip file and move to processed_dir folder
shutil.make_archive(filename[0:-4]+'_diarization','zip',filename[0:-4]+'_diarization/')
shutil.move(incoming_dir+filename[0:-4]+'_diarization.zip',processed_dir+filename[0:-4]+'_diarization.zip')
#delete the directory using shutil
shutil.rmtree(filename[0:-4]+'_diarization')
#update processcount
processcount=processcount+1
else:
errorcount=errorcount+1
os.remove(filename)
print('skipping file, need to resample (too small size)')
#sleep to avoid server overhead
print('sleeping...')
time.sleep(sleeptime)
except:
print('error')
print('sleeping...')
errorcount=errorcount+1
time.sleep(sleeptime)
| avg_line_length: 42.960474 | max_line_length: 143 | alphanum_fraction: 0.457172 |

hexsha: f75be1c9161eec3f2b8cb1f4cad4183e1ba9d351 | size: 14,106 | ext: py | lang: Python
path: CPCTrans/main.py
repo: yliu1229/CPCTR @ 66fcd336ee69fd18b322853f195c5b65b4a046b7 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
import os
import sys
import time
import re
import argparse
import numpy as np
from tqdm import tqdm
from tensorboardX import SummaryWriter
import matplotlib.pyplot as plt
plt.switch_backend('agg')
sys.path.append('../Utils')
from CPCTrans.dataset_3d import *
from CPCTrans.model_3d import *
from Backbone.resnet import neq_load_customized
from Utils.augmentation import *
from Utils.utils import AverageMeter, save_checkpoint, denorm, calc_topk_accuracy
import torch
import torch.optim as optim
from torch.utils import data
from torchvision import datasets, models, transforms
import torchvision.utils as vutils
torch.backends.cudnn.benchmark = True
parser = argparse.ArgumentParser()
parser.add_argument('--net', default='resnet18', type=str)
parser.add_argument('--model', default='cpc-trans', type=str)
parser.add_argument('--dataset', default='ucf101', type=str)
parser.add_argument('--num_seq', default=8, type=int, help='number of video blocks')
parser.add_argument('--pred_step', default=3, type=int)
parser.add_argument('--ds', default=3, type=int, help='frame downsampling rate')
parser.add_argument('--batch_size', default=16, type=int)
parser.add_argument('--lr', default=1e-3, type=float, help='learning rate')
parser.add_argument('--wd', default=1e-5, type=float, help='weight decay')
parser.add_argument('--resume', default='', type=str, help='path of model to resume')
parser.add_argument('--pretrain', default='', type=str, help='path of pretrained model')
parser.add_argument('--epochs', default=300, type=int, help='number of total epochs to run')
parser.add_argument('--start_epoch', default=1, type=int, help='manual epoch number (useful on restarts)')
parser.add_argument('--gpu', default='0', type=str)
parser.add_argument('--print_freq', default=200, type=int, help='frequency of printing output during training')
parser.add_argument('--reset_lr', action='store_true', help='Reset learning rate when resume training?')
parser.add_argument('--prefix', default='tmp', type=str, help='prefix of checkpoint filename')
parser.add_argument('--train_what', default='all', type=str)
parser.add_argument('--img_dim', default=128, type=int)
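# Example training run (hypothetical settings; all flags come from the parser above):
#   python main.py --net resnet18 --model cpc-trans --dataset ucf101 \
#     --batch_size 16 --img_dim 128 --epochs 300 --gpu 0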
def main():
torch.manual_seed(0)
np.random.seed(0)
global args
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
global cuda
cuda = torch.device('cuda')
### CPC with TransformerEncoder model ###
if args.model == 'cpc-trans':
model = CPC_Trans(sample_size=args.img_dim,
num_seq=args.num_seq,
network=args.net,
pred_step=args.pred_step)
else:
raise ValueError('wrong model!')
model = model.to(cuda)
global criterion
criterion = nn.CrossEntropyLoss()
params = model.parameters()
optimizer = optim.Adam(params, lr=args.lr, weight_decay=args.wd)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.5)
args.old_lr = None
best_acc = 0
global iteration
iteration = 0
### restart training ###
if args.resume:
if os.path.isfile(args.resume):
args.old_lr = float(re.search('_lr(.+?)_', args.resume).group(1))
print("=> loading resumed checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume, map_location=torch.device('cpu'))
args.start_epoch = checkpoint['epoch']
iteration = checkpoint['iteration']
best_acc = checkpoint['best_acc']
model.load_state_dict(checkpoint['state_dict'])
if not args.reset_lr: # if didn't reset lr, load old optimizer
optimizer.load_state_dict(checkpoint['optimizer'])
else:
print('==== Change lr from %f to %f ====' % (args.old_lr, args.lr))
print("=> loaded resumed checkpoint '{}' (epoch {}) with best_acc {}".format(args.resume, checkpoint['epoch'], best_acc))
else:
print("[Warning] no checkpoint found at '{}'".format(args.resume))
if args.pretrain:
if os.path.isfile(args.pretrain):
print("=> loading pretrained checkpoint '{}'".format(args.pretrain))
checkpoint = torch.load(args.pretrain, map_location=torch.device('cpu'))
model = neq_load_customized(model, checkpoint['state_dict'])
print("=> loaded pretrained checkpoint '{}' (epoch {})"
.format(args.pretrain, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.pretrain))
### load data ###
if args.dataset == 'ucf101': # designed for ucf101, short size=256, rand crop to 224x224 then scale to 128x128
transform = transforms.Compose([
RandomHorizontalFlip(consistent=True),
RandomCrop(size=224, consistent=True),
Scale(size=(args.img_dim, args.img_dim)),
RandomGray(consistent=False, p=0.5),
ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.25, p=1.0),
ToTensor(),
Normalize()
])
elif args.dataset == 'k400': # designed for kinetics400, short size=150, rand crop to 128x128
transform = transforms.Compose([
RandomSizedCrop(size=args.img_dim, consistent=True, p=1.0),
RandomHorizontalFlip(consistent=True),
RandomGray(consistent=False, p=0.5),
ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.25, p=1.0),
ToTensor(),
Normalize()
])
train_loader = get_data(transform, 'train')
val_loader = get_data(transform, 'val')
# setup tools
global de_normalize
de_normalize = denorm()
global img_path
img_path, model_path = set_path(args)
global writer_train
try: # old version
writer_val = SummaryWriter(log_dir=os.path.join(img_path, 'val'))
writer_train = SummaryWriter(log_dir=os.path.join(img_path, 'train'))
except: # v1.7
writer_val = SummaryWriter(logdir=os.path.join(img_path, 'val'))
writer_train = SummaryWriter(logdir=os.path.join(img_path, 'train'))
print('-- start main loop --')
### main loop ###
for epoch in range(args.start_epoch, args.epochs):
train_loss, train_acc, train_accuracy_list = train(train_loader, model, optimizer, epoch)
val_loss, val_acc, val_accuracy_list = validate(val_loader, model, epoch)
scheduler.step()
print('\t Epoch: ', epoch, 'with lr: ', scheduler.get_last_lr())
# save curve
writer_train.add_scalar('global/loss', train_loss, epoch)
writer_train.add_scalar('global/accuracy', train_acc, epoch)
writer_val.add_scalar('global/loss', val_loss, epoch)
writer_val.add_scalar('global/accuracy', val_acc, epoch)
writer_train.add_scalar('accuracy/top1', train_accuracy_list[0], epoch)
writer_train.add_scalar('accuracy/top3', train_accuracy_list[1], epoch)
writer_train.add_scalar('accuracy/top5', train_accuracy_list[2], epoch)
writer_val.add_scalar('accuracy/top1', val_accuracy_list[0], epoch)
writer_val.add_scalar('accuracy/top3', val_accuracy_list[1], epoch)
writer_val.add_scalar('accuracy/top5', val_accuracy_list[2], epoch)
# save check_point
is_best = val_acc > best_acc
best_acc = max(val_acc, best_acc)
save_checkpoint({'epoch': epoch + 1,
'net': args.net,
'state_dict': model.state_dict(),
'best_acc': best_acc,
'optimizer': optimizer.state_dict(),
'iteration': iteration},
is_best, filename=os.path.join(model_path, 'epoch%s.pth.tar' % str(epoch + 1)), keep_all=False)
print('Training from ep %d to ep %d finished' % (args.start_epoch, args.epochs))
def process_output(mask):
'''task mask as input, compute the target for contrastive loss'''
(B, NP, SQ, B2, NS, _) = mask.size() # [B, P, SQ, B, N, SQ]
target = mask == 1
target = target * 1
target.requires_grad = False
return target, (B, B2, NS, NP, SQ)
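# In train()/validate() below, score_ is flattened to a (B*NP*SQ) x (B2*NS*SQ) similarity
# matrix, the mask-derived target is reduced with argmax to the index of the single
# positive pair per row, and CrossEntropyLoss over those rows gives the contrastive objective.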
def train(data_loader, model, optimizer, epoch):
losses = AverageMeter()
accuracy = AverageMeter()
accuracy_list = [AverageMeter(), AverageMeter(), AverageMeter()]
model.train()
global iteration
for idx, input_seq in enumerate(data_loader):
tic = time.time()
input_seq = input_seq.to(cuda)
B = input_seq.size(0)
[score_, mask_] = model(input_seq)
# visualize
if (iteration == 0) or (iteration == args.print_freq):
if B > 2: input_seq = input_seq[0:2, :]
writer_train.add_image('input_seq',
de_normalize(vutils.make_grid(
input_seq.view(-1, 3, args.img_dim, args.img_dim),
nrow=args.num_seq)),
iteration)
del input_seq
if idx == 0: target_, (_, B2, NS, NP, SQ) = process_output(mask_)
score_flattened = score_.view(B * NP * SQ, B2 * NS * SQ)
target_flattened = target_.contiguous().view(B * NP * SQ, B2 * NS * SQ)
target_flattened = target_flattened.argmax(dim=1)
loss = criterion(score_flattened, target_flattened)
top1, top3, top5 = calc_topk_accuracy(score_flattened, target_flattened, (1, 3, 5))
accuracy_list[0].update(top1.item(), B)
accuracy_list[1].update(top3.item(), B)
accuracy_list[2].update(top5.item(), B)
losses.update(loss.item(), B)
accuracy.update(top1.item(), B)
del score_
optimizer.zero_grad()
loss.backward()
optimizer.step()
del loss
if idx % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Loss {loss.val:.6f} ({loss.local_avg:.4f})\t'
'Acc: top1 {3:.4f}; top3 {4:.4f}; top5 {5:.4f} T:{6:.2f}\t'.format(
epoch, idx, len(data_loader), top1, top3, top5, time.time() - tic, loss=losses))
writer_train.add_scalar('local/loss', losses.val, iteration)
writer_train.add_scalar('local/accuracy', accuracy.val, iteration)
iteration += 1
return losses.local_avg, accuracy.local_avg, [i.local_avg for i in accuracy_list]
def validate(data_loader, model, epoch):
losses = AverageMeter()
accuracy = AverageMeter()
accuracy_list = [AverageMeter(), AverageMeter(), AverageMeter()]
model.eval()
with torch.no_grad():
for idx, input_seq in tqdm(enumerate(data_loader), total=len(data_loader)):
input_seq = input_seq.to(cuda)
B = input_seq.size(0)
[score_, mask_] = model(input_seq)
del input_seq
if idx == 0: target_, (_, B2, NS, NP, SQ) = process_output(mask_)
score_flattened = score_.view(B * NP * SQ, B2 * NS * SQ)
target_flattened = target_.contiguous().view(B * NP * SQ, B2 * NS * SQ)
target_flattened = target_flattened.argmax(dim=1)
loss = criterion(score_flattened, target_flattened)
top1, top3, top5 = calc_topk_accuracy(score_flattened, target_flattened, (1, 3, 5))
losses.update(loss.item(), B)
accuracy.update(top1.item(), B)
accuracy_list[0].update(top1.item(), B)
accuracy_list[1].update(top3.item(), B)
accuracy_list[2].update(top5.item(), B)
print('[{0}/{1}] Loss {loss.local_avg:.4f}\t'
'Acc: top1 {2:.4f}; top3 {3:.4f}; top5 {4:.4f} \t'.format(
epoch, args.epochs, *[i.avg for i in accuracy_list], loss=losses))
return losses.local_avg, accuracy.local_avg, [i.local_avg for i in accuracy_list]
def get_data(transform, mode='train'):
print('Loading data for "%s" ...' % mode)
if args.dataset == 'k400':
pass
elif args.dataset == 'ucf101':
dataset = UCF101_3d(mode=mode,
transform=transform,
num_seq=args.num_seq,
downsample=args.ds,
which_split=3)
else:
raise ValueError('dataset not supported')
sampler = data.RandomSampler(dataset)
if mode == 'train':
data_loader = data.DataLoader(dataset,
batch_size=args.batch_size,
sampler=sampler,
shuffle=False,
num_workers=2,
pin_memory=True,
drop_last=True)
elif mode == 'val':
data_loader = data.DataLoader(dataset,
batch_size=args.batch_size,
sampler=sampler,
shuffle=False,
num_workers=2,
pin_memory=True,
drop_last=True)
print('"%s" dataset size: %d' % (mode, len(dataset)))
return data_loader
def set_path(args):
if args.resume:
exp_path = os.path.dirname(os.path.dirname(args.resume))
else:
exp_path = 'log_{args.prefix}/{args.dataset}-{args.img_dim}_{0}_{args.model}_\
bs{args.batch_size}_lr{1}_seq{args.num_seq}_pred{args.pred_step}_ds{args.ds}_\
train-{args.train_what}{2}'.format(
'r%s' % args.net[6::], \
args.old_lr if args.old_lr is not None else args.lr, \
'_pt=%s' % args.pretrain.replace('/', '-') if args.pretrain else '', \
args=args)
img_path = os.path.join(exp_path, 'img')
model_path = os.path.join(exp_path, 'model')
if not os.path.exists(img_path): os.makedirs(img_path)
if not os.path.exists(model_path): os.makedirs(model_path)
return img_path, model_path
if __name__ == '__main__':
main()
| avg_line_length: 41.245614 | max_line_length: 133 | alphanum_fraction: 0.607188 |

hexsha: f75bfc6bd70d285e8df66e56086560c9c887971c | size: 30,707 | ext: py | lang: Python
path: tests/test_actionAngleTorus.py
repo: turnergarrow/galpy @ 7132eddbf2dab491fe137790e31eacdc604b0534 | licenses: ["BSD-3-Clause"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
from __future__ import print_function, division
import os
import sys
import pytest
import warnings
import numpy
from galpy.util import galpyWarning
from test_actionAngle import reset_warning_registry
_TRAVIS= bool(os.getenv('TRAVIS'))
PY2= sys.version < '3'
# Print all galpyWarnings always for tests of warnings
warnings.simplefilter("always",galpyWarning)
#Basic sanity checking: circular orbit should have constant R, zero vR, vT=vc
def test_actionAngleTorus_basic():
from galpy.actionAngle import actionAngleTorus
from galpy.potential import MWPotential, rl, vcirc, \
FlattenedPowerPotential, PlummerPotential
tol= -4.
jr= 10.**-10.
jz= 10.**-10.
aAT= actionAngleTorus(pot=MWPotential)
# at R=1, Lz=1
jphi= 1.
angler= numpy.linspace(0.,2.*numpy.pi,101)
anglephi= numpy.linspace(0.,2.*numpy.pi,101)+1.
anglez= numpy.linspace(0.,2.*numpy.pi,101)+2.
RvR= aAT(jr,jphi,jz,angler,anglephi,anglez).T
assert numpy.all(numpy.fabs(RvR[0]-rl(MWPotential,jphi)) < 10.**tol), \
'circular orbit does not have constant radius for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[1]) < 10.**tol), \
'circular orbit does not have zero radial velocity for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[2]-vcirc(MWPotential,rl(MWPotential,jphi))) < 10.**tol), \
'circular orbit does not have constant vT=vc for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[3]) < 10.**tol), \
'circular orbit does not have zero vertical height for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[4]) < 10.**tol), \
'circular orbit does not have zero vertical velocity for actionAngleTorus'
# at Lz=1.5, using Plummer
tol= -3.25
pp= PlummerPotential(normalize=1.)
aAT= actionAngleTorus(pot=pp)
jphi= 1.5
RvR= aAT(jr,jphi,jz,angler,anglephi,anglez).T
assert numpy.all(numpy.fabs(RvR[0]-rl(pp,jphi)) < 10.**tol), \
'circular orbit does not have constant radius for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[1]) < 10.**tol), \
'circular orbit does not have zero radial velocity for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[2]-vcirc(pp,rl(pp,jphi))) < 10.**tol), \
'circular orbit does not have constant vT=vc for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[3]) < 10.**tol), \
'circular orbit does not have zero vertical height for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[4]) < 10.**tol), \
'circular orbit does not have zero vertical velocity for actionAngleTorus'
# at Lz=0.5, using FlattenedPowerPotential
tol= -4.
fp= FlattenedPowerPotential(normalize=1.)
aAT= actionAngleTorus(pot=fp)
jphi= 0.5
RvR= aAT(jr,jphi,jz,angler,anglephi,anglez).T
assert numpy.all(numpy.fabs(RvR[0]-rl(fp,jphi)) < 10.**tol), \
'circular orbit does not have constant radius for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[1]) < 10.**tol), \
'circular orbit does not have zero radial velocity for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[2]-vcirc(fp,rl(fp,jphi))) < 10.**tol), \
'circular orbit does not have constant vT=vc for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[3]) < 10.**tol), \
'circular orbit does not have zero vertical height for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[4]) < 10.**tol), \
'circular orbit does not have zero vertical velocity for actionAngleTorus'
return None
#Basic sanity checking: close-to-circular orbit should have freq. = epicycle freq.
def test_actionAngleTorus_basic_freqs():
from galpy.actionAngle import actionAngleTorus
from galpy.potential import epifreq, omegac, verticalfreq, rl, \
JaffePotential, PowerSphericalPotential, HernquistPotential
tol= -3.
jr= 10.**-6.
jz= 10.**-6.
jp= JaffePotential(normalize=1.)
aAT= actionAngleTorus(pot=jp)
# at Lz=1
jphi= 1.
om= aAT.Freqs(jr,jphi,jz)
assert numpy.fabs((om[0]-epifreq(jp,rl(jp,jphi)))/om[0]) < 10.**tol, \
'Close-to-circular orbit does not have Or=kappa for actionAngleTorus'
assert numpy.fabs((om[1]-omegac(jp,rl(jp,jphi)))/om[1]) < 10.**tol, \
'Close-to-circular orbit does not have Ophi=omega for actionAngleTorus'
assert numpy.fabs((om[2]-verticalfreq(jp,rl(jp,jphi)))/om[2]) < 10.**tol, \
'Close-to-circular orbit does not have Oz=nu for actionAngleTorus'
# at Lz=1.5, w/ different potential
pp= PowerSphericalPotential(normalize=1.)
aAT= actionAngleTorus(pot=pp)
jphi= 1.5
om= aAT.Freqs(jr,jphi,jz)
assert numpy.fabs((om[0]-epifreq(pp,rl(pp,jphi)))/om[0]) < 10.**tol, \
'Close-to-circular orbit does not have Or=kappa for actionAngleTorus'
assert numpy.fabs((om[1]-omegac(pp,rl(pp,jphi)))/om[1]) < 10.**tol, \
'Close-to-circular orbit does not have Ophi=omega for actionAngleTorus'
assert numpy.fabs((om[2]-verticalfreq(pp,rl(pp,jphi)))/om[2]) < 10.**tol, \
'Close-to-circular orbit does not have Oz=nu for actionAngleTorus'
# at Lz=0.5, w/ different potential
tol= -2.5 # appears more difficult
hp= HernquistPotential(normalize=1.)
aAT= actionAngleTorus(pot=hp)
jphi= 0.5
om= aAT.Freqs(jr,jphi,jz)
assert numpy.fabs((om[0]-epifreq(hp,rl(hp,jphi)))/om[0]) < 10.**tol, \
'Close-to-circular orbit does not have Or=kappa for actionAngleTorus'
assert numpy.fabs((om[1]-omegac(hp,rl(hp,jphi)))/om[1]) < 10.**tol, \
'Close-to-circular orbit does not have Ophi=omega for actionAngleTorus'
assert numpy.fabs((om[2]-verticalfreq(hp,rl(hp,jphi)))/om[2]) < 10.**tol, \
'Close-to-circular orbit does not have Oz=nu for actionAngleTorus'
return None
#Test that orbit from actionAngleTorus is the same as an integrated orbit
def test_actionAngleTorus_orbit():
from galpy.actionAngle import actionAngleTorus
from galpy.potential import MWPotential2014
from galpy.orbit import Orbit
# Set up instance
aAT= actionAngleTorus(pot=MWPotential2014,tol=10.**-5.)
jr,jphi,jz= 0.05,1.1,0.025
# First calculate frequencies and the initial RvR
RvRom= aAT.xvFreqs(jr,jphi,jz,
numpy.array([0.]),
numpy.array([1.]),
numpy.array([2.]))
om= RvRom[1:]
# Angles along an orbit
ts= numpy.linspace(0.,100.,1001)
angler= ts*om[0]
anglephi= 1.+ts*om[1]
anglez= 2.+ts*om[2]
# Calculate the orbit using actionAngleTorus
RvR= aAT(jr,jphi,jz,angler,anglephi,anglez).T
# Calculate the orbit using orbit integration
orb= Orbit([RvRom[0][0,0],RvRom[0][0,1],RvRom[0][0,2],
RvRom[0][0,3],RvRom[0][0,4],RvRom[0][0,5]])
orb.integrate(ts,MWPotential2014)
# Compare
tol= -3.
assert numpy.all(numpy.fabs(orb.R(ts)-RvR[0]) < 10.**tol), \
'Integrated orbit does not agree with torus orbit in R'
assert numpy.all(numpy.fabs(orb.vR(ts)-RvR[1]) < 10.**tol), \
'Integrated orbit does not agree with torus orbit in vR'
assert numpy.all(numpy.fabs(orb.vT(ts)-RvR[2]) < 10.**tol), \
'Integrated orbit does not agree with torus orbit in vT'
assert numpy.all(numpy.fabs(orb.z(ts)-RvR[3]) < 10.**tol), \
'Integrated orbit does not agree with torus orbit in z'
assert numpy.all(numpy.fabs(orb.vz(ts)-RvR[4]) < 10.**tol), \
'Integrated orbit does not agree with torus orbit in vz'
assert numpy.all(numpy.fabs((orb.phi(ts)-RvR[5]+numpy.pi) % (2.*numpy.pi) -numpy.pi) < 10.**tol), \
'Integrated orbit does not agree with torus orbit in phi'
return None
# Test that actionAngleTorus w/ interp pot gives same freqs as regular pot
# Doesn't work well: TM aborts because our interpolated forces aren't
# consistent enough with the potential for TM's taste, but we test that it at
# at least works somewhat
def test_actionAngleTorus_interppot_freqs():
from galpy.actionAngle import actionAngleTorus
from galpy.potential import LogarithmicHaloPotential, interpRZPotential
lp= LogarithmicHaloPotential(normalize=1.)
ip= interpRZPotential(RZPot=lp,
interpPot=True,
interpDens=True,interpRforce=True,interpzforce=True,
enable_c=True)
aAT= actionAngleTorus(pot=lp)
aATi= actionAngleTorus(pot=ip)
jr,jphi,jz= 0.05,1.1,0.02
om= aAT.Freqs(jr,jphi,jz)
omi= aATi.Freqs(jr,jphi,jz)
assert numpy.fabs((om[0]-omi[0])/om[0]) < 0.2, 'Radial frequency computed using the torus machine does not agree between potential and interpolated potential'
assert numpy.fabs((om[1]-omi[1])/om[1]) < 0.2, 'Azimuthal frequency computed using the torus machine does not agree between potential and interpolated potential'
assert numpy.fabs((om[2]-omi[2])/om[2]) < 0.8, 'Vertical frequency computed using the torus machine does not agree between potential and interpolated potential'
return None
#Test the actionAngleTorus against an isochrone potential: actions
def test_actionAngleTorus_Isochrone_actions():
from galpy.potential import IsochronePotential
from galpy.actionAngle import actionAngleTorus, \
actionAngleIsochrone
ip= IsochronePotential(normalize=1.,b=1.2)
aAI= actionAngleIsochrone(ip=ip)
tol= -6.
aAT= actionAngleTorus(pot=ip,tol=tol)
jr,jphi,jz= 0.075,1.1,0.05
angler= numpy.array([0.])
anglephi= numpy.array([numpy.pi])
anglez= numpy.array([numpy.pi/2.])
# Calculate position from aAT
RvR= aAT(jr,jphi,jz,angler,anglephi,anglez).T
# Calculate actions from aAI
ji= aAI(*RvR)
djr= numpy.fabs((ji[0]-jr)/jr)
dlz= numpy.fabs((ji[1]-jphi)/jphi)
djz= numpy.fabs((ji[2]-jz)/jz)
assert djr < 10.**tol, 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for Jr at %f%%' % (djr*100.)
assert dlz < 10.**tol, 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for Lz at %f%%' % (dlz*100.)
assert djz < 10.**tol, 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for Jz at %f%%' % (djz*100.)
return None
#Test the actionAngleTorus against an isochrone potential: frequencies and angles
def test_actionAngleTorus_Isochrone_freqsAngles():
from galpy.potential import IsochronePotential
from galpy.actionAngle import actionAngleTorus, \
actionAngleIsochrone
ip= IsochronePotential(normalize=1.,b=1.2)
aAI= actionAngleIsochrone(ip=ip)
tol= -6.
aAT= actionAngleTorus(pot=ip,tol=tol)
jr,jphi,jz= 0.075,1.1,0.05
angler= numpy.array([0.1])+numpy.linspace(0.,numpy.pi,101)
angler= angler % (2.*numpy.pi)
anglephi= numpy.array([numpy.pi])+numpy.linspace(0.,numpy.pi,101)
anglephi= anglephi % (2.*numpy.pi)
anglez= numpy.array([numpy.pi/2.])+numpy.linspace(0.,numpy.pi,101)
anglez= anglez % (2.*numpy.pi)
# Calculate position from aAT
RvRom= aAT.xvFreqs(jr,jphi,jz,angler,anglephi,anglez)
# Calculate actions, frequencies, and angles from aAI
ws= aAI.actionsFreqsAngles(*RvRom[0].T)
dOr= numpy.fabs((ws[3]-RvRom[1]))
dOp= numpy.fabs((ws[4]-RvRom[2]))
dOz= numpy.fabs((ws[5]-RvRom[3]))
dar= numpy.fabs((ws[6]-angler))
dap= numpy.fabs((ws[7]-anglephi))
daz= numpy.fabs((ws[8]-anglez))
dar[dar > numpy.pi]-= 2.*numpy.pi
dar[dar < -numpy.pi]+= 2.*numpy.pi
dap[dap > numpy.pi]-= 2.*numpy.pi
dap[dap < -numpy.pi]+= 2.*numpy.pi
daz[daz > numpy.pi]-= 2.*numpy.pi
daz[daz < -numpy.pi]+= 2.*numpy.pi
assert numpy.all(dOr < 10.**tol), 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for Or at %f%%' % (numpy.nanmax(dOr)*100.)
assert numpy.all(dOp < 10.**tol), 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for Ophi at %f%%' % (numpy.nanmax(dOp)*100.)
assert numpy.all(dOz < 10.**tol), 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for Oz at %f%%' % (numpy.nanmax(dOz)*100.)
assert numpy.all(dar < 10.**tol), 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for ar at %f' % (numpy.nanmax(dar))
assert numpy.all(dap < 10.**tol), 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for aphi at %f' % (numpy.nanmax(dap))
assert numpy.all(daz < 10.**tol), 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for az at %f' % (numpy.nanmax(daz))
return None
#Test the actionAngleTorus against a Staeckel potential: actions
def test_actionAngleTorus_Staeckel_actions():
from galpy.potential import KuzminKutuzovStaeckelPotential
from galpy.actionAngle import actionAngleTorus, \
actionAngleStaeckel
delta= 1.2
kp= KuzminKutuzovStaeckelPotential(normalize=1.,Delta=delta)
aAS= actionAngleStaeckel(pot=kp,delta=delta,c=True)
tol= -3.
aAT= actionAngleTorus(pot=kp,tol=tol)
jr,jphi,jz= 0.075,1.1,0.05
angler= numpy.array([0.])
anglephi= numpy.array([numpy.pi])
anglez= numpy.array([numpy.pi/2.])
# Calculate position from aAT
RvR= aAT(jr,jphi,jz,angler,anglephi,anglez).T
# Calculate actions from aAI
ji= aAS(*RvR)
djr= numpy.fabs((ji[0]-jr)/jr)
dlz= numpy.fabs((ji[1]-jphi)/jphi)
djz= numpy.fabs((ji[2]-jz)/jz)
assert djr < 10.**tol, 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for Jr at %f%%' % (djr*100.)
assert dlz < 10.**tol, 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for Lz at %f%%' % (dlz*100.)
assert djz < 10.**tol, 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for Jz at %f%%' % (djz*100.)
return None
#Test the actionAngleTorus against a Staeckel potential: frequencies and angles
def test_actionAngleTorus_Staeckel_freqsAngles():
from galpy.potential import KuzminKutuzovStaeckelPotential
from galpy.actionAngle import actionAngleTorus, \
actionAngleStaeckel
delta= 1.2
kp= KuzminKutuzovStaeckelPotential(normalize=1.,Delta=delta)
aAS= actionAngleStaeckel(pot=kp,delta=delta,c=True)
tol= -3.
aAT= actionAngleTorus(pot=kp,tol=tol)
jr,jphi,jz= 0.075,1.1,0.05
angler= numpy.array([0.1])+numpy.linspace(0.,numpy.pi,101)
angler= angler % (2.*numpy.pi)
anglephi= numpy.array([numpy.pi])+numpy.linspace(0.,numpy.pi,101)
anglephi= anglephi % (2.*numpy.pi)
anglez= numpy.array([numpy.pi/2.])+numpy.linspace(0.,numpy.pi,101)
anglez= anglez % (2.*numpy.pi)
# Calculate position from aAT
RvRom= aAT.xvFreqs(jr,jphi,jz,angler,anglephi,anglez)
# Calculate actions, frequencies, and angles from aAS
ws= aAS.actionsFreqsAngles(*RvRom[0].T)
dOr= numpy.fabs((ws[3]-RvRom[1]))
dOp= numpy.fabs((ws[4]-RvRom[2]))
dOz= numpy.fabs((ws[5]-RvRom[3]))
dar= numpy.fabs((ws[6]-angler))
dap= numpy.fabs((ws[7]-anglephi))
daz= numpy.fabs((ws[8]-anglez))
dar[dar > numpy.pi]-= 2.*numpy.pi
dar[dar < -numpy.pi]+= 2.*numpy.pi
dap[dap > numpy.pi]-= 2.*numpy.pi
dap[dap < -numpy.pi]+= 2.*numpy.pi
daz[daz > numpy.pi]-= 2.*numpy.pi
daz[daz < -numpy.pi]+= 2.*numpy.pi
assert numpy.all(dOr < 10.**tol), 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for Or at %f%%' % (numpy.nanmax(dOr)*100.)
assert numpy.all(dOp < 10.**tol), 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for Ophi at %f%%' % (numpy.nanmax(dOp)*100.)
assert numpy.all(dOz < 10.**tol), 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for Oz at %f%%' % (numpy.nanmax(dOz)*100.)
assert numpy.all(dar < 10.**tol), 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for ar at %f' % (numpy.nanmax(dar))
assert numpy.all(dap < 10.**tol), 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for aphi at %f' % (numpy.nanmax(dap))
assert numpy.all(daz < 10.**tol), 'actionAngleTorus and actionAngleStaeckel applied to Staeckel potential disagree for az at %f' % (numpy.nanmax(daz))
return None
#Test the actionAngleTorus against a general potential w/ actionAngleIsochroneApprox: actions
def test_actionAngleTorus_isochroneApprox_actions():
from galpy.potential import MWPotential2014
from galpy.actionAngle import actionAngleTorus, \
actionAngleIsochroneApprox
aAIA= actionAngleIsochroneApprox(pot=MWPotential2014,b=0.8)
tol= -2.5
aAT= actionAngleTorus(pot=MWPotential2014,tol=tol)
jr,jphi,jz= 0.075,1.1,0.05
angler= numpy.array([0.])
anglephi= numpy.array([numpy.pi])
anglez= numpy.array([numpy.pi/2.])
# Calculate position from aAT
RvR= aAT(jr,jphi,jz,angler,anglephi,anglez).T
# Calculate actions from aAIA
ji= aAIA(*RvR)
djr= numpy.fabs((ji[0]-jr)/jr)
dlz= numpy.fabs((ji[1]-jphi)/jphi)
djz= numpy.fabs((ji[2]-jz)/jz)
assert djr < 10.**tol, 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for Jr at %f%%' % (djr*100.)
assert dlz < 10.**tol, 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for Lz at %f%%' % (dlz*100.)
assert djz < 10.**tol, 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for Jz at %f%%' % (djz*100.)
return None
#Test the actionAngleTorus against a general potential w/ actionAngleIsochroneApprox: frequencies and angles
def test_actionAngleTorus_isochroneApprox_freqsAngles():
from galpy.potential import MWPotential2014
from galpy.actionAngle import actionAngleTorus, \
actionAngleIsochroneApprox
aAIA= actionAngleIsochroneApprox(pot=MWPotential2014,b=0.8)
tol= -3.5
aAT= actionAngleTorus(pot=MWPotential2014,tol=tol)
jr,jphi,jz= 0.075,1.1,0.05
angler= numpy.array([0.1])+numpy.linspace(0.,numpy.pi,21)
angler= angler % (2.*numpy.pi)
anglephi= numpy.array([numpy.pi])+numpy.linspace(0.,numpy.pi,21)
anglephi= anglephi % (2.*numpy.pi)
anglez= numpy.array([numpy.pi/2.])+numpy.linspace(0.,numpy.pi,21)
anglez= anglez % (2.*numpy.pi)
# Calculate position from aAT
RvRom= aAT.xvFreqs(jr,jphi,jz,angler,anglephi,anglez)
# Calculate actions, frequencies, and angles from aAIA
ws= aAIA.actionsFreqsAngles(*RvRom[0].T)
dOr= numpy.fabs((ws[3]-RvRom[1]))
dOp= numpy.fabs((ws[4]-RvRom[2]))
dOz= numpy.fabs((ws[5]-RvRom[3]))
dar= numpy.fabs((ws[6]-angler))
dap= numpy.fabs((ws[7]-anglephi))
daz= numpy.fabs((ws[8]-anglez))
dar[dar > numpy.pi]-= 2.*numpy.pi
dar[dar < -numpy.pi]+= 2.*numpy.pi
dap[dap > numpy.pi]-= 2.*numpy.pi
dap[dap < -numpy.pi]+= 2.*numpy.pi
daz[daz > numpy.pi]-= 2.*numpy.pi
daz[daz < -numpy.pi]+= 2.*numpy.pi
assert numpy.all(dOr < 10.**tol), 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for Or at %f%%' % (numpy.nanmax(dOr)*100.)
assert numpy.all(dOp < 10.**tol), 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for Ophi at %f%%' % (numpy.nanmax(dOp)*100.)
assert numpy.all(dOz < 10.**tol), 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for Oz at %f%%' % (numpy.nanmax(dOz)*100.)
assert numpy.all(dar < 10.**tol), 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for ar at %f' % (numpy.nanmax(dar))
assert numpy.all(dap < 10.**tol), 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for aphi at %f' % (numpy.nanmax(dap))
assert numpy.all(daz < 10.**tol), 'actionAngleTorus and actionAngleIsochroneApprox applied to MWPotential2014 potential disagree for az at %f' % (numpy.nanmax(daz))
return None
# Test that the frequencies returned by hessianFreqs are the same as those returned by Freqs
def test_actionAngleTorus_hessian_freqs():
from galpy.potential import MWPotential2014
from galpy.actionAngle import actionAngleTorus
aAT= actionAngleTorus(pot=MWPotential2014)
jr,jphi,jz= 0.075,1.1,0.05
fO= aAT.Freqs(jr,jphi,jz)[:3]
hO= aAT.hessianFreqs(jr,jphi,jz)[1:4]
assert numpy.all(numpy.fabs(numpy.array(fO)-numpy.array(hO)) < 10.**-8.), 'actionAngleTorus methods Freqs and hessianFreqs return different frequencies'
return None
# Test that the Hessian is approximately symmetric
def test_actionAngleTorus_hessian_symm():
from galpy.potential import MWPotential2014
from galpy.actionAngle import actionAngleTorus
aAT= actionAngleTorus(pot=MWPotential2014)
jr,jphi,jz= 0.075,1.1,0.05
h= aAT.hessianFreqs(jr,jphi,jz,tol=0.0001,nosym=True)[0]
assert numpy.all(numpy.fabs((h-h.T)/h) < 0.03), 'actionAngleTorus Hessian is not symmetric'
return None
# Test that the Hessian is approximately correct
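# (hessianFreqs returns dOmega_i/dJ_j = d^2 H(J)/(dJ_i dJ_j), so to first order
#  Omega(J+dJ) ~= Omega(J) + (dOmega/dJ) . dJ; the test below checks this linear
#  prediction against a direct evaluation of Freqs at J+dJ)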
def test_actionAngleTorus_hessian_linear():
from galpy.potential import MWPotential2014
from galpy.actionAngle import actionAngleTorus
aAT= actionAngleTorus(pot=MWPotential2014)
jr,jphi,jz= 0.075,1.1,0.05
h= aAT.hessianFreqs(jr,jphi,jz,tol=0.0001,nosym=True)[0]
dj= numpy.array([0.02,0.005,-0.01])
do_fromhessian= numpy.dot(h,dj)
O= numpy.array(aAT.Freqs(jr,jphi,jz)[:3])
do= numpy.array(aAT.Freqs(jr+dj[0],jphi+dj[1],jz+dj[2])[:3])-O
assert numpy.all(numpy.fabs((do_fromhessian-do)/O)< 0.001), 'actionAngleTorus Hessian does not return good approximation to dO/dJ'
return None
# Test that the frequencies returned by xvJacobianFreqs are the same as those returned by Freqs
def test_actionAngleTorus_jacobian_freqs():
from galpy.potential import MWPotential2014
from galpy.actionAngle import actionAngleTorus
aAT= actionAngleTorus(pot=MWPotential2014)
jr,jphi,jz= 0.075,1.1,0.05
fO= aAT.Freqs(jr,jphi,jz)[:3]
hO= aAT.xvJacobianFreqs(jr,jphi,jz,
numpy.array([0.]),numpy.array([1.]),
numpy.array([2.]))[3:6]
assert numpy.all(numpy.fabs(numpy.array(fO)-numpy.array(hO)) < 10.**-8.), 'actionAngleTorus methods Freqs and xvJacobianFreqs return different frequencies'
return None
# Test that the Hessian returned by xvJacobianFreqs is the same as the one returned by hessianFreqs
def test_actionAngleTorus_jacobian_hessian():
from galpy.potential import MWPotential2014
from galpy.actionAngle import actionAngleTorus
aAT= actionAngleTorus(pot=MWPotential2014)
jr,jphi,jz= 0.075,1.1,0.05
fO= aAT.hessianFreqs(jr,jphi,jz)[0]
hO= aAT.xvJacobianFreqs(jr,jphi,jz,
numpy.array([0.]),numpy.array([1.]),
numpy.array([2.]))[2]
assert numpy.all(numpy.fabs(numpy.array(fO)-numpy.array(hO)) < 10.**-8.), 'actionAngleTorus methods hessianFreqs and xvJacobianFreqs return different Hessians'
return None
# Test that the xv returned by xvJacobianFreqs are the same as those returned by __call__
def test_actionAngleTorus_jacobian_xv():
from galpy.potential import MWPotential2014
from galpy.actionAngle import actionAngleTorus
aAT= actionAngleTorus(pot=MWPotential2014)
jr,jphi,jz= 0.075,1.1,0.05
angler= numpy.array([0.,1.])
anglephi= numpy.array([1.,2.])
anglez= numpy.array([2.,3.])
fO= aAT(jr,jphi,jz,angler,anglephi,anglez)
hO= aAT.xvJacobianFreqs(jr,jphi,jz,angler,anglephi,anglez)[0]
assert numpy.all(numpy.fabs(numpy.array(fO)-numpy.array(hO)) < 10.**-8.), 'actionAngleTorus methods __call__ and xvJacobianFreqs return different xv'
return None
# Test that the determinant of the Jacobian returned by xvJacobianFreqs is close to 1/R (should be 1 for rectangular coordinates, 1/R for cylindrical coordinates)
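# (the torus map (J,theta) -> (x,v) is canonical and therefore conserves
#  phase-space volume; in cylindrical coordinates d^3x d^3v
#  = R dR dphi dz dv_R dv_T dv_z, so |det d(R,phi,z,v_R,v_T,v_z)/d(J,theta)|
#  = 1/R, which is why the determinant is multiplied by R [the first column of
#  the returned xv array] in the asserts below)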
def test_actionAngleTorus_jacobian_detone():
from galpy.potential import MWPotential2014
from galpy.actionAngle import actionAngleTorus
aAT= actionAngleTorus(pot=MWPotential2014,dJ=0.0001)
jr,jphi,jz= 0.075,1.1,0.05
angler= numpy.array([0.,1.])
anglephi= numpy.array([1.,2.])
anglez= numpy.array([2.,3.])
jf= aAT.xvJacobianFreqs(jr,jphi,jz,angler,anglephi,anglez)
assert numpy.fabs(jf[0][0,0]*numpy.fabs(numpy.linalg.det(jf[1][0]))-1) < 0.01, 'Jacobian returned by actionAngleTorus method xvJacobianFreqs does not have the expected determinant'
assert numpy.fabs(jf[0][1,0]*numpy.fabs(numpy.linalg.det(jf[1][1]))-1) < 0.01, 'Jacobian returned by actionAngleTorus method xvJacobianFreqs does not have the expected determinant'
return None
# Test that the Jacobian returned by xvJacobianFreqs is approximately correct
def test_actionAngleTorus_jacobian_linear():
from galpy.potential import MWPotential2014
from galpy.actionAngle import actionAngleTorus
aAT= actionAngleTorus(pot=MWPotential2014)
jr,jphi,jz= 0.075,1.1,0.05
angler= numpy.array([0.5])
anglephi= numpy.array([1.])
anglez= numpy.array([2.])
jf= aAT.xvJacobianFreqs(jr,jphi,jz,angler,anglephi,anglez)
xv= aAT(jr,jphi,jz,angler,anglephi,anglez)
dja= 2.*numpy.array([0.001,0.002,0.003,-0.002,0.004,0.002])
xv_direct= aAT(jr+dja[0],jphi+dja[1],jz+dja[2],
angler+dja[3],anglephi+dja[4],anglez+dja[5])
xv_fromjac= xv+numpy.dot(jf[1],dja)
assert numpy.all(numpy.fabs((xv_fromjac-xv_direct)/xv_direct) < 0.01), 'Jacobian returned by actionAngleTorus method xvJacobianFreqs does not appear to be correct'
return None
#Test error when potential is not implemented in C
def test_actionAngleTorus_nocerr():
from galpy.actionAngle import actionAngleTorus
from test_potential import BurkertPotentialNoC
bp= BurkertPotentialNoC()
try:
aAT= actionAngleTorus(pot=bp)
except RuntimeError: pass
else:
raise AssertionError("actionAngleTorus initialization with potential w/o C should have given a RuntimeError, but didn't")
return None
#Test error when potential is not axisymmetric
def test_actionAngleTorus_nonaxierr():
from galpy.actionAngle import actionAngleTorus
from galpy.potential import TriaxialNFWPotential
np= TriaxialNFWPotential(normalize=1.,b=0.9)
try:
aAT= actionAngleTorus(pot=np)
except RuntimeError: pass
else:
raise AssertionError("actionAngleTorus initialization with non-axisymmetric potential should have given a RuntimeError, but didn't")
return None
# Test the Autofit torus warnings
def test_actionAngleTorus_AutoFitWarning():
from galpy.potential import LogarithmicHaloPotential
from galpy.actionAngle import actionAngleTorus
lp= LogarithmicHaloPotential(normalize=1.,q=0.9)
aAT= actionAngleTorus(pot=lp,tol=10.**-8.)
# These should give warnings
jr, jp, jz= 0.27209033, 1.80253892, 0.6078445
ar, ap, az= numpy.array([1.95732492]), numpy.array([6.16753224]), \
numpy.array([4.08233059])
#Turn warnings into errors to test for them
import warnings
with warnings.catch_warnings(record=True) as w:
if PY2: reset_warning_registry('galpy')
warnings.simplefilter("always",galpyWarning)
aAT(jr,jp,jz,ar,ap,az)
# Should raise warning bc of Autofit, might raise others
raisedWarning= False
for wa in w:
raisedWarning= (str(wa.message) == "actionAngleTorus' AutoFit exited with non-zero return status -3: Fit failed the goal by more than 2")
if raisedWarning: break
assert raisedWarning, "actionAngleTorus with flattened LogarithmicHaloPotential and a particular orbit should have thrown a warning, but didn't"
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always",galpyWarning)
aAT.xvFreqs(jr,jp,jz,ar,ap,az)
# Should raise warning bc of Autofit, might raise others
raisedWarning= False
for wa in w:
raisedWarning= (str(wa.message) == "actionAngleTorus' AutoFit exited with non-zero return status -3: Fit failed the goal by more than 2")
if raisedWarning: break
assert raisedWarning, "actionAngleTorus with flattened LogarithmicHaloPotential and a particular orbit should have thrown a warning, but didn't"
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always",galpyWarning)
aAT.Freqs(jr,jp,jz)
# Should raise warning bc of Autofit, might raise others
raisedWarning= False
for wa in w:
raisedWarning= (str(wa.message) == "actionAngleTorus' AutoFit exited with non-zero return status -3: Fit failed the goal by more than 2")
if raisedWarning: break
assert raisedWarning, "actionAngleTorus with flattened LogarithmicHaloPotential and a particular orbit should have thrown a warning, but didn't"
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always",galpyWarning)
aAT.hessianFreqs(jr,jp,jz)
# Should raise warning bc of Autofit, might raise others
raisedWarning= False
for wa in w:
raisedWarning= (str(wa.message) == "actionAngleTorus' AutoFit exited with non-zero return status -3: Fit failed the goal by more than 2")
if raisedWarning: break
assert raisedWarning, "actionAngleTorus with flattened LogarithmicHaloPotential and a particular orbit should have thrown a warning, but didn't"
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always",galpyWarning)
aAT.xvJacobianFreqs(jr,jp,jz,ar,ap,az)
# Should raise warning bc of Autofit, might raise others
raisedWarning= False
for wa in w:
raisedWarning= (str(wa.message) == "actionAngleTorus' AutoFit exited with non-zero return status -3: Fit failed the goal by more than 2")
if raisedWarning: break
assert raisedWarning, "actionAngleTorus with flattened LogarithmicHaloPotential and a particular orbit should have thrown a warning, but didn't"
return None
def test_MWPotential_warning_torus():
# Test that using MWPotential throws a warning, see #229
from galpy.actionAngle import actionAngleTorus
from galpy.potential import MWPotential
if PY2: reset_warning_registry('galpy')
warnings.simplefilter("error",galpyWarning)
try:
aAA= actionAngleTorus(pot=MWPotential)
except: pass
else:
raise AssertionError("actionAngleTorus with MWPotential should have thrown a warning, but didn't")
#Turn warnings back into warnings
warnings.simplefilter("always",galpyWarning)
return None
| 52.851979 | 184 | 0.702055 |
f75c10a90632f4b521b5383bbf5cf33532139ce2 | 3,062 | py | Python | tests/task_plugin_test.py | pchoisel/girder_worker | 66e1e8b82ee3e64421b59111a76b84852eec7947 | [
"Apache-2.0"
] | 37 | 2016-01-26T19:21:23.000Z | 2021-06-10T14:12:59.000Z | tests/task_plugin_test.py | pchoisel/girder_worker | 66e1e8b82ee3e64421b59111a76b84852eec7947 | [
"Apache-2.0"
] | 290 | 2016-01-27T14:02:10.000Z | 2022-01-24T16:50:27.000Z | tests/task_plugin_test.py | pchoisel/girder_worker | 66e1e8b82ee3e64421b59111a76b84852eec7947 | [
"Apache-2.0"
] | 29 | 2016-02-17T17:54:47.000Z | 2022-03-17T23:36:17.000Z | from girder_worker import entrypoint
from girder_worker.__main__ import main
from girder_worker.entrypoint import discover_tasks
import mock
import pytest
def setup_function(func):
if hasattr(func, 'pytestmark'):
for m in func.pytestmark:
if m.name == 'namespace':
namespace = m.args[0]
func.original = entrypoint.NAMESPACE
entrypoint.NAMESPACE = namespace
def teardown_function(func):
if hasattr(func, 'original'):
entrypoint.NAMESPACE = func.original
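# The tests below rely on a custom ``namespace`` pytest marker: setup_function
# above reads that marker and temporarily points entrypoint.NAMESPACE at a
# bundled set of test plugins, and teardown_function restores the original
# value afterwards. A marked test therefore looks like, e.g.:
#
#   @pytest.mark.namespace('girder_worker._test_plugins.valid_plugins')
#   def test_something():
#       ...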
@pytest.mark.namespace('girder_worker._test_plugins.valid_plugins')
def test_get_extension_manager():
mgr = entrypoint.get_extension_manager()
names = sorted(mgr.names())
assert names == ['plugin1', 'plugin2']
@pytest.mark.namespace('girder_worker._test_plugins.valid_plugins')
def test_import_all_includes():
with mock.patch('girder_worker.entrypoint.import_module') as imp:
entrypoint.import_all_includes()
imp.assert_has_calls(
[mock.call('girder_worker._test_plugins.tasks')],
any_order=True)
@pytest.mark.namespace('girder_worker._test_plugins.invalid_plugins')
def test_invalid_plugins():
with pytest.raises(Exception):
entrypoint.get_plugin_task_modules()
@pytest.mark.namespace('girder_worker._test_plugins.valid_plugins')
def test_external_plugins():
with mock.patch('girder_worker.app.app') as app:
discover_tasks(app)
app.conf.update.assert_any_call({'CELERY_INCLUDE':
['girder_worker._test_plugins.tasks']})
@pytest.mark.namespace('girder_worker._test_plugins.valid_plugins')
def test_get_extensions():
with mock.patch('girder_worker.__main__.app'):
main()
extensions = sorted(entrypoint.get_extensions())
assert extensions == ['plugin1', 'plugin2']
@pytest.mark.namespace('girder_worker._test_plugins.valid_plugins')
def test_get_module_tasks():
with mock.patch('girder_worker.__main__.app'):
main()
extensions = sorted(entrypoint.get_module_tasks('girder_worker._test_plugins.tasks'))
assert extensions == [
'girder_worker._test_plugins.tasks.celery_task',
'girder_worker._test_plugins.tasks.function_task'
]
@pytest.mark.namespace('girder_worker._test_plugins.valid_plugins')
def test_get_extension_tasks():
with mock.patch('girder_worker.__main__.app'):
main()
extensions = sorted(entrypoint.get_extension_tasks('plugin2'))
assert extensions == [
'girder_worker._test_plugins.tasks.celery_task',
'girder_worker._test_plugins.tasks.function_task'
]
@pytest.mark.namespace('girder_worker._test_plugins.valid_plugins')
def test_get_extension_tasks_celery():
with mock.patch('girder_worker.__main__.app'):
main()
extensions = sorted(entrypoint.get_extension_tasks('plugin2', celery_only=True))
assert extensions == [
'girder_worker._test_plugins.tasks.celery_task'
]
| 33.648352 | 93 | 0.701502 |
f75c1108626161702826e7b0c972a2d11ea7bbd4 | 1,007 | py | Python | marqeta/response_models/commando_mode_nested_transition.py | marqeta/marqeta-python | 66fa690eb910825c510a391720b0fe717fac0234 | [
"MIT"
] | 21 | 2019-04-12T09:02:17.000Z | 2022-02-18T11:39:06.000Z | marqeta/response_models/commando_mode_nested_transition.py | marqeta/marqeta-python | 66fa690eb910825c510a391720b0fe717fac0234 | [
"MIT"
] | 1 | 2020-07-22T21:27:40.000Z | 2020-07-23T17:38:43.000Z | marqeta/response_models/commando_mode_nested_transition.py | marqeta/marqeta-python | 66fa690eb910825c510a391720b0fe717fac0234 | [
"MIT"
] | 10 | 2019-05-08T14:20:37.000Z | 2021-09-20T18:09:26.000Z | from datetime import datetime, date
from marqeta.response_models import datetime_object
import json
import re
class CommandoModeNestedTransition(object):
def __init__(self, json_response):
self.json_response = json_response
def __str__(self):
return json.dumps(self.json_response, default=self.json_serial)
@staticmethod
def json_serial(o):
if isinstance(o, datetime) or isinstance(o, date):
return o.__str__()
@property
def commando_enabled(self):
return self.json_response.get('commando_enabled', None)
@property
def reason(self):
return self.json_response.get('reason', None)
@property
def channel(self):
return self.json_response.get('channel', None)
@property
def username(self):
return self.json_response.get('username', None)
def __repr__(self):
return '<Marqeta.response_models.commando_mode_nested_transition.CommandoModeNestedTransition>' + self.__str__()
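# A hedged usage sketch (the payload fields shown are illustrative only, not
# taken from the Marqeta API documentation):
#
#   raw = {'commando_enabled': True, 'reason': 'network outage',
#          'channel': 'API', 'username': 'ops_user'}
#   transition = CommandoModeNestedTransition(raw)
#   transition.commando_enabled # -> True
#   str(transition) # -> the raw response serialized as JSON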
| 25.175 | 121 | 0.701092 |
f75c1d9eb94577e27f0fdee7d82c2e048246833a | 1,829 | py | Python | tests/test_preprocessing_glove.py | bbreton3/glove_tf_21 | 16b18bdb2d41c104dcd9159c0a760336bb5fd4d1 | [
"MIT"
] | 1 | 2020-04-18T16:33:05.000Z | 2020-04-18T16:33:05.000Z | tests/test_preprocessing_glove.py | bbreton3/glove_tf_21 | 16b18bdb2d41c104dcd9159c0a760336bb5fd4d1 | [
"MIT"
] | 7 | 2020-11-13T17:44:25.000Z | 2022-02-10T01:16:13.000Z | tests/test_preprocessing_glove.py | bbreton3/glove_tf_21 | 16b18bdb2d41c104dcd9159c0a760336bb5fd4d1 | [
"MIT"
] | null | null | null | from glove_tf_21.utils.file_utils import save_labels
import numpy as np
import os
def test_cooc_count(preprocessing_glove, ix_sequences_full, cooc_dict):
output_cooc = dict()
for ix_seq in ix_sequences_full:
output_cooc = preprocessing_glove.cooc_count(output_cooc, ix_seq)
assert len(output_cooc) == len(cooc_dict)
for key, val in cooc_dict.items():
assert np.allclose(output_cooc[key], val)
def test_cooc_dict_to_sparse(preprocessing_glove_fit, cooc_dict, cooc_matrix_sparse):
sparse_cooc_mat = preprocessing_glove_fit.cooc_dict_to_sparse(cooc_dict)
assert np.sum(sparse_cooc_mat != cooc_matrix_sparse) == 0.0
def test_glove_formatter(preprocessing_glove, cooc_matrix_sparse, cooc_rows, cooc_cols, cooc_data):
test_cooc_rows, test_cooc_cols, test_cooc_data = preprocessing_glove.glove_formatter(cooc_matrix_sparse)
assert np.allclose(test_cooc_rows, cooc_rows)
assert np.allclose(test_cooc_cols, cooc_cols)
assert np.allclose(test_cooc_data, cooc_data)
def test_get_labels(preprocessing_glove_fit, vocab):
assert preprocessing_glove_fit.get_labels() == vocab
def test_get_cooc_mat(preprocessing_glove_fit, corpus_file_path, cooc_matrix_sparse, temp_folder_path):
test_cooc_matrix_sparse = preprocessing_glove_fit.get_cooc_mat(corpus_file_path)
assert np.sum(test_cooc_matrix_sparse != cooc_matrix_sparse) == 0.0
empty_file_path = os.path.join(temp_folder_path, "empty_file.txt")
save_labels([""], empty_file_path)
assert np.sum(preprocessing_glove_fit.get_cooc_mat(empty_file_path)) == 0.0
os.remove(empty_file_path)
def test_call(preprocessing_glove_fit):
cooc_rows, cooc_cols, cooc_data, cooc = preprocessing_glove_fit()
assert len(cooc_rows) == 40
assert len(cooc_cols) == 40
assert len(cooc_data) == 40
| 34.509434 | 108 | 0.785128 |
f75c31ebee1c575c487db6678c659f1e492c04e5 | 37,312 | py | Python | skorch/callbacks/logging.py | TheAutumnOfRice/skorch | 6d778fc38d797644c847a9c87dd23299eea63087 | [
"BSD-3-Clause"
] | 2,748 | 2019-03-19T11:43:01.000Z | 2022-03-31T13:55:28.000Z | skorch/callbacks/logging.py | TheAutumnOfRice/skorch | 6d778fc38d797644c847a9c87dd23299eea63087 | [
"BSD-3-Clause"
] | 392 | 2019-03-19T11:17:04.000Z | 2022-03-29T21:36:53.000Z | skorch/callbacks/logging.py | TheAutumnOfRice/skorch | 6d778fc38d797644c847a9c87dd23299eea63087 | [
"BSD-3-Clause"
] | 197 | 2019-03-27T09:18:25.000Z | 2022-03-27T00:15:23.000Z | """ Callbacks for printing, logging and log information."""
import sys
import time
import tempfile
from contextlib import suppress
from numbers import Number
from itertools import cycle
from pathlib import Path
import numpy as np
import tqdm
from tabulate import tabulate
from skorch.utils import Ansi
from skorch.dataset import get_len
from skorch.callbacks import Callback
__all__ = ['EpochTimer', 'NeptuneLogger', 'WandbLogger', 'PrintLog', 'ProgressBar',
'TensorBoard', 'SacredLogger', 'MlflowLogger']
def filter_log_keys(keys, keys_ignored=None):
"""Filter out keys that are generally to be ignored.
This is used by several callbacks to filter out keys from history
that should not be logged.
Parameters
----------
keys : iterable of str
All keys.
keys_ignored : iterable of str or None (default=None)
If not None, collection of extra keys to be ignored.
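Examples
--------
Only plain metric keys survive the filtering; for instance:
>>> list(filter_log_keys(
...     ['epoch', 'train_loss', 'valid_loss', 'valid_loss_best', 'dur',
...      'event_lr'],
...     keys_ignored=['dur']))
['train_loss', 'valid_loss']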
"""
keys_ignored = keys_ignored or ()
for key in keys:
if not (
key == 'epoch' or
(key in keys_ignored) or
key.endswith('_best') or
key.endswith('_batch_count') or
key.startswith('event_')
):
yield key
class EpochTimer(Callback):
"""Measures the duration of each epoch and writes it to the
history with the name ``dur``.
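The recorded duration is in seconds and can be read back from the
history once an epoch has finished, e.g. via ``net.history[-1, 'dur']``.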
"""
def __init__(self, **kwargs):
super(EpochTimer, self).__init__(**kwargs)
self.epoch_start_time_ = None
def on_epoch_begin(self, net, **kwargs):
self.epoch_start_time_ = time.time()
def on_epoch_end(self, net, **kwargs):
net.history.record('dur', time.time() - self.epoch_start_time_)
class NeptuneLogger(Callback):
"""Logs results from history to Neptune
Neptune is a lightweight experiment tracking tool.
You can read more about it here: https://neptune.ai
Use this callback to automatically log all interesting values from
your net's history to Neptune.
The best way to log additional information is to log directly to the
experiment object or subclass the ``on_*`` methods.
To monitor resource consumption install psutil
>>> pip install psutil
You can view example experiment logs here:
https://ui.neptune.ai/o/shared/org/skorch-integration/e/SKOR-13/charts
Examples
--------
>>> # Install neptune
>>> pip install neptune-client
>>> # Create a neptune experiment object
>>> import neptune
...
... # We are using api token for an anonymous user.
... # For your projects use the token associated with your neptune.ai account
>>> neptune.init(api_token='ANONYMOUS',
... project_qualified_name='shared/skorch-integration')
...
... experiment = neptune.create_experiment(
... name='skorch-basic-example',
... params={'max_epochs': 20,
... 'lr': 0.01},
... upload_source_files=['skorch_example.py'])
>>> # Create a neptune_logger callback
>>> neptune_logger = NeptuneLogger(experiment, close_after_train=False)
>>> # Pass a logger to net callbacks argument
>>> net = NeuralNetClassifier(
... ClassifierModule,
... max_epochs=20,
... lr=0.01,
... callbacks=[neptune_logger])
>>> # Log additional metrics after training has finished
>>> from sklearn.metrics import roc_auc_score
... y_pred = net.predict_proba(X)
... auc = roc_auc_score(y, y_pred[:, 1])
...
... neptune_logger.experiment.log_metric('roc_auc_score', auc)
>>> # log charts like ROC curve
... from scikitplot.metrics import plot_roc
... import matplotlib.pyplot as plt
...
... fig, ax = plt.subplots(figsize=(16, 12))
... plot_roc(y, y_pred, ax=ax)
... neptune_logger.experiment.log_image('roc_curve', fig)
>>> # log net object after training
... net.save_params(f_params='basic_model.pkl')
... neptune_logger.experiment.log_artifact('basic_model.pkl')
>>> # close experiment
... neptune_logger.experiment.stop()
Parameters
----------
experiment : neptune.experiments.Experiment
Instantiated ``Experiment`` class.
log_on_batch_end : bool (default=False)
Whether to log loss and other metrics on batch level.
close_after_train : bool (default=True)
Whether to close the ``Experiment`` object once training
finishes. Set this parameter to False if you want to continue
logging to the same Experiment or if you use it as a context
manager.
keys_ignored : str or list of str (default=None)
Key or list of keys that should not be logged to
Neptune. Note that in addition to the keys provided by the
user, keys such as those starting with 'event_' or ending on
'_best' are ignored by default.
Attributes
----------
first_batch_ : bool
Helper attribute that is set to True at initialization and changes
to False on first batch end. Can be used when we want to log things
exactly once.
.. _Neptune: https://www.neptune.ai
"""
def __init__(
self,
experiment,
log_on_batch_end=False,
close_after_train=True,
keys_ignored=None,
):
self.experiment = experiment
self.log_on_batch_end = log_on_batch_end
self.close_after_train = close_after_train
self.keys_ignored = keys_ignored
def initialize(self):
self.first_batch_ = True
keys_ignored = self.keys_ignored
if isinstance(keys_ignored, str):
keys_ignored = [keys_ignored]
self.keys_ignored_ = set(keys_ignored or [])
self.keys_ignored_.add('batches')
return self
def on_batch_end(self, net, **kwargs):
if self.log_on_batch_end:
batch_logs = net.history[-1]['batches'][-1]
for key in filter_log_keys(batch_logs.keys(), self.keys_ignored_):
self.experiment.log_metric(key, batch_logs[key])
self.first_batch_ = False
def on_epoch_end(self, net, **kwargs):
"""Automatically log values from the last history step."""
history = net.history
epoch_logs = history[-1]
epoch = epoch_logs['epoch']
for key in filter_log_keys(epoch_logs.keys(), self.keys_ignored_):
self.experiment.log_metric(key, x=epoch, y=epoch_logs[key])
def on_train_end(self, net, **kwargs):
if self.close_after_train:
self.experiment.stop()
class WandbLogger(Callback):
"""Logs best model and metrics to `Weights & Biases <https://docs.wandb.com/>`_
Use this callback to automatically log best trained model, all metrics from
your net's history, model topology and computer resources to Weights & Biases
after each epoch.
Every file saved in `wandb_run.dir` is automatically logged to W&B servers.
See `example run
<https://app.wandb.ai/borisd13/skorch/runs/s20or4ct/overview?workspace=user-borisd13>`_
Examples
--------
>>> # Install wandb
... pip install wandb
>>> import wandb
>>> from skorch.callbacks import WandbLogger
>>> # Create a wandb Run
... wandb_run = wandb.init()
>>> # Alternative: Create a wandb Run without having a W&B account
... wandb_run = wandb.init(anonymous="allow")
>>> # Log hyper-parameters (optional)
... wandb_run.config.update({"learning rate": 1e-3, "batch size": 32})
>>> net = NeuralNet(..., callbacks=[WandbLogger(wandb_run)])
>>> net.fit(X, y)
Parameters
----------
wandb_run : wandb.wandb_run.Run
wandb Run used to log data.
save_model : bool (default=True)
Whether to save a checkpoint of the best model and upload it
to your Run on W&B servers.
keys_ignored : str or list of str (default=None)
Key or list of keys that should not be logged to
tensorboard. Note that in addition to the keys provided by the
user, keys such as those starting with 'event_' or ending on
'_best' are ignored by default.
"""
def __init__(
self,
wandb_run,
save_model=True,
keys_ignored=None,
):
self.wandb_run = wandb_run
self.save_model = save_model
self.keys_ignored = keys_ignored
def initialize(self):
keys_ignored = self.keys_ignored
if isinstance(keys_ignored, str):
keys_ignored = [keys_ignored]
self.keys_ignored_ = set(keys_ignored or [])
self.keys_ignored_.add('batches')
return self
def on_train_begin(self, net, **kwargs):
"""Log model topology and add a hook for gradients"""
self.wandb_run.watch(net.module_)
def on_epoch_end(self, net, **kwargs):
"""Log values from the last history step and save best model"""
hist = net.history[-1]
keys_kept = filter_log_keys(hist, keys_ignored=self.keys_ignored_)
logged_vals = {k: hist[k] for k in keys_kept}
self.wandb_run.log(logged_vals)
# save best model
if self.save_model and hist['valid_loss_best']:
model_path = Path(self.wandb_run.dir) / 'best_model.pth'
with model_path.open('wb') as model_file:
net.save_params(f_params=model_file)
class PrintLog(Callback):
"""Print useful information from the model's history as a table.
By default, ``PrintLog`` prints everything from the history except
for ``'batches'``.
To determine the best loss, ``PrintLog`` looks for keys that end on
``'_best'`` and associates them with the corresponding loss. E.g.,
``'train_loss_best'`` will be matched with ``'train_loss'``. The
:class:`skorch.callbacks.EpochScoring` callback takes care of
creating those entries, which is why ``PrintLog`` works best in
conjunction with that callback.
``PrintLog`` treats keys with the ``'event_'`` prefix in a special
way. They are assumed to contain information about occasionally
occurring events. The ``False`` or ``None`` entries (indicating
that an event did not occur) are not printed, resulting in empty
cells in the table, and ``True`` entries are printed with ``+``
symbol. ``PrintLog`` groups all event columns together and pushes
them to the right, just before the ``'dur'`` column.
*Note*: ``PrintLog`` will not result in good outputs if the number
of columns varies between epochs, e.g. if the valid loss is only
present on every other epoch.
Parameters
----------
keys_ignored : str or list of str (default=None)
Key or list of keys that should not be part of the printed
table. Note that in addition to the keys provided by the user,
keys such as those starting with 'event_' or ending on '_best'
are ignored by default.
sink : callable (default=print)
The target that the output string is sent to. By default, the
output is printed to stdout, but the sink could also be a
logger, etc.
tablefmt : str (default='simple')
The format of the table. See the documentation of the ``tabulate``
package for more detail. Can be 'plain', 'grid', 'pipe', 'html',
'latex', among others.
floatfmt : str (default='.4f')
The number formatting. See the documentation of the ``tabulate``
package for more details.
stralign : str (default='right')
The alignment of columns with strings. Can be 'left', 'center',
'right', or ``None`` (disable alignment). Default is 'right' (to
be consistent with numerical columns).
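Examples
--------
``PrintLog`` is part of skorch's default callbacks, so it rarely needs to
be added by hand; its parameters can instead be set with the
double-underscore notation (a hedged sketch, with ``ClassifierModule``
standing in for your own module):
>>> net = NeuralNetClassifier(
...     ClassifierModule,
...     callbacks__print_log__keys_ignored='valid_acc',
... )
>>> # net.fit(X, y) then prints the table without the valid_acc column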
"""
def __init__(
self,
keys_ignored=None,
sink=print,
tablefmt='simple',
floatfmt='.4f',
stralign='right',
):
self.keys_ignored = keys_ignored
self.sink = sink
self.tablefmt = tablefmt
self.floatfmt = floatfmt
self.stralign = stralign
def initialize(self):
self.first_iteration_ = True
keys_ignored = self.keys_ignored
if isinstance(keys_ignored, str):
keys_ignored = [keys_ignored]
self.keys_ignored_ = set(keys_ignored or [])
self.keys_ignored_.add('batches')
return self
def format_row(self, row, key, color):
"""For a given row from the table, format it (i.e. floating
points and color if applicable).
"""
value = row[key]
if isinstance(value, bool) or value is None:
return '+' if value else ''
if not isinstance(value, Number):
return value
# determine if integer value
is_integer = float(value).is_integer()
template = '{}' if is_integer else '{:' + self.floatfmt + '}'
# if numeric, there could be a 'best' key
key_best = key + '_best'
if (key_best in row) and row[key_best]:
template = color + template + Ansi.ENDC.value
return template.format(value)
def _sorted_keys(self, keys):
"""Sort keys, dropping the ones that should be ignored.
The keys that are in ``self.ignored_keys`` or that end on
'_best' are dropped. Among the remaining keys:
* 'epoch' is put first;
* 'dur' is put last;
* keys that start with 'event_' are put just before 'dur';
* all remaining keys are sorted alphabetically.
"""
sorted_keys = []
# make sure 'epoch' comes first
if ('epoch' in keys) and ('epoch' not in self.keys_ignored_):
sorted_keys.append('epoch')
# ignore keys like *_best or event_*
for key in filter_log_keys(sorted(keys), keys_ignored=self.keys_ignored_):
if key != 'dur':
sorted_keys.append(key)
# add event_* keys
for key in sorted(keys):
if key.startswith('event_') and (key not in self.keys_ignored_):
sorted_keys.append(key)
# make sure 'dur' comes last
if ('dur' in keys) and ('dur' not in self.keys_ignored_):
sorted_keys.append('dur')
return sorted_keys
def _yield_keys_formatted(self, row):
colors = cycle([color.value for color in Ansi if color != color.ENDC])
for key, color in zip(self._sorted_keys(row.keys()), colors):
formatted = self.format_row(row, key, color=color)
if key.startswith('event_'):
key = key[6:]
yield key, formatted
def table(self, row):
headers = []
formatted = []
for key, formatted_row in self._yield_keys_formatted(row):
headers.append(key)
formatted.append(formatted_row)
return tabulate(
[formatted],
headers=headers,
tablefmt=self.tablefmt,
floatfmt=self.floatfmt,
stralign=self.stralign,
)
def _sink(self, text, verbose):
if (self.sink is not print) or verbose:
self.sink(text)
# pylint: disable=unused-argument
def on_epoch_end(self, net, **kwargs):
data = net.history[-1]
verbose = net.verbose
tabulated = self.table(data)
if self.first_iteration_:
header, lines = tabulated.split('\n', 2)[:2]
self._sink(header, verbose)
self._sink(lines, verbose)
self.first_iteration_ = False
self._sink(tabulated.rsplit('\n', 1)[-1], verbose)
if self.sink is print:
sys.stdout.flush()
class ProgressBar(Callback):
"""Display a progress bar for each epoch.
The progress bar includes elapsed and estimated remaining time for
the current epoch, the number of batches processed, and other
user-defined metrics. The progress bar is erased once the epoch is
completed.
``ProgressBar`` needs to know the total number of batches per
epoch in order to display a meaningful progress bar. By default,
this number is determined automatically using the dataset length
and the batch size. If this heuristic does not work for some
reason, you may either specify the number of batches explicitly
or let the ``ProgressBar`` count the actual number of batches in
the previous epoch.
For jupyter notebooks a non-ASCII progress bar can be printed
instead. To use this feature, you need to have `ipywidgets
<https://ipywidgets.readthedocs.io/en/stable/user_install.html>`_
installed.
Parameters
----------
batches_per_epoch : int, str (default='auto')
Either a concrete number or a string specifying the method used
to determine the number of batches per epoch automatically.
``'auto'`` means that the number is computed from the length of
the dataset and the batch size. ``'count'`` means that the
number is determined by counting the batches in the previous
epoch. Note that this will leave you without a progress bar at
the first epoch.
detect_notebook : bool (default=True)
If enabled, the progress bar determines if its current environment
is a jupyter notebook and switches to a non-ASCII progress bar.
postfix_keys : list of str (default=['train_loss', 'valid_loss'])
You can use this list to specify additional info displayed in the
progress bar such as metrics and losses. A prerequisite to this is
that these values are residing in the history on batch level already,
i.e. they must be accessible via
>>> net.history[-1, 'batches', -1, key]
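Examples
--------
A minimal usage sketch (``ClassifierModule`` stands in for your own
module):
>>> net = NeuralNetClassifier(
...     ClassifierModule,
...     callbacks=[ProgressBar(batches_per_epoch='count')],
... )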
"""
def __init__(
self,
batches_per_epoch='auto',
detect_notebook=True,
postfix_keys=None
):
self.batches_per_epoch = batches_per_epoch
self.detect_notebook = detect_notebook
self.postfix_keys = postfix_keys or ['train_loss', 'valid_loss']
def in_ipynb(self):
try:
return get_ipython().__class__.__name__ == 'ZMQInteractiveShell'
except NameError:
return False
def _use_notebook(self):
return self.in_ipynb() if self.detect_notebook else False
def _get_batch_size(self, net, training):
name = 'iterator_train' if training else 'iterator_valid'
net_params = net.get_params()
return net_params.get(name + '__batch_size', net_params['batch_size'])
def _get_batches_per_epoch_phase(self, net, dataset, training):
if dataset is None:
return 0
batch_size = self._get_batch_size(net, training)
return int(np.ceil(get_len(dataset) / batch_size))
def _get_batches_per_epoch(self, net, dataset_train, dataset_valid):
return (self._get_batches_per_epoch_phase(net, dataset_train, True) +
self._get_batches_per_epoch_phase(net, dataset_valid, False))
def _get_postfix_dict(self, net):
postfix = {}
for key in self.postfix_keys:
try:
postfix[key] = net.history[-1, 'batches', -1, key]
except KeyError:
pass
return postfix
# pylint: disable=attribute-defined-outside-init
def on_batch_end(self, net, **kwargs):
self.pbar_.set_postfix(self._get_postfix_dict(net), refresh=False)
self.pbar_.update()
# pylint: disable=attribute-defined-outside-init, arguments-differ
def on_epoch_begin(self, net, dataset_train=None, dataset_valid=None, **kwargs):
# Assume it is a number until proven otherwise.
batches_per_epoch = self.batches_per_epoch
if self.batches_per_epoch == 'auto':
batches_per_epoch = self._get_batches_per_epoch(
net, dataset_train, dataset_valid
)
elif self.batches_per_epoch == 'count':
if len(net.history) <= 1:
# No limit is known until the end of the first epoch.
batches_per_epoch = None
else:
batches_per_epoch = len(net.history[-2, 'batches'])
if self._use_notebook():
self.pbar_ = tqdm.tqdm_notebook(total=batches_per_epoch, leave=False)
else:
self.pbar_ = tqdm.tqdm(total=batches_per_epoch, leave=False)
def on_epoch_end(self, net, **kwargs):
self.pbar_.close()
def __getstate__(self):
# don't save away the temporary pbar_ object which gets created on
# epoch begin anew anyway. This avoids pickling errors with tqdm.
state = self.__dict__.copy()
del state['pbar_']
return state
def rename_tensorboard_key(key):
"""Rename keys from history to keys in TensorBoard
Specifically, prefixes all names with "Loss/" if they seem to be
losses.
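For example:
>>> rename_tensorboard_key('train_loss')
'Loss/train_loss'
>>> rename_tensorboard_key('dur')
'dur'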
"""
if key.startswith('train') or key.startswith('valid'):
key = 'Loss/' + key
return key
class TensorBoard(Callback):
"""Logs results from history to TensorBoard
"TensorBoard provides the visualization and tooling needed for
machine learning experimentation" (tensorboard_)
Use this callback to automatically log all interesting values from
your net's history to tensorboard after each epoch.
The best way to log additional information is to subclass this
callback and add your code to one of the ``on_*`` methods.
Examples
--------
>>> # Example to log the bias parameter as a histogram
>>> def extract_bias(module):
... return module.hidden.bias
>>> class MyTensorBoard(TensorBoard):
... def on_epoch_end(self, net, **kwargs):
... bias = extract_bias(net.module_)
... epoch = net.history[-1, 'epoch']
... self.writer.add_histogram('bias', bias, global_step=epoch)
... super().on_epoch_end(net, **kwargs) # call super last
Parameters
----------
writer : torch.utils.tensorboard.writer.SummaryWriter
Instantiated ``SummaryWriter`` class.
close_after_train : bool (default=True)
Whether to close the ``SummaryWriter`` object once training
finishes. Set this parameter to False if you want to continue
logging with the same writer or if you use it as a context
manager.
keys_ignored : str or list of str (default=None)
Key or list of keys that should not be logged to
tensorboard. Note that in addition to the keys provided by the
user, keys such as those starting with 'event_' or ending on
'_best' are ignored by default.
key_mapper : callable or function (default=rename_tensorboard_key)
This function maps a key name from the history to a tag in
tensorboard. This is useful because tensorboard can
automatically group similar tags if their names start with the
same prefix, followed by a forward slash. By default, this
callback will prefix all keys that start with "train" or "valid"
with the "Loss/" prefix.
.. _tensorboard: https://www.tensorflow.org/tensorboard/
"""
def __init__(
self,
writer,
close_after_train=True,
keys_ignored=None,
key_mapper=rename_tensorboard_key,
):
self.writer = writer
self.close_after_train = close_after_train
self.keys_ignored = keys_ignored
self.key_mapper = key_mapper
def initialize(self):
self.first_batch_ = True
keys_ignored = self.keys_ignored
if isinstance(keys_ignored, str):
keys_ignored = [keys_ignored]
self.keys_ignored_ = set(keys_ignored or [])
self.keys_ignored_.add('batches')
return self
def on_batch_end(self, net, **kwargs):
self.first_batch_ = False
def add_scalar_maybe(self, history, key, tag, global_step=None):
"""Add a scalar value from the history to TensorBoard
Will catch errors like missing keys or wrong value types.
Parameters
----------
history : skorch.History
History object saved as attribute on the neural net.
key : str
Key of the desired value in the history.
tag : str
Name of the tag used in TensorBoard.
global_step : int or None
Global step value to record.
"""
hist = history[-1]
val = hist.get(key)
if val is None:
return
global_step = global_step if global_step is not None else hist['epoch']
with suppress(NotImplementedError):
# pytorch raises NotImplementedError on wrong types
self.writer.add_scalar(
tag=tag,
scalar_value=val,
global_step=global_step,
)
def on_epoch_end(self, net, **kwargs):
"""Automatically log values from the last history step."""
history = net.history
hist = history[-1]
epoch = hist['epoch']
for key in filter_log_keys(hist, keys_ignored=self.keys_ignored_):
tag = self.key_mapper(key)
self.add_scalar_maybe(history, key=key, tag=tag, global_step=epoch)
def on_train_end(self, net, **kwargs):
if self.close_after_train:
self.writer.close()
class SacredLogger(Callback):
"""Logs results from history to Sacred.
Sacred is a tool to help you configure, organize, log and reproduce
experiments. Developed at IDSIA. See https://github.com/IDSIA/sacred.
Use this callback to automatically log all interesting values from
your net's history to Sacred.
If you want to log additional information, you can simply add it to
``History``. See the documentation on ``Callbacks``, and ``Scoring`` for
more information. Alternatively you can subclass this callback and extend
the ``on_*`` methods.
To use this logger, you first have to install Sacred:
$ pip install sacred
You might also install pymongo to use a mongodb backend. See the upstream_
documentation for more details. Once you have installed it, you can set up
a simple experiment and pass this Logger as a callback to your skorch
estimator:
# contents of sacred-experiment.py
>>> import numpy as np
>>> from sacred import Experiment
>>> from sklearn.datasets import make_classification
>>> from skorch.callbacks.logging import SacredLogger
>>> from skorch.callbacks.scoring import EpochScoring
>>> from skorch import NeuralNetClassifier
>>> from skorch.toy import make_classifier
>>> ex = Experiment()
>>> @ex.config
>>> def my_config():
... max_epochs = 20
... lr = 0.01
>>> X, y = make_classification()
>>> X, y = X.astype(np.float32), y.astype(np.int64)
>>> @ex.automain
>>> def main(_run, max_epochs, lr):
... # Take care to add additional scoring callbacks *before* the logger.
... net = NeuralNetClassifier(
... make_classifier(),
... max_epochs=max_epochs,
... lr=0.01,
... callbacks=[EpochScoring("f1"), SacredLogger(_run)]
... )
... # now fit your estimator to your data
... net.fit(X, y)
Then call this from the command line, e.g. like this:
``python sacred-experiment.py with max_epochs=15``
You can also change other options on the command line and optionally
specify a backend.
Parameters
----------
experiment : sacred.Experiment
Instantiated ``Experiment`` class.
log_on_batch_end : bool (default=False)
Whether to log loss and other metrics on batch level.
log_on_epoch_end : bool (default=True)
Whether to log loss and other metrics on epoch level.
batch_suffix : str (default=None)
A string that will be appended to all logged keys. By default (if set to
``None``) "_batch" is used if batch and epoch logging are both enabled
and no suffix is used otherwise.
epoch_suffix : str (default=None)
A string that will be appended to all logged keys. By default (if set to
``None``) "_epoch" is used if batch and epoch logging are both enabled
and no suffix is used otherwise.
keys_ignored : str or list of str (default=None)
Key or list of keys that should not be logged to Sacred. Note that in
addition to the keys provided by the user, keys such as those starting
with 'event_' or ending on '_best' are ignored by default.
.. _upstream: https://github.com/IDSIA/sacred#installing
"""
def __init__(
self,
experiment,
log_on_batch_end=False,
log_on_epoch_end=True,
batch_suffix=None,
epoch_suffix=None,
keys_ignored=None,
):
self.experiment = experiment
self.log_on_batch_end = log_on_batch_end
self.log_on_epoch_end = log_on_epoch_end
self.batch_suffix = batch_suffix
self.epoch_suffix = epoch_suffix
self.keys_ignored = keys_ignored
def initialize(self):
keys_ignored = self.keys_ignored
if isinstance(keys_ignored, str):
keys_ignored = [keys_ignored]
self.keys_ignored_ = set(keys_ignored or [])
self.keys_ignored_.add("batches")
self.batch_suffix_ = self.batch_suffix
self.epoch_suffix_ = self.epoch_suffix
if self.batch_suffix_ is None:
self.batch_suffix_ = (
"_batch" if self.log_on_batch_end and self.log_on_epoch_end else ""
)
if self.epoch_suffix_ is None:
self.epoch_suffix_ = (
"_epoch" if self.log_on_batch_end and self.log_on_epoch_end else ""
)
return self
def on_batch_end(self, net, **kwargs):
if not self.log_on_batch_end:
return
batch_logs = net.history[-1]["batches"][-1]
for key in filter_log_keys(batch_logs.keys(), self.keys_ignored_):
# skorch does not keep a batch count, but sacred will
# automatically associate the results with a counter.
self.experiment.log_scalar(key + self.batch_suffix_, batch_logs[key])
def on_epoch_end(self, net, **kwargs):
"""Automatically log values from the last history step."""
if not self.log_on_epoch_end:
return
epoch_logs = net.history[-1]
epoch = epoch_logs["epoch"]
for key in filter_log_keys(epoch_logs.keys(), self.keys_ignored_):
self.experiment.log_scalar(key + self.epoch_suffix_, epoch_logs[key], epoch)
class MlflowLogger(Callback):
"""Logs results from history and artifact to Mlflow
"MLflow is an open source platform for managing
the end-to-end machine learning lifecycle" (:doc:`mlflow:index`)
Use this callback to automatically log your metrics
and create/log artifacts to mlflow.
The best way to log additional information is to log directly to the
experiment object or subclass the ``on_*`` methods.
To use this logger, you first have to install Mlflow:
.. code-block::
$ pip install mlflow
Examples
--------
Mlflow :doc:`fluent API <mlflow:python_api/mlflow>`:
>>> import mlflow
>>> net = NeuralNetClassifier(..., callbacks=[MlflowLogger()])
>>> with mlflow.start_run():
... net.fit(X, y)
Custom :py:class:`run <mlflow.entities.Run>` and
:py:class:`client <mlflow.tracking.MlflowClient>`:
>>> from mlflow.tracking import MlflowClient
>>> client = MlflowClient()
>>> experiment = client.get_experiment_by_name('Default')
>>> run = client.create_run(experiment.experiment_id)
>>> net = NeuralNetClassifier(..., callbacks=[MlflowLogger(run, client)])
>>> net.fit(X, y)
Parameters
----------
run : mlflow.entities.Run (default=None)
Instantiated :py:class:`mlflow.entities.Run` class.
By default (if set to ``None``),
:py:func:`mlflow.active_run` is used to get the current run.
client : mlflow.tracking.MlflowClient (default=None)
Instantiated :py:class:`mlflow.tracking.MlflowClient` class.
By default (if set to ``None``),
``MlflowClient()`` is used, which by default has:
- the tracking URI set by :py:func:`mlflow.set_tracking_uri`
- the registry URI set by :py:func:`mlflow.set_registry_uri`
create_artifact : bool (default=True)
Whether to create artifacts for the network's
params, optimizer, criterion and history.
See :ref:`save_load`
terminate_after_train : bool (default=True)
Whether to terminate the ``Run`` object once training finishes.
log_on_batch_end : bool (default=False)
Whether to log loss and other metrics on batch level.
log_on_epoch_end : bool (default=True)
Whether to log loss and other metrics on epoch level.
batch_suffix : str (default=None)
A string that will be appended to all logged keys. By default (if set to
``None``) ``'_batch'`` is used if batch and epoch logging are both enabled
and no suffix is used otherwise.
epoch_suffix : str (default=None)
A string that will be appended to all logged keys. By default (if set to
``None``) ``'_epoch'`` is used if batch and epoch logging are both enabled
and no suffix is used otherwise.
keys_ignored : str or list of str (default=None)
Key or list of keys that should not be logged to Mlflow. Note that in
addition to the keys provided by the user, keys such as those starting
with ``'event_'`` or ending on ``'_best'`` are ignored by default.
"""
def __init__(
self,
run=None,
client=None,
create_artifact=True,
terminate_after_train=True,
log_on_batch_end=False,
log_on_epoch_end=True,
batch_suffix=None,
epoch_suffix=None,
keys_ignored=None,
):
self.run = run
self.client = client
self.create_artifact = create_artifact
self.terminate_after_train = terminate_after_train
self.log_on_batch_end = log_on_batch_end
self.log_on_epoch_end = log_on_epoch_end
self.batch_suffix = batch_suffix
self.epoch_suffix = epoch_suffix
self.keys_ignored = keys_ignored
def initialize(self):
self.run_ = self.run
if self.run_ is None:
import mlflow
self.run_ = mlflow.active_run()
self.client_ = self.client
if self.client_ is None:
from mlflow.tracking import MlflowClient
self.client_ = MlflowClient()
keys_ignored = self.keys_ignored
if isinstance(keys_ignored, str):
keys_ignored = [keys_ignored]
self.keys_ignored_ = set(keys_ignored or [])
self.keys_ignored_.add('batches')
self.batch_suffix_ = self._init_suffix(self.batch_suffix, '_batch')
self.epoch_suffix_ = self._init_suffix(self.epoch_suffix, '_epoch')
return self
def _init_suffix(self, suffix, default):
if suffix is not None:
return suffix
return default if self.log_on_batch_end and self.log_on_epoch_end else ''
def on_train_begin(self, net, **kwargs):
self._batch_count = 0
def on_batch_end(self, net, training, **kwargs):
if not self.log_on_batch_end:
return
self._batch_count += 1
batch_logs = net.history[-1]['batches'][-1]
self._iteration_log(batch_logs, self.batch_suffix_, self._batch_count)
def on_epoch_end(self, net, **kwargs):
if not self.log_on_epoch_end:
return
epoch_logs = net.history[-1]
self._iteration_log(epoch_logs, self.epoch_suffix_, len(net.history))
def _iteration_log(self, logs, suffix, step):
for key in filter_log_keys(logs.keys(), self.keys_ignored_):
self.client_.log_metric(
self.run_.info.run_id,
key + suffix,
logs[key],
step=step,
)
def on_train_end(self, net, **kwargs):
try:
self._log_artifacts(net)
finally:
if self.terminate_after_train:
self.client_.set_terminated(self.run_.info.run_id)
def _log_artifacts(self, net):
if not self.create_artifact:
return
with tempfile.TemporaryDirectory(prefix='skorch_mlflow_logger_') as dirpath:
dirpath = Path(dirpath)
params_filepath = dirpath / 'params.pth'
optimizer_filepath = dirpath / 'optimizer.pth'
criterion_filepath = dirpath / 'criterion.pth'
history_filepath = dirpath / 'history.json'
net.save_params(
f_params=params_filepath,
f_optimizer=optimizer_filepath,
f_criterion=criterion_filepath,
f_history=history_filepath,
)
self.client_.log_artifact(self.run_.info.run_id, params_filepath)
self.client_.log_artifact(self.run_.info.run_id, optimizer_filepath)
self.client_.log_artifact(self.run_.info.run_id, criterion_filepath)
self.client_.log_artifact(self.run_.info.run_id, history_filepath)
| 35.2 | 91 | 0.639633 |
f75c455870e4611cc859ce7df7434daa1dfa7d0c | 9,928 | py | Python | pyserver/pymysql3/tests/capabilities.py | joeedh/fairmotion | 5c322fc012cdd94ddc2f21d68264c845b3c2c770 | [
"MIT"
] | 1 | 2015-05-22T14:11:17.000Z | 2015-05-22T14:11:17.000Z | pyserver/pymysql3/tests/capabilities.py | joeedh/fairmotion | 5c322fc012cdd94ddc2f21d68264c845b3c2c770 | [
"MIT"
] | 2 | 2021-09-02T20:01:35.000Z | 2022-01-26T19:47:35.000Z | pyserver/pymysql3/tests/capabilities.py | joeedh/fairmotion | 5c322fc012cdd94ddc2f21d68264c845b3c2c770 | [
"MIT"
] | null | null | null | #!/usr/bin/env python -O
""" Script to test database capabilities and the DB-API interface
for functionality and memory leaks.
Adapted from a script by M-A Lemburg.
"""
from time import time
import array
import unittest
class DatabaseTest(unittest.TestCase):
db_module = None
connect_args = ()
connect_kwargs = dict()
create_table_extra = ''
rows = 10
debug = False
def setUp(self):
import gc
db = self.db_module.connect(*self.connect_args, **self.connect_kwargs)
self.connection = db
self.cursor = db.cursor()
self.BLOBText = ''.join([chr(i) for i in range(256)] * 100);
self.BLOBUText = ''.join([chr(i) for i in range(16384)])
self.BLOBBinary = self.db_module.Binary(''.join([chr(i) for i in range(256)] * 16))
leak_test = True
def tearDown(self):
if self.leak_test:
import gc
del self.cursor
orphans = gc.collect()
self.assertFalse(orphans, "%d orphaned objects found after deleting cursor" % orphans)
del self.connection
orphans = gc.collect()
self.assertFalse(orphans, "%d orphaned objects found after deleting connection" % orphans)
def table_exists(self, name):
try:
self.cursor.execute('select * from %s where 1=0' % name)
except:
return False
else:
return True
def quote_identifier(self, ident):
return '"%s"' % ident
def new_table_name(self):
i = id(self.cursor)
while True:
name = self.quote_identifier('tb%08x' % i)
if not self.table_exists(name):
return name
i = i + 1
def create_table(self, columndefs):
""" Create a table using a list of column definitions given in
columndefs.
generator must be a function taking arguments (row_number,
col_number) returning a suitable data object for insertion
into the table.
"""
self.table = self.new_table_name()
self.cursor.execute('CREATE TABLE %s (%s) %s' %
(self.table,
',\n'.join(columndefs),
self.create_table_extra))
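# check_data_integrity() below drives the generic round-trip tests: its
# ``generator`` argument is expected to be a callable
# generator(row_number, col_number) -> value. A hypothetical generator for a
# two-column (INT, CHAR(255)) table might look like:
#
#     def generator(row, col):
#         return row*row if col == 0 else ('%i' % (row % 10)) * 255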
def check_data_integrity(self, columndefs, generator):
# insert
self.create_table(columndefs)
insert_statement = ('INSERT INTO %s VALUES (%s)' %
(self.table,
','.join(['{!s}'] * len(columndefs))))
data = [ [ generator(i,j) for j in range(len(columndefs)) ]
for i in range(self.rows) ]
if self.debug:
print(data)
self.cursor.executemany(insert_statement, data)
self.connection.commit()
# verify
self.cursor.execute('select * from %s' % self.table)
l = self.cursor.fetchall()
if self.debug:
print(l)
self.assertEquals(len(l), self.rows)
try:
for i in range(self.rows):
for j in range(len(columndefs)):
self.assertEquals(l[i][j], generator(i,j))
finally:
if not self.debug:
self.cursor.execute('drop table %s' % (self.table))
def test_transactions(self):
columndefs = ( 'col1 INT', 'col2 VARCHAR(255)')
def generator(row, col):
if col == 0: return row
else: return ('%i' % (row%10))*255
self.create_table(columndefs)
insert_statement = ('INSERT INTO %s VALUES (%s)' %
(self.table,
','.join(['{!s}'] * len(columndefs))))
data = [ [ generator(i,j) for j in range(len(columndefs)) ]
for i in range(self.rows) ]
self.cursor.executemany(insert_statement, data)
# verify
self.connection.commit()
self.cursor.execute('select * from %s' % self.table)
l = self.cursor.fetchall()
self.assertEquals(len(l), self.rows)
for i in range(self.rows):
for j in range(len(columndefs)):
self.assertEquals(l[i][j], generator(i,j))
delete_statement = 'delete from %s where col1={!s}' % self.table
self.cursor.execute(delete_statement, (0,))
self.cursor.execute('select col1 from %s where col1=%s' % \
(self.table, 0))
l = self.cursor.fetchall()
self.assertFalse(l, "DELETE didn't work")
self.connection.rollback()
self.cursor.execute('select col1 from %s where col1=%s' % \
(self.table, 0))
l = self.cursor.fetchall()
self.assertTrue(len(l) == 1, "ROLLBACK didn't work")
self.cursor.execute('drop table %s' % (self.table))
def test_truncation(self):
columndefs = ( 'col1 INT', 'col2 VARCHAR(255)')
def generator(row, col):
if col == 0: return row
else: return ('{:d}'.format(row%10))*(round(255-self.rows/2)+row)
self.create_table(columndefs)
insert_statement = ('INSERT INTO %s VALUES (%s)' %
(self.table,
','.join(['{!s}'] * len(columndefs))))
try:
self.cursor.execute(insert_statement, (0, '0'*256))
except Warning:
if self.debug: print(self.cursor.messages)
except self.connection.DataError:
pass
else:
self.fail("Over-long column did not generate warnings/exception with single insert")
self.connection.rollback()
try:
for i in range(self.rows):
data = []
for j in range(len(columndefs)):
data.append(generator(i,j))
self.cursor.execute(insert_statement,tuple(data))
except Warning:
if self.debug: print(self.cursor.messages)
except self.connection.DataError:
pass
else:
self.fail("Over-long columns did not generate warnings/exception with execute()")
self.connection.rollback()
try:
data = [ [ generator(i,j) for j in range(len(columndefs)) ]
for i in range(self.rows) ]
self.cursor.executemany(insert_statement, data)
except Warning:
if self.debug: print(self.cursor.messages)
except self.connection.DataError:
pass
else:
self.fail("Over-long columns did not generate warnings/exception with executemany()")
self.connection.rollback()
self.cursor.execute('drop table %s' % (self.table))
def test_CHAR(self):
# Character data
def generator(row,col):
return ('%i' % ((row+col) % 10)) * 255
self.check_data_integrity(
('col1 char(255)','col2 char(255)'),
generator)
def test_INT(self):
# Number data
def generator(row,col):
return row*row
self.check_data_integrity(
('col1 INT',),
generator)
def test_DECIMAL(self):
# DECIMAL
def generator(row,col):
from decimal import Decimal
return Decimal("%d.%02d" % (row, col))
self.check_data_integrity(
('col1 DECIMAL(5,2)',),
generator)
def test_DATE(self):
ticks = time()
def generator(row,col):
return self.db_module.DateFromTicks(ticks+row*86400-col*1313)
self.check_data_integrity(
('col1 DATE',),
generator)
def test_TIME(self):
ticks = time()
def generator(row,col):
return self.db_module.TimeFromTicks(ticks+row*86400-col*1313)
self.check_data_integrity(
('col1 TIME',),
generator)
def test_DATETIME(self):
ticks = time()
def generator(row,col):
return self.db_module.TimestampFromTicks(ticks+row*86400-col*1313)
self.check_data_integrity(
('col1 DATETIME',),
generator)
def test_TIMESTAMP(self):
ticks = time()
def generator(row,col):
return self.db_module.TimestampFromTicks(ticks+row*86400-col*1313)
self.check_data_integrity(
('col1 TIMESTAMP',),
generator)
def test_fractional_TIMESTAMP(self):
ticks = time()
def generator(row,col):
return self.db_module.TimestampFromTicks(ticks+row*86400-col*1313+row*0.7*col/3.0)
self.check_data_integrity(
('col1 TIMESTAMP',),
generator)
def test_LONG(self):
def generator(row,col):
if col == 0:
return row
else:
return self.BLOBUText # 'BLOB Text ' * 1024
self.check_data_integrity(
('col1 INT','col2 LONG'),
generator)
def test_TEXT(self):
def generator(row,col):
return self.BLOBUText # 'BLOB Text ' * 1024
self.check_data_integrity(
('col2 TEXT',),
generator)
def test_LONG_BYTE(self):
def generator(row,col):
if col == 0:
return row
else:
return self.BLOBBinary # 'BLOB\000Binary ' * 1024
self.check_data_integrity(
('col1 INT','col2 LONG BYTE'),
generator)
def test_BLOB(self):
def generator(row,col):
if col == 0:
return row
else:
return self.BLOBBinary # 'BLOB\000Binary ' * 1024
self.check_data_integrity(
('col1 INT','col2 BLOB'),
generator)
| 34.234483 | 102 | 0.535153 |
f75c476988a516fd655e9ed8d719f2bf87767ff1 | 2,931 | py | Python | cumulusci/core/tests/test_salesforce_locators.py | hamedizadpanah-ibm/CumulusCI | eb93723e2da1ca66a7639b3197e6fab02d1bd24a | [
"BSD-3-Clause"
] | 1 | 2020-08-08T03:55:21.000Z | 2020-08-08T03:55:21.000Z | cumulusci/core/tests/test_salesforce_locators.py | Julian88Tex/CumulusCI | 82d5fab71b61fbab53c1b5fc6001452fa3f97da8 | [
"BSD-3-Clause"
] | null | null | null | cumulusci/core/tests/test_salesforce_locators.py | Julian88Tex/CumulusCI | 82d5fab71b61fbab53c1b5fc6001452fa3f97da8 | [
"BSD-3-Clause"
] | null | null | null | import unittest
from robot.libraries.BuiltIn import RobotNotRunningError
from cumulusci.robotframework.Salesforce import Salesforce
from unittest import mock
# FIXME: we shouldn't have to tweak these tests for every
# version. The tests should be smarter.
class TestLocators(unittest.TestCase):
@mock.patch("cumulusci.robotframework.Salesforce.Salesforce.get_latest_api_version")
def test_locators_in_robot_context(self, get_latest_api_version):
"""Verify we can get locators for the current org api version"""
get_latest_api_version.return_value = 49.0
# This instantiates the robot library, mimicking a robot library import.
# We've mocked out the code that would otherwise throw an error since
# we're not running in the context of a robot test. The library should
# return the latest version of the locators.
sf = Salesforce()
expected = "cumulusci.robotframework.locators_49"
actual = sf.locators_module.__name__
message = "expected to load '{}', actually loaded '{}'".format(expected, actual)
self.assertEqual(expected, actual, message)
@mock.patch(
"robot.libraries.BuiltIn.BuiltIn.get_library_instance",
side_effect=RobotNotRunningError(),
)
def test_locators_outside_robot_context(self, builtin_mock):
"""Verify that we get the latest locators if not running in the context of a robot test"""
        # This instantiates the robot library, mimicking a robot library import
# however, because we've mocked get_library_instance to throw an error,
# we expect the library to still be instantiated, but with the latest
# version of the locators.
sf = Salesforce()
expected = "cumulusci.robotframework.locators_49"
actual = sf.locators_module.__name__
message = "expected to load '{}', actually loaded '{}'".format(expected, actual)
self.assertEqual(expected, actual, message)
def test_locators_49(self):
"""Verify that locators_49 is a superset of the locators_48
This test is far from perfect, but it should at least flag a
catastrophic error in how locators for a version that augments
the locators from previous versions.
Note: this test assumes that locators_49 doesn't delete any of the
keys from 48.
"""
import cumulusci.robotframework.locators_48 as locators_48
import cumulusci.robotframework.locators_49 as locators_49
keys_48 = set(locators_48.lex_locators)
keys_49 = set(locators_49.lex_locators)
self.assertNotEqual(
id(locators_48.lex_locators),
id(locators_49.lex_locators),
"locators_48.lex_locators and locators_49.lex_locators are the same object",
)
self.assertTrue(len(keys_48) > 0)
self.assertTrue(keys_48.issubset(keys_49))
| 42.478261 | 98 | 0.701808 |
f75c495aaa1058037ba191ae4c2bb90de8f0df1b | 1,285 | py | Python | sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/category_py3.py | pjquirk/azure-sdk-for-python | cbf02ec4f177b96eae1dbbba87c34c2c93880150 | [
"MIT"
] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/category_py3.py | pjquirk/azure-sdk-for-python | cbf02ec4f177b96eae1dbbba87c34c2c93880150 | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/category_py3.py | xiafu-msft/azure-sdk-for-python | 4d9560cfd519ee60667f3cc2f5295a58c18625db | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Category(Model):
"""An object describing identified category.
:param name: Name of the category.
:type name: str
:param score: Scoring of the category.
:type score: float
:param detail: Details of the identified category.
:type detail:
~azure.cognitiveservices.vision.computervision.models.CategoryDetail
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'score': {'key': 'score', 'type': 'float'},
'detail': {'key': 'detail', 'type': 'CategoryDetail'},
}
def __init__(self, *, name: str=None, score: float=None, detail=None, **kwargs) -> None:
super(Category, self).__init__(**kwargs)
self.name = name
self.score = score
self.detail = detail
| 33.815789 | 92 | 0.582101 |
f75c5b41a23be600e5e460faf0704a5278bd21c4 | 10,921 | py | Python | indico/core/db/sqlalchemy/locations.py | antzshrek/indico | 25bedccf4b648d85dcd4b44d710726f8c85e3731 | [
"MIT"
] | null | null | null | indico/core/db/sqlalchemy/locations.py | antzshrek/indico | 25bedccf4b648d85dcd4b44d710726f8c85e3731 | [
"MIT"
] | null | null | null | indico/core/db/sqlalchemy/locations.py | antzshrek/indico | 25bedccf4b648d85dcd4b44d710726f8c85e3731 | [
"MIT"
] | null | null | null | # This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from sqlalchemy.event import listens_for
from sqlalchemy.ext.declarative import declared_attr
from indico.core.db import db
from indico.util.decorators import strict_classproperty
class LocationMixin(object):
"""Mixin to store location information in a model.
A location in this context can be either a reference to a room in
the roombooking module or a room and location name.
In case the location is inherited, the `location_parent` property
is used to determine the parent object from which the location is
inherited (which may also inherit its location).
"""
#: The name of the backref added to the `Room` model for items
#: which are associated with that room.
location_backref_name = None
#: Whether the item can inherit its location from a parent. If
#: this is ``False``, `location_parent` should not be overridden.
allow_location_inheritance = True
@strict_classproperty
@classmethod
def __auto_table_args(cls):
checks = [db.CheckConstraint("(room_id IS NULL) OR (venue_name = '' AND room_name = '')",
'no_custom_location_if_room'),
db.CheckConstraint("(venue_id IS NULL) OR (venue_name = '')",
'no_venue_name_if_venue_id'),
db.CheckConstraint("(room_id IS NULL) OR (venue_id IS NOT NULL)",
'venue_id_if_room_id')]
if cls.allow_location_inheritance:
checks.append(db.CheckConstraint("NOT inherit_location OR (venue_id IS NULL AND room_id IS NULL AND "
"venue_name = '' AND room_name = '' AND address = '')",
'inherited_location'))
fkeys = [db.ForeignKeyConstraint(['venue_id', 'room_id'],
['roombooking.rooms.location_id', 'roombooking.rooms.id'])]
return tuple(checks) + tuple(fkeys)
@classmethod
def register_location_events(cls):
"""Registers sqlalchemy events needed by this mixin.
Call this method after the definition of a model which uses
this mixin class.
"""
@listens_for(cls.own_venue, 'set')
def _venue_changed(target, value, *unused):
if value is not None:
target.own_venue_name = ''
@listens_for(cls.own_room, 'set')
def _room_changed(target, value, *unused):
if value is not None:
target.own_room_name = ''
target.own_venue = value.location
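    # Following the sketch above, a concrete model would register these
    # listeners right after its class definition (hypothetical name):
    #
    #     SessionBlock.register_location_events()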
@property
def location_parent(self):
"""The parent object to consult if the location is inherited."""
if not self.allow_location_inheritance:
return None
raise NotImplementedError
@declared_attr
def inherit_location(cls):
if cls.allow_location_inheritance:
return db.Column(
db.Boolean,
nullable=False,
default=True
)
else:
return False
@declared_attr
def own_room_id(cls):
return db.Column(
'room_id',
db.Integer,
db.ForeignKey('roombooking.rooms.id'),
nullable=True,
index=True
)
@declared_attr
def own_venue_id(cls):
return db.Column(
'venue_id',
db.Integer,
db.ForeignKey('roombooking.locations.id'),
nullable=True,
index=True
)
@declared_attr
def own_venue_name(cls):
return db.Column(
'venue_name',
db.String,
nullable=False,
default=''
)
@declared_attr
def own_room_name(cls):
return db.Column(
'room_name',
db.String,
nullable=False,
default=''
)
@declared_attr
def own_address(cls):
return db.Column(
'address',
db.Text,
nullable=False,
default=''
)
@declared_attr
def own_venue(cls):
return db.relationship(
'Location',
foreign_keys=[cls.own_venue_id],
lazy=True,
backref=db.backref(
cls.location_backref_name,
lazy='dynamic'
)
)
@declared_attr
def own_room(cls):
return db.relationship(
'Room',
foreign_keys=[cls.own_room_id],
lazy=True,
backref=db.backref(
cls.location_backref_name,
lazy='dynamic'
)
)
@property
def venue(self):
"""The venue (Location) where this item is located.
This is ``None`` if a custom venue name was entered.
"""
if self.inherit_location and self.location_parent is None:
return None
return self.own_venue if not self.inherit_location else self.location_parent.venue
@venue.setter
def venue(self, venue):
self.own_venue = venue
@property
def room(self):
"""The Room where this item is located.
This is ``None`` if a custom room name was entered.
"""
if self.inherit_location and self.location_parent is None:
return None
return self.own_room if not self.inherit_location else self.location_parent.room
@room.setter
def room(self, room):
self.own_room = room
@property
def venue_name(self):
"""The name of the location where this item is located."""
if self.inherit_location and self.location_parent is None:
return ''
venue = self.venue
if venue is not None:
return venue.name
return self.own_venue_name if not self.inherit_location else self.location_parent.venue_name
@venue_name.setter
def venue_name(self, venue_name):
self.own_venue_name = venue_name
def get_room_name(self, full=True, verbose=False):
"""The name of the room where this item is located.
        If both ``full`` and ``verbose`` are set to ``False``, the plain
        room name is returned. ``full`` and ``verbose`` cannot both be set
        to ``True``.
:param full: If the room has a "friendly name" (e.g. 'Main
Amphitheatre'), a composite name will be returned.
:param verbose: The `verbose_name` of the room will be returned.
"""
assert sum([full, verbose]) <= 1
if self.inherit_location and self.location_parent is None:
return ''
room = self.room
if room is not None:
if full:
return room.full_name
elif verbose and room.verbose_name:
return room.verbose_name
else:
return room.name
return (self.own_room_name if not self.inherit_location
else self.location_parent.get_room_name(full=full, verbose=verbose))
@property
def room_name(self):
"""The name of the room where this item is located."""
return self.get_room_name(full=True)
@room_name.setter
def room_name(self, room_name):
self.own_room_name = room_name
@property
def has_location_info(self):
"""Whether the object has basic location information set"""
return bool(self.venue_name or self.room_name)
@property
def address(self):
"""The address where this item is located."""
if self.inherit_location and self.location_parent is None:
return ''
return self.own_address if not self.inherit_location else self.location_parent.address
@address.setter
def address(self, address):
self.own_address = address
@property
def location_data(self):
"""All location data for the item.
        Returns a dict containing ``source``, ``inheriting``, ``venue``,
        ``room``, ``room_name``, ``venue_name`` and ``address``. The
``source`` is the object the location data is taken from, i.e.
either the item itself or the object the location data is
inherited from.
"""
data_source = self
while data_source and data_source.inherit_location:
data_source = data_source.location_parent
if data_source is None:
return {'source': None, 'venue': None, 'room': None, 'room_name': '', 'venue_name': '', 'address': '',
'inheriting': False}
else:
return {'source': data_source, 'venue': data_source.venue, 'room': data_source.room,
'room_name': data_source.room_name, 'venue_name': data_source.venue_name,
'address': data_source.address, 'inheriting': self.inherit_location}
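    # Shape of the returned mapping for an object holding its own data
    # (values are purely illustrative):
    #
    #     {'source': <obj>, 'venue': <Location>, 'room': <Room>,
    #      'room_name': '500/1-001', 'venue_name': 'CERN',
    #      'address': '', 'inheriting': False}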
@location_data.setter
def location_data(self, data):
self.inherit_location = data['inheriting']
self.venue_name = ''
self.room_name = ''
if self.inherit_location:
self.room = None
self.venue = None
self.address = ''
else:
self.room = data.get('room')
self.venue = data.get('venue')
self.address = data.get('address', '')
if not self.room:
self.room_name = data.get('room_name', '')
if not self.venue:
self.venue_name = data.get('venue_name', '')
@property
def widget_location_data(self):
"""All location data for the item, meant to be used in the location
widget.
"""
location_data = self.location_data
return {
'address': location_data['address'],
'room_name': location_data['room_name'],
'room_id': location_data['room'].id if location_data['room'] else '',
'venue_name': location_data['venue_name'],
'venue_id': location_data['venue'].id if location_data['venue'] else '',
}
def get_inherited_widget_location_data(self, init_inheritance):
"""Determine whether to return the object's location or the parent's.
If the object inherits its location, then the location source object is
the object's parent, so return the source's location. If the object
doesn't inherit its location, then the location source object is the
object itself, so return the source's parent location.
"""
return (self.location_parent.widget_location_data if not init_inheritance and self.location_parent
else self.widget_location_data)
| 34.669841 | 114 | 0.594543 |
f75c8902eda9e3eee7a9dfc2b8a317da035d897c | 13,629 | py | Python | 3DCNN.py | YLFF/2004P_Pytorch-Networks | 2d84fe1d904b17d0c55aa2a7a7dba82dea3dae05 | [
"MIT"
] | 454 | 2019-12-26T15:04:02.000Z | 2020-10-24T13:57:35.000Z | 3DCNN.py | YLFF/2004P_Pytorch-Networks | 2d84fe1d904b17d0c55aa2a7a7dba82dea3dae05 | [
"MIT"
] | 1 | 2020-05-14T06:01:18.000Z | 2020-05-14T06:01:18.000Z | 3DCNN.py | YLFF/2004P_Pytorch-Networks | 2d84fe1d904b17d0c55aa2a7a7dba82dea3dae05 | [
"MIT"
] | 17 | 2019-12-27T12:19:47.000Z | 2020-02-03T07:42:05.000Z | # --------------------------------------------------------------------------- #
# ResNet, CVPR2016 bestpaper, https://arxiv.org/abs/1512.03385
# pytorch implementation by Haiyang Liu (haiyangliu1997@gmail.com)
# --------------------------------------------------------------------------- #
import torch
import torch.nn as nn
import torch.nn.functional as F
from config import cfg
from utils import load_cfg,model_complexity
__all__ = ['ResNet18','ResNet34','ResNet50','ResNet101','ResNet152']
class LambdaLayer(nn.Module):
def __init__(self, lambd):
super(LambdaLayer, self).__init__()
self.lambd = lambd
def forward(self, x):
return self.lambd(x)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self,in_dim,out_dim,stride=1,op="A"):
super(BasicBlock,self).__init__()
self.subconv_1 = nn.Sequential(
nn.Conv2d(in_dim,out_dim,3,stride,1,bias=False),
nn.BatchNorm2d(out_dim),
nn.ReLU(inplace=True),)
self.subconv_2 = nn.Sequential(
nn.Conv2d(out_dim,out_dim,3,1,1,bias=False),
nn.BatchNorm2d(out_dim))
if in_dim == out_dim and stride == 1:
self.downsample = nn.Sequential()
elif op == 'A':
self.downsample =LambdaLayer(lambda x: F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, out_dim//4, out_dim//4), "constant", 0))
else:
self.downsample = nn.Sequential(
nn.Conv2d(in_dim,out_dim,1,stride,0,bias=False),
nn.BatchNorm2d(out_dim),
)
def forward(self,input_):
x_0 = self.subconv_1(input_)
x_1 = self.subconv_2(x_0)
x_input = self.downsample(input_)
x_final = F.relu(x_input + x_1,inplace=True)
return x_final
class BottleNeck(nn.Module):
expansion = 4
def __init__(self,in_dim,out_dim,stride=1):
super(BottleNeck,self).__init__()
self.subconv_1 = nn.Sequential(
nn.Conv2d(in_dim,int(out_dim/self.expansion),1,stride,0,bias=False),
nn.BatchNorm2d(int(out_dim/self.expansion)),
nn.ReLU(inplace=True),)
self.subconv_2 = nn.Sequential(
nn.Conv2d(int(out_dim/self.expansion),
int(out_dim/self.expansion),3,1,1,bias=False),
nn.BatchNorm2d(int(out_dim/self.expansion)),
nn.ReLU(inplace=True),)
self.subconv_3 = nn.Sequential(
nn.Conv2d(int(out_dim/self.expansion),out_dim,1,1,0,bias=False),
nn.BatchNorm2d(out_dim),)
if in_dim == out_dim and stride == 1:
self.downsample = None
else:
self.downsample = nn.Sequential(
nn.Conv2d(in_dim,out_dim,1,stride,0,bias=False),
nn.BatchNorm2d(out_dim),
)
def forward(self,input_):
x_input = input_
x_0 = self.subconv_1(input_)
x_1 = self.subconv_2(x_0)
x_2 = self.subconv_3(x_1)
if self.downsample is not None:
x_input = self.downsample(input_)
print(x_input.shape)
x_final = F.relu(x_input+x_2,inplace=True)
return x_final
class ResNet(nn.Module):
def __init__(self, cfg, logger):
'''
block, BLOCK_LIST, in_dim,
class_num, BASE=64, use_fc=True, CONV1=(7,2,3),
MAX_POOL=True, pretrained=False
'''
super(ResNet,self).__init__()
self.head_conv = nn.Sequential(
nn.Conv2d(cfg.IN_DIM,cfg.BASE,cfg.CONV1[0],cfg.CONV1[1],cfg.CONV1[2],bias=False),
nn.BatchNorm2d(cfg.BASE),
nn.ReLU(inplace=True),)
if cfg.MAX_POOL:
self.maxpool_1 = nn.MaxPool2d(3,2,1)
else:
self.maxpool_1 = nn.Sequential()
block = BottleNeck if cfg.BLOCK == 'bottleneck' else BasicBlock
b_ = block.expansion
self.layer_1 = self._make_layer(block,cfg.BASE,cfg.BASE*b_,cfg.BLOCK_LIST[0],1)
self.layer_2 = self._make_layer(block,cfg.BASE*b_,cfg.BASE*2*b_,cfg.BLOCK_LIST[1],2)
self.layer_3 = self._make_layer(block,cfg.BASE*2*b_,cfg.BASE*4*b_,cfg.BLOCK_LIST[2],2)
self.layer_4 = self._make_layer(block,cfg.BASE*4*b_,cfg.BASE*8*b_,cfg.BLOCK_LIST[3],2)
final_feature = cfg.BASE*4*b_ if cfg.BLOCK_LIST[3] == 0 else cfg.BASE*8*b_
if cfg.USE_FC:
self.avgpool_1 = nn.AdaptiveAvgPool2d((1,1))
self.fc_1 = nn.Sequential(
nn.Flatten(),
nn.Linear(final_feature,cfg.CLASS_NUM),)
else:
self.avgpool_1 = nn.Sequential()
self.fc_1 = nn.Sequential()
self.logger = logger
self.pretrained = cfg.PRETRAINED
self._initialization()
def _initialization(self):
if self.pretrained is not False:
self.modules.load_state_dict(model_zoo.load_url(model_urls[self.pretrained]))
            # TODO(liu): check whether this is correct.
else:
for name, sub_module in self.named_modules():
if isinstance(sub_module, nn.Conv2d) or isinstance(sub_module, nn.ConvTranspose2d) or \
isinstance(sub_module, nn.Linear):
nn.init.kaiming_normal_(sub_module.weight)
# nn.init.kaiming_normal_(sub_module.weight,mode='fan_out'
# ,nonlinearity='relu')
if self.logger is not None:
self.logger.info('init {}.weight as kaiming_normal_'.format(name))
if sub_module.bias is not None:
nn.init.constant_(sub_module.bias, 0.0)
if self.logger is not None:
self.logger.info('init {}.bias as 0'.format(name))
# elif isinstance(sub_module, nn.BatchNorm2d):
# nn.init.constant_(sub_module.weight,1)
# nn.init.constant_(sub_module.bias,0)
# if self.logger is not None:
# self.logger.info('init {}.weight as constant_ 1'.format(name))
# self.logger.info('init {}.bias as constant_ 0'.format(name))
def _make_layer(self,block,in_dim,out_dim,layer_num,stride):
net_layers = []
if layer_num == 0:
return nn.Sequential()
else:
for layer in range(layer_num):
if layer == 0:
net_layers.append(block(in_dim,out_dim,stride))
else:
net_layers.append(block(out_dim,out_dim,1))
return nn.Sequential(*net_layers)
def forward(self,input_):
x = self.head_conv(input_)
x = self.maxpool_1(x)
x = self.layer_1(x)
x = self.layer_2(x)
x = self.layer_3(x)
x = self.layer_4(x)
x = self.avgpool_1(x)
x = self.fc_1(x)
return x
class ThreeDCNN(nn.Module):
def __init__(self,cfg,logger):
super(ThreeDCNN,self).__init__()
self.res1 = ResNet(cfg,logger)
self.res2 = ResNet(cfg,logger)
self.res3 = ResNet(cfg,logger)
self.getheatmap_1 = nn.Conv2d(128,19,1,1,0)
self.getheatmap_2 = nn.Conv2d(128,19,1,1,0)
self.getheatmap_3 = nn.Conv2d(128,19,1,1,0)
self.getdepth_1 = nn.Conv2d(128,1,1,1,0)
self.getdepth_2 = nn.Conv2d(128,1,1,1,0)
self.getdepth_3 = nn.Conv2d(128,1,1,1,0)
self.tdcnn1 = nn.Conv3d(19,128,3,1,1)#b,in,d,h,w,
self.tdcnn2 = nn.Conv3d(128,128,3,1,1)
self.maxpool3d_1 = nn.MaxPool3d(3,1,0)
self.tdcnn3 = nn.Conv3d(128,128,3,1,1)
self.tdcnn331 = nn.Conv3d(128,128,3,1,1)
self.tdcnn332 = nn.Conv3d(128,128,3,1,1)
self.tdcnn333 = nn.Conv3d(128,128,3,1,1)
self.tdcnn334 = nn.Conv3d(128,128,3,1,1)
self.tdcnn335 = nn.Conv3d(128,128,3,1,1)
self.tdcnn336 = nn.Conv3d(128,128,3,1,1)
self.tdcnn337= nn.Conv3d(128,128,3,1,1)
self.tdcnn338 = nn.Conv3d(128,128,3,1,1)
self.tdcnn339 = nn.Conv3d(128,128,3,1,1)
self.tdcnn3310 = nn.Conv3d(128,128,3,1,1)
self.tdcnn4 = nn.Conv3d(128,128,3,1,1)
self.tdcnn5 = nn.Conv3d(128,19,3,1,1)
self.tdcnn6 = nn.Conv3d(1,128,3,1,1)#b,in,d,h,w,
self.tdcnn7 = nn.Conv3d(128,128,3,1,1)
self.maxpool3d_2 = nn.MaxPool3d(3,1,0)
self.tdcnn8 = nn.Conv3d(128,128,3,1,1)
self.tdcnn88 = nn.Conv3d(128,128,3,1,1)
self.tdcnn9 = nn.Conv3d(128,128,3,1,1)
self.tdcnn10 = nn.Conv3d(128,1,3,1,1)
def forward(self,x):
x1 = x[:,0,:,:,:]
x2 = x[:,1,:,:,:]
x3 = x[:,2,:,:,:]
output1 = self.res1(x1)
output2 = self.res2(x2)
output3 = self.res3(x3)
#print(output1.shape)
de_output1 = self.getdepth_1(output1)
de_output2 = self.getdepth_2(output2)
de_output3 = self.getdepth_3(output3)
he_output1 = self.getheatmap_1(output1)#(b,19,h,w)
he_output2 = self.getheatmap_2(output2)
he_output3 = self.getheatmap_3(output3)
he_3d = torch.cat((he_output1.unsqueeze(2),
he_output2.unsqueeze(2),
he_output3.unsqueeze(2)),dim=2)#(b,19,3,h,w)
de_3d = torch.cat((de_output1.unsqueeze(2),
de_output2.unsqueeze(2),
de_output3.unsqueeze(2)),dim=2)
he_3d = self.tdcnn1(he_3d)
he_3d = self.tdcnn2(he_3d)
he_3d = self.maxpool3d_1(he_3d)
he_3d = self.tdcnn3(he_3d)
he_3d = self.tdcnn331(he_3d)
he_3d = self.tdcnn332(he_3d)
he_3d = self.tdcnn333(he_3d)
he_3d = self.tdcnn334(he_3d)
he_3d = self.tdcnn335(he_3d)
he_3d = self.tdcnn336(he_3d)
he_3d = self.tdcnn337(he_3d)
he_3d = self.tdcnn338(he_3d)
he_3d = self.tdcnn339(he_3d)
he_3d = self.tdcnn3310(he_3d)
he_3d = self.tdcnn4(he_3d)
he_3d = self.tdcnn5(he_3d)
de_3d = self.tdcnn6(de_3d)
de_3d = self.tdcnn7(de_3d)
de_3d = self.maxpool3d_2(de_3d)
de_3d = self.tdcnn8(de_3d)
de_3d = self.tdcnn88(de_3d)
de_3d = self.tdcnn9(de_3d)
de_3d = self.tdcnn10(de_3d)
return de_3d, he_3d
if __name__ == "__main__":
logger = load_cfg(cfg)
model = ThreeDCNN(cfg.MODEL,logger).cuda()
from ptflops import get_model_complexity_info
flops, params = get_model_complexity_info(model, (3,3,368,368),
as_strings=True, print_per_layer_stat=True)
logger.info('{:<30} {:<8}'.format('Computational complexity: ', flops))
logger.info('{:<30} {:<8}'.format('Number of parameters: ', params))
fakeinput = torch.ones((8,3,3,368,368)).cuda()
output = model(fakeinput)
mem = torch.cuda.memory_cached() / 1E9
print(mem)
# ------------------------------- mistakes ---------------------------------- #
# downsample also need add batchnorm
# add first, then relu
# add input, not first conv output.
# no bias for all conv layers
# when using /, need add int()
# usually we use fan_in for LeCun and He init, here we use fan_out
# ---------------------------------- end ------------------------------------ #
# ---------------------------------- notes ---------------------------------- #
# main idea: shortcut connection
# parameters: ~25.6M Res50, ~60M Res152, ~0.27M Res20, BN+ReLU
# sgd+momentum 1e-1 0.9 divide 10 * 3
# batch size 256
# weight decay 1e-4
# input: resize and crop small side to 256×256 then augment to 224
# output: linear 1000 + softmax
# TODO: Check details in training, testing. bn-relu-conv?
# TODO: Training check: False
# ---------------------------------- end ------------------------------------ #
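# A rough sketch of the schedule in the notes above (milestone epochs are an
# assumption, not taken from this repo):
#     optimizer = torch.optim.SGD(model.parameters(), lr=1e-1,
#                                 momentum=0.9, weight_decay=1e-4)
#     scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
#                                                      milestones=[30, 60, 90],
#                                                      gamma=0.1)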
# ------------------------- resnet18 model summary -------------------------- #
# Layer (type) Output Shape Param #
# ================================================================
# Conv2d-1 [-1, 64, 112, 112] 9,408
# BatchNorm2d-2 [-1, 64, 112, 112] 128
# ReLU-3 [-1, 64, 112, 112] 0
# MaxPool2d-4 [-1, 64, 56, 56] 0
# Conv2d-5 [-1, 64, 56, 56] 36,864
# BatchNorm2d-6 [-1, 64, 56, 56] 128
# ReLU-7 [-1, 64, 56, 56] 0
# Conv2d-8 [-1, 64, 56, 56] 36,864
# ...
# BatchNorm2d-54 [-1, 512, 7, 7] 1,024
# ReLU-55 [-1, 512, 7, 7] 0
# Conv2d-56 [-1, 512, 7, 7] 2,359,296
# BatchNorm2d-57 [-1, 512, 7, 7] 1,024
# BasicBlock-58 [-1, 512, 7, 7] 0
# AdaptiveAvgPool2d-59 [-1, 512, 1, 1] 0
# Flatten-60 [-1, 512] 0
# Linear-61 [-1, 1000] 513,000
# Softmax-62 [-1, 1000] 0
# ================================================================
# Total params: 11,689,512
# Trainable params: 11,689,512
# Non-trainable params: 0
# ----------------------------------------------------------------
# Input size (MB): 0.57
# Forward/backward pass size (MB): 57.06
# Params size (MB): 44.59
# Estimated Total Size (MB): 102.23
# ---------------------------------- end ------------------------------------ # | 38.069832 | 129 | 0.525424 |
f75ce26d914323394f3cea7ed8f74f28ce6f1602 | 906 | py | Python | racing_data/meet.py | predictive-punter/racing_data | 0d1b0ad0fe7591ce859d528af719349c0c7534d3 | [
"MIT"
] | 15 | 2017-04-08T05:22:49.000Z | 2021-04-20T17:33:22.000Z | racing_data/meet.py | phillc73/racing_data | 0d1b0ad0fe7591ce859d528af719349c0c7534d3 | [
"MIT"
] | 54 | 2016-07-21T10:35:45.000Z | 2016-07-30T23:06:50.000Z | racing_data/meet.py | phillc73/racing_data | 0d1b0ad0fe7591ce859d528af719349c0c7534d3 | [
"MIT"
] | 7 | 2016-12-15T06:02:54.000Z | 2020-04-20T15:32:55.000Z | from . import Entity
class Meet(Entity):
"""A meet represents a collection of races occurring at a given track on a given date"""
def __str__(self):
return '{track} on {date:%Y-%m-%d}'.format(track=self['track'], date=self['date'].astimezone(self.provider.local_timezone))
@property
def has_expired(self):
"""Expire meets that were last updated prior to their actual date"""
return self['updated_at'] < self['date'] or super(Meet, self).has_expired
@property
def races(self):
"""Return a list of races occurring at this meet"""
return self.get_cached_property('races', self.provider.get_races_by_meet, self)
def is_equivalent_to(self, other_meet):
"""This meet is equivalent to other_meet if both have the same date and track"""
return self['date'] == other_meet['date'] and self['track'] == other_meet['track']
| 33.555556 | 131 | 0.66777 |
f75cf9bcb6eb50703ecaf3f3d3de676a716e4588 | 38,508 | py | Python | adafruit_circuitpython_libs/adafruit-circuitpython-bundle-py-20210214/lib/adafruit_bno08x/__init__.py | jacoblb64/pico_rgb_keypad_hid | 3251ca6a98ef86d9f98c54f639c4d61810601a0b | [
"MIT"
] | 47 | 2021-02-15T23:02:36.000Z | 2022-03-04T21:30:03.000Z | adafruit_circuitpython_libs/adafruit-circuitpython-bundle-py-20210214/lib/adafruit_bno08x/__init__.py | jacoblb64/pico_rgb_keypad_hid | 3251ca6a98ef86d9f98c54f639c4d61810601a0b | [
"MIT"
] | 7 | 2021-02-19T20:00:08.000Z | 2022-01-14T10:51:12.000Z | adafruit_circuitpython_libs/adafruit-circuitpython-bundle-py-20210214/lib/adafruit_bno08x/__init__.py | jacoblb64/pico_rgb_keypad_hid | 3251ca6a98ef86d9f98c54f639c4d61810601a0b | [
"MIT"
] | 14 | 2021-02-20T17:40:56.000Z | 2022-01-01T19:53:38.000Z | # pylint:disable=too-many-lines
# SPDX-FileCopyrightText: Copyright (c) 2020 Bryan Siepert for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
`adafruit_bno08x`
================================================================================
Helper library for the Hillcrest Laboratories BNO08x IMUs
* Author(s): Bryan Siepert
Implementation Notes
--------------------
**Hardware:**
* `Adafruit BNO08x Breakout <https:www.adafruit.com/products/4754>`_
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https:# github.com/adafruit/circuitpython/releases
* `Adafruit's Bus Device library <https:# github.com/adafruit/Adafruit_CircuitPython_BusDevice>`_
"""
__version__ = "1.0.4"
__repo__ = "https:# github.com/adafruit/Adafruit_CircuitPython_BNO08x.git"
from struct import unpack_from, pack_into
from collections import namedtuple
import time
from micropython import const
# TODO: Remove on release
from .debug import channels, reports
# TODO: shorten names
# Channel 0: the SHTP command channel
BNO_CHANNEL_SHTP_COMMAND = const(0)
BNO_CHANNEL_EXE = const(1)
_BNO_CHANNEL_CONTROL = const(2)
_BNO_CHANNEL_INPUT_SENSOR_REPORTS = const(3)
_BNO_CHANNEL_WAKE_INPUT_SENSOR_REPORTS = const(4)
_BNO_CHANNEL_GYRO_ROTATION_VECTOR = const(5)
_GET_FEATURE_REQUEST = const(0xFE)
_SET_FEATURE_COMMAND = const(0xFD)
_GET_FEATURE_RESPONSE = const(0xFC)
_BASE_TIMESTAMP = const(0xFB)
_TIMESTAMP_REBASE = const(0xFA)
_SHTP_REPORT_PRODUCT_ID_RESPONSE = const(0xF8)
_SHTP_REPORT_PRODUCT_ID_REQUEST = const(0xF9)
_FRS_WRITE_REQUEST = const(0xF7)
_FRS_WRITE_DATA = const(0xF6)
_FRS_WRITE_RESPONSE = const(0xF5)
_FRS_READ_REQUEST = const(0xF4)
_FRS_READ_RESPONSE = const(0xF3)
_COMMAND_REQUEST = const(0xF2)
_COMMAND_RESPONSE = const(0xF1)
# DCD/ ME Calibration commands and sub-commands
_SAVE_DCD = const(0x6)
_ME_CALIBRATE = const(0x7)
_ME_CAL_CONFIG = const(0x00)
_ME_GET_CAL = const(0x01)
# Calibrated Acceleration (m/s2)
BNO_REPORT_ACCELEROMETER = const(0x01)
# Calibrated gyroscope (rad/s).
BNO_REPORT_GYROSCOPE = const(0x02)
# Magnetic field calibrated (in µTesla). The fully calibrated magnetic field measurement.
BNO_REPORT_MAGNETOMETER = const(0x03)
# Linear acceleration (m/s2). Acceleration of the device with gravity removed
BNO_REPORT_LINEAR_ACCELERATION = const(0x04)
# Rotation Vector
BNO_REPORT_ROTATION_VECTOR = const(0x05)
BNO_REPORT_GAME_ROTATION_VECTOR = const(0x08)
BNO_REPORT_GEOMAGNETIC_ROTATION_VECTOR = const(0x09)
BNO_REPORT_STEP_COUNTER = const(0x11)
BNO_REPORT_RAW_ACCELEROMETER = const(0x14)
BNO_REPORT_RAW_GYROSCOPE = const(0x15)
BNO_REPORT_RAW_MAGNETOMETER = const(0x16)
BNO_REPORT_SHAKE_DETECTOR = const(0x19)
BNO_REPORT_STABILITY_CLASSIFIER = const(0x13)
BNO_REPORT_ACTIVITY_CLASSIFIER = const(0x1E)
BNO_REPORT_GYRO_INTEGRATED_ROTATION_VECTOR = const(0x2A)
# TODOz:
# Calibrated Acceleration (m/s2)
# Euler Angles (in degrees?)
# CALIBRATION
# RAW ACCEL, MAG, GYRO # Sfe says each needs the non-raw enabled to work
_DEFAULT_REPORT_INTERVAL = const(50000) # in microseconds = 50ms
_QUAT_READ_TIMEOUT = 0.500 # timeout in seconds
_PACKET_READ_TIMEOUT = 2.000 # timeout in seconds
_FEATURE_ENABLE_TIMEOUT = 2.0
_DEFAULT_TIMEOUT = 2.0
_BNO08X_CMD_RESET = const(0x01)
_QUAT_Q_POINT = const(14)
_BNO_HEADER_LEN = const(4)
_Q_POINT_14_SCALAR = 2 ** (14 * -1)
_Q_POINT_12_SCALAR = 2 ** (12 * -1)
# _Q_POINT_10_SCALAR = 2 ** (10 * -1)
_Q_POINT_9_SCALAR = 2 ** (9 * -1)
_Q_POINT_8_SCALAR = 2 ** (8 * -1)
_Q_POINT_4_SCALAR = 2 ** (4 * -1)
_GYRO_SCALAR = _Q_POINT_9_SCALAR
_ACCEL_SCALAR = _Q_POINT_8_SCALAR
_QUAT_SCALAR = _Q_POINT_14_SCALAR
_GEO_QUAT_SCALAR = _Q_POINT_12_SCALAR
_MAG_SCALAR = _Q_POINT_4_SCALAR
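# Fixed-point ("Q point") scaling: each report value is a signed integer that
# is multiplied by 2**-Q to recover the real number. For example, a raw
# rotation-vector component of 16384 at Q=14 decodes as
# 16384 * _Q_POINT_14_SCALAR == 1.0, and 8192 decodes as 0.5.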
_REPORT_LENGTHS = {
_SHTP_REPORT_PRODUCT_ID_RESPONSE: 16,
_GET_FEATURE_RESPONSE: 17,
_COMMAND_RESPONSE: 16,
_SHTP_REPORT_PRODUCT_ID_RESPONSE: 16,
_BASE_TIMESTAMP: 5,
_TIMESTAMP_REBASE: 5,
}
# these raw reports require their counterpart to be enabled
_RAW_REPORTS = {
BNO_REPORT_RAW_ACCELEROMETER: BNO_REPORT_ACCELEROMETER,
BNO_REPORT_RAW_GYROSCOPE: BNO_REPORT_GYROSCOPE,
BNO_REPORT_RAW_MAGNETOMETER: BNO_REPORT_MAGNETOMETER,
}
_AVAIL_SENSOR_REPORTS = {
BNO_REPORT_ACCELEROMETER: (_Q_POINT_8_SCALAR, 3, 10),
BNO_REPORT_GYROSCOPE: (_Q_POINT_9_SCALAR, 3, 10),
BNO_REPORT_MAGNETOMETER: (_Q_POINT_4_SCALAR, 3, 10),
BNO_REPORT_LINEAR_ACCELERATION: (_Q_POINT_8_SCALAR, 3, 10),
BNO_REPORT_ROTATION_VECTOR: (_Q_POINT_14_SCALAR, 4, 14),
BNO_REPORT_GEOMAGNETIC_ROTATION_VECTOR: (_Q_POINT_12_SCALAR, 4, 14),
BNO_REPORT_GAME_ROTATION_VECTOR: (_Q_POINT_14_SCALAR, 4, 12),
BNO_REPORT_STEP_COUNTER: (1, 1, 12),
BNO_REPORT_SHAKE_DETECTOR: (1, 1, 6),
BNO_REPORT_STABILITY_CLASSIFIER: (1, 1, 6),
BNO_REPORT_ACTIVITY_CLASSIFIER: (1, 1, 16),
BNO_REPORT_RAW_ACCELEROMETER: (1, 3, 16),
BNO_REPORT_RAW_GYROSCOPE: (1, 3, 16),
BNO_REPORT_RAW_MAGNETOMETER: (1, 3, 16),
}
_INITIAL_REPORTS = {
BNO_REPORT_ACTIVITY_CLASSIFIER: {
"Tilting": -1,
"most_likely": "Unknown",
"OnStairs": -1,
"On-Foot": -1,
"Other": -1,
"On-Bicycle": -1,
"Still": -1,
"Walking": -1,
"Unknown": -1,
"Running": -1,
"In-Vehicle": -1,
},
BNO_REPORT_STABILITY_CLASSIFIER: "Unknown",
BNO_REPORT_ROTATION_VECTOR: (0.0, 0.0, 0.0, 0.0),
BNO_REPORT_GAME_ROTATION_VECTOR: (0.0, 0.0, 0.0, 0.0),
BNO_REPORT_GEOMAGNETIC_ROTATION_VECTOR: (0.0, 0.0, 0.0, 0.0),
}
_ENABLED_ACTIVITIES = (
0x1FF # All activities; 1 bit set for each of 8 activities, + Unknown
)
DATA_BUFFER_SIZE = const(512) # data buffer size. obviously eats ram
PacketHeader = namedtuple(
"PacketHeader",
["channel_number", "sequence_number", "data_length", "packet_byte_count",],
)
REPORT_ACCURACY_STATUS = [
"Accuracy Unreliable",
"Low Accuracy",
"Medium Accuracy",
"High Accuracy",
]
class PacketError(Exception):
"""Raised when the packet couldnt be parsed"""
pass # pylint:disable=unnecessary-pass
def _elapsed(start_time):
return time.monotonic() - start_time
############ PACKET PARSING ###########################
def _parse_sensor_report_data(report_bytes):
"""Parses reports with only 16-bit fields"""
data_offset = 4 # this may not always be true
report_id = report_bytes[0]
scalar, count, _report_length = _AVAIL_SENSOR_REPORTS[report_id]
if report_id in _RAW_REPORTS:
# raw reports are unsigned
format_str = "<H"
else:
format_str = "<h"
results = []
accuracy = unpack_from("<B", report_bytes, offset=2)[0]
accuracy &= 0b11
for _offset_idx in range(count):
total_offset = data_offset + (_offset_idx * 2)
raw_data = unpack_from(format_str, report_bytes, offset=total_offset)[0]
scaled_data = raw_data * scalar
results.append(scaled_data)
results_tuple = tuple(results)
return (results_tuple, accuracy)
def _parse_step_couter_report(report_bytes):
return unpack_from("<H", report_bytes, offset=8)[0]
def _parse_stability_classifier_report(report_bytes):
classification_bitfield = unpack_from("<B", report_bytes, offset=4)[0]
return ["Unknown", "On Table", "Stationary", "Stable", "In motion"][
classification_bitfield
]
# report_id
# feature_report_id
# feature_flags
# change_sensitivity
# report_interval
# batch_interval_word
# sensor_specific_configuration_word
def _parse_get_feature_response_report(report_bytes):
return unpack_from("<BBBHIII", report_bytes)
# 0 Report ID = 0x1E
# 1 Sequence number
# 2 Status
# 3 Delay
# 4 Page Number + EOS
# 5 Most likely state
# 6-15 Classification (10 x Page Number) + confidence
def _parse_activity_classifier_report(report_bytes):
activities = [
"Unknown",
"In-Vehicle", # look
"On-Bicycle", # at
"On-Foot", # all
"Still", # this
"Tilting", # room
"Walking", # for
"Running", # activities
"OnStairs",
]
end_and_page_number = unpack_from("<B", report_bytes, offset=4)[0]
# last_page = (end_and_page_number & 0b10000000) > 0
page_number = end_and_page_number & 0x7F
most_likely = unpack_from("<B", report_bytes, offset=5)[0]
confidences = unpack_from("<BBBBBBBBB", report_bytes, offset=6)
classification = {}
classification["most_likely"] = activities[most_likely]
for idx, raw_confidence in enumerate(confidences):
confidence = (10 * page_number) + raw_confidence
activity_string = activities[idx]
classification[activity_string] = confidence
return classification
def _parse_shake_report(report_bytes):
shake_bitfield = unpack_from("<H", report_bytes, offset=4)[0]
return (shake_bitfield & 0x111) > 0
def parse_sensor_id(buffer):
"""Parse the fields of a product id report"""
if not buffer[0] == _SHTP_REPORT_PRODUCT_ID_RESPONSE:
raise AttributeError("Wrong report id for sensor id: %s" % hex(buffer[0]))
sw_major = unpack_from("<B", buffer, offset=2)[0]
sw_minor = unpack_from("<B", buffer, offset=3)[0]
sw_patch = unpack_from("<H", buffer, offset=12)[0]
sw_part_number = unpack_from("<I", buffer, offset=4)[0]
sw_build_number = unpack_from("<I", buffer, offset=8)[0]
return (sw_part_number, sw_major, sw_minor, sw_patch, sw_build_number)
def _parse_command_response(report_bytes):
# CMD response report:
# 0 Report ID = 0xF1
# 1 Sequence number
# 2 Command
# 3 Command sequence number
# 4 Response sequence number
# 5 R0-10 A set of response values. The interpretation of these values is specific
# to the response for each command.
report_body = unpack_from("<BBBBB", report_bytes)
response_values = unpack_from("<BBBBBBBBBBB", report_bytes, offset=5)
return (report_body, response_values)
def _insert_command_request_report(
command, buffer, next_sequence_number, command_params=None
):
if command_params and len(command_params) > 9:
raise AttributeError(
"Command request reports can only have up to 9 arguments but %d were given"
% len(command_params)
)
for _i in range(12):
buffer[_i] = 0
buffer[0] = _COMMAND_REQUEST
buffer[1] = next_sequence_number
buffer[2] = command
if command_params is None:
return
for idx, param in enumerate(command_params):
buffer[3 + idx] = param
def _report_length(report_id):
if report_id < 0xF0: # it's a sensor report
return _AVAIL_SENSOR_REPORTS[report_id][2]
return _REPORT_LENGTHS[report_id]
def _separate_batch(packet, report_slices):
# get first report id, loop up its report length
# read that many bytes, parse them
next_byte_index = 0
while next_byte_index < packet.header.data_length:
report_id = packet.data[next_byte_index]
required_bytes = _report_length(report_id)
unprocessed_byte_count = packet.header.data_length - next_byte_index
# handle incomplete remainder
if unprocessed_byte_count < required_bytes:
raise RuntimeError("Unprocessable Batch bytes", unprocessed_byte_count)
# we have enough bytes to read
# add a slice to the list that was passed in
report_slice = packet.data[next_byte_index : next_byte_index + required_bytes]
report_slices.append([report_slice[0], report_slice])
next_byte_index = next_byte_index + required_bytes
# class Report:
# _buffer = bytearray(DATA_BUFFER_SIZE)
# _report_obj = Report(_buffer)
# @classmethod
# def get_report(cls)
# return cls._report_obj
class Packet:
"""A class representing a Hillcrest LaboratorySensor Hub Transport packet"""
def __init__(self, packet_bytes):
self.header = self.header_from_buffer(packet_bytes)
data_end_index = self.header.data_length + _BNO_HEADER_LEN
self.data = packet_bytes[_BNO_HEADER_LEN:data_end_index]
def __str__(self):
length = self.header.packet_byte_count
outstr = "\n\t\t********** Packet *************\n"
outstr += "DBG::\t\t HEADER:\n"
outstr += "DBG::\t\t Data Len: %d\n" % (self.header.data_length)
outstr += "DBG::\t\t Channel: %s (%d)\n" % (
channels[self.channel_number],
self.channel_number,
)
if self.channel_number in [
_BNO_CHANNEL_CONTROL,
_BNO_CHANNEL_INPUT_SENSOR_REPORTS,
]:
if self.report_id in reports:
outstr += "DBG::\t\t \tReport Type: %s (0x%x)\n" % (
reports[self.report_id],
self.report_id,
)
else:
outstr += "DBG::\t\t \t** UNKNOWN Report Type **: %s\n" % hex(
self.report_id
)
if (
self.report_id > 0xF0
and len(self.data) >= 6
and self.data[5] in reports
):
outstr += "DBG::\t\t \tSensor Report Type: %s(%s)\n" % (
reports[self.data[5]],
hex(self.data[5]),
)
if (
self.report_id == 0xFC
and len(self.data) >= 6
and self.data[1] in reports
):
outstr += "DBG::\t\t \tEnabled Feature: %s(%s)\n" % (
reports[self.data[1]],
hex(self.data[5]),
)
outstr += "DBG::\t\t Sequence number: %s\n" % self.header.sequence_number
outstr += "\n"
outstr += "DBG::\t\t Data:"
for idx, packet_byte in enumerate(self.data[:length]):
packet_index = idx + 4
if (packet_index % 4) == 0:
outstr += "\nDBG::\t\t[0x{:02X}] ".format(packet_index)
outstr += "0x{:02X} ".format(packet_byte)
outstr += "\n"
outstr += "\t\t*******************************\n"
return outstr
@property
def report_id(self):
"""The Packet's Report ID"""
return self.data[0]
@property
def channel_number(self):
"""The packet channel"""
return self.header.channel_number
@classmethod
def header_from_buffer(cls, packet_bytes):
"""Creates a `PacketHeader` object from a given buffer"""
packet_byte_count = unpack_from("<H", packet_bytes)[0]
packet_byte_count &= ~0x8000
channel_number = unpack_from("<B", packet_bytes, offset=2)[0]
sequence_number = unpack_from("<B", packet_bytes, offset=3)[0]
data_length = max(0, packet_byte_count - 4)
header = PacketHeader(
channel_number, sequence_number, data_length, packet_byte_count
)
return header
@classmethod
def is_error(cls, header):
"""Returns True if the header is an error condition"""
if header.channel_number > 5:
return True
if header.packet_byte_count == 0xFFFF and header.sequence_number == 0xFF:
return True
return False
class BNO08X: # pylint: disable=too-many-instance-attributes, too-many-public-methods
"""Library for the BNO08x IMUs from Hillcrest Laboratories
:param ~busio.I2C i2c_bus: The I2C bus the BNO08x is connected to.
"""
def __init__(self, reset=None, debug=False):
self._debug = debug
self._reset = reset
self._dbg("********** __init__ *************")
self._data_buffer = bytearray(DATA_BUFFER_SIZE)
self._command_buffer = bytearray(12)
self._packet_slices = []
# TODO: this is wrong there should be one per channel per direction
self._sequence_number = [0, 0, 0, 0, 0, 0]
self._two_ended_sequence_numbers = {
"send": {}, # holds the next seq number to send with the report id as a key
"receive": {},
}
self._dcd_saved_at = -1
self._me_calibration_started_at = -1
self._calibration_complete = False
self._magnetometer_accuracy = 0
self._wait_for_initialize = True
self._init_complete = False
self._id_read = False
# for saving the most recent reading when decoding several packets
self._readings = {}
self.initialize()
def initialize(self):
"""Initialize the sensor"""
for _ in range(3):
self.hard_reset()
self.soft_reset()
try:
if self._check_id():
break
except: # pylint:disable=bare-except
time.sleep(0.5)
else:
raise RuntimeError("Could not read ID")
@property
def magnetic(self):
"""A tuple of the current magnetic field measurements on the X, Y, and Z axes"""
self._process_available_packets() # decorator?
try:
return self._readings[BNO_REPORT_MAGNETOMETER]
except KeyError:
raise RuntimeError("No magfield report found, is it enabled?") from None
@property
def quaternion(self):
"""A quaternion representing the current rotation vector"""
self._process_available_packets()
try:
return self._readings[BNO_REPORT_ROTATION_VECTOR]
except KeyError:
raise RuntimeError("No quaternion report found, is it enabled?") from None
@property
def geomagnetic_quaternion(self):
"""A quaternion representing the current geomagnetic rotation vector"""
self._process_available_packets()
try:
return self._readings[BNO_REPORT_GEOMAGNETIC_ROTATION_VECTOR]
except KeyError:
raise RuntimeError(
"No geomag quaternion report found, is it enabled?"
) from None
@property
def game_quaternion(self):
"""A quaternion representing the current rotation vector expressed as a quaternion with no
specific reference for heading, while roll and pitch are referenced against gravity. To
prevent sudden jumps in heading due to corrections, the `game_quaternion` property is not
corrected using the magnetometer. Some drift is expected"""
self._process_available_packets()
try:
return self._readings[BNO_REPORT_GAME_ROTATION_VECTOR]
except KeyError:
raise RuntimeError(
"No game quaternion report found, is it enabled?"
) from None
@property
def steps(self):
"""The number of steps detected since the sensor was initialized"""
self._process_available_packets()
try:
return self._readings[BNO_REPORT_STEP_COUNTER]
except KeyError:
raise RuntimeError("No steps report found, is it enabled?") from None
@property
def linear_acceleration(self):
"""A tuple representing the current linear acceleration values on the X, Y, and Z
axes in meters per second squared"""
self._process_available_packets()
try:
return self._readings[BNO_REPORT_LINEAR_ACCELERATION]
except KeyError:
raise RuntimeError("No lin. accel report found, is it enabled?") from None
@property
def acceleration(self):
"""A tuple representing the acceleration measurements on the X, Y, and Z
axes in meters per second squared"""
self._process_available_packets()
try:
return self._readings[BNO_REPORT_ACCELEROMETER]
except KeyError:
raise RuntimeError("No accel report found, is it enabled?") from None
@property
def gyro(self):
"""A tuple representing Gyro's rotation measurements on the X, Y, and Z
axes in radians per second"""
self._process_available_packets()
try:
return self._readings[BNO_REPORT_GYROSCOPE]
except KeyError:
raise RuntimeError("No gyro report found, is it enabled?") from None
@property
def shake(self):
"""True if a shake was detected on any axis since the last time it was checked
This property has a "latching" behavior where once a shake is detected, it will stay in a
"shaken" state until the value is read. This prevents missing shake events but means that
this property is not guaranteed to reflect the shake state at the moment it is read
"""
self._process_available_packets()
try:
shake_detected = self._readings[BNO_REPORT_SHAKE_DETECTOR]
# clear on read
if shake_detected:
self._readings[BNO_REPORT_SHAKE_DETECTOR] = False
return shake_detected
except KeyError:
raise RuntimeError("No shake report found, is it enabled?") from None
@property
def stability_classification(self):
"""Returns the sensor's assessment of it's current stability, one of:
* "Unknown" - The sensor is unable to classify the current stability
* "On Table" - The sensor is at rest on a stable surface with very little vibration
* "Stationary" - The sensor’s motion is below the stable threshold but\
the stable duration requirement has not been met. This output is only available when\
gyro calibration is enabled
* "Stable" - The sensor’s motion has met the stable threshold and duration requirements.
* "In motion" - The sensor is moving.
"""
self._process_available_packets()
try:
stability_classification = self._readings[BNO_REPORT_STABILITY_CLASSIFIER]
return stability_classification
except KeyError:
raise RuntimeError(
"No stability classification report found, is it enabled?"
) from None
@property
def activity_classification(self):
"""Returns the sensor's assessment of the activity that is creating the motions\
that it is sensing, one of:
* "Unknown"
* "In-Vehicle"
* "On-Bicycle"
* "On-Foot"
* "Still"
* "Tilting"
* "Walking"
* "Running"
* "On Stairs"
"""
self._process_available_packets()
try:
activity_classification = self._readings[BNO_REPORT_ACTIVITY_CLASSIFIER]
return activity_classification
except KeyError:
raise RuntimeError(
"No activity classification report found, is it enabled?"
) from None
@property
def raw_acceleration(self):
"""Returns the sensor's raw, unscaled value from the accelerometer registers"""
self._process_available_packets()
try:
raw_acceleration = self._readings[BNO_REPORT_RAW_ACCELEROMETER]
return raw_acceleration
except KeyError:
raise RuntimeError(
"No raw acceleration report found, is it enabled?"
) from None
@property
def raw_gyro(self):
"""Returns the sensor's raw, unscaled value from the gyro registers"""
self._process_available_packets()
try:
raw_gyro = self._readings[BNO_REPORT_RAW_GYROSCOPE]
return raw_gyro
except KeyError:
raise RuntimeError("No raw gyro report found, is it enabled?") from None
@property
def raw_magnetic(self):
"""Returns the sensor's raw, unscaled value from the magnetometer registers"""
self._process_available_packets()
try:
raw_magnetic = self._readings[BNO_REPORT_RAW_MAGNETOMETER]
return raw_magnetic
except KeyError:
raise RuntimeError("No raw magnetic report found, is it enabled?") from None
def begin_calibration(self):
"""Begin the sensor's self-calibration routine"""
# start calibration for accel, gyro, and mag
self._send_me_command(
[
1, # calibrate accel
1, # calibrate gyro
1, # calibrate mag
_ME_CAL_CONFIG,
0, # calibrate planar acceleration
0, # 'on_table' calibration
0, # reserved
0, # reserved
0, # reserved
]
)
self._calibration_complete = False
@property
def calibration_status(self):
"""Get the status of the self-calibration"""
self._send_me_command(
[
0, # calibrate accel
0, # calibrate gyro
0, # calibrate mag
_ME_GET_CAL,
0, # calibrate planar acceleration
0, # 'on_table' calibration
0, # reserved
0, # reserved
0, # reserved
]
)
return self._magnetometer_accuracy
def _send_me_command(self, subcommand_params):
start_time = time.monotonic()
local_buffer = self._command_buffer
_insert_command_request_report(
_ME_CALIBRATE,
self._command_buffer, # should use self._data_buffer :\ but send_packet don't
self._get_report_seq_id(_COMMAND_REQUEST),
subcommand_params,
)
self._send_packet(_BNO_CHANNEL_CONTROL, local_buffer)
self._increment_report_seq(_COMMAND_REQUEST)
while _elapsed(start_time) < _DEFAULT_TIMEOUT:
self._process_available_packets()
if self._me_calibration_started_at > start_time:
break
def save_calibration_data(self):
"""Save the self-calibration data"""
# send a DCD save command
start_time = time.monotonic()
local_buffer = bytearray(12)
_insert_command_request_report(
_SAVE_DCD,
local_buffer, # should use self._data_buffer :\ but send_packet don't
self._get_report_seq_id(_COMMAND_REQUEST),
)
self._send_packet(_BNO_CHANNEL_CONTROL, local_buffer)
self._increment_report_seq(_COMMAND_REQUEST)
while _elapsed(start_time) < _DEFAULT_TIMEOUT:
self._process_available_packets()
if self._dcd_saved_at > start_time:
return
raise RuntimeError("Could not save calibration data")
############### private/helper methods ###############
# # decorator?
def _process_available_packets(self, max_packets=None):
processed_count = 0
while self._data_ready:
if max_packets and processed_count > max_packets:
return
# print("reading a packet")
try:
new_packet = self._read_packet()
except PacketError:
continue
self._handle_packet(new_packet)
processed_count += 1
self._dbg("")
# print("Processed", processed_count, "packets")
self._dbg("")
self._dbg("")
self._dbg(" ** DONE! **")
def _wait_for_packet_type(self, channel_number, report_id=None, timeout=5.0):
if report_id:
report_id_str = " with report id %s" % hex(report_id)
else:
report_id_str = ""
self._dbg("** Waiting for packet on channel", channel_number, report_id_str)
start_time = time.monotonic()
while _elapsed(start_time) < timeout:
new_packet = self._wait_for_packet()
if new_packet.channel_number == channel_number:
if report_id:
if new_packet.report_id == report_id:
return new_packet
else:
return new_packet
if new_packet.channel_number not in (
BNO_CHANNEL_EXE,
BNO_CHANNEL_SHTP_COMMAND,
):
self._dbg("passing packet to handler for de-slicing")
self._handle_packet(new_packet)
raise RuntimeError("Timed out waiting for a packet on channel", channel_number)
def _wait_for_packet(self, timeout=_PACKET_READ_TIMEOUT):
start_time = time.monotonic()
while _elapsed(start_time) < timeout:
if not self._data_ready:
continue
new_packet = self._read_packet()
return new_packet
raise RuntimeError("Timed out waiting for a packet")
# update the cached sequence number so we know what to increment from
# TODO: this is wrong there should be one per channel per direction
# and apparently per report as well
def _update_sequence_number(self, new_packet):
channel = new_packet.channel_number
seq = new_packet.header.sequence_number
self._sequence_number[channel] = seq
def _handle_packet(self, packet):
# split out reports first
try:
_separate_batch(packet, self._packet_slices)
while len(self._packet_slices) > 0:
self._process_report(*self._packet_slices.pop())
except Exception as error:
print(packet)
raise error
def _handle_control_report(self, report_id, report_bytes):
if report_id == _SHTP_REPORT_PRODUCT_ID_RESPONSE:
(
sw_part_number,
sw_major,
sw_minor,
sw_patch,
sw_build_number,
) = parse_sensor_id(report_bytes)
self._dbg("FROM PACKET SLICE:")
self._dbg("*** Part Number: %d" % sw_part_number)
self._dbg("*** Software Version: %d.%d.%d" % (sw_major, sw_minor, sw_patch))
self._dbg("\tBuild: %d" % (sw_build_number))
self._dbg("")
if report_id == _GET_FEATURE_RESPONSE:
get_feature_report = _parse_get_feature_response_report(report_bytes)
_report_id, feature_report_id, *_remainder = get_feature_report
self._readings[feature_report_id] = _INITIAL_REPORTS.get(
feature_report_id, (0.0, 0.0, 0.0)
)
if report_id == _COMMAND_RESPONSE:
self._handle_command_response(report_bytes)
def _handle_command_response(self, report_bytes):
(report_body, response_values) = _parse_command_response(report_bytes)
(
_report_id,
_seq_number,
command,
_command_seq_number,
_response_seq_number,
) = report_body
# status, accel_en, gyro_en, mag_en, planar_en, table_en, *_reserved) = response_values
command_status, *_rest = response_values
if command == _ME_CALIBRATE and command_status == 0:
self._me_calibration_started_at = time.monotonic()
if command == _SAVE_DCD:
if command_status == 0:
self._dcd_saved_at = time.monotonic()
else:
raise RuntimeError("Unable to save calibration data")
def _process_report(self, report_id, report_bytes):
if report_id >= 0xF0:
self._handle_control_report(report_id, report_bytes)
return
self._dbg("\tProcessing report:", reports[report_id])
if self._debug:
outstr = ""
for idx, packet_byte in enumerate(report_bytes):
packet_index = idx
if (packet_index % 4) == 0:
outstr += "\nDBG::\t\t[0x{:02X}] ".format(packet_index)
outstr += "0x{:02X} ".format(packet_byte)
self._dbg(outstr)
self._dbg("")
if report_id == BNO_REPORT_STEP_COUNTER:
self._readings[report_id] = _parse_step_couter_report(report_bytes)
return
if report_id == BNO_REPORT_SHAKE_DETECTOR:
shake_detected = _parse_shake_report(report_bytes)
# shake not previously detected - auto cleared by 'shake' property
try:
if not self._readings[BNO_REPORT_SHAKE_DETECTOR]:
self._readings[BNO_REPORT_SHAKE_DETECTOR] = shake_detected
except KeyError:
pass
return
if report_id == BNO_REPORT_STABILITY_CLASSIFIER:
stability_classification = _parse_stability_classifier_report(report_bytes)
self._readings[BNO_REPORT_STABILITY_CLASSIFIER] = stability_classification
return
if report_id == BNO_REPORT_ACTIVITY_CLASSIFIER:
activity_classification = _parse_activity_classifier_report(report_bytes)
self._readings[BNO_REPORT_ACTIVITY_CLASSIFIER] = activity_classification
return
sensor_data, accuracy = _parse_sensor_report_data(report_bytes)
if report_id == BNO_REPORT_MAGNETOMETER:
self._magnetometer_accuracy = accuracy
# TODO: FIXME; Sensor reports are batched in a LIFO which means that multiple reports
# for the same type will end with the oldest/last being kept and the other
# newer reports thrown away
self._readings[report_id] = sensor_data
# TODO: Make this a Packet creation
@staticmethod
def _get_feature_enable_report(
feature_id, report_interval=_DEFAULT_REPORT_INTERVAL, sensor_specific_config=0
):
set_feature_report = bytearray(17)
set_feature_report[0] = _SET_FEATURE_COMMAND
set_feature_report[1] = feature_id
pack_into("<I", set_feature_report, 5, report_interval)
pack_into("<I", set_feature_report, 13, sensor_specific_config)
return set_feature_report
# TODO: add docs for available features
# TODO2: I think this should call an fn that imports all the bits for the given feature
# so we're not carrying around stuff for extra features
def enable_feature(self, feature_id):
"""Used to enable a given feature of the BNO08x"""
self._dbg("\n********** Enabling feature id:", feature_id, "**********")
if feature_id == BNO_REPORT_ACTIVITY_CLASSIFIER:
set_feature_report = self._get_feature_enable_report(
feature_id, sensor_specific_config=_ENABLED_ACTIVITIES
)
else:
set_feature_report = self._get_feature_enable_report(feature_id)
feature_dependency = _RAW_REPORTS.get(feature_id, None)
# if the feature was enabled it will have a key in the readings dict
if feature_dependency and feature_dependency not in self._readings:
            self._dbg("Enabling feature dependency:", feature_dependency)
self.enable_feature(feature_dependency)
self._dbg("Enabling", feature_id)
self._send_packet(_BNO_CHANNEL_CONTROL, set_feature_report)
start_time = time.monotonic() # 1
while _elapsed(start_time) < _FEATURE_ENABLE_TIMEOUT:
self._process_available_packets(max_packets=10)
if feature_id in self._readings:
return
raise RuntimeError("Was not able to enable feature", feature_id)
def _check_id(self):
self._dbg("\n********** READ ID **********")
if self._id_read:
return True
data = bytearray(2)
data[0] = _SHTP_REPORT_PRODUCT_ID_REQUEST
data[1] = 0 # padding
self._dbg("\n** Sending ID Request Report **")
self._send_packet(_BNO_CHANNEL_CONTROL, data)
self._dbg("\n** Waiting for packet **")
# _a_ packet arrived, but which one?
while True:
self._wait_for_packet_type(
_BNO_CHANNEL_CONTROL, _SHTP_REPORT_PRODUCT_ID_RESPONSE
)
sensor_id = self._parse_sensor_id()
if sensor_id:
self._id_read = True
return True
self._dbg("Packet didn't have sensor ID report, trying again")
return False
def _parse_sensor_id(self):
if not self._data_buffer[4] == _SHTP_REPORT_PRODUCT_ID_RESPONSE:
return None
sw_major = self._get_data(2, "<B")
sw_minor = self._get_data(3, "<B")
sw_patch = self._get_data(12, "<H")
sw_part_number = self._get_data(4, "<I")
sw_build_number = self._get_data(8, "<I")
self._dbg("")
self._dbg("*** Part Number: %d" % sw_part_number)
self._dbg("*** Software Version: %d.%d.%d" % (sw_major, sw_minor, sw_patch))
self._dbg(" Build: %d" % (sw_build_number))
self._dbg("")
# TODO: this is only one of the numbers!
return sw_part_number
def _dbg(self, *args, **kwargs):
if self._debug:
print("DBG::\t\t", *args, **kwargs)
def _get_data(self, index, fmt_string):
# index arg is not including header, so add 4 into data buffer
data_index = index + 4
return unpack_from(fmt_string, self._data_buffer, offset=data_index)[0]
# pylint:disable=no-self-use
@property
def _data_ready(self):
raise RuntimeError("Not implemented")
def hard_reset(self):
"""Hardware reset the sensor to an initial unconfigured state"""
if not self._reset:
return
import digitalio # pylint:disable=import-outside-toplevel
self._reset.direction = digitalio.Direction.OUTPUT
self._reset.value = True
time.sleep(0.01)
self._reset.value = False
time.sleep(0.01)
self._reset.value = True
time.sleep(0.01)
def soft_reset(self):
"""Reset the sensor to an initial unconfigured state"""
self._dbg("Soft resetting...", end="")
data = bytearray(1)
data[0] = 1
_seq = self._send_packet(BNO_CHANNEL_EXE, data)
time.sleep(0.5)
_seq = self._send_packet(BNO_CHANNEL_EXE, data)
time.sleep(0.5)
for _i in range(3):
try:
_packet = self._read_packet()
except PacketError:
time.sleep(0.5)
self._dbg("OK!")
# all is good!
def _send_packet(self, channel, data):
raise RuntimeError("Not implemented")
def _read_packet(self):
raise RuntimeError("Not implemented")
def _increment_report_seq(self, report_id):
current = self._two_ended_sequence_numbers.get(report_id, 0)
self._two_ended_sequence_numbers[report_id] = (current + 1) % 256
def _get_report_seq_id(self, report_id):
return self._two_ended_sequence_numbers.get(report_id, 0)
| 35.589649 | 98 | 0.636699 |
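# Editor's note: a minimal usage sketch for the BNO08x driver above; it is not part of the
# original file. The transport class BNO08X_I2C and the board/busio imports are assumptions
# about the surrounding Adafruit CircuitPython library -- only enable_feature(),
# save_calibration_data() and the report constants appear in the code shown here.
import board
import busio
from adafruit_bno08x import BNO_REPORT_MAGNETOMETER, BNO_REPORT_STEP_COUNTER
from adafruit_bno08x.i2c import BNO08X_I2C  # concrete subclass supplying _read_packet/_send_packet

i2c = busio.I2C(board.SCL, board.SDA)
bno = BNO08X_I2C(i2c)
bno.enable_feature(BNO_REPORT_MAGNETOMETER)   # start periodic magnetometer reports
bno.enable_feature(BNO_REPORT_STEP_COUNTER)   # start step-counter reports
bno.save_calibration_data()                   # persist the dynamic calibration data (DCD)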
f75cfc6fbc2038eba7fd11782abf69dd68f71bc2 | 3,904 | py | Python | resnet.py | BXuan694/basemodel-pytorch | a36c96904580be902e323db17eebbe2ea1f54176 | [
"MIT"
] | 1 | 2019-03-18T15:35:34.000Z | 2019-03-18T15:35:34.000Z | resnet.py | BXuan694/basemodel-pytorch | a36c96904580be902e323db17eebbe2ea1f54176 | [
"MIT"
] | null | null | null | resnet.py | BXuan694/basemodel-pytorch | a36c96904580be902e323db17eebbe2ea1f54176 | [
"MIT"
] | 1 | 2019-03-19T01:06:47.000Z | 2019-03-19T01:06:47.000Z | '''ResNet in PyTorch.
For Pre-activation ResNet, see 'preact_resnet.py'.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.expansion*planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, numClasses=257):
super(ResNet, self).__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.linear = nn.Linear(512*block.expansion*4, numClasses)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def ResNet18():
return ResNet(BasicBlock, [2,2,2,2])
def ResNet34():
return ResNet(BasicBlock, [3,4,6,3])
def ResNet50():
return ResNet(Bottleneck, [3,4,6,3])
def ResNet101():
return ResNet(Bottleneck, [3,4,23,3])
def ResNet152():
return ResNet(Bottleneck, [3,8,36,3]) | 34.548673 | 102 | 0.626537 |
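# Editor's note: a small smoke-test sketch for the builders above; it is not part of the
# original file and assumes the definitions above are in scope. With this variant's
# F.avg_pool2d(out, 4) and nn.Linear(512*expansion*4, ...) head, a 3x64x64 input works out
# exactly: 64 -> 64 -> 32 -> 16 -> 8 through the four stages, then 4x4 average pooling
# leaves a 2x2 map, i.e. 512 * 1 * 2 * 2 = 2048 features.
import torch

def _resnet_smoke_test():
    net = ResNet18()                   # defaults to numClasses=257
    x = torch.randn(2, 3, 64, 64)      # batch of two 3x64x64 images
    logits = net(x)
    assert logits.shape == (2, 257)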
f75d3544ffa19cc489ce532ee8d14ab4f09c6953 | 3,830 | py | Python | datar/base/trig_hb.py | stjordanis/datar | 4e2b5db026ad35918954576badef9951928c0cb1 | [
"MIT"
] | 110 | 2021-03-09T04:10:40.000Z | 2022-03-13T10:28:20.000Z | datar/base/trig_hb.py | sthagen/datar | 1218a549e2f0547c7b5a824ca6d9adf1bf96ba46 | [
"MIT"
] | 54 | 2021-06-20T18:53:44.000Z | 2022-03-29T22:13:07.000Z | datar/base/trig_hb.py | sthagen/datar | 1218a549e2f0547c7b5a824ca6d9adf1bf96ba46 | [
"MIT"
] | 11 | 2021-06-18T03:03:14.000Z | 2022-02-25T11:48:26.000Z | """Trigonometric and Hyperbolic Functions"""
from typing import Callable
import numpy
from pipda import register_func
from ..core.contexts import Context
from ..core.types import FloatOrIter
from .constants import pi
def _register_trig_hb_func(name: str, np_name: str, doc: str) -> Callable:
"""Register trigonometric and hyperbolic function"""
np_fun = getattr(numpy, np_name)
if name.endswith("pi"):
func = lambda x: np_fun(x * pi)
else:
# ufunc cannot set context
func = lambda x: np_fun(x)
func = register_func(None, context=Context.EVAL, func=func)
func.__name__ = name
func.__doc__ = doc
return func
sin = _register_trig_hb_func(
"sin",
"sin",
doc="""The sine function
Args:
x: a numeric value or iterable
Returns:
The sine value of `x`
""",
)
cos = _register_trig_hb_func(
"cos",
"cos",
doc="""The cosine function
Args:
x: a numeric value or iterable
Returns:
The cosine value of `x`
""",
)
tan = _register_trig_hb_func(
"tan",
"tan",
doc="""The tangent function
Args:
x: a numeric value or iterable
Returns:
The tangent value of `x`
""",
)
acos = _register_trig_hb_func(
"acos",
"arccos",
doc="""The arc-cosine function
Args:
x: a numeric value or iterable
Returns:
The arc-cosine value of `x`
""",
)
asin = _register_trig_hb_func(
    "asin",
    "arcsin",
doc="""The arc-sine function
Args:
x: a numeric value or iterable
Returns:
The arc-sine value of `x`
""",
)
atan = _register_trig_hb_func(
    "atan",
    "arctan",
    doc="""The arc-tangent function
Args:
x: a numeric value or iterable
Returns:
        The arc-tangent value of `x`
""",
)
sinpi = _register_trig_hb_func(
"sinpi",
"sin",
doc="""The sine function
Args:
x: a numeric value or iterable, which is the multiple of pi
Returns:
The sine value of `x`
""",
)
cospi = _register_trig_hb_func(
"cospi",
"cos",
doc="""The cosine function
Args:
x: a numeric value or iterable, which is the multiple of pi
Returns:
The cosine value of `x`
""",
)
tanpi = _register_trig_hb_func(
"tanpi",
"tan",
doc="""The tangent function
Args:
x: a numeric value or iterable, which is the multiple of pi
Returns:
The tangent value of `x`
""",
)
cosh = _register_trig_hb_func(
"cosh",
"cosh",
doc="""Hyperbolic cosine
Args:
x: a numeric value or iterable
Returns:
The hyperbolic cosine value of `x`
""",
)
sinh = _register_trig_hb_func(
"sinh",
"sinh",
doc="""Hyperbolic sine
Args:
x: a numeric value or iterable
Returns:
The hyperbolic sine value of `x`
""",
)
tanh = _register_trig_hb_func(
"tanh",
"tanh",
doc="""Hyperbolic tangent
Args:
x: a numeric value or iterable
Returns:
The hyperbolic tangent value of `x`
""",
)
acosh = _register_trig_hb_func(
"acosh",
"arccosh",
doc="""Hyperbolic arc-cosine
Args:
x: a numeric value or iterable
Returns:
The hyperbolic arc-cosine value of `x`
""",
)
asinh = _register_trig_hb_func(
"asinh",
"arcsinh",
doc="""Hyperbolic arc-sine
Args:
x: a numeric value or iterable
Returns:
The hyperbolic arc-sine value of `x`
""",
)
atanh = _register_trig_hb_func(
"atanh",
"arctanh",
doc="""Hyperbolic arc-tangent
Args:
x: a numeric value or iterable
Returns:
The hyperbolic arc-tangent value of `x`
""",
)
@register_func(None, context=Context.EVAL)
def atan2(y: FloatOrIter, x: FloatOrIter) -> FloatOrIter:
"""Calculates the angle between the x-axis and the vector (0,0) -> (x,y)
Args:
y: and
x: The end coordinates of the vector
Returns:
The angle between x-axis and vector (0,0) -> (x,y)
"""
return numpy.arctan2(y, x)
| 16.228814 | 76 | 0.636031 |
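# Editor's note: a short illustration of the "*pi" convention implemented by the
# registrations above; it is not part of the original file and uses NumPy directly so it
# does not depend on pipda's call protocol. sinpi/cospi/tanpi evaluate the plain function
# at x * pi, i.e. the argument is given in multiples of pi rather than in radians.
import numpy as np

assert np.isclose(np.sin(0.5 * np.pi), 1.0)           # what sinpi(0.5) computes
assert np.isclose(np.cos(0.5 * np.pi), 0.0)           # what cospi(0.5) computes
assert np.isclose(np.arctan2(1.0, 1.0), np.pi / 4.0)  # atan2(y=1, x=1) is 45 degrees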
f75d98745f8bb5f44f016f73dddb5848239e46a1 | 34,823 | py | Python | packages/python/pyfora/test/PyAstFreeVariableAnalyses_test.py | ufora/ufora | 04db96ab049b8499d6d6526445f4f9857f1b6c7e | [
"Apache-2.0",
"CC0-1.0",
"MIT",
"BSL-1.0",
"BSD-3-Clause"
] | 571 | 2015-11-05T20:07:07.000Z | 2022-01-24T22:31:09.000Z | packages/python/pyfora/test/PyAstFreeVariableAnalyses_test.py | timgates42/ufora | 04db96ab049b8499d6d6526445f4f9857f1b6c7e | [
"Apache-2.0",
"CC0-1.0",
"MIT",
"BSL-1.0",
"BSD-3-Clause"
] | 218 | 2015-11-05T20:37:55.000Z | 2021-05-30T03:53:50.000Z | packages/python/pyfora/test/PyAstFreeVariableAnalyses_test.py | timgates42/ufora | 04db96ab049b8499d6d6526445f4f9857f1b6c7e | [
"Apache-2.0",
"CC0-1.0",
"MIT",
"BSL-1.0",
"BSD-3-Clause"
] | 40 | 2015-11-07T21:42:19.000Z | 2021-05-23T03:48:19.000Z | # Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import copy
import os
import textwrap
import unittest
import pyfora.pyAst.PyAstFreeVariableAnalyses as PyAstFreeVariableAnalyses
import pyfora.pyAst.NodeVisitorBases as NodeVisitorBases
import pyfora.Exceptions as Exceptions
import pyfora.pyAst.PyAstUtil as PyAstUtil
class PyAstFreeVariableAnalyses_test(unittest.TestCase):
@classmethod
def setUpClass(cls):
filename = \
os.path.abspath(
os.path.join(
os.path.dirname(__file__), # the path of this module
'../pyAst/PyAstUtil.py'
)
)
with open(filename) as fhandle:
cls.some_python_code = fhandle.read()
def test_multiple_assignment(self):
tree = ast.parse(
textwrap.dedent(
"""
x = y = z = w
"""
)
)
self.assertEqual(
set(['w']),
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_members(self):
tree = ast.parse(
textwrap.dedent(
"""
def f(arg):
if arg:
x.y = 3
return x
"""
)
)
expectedResult = set(['x'])
self.assertEqual(
expectedResult,
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_variables_assigned_in_interior_scope(self):
tree = ast.parse(
textwrap.dedent(
"""
def f():
x = 10
return y
return x
"""
)
)
expectedResult = set(['x', 'y'])
self.assertEqual(
expectedResult,
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_variables_assigned_by_list_comp_are_not_free(self):
tree = ast.parse(
textwrap.dedent(
"""
x = range(10)
if isinstance(x, slice):
return [x[slice] for slice in x]
"""
)
)
expectedResult = set(['range', 'isinstance'])
self.assertEqual(
expectedResult,
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_variables_assigned_by_generator_comp_are_free(self):
tree = ast.parse(
textwrap.dedent(
"""
x = range(10)
if isinstance(x, slice):
return list(x[slice] for slice in x)
"""
)
)
expectedResult = set(['range', 'isinstance', 'slice', 'list'])
self.assertEqual(
expectedResult,
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_call_and_then_member(self):
tree = ast.parse(
textwrap.dedent(
"""
def f():
return g(1).__str__()
"""
)
)
self.assertEqual(
set(['g']),
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_call_and_then_member_chain(self):
tree = ast.parse(
textwrap.dedent(
"""
def f():
return g(1).__str__()
"""
)
)
self.assertEqual(
set([('g',)]),
PyAstFreeVariableAnalyses.getFreeVariableMemberAccessChains(tree)
)
def test_freeVariables_assignToSelf(self):
tree = ast.parse(
textwrap.dedent(
"""
x = x
def f():
y = y
class C(object):
z = z
"""
)
)
self.assertEqual(
set(['object', 'z']),
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_freeVariables_classLocalVar_1(self):
tree = ast.parse(
textwrap.dedent(
"""
class C(object):
x = 0
"""
)
)
self.assertEqual(
set(['object']),
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_freeVariables_classLocalVar_2(self):
tree = ast.parse(
textwrap.dedent(
"""
class C(object):
x = 0
y = x
"""
)
)
self.assertEqual(
set(['object']),
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_freeVariables_classLocalVar_3(self):
tree = ast.parse(
textwrap.dedent(
"""
class C(object):
y = x
x = 0
"""
)
)
self.assertEqual(
set(['x', 'object']),
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_freeVariables_functionDef_1(self):
tree = ast.parse(
textwrap.dedent(
"""def f(x, y = z, w = unbound, *args, **kwargs):
z = 2
z + args + x + y + kwargs
x = args + kwargs
nonExistent1 += 2
nonExistent2 = 2
nonExistent3
"""
)
)
self.assertEqual(
set(['z', 'unbound', 'nonExistent3']),
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_freeVariables_functionDef_2(self):
tree = ast.parse(
textwrap.dedent(
"""def outer():
def f(x):
return len(f)"""
)
)
self.assertEqual(
set(['len']),
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_freeVariables_classDef_1(self):
tree = ast.parse(
textwrap.dedent(
"""class C(object):
def f(x):
return len(f)"""
)
)
self.assertEqual(
set(['object', 'len', 'f']),
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_freeVariables_classDef_2(self):
tree = ast.parse(
textwrap.dedent(
"""
@decorator
class C(B, D):
def f(self):
return self.g + C + g
def g(self):
return 0"""
)
)
self.assertEqual(
set(['g', 'decorator', 'B', 'D']),
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_freeVariables_classDef_3(self):
tree = ast.parse(
textwrap.dedent(
"""
@decorator
class C(B, D):
def g(self):
return 0
def f(self):
return self.g + C + g
"""
)
)
self.assertEqual(
set(['g', 'decorator', 'B', 'D']),
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_freeVariables_classDef_4(self):
tree = ast.parse(
textwrap.dedent(
"""
class A: pass
class B(A, C, D): pass
"""
)
)
self.assertEqual(
set(('C', 'D')),
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_freeVariables_classDef_decorator(self):
tree = ast.parse(
textwrap.dedent(
"""
class C10:
@staticmethod
def g(x):
return x + 1
def f(self, arg):
return C10.g(arg)
"""
)
)
self.assertEqual(
set(['staticmethod']),
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_freeVariables_For(self):
tree = ast.parse(
textwrap.dedent(
"""
for x in elt:
x + 2
"""
)
)
self.assertEqual(
set(['elt']),
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_freeVariables_Assign(self):
tree = ast.parse(
textwrap.dedent(
"""
(x, y), z = w
"""
)
)
self.assertEqual(
set(['w']),
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_freeVariables_Assign_2(self):
tree = ast.parse(
textwrap.dedent(
"""
w = ((x, y), z)
"""
)
)
self.assertEqual(
set(['x', 'y', 'z']),
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_freeVariables_Sequence_1(self):
# this might seem a little strange,
# but ast parses this as a module
tree = ast.parse(
textwrap.dedent(
"""
x = 2
x + 3
"""
)
)
self.assertEqual(
set(),
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_freeVariables_Sequence_2(self):
# this might seem a little strange,
# but ast parses this as a module
tree = ast.parse(
textwrap.dedent(
"""
def f(arg):
return arg + x
x = 3
"""
)
)
self.assertEqual(
set(),
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_freeVariables_Sequence_3(self):
tree = ast.parse(
textwrap.dedent(
"""
def f():
x + 2
x = 3
"""
)
)
self.assertEqual(
set([]),
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_freeVariables_Sequence_4(self):
tree = ast.parse(
textwrap.dedent(
"""
def f():
x = 3
x + 2
"""
)
)
self.assertEqual(
set(),
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_freeVariables_Sequence_5(self):
tree = ast.parse(
textwrap.dedent(
"""
class C:
x = 2
def fn(self, arg):
return arg + x
fn
"""
)
)
self.assertEqual(
set(['x', 'fn']),
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_freeVariables_Sequence_6(self):
tree = ast.parse(
textwrap.dedent(
"""
class C:
def f(self, arg):
return arg + x
x = 2
f
"""
)
)
self.assertEqual(
set(['x', 'f']),
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_freeVariables_Sequence_7(self):
tree = ast.parse(
textwrap.dedent(
"""
def func():
def f(self, arg):
return arg + x
x = 2
f
class C:
pass
C
"""
)
)
self.assertEqual(
set([]),
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_freeVariables_Lambda_1(self):
tree = ast.parse(
textwrap.dedent(
"""
lambda x, y = z, *args, **kwargs: (x, y, args, kwargs, free)
"""
)
)
self.assertEqual(
set(['z', 'free']),
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_freeVariables_ListComp_1(self):
tree = ast.parse(
textwrap.dedent(
"""
[val for val in x if val == free]
"""
)
)
self.assertEqual(
set(['x', 'free']),
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_freeVariables_ListComp_2(self):
tree = ast.parse(
textwrap.dedent(
"""
[val for val in x if val == free]
"""
)
)
self.assertEqual(
set(['free', 'x']),
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_freeVariables_ListComp_3(self):
tree = ast.parse(
textwrap.dedent(
"""
x = [1,2,3]
[y for val in x]
"""
)
)
self.assertEqual(
set(['y']),
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_freeVariables_ListComp_4(self):
tree = ast.parse(
textwrap.dedent(
"""
[(x, y) for x in [1,2,3] for y in [3,1,4] if x != y]
"""
)
)
self.assertEqual(
set(),
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_freeVariables_SetComp_1(self):
tree = ast.parse(
textwrap.dedent(
"""
{v for x in q}
"""
)
)
self.assertEqual(
set(['v', 'q']),
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_freeVariables_DictComp_1(self):
tree = ast.parse(
textwrap.dedent(
"""
d = { x: y for x, y in o.f() }
"""
)
)
self.assertEqual(
set(['o']),
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_freeVariables_DictComp_2(self):
tree = ast.parse(
textwrap.dedent(
"""
d = { x1: y1 for x2, y2 in o.f() }
"""
)
)
self.assertEqual(
set(['o', 'x1', 'y1']),
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_freeVariables_functionCalls(self):
tree = ast.parse(
textwrap.dedent(
"""
x = 2
f(x, y, z = 2, w = x, q = free, *args, **kwargs)
"""
)
)
self.assertEqual(
set(['f', 'y', 'args', 'kwargs', 'free']),
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_freeVariables_nestedScopes_1(self):
tree = ast.parse(
textwrap.dedent(
"""
def f(x):
y = 2
class C:
def g(self, arg):
x + y + arg
"""
)
)
self.assertEqual(
set([]),
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_freeVariables_nestedScopes_2(self):
tree = ast.parse(
textwrap.dedent(
"""def f(x):
y = x + z
o.notFree = 3
class C:
def g(self):
return w + f(x) + C
C
"""
)
)
self.assertEqual(
set(['z', 'w', 'o']),
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_freeVariables_nestedScopes_3(self):
tree = ast.parse(
textwrap.dedent(
"""
def f(x):
y = 2
class C:
def g(self, arg):
x + y + arg
C
"""
)
)
self.assertEqual(
set(['C']),
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
# In the following two tests, 'x' within function 'f' is local,
    # regardless of whether there is a global 'x' in a parent scope.
def test_freeVariables_notFreeNotDefinedOnAllPaths_1(self):
tree = ast.parse(
textwrap.dedent(
"""
x = 42
def f(arg):
if arg:
x = 3
return x
"""
)
)
self.assertEqual(
set([]),
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_freeVariables_notFreeNotDefinedOnAllPaths_2(self):
tree = ast.parse(
textwrap.dedent(
"""
def f(arg):
if arg:
x = 3
return x
"""
)
)
self.assertEqual(
set([]),
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_freeVariables_globalStmt_1(self):
tree = ast.parse(
textwrap.dedent(
"""
def f():
global x
x = 3
return x
"""
)
)
with self.assertRaises(Exceptions.PythonToForaConversionError):
PyAstFreeVariableAnalyses.getFreeVariables(tree)
def test_freeVariables_globalStmt_2(self):
tree = ast.parse(
textwrap.dedent(
"""
def f():
global x
return x
"""
)
)
with self.assertRaises(Exceptions.PythonToForaConversionError):
PyAstFreeVariableAnalyses.getFreeVariables(tree)
def test_freeVariables_forLoop(self):
tree = ast.parse(
textwrap.dedent(
"""
def f(arg):
tr = x
for x in xrange(0, arg):
tr += x
return tr
"""
)
)
self.assertEqual(
set(['xrange']),
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_freeVariables_whileLoop(self):
tree = ast.parse(
textwrap.dedent(
"""
def f(arg):
tr = 0
while x in range(0, arg):
tr += 1
return tr
"""
)
)
self.assertEqual(
set(['x', 'range']),
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_freeVariables_inAssignment(self):
tree = ast.parse(
textwrap.dedent(
"""
def f(arg):
y = x
return y
"""
)
)
self.assertEqual(
set(['x']),
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_freeVariables_withStatement(self):
tree = ast.parse(
textwrap.dedent(
"""
def f(arg):
with x as e:
return e
"""
)
)
self.assertEqual(
set(['x']),
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_freeVariables_tryExcept(self):
tree = ast.parse(
textwrap.dedent(
"""
def f(x):
try:
x += 1
except Exception as e:
print e.message
except ValueError:
print "Wrong Value"
finally:
x -= 1
return x
"""
)
)
self.assertEqual(
set(['Exception', 'ValueError']),
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_freeVariables_CompsAndGenExp1(self):
tree = ast.parse(
textwrap.dedent(
"""
[elt1 for elt1 in container1]
{elt2 for elt2 in container2}
{elt3: elt4 for elt4 in container4 for elt3 in container3}
(x*y for x in range(10) for y in bar(x))
"""
)
)
expectedResult = set(['container1', 'container2', 'container3',
'container4', 'bar', 'range'])
self.assertEqual(
expectedResult,
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_freeVariables_CompsAndGenExp2(self):
tree = ast.parse(
textwrap.dedent(
"""
[elt01 for elt1 in container1]
{elt02 for elt2 in container2}
{elt03: elt04 for elt4 in container4 for elt3 in container3}
(x0*y0 for x in range(10) for y in bar(x))
"""
)
)
expectedResult = set(['container1', 'container2', 'container3',
'container4', 'bar', 'range', 'elt01',
'elt02', 'elt03', 'elt04', 'x0', 'y0'])
self.assertEqual(
expectedResult,
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
def test_freeVariables_class_context(self):
tree = ast.parse(
textwrap.dedent(
"""
class testClass:
def __init__(self, x):
self.x = x
def f(self):
return testClass("a")
"""
)
)
self.assertEqual(
set([]),
PyAstFreeVariableAnalyses.getFreeVariables(tree.body[0])
)
def test_freeVariables_function_context(self):
tree = ast.parse(
textwrap.dedent(
"""
def fib(x):
if x < 2:
return 1
else:
return fib(x-1) + fib(x-2)
"""
)
)
self.assertEqual(
set([]),
PyAstFreeVariableAnalyses.getFreeVariables(tree.body[0], isClassContext=False)
)
self.assertEqual(
set(['fib']),
PyAstFreeVariableAnalyses.getFreeVariables(tree.body[0], isClassContext=True)
)
def test_freeVariables_name_substring_bug(self):
tree = ast.parse(
textwrap.dedent(
"""
al = 10
l
"""
)
)
expectedResult = set(['l'])
self.assertEqual(
expectedResult,
PyAstFreeVariableAnalyses.getFreeVariables(tree)
)
##########################################################################
# Free Variable Member Access Chain Tests
##########################################################################
def test_freeVariablesMemberAccessChain_onFunctionDefNode(self):
tree1 = ast.parse(
textwrap.dedent(
"""
def g(arg):
if arg < 0:
return x + arg
return x * h(arg - 1, g)
"""
)
)
res = PyAstFreeVariableAnalyses.getFreeVariableMemberAccessChains(tree1)
self.assertEqual(
set([('h',), ('x',)]),
res
)
tree2 = PyAstUtil.functionDefOrLambdaAtLineNumber(tree1, 2)
self.assertEqual(
set([('h',), ('x',)]),
PyAstFreeVariableAnalyses.getFreeVariableMemberAccessChains(tree2, False)
)
def test_memberAccessChain_1(self):
tree = ast.parse("x.y.z.w")
self.assertEqual(
('x', 'y', 'z', 'w'),
PyAstFreeVariableAnalyses._memberAccessChainOrNone(
tree.body[0].value
)
)
def test_memberAccessChain_2(self):
tree = ast.parse("(1).y.z.w")
self.assertIsNone(
PyAstFreeVariableAnalyses._memberAccessChainOrNone(
tree.body[0].value
)
)
def test_freeVariableMemberAccessChains_1(self):
tree = ast.parse(
textwrap.dedent(
"""
def f():
x
x = 2
def f(x):
x.y
z.w.q
"""
)
)
res = PyAstFreeVariableAnalyses.getFreeVariableMemberAccessChains(tree)
self.assertEqual(
set([('z', 'w', 'q')]),
res
)
def test_freeVariableMemberAccessChains_2(self):
tree = ast.parse(
textwrap.dedent(
"""
def f():
y = 2
class C:
def g(self, arg):
x.y.z + y + arg
x = 2
C.f
"""
)
)
res = PyAstFreeVariableAnalyses.getFreeVariableMemberAccessChains(tree)
self.assertEqual(
set([('C', 'f')]),
res
)
def test_freeVariableMemberAccessChains_3(self):
tree = ast.parse(
textwrap.dedent(
"""
class C:
x.y.z = 3
def f(self):
y = 2
class C:
def g(self, arg):
x.y.z + y + arg
C.f
"""
)
)
res = PyAstFreeVariableAnalyses.getFreeVariableMemberAccessChains(tree)
self.assertEqual(
set([('x', 'y', 'z')]),
res
)
def test_freeVariableMemberAccessChains_4(self):
tree = ast.parse(
textwrap.dedent(
"""
x.y.z = 1
q = x.y.z
"""
)
)
res = PyAstFreeVariableAnalyses.getFreeVariableMemberAccessChains(tree)
self.assertEqual(
set([('x', 'y', 'z')]),
res
)
def test_freeVariableMemberAccessChains_5(self):
tree = ast.parse(
textwrap.dedent(
"""
x.y.z = 2
"""
)
)
res = PyAstFreeVariableAnalyses.getFreeVariableMemberAccessChains(tree)
self.assertEqual(
set([('x', 'y', 'z')]),
res
)
def test_freeVariableMemberAccessChains_6(self):
tree = ast.parse(
textwrap.dedent(
"""
q = x.y.z = 2
"""
)
)
res = PyAstFreeVariableAnalyses.getFreeVariableMemberAccessChains(tree)
self.assertEqual(
set([('x', 'y', 'z')]),
res
)
def test_TransvisitorsDontModifyTree(self):
tree = ast.parse(self.some_python_code)
# deep-copy tree because transformers modify the AST in place
# making the test of areAstsIdentical(tree, tree') meaningless
tree1 = copy.deepcopy(tree)
noopTransformer = NodeVisitorBases.SemanticOrderNodeTransvisitor()
tree2 = noopTransformer.visit(tree1)
self.assertIsNotNone(tree2)
self.assertTrue(PyAstUtil.areAstsIdentical(tree, tree2))
scopeMgr = NodeVisitorBases.InScopeSaveRestoreValue(
lambda : True,
lambda x: None)
noopTransformer = NodeVisitorBases.GenericScopedTransvisitor(scopeMgr)
tree3 = noopTransformer.visit(tree1)
self.assertIsNotNone(tree3)
self.assertTrue(PyAstUtil.areAstsIdentical(tree, tree3))
freeVarsVisitor = PyAstFreeVariableAnalyses._FreeVariableMemberAccessChainsTransvisitor()
tree4 = freeVarsVisitor.visit(tree1)
self.assertIsNotNone(tree4)
self.assertTrue(PyAstUtil.areAstsIdentical(tree, tree4))
def test_FreeVariableTransformer_1(self):
tree = ast.parse(
textwrap.dedent(
"""
def g(x):
return x.y.z
"""
)
)
# deep-copy tree because transformers modify the AST in place
# making the test of areAstsIdentical(tree, tree') meaningless
tree1 = copy.deepcopy(tree)
tree2 = PyAstFreeVariableAnalyses.collapseFreeVariableMemberAccessChains(
tree1, {('x', 'y', 'z'):'x_y_z'}, isClassContext=False)
self.assertIsNotNone(tree2)
self.assertTrue(PyAstUtil.areAstsIdentical(tree, tree2))
def test_FreeVariableTransformer_2(self):
tree = ast.parse(
textwrap.dedent(
"""
def g(y):
return x.y.z
"""
)
)
# deep-copy tree because transformers modify the AST in place
# making the test of areAstsIdentical(tree, tree') meaningless
tree1 = copy.deepcopy(tree)
tree2 = PyAstFreeVariableAnalyses.collapseFreeVariableMemberAccessChains(
tree1, {('x', 'y', 'z'):'x_y_z'}, isClassContext=False)
self.assertIsNotNone(tree2)
self.assertFalse(PyAstUtil.areAstsIdentical(tree, tree2))
def test_FreeVariableTransformer_3(self):
tree = ast.parse(
textwrap.dedent(
"""
def g(y):
return x.y.z.__str__()
"""
)
)
# deep-copy tree because transformers modify the AST in place
# making the test of areAstsIdentical(tree, tree') meaningless
tree1 = copy.deepcopy(tree)
tree2 = PyAstFreeVariableAnalyses.collapseFreeVariableMemberAccessChains(
tree1, {('x', 'y', 'z'):'x_y_z'}, isClassContext=False)
self.assertIsNotNone(tree2)
self.assertFalse(PyAstUtil.areAstsIdentical(tree, tree2))
def test_FreeVariableTransformer_4(self):
tree = ast.parse(
textwrap.dedent(
"""
def f(x,y):
def g(x):
return x.y.z
return x.y.z, g(y)
"""
)
)
# deep-copy tree because transformers modify the AST in place
# making the test of areAstsIdentical(tree, tree') meaningless
tree1 = copy.deepcopy(tree)
tree2 = PyAstFreeVariableAnalyses.collapseFreeVariableMemberAccessChains(
tree1, {('x', 'y', 'z'):'x_y_z'}, isClassContext=False)
self.assertIsNotNone(tree2)
self.assertTrue(PyAstUtil.areAstsIdentical(tree, tree2))
def test_FreeVariableTransformer_5(self):
tree = ast.parse(
textwrap.dedent(
"""
def f(x,y):
def g(z):
return x.y.z
return x.y.z, g(y)
"""
)
)
# deep-copy tree because transformers modify the AST in place
# making the test of areAstsIdentical(tree, tree') meaningless
tree1 = copy.deepcopy(tree)
tree2 = PyAstFreeVariableAnalyses.collapseFreeVariableMemberAccessChains(
tree1, {('x', 'y', 'z'):'x_y_z'}, isClassContext=False)
self.assertIsNotNone(tree2)
self.assertTrue(PyAstUtil.areAstsIdentical(tree, tree2))
def test_FreeVariableTransformer_6(self):
tree = ast.parse(
textwrap.dedent(
"""
def f(y,z):
def g(x):
return x.y.z
return x.y.z, g(y)
"""
)
)
# deep-copy tree because transformers modify the AST in place
# making the test of areAstsIdentical(tree, tree') meaningless
tree1 = copy.deepcopy(tree)
tree2 = PyAstFreeVariableAnalyses.collapseFreeVariableMemberAccessChains(
tree1, {('x', 'y', 'z'):'x_y_z'}, isClassContext=False)
self.assertIsNotNone(tree2)
self.assertFalse(PyAstUtil.areAstsIdentical(tree, tree2))
if __name__ == "__main__":
unittest.main()
| 28.357492 | 97 | 0.435775 |
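# Editor's note: a condensed sketch of the API exercised by the tests above; it is not part
# of the original file. Parse a snippet and ask for its free variables -- names that are
# neither bound locally nor introduced as parameters (builtins included) come back.
import ast
import textwrap
import pyfora.pyAst.PyAstFreeVariableAnalyses as PyAstFreeVariableAnalyses

tree = ast.parse(textwrap.dedent(
    """
    def f(arg):
        return arg + x + len(ys)
    """
))
assert PyAstFreeVariableAnalyses.getFreeVariables(tree) == set(['x', 'len', 'ys'])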
f75dac59ec17719df865826b8d86750f76ed68eb | 3,418 | py | Python | statsd/setup.py | dvanderveer/integrations-core | 41dd9950296455457c9b7342584153678503d5aa | [
"BSD-3-Clause"
] | null | null | null | statsd/setup.py | dvanderveer/integrations-core | 41dd9950296455457c9b7342584153678503d5aa | [
"BSD-3-Clause"
] | null | null | null | statsd/setup.py | dvanderveer/integrations-core | 41dd9950296455457c9b7342584153678503d5aa | [
"BSD-3-Clause"
] | null | null | null | # Always prefer setuptools over distutils
from setuptools import setup
# To use a consistent encoding
from codecs import open
from os import path
import json
import re
here = path.abspath(path.dirname(__file__))
def parse_req_line(line):
line = line.strip()
if not line or line.startswith('--hash') or line[0] == '#':
return None
req = line.rpartition('#')
if len(req[1]) == 0:
line = req[2].strip()
else:
line = req[1].strip()
if '--hash=' in line:
line = line[:line.find('--hash=')].strip()
if ';' in line:
line = line[:line.find(';')].strip()
if '\\' in line:
line = line[:line.find('\\')].strip()
return line
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# Parse requirements
runtime_reqs = ['datadog_checks_base']
with open(path.join(here, 'requirements.txt'), encoding='utf-8') as f:
for line in f.readlines():
req = parse_req_line(line)
if req:
runtime_reqs.append(req)
def read(*parts):
with open(path.join(here, *parts), 'r') as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
# https://packaging.python.org/guides/single-sourcing-package-version/
version = find_version("datadog_checks", "statsd", "__init__.py")
manifest_version = None
with open(path.join(here, 'manifest.json'), encoding='utf-8') as f:
manifest = json.load(f)
manifest_version = manifest.get('version')
if version != manifest_version:
raise Exception("Inconsistent versioning in module and manifest - aborting wheel build")
setup(
name='datadog-statsd',
version=version,
description='The Statsd check',
long_description=long_description,
keywords='datadog agent statsd check',
# The project's main homepage.
url='https://github.com/DataDog/integrations-core',
# Author details
author='Datadog',
author_email='packages@datadoghq.com',
# License
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: System :: Monitoring',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
# The package we're going to ship
packages=['datadog_checks.statsd'],
# Run-time dependencies
install_requires=list(set(runtime_reqs)),
# Development dependencies, run with:
# $ pip install -e .[dev]
extras_require={
'dev': [
'check-manifest',
'datadog_agent_tk>=5.15',
],
},
# Testing setup and dependencies
tests_require=[
'nose',
'coverage',
'datadog_agent_tk>=5.15',
],
test_suite='nose.collector',
# Extra files to ship with the wheel package
package_data={b'datadog_checks.statsd': ['conf.yaml.example']},
include_package_data=True,
)
| 28.247934 | 92 | 0.631363 |
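# Editor's note: a quick illustration of what parse_req_line() above keeps and drops when
# reading requirements.txt; it is not part of the original file and assumes parse_req_line
# from the setup.py above is in scope. Comment and --hash-only lines are skipped entirely,
# while hash, environment-marker and line-continuation suffixes are trimmed off.
assert parse_req_line("# a comment") is None
assert parse_req_line("--hash=sha256:deadbeef") is None
assert parse_req_line("requests==2.18.4 --hash=sha256:deadbeef \\") == "requests==2.18.4"
assert parse_req_line("simplejson==3.6.5; python_version < '2.7'") == "simplejson==3.6.5"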
f75db6d92b4879ecfdc8c7bce21b887d2c00195c | 4,100 | py | Python | hw4/cs285/agents/mb_agent.py | erfanMhi/Deep-Reinforcement-Learning-CS285 | 4b832c232889b0f867c5d43b4f28c588607cfd60 | [
"MIT"
] | 1 | 2020-08-16T13:01:27.000Z | 2020-08-16T13:01:27.000Z | hw4/cs285/agents/mb_agent.py | erfanMhi/Deep-Reinforcement-Learning-CS285 | 4b832c232889b0f867c5d43b4f28c588607cfd60 | [
"MIT"
] | null | null | null | hw4/cs285/agents/mb_agent.py | erfanMhi/Deep-Reinforcement-Learning-CS285 | 4b832c232889b0f867c5d43b4f28c588607cfd60 | [
"MIT"
] | 3 | 2020-12-16T15:24:39.000Z | 2022-01-27T21:29:34.000Z | from .base_agent import BaseAgent
from cs285.models.ff_model import FFModel
from cs285.policies.MPC_policy import MPCPolicy
from cs285.infrastructure.replay_buffer import ReplayBuffer
from cs285.infrastructure.utils import *
class MBAgent(BaseAgent):
def __init__(self, sess, env, agent_params):
super(MBAgent, self).__init__()
self.env = env.unwrapped
self.sess = sess
self.agent_params = agent_params
self.ensemble_size = self.agent_params['ensemble_size']
self.dyn_models = []
for i in range(self.ensemble_size):
model = FFModel(sess,
self.agent_params['ac_dim'],
self.agent_params['ob_dim'],
self.agent_params['n_layers'],
self.agent_params['size'],
self.agent_params['learning_rate'],
scope='dyn_model_{}'.format(i))
self.dyn_models.append(model)
self.actor = MPCPolicy(sess,
self.env,
ac_dim = self.agent_params['ac_dim'],
dyn_models = self.dyn_models,
horizon = self.agent_params['mpc_horizon'],
N = self.agent_params['mpc_num_action_sequences'],
)
self.replay_buffer = ReplayBuffer()
def train(self, ob_no, ac_na, re_n, next_ob_no, terminal_n):
# training a MB agent refers to updating the predictive model using observed state transitions
# NOTE: each model in the ensemble is trained on a different random batch of size batch_size
losses = []
num_data = ob_no.shape[0]
num_data_per_ens = int(num_data/self.ensemble_size)
for i in range(self.ensemble_size):
# select which datapoints to use for this model of the ensemble
            # you might find the num_data_per_ens variable defined above useful
ens_top_idx = (i+1) * num_data_per_ens
ens_bottom_idx = i * num_data_per_ens
observations = ob_no[ens_bottom_idx: ens_top_idx]# TODO(Q1)
actions = ac_na[ens_bottom_idx: ens_top_idx] # TODO(Q1)
next_observations = next_ob_no[ens_bottom_idx: ens_top_idx] # TODO(Q1)
# use datapoints to update one of the dyn_models
model = self.dyn_models[i] # TODO(Q1)
loss = model.update(observations, actions, next_observations, self.data_statistics)
losses.append(loss)
avg_loss = np.mean(losses)
return avg_loss
def add_to_replay_buffer(self, paths, add_sl_noise=False):
# add data to replay buffer
self.replay_buffer.add_rollouts(paths, noised=add_sl_noise)
# get updated mean/std of the data in our replay buffer
self.data_statistics = {'obs_mean': np.mean(self.replay_buffer.obs, axis=0),
'obs_std': np.std(self.replay_buffer.obs, axis=0),
'acs_mean': np.mean(self.replay_buffer.acs, axis=0),
'acs_std': np.std(self.replay_buffer.acs, axis=0),
'delta_mean': np.mean(
self.replay_buffer.next_obs - self.replay_buffer.obs,
axis=0),
'delta_std': np.std(
self.replay_buffer.next_obs - self.replay_buffer.obs,
axis=0),
}
# update the actor's data_statistics too, so actor.get_action can be calculated correctly
self.actor.data_statistics = self.data_statistics
def sample(self, batch_size):
# NOTE: The size of the batch returned here is sampling batch_size * ensemble_size,
# so each model in our ensemble can get trained on batch_size data
return self.replay_buffer.sample_random_data(batch_size*self.ensemble_size) | 46.590909 | 102 | 0.579756 |
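# Editor's note: a standalone illustration of the per-model slicing used in train() above;
# it is not part of the original file. Each of the ensemble_size dynamics models is updated
# on its own contiguous chunk of the sampled batch, so no datapoint is shared between
# ensemble members.
import numpy as np

ensemble_size = 3
ob_no = np.arange(12).reshape(12, 1)            # stand-in batch of 12 observations
per_model = ob_no.shape[0] // ensemble_size     # 4 datapoints per ensemble member
chunks = [ob_no[i * per_model:(i + 1) * per_model] for i in range(ensemble_size)]
assert [c.shape[0] for c in chunks] == [4, 4, 4]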
f75e11209704b4c1ca00d10b546afdd97cf2b6e8 | 1,618 | py | Python | tests/test_scan.py | eruvanos/dynafile | 207425b073a963b01c677b697e74842b429c004a | [
"MIT"
] | null | null | null | tests/test_scan.py | eruvanos/dynafile | 207425b073a963b01c677b697e74842b429c004a | [
"MIT"
] | null | null | null | tests/test_scan.py | eruvanos/dynafile | 207425b073a963b01c677b697e74842b429c004a | [
"MIT"
] | null | null | null | from _operator import itemgetter
from dynafile import Dynafile
def test_scan_all_items(tmp_path):
db = Dynafile(tmp_path / "db")
aa = {
"PK": "1",
"SK": "aa",
}
ab = {
"PK": "1",
"SK": "ab",
}
ac = {
"PK": "1",
"SK": "ac",
}
ba = {
"PK": "2",
"SK": "ba",
}
db.put_item(item=aa)
db.put_item(item=ab)
db.put_item(item=ac)
db.put_item(item=ba)
items = set(map(itemgetter("SK"), db.scan()))
assert items == {"aa", "ab", "ac", "ba"}
def test_scan_with_callable_filter(tmp_path):
db = Dynafile(tmp_path / "db")
aa = {
"PK": "1",
"SK": "aa",
}
ab = {
"PK": "1",
"SK": "ab",
}
ac = {
"PK": "1",
"SK": "ac",
}
ba = {
"PK": "2",
"SK": "ba",
}
db.put_item(item=aa)
db.put_item(item=ab)
db.put_item(item=ac)
db.put_item(item=ba)
items = set(map(itemgetter("SK"), db.scan(_filter=lambda i: i["SK"].startswith("a"))))
assert items == {"aa", "ab", "ac"}
def test_scan_with_string_filter(tmp_path):
db = Dynafile(tmp_path / "db")
aa = {
"PK": "1",
"SK": "aa",
}
ab = {
"PK": "1",
"SK": "ab",
}
ac = {
"PK": "1",
"SK": "ac",
}
ba = {
"PK": "2",
"SK": "ba",
}
db.put_item(item=aa)
db.put_item(item=ab)
db.put_item(item=ac)
db.put_item(item=ba)
items = set(map(itemgetter("SK"), db.scan(_filter="SK =~ /^a/")))
assert items == {"aa", "ab", "ac"}
| 17.212766 | 90 | 0.434487 |
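# Editor's note: the three tests above seed the same four items; a shared helper like this
# editorial sketch (not part of the original file, same imports as the tests above) would
# remove the duplication while keeping each test's filter expression in focus.
def _seeded_db(tmp_path):
    db = Dynafile(tmp_path / "db")
    for pk, sk in [("1", "aa"), ("1", "ab"), ("1", "ac"), ("2", "ba")]:
        db.put_item(item={"PK": pk, "SK": sk})
    return db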
f75e1afa2f582ef72566a9f1236eb47e92500b9e | 515 | py | Python | geoq/training/admin.py | kaydoh/geoq | 6f10818d0cc3cef4ba8113e8b047d27e79b2f8b0 | [
"MIT"
] | 471 | 2015-01-05T15:16:26.000Z | 2022-03-28T05:06:11.000Z | geoq/training/admin.py | kaydoh/geoq | 6f10818d0cc3cef4ba8113e8b047d27e79b2f8b0 | [
"MIT"
] | 109 | 2015-01-06T20:00:58.000Z | 2022-03-11T23:17:53.000Z | geoq/training/admin.py | kaydoh/geoq | 6f10818d0cc3cef4ba8113e8b047d27e79b2f8b0 | [
"MIT"
] | 100 | 2015-01-05T15:16:39.000Z | 2021-12-01T12:13:13.000Z | # This technical data was produced for the U. S. Government under Contract No. W15P7T-13-C-F600, and
# is subject to the Rights in Technical Data-Noncommercial Items clause at DFARS 252.227-7013 (FEB 2012)
from reversion.admin import VersionAdmin
from django.contrib.gis import admin
from .models import *
class ObjectAdmin(VersionAdmin,):
pass
@admin.register(Training)
class TrainingAdmin(ObjectAdmin):
list_display = ('name', 'category', 'private', 'primary_contact')
list_filter = ('category',)
| 32.1875 | 104 | 0.757282 |
f75e1bbd91fd329bea50425c828ffbf5b2ab1b32 | 120 | py | Python | desafio_certo.py | franciol/Servidor_de_desafios | 3ce3de561af5ce5182a3d0f5db0a7dfd3b51a8fc | [
"MIT"
] | null | null | null | desafio_certo.py | franciol/Servidor_de_desafios | 3ce3de561af5ce5182a3d0f5db0a7dfd3b51a8fc | [
"MIT"
] | null | null | null | desafio_certo.py | franciol/Servidor_de_desafios | 3ce3de561af5ce5182a3d0f5db0a7dfd3b51a8fc | [
"MIT"
] | null | null | null | """Arquivo a ser enviado ao sistema"""
def desafio1(number):
    """Function that returns the entered value"""
return 0
| 20 | 44 | 0.666667 |
f75e37c8f400e30099681450ab0f9a8bda1e56f0 | 703 | py | Python | tests/integration/integration_test.py | sohomiitb/hackathonCN | 99dfe67d1f6884dc99c7c71663807dcdf0617722 | [
"MIT"
] | null | null | null | tests/integration/integration_test.py | sohomiitb/hackathonCN | 99dfe67d1f6884dc99c7c71663807dcdf0617722 | [
"MIT"
] | null | null | null | tests/integration/integration_test.py | sohomiitb/hackathonCN | 99dfe67d1f6884dc99c7c71663807dcdf0617722 | [
"MIT"
] | null | null | null | import requests
import json
test_sample = json.dumps({'data': [
[1,2,3,4,54,6,7,8,88,10],
[10,9,8,37,36,45,4,33,2,1]
]})
test_sample = str(test_sample)
def test_ml_service(scoreurl, scorekey):
assert scoreurl != None
if scorekey is None:
headers = {'Content-Type':'application/json'}
else:
headers = {'Content-Type':'application/json', 'Authorization':('Bearer ' + scorekey)}
resp = requests.post(scoreurl, test_sample, headers=headers)
assert resp.status_code == requests.codes.ok
assert resp.text != None
assert resp.headers.get('content-type') == 'application/json'
assert int(resp.headers.get('Content-Length')) > 0
| 30.565217 | 94 | 0.642959 |
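# Editor's note: test_ml_service() above expects scoreurl/scorekey to be injected by the
# test harness. A minimal conftest.py sketch that feeds them from environment variables is
# shown below; it is an assumption for illustration, not part of the original repository.
import os
import pytest

@pytest.fixture
def scoreurl():
    return os.environ.get("SCORE_URL")

@pytest.fixture
def scorekey():
    return os.environ.get("SCORE_KEY")  # may be None for unauthenticated endpoints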
f75e5cebd956a4025bf0265b1e7dd05df153d431 | 1,205 | py | Python | appengine/findit/services/test_failure/test_results_service.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | [
"BSD-3-Clause"
] | 2 | 2021-04-13T21:22:18.000Z | 2021-09-07T02:11:57.000Z | appengine/findit/services/test_failure/test_results_service.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | [
"BSD-3-Clause"
] | 21 | 2020-09-06T02:41:05.000Z | 2022-03-02T04:40:01.000Z | appengine/findit/services/test_failure/test_results_service.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides functions for Findit's special operations on test results."""
import base64
def GetFailedTestsInformationFromTestResult(test_results_object):
""" Gets logs for failed tests.
In the case where we have found reliable failed tests but no failure log, use
test locations as failure log.
Args:
test_results_object(BaseTestResults): Test result object.
Returns:
    A dict of logs for failed tests.
    A dict of consistently failed tests.
"""
failed_test_log, reliable_failed_tests = (
test_results_object.GetFailedTestsInformation())
# Uses test location as test failure log if there is no failure log.
for test_name in reliable_failed_tests:
if not failed_test_log.get(test_name):
# No failure log for this test.
test_location, _ = test_results_object.GetTestLocation(test_name)
if not test_location or not test_location.get('file'):
continue
failed_test_log[test_name] = base64.b64encode(test_location['file'])
return failed_test_log, reliable_failed_tests
| 34.428571 | 79 | 0.756017 |
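# Editor's note: a minimal illustration of the two methods the function above relies on;
# it is not part of the original file and _FakeResults is a made-up stand-in for Findit's
# real test-result objects. It shows the base64 fallback used when a reliably failing test
# has no failure log but does have a known source location.
class _FakeResults(object):
  def GetFailedTestsInformation(self):
    return {'Suite.Test': ''}, {'Suite.Test': 1}       # no log, but reliably failing

  def GetTestLocation(self, test_name):
    return {'file': 'src/foo_test.cc', 'line': 42}, None

logs, reliable = GetFailedTestsInformationFromTestResult(_FakeResults())
# logs['Suite.Test'] now holds base64.b64encode('src/foo_test.cc')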
f75e684997ed998bb609136426eba7604a6d9f60 | 394 | py | Python | python/excel/iterating_over_cell_values.py | y2ghost/work | b7f5b02db9dc0df6157bc799ddb4a1ac9d574cf3 | [
"MIT"
] | null | null | null | python/excel/iterating_over_cell_values.py | y2ghost/work | b7f5b02db9dc0df6157bc799ddb4a1ac9d574cf3 | [
"MIT"
] | null | null | null | python/excel/iterating_over_cell_values.py | y2ghost/work | b7f5b02db9dc0df6157bc799ddb4a1ac9d574cf3 | [
"MIT"
] | null | null | null | from openpyxl import load_workbook
def iterating_over_values(path):
workbook = load_workbook(filename=path)
sheet = workbook.active
for value in sheet.iter_rows(min_row=1, max_row=3,
min_col=1, max_col=3,
values_only=True):
print(value)
if __name__ == '__main__':
iterating_over_values('books.xlsx')
| 26.266667 | 54 | 0.609137 |
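# Editor's note: a companion sketch (not part of the original file) -- the same workbook can
# also be walked column-by-column with iter_cols(), which mirrors iter_rows() in the
# standard openpyxl API.
def iterating_over_columns(path):
    workbook = load_workbook(filename=path)
    sheet = workbook.active
    for column in sheet.iter_cols(min_row=1, max_row=3,
                                  min_col=1, max_col=3,
                                  values_only=True):
        print(column)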
f75e9101a08e5e6ea8785eb222e080eeb9033012 | 2,532 | py | Python | shaker/cli.py | benjaminws/shaker | d9195422b8bef64bc751fffee8706f1abbd656a3 | [
"BSD-2-Clause"
] | 1 | 2015-11-05T10:28:03.000Z | 2015-11-05T10:28:03.000Z | shaker/cli.py | benjaminws/shaker | d9195422b8bef64bc751fffee8706f1abbd656a3 | [
"BSD-2-Clause"
] | null | null | null | shaker/cli.py | benjaminws/shaker | d9195422b8bef64bc751fffee8706f1abbd656a3 | [
"BSD-2-Clause"
] | null | null | null | from shaker.version import __version__
import optparse
import os  # needed for os.path.join() below
def parse_cli():
parser = optparse.OptionParser(
usage="%prog [options] profile",
version="%%prog %s" % __version__)
parser.add_option(
'-a', '--ami', dest='ec2_ami_id', metavar='AMI',
help='Build instance from AMI')
parser.add_option(
'-d', '--distro', dest='distro',
metavar='DISTRO', default='',
help="Build minion (ubuntu, debian, squeeze, oneiric, etc.)")
parser.add_option('--ec2-group', dest='ec2_security_group')
parser.add_option('--ec2-key', dest='ec2_key_name')
parser.add_option('--ec2-zone', dest='ec2_zone', default='')
parser.add_option(
'--config-dir', dest='config_dir',
help="Configuration directory")
parser.add_option(
'--dry-run', dest='dry_run',
action='store_true', default=False,
help="Log the initialization setup, but don't launch the instance")
parser.add_option(
'--to-profile', dest='to_profile',
default=False,
help="Save options to a specified profile"
)
parser.add_option(
'-m', '--master', dest='salt_master',
metavar='SALT_MASTER', default='',
help="Connect salt minion to SALT_MASTER")
parser.add_option(
'--hostname', dest='hostname',
metavar='HOSTNAME', default='',
help="Assign HOSTNAME to salt minion")
parser.add_option(
'--domain', dest='domain',
metavar='DOMAIN', default='',
help="Assign DOMAIN name to salt minion")
import shaker.log
parser.add_option('-l',
'--log-level',
dest='log_level',
default='info',
choices=shaker.log.LOG_LEVELS.keys(),
help='Log level: %s. \nDefault: %%default' %
', '.join(shaker.log.LOG_LEVELS.keys())
)
(opts, args) = parser.parse_args()
if len(args) < 1:
if opts.ec2_ami_id or opts.distro:
profile = None
else:
print parser.format_help().strip()
errmsg = "\nError: Specify shaker profile or EC2 ami or distro"
raise SystemExit(errmsg)
else:
profile = args[0]
import shaker.config
config_dir = shaker.config.get_config_dir(opts.config_dir)
shaker.log.start_logger(
__name__,
os.path.join(config_dir, 'shaker.log'),
opts.log_level)
if opts.ec2_ami_id:
opts.distro = '' # mutually exclusive
return opts, config_dir, profile
| 35.166667 | 75 | 0.593602 |
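# Editor's note: example invocations for the option parser above; they are not part of the
# original file, the command name, profile and option values are placeholders, and only the
# flags come from the optparse definition.
#
#   shaker myprofile                                   # launch from a saved profile
#   shaker --distro oneiric -m salt.example.com --hostname minion1 --domain example.com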
f75e96d03943d80dfc2c33ec15de4bf9add6c2ce | 1,022 | py | Python | experiments/arcbench_data_preparation/arc_model_data_preparation.py | joschout/Multi-Directional-Rule-Set-Learning | ef0620b115f4e0fd7fba3e752d238a8020c1ca6b | [
"Apache-2.0"
] | 3 | 2020-08-03T19:25:44.000Z | 2021-06-27T22:25:55.000Z | experiments/arcbench_data_preparation/arc_model_data_preparation.py | joschout/Multi-Directional-Rule-Set-Learning | ef0620b115f4e0fd7fba3e752d238a8020c1ca6b | [
"Apache-2.0"
] | null | null | null | experiments/arcbench_data_preparation/arc_model_data_preparation.py | joschout/Multi-Directional-Rule-Set-Learning | ef0620b115f4e0fd7fba3e752d238a8020c1ca6b | [
"Apache-2.0"
] | 2 | 2020-08-07T22:54:28.000Z | 2021-02-18T06:11:01.000Z | import pandas as pd
from experiments.arcbench_data_preparation.reworked_one_hot_encoding import get_original_data_fold_abs_file_name, \
TrainTestEnum
from mdrsl.data_handling.nan_data_filtering import remove_instances_with_nans_in_column
from mdrsl.data_handling.reorder_dataset_columns import reorder_columns
def prepare_arc_data(
dataset_name: str,
fold_i: int,
target_attribute: str,
train_test: TrainTestEnum
) -> pd.DataFrame:
# read in original (discretized) training/test data
# reorder the data so the target column is last
original_data_fold_abs_file_name = get_original_data_fold_abs_file_name(dataset_name, fold_i, train_test)
df_original_column_order = pd.read_csv(original_data_fold_abs_file_name, delimiter=',')
df_reordered = reorder_columns(df_original_column_order, target_attribute)
# REMOVE INSTANCES WITH NAN AS TARGET VALUE:
df_reordered = remove_instances_with_nans_in_column(df_reordered, target_attribute)
return df_reordered
| 42.583333 | 115 | 0.807241 |
f75e9d9e5e6169334b512a07db12fa97a0871bb9 | 4,286 | py | Python | src/UniFam.py | jcrosskey/unifam | 7ff125f6bb0b5093a60ffda779847ef537c02790 | [
"MIT"
] | null | null | null | src/UniFam.py | jcrosskey/unifam | 7ff125f6bb0b5093a60ffda779847ef537c02790 | [
"MIT"
] | null | null | null | src/UniFam.py | jcrosskey/unifam | 7ff125f6bb0b5093a60ffda779847ef537c02790 | [
"MIT"
] | null | null | null | '''
UniFam.py
pipeline
Created by JJ Chai on 02/24/2014
Last modified Mon Apr 20 16:58:56 EDT 2015
Copyright (c) 2014 JJ Chai (ORNL). All rights reserved.
'''
# Import Python modules
import configparser
import argparse
import sys
from datetime import datetime
# Import local modules
import UniFam_lib # in this directory
import UniFam_lib_batch
## Version
version_str = "1.1.0"
''' 0.0.1. first version of pipeline, including prodigal, hmmsearch, and annotation
all configuration options are in a file. User can(and must) change the options in the config file to customize.
0.0.2. Added more information for pathologic module of the analysis, and rRNA, tRNA
analysis.
1.0.0 First Stable version for release, UniFam 1.0.0
1.1.0 Added README file to describe output files; zip pathway inference output results
'''
parser = argparse.ArgumentParser(description="Annotation of contigs/proteins using UniFam",
prog = 'UniFam', #program name
prefix_chars='-', # prefix for options
fromfile_prefix_chars='@', # if options are read from file, '@args.txt'
conflict_handler='resolve', # for handling conflict options
add_help=True, # include help in the options
formatter_class=argparse.ArgumentDefaultsHelpFormatter # print default values for options in help message
)
## version control
parser.add_argument("--version", action="version",version='%(prog)s {}'.format(version_str))
## --verbose mode, default: false (quiet mode)
parser.add_argument("-v", "--verbose", action="store_true",help="verbose mode, more output")
#parser.add_argument("-n", "--dryrun", action="store_true",help="dryrun, only print commands, do not execute")
parser.add_argument("-b", "--batch", action="store_true",help="batch mode to construct pathway", dest="batch")
## input files and directories
## configuration file, required
parser.add_argument("-c",help="configuration file",dest='configFile',required=True)
## input file, required
parser.add_argument("-i",help="input fasta file (contig or protein fasta/faa file)",dest='inputFile',required=True)
## output file, now this is removed, determined from the prefix argument instead
## parser.add_argument("-o",help="output annotation file",dest='outputFile',required=True)
## =================================================================
## main function
## =================================================================
def main(argv=None):
    ## argparse falls back to sys.argv when argv is None, so passing it through
    ## handles both command-line and programmatic invocation without leaving args unset
    args = parser.parse_args(argv)
## print some information
if args.verbose:
sys.stdout.write('running verbosely\n')
sys.stdout.write('configuration file is: {0}\n'.format(args.configFile))
sys.stdout.write('input fasta file is: {0}\n'.format(args.inputFile))
#sys.stdout.write('output file is: {}\n'.format(args.outputFile))
else:
sys.stdout.write('\n')
# display work start, and time record
start_time = datetime.now()
sys.stderr.write("\n===============================================================================\n")
sys.stderr.write("Welcome to UniFam v{0}: \n".format(version_str))
# read configuration file
config = configparser.ConfigParser()
config.read(args.configFile)
# Annotating with UniFam
if args.batch:
UniFam_lib_batch.UniFam(args.inputFile,config,args.verbose)
else:
UniFam_lib.UniFam(args.inputFile,config,args.verbose)
# write the configuration file to standard output for checking
# config.write(sys.stdout)
## display work end, and time record, elapsed time
finish_time = datetime.now()
duration = finish_time - start_time
sys.stderr.write("\nTotal Elapsed Time = [%s] \n" % duration)
sys.stderr.write("===============================================================================\n")
##==============================================================
## call from command line (instead of interactively)
##==============================================================
if __name__ == '__main__':
sys.exit(main())
| 40.433962 | 138 | 0.603126 |
f75ee654261c2a65968b7c24776d8541f15000b8 | 53 | py | Python | eupy/native/__init__.py | fivosts/eupy | 9e21ee2cf3ba5666db85723384c7b3422c71286a | [
"MIT"
] | null | null | null | eupy/native/__init__.py | fivosts/eupy | 9e21ee2cf3ba5666db85723384c7b3422c71286a | [
"MIT"
] | 4 | 2020-04-03T00:03:48.000Z | 2020-06-24T18:37:03.000Z | eupy/native/__init__.py | fivosts/eupy | 9e21ee2cf3ba5666db85723384c7b3422c71286a | [
"MIT"
] | null | null | null | __all__ = ['logger', 'plotter', 'profiler', 'shell']
| 26.5 | 52 | 0.622642 |
f75f10b73659b2a0b1773878cea57701dc7c283f | 18,509 | py | Python | tests/layers/test_layers_convolution.py | OliverZijia/tensorlayer2 | 01113b53e84a3bbb298b9c35ebd53254e487350f | [
"Apache-2.0"
] | null | null | null | tests/layers/test_layers_convolution.py | OliverZijia/tensorlayer2 | 01113b53e84a3bbb298b9c35ebd53254e487350f | [
"Apache-2.0"
] | null | null | null | tests/layers/test_layers_convolution.py | OliverZijia/tensorlayer2 | 01113b53e84a3bbb298b9c35ebd53254e487350f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import unittest
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
import tensorlayer as tl
from tensorlayer.layers import *
from tensorlayer.models import *
from tests.utils import CustomTestCase
class Layer_Convolution_1D_Test(CustomTestCase):
@classmethod
def setUpClass(cls):
print("\n#################################")
cls.batch_size = 8
cls.inputs_shape = [cls.batch_size, 100, 1]
cls.input_layer = Input(cls.inputs_shape, name='input_layer')
cls.n1 = tl.layers.Conv1dLayer(
shape=(5, 1, 32), stride=2
)(cls.input_layer)
cls.n2 = tl.layers.Conv1d(
n_filter=32, filter_size=5, stride=2
)(cls.n1)
cls.n3 = tl.layers.DeConv1dLayer(
shape=(5, 64, 32), outputs_shape=(cls.batch_size, 50, 64), strides=(1, 2, 1), name='deconv1dlayer'
)(cls.n2)
cls.n4 = tl.layers.SeparableConv1d(
n_filter=32, filter_size=3, strides=2, padding='SAME', act=tf.nn.relu, name='separable_1d'
)(cls.n3)
cls.n5 = tl.layers.SubpixelConv1d(
scale=2, act=tf.nn.relu, in_channels=32, name='subpixel_1d'
)(cls.n4)
cls.model = Model(
inputs=cls.input_layer, outputs=cls.n5
)
print("Testing Conv1d model: \n", cls.model)
@classmethod
def tearDownClass(cls):
pass
# tf.reset_default_graph()
def test_layer_n1(self):
# self.assertEqual(len(self.n1.all_layers), 2)
# self.assertEqual(len(self.n1.all_params), 2)
# self.assertEqual(self.n1.count_params(), 192)
self.assertEqual(len(self.n1._info[0].layer.weights), 2)
self.assertEqual(self.n1.get_shape().as_list()[1:], [50, 32])
def test_layer_n2(self):
# self.assertEqual(len(self.n2.all_layers), 3)
# self.assertEqual(len(self.n2.all_params), 4)
# self.assertEqual(self.n2.count_params(), 5344)
self.assertEqual(len(self.n2._info[0].layer.weights), 2)
self.assertEqual(self.n2.get_shape().as_list()[1:], [25, 32])
def test_layer_n3(self):
# self.assertEqual(len(self.n2.all_layers), 3)
# self.assertEqual(len(self.n2.all_params), 4)
# self.assertEqual(self.n2.count_params(), 5344)
self.assertEqual(len(self.n3._info[0].layer.weights), 2)
self.assertEqual(self.n3.get_shape().as_list()[1:], [50, 64])
def test_layer_n4(self):
# self.assertEqual(len(self.n2.all_layers), 3)
# self.assertEqual(len(self.n2.all_params), 4)
# self.assertEqual(self.n2.count_params(), 5344)
self.assertEqual(len(self.n4._info[0].layer.weights), 3)
self.assertEqual(self.n4.get_shape().as_list()[1:], [25, 32])
def test_layer_n5(self):
# self.assertEqual(len(self.n2.all_layers), 3)
# self.assertEqual(len(self.n2.all_params), 4)
# self.assertEqual(self.n2.count_params(), 5344)
self.assertEqual(self.n5.get_shape().as_list()[1:], [50, 16])
# def test_layer_n3(self):
#
# self.assertEqual(len(self.n3.all_layers), 4)
# self.assertEqual(len(self.n3.all_params), 7)
# self.assertEqual(self.n3.count_params(), 6496)
# self.assertEqual(self.n3.outputs.get_shape().as_list()[1:], [23, 32])
# FIXME: TF2.0 only supports NHWC now
# class Layer_Convolution_1D_NCW_Test(CustomTestCase):
#
# @classmethod
# def setUpClass(cls):
# print("\n#################################")
#
# cls.batch_size = 8
# cls.inputs_shape = [cls.batch_size, 1, 100]
# cls.input_layer = Input(cls.inputs_shape, name='input_layer')
#
# cls.n1 = tl.layers.Conv1dLayer(
# shape=(5, 1, 32), stride=2, data_format="NCW"
# )(cls.input_layer)
# cls.n2 = tl.layers.Conv1d(
# n_filter=32, filter_size=5, stride=2, data_format='channels_first'
# )(cls.n1)
# cls.model = Model(inputs=cls.input_layer, outputs=cls.n2)
# print("Testing Conv1d model: \n", cls.model)
#
# # cls.n3 = tl.layers.SeparableConv1d(
# # cls.n2, n_filter=32, filter_size=3, strides=1, padding='VALID', act=tf.nn.relu, name='separable_1d'
# # )
#
# @classmethod
# def tearDownClass(cls):
# pass
# # tf.reset_default_graph()
#
# def test_layer_n1(self):
#
# # self.assertEqual(len(self.n1.all_layers), 2)
# # self.assertEqual(len(self.n1.all_params), 2)
# # self.assertEqual(self.n1.count_params(), 192)
# self.assertEqual(len(self.n1._info[0].layer.weights), 2)
# self.assertEqual(self.n1.get_shape().as_list()[1:], [50, 32])
#
# def test_layer_n2(self):
#
# # self.assertEqual(len(self.n2.all_layers), 3)
# # self.assertEqual(len(self.n2.all_params), 4)
# # self.assertEqual(self.n2.count_params(), 5344)
# self.assertEqual(len(self.n2._info[0].layer.weights), 2)
# self.assertEqual(self.n2.get_shape().as_list()[1:], [25, 32])
#
# # def test_layer_n3(self):
# #
# # self.assertEqual(len(self.n3.all_layers), 4)
# # self.assertEqual(len(self.n3.all_params), 7)
# # self.assertEqual(self.n3.count_params(), 6496)
# # self.assertEqual(self.n3.outputs.get_shape().as_list()[1:], [23, 32])
class Layer_Convolution_2D_Test(CustomTestCase):
@classmethod
def setUpClass(cls):
print("\n#################################")
cls.batch_size = 5
cls.inputs_shape = [cls.batch_size, 400, 400, 3]
cls.input_layer = Input(cls.inputs_shape, name='input_layer')
cls.n1 = tl.layers.Conv2dLayer(
act=tf.nn.relu, shape=(5, 5, 3, 32), strides=(1, 2, 2, 1), padding='SAME',
b_init=tf.constant_initializer(value=0.0),
name='conv2dlayer'
)(cls.input_layer)
cls.n2 = tl.layers.Conv2d(
n_filter=32, filter_size=(3, 3), strides=(2, 2), act=None, name='conv2d'
)(cls.n1)
cls.n3 = tl.layers.Conv2d(
n_filter=32, filter_size=(3, 3), strides=(2, 2), act=tf.nn.relu, b_init=None, name='conv2d_no_bias'
)(cls.n2)
cls.n4 = tl.layers.DeConv2dLayer(
shape=(5, 5, 32, 32), outputs_shape=(cls.batch_size, 100, 100, 32), strides=(1, 2, 2, 1), name='deconv2dlayer'
)(cls.n3)
cls.n5 = tl.layers.DeConv2d(
n_filter=32, filter_size=(3, 3), strides=(2, 2), name='DeConv2d'
)(cls.n4)
cls.n6 = tl.layers.DepthwiseConv2d(
filter_size=(3, 3), strides=(1, 1), dilation_rate=(2, 2), act=tf.nn.relu, depth_multiplier=2, name='depthwise'
)(cls.n5)
cls.n7 = tl.layers.Conv2d(
n_filter=32, filter_size=(3, 3), strides=(2, 2), act=tf.nn.relu, in_channels=64, name='conv2d2'
)(cls.n6)
cls.n8 = tl.layers.BinaryConv2d(
n_filter=64, filter_size=(3, 3), strides=(2, 2), act=tf.nn.relu, in_channels=32, name='binaryconv2d'
)(cls.n7)
cls.n9 = tl.layers.SeparableConv2d(
n_filter=32, filter_size=(3, 3), strides=(2, 2), act=tf.nn.relu, name='separableconv2d'
)(cls.n8)
cls.n10 = tl.layers.GroupConv2d(
n_filter=64, filter_size=(3, 3), strides=(2, 2), n_group=2, name='group'
)(cls.n9)
cls.n11 = tl.layers.DorefaConv2d(
n_filter=32, filter_size=(5, 5), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='dorefaconv2d'
)(cls.n10)
cls.n12 = tl.layers.TernaryConv2d(
n_filter=64, filter_size=(5, 5), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='ternaryconv2d'
)(cls.n11)
cls.n13 = tl.layers.QuanConv2d(
n_filter=32, filter_size=(5, 5), strides=(1, 1), act=tf.nn.relu, padding='SAME', name='quancnn2d'
)(cls.n12)
cls.n14 = tl.layers.SubpixelConv2d(
scale=2, act=tf.nn.relu, name='subpixelconv2d'
)(cls.n13)
cls.model = Model(cls.input_layer, cls.n14)
print("Testing Conv2d model: \n", cls.model)
# cls.n12 = tl.layers.QuanConv2d(cls.n11, 64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', name='quancnn')
@classmethod
def tearDownClass(cls):
pass
# tf.reset_default_graph()
def test_layer_n1(self):
# self.assertEqual(len(self.n1.all_layers), 2)
# self.assertEqual(len(self.n1.all_params), 2)
# self.assertEqual(self.n1.count_params(), 2432)
self.assertEqual(len(self.n1._info[0].layer.weights), 2)
self.assertEqual(self.n1.get_shape().as_list()[1:], [200, 200, 32])
def test_layer_n2(self):
# self.assertEqual(len(self.n2.all_layers), 3)
# self.assertEqual(len(self.n2.all_params), 4)
# self.assertEqual(self.n2.count_params(), 11680)
self.assertEqual(len(self.n2._info[0].layer.weights), 2)
self.assertEqual(self.n2.get_shape().as_list()[1:], [100, 100, 32])
def test_layer_n3(self):
# self.assertEqual(len(self.n3.all_layers), 4)
# self.assertEqual(len(self.n3.all_params), 5)
# self.assertEqual(self.n3.count_params(), 20896)
self.assertEqual(len(self.n3._info[0].layer.weights), 1) # b_init is None
self.assertEqual(self.n3.get_shape().as_list()[1:], [50, 50, 32])
def test_layer_n4(self):
# self.assertEqual(len(self.n4.all_layers), 5)
# self.assertEqual(len(self.n4.all_params), 7)
# self.assertEqual(self.n4.count_params(), 46528)
self.assertEqual(len(self.n4._info[0].layer.weights), 2)
self.assertEqual(self.n4.get_shape().as_list()[1:], [100, 100, 32])
def test_layer_n5(self):
# self.assertEqual(len(self.n5.all_layers), 6)
# self.assertEqual(len(self.n5.all_params), 9)
# self.assertEqual(self.n5.count_params(), 55776)
self.assertEqual(len(self.n5._info[0].layer.weights), 2)
self.assertEqual(self.n5.get_shape().as_list()[1:], [200, 200, 32])
def test_layer_n6(self):
# self.assertEqual(len(self.n6.all_layers), 7)
# self.assertEqual(len(self.n6.all_params), 11)
# self.assertEqual(self.n6.count_params(), 56416)
self.assertEqual(len(self.n6._info[0].layer.weights), 2)
self.assertEqual(self.n6.get_shape().as_list()[1:], [200, 200, 64])
def test_layer_n7(self):
# self.assertEqual(len(self.n7.all_layers), 8)
# self.assertEqual(len(self.n7.all_params), 13)
# self.assertEqual(self.n7.count_params(), 74880)
self.assertEqual(len(self.n7._info[0].layer.weights), 2)
self.assertEqual(self.n7.get_shape().as_list()[1:], [100, 100, 32])
def test_layer_n8(self):
# self.assertEqual(len(self.n7.all_layers), 8)
# self.assertEqual(len(self.n7.all_params), 13)
# self.assertEqual(self.n7.count_params(), 74880)
self.assertEqual(len(self.n8._info[0].layer.weights), 2)
self.assertEqual(self.n8.get_shape().as_list()[1:], [50, 50, 64])
def test_layer_n9(self):
# self.assertEqual(len(self.n7.all_layers), 8)
# self.assertEqual(len(self.n7.all_params), 13)
# self.assertEqual(self.n7.count_params(), 74880)
self.assertEqual(len(self.n9._info[0].layer.weights), 3)
self.assertEqual(self.n9.get_shape().as_list()[1:], [24, 24, 32])
def test_layer_n10(self):
# self.assertEqual(len(self.n7.all_layers), 8)
# self.assertEqual(len(self.n7.all_params), 13)
# self.assertEqual(self.n7.count_params(), 74880)
self.assertEqual(len(self.n10._info[0].layer.weights), 2)
self.assertEqual(self.n10.get_shape().as_list()[1:], [12, 12, 64])
def test_layer_n11(self):
# self.assertEqual(len(self.n7.all_layers), 8)
# self.assertEqual(len(self.n7.all_params), 13)
# self.assertEqual(self.n7.count_params(), 74880)
self.assertEqual(len(self.n11._info[0].layer.weights), 2)
self.assertEqual(self.n11.get_shape().as_list()[1:], [12, 12, 32])
def test_layer_n12(self):
# self.assertEqual(len(self.n7.all_layers), 8)
# self.assertEqual(len(self.n7.all_params), 13)
# self.assertEqual(self.n7.count_params(), 74880)
self.assertEqual(len(self.n12._info[0].layer.weights), 2)
self.assertEqual(self.n12.get_shape().as_list()[1:], [12, 12, 64])
def test_layer_n13(self):
# self.assertEqual(len(self.n7.all_layers), 8)
# self.assertEqual(len(self.n7.all_params), 13)
# self.assertEqual(self.n7.count_params(), 74880)
self.assertEqual(len(self.n13._info[0].layer.weights), 2)
self.assertEqual(self.n13.get_shape().as_list()[1:], [12, 12, 32])
def test_layer_n14(self):
self.assertEqual(self.n14.get_shape().as_list()[1:], [24, 24, 8])
# def test_layer_n8(self):
#
# self.assertEqual(len(self.n8.all_layers), 9)
# self.assertEqual(len(self.n8.all_params), 15)
# self.assertEqual(self.n8.count_params(), 79520)
# self.assertEqual(self.n8.outputs.get_shape().as_list()[1:], [50, 50, 32])
#
# def test_layer_n9(self):
#
# self.assertEqual(len(self.n9.all_layers), 10)
# self.assertEqual(len(self.n9.all_params), 18)
# self.assertEqual(self.n9.count_params(), 80864)
# self.assertEqual(self.n9.outputs.get_shape().as_list()[1:], [48, 48, 32])
#
# def test_layer_n10(self):
#
# self.assertEqual(len(self.n10.all_layers), 11)
# self.assertEqual(len(self.n10.all_params), 20)
# self.assertEqual(self.n10.count_params(), 132128)
# self.assertEqual(self.n10.outputs.get_shape().as_list()[1:], [48, 48, 64])
#
# def test_layer_n11(self):
#
# self.assertEqual(len(self.n11.all_layers), 12)
# self.assertEqual(len(self.n11.all_params), 22)
# self.assertEqual(self.n11.count_params(), 150592)
# self.assertEqual(self.n11.outputs.get_shape().as_list()[1:], [96, 96, 32])
#
# def test_layer_n12(self):
#
# self.assertEqual(len(self.n12.all_layers), 13)
# self.assertEqual(len(self.n12.all_params), 24)
# self.assertEqual(self.n12.count_params(), 201856)
# self.assertEqual(self.n12.outputs.get_shape().as_list()[1:], [96, 96, 64])
class Layer_Convolution_3D_Test(CustomTestCase):
@classmethod
def setUpClass(cls):
print("\n#################################")
cls.batch_size = 5
cls.inputs_shape = [cls.batch_size, 20, 20, 20, 3]
cls.input_layer = Input(cls.inputs_shape, name='input_layer')
cls.n1 = tl.layers.Conv3dLayer(
shape=(2, 2, 2, 3, 32), strides=(1, 2, 2, 2, 1)
)(cls.input_layer)
cls.n2 = tl.layers.DeConv3dLayer(
shape=(2, 2, 2, 128, 32), outputs_shape=(cls.batch_size, 20, 20, 20, 128), strides=(1, 2, 2, 2, 1)
)(cls.n1)
cls.n3 = tl.layers.Conv3d(
n_filter=64, filter_size=(3, 3, 3), strides=(3, 3, 3), act=tf.nn.relu, b_init=None, in_channels=128, name='conv3d_no_bias'
)(cls.n2)
cls.n4 = tl.layers.DeConv3d(
n_filter=32, filter_size=(3, 3, 3), strides=(2, 2, 2)
)(cls.n3)
cls.model = Model(inputs=cls.input_layer, outputs=cls.n4)
print("Testing Conv3d model: \n", cls.model)
@classmethod
def tearDownClass(cls):
pass
# tf.reset_default_graph()
def test_layer_n1(self):
# self.assertEqual(len(self.n1.all_layers), 2)
# self.assertEqual(len(self.n1.all_params), 2)
# self.assertEqual(self.n1.count_params(), 800)
self.assertEqual(len(self.n1._info[0].layer.weights), 2)
self.assertEqual(self.n1.get_shape().as_list()[1:], [10, 10, 10, 32])
def test_layer_n2(self):
# self.assertEqual(len(self.n2.all_layers), 3)
# self.assertEqual(len(self.n2.all_params), 4)
# self.assertEqual(self.n2.count_params(), 33696)
self.assertEqual(len(self.n2._info[0].layer.weights), 2)
self.assertEqual(self.n2.get_shape().as_list()[1:], [20, 20, 20, 128])
def test_layer_n3(self):
# self.assertEqual(len(self.n3.all_layers), 4)
# self.assertEqual(len(self.n3.all_params), 6)
# self.assertEqual(self.n3.count_params(), 144320)
self.assertEqual(len(self.n3._info[0].layer.weights), 1) # b_init is None
self.assertEqual(self.n3.get_shape().as_list()[1:], [7, 7, 7, 64])
def test_layer_n4(self):
# self.assertEqual(len(self.n3.all_layers), 4)
# self.assertEqual(len(self.n3.all_params), 6)
# self.assertEqual(self.n3.count_params(), 144320)
self.assertEqual(len(self.n4._info[0].layer.weights), 2)
self.assertEqual(self.n4.get_shape().as_list()[1:], [14, 14, 14, 32])
# class Layer_DeformableConvolution_Test(CustomTestCase):
#
# @classmethod
# def setUpClass(cls):
#
# cls.batch_size = 5
# cls.inputs_shape = [cls.batch_size, 299, 299, 3]
# cls.input_layer = Input(cls.inputs_shape, name='input_layer')
#
# offset1 = tl.layers.Conv2d(
# 18, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', name='offset1'
# )(cls.input_layer)
# cls.net1 = tl.layers.DeformableConv2d(
# offset1, 32, (3, 3), act=tf.nn.relu, name='deformable1'
# )(cls.input_layer)
#
# offset2 = tl.layers.Conv2d(
# 18, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', name='offset2'
# )(cls.net1)
# cls.net2 = tl.layers.DeformableConv2d(
# offset2, 64, (3, 3), act=tf.nn.relu, name='deformable2'
# )(cls.net1)
#
# @classmethod
# def tearDownClass(cls):
# pass
#
# def test_layer_n1(self):
#
# self.assertEqual(len(self.net1.all_layers), 2)
# self.assertEqual(len(self.net1.all_params), 2)
# self.assertEqual(self.net1.count_params(), 896)
# self.assertEqual(self.net1.outputs.get_shape().as_list()[1:], [299, 299, 32])
#
# def test_layer_n2(self):
#
# self.assertEqual(len(self.net2.all_layers), 3)
# self.assertEqual(len(self.net2.all_params), 4)
# self.assertEqual(self.net2.count_params(), 19392)
# self.assertEqual(self.net2.outputs.get_shape().as_list()[1:], [299, 299, 64])
if __name__ == '__main__':
tl.logging.set_verbosity(tl.logging.DEBUG)
unittest.main()
| 38.084362 | 134 | 0.607164 |
f75f24b70858f398a5be3908ae2432e2736ed3f9 | 12,866 | py | Python | tests/platform_tests/mellanox/check_sysfs.py | vincent201881/sonic-mgmt | 4f02bb5f91600ae5180ace1620a718caf02c63a1 | [
"Apache-2.0"
] | null | null | null | tests/platform_tests/mellanox/check_sysfs.py | vincent201881/sonic-mgmt | 4f02bb5f91600ae5180ace1620a718caf02c63a1 | [
"Apache-2.0"
] | null | null | null | tests/platform_tests/mellanox/check_sysfs.py | vincent201881/sonic-mgmt | 4f02bb5f91600ae5180ace1620a718caf02c63a1 | [
"Apache-2.0"
] | null | null | null | """
Helper script for checking status of sysfs.
This script contains re-usable functions for checking status of hw-management related sysfs.
"""
import logging
from tests.common.utilities import wait_until
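# Typical usage from a platform test case (the "duthost" fixture name is an assumption
# of the calling test, not something this module defines):
#   def test_hw_management_sysfs(duthost):
#       check_sysfs(duthost)
#   def test_psu_sysfs_absent(duthost):
#       check_psu_sysfs(duthost, psu_id="1", psu_state="NOT PRESENT")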
def check_sysfs(dut):
"""
@summary: Check various hw-management related sysfs under /var/run/hw-management
"""
logging.info("Check broken symbolinks")
broken_symbolinks = dut.command("find /var/run/hw-management -xtype l")
assert len(broken_symbolinks["stdout_lines"]) == 0, \
"Found some broken symbolinks: %s" % str(broken_symbolinks["stdout_lines"])
logging.info("Check content of some key files")
file_asic = dut.command("cat /var/run/hw-management/thermal/asic")
try:
asic_temp = float(file_asic["stdout"]) / 1000
assert 0 < asic_temp < 105, "Abnormal ASIC temperature: %s" % file_asic["stdout"]
except Exception as e:
assert False, "Bad content in /var/run/hw-management/thermal/asic: %s" % repr(e)
dut_hwsku = dut.facts["hwsku"]
from tests.common.mellanox_data import SWITCH_MODELS
fan_count = SWITCH_MODELS[dut_hwsku]["fans"]["number"]
fan_min_speed = 0
fan_max_speed = 0
for fan_id in range(1, fan_count + 1):
if SWITCH_MODELS[dut_hwsku]["fans"]["hot_swappable"]:
fan_status = "/var/run/hw-management/thermal/fan{}_status".format(fan_id)
fan_status_content = dut.command("cat %s" % fan_status)
assert fan_status_content["stdout"] == "1", "Content of %s is not 1" % fan_status
fan_fault = "/var/run/hw-management/thermal/fan{}_fault".format(fan_id)
fan_fault_content = dut.command("cat %s" % fan_fault)
assert fan_fault_content["stdout"] == "0", "Content of %s is not 0" % fan_fault
fan_min = "/var/run/hw-management/thermal/fan{}_min".format(fan_id)
try:
fan_min_content = dut.command("cat %s" % fan_min)
fan_min_speed = int(fan_min_content["stdout"])
assert fan_min_speed > 0, "Bad fan minimum speed: %s" % str(fan_min_speed)
except Exception as e:
assert "Get content from %s failed, exception: %s" % (fan_min, repr(e))
fan_max = "/var/run/hw-management/thermal/fan{}_max".format(fan_id)
try:
fan_max_content = dut.command("cat %s" % fan_max)
fan_max_speed = int(fan_max_content["stdout"])
assert fan_max_speed > 10000, "Bad fan maximum speed: %s" % str(fan_max_speed)
except Exception as e:
assert "Get content from %s failed, exception: %s" % (fan_max, repr(e))
fan_speed_set = "/var/run/hw-management/thermal/fan{}_speed_set".format(fan_id)
fan_speed_set_content = dut.command("cat %s" % fan_speed_set)
fan_set_speed = int(fan_speed_set_content["stdout"])
max_tolerance_speed = ((float(fan_set_speed) / 256) * fan_max_speed) * (1 + 0.5)
min_tolerance_speed = ((float(fan_set_speed) / 256) * fan_max_speed) * (1 - 0.5)
fan_speed_get = "/var/run/hw-management/thermal/fan{}_speed_get".format(fan_id)
assert wait_until(30, 5, _check_fan_speed_in_range, dut, fan_min_speed, fan_max_speed,
min_tolerance_speed, max_tolerance_speed, fan_speed_get), \
"Fan speed not in range"
cpu_temp_high_counter = 0
cpu_temp_list = []
cpu_crit_temp_list = []
cpu_pack_count = SWITCH_MODELS[dut_hwsku]["cpu_pack"]["number"]
if cpu_pack_count != 0:
cpu_pack_temp_file = "/var/run/hw-management/thermal/cpu_pack"
cpu_pack_temp_file_output = dut.command("cat %s" % cpu_pack_temp_file)
cpu_pack_temp = float(cpu_pack_temp_file_output["stdout"])/1000
cpu_pack_max_temp_file = "/var/run/hw-management/thermal/cpu_pack_max"
cpu_pack_max_temp_file_output = dut.command("cat %s" % cpu_pack_max_temp_file)
cpu_pack_max_temp = float(cpu_pack_max_temp_file_output["stdout"])/1000
cpu_pack_crit_temp_file = "/var/run/hw-management/thermal/cpu_pack_crit"
cpu_pack_crit_temp_file_output = dut.command("cat %s" % cpu_pack_crit_temp_file)
cpu_pack_crit_temp = float(cpu_pack_crit_temp_file_output["stdout"])/1000
assert cpu_pack_max_temp <= cpu_pack_crit_temp, "Bad CPU pack max temp or critical temp, %s, %s " \
% (str(cpu_pack_max_temp), str(cpu_pack_crit_temp))
if cpu_pack_temp >= cpu_pack_crit_temp:
cpu_temp_high_counter += 1
cpu_temp_list.append(cpu_pack_temp)
cpu_crit_temp_list.append(cpu_pack_crit_temp)
cpu_core_count = SWITCH_MODELS[dut_hwsku]["cpu_cores"]["number"]
for core_id in range(0, cpu_core_count):
cpu_core_temp_file = "/var/run/hw-management/thermal/cpu_core{}".format(core_id)
cpu_core_temp_file_output = dut.command("cat %s" % cpu_core_temp_file)
cpu_core_temp = float(cpu_core_temp_file_output["stdout"])/1000
cpu_core_max_temp_file = "/var/run/hw-management/thermal/cpu_core{}_max".format(core_id)
cpu_core_max_temp_file_output = dut.command("cat %s" % cpu_core_max_temp_file)
cpu_core_max_temp = float(cpu_core_max_temp_file_output["stdout"])/1000
cpu_core_crit_temp_file = "/var/run/hw-management/thermal/cpu_core{}_crit".format(core_id)
cpu_core_crit_temp_file_output = dut.command("cat %s" % cpu_core_crit_temp_file)
cpu_core_crit_temp = float(cpu_core_crit_temp_file_output["stdout"])/1000
assert cpu_core_max_temp <= cpu_core_crit_temp, "Bad CPU core%d max temp or critical temp, %s, %s " \
% (core_id, str(cpu_core_max_temp), str(cpu_core_crit_temp))
if cpu_core_temp >= cpu_core_crit_temp:
cpu_temp_high_counter += 1
cpu_temp_list.append(cpu_core_temp)
cpu_crit_temp_list.append(cpu_core_crit_temp)
if cpu_temp_high_counter > 0:
logging.info("CPU temperatures {}".format(cpu_temp_list))
logging.info("CPU critical temperatures {}".format(cpu_crit_temp_list))
assert False, "At least {} of the CPU cores or pack is overheated".format(cpu_temp_high_counter)
psu_count = SWITCH_MODELS[dut_hwsku]["psus"]["number"]
for psu_id in range(1, psu_count + 1):
if SWITCH_MODELS[dut_hwsku]["psus"]["hot_swappable"]:
# If the PSU is poweroff, all PSU thermal related sensors are not available.
# In that case, just skip the following tests
psu_status_file = "/var/run/hw-management/thermal/psu{}_status".format(psu_id)
psu_status_output = dut.command("cat %s" % psu_status_file)
psu_status = int(psu_status_output["stdout"])
if not psu_status:
logging.info("PSU %d doesn't exist, skipped" % psu_id)
continue
psu_pwr_status_file = "/var/run/hw-management/thermal/psu{}_pwr_status".format(psu_id)
psu_pwr_status_output = dut.command("cat %s" % psu_pwr_status_file)
psu_pwr_status = int(psu_pwr_status_output["stdout"])
if not psu_pwr_status:
logging.info("PSU %d isn't poweron, skipped" % psu_id)
continue
psu_temp_file = "/var/run/hw-management/thermal/psu{}_temp".format(psu_id)
psu_temp_file_output = dut.command("cat %s" % psu_temp_file)
psu_temp = float(psu_temp_file_output["stdout"])/1000
psu_max_temp_file = "/var/run/hw-management/thermal/psu{}_temp_max".format(psu_id)
psu_max_temp_file_output = dut.command("cat %s" % psu_max_temp_file)
psu_max_temp = float(psu_max_temp_file_output["stdout"])/1000
assert psu_temp < psu_max_temp, "PSU%d overheated, temp: %s" % (psu_id, str(psu_temp))
psu_max_temp_alarm_file = "/var/run/hw-management/thermal/psu{}_temp_max_alarm".format(psu_id)
psu_max_temp_alarm_file_output = dut.command("cat %s" % psu_max_temp_alarm_file)
assert psu_max_temp_alarm_file_output["stdout"] == '0', "PSU{} temp alarm set".format(psu_id)
psu_fan_speed_get = "/var/run/hw-management/thermal/psu{}_fan1_speed_get".format(psu_id)
try:
psu_fan_speed_get_content = dut.command("cat %s" % psu_fan_speed_get)
psu_fan_speed = int(psu_fan_speed_get_content["stdout"])
assert psu_fan_speed > 1000, "Bad fan speed: %s" % str(psu_fan_speed)
except Exception as e:
assert "Get content from %s failed, exception: %s" % (psu_fan_speed_get, repr(e))
sfp_count = SWITCH_MODELS[dut_hwsku]["ports"]["number"]
for sfp_id in range(1, sfp_count + 1):
sfp_temp_fault_file = "/var/run/hw-management/thermal/module{}_temp_fault".format(sfp_id)
sfp_temp_fault_file_output = dut.command("cat %s" % sfp_temp_fault_file)
assert sfp_temp_fault_file_output["stdout"] == '0', "SFP%d temp fault" % sfp_id
sfp_temp_file = "/var/run/hw-management/thermal/module{}_temp_input".format(sfp_id)
sfp_temp_file_output = dut.command("cat %s" % sfp_temp_file)
if sfp_temp_file_output["stdout"] != '0':
sfp_temp = float(sfp_temp_file_output["stdout"])/1000
else:
sfp_temp = 0
sfp_temp_crit_file = "/var/run/hw-management/thermal/module{}_temp_crit".format(sfp_id)
sfp_temp_crit_file_output = dut.command("cat %s" % sfp_temp_crit_file)
if sfp_temp_crit_file_output["stdout"] != '0':
sfp_temp_crit = float(sfp_temp_crit_file_output["stdout"])/1000
else:
sfp_temp_crit = 0
sfp_temp_emergency_file = "/var/run/hw-management/thermal/module{}_temp_emergency".format(sfp_id)
sfp_temp_emergency_file_output = dut.command("cat %s" % sfp_temp_emergency_file)
if sfp_temp_emergency_file_output["stdout"] != '0':
sfp_temp_emergency = float(sfp_temp_emergency_file_output["stdout"])/1000
else:
sfp_temp_emergency = 0
if sfp_temp_crit != 0:
            assert sfp_temp < sfp_temp_crit, "SFP%d overheated, temp: %s" % (sfp_id, str(sfp_temp))
assert sfp_temp_crit < sfp_temp_emergency, "Wrong SFP critical temp or emergency temp, " \
"critical temp: %s emergency temp: %s" \
% (str(sfp_temp_crit), str(sfp_temp_emergency))
def check_psu_sysfs(dut, psu_id, psu_state):
"""
@summary: Check psu related sysfs under /var/run/hw-management/thermal against psu_state
"""
psu_exist = "/var/run/hw-management/thermal/psu%s_status" % psu_id
if psu_state == "NOT PRESENT":
psu_exist_content = dut.command("cat %s" % psu_exist)
logging.info("PSU state %s file %s read %s" % (psu_state, psu_exist, psu_exist_content["stdout"]))
assert psu_exist_content["stdout"] == "0", "CLI returns NOT PRESENT while %s contains %s" % \
(psu_exist, psu_exist_content["stdout"])
else:
from tests.common.mellanox_data import SWITCH_MODELS
dut_hwsku = dut.facts["hwsku"]
        hot_swappable = SWITCH_MODELS[dut_hwsku]["psus"]["hot_swappable"]
        if hot_swappable:
psu_exist_content = dut.command("cat %s" % psu_exist)
logging.info("PSU state %s file %s read %s" % (psu_state, psu_exist, psu_exist_content["stdout"]))
assert psu_exist_content["stdout"] == "1", "CLI returns %s while %s contains %s" % \
(psu_state, psu_exist, psu_exist_content["stdout"])
psu_pwr_state = "/var/run/hw-management/thermal/psu%s_pwr_status" % psu_id
psu_pwr_state_content = dut.command("cat %s" % psu_pwr_state)
logging.info("PSU state %s file %s read %s" % (psu_state, psu_pwr_state, psu_pwr_state_content["stdout"]))
assert (psu_pwr_state_content["stdout"] == "1" and psu_state == "OK") \
or (psu_pwr_state_content["stdout"] == "0" and psu_state == "NOT OK"),\
"sysfs content %s mismatches with psu_state %s" % (psu_pwr_state_content["stdout"], psu_state)
def _check_fan_speed_in_range(dut, min_speed, max_speed, low_threshold, high_threshold, sysfs_path):
try:
fan_speed_get_content = dut.command("cat %s" % sysfs_path)
fan_speed = int(fan_speed_get_content["stdout"])
logging.info("fan speed: {}, min_speed: {}, max_speed: {}, low_threshold: {}, high_threshold: {}".format(
fan_speed,
min_speed,
max_speed,
low_threshold,
high_threshold
))
return min_speed < fan_speed < max_speed and low_threshold < fan_speed < high_threshold
except Exception as e:
assert "Get content from %s failed, exception: %s" % (sysfs_path, repr(e))
| 53.832636 | 116 | 0.656459 |
f75f56da07863bf68a68f89adfb3a1524d06cf9c | 661 | py | Python | scripts/spectrogram_example.py | KISMED-TUDa/ECG_Classification | 7df7b6d28287f592536cdbf01b6aec73e7b045ef | [
"MIT"
] | 3 | 2021-12-07T17:08:00.000Z | 2021-12-08T23:16:57.000Z | scripts/spectrogram_example.py | KISMED-TUDa/ECG_Classification | 7df7b6d28287f592536cdbf01b6aec73e7b045ef | [
"MIT"
] | 1 | 2021-12-09T00:33:41.000Z | 2021-12-09T15:59:48.000Z | scripts/spectrogram_example.py | KISMED-TUDa/ECG_Classification | 7df7b6d28287f592536cdbf01b6aec73e7b045ef | [
"MIT"
] | 1 | 2021-07-30T14:53:48.000Z | 2021-07-30T14:53:48.000Z | from scipy.signal import spectrogram
import numpy as np
import matplotlib.pyplot as plt
import torch
import torchaudio
from wettbewerb import load_references
if __name__ == '__main__':
ecg_leads = load_references("../data/training/")[0]
for ecg_lead_ in ecg_leads:
if ecg_lead_.shape[0] == 18000:
ecg_lead = torch.from_numpy(ecg_lead_).float()
break
print(ecg_lead.shape)
s = torchaudio.transforms.Spectrogram(n_fft=64, win_length=64, hop_length=32, power=2, normalized=False)(ecg_lead)
s = torch.log(s.clamp(min=1e-08))
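    # Rough shape expectation (exact frame count depends on torchaudio's padding/centering
    # defaults): (n_fft // 2 + 1, n_samples // hop_length + 1) = (33, 563) frequency-by-time
    # bins for an 18000-sample lead with n_fft=64 and hop_length=32.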
print(s.shape)
plt.imshow(s, aspect="auto")
plt.show()
print(s.shape)
| 30.045455 | 118 | 0.694402 |
f75f98477c9240f0a7e7a888bb994709084f6e5f | 1,342 | py | Python | rns/util.py | matwilso/relation-networks | 66c67b342a90ae3699e576dcec883c329905b2e0 | [
"MIT"
] | null | null | null | rns/util.py | matwilso/relation-networks | 66c67b342a90ae3699e576dcec883c329905b2e0 | [
"MIT"
] | null | null | null | rns/util.py | matwilso/relation-networks | 66c67b342a90ae3699e576dcec883c329905b2e0 | [
"MIT"
] | null | null | null | import tensorflow as tf
def merge_summaries(sd, id):
summaries = []
for key in sd.keys():
summaries.append(tf.summary.scalar(key, sd[key]))
for key in id.keys():
summaries.append(tf.summary.image(key, id[key]))
return tf.summary.merge(summaries)
def pack_images(images, rows, cols):
"""Helper utility to make a field of images."""
shape = tf.shape(images)
width = shape[-3]
height = shape[-2]
depth = shape[-1]
images = tf.reshape(images, (-1, width, height, depth))
batch = tf.shape(images)[0]
rows = tf.minimum(rows, batch)
cols = tf.minimum(batch // rows, cols)
images = images[:rows * cols]
images = tf.reshape(images, (rows, cols, width, height, depth))
images = tf.transpose(images, [0, 2, 1, 3, 4])
images = tf.reshape(images, [1, rows * width, cols * height, depth])
return images
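# Shape example (illustrative): 64 images of shape (28, 28, 1) packed with rows=8, cols=8
# become a single (1, 8 * 28, 8 * 28, 1) = (1, 224, 224, 1) tile suitable for tf.summary.image.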
def image_tile_summary(name, tensor, rows=8, cols=8):
tf.summary.image(name, pack_images(tensor, rows, cols), max_outputs=3)
def cartesian_product(a, b):
    # Broadcast a and b against each other so every (b_i, a_j) pair appears exactly once
    a, b = a[None, :, None], b[:, None, None]
    prod = tf.concat([b + tf.zeros_like(a), tf.zeros_like(b) + a], axis=2)
    # new_shape = tf.stack([-1, tf.shape(prod)[-1]])
    # prod = tf.reshape(prod, new_shape)
    prod = tf.reshape(prod, [-1])
    return prod
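# Worked example (illustrative): a = [1, 2], b = [10, 20] first yields the (2, 2, 2) pair grid
# [[[10, 1], [10, 2]], [[20, 1], [20, 2]]], which is then flattened to
# [10, 1, 10, 2, 20, 1, 20, 2].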
| 36.27027 | 76 | 0.638599 |
f75fa6c39c363d3440d893269a09f7b6f67c5cae | 3,019 | py | Python | docs/conf.py | VolodymyrChapman/cellpose | 4a4e78cda44a0728b5bfd27b389e418679ec2b0e | [
"BSD-3-Clause"
] | 504 | 2020-02-04T06:42:53.000Z | 2022-03-31T06:13:11.000Z | docs/conf.py | VolodymyrChapman/cellpose | 4a4e78cda44a0728b5bfd27b389e418679ec2b0e | [
"BSD-3-Clause"
] | 457 | 2020-02-04T20:53:06.000Z | 2022-03-30T07:30:32.000Z | docs/conf.py | VolodymyrChapman/cellpose | 4a4e78cda44a0728b5bfd27b389e418679ec2b0e | [
"BSD-3-Clause"
] | 208 | 2020-02-04T15:50:20.000Z | 2022-03-31T14:57:48.000Z | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'cellpose'
copyright = '2020, Carsen Stringer & Marius Pachitariu'
author = 'Carsen Stringer & Marius Pachitariu'
# The full version, including alpha/beta/rc tags
release = '0.7.2'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.mathjax",
"sphinx.ext.viewcode",
'sphinx.ext.napoleon']
#extensions = ['sphinx.ext.autodoc',
# 'sphinx.ext.mathjax',
# 'sphinx.ext.viewcode',
# 'sphinx.ext.autosummary',
# 'sphinx.ext.doctest',
# 'sphinx.ext.inheritance_diagram',
# 'autoapi.extension',
# 'sphinx.ext.napoleon']
autoapi_dirs = ['../cellpose']
source_suffix='.rst'
master_doc = 'index'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
html_logo = '_static/favicon.ico'
html_favicon = '_static/favicon.ico'
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_theme_path = ["_themes", ]
html_theme_options = {
'canonical_url': '',
'analytics_id': 'UA-XXXXXXX-1', # Provided by Google in your dashboard
'logo_only': False,
'display_version': True,
'prev_next_buttons_location': 'top',
'style_external_links': False,
'style_nav_header_background': 'black',
# Toc options
'collapse_navigation': True,
'sticky_navigation': True,
'navigation_depth': 4,
'includehidden': True,
'titles_only': False
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static'] | 32.462366 | 79 | 0.654521 |
f75fa91922dc4e630aeb219758e3e459b8cab923 | 6,143 | py | Python | models/StereoCNN/Resnet_module.py | daili0015/ModelFeast | 0689ced4d0f37be438d3a91908e5e4cc5b7d54b8 | [
"MIT"
] | 247 | 2019-03-05T07:12:29.000Z | 2022-03-29T01:51:17.000Z | models/StereoCNN/Resnet_module.py | jungerschwarz/ModelFeast | 03afca0b129532135910ee2ac72a3b85be795289 | [
"MIT"
] | 8 | 2019-05-21T03:05:27.000Z | 2021-12-09T03:22:51.000Z | models/StereoCNN/Resnet_module.py | jungerschwarz/ModelFeast | 03afca0b129532135910ee2ac72a3b85be795289 | [
"MIT"
] | 47 | 2019-03-05T07:14:13.000Z | 2021-11-11T01:04:28.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: zcy
# @Date: 2019-02-14 19:29:27
# @Last Modified by: zcy
# @Last Modified time: 2019-02-15 15:06:31
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from functools import partial
__all__ = ['ResNet', 'BasicBlock', 'Bottleneck']
def conv3x3x3(in_planes, out_planes, stride=1):
# 3x3x3 convolution with padding
return nn.Conv3d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=1,
bias=False)
def downsample_basic_block(x, planes, stride):
out = F.avg_pool3d(x, kernel_size=1, stride=stride)
zero_pads = torch.Tensor(
out.size(0), planes - out.size(1), out.size(2), out.size(3),
out.size(4)).zero_()
if isinstance(out.data, torch.cuda.FloatTensor):
zero_pads = zero_pads.cuda()
out = torch.cat([out.data, zero_pads], dim=1)
return out
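# Parameter-free "type A" shortcut: average pooling reduces the spatio-temporal resolution and
# the missing output channels are zero-padded, e.g. a (N, 64, D, H, W) tensor with planes=128,
# stride=2 becomes (N, 128, D/2, H/2, W/2) without introducing any learnable weights.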
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm3d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3x3(planes, planes)
self.bn2 = nn.BatchNorm3d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv3d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm3d(planes)
self.conv2 = nn.Conv3d(
planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm3d(planes)
self.conv3 = nn.Conv3d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm3d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self,
block,
layers,
shortcut_type='B',
n_classes=400,
in_channels=3):
super(ResNet, self).__init__()
first_features = 64 if in_channels==3 else 32
self.inplanes = first_features
self.conv1 = nn.Conv3d(
in_channels,
first_features,
kernel_size=7,
stride=(1, 2, 2),
padding=(3, 3, 3),
bias=False)
self.bn1 = nn.BatchNorm3d(first_features)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool3d(kernel_size=(3, 3, 3), stride=2, padding=1)
self.layer1 = self._make_layer(block, first_features, layers[0], shortcut_type)
self.layer2 = self._make_layer(
block, 128, layers[1], shortcut_type, stride=2)
self.layer3 = self._make_layer(
block, 256, layers[2], shortcut_type, stride=2)
self.layer4 = self._make_layer(
block, 512, layers[3], shortcut_type, stride=2)
# last_duration = int(math.ceil(sample_duration / 16))
# last_size = int(math.ceil(sample_size / 32))
# self.avgpool = nn.AvgPool3d(
# (last_duration, last_size, last_size), stride=1)
self.fc = nn.Linear(512 * block.expansion, n_classes)
for m in self.modules():
if isinstance(m, nn.Conv3d):
m.weight = nn.init.kaiming_normal_(m.weight, mode='fan_out')
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, shortcut_type, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
if shortcut_type == 'A':
downsample = partial(
downsample_basic_block,
planes=planes * block.expansion,
stride=stride)
else:
downsample = nn.Sequential(
nn.Conv3d(
self.inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False), nn.BatchNorm3d(planes * block.expansion))
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
# x = self.avgpool(x)
x = F.adaptive_avg_pool3d(x, (1, 1, 1))
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
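# The factory helpers below are a sketch following the common 3D-ResNet reference
# implementation; the depth-to-layer mappings are assumptions, not definitions taken
# from this repository. They make the resnet10 call in the __main__ block resolvable.
def resnet10(**kwargs):
    """10-layer model: one BasicBlock per stage."""
    return ResNet(BasicBlock, [1, 1, 1, 1], **kwargs)
def resnet18(**kwargs):
    """18-layer model: two BasicBlocks per stage."""
    return ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
def resnet50(**kwargs):
    """50-layer model: Bottleneck blocks with [3, 4, 6, 3] stages."""
    return ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)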
if __name__ == '__main__':
a = 64
img_size=(a, a)
model = resnet10(n_classes=2, in_channels=1)
x = torch.randn(3, 1, 22, img_size[0], img_size[1])
# (BatchSize, channels, depth, h, w)
y = model(x)
print(y.size())
| 29.252381 | 87 | 0.562917 |
f75fb23d82a52351d2f8e2805783df5a90cd225a | 24,187 | py | Python | openpype/settings/entities/dict_mutable_keys_entity.py | yosuperdope/OpenPype | 0c90df97ddb8cda291a4f66d35da58b3deb94a71 | [
"MIT"
] | 44 | 2019-03-19T04:56:35.000Z | 2021-04-23T12:05:08.000Z | openpype/settings/entities/dict_mutable_keys_entity.py | jrsndl/pype | f9d80ef2c0663921291c5f47d24bea51fc43bac7 | [
"MIT"
] | 655 | 2020-03-17T15:10:21.000Z | 2021-04-23T18:22:52.000Z | openpype/settings/entities/dict_mutable_keys_entity.py | jrsndl/pype | f9d80ef2c0663921291c5f47d24bea51fc43bac7 | [
"MIT"
] | 21 | 2019-03-19T04:56:38.000Z | 2021-04-23T09:10:59.000Z | import re
import copy
from .lib import (
NOT_SET,
OverrideState
)
from . import EndpointEntity
from .exceptions import (
DefaultsNotDefined,
InvalidKeySymbols,
StudioDefaultsNotDefined,
RequiredKeyModified,
EntitySchemaError
)
from openpype.settings.constants import (
METADATA_KEYS,
M_DYNAMIC_KEY_LABEL,
M_ENVIRONMENT_KEY,
KEY_REGEX,
KEY_ALLOWED_SYMBOLS
)
class DictMutableKeysEntity(EndpointEntity):
"""Dictionary entity that has mutable keys.
    Keys of the entity's children can be modified, removed or added. All children
    share a single predefined entity type, so it is not possible to mix two
    different entity types under one dictionary.
TODOs:
- cleanup children on pop
- remove child's reference to parent
- clear callbacks
"""
schema_types = ["dict-modifiable"]
_default_label_wrap = {
"use_label_wrap": True,
"collapsible": True,
"collapsed": True
}
_miss_arg = object()
def __getitem__(self, key):
if key not in self.children_by_key:
self.add_key(key)
return self.children_by_key[key]
def __setitem__(self, key, value):
self.set_key_value(key, value)
def __iter__(self):
for key in self.keys():
yield key
def __contains__(self, key):
return key in self.children_by_key
def pop(self, key, *args, **kwargs):
if key in self.required_keys:
raise RequiredKeyModified(self.path, key)
result = self.children_by_key.pop(key, *args, **kwargs)
self.on_change()
return result
def get(self, key, default=None):
return self.children_by_key.get(key, default)
def keys(self):
return self.children_by_key.keys()
def values(self):
return self.children_by_key.values()
def items(self):
return self.children_by_key.items()
def clear(self):
for key in tuple(self.children_by_key.keys()):
self.pop(key)
def set(self, value):
new_value = self.convert_to_valid_type(value)
prev_keys = set(self.keys())
for _key, _value in new_value.items():
self.set_key_value(_key, _value)
if _key in prev_keys:
prev_keys.remove(_key)
for key in prev_keys:
self.pop(key)
def _convert_to_valid_type(self, value):
try:
return dict(value)
except Exception:
pass
return super(DictMutableKeysEntity, self)._convert_to_valid_type(value)
def set_key_value(self, key, value):
# TODO Check for value type if is Settings entity?
child_obj = self.children_by_key.get(key)
if not child_obj:
if not self.store_as_list and not KEY_REGEX.match(key):
raise InvalidKeySymbols(self.path, key)
child_obj = self.add_key(key)
child_obj.set(value)
def change_key(self, old_key, new_key):
if old_key in self.required_keys:
raise RequiredKeyModified(self.path, old_key)
if new_key == old_key:
return
if not self.store_as_list and not KEY_REGEX.match(new_key):
raise InvalidKeySymbols(self.path, new_key)
self.children_by_key[new_key] = self.children_by_key.pop(old_key)
self._on_key_label_change()
def _on_key_label_change(self):
if self._override_state is OverrideState.STUDIO:
self._has_studio_override = True
elif self._override_state is OverrideState.PROJECT:
self._has_project_override = True
self.on_change()
def _add_key(self, key, _ingore_key_validation=False):
if key in self.children_by_key:
self.pop(key)
if (
not _ingore_key_validation
and not self.store_as_list
and not KEY_REGEX.match(key)
):
raise InvalidKeySymbols(self.path, key)
if self.value_is_env_group:
item_schema = copy.deepcopy(self.item_schema)
item_schema["env_group_key"] = key
else:
item_schema = self.item_schema
new_child = self.create_schema_object(item_schema, self, True)
self.children_by_key[key] = new_child
return new_child
def add_key(self, key):
new_child = self._add_key(key)
new_child.set_override_state(
self._override_state, self._ignore_missing_defaults
)
self.on_change()
return new_child
def change_child_key(self, child_entity, new_key):
old_key = None
for key, child in self.children_by_key.items():
if child is child_entity:
old_key = key
break
self.change_key(old_key, new_key)
def get_child_key(self, child_entity):
for key, child in self.children_by_key.items():
if child is child_entity:
return key
return None
# Label methods
def get_child_label(self, child_entity):
return self.children_label_by_id.get(child_entity.id)
def set_child_label(self, child_entity, label):
self.children_label_by_id[child_entity.id] = label
self._on_key_label_change()
def get_key_label(self, key):
child_entity = self.children_by_key[key]
return self.get_child_label(child_entity)
def set_key_label(self, key, label):
child_entity = self.children_by_key[key]
self.set_child_label(child_entity, label)
def _item_initalization(self):
self._default_metadata = {}
self._studio_override_metadata = {}
self._project_override_metadata = {}
self.initial_value = None
self._ignore_child_changes = False
self.valid_value_types = (dict, )
self.value_on_not_set = {}
self.children_by_key = {}
self.children_label_by_id = {}
self.store_as_list = self.schema_data.get("store_as_list") or False
self.value_is_env_group = (
self.schema_data.get("value_is_env_group") or False
)
self.required_keys = self.schema_data.get("required_keys") or []
self.collapsible_key = self.schema_data.get("collapsible_key") or False
# GUI attributes
self.hightlight_content = (
self.schema_data.get("highlight_content") or False
)
object_type = self.schema_data.get("object_type") or {}
if not isinstance(object_type, dict):
# Backwards compatibility
object_type = {
"type": object_type
}
input_modifiers = self.schema_data.get("input_modifiers") or {}
if input_modifiers:
self.log.warning((
"Used deprecated key `input_modifiers` to define item."
" Rather use `object_type` as dictionary with modifiers."
))
object_type.update(input_modifiers)
self.item_schema = object_type
if self.value_is_env_group:
self.item_schema["env_group_key"] = ""
if self.group_item is None:
self.is_group = True
def schema_validations(self):
# Allow to have not set label if keys are collapsible
# - this it to bypass label validation
used_temp_label = False
if self.is_group and not self.label and self.collapsible_key:
used_temp_label = True
self.label = "LABEL"
super(DictMutableKeysEntity, self).schema_validations()
if used_temp_label:
self.label = None
if self.value_is_env_group and self.store_as_list:
reason = "Item can't store environments metadata to list output."
raise EntitySchemaError(self, reason)
if not self.schema_data.get("object_type"):
reason = (
"Modifiable dictionary must have specified `object_type`."
)
raise EntitySchemaError(self, reason)
# TODO Ability to store labels should be defined with different key
if self.collapsible_key and self.file_item is None:
reason = (
"Modifiable dictionary with collapsible keys is not under"
" file item so can't store metadata."
)
raise EntitySchemaError(self, reason)
# Validate object type schema
child_validated = False
for child_entity in self.children_by_key.values():
child_entity.schema_validations()
child_validated = True
break
if not child_validated:
key = "__tmp__"
tmp_child = self._add_key(key)
tmp_child.schema_validations()
self.children_by_key.pop(key)
def get_child_path(self, child_obj):
result_key = None
for key, _child_obj in self.children_by_key.items():
if _child_obj is child_obj:
result_key = key
break
if result_key is None:
raise ValueError("Didn't found child {}".format(child_obj))
return "/".join([self.path, result_key])
def on_child_change(self, _child_entity):
if self._ignore_child_changes:
return
if self._override_state is OverrideState.STUDIO:
self._has_studio_override = True
elif self._override_state is OverrideState.PROJECT:
self._has_project_override = True
self.on_change()
def _get_metadata_for_state(self, state):
if (
state is OverrideState.PROJECT
and self._project_override_value is not NOT_SET
):
return self._project_override_metadata
if (
state >= OverrideState.STUDIO
and self._studio_override_value is not NOT_SET
):
return self._studio_override_metadata
return self._default_metadata
def _metadata_for_current_state(self):
return self._get_metadata_for_state(self._override_state)
def set_override_state(self, state, ignore_missing_defaults):
# Trigger override state change of root if is not same
if self.root_item.override_state is not state:
self.root_item.set_override_state(state)
return
# TODO change metadata
self._override_state = state
self._ignore_missing_defaults = ignore_missing_defaults
# Ignore if is dynamic item and use default in that case
if not self.is_dynamic_item and not self.is_in_dynamic_item:
if state > OverrideState.DEFAULTS:
if (
not self.has_default_value
and not ignore_missing_defaults
):
raise DefaultsNotDefined(self)
elif state > OverrideState.STUDIO:
if (
not self.had_studio_override
and not ignore_missing_defaults
):
raise StudioDefaultsNotDefined(self)
if state is OverrideState.STUDIO:
self._has_studio_override = self.had_studio_override
elif state is OverrideState.PROJECT:
self._has_project_override = self.had_project_override
self._has_studio_override = self.had_studio_override
using_project_overrides = False
using_studio_overrides = False
using_default_values = False
if (
state is OverrideState.PROJECT
and self.had_project_override
):
using_project_overrides = True
value = self._project_override_value
metadata = self._project_override_metadata
elif (
state >= OverrideState.STUDIO
and self.had_studio_override
):
using_studio_overrides = True
value = self._studio_override_value
metadata = self._studio_override_metadata
else:
using_default_values = True
value = self._default_value
metadata = self._default_metadata
if value is NOT_SET:
using_default_values = False
value = self.value_on_not_set
using_values_from_state = False
if state is OverrideState.PROJECT:
using_values_from_state = using_project_overrides
elif state is OverrideState.STUDIO:
using_values_from_state = using_studio_overrides
elif state is OverrideState.DEFAULTS:
using_values_from_state = using_default_values
new_value = copy.deepcopy(value)
if using_values_from_state:
initial_value = copy.deepcopy(value)
initial_value.update(metadata)
# Simulate `clear` method without triggering value change
for key in tuple(self.children_by_key.keys()):
self.children_by_key.pop(key)
for required_key in self.required_keys:
if required_key not in new_value:
new_value[required_key] = NOT_SET
# Create new children
children_label_by_id = {}
metadata_labels = metadata.get(M_DYNAMIC_KEY_LABEL) or {}
for _key, _value in new_value.items():
label = metadata_labels.get(_key)
if self.store_as_list or KEY_REGEX.match(_key):
child_entity = self._add_key(_key)
else:
# Replace invalid characters with underscore
# - this is safety to not break already existing settings
new_key = self._convert_to_regex_valid_key(_key)
if not using_values_from_state:
child_entity = self._add_key(new_key)
else:
child_entity = self._add_key(
_key, _ingore_key_validation=True
)
self.change_key(_key, new_key)
_key = new_key
if not label:
label = metadata_labels.get(new_key)
child_entity.update_default_value(_value)
if using_project_overrides:
child_entity.update_project_value(_value)
elif using_studio_overrides:
child_entity.update_studio_value(_value)
if label:
children_label_by_id[child_entity.id] = label
child_entity.set_override_state(state, ignore_missing_defaults)
self.children_label_by_id = children_label_by_id
_settings_value = self.settings_value()
if using_values_from_state:
if _settings_value is NOT_SET:
initial_value = NOT_SET
elif self.store_as_list:
new_initial_value = []
for key, value in _settings_value:
if key in initial_value:
new_initial_value.append([key, initial_value.pop(key)])
for key, value in initial_value.items():
new_initial_value.append([key, value])
initial_value = new_initial_value
else:
initial_value = _settings_value
self.initial_value = initial_value
def _convert_to_regex_valid_key(self, key):
return re.sub(
r"[^{}]+".format(KEY_ALLOWED_SYMBOLS),
"_",
key
)
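        # Example: assuming spaces and punctuation are outside KEY_ALLOWED_SYMBOLS, a legacy
        # key such as "my key!" is normalised to "my_key_" so that it satisfies KEY_REGEX.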
def children_key_by_id(self):
return {
child_entity.id: key
for key, child_entity in self.children_by_key.items()
}
@property
def value(self):
if self.store_as_list:
output = []
for key, child_entity in self.children_by_key.items():
output.append([key, child_entity.value])
return output
output = {}
for key, child_entity in self.children_by_key.items():
output[key] = child_entity.value
return output
@property
def metadata(self):
output = {}
if not self.children_label_by_id:
return output
children_key_by_id = self.children_key_by_id()
label_metadata = {}
for child_id, label in self.children_label_by_id.items():
key = children_key_by_id.get(child_id)
if key:
label_metadata[key] = label
output[M_DYNAMIC_KEY_LABEL] = label_metadata
return output
@property
def has_unsaved_changes(self):
if (
self._override_state is OverrideState.PROJECT
and self._has_project_override != self.had_project_override
):
return True
elif (
self._override_state is OverrideState.STUDIO
and self._has_studio_override != self.had_studio_override
):
return True
if self._child_has_unsaved_changes:
return True
if self.metadata != self._metadata_for_current_state():
return True
if self.settings_value() != self.initial_value:
return True
return False
@property
def _child_has_unsaved_changes(self):
for child_obj in self.children_by_key.values():
if child_obj.has_unsaved_changes:
return True
return False
@property
def has_studio_override(self):
return self._has_studio_override or self._child_has_studio_override
@property
def _child_has_studio_override(self):
if self._override_state >= OverrideState.STUDIO:
for child_obj in self.children_by_key.values():
if child_obj.has_studio_override:
return True
return False
@property
def has_project_override(self):
return self._has_project_override or self._child_has_project_override
@property
def _child_has_project_override(self):
if self._override_state >= OverrideState.PROJECT:
for child_obj in self.children_by_key.values():
if child_obj.has_project_override:
return True
return False
def _settings_value(self):
if self.store_as_list:
output = []
for key, child_entity in self.children_by_key.items():
child_value = child_entity.settings_value()
output.append([key, child_value])
return output
output = {}
for key, child_entity in self.children_by_key.items():
child_value = child_entity.settings_value()
# TODO child should have setter of env group key se child can
# know what env group represents.
if self.value_is_env_group:
if key not in child_value[M_ENVIRONMENT_KEY]:
_metadata = child_value[M_ENVIRONMENT_KEY]
_m_keykey = tuple(_metadata.keys())[0]
env_keys = child_value[M_ENVIRONMENT_KEY].pop(_m_keykey)
child_value[M_ENVIRONMENT_KEY][key] = env_keys
output[key] = child_value
output.update(self.metadata)
return output
def _prepare_value(self, value):
metadata = {}
if isinstance(value, dict):
for key in METADATA_KEYS:
if key in value:
metadata[key] = value.pop(key)
return value, metadata
def update_default_value(self, value):
value = self._check_update_value(value, "default")
has_default_value = value is not NOT_SET
if has_default_value:
for required_key in self.required_keys:
if required_key not in value:
has_default_value = False
break
self.has_default_value = has_default_value
value, metadata = self._prepare_value(value)
self._default_value = value
self._default_metadata = metadata
def update_studio_value(self, value):
value = self._check_update_value(value, "studio override")
value, metadata = self._prepare_value(value)
self._studio_override_value = value
self._studio_override_metadata = metadata
self.had_studio_override = value is not NOT_SET
def update_project_value(self, value):
value = self._check_update_value(value, "project override")
value, metadata = self._prepare_value(value)
self._project_override_value = value
self._project_override_metadata = metadata
self.had_project_override = value is not NOT_SET
def _discard_changes(self, on_change_trigger):
if not self._can_discard_changes:
return
self.set_override_state(
self._override_state, self._ignore_missing_defaults
)
on_change_trigger.append(self.on_change)
def _add_to_studio_default(self, _on_change_trigger):
self._has_studio_override = True
self.on_change()
def _remove_from_studio_default(self, on_change_trigger):
if not self._can_remove_from_studio_default:
return
value = self._default_value
if value is NOT_SET:
value = self.value_on_not_set
new_value = copy.deepcopy(value)
self._ignore_child_changes = True
# Simulate `clear` method without triggering value change
for key in tuple(self.children_by_key.keys()):
self.children_by_key.pop(key)
metadata = self._get_metadata_for_state(OverrideState.DEFAULTS)
metadata_labels = metadata.get(M_DYNAMIC_KEY_LABEL) or {}
children_label_by_id = {}
# Create new children
for _key, _value in new_value.items():
new_key = self._convert_to_regex_valid_key(_key)
child_entity = self._add_key(new_key)
child_entity.update_default_value(_value)
label = metadata_labels.get(_key)
if label:
children_label_by_id[child_entity.id] = label
child_entity.set_override_state(
self._override_state, self._ignore_missing_defaults
)
self.children_label_by_id = children_label_by_id
self._ignore_child_changes = False
self._has_studio_override = False
on_change_trigger.append(self.on_change)
def _add_to_project_override(self, _on_change_trigger):
self._has_project_override = True
self.on_change()
def _remove_from_project_override(self, on_change_trigger):
if not self._can_remove_from_project_override:
return
if self._has_studio_override:
value = self._studio_override_value
elif self.has_default_value:
value = self._default_value
else:
value = self.value_on_not_set
new_value = copy.deepcopy(value)
self._ignore_child_changes = True
# Simulate `clear` method without triggering value change
for key in tuple(self.children_by_key.keys()):
self.children_by_key.pop(key)
metadata = self._get_metadata_for_state(OverrideState.STUDIO)
metadata_labels = metadata.get(M_DYNAMIC_KEY_LABEL) or {}
children_label_by_id = {}
# Create new children
for _key, _value in new_value.items():
new_key = self._convert_to_regex_valid_key(_key)
child_entity = self._add_key(new_key)
child_entity.update_default_value(_value)
if self._has_studio_override:
child_entity.update_studio_value(_value)
label = metadata_labels.get(_key)
if label:
children_label_by_id[child_entity.id] = label
child_entity.set_override_state(
self._override_state, self._ignore_missing_defaults
)
self.children_label_by_id = children_label_by_id
self._ignore_child_changes = False
self._has_project_override = False
on_change_trigger.append(self.on_change)
def reset_callbacks(self):
super(DictMutableKeysEntity, self).reset_callbacks()
for child_entity in self.children_by_key.values():
child_entity.reset_callbacks()
| 33.269601 | 79 | 0.623972 |
f75fcc68f69839268ac35d8fd902fcdaa112a170 | 12,245 | py | Python | dynamic_programming/64_minimum_path_sum.py | shawlu95/Algorithm-Toolbox | b6c7b2228d8e70e0842e0bad607533a2c8322cf0 | [
"MIT"
] | null | null | null | dynamic_programming/64_minimum_path_sum.py | shawlu95/Algorithm-Toolbox | b6c7b2228d8e70e0842e0bad607533a2c8322cf0 | [
"MIT"
] | null | null | null | dynamic_programming/64_minimum_path_sum.py | shawlu95/Algorithm-Toolbox | b6c7b2228d8e70e0842e0bad607533a2c8322cf0 | [
"MIT"
] | 2 | 2020-02-07T20:49:02.000Z | 2020-02-11T06:01:55.000Z | # top down, classic 2D O(mn) time, O(mn) space
# top down, 1-D O(mn) time, O(n) space
# top down, O(mn) time, O(1) space
# bottom up, classic 2D O(mn) time, O(mn) space
# bottom up, 1-D O(mn) time, O(n) space
# bottom up, O(mn) time, O(1) space
# left-right, classic 2D O(mn) time, O(mn) space
# left-right, 1-D O(mn) time, O(m) space
# left-right, O(mn) time, O(1) space
# right-left, classic 2D O(mn) time, O(mn) space
# right-left, 1-D O(mn) time, O(m) space
# right-left, O(mn) time, O(1) space
# BFS, starting with top left
# BFS, starting with bottom right
class Solution:
# top down, classic 2D O(mn) time, O(mn) space
def minPathSum1(self, grid):
nrows = len(grid)
ncols = len(grid[0])
ans = [[0 for i in range(ncols)] for j in range(nrows)]
for r in range(nrows):
for c in range(ncols):
if r == 0 and c > 0:
# top row
ans[r][c] = grid[r][c] + ans[r][c - 1]
elif r > 0 and c == 0:
# left col
ans[r][c] = grid[r][c] + ans[r - 1][c]
elif r > 0 and c > 0:
# everywhere else
ans[r][c] = grid[r][c] + min(ans[r - 1][c], ans[r][c - 1])
else:
# top left corner
ans[r][c] = grid[r][c]
return ans[-1][-1]
# top down, 1-D O(mn) time, O(n) space
def minPathSum2(self, grid):
nrows = len(grid)
ncols = len(grid[0])
ans = [0 for i in range(ncols)]
for r in range(nrows):
for c in range(ncols):
if r == 0 and c > 0:
# top row
ans[c] = grid[r][c] + ans[c - 1]
elif r > 0 and c == 0:
# left col
ans[c] = grid[r][c] + ans[c]
elif r > 0 and c > 0:
# everywhere else
ans[c] = grid[r][c] + min(ans[c], ans[c - 1])
else:
# top left corner
ans[c] = grid[r][c]
return ans[-1]
# top down, O(mn) time, O(1) space
def minPathSum3(self, grid):
nrows = len(grid)
ncols = len(grid[0])
for r in range(nrows):
for c in range(ncols):
if r == 0 and c > 0:
# top row
grid[r][c] = grid[r][c] + grid[r][c - 1]
elif r > 0 and c == 0:
# left col
grid[r][c] = grid[r][c] + grid[r - 1][c]
elif r > 0 and c > 0:
# everywhere else
grid[r][c] = grid[r][c] + min(grid[r - 1][c], grid[r][c - 1])
return grid[-1][-1]
# bottom up, classic 2D O(mn) time, O(mn) space
def minPathSum4(self, grid):
nrows = len(grid)
ncols = len(grid[0])
ans = [[0 for i in range(ncols)] for j in range(nrows)]
for r in range(nrows - 1, - 1, - 1):
for c in range(ncols - 1, - 1, - 1):
if r == nrows - 1 and c < ncols - 1:
# bottom row
ans[r][c] = grid[r][c] + ans[r][c + 1]
elif r < nrows - 1 and c == ncols - 1:
# right col
ans[r][c] = grid[r][c] + ans[r + 1][c]
elif r < nrows - 1 and c < ncols - 1:
# everywhere else
ans[r][c] = grid[r][c] + min(ans[r][c + 1], ans[r + 1][c])
else:
# bottom right
ans[-1][-1] = grid[-1][-1]
return ans[0][0]
# bottom up, 1-D O(mn) time, O(n) space
def minPathSum5(self, grid):
nrows = len(grid)
ncols = len(grid[0])
ans = [0 for i in range(ncols)]
for r in range(nrows - 1, - 1, - 1):
for c in range(ncols - 1, - 1, - 1):
if r == nrows - 1 and c < ncols - 1:
# bottom row
ans[c] = grid[r][c] + ans[c + 1]
elif r < nrows - 1 and c == ncols - 1:
# right col
ans[c] = grid[r][c] + ans[c]
elif r < nrows - 1 and c < ncols - 1:
# everywhere else
ans[c] = grid[r][c] + min(ans[c + 1], ans[c])
else:
# bottom right
ans[-1] = grid[-1][-1]
return ans[0]
# bottom up, O(mn) time, O(1) space
def minPathSum6(self, grid):
nrows = len(grid)
ncols = len(grid[0])
for r in range(nrows - 1, - 1, - 1):
for c in range(ncols - 1, - 1, - 1):
if r == nrows - 1 and c < ncols - 1:
# bottom row
grid[r][c] = grid[r][c] + grid[r][c + 1]
elif r < nrows - 1 and c == ncols - 1:
# right col
grid[r][c] = grid[r][c] + grid[r + 1][c]
elif r < nrows - 1 and c < ncols - 1:
# everywhere else
grid[r][c] = grid[r][c] + min(grid[r][c + 1], grid[r + 1][c])
else:
# bottom right corner
grid[-1][-1] = grid[-1][-1]
return grid[0][0]
# left-right, classic 2D O(mn) time, O(mn) space
def minPathSum7(self, grid):
nrows = len(grid)
ncols = len(grid[0])
ans = [[0 for i in range(ncols)] for j in range(nrows)]
for c in range(ncols):
for r in range(nrows):
if r == 0 and c > 0:
# top row
ans[r][c] = grid[r][c] + ans[r][c - 1]
elif r > 0 and c == 0:
# left col
ans[r][c] = grid[r][c] + ans[r - 1][c]
elif r > 0 and c > 0:
# everywhere else
ans[r][c] = grid[r][c] + min(ans[r - 1][c], ans[r][c - 1])
else:
# top left corner
ans[r][c] = grid[r][c]
return ans[-1][-1]
# left-right, 1-D O(mn) time, O(m) space
def minPathSum8(self, grid):
nrows = len(grid)
ncols = len(grid[0])
ans = [0 for i in range(nrows)]
for c in range(ncols):
for r in range(nrows):
if r == 0 and c > 0:
# top row
ans[r] = grid[r][c] + ans[r]
elif r > 0 and c == 0:
# left col
ans[r] = grid[r][c] + ans[r - 1]
elif r > 0 and c > 0:
# everywhere else
ans[r] = grid[r][c] + min(ans[r - 1], ans[r])
else:
# top left corner
ans[r] = grid[r][c]
return ans[-1]
# left-right, O(mn) time, O(1) space
def minPathSum9(self, grid):
nrows = len(grid)
ncols = len(grid[0])
for c in range(ncols):
for r in range(nrows):
if r == 0 and c > 0:
# top row
grid[r][c] = grid[r][c] + grid[r][c - 1]
elif r > 0 and c == 0:
# left col
grid[r][c] = grid[r][c] + grid[r - 1][c]
elif r > 0 and c > 0:
# everywhere else
grid[r][c] = grid[r][c] + min(grid[r - 1][c], grid[r][c - 1])
return grid[-1][-1]
# right-left, classic 2D O(mn) time, O(mn) space
def minPathSum10(self, grid):
nrows = len(grid)
ncols = len(grid[0])
ans = [[0 for i in range(ncols)] for j in range(nrows)]
for c in range(ncols - 1, - 1, - 1):
for r in range(nrows - 1, - 1, - 1):
if r == nrows - 1 and c < ncols - 1:
# bottom row
ans[r][c] = grid[r][c] + ans[r][c + 1]
elif r < nrows - 1 and c == ncols - 1:
# right col
ans[r][c] = grid[r][c] + ans[r + 1][c]
elif r < nrows - 1 and c < ncols - 1:
# everywhere else
ans[r][c] = grid[r][c] + min(ans[r][c + 1], ans[r + 1][c])
else:
# bottom right
ans[-1][-1] = grid[-1][-1]
return ans[0][0]
# right-left, 1-D O(mn) time, O(m) space
def minPathSum11(self, grid):
nrows = len(grid)
ncols = len(grid[0])
ans = [0 for i in range(nrows)]
for c in range(ncols - 1, - 1, - 1):
for r in range(nrows - 1, - 1, - 1):
if r == nrows - 1 and c < ncols - 1:
# bottom row
ans[r] = grid[r][c] + ans[r]
elif r < nrows - 1 and c == ncols - 1:
# right col
ans[r] = grid[r][c] + ans[r + 1]
elif r < nrows - 1 and c < ncols - 1:
# everywhere else
ans[r] = grid[r][c] + min(ans[r], ans[r + 1])
else:
# bottom right
ans[-1] = grid[-1][-1]
return ans[0]
# right-left, O(mn) time, O(1) space
def minPathSum12(self, grid):
nrows = len(grid)
ncols = len(grid[0])
for c in range(ncols - 1, - 1, - 1):
for r in range(nrows - 1, - 1, - 1):
if r == nrows - 1 and c < ncols - 1:
# bottom row
grid[r][c] = grid[r][c] + grid[r][c + 1]
elif r < nrows - 1 and c == ncols - 1:
# right col
grid[r][c] = grid[r][c] + grid[r + 1][c]
elif r < nrows - 1 and c < ncols - 1:
# everywhere else
grid[r][c] = grid[r][c] + min(grid[r][c + 1], grid[r + 1][c])
return grid[0][0]
# BFS, starting with top left
def minPathSum13(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
queue = [(0, 0)]
while queue:
loc = queue[0]
del queue[0]
# if at top row
if loc[0] == 0 and loc[1] > 0:
grid[loc[0]][loc[1]] += grid[loc[0]][loc[1] - 1]
# if at left col
elif loc[0] > 0 and loc[1] == 0:
grid[loc[0]][loc[1]] += grid[loc[0] - 1][loc[1]]
elif loc[0] > 0 and loc[1] > 0:
grid[loc[0]][loc[1]] += min(grid[loc[0] - 1][loc[1]], grid[loc[0]][loc[1] - 1])
# append right
if loc[1] + 1 < len(grid[0]):
if (loc[0], loc[1] + 1) not in queue:
queue.append((loc[0], loc[1] + 1))
# append down
if loc[0] + 1 < len(grid):
if (loc[0] + 1, loc[1]) not in queue:
queue.append((loc[0] + 1, loc[1]))
return grid[-1][-1]
# BFS, starting with bottom right
def minPathSum14(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
nrows = len(grid)
ncols = len(grid[0])
queue = [(nrows - 1, ncols - 1)]
while queue:
loc = queue[0]
del queue[0]
# if at bottom row
if loc[0] == nrows - 1 and loc[1] < ncols - 1:
grid[loc[0]][loc[1]] += grid[loc[0]][loc[1] + 1]
# if at right col
elif loc[0] < nrows - 1 and loc[1] == ncols - 1:
grid[loc[0]][loc[1]] += grid[loc[0] + 1][loc[1]]
elif loc[0] < nrows - 1 and loc[1] < ncols - 1:
grid[loc[0]][loc[1]] += min(grid[loc[0] + 1][loc[1]], grid[loc[0]][loc[1] + 1])
# append above
if loc[1] - 1 >= 0:
if (loc[0], loc[1] - 1) not in queue:
queue.append((loc[0], loc[1] - 1))
# append left
if loc[0] - 1 >= 0:
if (loc[0] - 1, loc[1]) not in queue:
queue.append((loc[0] - 1, loc[1]))
return grid[0][0]
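# (Added example) A minimal cross-check harness, not part of the original solution file. Several
# of the variants above mutate their input grid in place, so each one gets a fresh deep copy of
# the same sample grid; the cheapest path 1 -> 3 -> 1 -> 1 -> 1 sums to 7 for this grid.
if __name__ == "__main__":
    import copy
    _sample = [[1, 3, 1],
               [1, 5, 1],
               [4, 2, 1]]
    _solver = Solution()
    for _name in sorted(n for n in dir(_solver) if n.startswith("minPathSum")):
        _result = getattr(_solver, _name)(copy.deepcopy(_sample))
        assert _result == 7, (_name, _result)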
solver = Solution()
print(solver.minPathSum14([
[1,3,1],
[1,5,1],
[4,2,1]
])) | 35.699708 | 95 | 0.393712 |
f7601cfd8e8be0a1b154818dbb6267fccde25717 | 6,804 | py | Python | run.py | maeda6uiui/BingImageDownloader | 5ccb781828633abdc8be9ec9bddfe46d99f9c6a1 | [
"MIT"
] | null | null | null | run.py | maeda6uiui/BingImageDownloader | 5ccb781828633abdc8be9ec9bddfe46d99f9c6a1 | [
"MIT"
] | null | null | null | run.py | maeda6uiui/BingImageDownloader | 5ccb781828633abdc8be9ec9bddfe46d99f9c6a1 | [
"MIT"
] | null | null | null | #!python3.6
import argparse
import glob
import hashlib
import logging
import multiprocessing
import os
import pathlib
import shutil
import sys
from icrawler.builtin import BingImageCrawler
from typing import Any,List
sys.path.append(".")
from postprocessing import format_images
logging_fmt = "%(asctime)s %(levelname)s: %(message)s"
logging.basicConfig(format=logging_fmt)
def get_md5_hash(keyword:str)->str:
return hashlib.md5(keyword.encode()).hexdigest()
def split_list(l:List[Any],n:int):
for i in range(n):
yield l[i*len(l)//n:(i+1)*len(l)//n]
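# (Added note, not in the original script) split_list yields n contiguous chunks of roughly equal
# size, e.g. list(split_list([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4, 5]]; it is used below to
# spread the downloaded image directories across the formatter processes.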
def crawl_images(
keyword:str,
max_num_images:int,
save_dir:str,
feeder_threads:int,
parser_threads:int,
downloader_threads:int):
crawler=BingImageCrawler(
feeder_threads=feeder_threads,
parser_threads=parser_threads,
downloader_threads=downloader_threads,
log_level=logging.ERROR,
storage={"root_dir":save_dir},
)
crawler.crawl(keyword=keyword,max_num=max_num_images)
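# (Added note, not in the original script) icrawler's BingImageCrawler runs a feeder -> parser ->
# downloader thread pipeline; the thread counts are passed through from the CLI arguments, and the
# downloaded files are written under `save_dir` via the `storage` option used above.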
def formatter_worker(**kwargs):
target_dirs:List[str]=kwargs["target_dirs"]
image_width:int=kwargs["image_width"]
image_height:int=kwargs["image_height"]
for target_dir in target_dirs:
format_images(target_dir,image_width,image_height)
def archive_images(
archive_filepath:str,
archive_format:str,
save_root_dir:str):
shutil.make_archive(archive_filepath,archive_format,base_dir=save_root_dir)
shutil.rmtree(save_root_dir)
def main(args):
keywords_filepath:str=args.keywords_filepath
max_num_images:int=args.max_num_images
image_width:int=args.image_width
image_height:int=args.image_height
save_root_dir:str=args.save_root_dir
archive_save_dir:str=args.archive_save_dir
archive_format:str=args.archive_format
overwrite:bool=args.overwrite
progress_log_filepath:str=args.progress_log_filepath
index_lower_bound:int=args.index_lower_bound
index_upper_bound:int=args.index_upper_bound
feeder_threads:int=args.feeder_threads
parser_threads:int=args.parser_threads
downloader_threads:int=args.downloader_threads
num_formatter_processes:int=args.num_formatter_processes
num_keywords_per_archive:int=args.num_keywords_per_archive
no_format_images:bool=args.no_format_images
no_archive_images:bool=args.no_archive_images
os.makedirs(save_root_dir,exist_ok=overwrite)
os.makedirs(archive_save_dir,exist_ok=overwrite)
progress_logger=logging.getLogger("progress_loggger")
progress_logger.setLevel(level=logging.INFO)
handler=logging.FileHandler(progress_log_filepath,"a",encoding="utf-8")
handler.setLevel(logging.INFO)
handler.setFormatter(logging.Formatter(logging_fmt))
progress_logger.addHandler(handler)
progress_logger.info(args)
with open(keywords_filepath,"r",encoding="utf-8") as r:
keywords=r.read().splitlines()
if index_upper_bound<0:
index_upper_bound=len(keywords)
for idx in range(index_lower_bound,index_upper_bound,num_keywords_per_archive):
progress_logger.info("Batch start index: {}".format(idx))
#Download
batch_keywords=keywords[idx:idx+num_keywords_per_archive]
for i,keyword in enumerate(batch_keywords):
progress_logger.info("{}\t{}".format(idx+i,keyword))
title_hash=get_md5_hash(keyword)
save_dir=os.path.join(save_root_dir,title_hash)
os.makedirs(save_dir,exist_ok=True)
info_filepath=os.path.join(save_dir,"info.txt")
with open(info_filepath,"w",encoding="utf-8") as w:
w.write("{}\n".format(keyword))
crawl_images(keyword,max_num_images,save_dir,feeder_threads,parser_threads,downloader_threads)
#Format
if not no_format_images:
subdirs=glob.glob(os.path.join(save_root_dir,"*"))
subbatch_subdirs=list(split_list(subdirs,num_formatter_processes))
formatter_processes:List[multiprocessing.Process]=[]
for i in range(num_formatter_processes):
kwargs={
"target_dirs":subbatch_subdirs[i],
"image_width":image_width,
"image_height":image_height
}
formatter_process=multiprocessing.Process(target=formatter_worker,kwargs=kwargs)
formatter_processes.append(formatter_process)
for formatter_process in formatter_processes:
formatter_process.start()
for formatter_process in formatter_processes:
formatter_process.join()
#Archive
if not no_archive_images:
batch_start_index=idx
batch_end_index=min(idx+num_keywords_per_archive,index_upper_bound)
archive_filepath=os.path.join(archive_save_dir,"images_{}_{}".format(batch_start_index,batch_end_index))
save_root_dir_path=pathlib.Path(save_root_dir)
archive_root_dir=str(save_root_dir_path.parent)
archive_base_dir=save_root_dir_path.name
shutil.make_archive(
archive_filepath,
archive_format,
root_dir=archive_root_dir,
base_dir=archive_base_dir
)
shutil.rmtree(save_root_dir)
progress_logger.info("Created an archive file {}".format(archive_filepath))
progress_logger.info("====================")
if __name__=="__main__":
parser=argparse.ArgumentParser()
parser.add_argument("--keywords_filepath",type=str,default="./keywords.txt")
parser.add_argument("--max_num_images",type=int,default=200)
parser.add_argument("--image_width",type=int,default=256)
parser.add_argument("--image_height",type=int,default=256)
parser.add_argument("--save_root_dir",type=str,default="./Image")
parser.add_argument("--archive_save_dir",type=str,default="./Archive")
parser.add_argument("--archive_format",type=str,default="gztar")
parser.add_argument("--overwrite",action="store_true")
parser.add_argument("--progress_log_filepath",type=str,default="./progress.txt")
parser.add_argument("--index_lower_bound",type=int,default=0)
parser.add_argument("--index_upper_bound",type=int,default=-1)
parser.add_argument("--feeder_threads",type=int,default=4)
parser.add_argument("--parser_threads",type=int,default=4)
parser.add_argument("--downloader_threads",type=int,default=8)
parser.add_argument("--num_formatter_processes",type=int,default=4)
parser.add_argument("--num_keywords_per_archive",type=int,default=100)
parser.add_argument("--no_format_images",action="store_true")
parser.add_argument("--no_archive_images",action="store_true")
args=parser.parse_args()
main(args)
| 38.011173 | 116 | 0.714139 |
f7602ef8d38bca1d07a0553f2dc11970743a3a15 | 1,268 | py | Python | yeoboseyo/services/reddit.py | foxmask/yeoboseyo-django | 8dc1df9373f4fc27502aa9097e3724a87a2b8ce6 | [
"BSD-3-Clause"
] | 2 | 2020-11-18T08:18:17.000Z | 2021-02-27T13:17:20.000Z | yeoboseyo/services/reddit.py | foxmask/yeoboseyo-django | 8dc1df9373f4fc27502aa9097e3724a87a2b8ce6 | [
"BSD-3-Clause"
] | null | null | null | yeoboseyo/services/reddit.py | foxmask/yeoboseyo-django | 8dc1df9373f4fc27502aa9097e3724a87a2b8ce6 | [
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
"""
여보세요 Service Reddit
"""
# std lib
from __future__ import unicode_literals
from django.conf import settings
from logging import getLogger
# external lib
from praw import Reddit as RedditAPI
# yeoboseyo
from yeoboseyo.services import Service
# create logger
logger = getLogger(__name__)
__all__ = ['Reddit']
class Reddit(Service):
"""
    Service Reddit
"""
def __init__(self):
super().__init__()
self.reddit = RedditAPI(client_id=settings.REDDIT_CLIENT_ID,
client_secret=settings.REDDIT_CLIENT_SECRET,
password=settings.REDDIT_PASSWORD,
user_agent=settings.REDDIT_USERAGENT,
username=settings.REDDIT_USERNAME)
def save_data(self, trigger, entry) -> bool:
"""
        Submit a new link post to Reddit
:param trigger: current trigger
:param entry: data from Feeds
:return: boolean
"""
status = False
try:
self.reddit.subreddit(trigger.reddit).submit(entry.title, url=entry.link)
status = True
except ValueError as e:
logger.error(e)
status = False
return status
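# (Added usage note, not in the original module) Only three attributes are read here:
# `trigger.reddit` (the target subreddit name), `entry.title` and `entry.link`; for example
# `Reddit().save_data(trigger, entry)` returns True when the link submission succeeds and
# False when praw raises a ValueError.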
| 26.978723 | 85 | 0.599369 |
f7603efdb3fe6b96f2e80c8801bfbb4190439307 | 1,446 | py | Python | tempest_zigzag/cli.py | rcbops/tempest-zigzag | 12f4bc528c9b88263f04394e5f31755519aa02e8 | [
"Apache-2.0"
] | null | null | null | tempest_zigzag/cli.py | rcbops/tempest-zigzag | 12f4bc528c9b88263f04394e5f31755519aa02e8 | [
"Apache-2.0"
] | 2 | 2019-01-14T22:48:35.000Z | 2019-02-25T20:06:06.000Z | tempest_zigzag/cli.py | rcbops/tempest-zigzag | 12f4bc528c9b88263f04394e5f31755519aa02e8 | [
"Apache-2.0"
] | 2 | 2019-01-08T20:19:16.000Z | 2019-01-10T22:12:30.000Z | # -*- coding: utf-8 -*-
"""Console script for tempest-zigzag."""
# ======================================================================================================================
# Imports
# ======================================================================================================================
from __future__ import absolute_import
import click
import sys
from tempest_zigzag.tempest_zigzag import TempestZigZag
from tempest_zigzag.tempest_testcase_xml import TempestXMLAccessError
# ======================================================================================================================
# Main
# ======================================================================================================================
@click.command()
@click.argument('junit_input_file', type=click.Path(exists=True))
@click.argument('test_list', type=click.Path(exists=True))
@click.argument('config_file', type=click.Path(exists=True))
def main(junit_input_file, test_list, config_file):
"""Process multiple files created by tempest into a single accurate junit xml artifact"""
try:
click.echo(str(TempestZigZag.process_xml(junit_input_file, test_list, config_file)))
except(TempestXMLAccessError, Exception) as e:
click.echo(click.style(str(e), fg='red'))
click.echo(click.style("\nFailed!", fg='red'))
sys.exit(1)
if __name__ == "__main__":
main() # pragma: no cover
| 42.529412 | 120 | 0.488935 |
f7606830ca6c82474d973395beffb0d9e09b57f6 | 20,167 | py | Python | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/links/v2016_09_01/aio/operations/_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | 1 | 2022-02-01T18:50:12.000Z | 2022-02-01T18:50:12.000Z | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/links/v2016_09_01/aio/operations/_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | null | null | null | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/links/v2016_09_01/aio/operations/_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | null | null | null | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._operations import build_operations_list_request, build_resource_links_create_or_update_request, build_resource_links_delete_request, build_resource_links_get_request, build_resource_links_list_at_source_scope_request, build_resource_links_list_at_subscription_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class Operations:
"""Operations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.resource.links.v2016_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.OperationListResult"]:
"""Lists all of the available Microsoft.Resources REST API operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OperationListResult or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.links.v2016_09_01.models.OperationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2016-09-01") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_operations_list_request(
api_version=api_version,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_operations_list_request(
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("OperationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': "/providers/Microsoft.Resources/operations"} # type: ignore
class ResourceLinksOperations:
"""ResourceLinksOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.resource.links.v2016_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def delete( # pylint: disable=inconsistent-return-statements
self,
link_id: str,
**kwargs: Any
) -> None:
"""Deletes a resource link with the specified ID.
:param link_id: The fully qualified ID of the resource link. Use the format,
/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/{provider-namespace}/{resource-type}/{resource-name}/Microsoft.Resources/links/{link-name}.
For example,
/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myGroup/Microsoft.Web/sites/mySite/Microsoft.Resources/links/myLink.
:type link_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2016-09-01") # type: str
request = build_resource_links_delete_request(
link_id=link_id,
api_version=api_version,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': "/{linkId}"} # type: ignore
@distributed_trace_async
async def create_or_update(
self,
link_id: str,
parameters: "_models.ResourceLink",
**kwargs: Any
) -> "_models.ResourceLink":
"""Creates or updates a resource link between the specified resources.
:param link_id: The fully qualified ID of the resource link. Use the format,
/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/{provider-namespace}/{resource-type}/{resource-name}/Microsoft.Resources/links/{link-name}.
For example,
/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myGroup/Microsoft.Web/sites/mySite/Microsoft.Resources/links/myLink.
:type link_id: str
:param parameters: Parameters for creating or updating a resource link.
:type parameters: ~azure.mgmt.resource.links.v2016_09_01.models.ResourceLink
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ResourceLink, or the result of cls(response)
:rtype: ~azure.mgmt.resource.links.v2016_09_01.models.ResourceLink
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceLink"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2016-09-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ResourceLink')
request = build_resource_links_create_or_update_request(
link_id=link_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.create_or_update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ResourceLink', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ResourceLink', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': "/{linkId}"} # type: ignore
@distributed_trace_async
async def get(
self,
link_id: str,
**kwargs: Any
) -> "_models.ResourceLink":
"""Gets a resource link with the specified ID.
:param link_id: The fully qualified Id of the resource link. For example,
/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myGroup/Microsoft.Web/sites/mySite/Microsoft.Resources/links/myLink.
:type link_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ResourceLink, or the result of cls(response)
:rtype: ~azure.mgmt.resource.links.v2016_09_01.models.ResourceLink
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceLink"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2016-09-01") # type: str
request = build_resource_links_get_request(
link_id=link_id,
api_version=api_version,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ResourceLink', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': "/{linkId}"} # type: ignore
@distributed_trace
def list_at_subscription(
self,
filter: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.ResourceLinkResult"]:
"""Gets all the linked resources for the subscription.
:param filter: The filter to apply on the list resource links operation. The supported filter
for list resource links is targetId. For example, $filter=targetId eq {value}. Default value is
None.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ResourceLinkResult or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.links.v2016_09_01.models.ResourceLinkResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2016-09-01") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceLinkResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_resource_links_list_at_subscription_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
filter=filter,
template_url=self.list_at_subscription.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_resource_links_list_at_subscription_request(
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ResourceLinkResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_at_subscription.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/links"} # type: ignore
@distributed_trace
def list_at_source_scope(
self,
scope: str,
filter: Optional[str] = "atScope()",
**kwargs: Any
) -> AsyncIterable["_models.ResourceLinkResult"]:
"""Gets a list of resource links at and below the specified source scope.
:param scope: The fully qualified ID of the scope for getting the resource links. For example,
to list resource links at and under a resource group, set the scope to
/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myGroup.
:type scope: str
:param filter: The filter to apply when getting resource links. To get links only at the
specified scope (not below the scope), use Filter.atScope(). Possible values are "atScope()" or
None. Default value is "atScope()".
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ResourceLinkResult or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.links.v2016_09_01.models.ResourceLinkResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2016-09-01") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceLinkResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_resource_links_list_at_source_scope_request(
scope=scope,
api_version=api_version,
filter=filter,
template_url=self.list_at_source_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_resource_links_list_at_source_scope_request(
scope=scope,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ResourceLinkResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_at_source_scope.metadata = {'url': "/{scope}/providers/Microsoft.Resources/links"} # type: ignore
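# (Added usage sketch, not part of the generated SDK file) These operation groups are normally
# reached through the package's generated service client rather than instantiated directly.
# Assuming the async client is exposed as ManagementLinkClient (an assumption, check the
# package's __init__), a typical flow would look roughly like:
#
#     credential = DefaultAzureCredential()                       # from azure-identity (aio)
#     client = ManagementLinkClient(credential, subscription_id)  # exposes .resource_links
#     async for link in client.resource_links.list_at_subscription():
#         print(link.id)
#
# list_at_subscription / list_at_source_scope return async pagers, while get / create_or_update /
# delete take the fully qualified link_id described in the docstrings above.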
| 43 | 287 | 0.651014 |
f7608ca86a64d630034e720d97e67718695205f2 | 1,123 | py | Python | expected_results/users/valid_users_templates/john_doe.py | ikostan/ParaBankSeleniumAutomation | e28a886adba89b82a60831ad96a3a8f00f863116 | [
"Unlicense"
] | 4 | 2019-08-12T19:36:58.000Z | 2021-09-14T18:48:27.000Z | expected_results/users/valid_users_templates/john_doe.py | ikostan/ParaBankSeleniumAutomation | e28a886adba89b82a60831ad96a3a8f00f863116 | [
"Unlicense"
] | 1 | 2021-06-02T00:01:00.000Z | 2021-06-02T00:01:00.000Z | expected_results/users/valid_users_templates/john_doe.py | ikostan/ParaBankSeleniumAutomation | e28a886adba89b82a60831ad96a3a8f00f863116 | [
"Unlicense"
] | 2 | 2019-08-12T10:06:00.000Z | 2020-12-25T05:52:40.000Z | # Created by Egor Kostan.
# GitHub: https://github.com/ikostan
# LinkedIn: https://www.linkedin.com/in/egor-kostan/
class JohnDoe:
FIRST_NAME = 'John'
LAST_NAME = 'Doe'
ADDRESS = '9805 Cambridge Street'
CITY = 'NY'
STATE = 'Brooklyn'
ZIP_CODE = '11235'
PHONE = '718-437-9185'
USERNAME = 'johndoe'
PASSWORD = USERNAME + ZIP_CODE
'''
NATIONAL ID (Social Security Number)
Audit(s):
SSNs beginning with 8 or 9 are not valid (e.g., 891-40-8025).
SSNs with zeros in the fourth and fifth digits are not valid (e.g., 565-00-8035).
Source: http://www.calstate.edu/hrpims/awppm/Fields/NATIONAL_ID_Social_Security_Number_.htm
Social Security Administration (SSA) had never issued these Social Security Numbers:
123-45-6789
	SSNs having “000” or “666” as the first three left-most digits
	SSNs equal to or greater than “773” as the first three left-most digits
	SSNs having “00” as the fourth and fifth digits
	SSNs having “0000” as the last four digits
Source: https://primepay.com/blog/valid-social-security-number
'''
SSN = '050–13-8035'
INIT_BALANCE = 5125.00
MIN_BALANCE = 100.00
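# (Added example, not part of the original test data) A minimal sketch of the SSN audit rules
# quoted in the docstring above; the helper name is hypothetical and performs format checks only.
import re
def looks_like_valid_ssn(ssn: str) -> bool:
    """Return True if `ssn` passes the structural rules listed above (no SSA lookup involved)."""
    digits = re.sub(r"\D", "", ssn)  # keep digits only, whatever separator is used
    if len(digits) != 9 or digits == "123456789":
        return False
    area, group, serial = digits[:3], digits[3:5], digits[5:]
    if area in ("000", "666") or int(area) >= 773 or area[0] in ("8", "9"):
        return False  # never-issued or out-of-range area numbers
    if group == "00" or serial == "0000":
        return False  # zero group / zero serial numbers are never issued
    return True
# e.g. looks_like_valid_ssn(JohnDoe.SSN) is True: its digits are 050 13 8035.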
| 27.390244 | 92 | 0.715049 |
f760b6267d6009ab0fdb0417e574e500071072c2 | 35,299 | py | Python | src/scorer.py | nobu-g/cohesion-analysis | bf2e22c1aff51f96fd2aaef6359839646548c3be | [
"MIT"
] | 12 | 2020-12-25T11:13:17.000Z | 2021-12-28T05:19:46.000Z | src/scorer.py | nobu-g/cohesion-analysis | bf2e22c1aff51f96fd2aaef6359839646548c3be | [
"MIT"
] | 1 | 2020-12-25T09:26:26.000Z | 2020-12-25T09:26:34.000Z | src/scorer.py | nobu-g/cohesion-analysis | bf2e22c1aff51f96fd2aaef6359839646548c3be | [
"MIT"
] | 1 | 2022-02-25T13:22:47.000Z | 2022-02-25T13:22:47.000Z | import argparse
import io
import logging
import sys
from collections import OrderedDict
from dataclasses import dataclass
from pathlib import Path
from typing import List, Dict, Set, Union, Optional, TextIO
import pandas as pd
from jinja2 import Template, Environment, FileSystemLoader
from kyoto_reader import KyotoReader, Document, Argument, SpecialArgument, BaseArgument, Predicate, Mention, BasePhrase
from pyknp import BList
from utils.constants import CASE2YOMI
from utils.util import is_pas_target, is_bridging_target, is_coreference_target
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)
class Scorer:
"""A class to evaluate system output.
To evaluate system output with this class, you have to prepare gold data and system prediction data as instances of
:class:`kyoto_reader.Document`
Args:
        documents_pred (List[Document]): system prediction documents
        documents_gold (List[Document]): gold documents
        target_cases (List[str]): cases to be evaluated (cf. kyoto_reader.ALL_CASES)
        target_exophors (List[str]): exophora referents to be evaluated (cf. kyoto_reader.ALL_EXOPHORS)
        bridging (bool): whether to evaluate bridging anaphora resolution (default: False)
        coreference (bool): whether to evaluate coreference resolution (default: False)
        pas_target (str): which predicates are treated as targets in PAS analysis ('pred': verbal predicates, 'noun': nominal predicates, 'all': both, '': no predicates (default: pred))
    Attributes:
        cases (List[str]): cases to be evaluated
        doc_ids: (List[str]): IDs of the target documents
        did2document_pred (Dict[str, Document]): mapping from document ID to system prediction document
        did2document_gold (Dict[str, Document]): mapping from document ID to gold document
        bridging (bool): whether to evaluate bridging anaphora resolution
        coreference (bool): whether to evaluate coreference resolution
        pas_target (str): which predicates are treated as targets in PAS analysis
        comp_result (Dict[tuple, str]): dictionary that stores the comparison result between gold and prediction
        sub_scorers (List[SubScorer]): scorer objects, one per document
        relax_exophors (Dict[str, str]): mapping used to evaluate e.g. "不特定:人1" as "不特定:人"
"""
DEPTYPE2ANALYSIS = OrderedDict([('overt', 'overt'),
('dep', 'dep'),
('intra', 'zero_intra'),
('inter', 'zero_inter'),
('exo', 'zero_exophora')])
def __init__(self,
documents_pred: List[Document],
documents_gold: List[Document],
target_cases: List[str],
target_exophors: List[str],
bridging: bool = False,
coreference: bool = False,
pas_target: str = 'pred'):
# long document may have been ignored
assert set(doc.doc_id for doc in documents_pred) <= set(doc.doc_id for doc in documents_gold)
self.cases: List[str] = target_cases if pas_target != '' else []
self.doc_ids: List[str] = [doc.doc_id for doc in documents_pred]
self.did2document_pred: Dict[str, Document] = {doc.doc_id: doc for doc in documents_pred}
self.did2document_gold: Dict[str, Document] = {doc.doc_id: doc for doc in documents_gold}
self.bridging: bool = bridging
self.coreference: bool = coreference
self.pas_target: str = pas_target
self.comp_result: Dict[tuple, str] = {}
self.sub_scorers: List[SubScorer] = []
self.relax_exophors: Dict[str, str] = {}
for exophor in target_exophors:
self.relax_exophors[exophor] = exophor
if exophor in ('不特定:人', '不特定:物', '不特定:状況'):
for n in ('1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11'):
self.relax_exophors[exophor + n] = exophor
def run(self) -> 'ScoreResult':
"""読み込んだ正解文書集合とシステム予測文書集合に対して評価を行う
Returns:
ScoreResult: 評価結果のスコア
"""
self.comp_result = {}
self.sub_scorers = []
all_result = None
for doc_id in self.doc_ids:
sub_scorer = SubScorer(self.did2document_pred[doc_id], self.did2document_gold[doc_id],
cases=self.cases,
bridging=self.bridging,
coreference=self.coreference,
relax_exophors=self.relax_exophors,
pas_target=self.pas_target)
if all_result is None:
all_result = sub_scorer.run()
else:
all_result += sub_scorer.run()
self.sub_scorers.append(sub_scorer)
self.comp_result.update({(doc_id, *key): val for key, val in sub_scorer.comp_result.items()})
return all_result
def write_html(self, output_file: Union[str, Path]) -> None:
"""正解データとシステム予測の比較をHTML形式で書き出し
Args:
output_file (Union[str, Path]): 出力先ファイル
"""
data: List[tuple] = []
for sub_scorer in self.sub_scorers:
gold_tree = ''
for sid in sub_scorer.document_gold.sid2sentence.keys():
with io.StringIO() as string:
self._draw_tree(sid,
sub_scorer.predicates_gold,
sub_scorer.mentions_gold,
sub_scorer.bridgings_gold,
sub_scorer.document_gold,
fh=string)
gold_tree += string.getvalue()
pred_tree = ''
for sid in sub_scorer.document_pred.sid2sentence.keys():
with io.StringIO() as string:
self._draw_tree(sid,
sub_scorer.predicates_pred,
sub_scorer.mentions_pred,
sub_scorer.bridgings_pred,
sub_scorer.document_pred,
fh=string)
pred_tree += string.getvalue()
data.append((sub_scorer.document_gold.sentences, gold_tree, pred_tree))
env = Environment(loader=FileSystemLoader(str(Path(__file__).parent)))
template: Template = env.get_template('template.html')
with Path(output_file).open('wt') as f:
f.write(template.render({'data': data}))
def _draw_tree(self,
sid: str,
predicates: List[BasePhrase],
mentions: List[BasePhrase],
anaphors: List[BasePhrase],
document: Document,
fh: Optional[TextIO] = None,
html: bool = True
) -> None:
"""Write the predicate-argument structures, coreference relations, and bridging anaphora relations of the
specified sentence in tree format.
Args:
            sid (str): ID of the sentence to draw
            predicates (List[BasePhrase]): all predicates in the document
            mentions (List[BasePhrase]): all mentions in the document
            anaphors (List[BasePhrase]): all bridging anaphors in the document
            document (Document): document that contains the target sentence
            fh (Optional[TextIO]): output stream
            html (bool): whether to output in HTML format
"""
result2color = {anal: 'blue' for anal in Scorer.DEPTYPE2ANALYSIS.values()}
result2color.update({'overt': 'green', 'wrong': 'red', None: 'gray'})
result2color_coref = {'correct': 'blue', 'wrong': 'red', None: 'gray'}
blist: BList = document.sid2sentence[sid].blist
with io.StringIO() as string:
blist.draw_tag_tree(fh=string, show_pos=False)
tree_strings = string.getvalue().rstrip('\n').split('\n')
assert len(tree_strings) == len(blist.tag_list())
all_targets = [m.core for m in document.mentions.values()]
tid2predicate: Dict[int, BasePhrase] = {predicate.tid: predicate for predicate in predicates
if predicate.sid == sid}
tid2mention: Dict[int, BasePhrase] = {mention.tid: mention for mention in mentions if mention.sid == sid}
tid2bridging: Dict[int, BasePhrase] = {anaphor.tid: anaphor for anaphor in anaphors if anaphor.sid == sid}
for tid in range(len(tree_strings)):
tree_strings[tid] += ' '
if tid in tid2predicate:
predicate = tid2predicate[tid]
arguments = document.get_arguments(predicate)
for case in self.cases:
args = arguments[case]
if case == 'ガ':
args += arguments['判ガ']
targets = set()
for arg in args:
target = str(arg)
if all_targets.count(str(arg)) > 1 and isinstance(arg, Argument):
target += str(arg.dtid)
targets.add(target)
result = self.comp_result.get((document.doc_id, predicate.dtid, case), None)
if html:
tree_strings[tid] += f'<font color="{result2color[result]}">{case}:{",".join(targets)}</font> '
else:
tree_strings[tid] += f'{case}:{",".join(targets)} '
if self.bridging and tid in tid2bridging:
anaphor = tid2bridging[tid]
arguments = document.get_arguments(anaphor)
args = arguments['ノ'] + arguments['ノ?']
targets = set()
for arg in args:
target = str(arg)
if all_targets.count(str(arg)) > 1 and isinstance(arg, Argument):
target += str(arg.dtid)
targets.add(target)
result = self.comp_result.get((document.doc_id, anaphor.dtid, 'ノ'), None)
if html:
tree_strings[tid] += f'<font color="{result2color[result]}">ノ:{",".join(targets)}</font> '
else:
tree_strings[tid] += f'ノ:{",".join(targets)} '
if self.coreference and tid in tid2mention:
targets = set()
src_dtid = tid2mention[tid].dtid
if src_dtid in document.mentions:
src_mention = document.mentions[src_dtid]
tgt_mentions_relaxed = SubScorer.filter_mentions(
document.get_siblings(src_mention, relax=True), src_mention)
for tgt_mention in tgt_mentions_relaxed:
target: str = tgt_mention.core
if all_targets.count(target) > 1:
target += str(tgt_mention.dtid)
targets.add(target)
for eid in src_mention.eids:
entity = document.entities[eid]
if entity.exophor in self.relax_exophors:
targets.add(entity.exophor)
result = self.comp_result.get((document.doc_id, src_dtid, '='), None)
if html:
tree_strings[tid] += f'<font color="{result2color_coref[result]}">=:{",".join(targets)}</font>'
else:
tree_strings[tid] += '=:' + ','.join(targets)
print('\n'.join(tree_strings), file=fh)
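# (Added example, not in the original scorer.py) A minimal sketch of how Scorer is typically
# driven; the directory and file names are placeholders, the case/exophor lists are examples
# (see kyoto_reader.ALL_CASES / ALL_EXOPHORS), and KyotoReader.process_all_documents() is
# assumed to be the reader API that yields Document objects.
def _example_scoring(prediction_dir: str, gold_dir: str, html_out: str) -> 'ScoreResult':
    documents_pred = list(KyotoReader(Path(prediction_dir)).process_all_documents())
    documents_gold = list(KyotoReader(Path(gold_dir)).process_all_documents())
    scorer = Scorer(documents_pred, documents_gold,
                    target_cases=['ガ', 'ヲ', 'ニ', 'ガ2'],
                    target_exophors=['著者', '読者', '不特定:人'],
                    bridging=True, coreference=True, pas_target='pred')
    result = scorer.run()        # aggregated ScoreResult over all documents
    scorer.write_html(html_out)  # side-by-side gold/prediction trees for error analysis
    return result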
class SubScorer:
"""Scorer for single document pair.
Args:
        document_pred (Document): system prediction document
        document_gold (Document): gold document
        cases (List[str]): cases to be evaluated
        bridging (bool): whether to evaluate bridging anaphora resolution (default: False)
        coreference (bool): whether to evaluate coreference resolution (default: False)
        relax_exophors (Dict[str, str]): mapping used to evaluate e.g. "不特定:人1" as "不特定:人"
        pas_target (str): which predicates are treated as targets in PAS analysis
    Attributes:
        doc_id (str): target document ID
        document_pred (Document): system prediction document
        document_gold (Document): gold document
        cases (List[str]): cases to be evaluated
        pas (bool): whether to evaluate predicate-argument structure analysis
        bridging (bool): whether to evaluate bridging anaphora resolution
        coreference (bool): whether to evaluate coreference resolution
        comp_result (Dict[tuple, str]): dictionary that stores the comparison result between gold and prediction
        relax_exophors (Dict[str, str]): mapping used to evaluate e.g. "不特定:人1" as "不特定:人"
        predicates_pred: (List[BasePhrase]): predicates in the system prediction document
        bridgings_pred: (List[BasePhrase]): bridging anaphors in the system prediction document
        mentions_pred: (List[BasePhrase]): mentions in the system prediction document
        predicates_gold: (List[BasePhrase]): predicates in the gold document
        bridgings_gold: (List[BasePhrase]): bridging anaphors in the gold document
        mentions_gold: (List[BasePhrase]): mentions in the gold document
"""
def __init__(self,
document_pred: Document,
document_gold: Document,
cases: List[str],
bridging: bool,
coreference: bool,
relax_exophors: Dict[str, str],
pas_target: str):
assert document_pred.doc_id == document_gold.doc_id
self.doc_id: str = document_gold.doc_id
self.document_pred: Document = document_pred
self.document_gold: Document = document_gold
self.cases: List[str] = cases
self.pas: bool = pas_target != ''
self.bridging: bool = bridging
self.coreference: bool = coreference
self.comp_result: Dict[tuple, str] = {}
self.relax_exophors: Dict[str, str] = relax_exophors
self.predicates_pred: List[BasePhrase] = []
self.bridgings_pred: List[BasePhrase] = []
self.mentions_pred: List[BasePhrase] = []
for bp in document_pred.bp_list():
if is_pas_target(bp, verbal=(pas_target in ('pred', 'all')), nominal=(pas_target in ('noun', 'all'))):
self.predicates_pred.append(bp)
if self.bridging and is_bridging_target(bp):
self.bridgings_pred.append(bp)
if self.coreference and is_coreference_target(bp):
self.mentions_pred.append(bp)
self.predicates_gold: List[BasePhrase] = []
self.bridgings_gold: List[BasePhrase] = []
self.mentions_gold: List[BasePhrase] = []
for bp in document_gold.bp_list():
if is_pas_target(bp, verbal=(pas_target in ('pred', 'all')), nominal=(pas_target in ('noun', 'all'))):
self.predicates_gold.append(bp)
if self.bridging and is_bridging_target(bp):
self.bridgings_gold.append(bp)
if self.coreference and is_coreference_target(bp):
self.mentions_gold.append(bp)
def run(self) -> 'ScoreResult':
"""Perform evaluation for the given gold document and system prediction document.
Returns:
            ScoreResult: evaluation result scores
"""
self.comp_result = {}
measures_pas = self._evaluate_pas() if self.pas else None
measures_bridging = self._evaluate_bridging() if self.bridging else None
measure_coref = self._evaluate_coref() if self.coreference else None
return ScoreResult(measures_pas, measures_bridging, measure_coref)
def _evaluate_pas(self) -> pd.DataFrame:
"""calculate predicate-argument structure analysis scores"""
# measures: Dict[str, Dict[str, Measure]] = OrderedDict(
# (case, OrderedDict((anal, Measure()) for anal in Scorer.DEPTYPE2ANALYSIS.values()))
# for case in self.cases)
measures = pd.DataFrame([[Measure() for _ in Scorer.DEPTYPE2ANALYSIS.values()] for _ in self.cases],
index=self.cases, columns=Scorer.DEPTYPE2ANALYSIS.values())
dtid2predicate_pred: Dict[int, Predicate] = {pred.dtid: pred for pred in self.predicates_pred}
dtid2predicate_gold: Dict[int, Predicate] = {pred.dtid: pred for pred in self.predicates_gold}
for dtid in range(len(self.document_pred.bp_list())):
if dtid in dtid2predicate_pred:
predicate_pred = dtid2predicate_pred[dtid]
arguments_pred = self.document_pred.get_arguments(predicate_pred, relax=False)
else:
arguments_pred = None
if dtid in dtid2predicate_gold:
predicate_gold = dtid2predicate_gold[dtid]
arguments_gold = self.document_gold.get_arguments(predicate_gold, relax=False)
arguments_gold_relaxed = self.document_gold.get_arguments(predicate_gold, relax=True)
else:
predicate_gold = arguments_gold = arguments_gold_relaxed = None
for case in self.cases:
args_pred: List[BaseArgument] = arguments_pred[case] if arguments_pred is not None else []
assert len(args_pred) in (0, 1) # Our analyzer predicts one argument for one predicate
if predicate_gold is not None:
args_gold = self._filter_args(arguments_gold[case], predicate_gold)
args_gold_relaxed = self._filter_args(
arguments_gold_relaxed[case] + (arguments_gold_relaxed['判ガ'] if case == 'ガ' else []),
predicate_gold)
else:
args_gold = args_gold_relaxed = []
key = (dtid, case)
# calculate precision
if args_pred:
arg = args_pred[0]
if arg in args_gold_relaxed:
# use dep_type of gold argument if possible
arg_gold = args_gold_relaxed[args_gold_relaxed.index(arg)]
analysis = Scorer.DEPTYPE2ANALYSIS[arg_gold.dep_type]
self.comp_result[key] = analysis
measures.at[case, analysis].correct += 1
else:
                        # the dep_type from the system output may differ from the gold one, so an inconsistency is possible here
analysis = Scorer.DEPTYPE2ANALYSIS[arg.dep_type]
                        self.comp_result[key] = 'wrong'  # lowers precision
measures.at[case, analysis].denom_pred += 1
# calculate recall
                # if there are multiple gold arguments and one of them was predicted, adopt it as the correct answer;
                # if none of them was predicted, adopt one of the non-relaxed gold arguments as the correct answer
if args_gold or (self.comp_result.get(key, None) in Scorer.DEPTYPE2ANALYSIS.values()):
arg_gold = None
for arg in args_gold_relaxed:
if arg in args_pred:
                            arg_gold = arg  # prefer the predicted argument as the adopted gold argument
break
if arg_gold is not None:
analysis = Scorer.DEPTYPE2ANALYSIS[arg_gold.dep_type]
assert self.comp_result[key] == analysis
else:
analysis = Scorer.DEPTYPE2ANALYSIS[args_gold[0].dep_type]
if args_pred:
assert self.comp_result[key] == 'wrong'
else:
                            self.comp_result[key] = 'wrong'  # lowers recall
measures.at[case, analysis].denom_gold += 1
return measures
def _filter_args(self,
args: List[BaseArgument],
predicate: Predicate,
) -> List[BaseArgument]:
filtered_args = []
for arg in args:
if isinstance(arg, SpecialArgument):
if arg.exophor not in self.relax_exophors: # filter out non-target exophors
continue
                arg.exophor = self.relax_exophors[arg.exophor]  # treat e.g. "不特定:人1" as "不特定:人"
else:
assert isinstance(arg, Argument)
# filter out self-anaphora and cataphoras
if predicate.dtid == arg.dtid or (predicate.dtid < arg.dtid and arg.sid != predicate.sid):
continue
filtered_args.append(arg)
return filtered_args
def _evaluate_bridging(self) -> pd.Series:
"""calculate bridging anaphora resolution scores"""
measures: Dict[str, Measure] = OrderedDict((anal, Measure()) for anal in Scorer.DEPTYPE2ANALYSIS.values())
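        # unlike PAS analysis, bridging has a single relation type (ノ),
        # so a flat mapping from analysis type to Measure is sufficient here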
dtid2anaphor_pred: Dict[int, Predicate] = {pred.dtid: pred for pred in self.bridgings_pred}
dtid2anaphor_gold: Dict[int, Predicate] = {pred.dtid: pred for pred in self.bridgings_gold}
for dtid in range(len(self.document_pred.bp_list())):
if dtid in dtid2anaphor_pred:
anaphor_pred = dtid2anaphor_pred[dtid]
antecedents_pred: List[BaseArgument] = \
self._filter_args(self.document_pred.get_arguments(anaphor_pred, relax=False)['ノ'], anaphor_pred)
else:
antecedents_pred = []
assert len(antecedents_pred) in (0, 1) # in bert_pas_analysis, predict one argument for one predicate
if dtid in dtid2anaphor_gold:
anaphor_gold: Predicate = dtid2anaphor_gold[dtid]
antecedents_gold: List[BaseArgument] = \
self._filter_args(self.document_gold.get_arguments(anaphor_gold, relax=False)['ノ'], anaphor_gold)
arguments: Dict[str, List[BaseArgument]] = self.document_gold.get_arguments(anaphor_gold, relax=True)
antecedents_gold_relaxed: List[BaseArgument] = \
self._filter_args(arguments['ノ'] + arguments['ノ?'], anaphor_gold)
else:
antecedents_gold = antecedents_gold_relaxed = []
key = (dtid, 'ノ')
# calculate precision
if antecedents_pred:
antecedent_pred = antecedents_pred[0]
if antecedent_pred in antecedents_gold_relaxed:
# use dep_type of gold antecedent if possible
antecedent_gold = antecedents_gold_relaxed[antecedents_gold_relaxed.index(antecedent_pred)]
analysis = Scorer.DEPTYPE2ANALYSIS[antecedent_gold.dep_type]
if analysis == 'overt':
analysis = 'dep'
self.comp_result[key] = analysis
measures[analysis].correct += 1
else:
analysis = Scorer.DEPTYPE2ANALYSIS[antecedent_pred.dep_type]
if analysis == 'overt':
analysis = 'dep'
self.comp_result[key] = 'wrong'
measures[analysis].denom_pred += 1
# calculate recall
if antecedents_gold or (self.comp_result.get(key, None) in Scorer.DEPTYPE2ANALYSIS.values()):
antecedent_gold = None
for ant in antecedents_gold_relaxed:
if ant in antecedents_pred:
                        antecedent_gold = ant  # prefer the predicted antecedent as the gold antecedent
break
if antecedent_gold is not None:
analysis = Scorer.DEPTYPE2ANALYSIS[antecedent_gold.dep_type]
if analysis == 'overt':
analysis = 'dep'
assert self.comp_result[key] == analysis
else:
analysis = Scorer.DEPTYPE2ANALYSIS[antecedents_gold[0].dep_type]
if analysis == 'overt':
analysis = 'dep'
if antecedents_pred:
assert self.comp_result[key] == 'wrong'
else:
self.comp_result[key] = 'wrong'
measures[analysis].denom_gold += 1
return pd.Series(measures)
def _evaluate_coref(self) -> pd.Series:
"""calculate coreference resolution scores"""
measure = Measure()
dtid2mention_pred: Dict[int, Mention] = {bp.dtid: self.document_pred.mentions[bp.dtid]
for bp in self.mentions_pred
if bp.dtid in self.document_pred.mentions}
dtid2mention_gold: Dict[int, Mention] = {bp.dtid: self.document_gold.mentions[bp.dtid]
for bp in self.mentions_gold
if bp.dtid in self.document_gold.mentions}
for dtid in range(len(self.document_pred.bp_list())):
if dtid in dtid2mention_pred:
src_mention_pred = dtid2mention_pred[dtid]
tgt_mentions_pred = \
self.filter_mentions(self.document_pred.get_siblings(src_mention_pred), src_mention_pred)
exophors_pred = {e.exophor for e in map(self.document_pred.entities.get, src_mention_pred.eids)
if e.is_special}
else:
tgt_mentions_pred = exophors_pred = set()
if dtid in dtid2mention_gold:
src_mention_gold = dtid2mention_gold[dtid]
tgt_mentions_gold = self.filter_mentions(self.document_gold.get_siblings(src_mention_gold, relax=False),
src_mention_gold)
tgt_mentions_gold_relaxed = self.filter_mentions(
self.document_gold.get_siblings(src_mention_gold, relax=True), src_mention_gold)
exophors_gold = {self.relax_exophors[e.exophor] for e
in map(self.document_gold.entities.get, src_mention_gold.eids)
if e.is_special and e.exophor in self.relax_exophors}
exophors_gold_relaxed = {self.relax_exophors[e.exophor] for e
in map(self.document_gold.entities.get, src_mention_gold.all_eids)
if e.is_special and e.exophor in self.relax_exophors}
else:
tgt_mentions_gold = tgt_mentions_gold_relaxed = exophors_gold = exophors_gold_relaxed = set()
key = (dtid, '=')
# calculate precision
if tgt_mentions_pred or exophors_pred:
if (tgt_mentions_pred & tgt_mentions_gold_relaxed) or (exophors_pred & exophors_gold_relaxed):
self.comp_result[key] = 'correct'
measure.correct += 1
else:
self.comp_result[key] = 'wrong'
measure.denom_pred += 1
# calculate recall
if tgt_mentions_gold or exophors_gold or (self.comp_result.get(key, None) == 'correct'):
if (tgt_mentions_pred & tgt_mentions_gold_relaxed) or (exophors_pred & exophors_gold_relaxed):
assert self.comp_result[key] == 'correct'
else:
self.comp_result[key] = 'wrong'
measure.denom_gold += 1
return pd.Series([measure], index=['all'])
@staticmethod
def filter_mentions(tgt_mentions: Set[Mention], src_mention: Mention) -> Set[Mention]:
"""filter out cataphors"""
return {tgt_mention for tgt_mention in tgt_mentions if tgt_mention.dtid < src_mention.dtid}
@dataclass(frozen=True)
class ScoreResult:
"""A data class for storing the numerical result of an evaluation"""
measures_pas: Optional[pd.DataFrame]
measures_bridging: Optional[pd.Series]
measure_coref: Optional[pd.Series]
def to_dict(self) -> Dict[str, Dict[str, 'Measure']]:
"""convert data to dictionary"""
df_all = pd.DataFrame(index=['all_case'])
if self.pas:
df_pas: pd.DataFrame = self.measures_pas.copy()
df_pas['zero'] = df_pas['zero_intra'] + df_pas['zero_inter'] + df_pas['zero_exophora']
df_pas['dep_zero'] = df_pas['zero'] + df_pas['dep']
df_pas['all'] = df_pas['dep_zero'] + df_pas['overt']
df_all = pd.concat([df_pas, df_all])
df_all.loc['all_case'] = df_pas.sum(axis=0)
if self.bridging:
df_bar = self.measures_bridging.copy()
df_bar['zero'] = df_bar['zero_intra'] + df_bar['zero_inter'] + df_bar['zero_exophora']
df_bar['dep_zero'] = df_bar['zero'] + df_bar['dep']
assert df_bar['overt'] == Measure() # No overt in BAR
df_bar['all'] = df_bar['dep_zero']
df_all.at['all_case', 'bridging'] = df_bar['all']
if self.coreference:
df_all.at['all_case', 'coreference'] = self.measure_coref['all']
return {k1: {k2: v2 for k2, v2 in v1.items() if pd.notnull(v2)}
for k1, v1 in df_all.to_dict(orient='index').items()}
def export_txt(self,
destination: Union[str, Path, TextIO]
) -> None:
"""Export the evaluation results in a text format.
Args:
            destination (Union[str, Path, TextIO]): where to write the results
"""
lines = []
for key, ms in self.to_dict().items():
lines.append(f'{key}格' if self.pas and key in self.measures_pas.index else key)
for analysis, measure in ms.items():
lines.append(f' {analysis}')
lines.append(f' precision: {measure.precision:.4f} ({measure.correct}/{measure.denom_pred})')
lines.append(f' recall : {measure.recall:.4f} ({measure.correct}/{measure.denom_gold})')
lines.append(f' F : {measure.f1:.4f}')
text = '\n'.join(lines) + '\n'
if isinstance(destination, str) or isinstance(destination, Path):
with Path(destination).open('wt') as writer:
writer.write(text)
elif isinstance(destination, io.TextIOBase):
destination.write(text)
def export_csv(self,
destination: Union[str, Path, TextIO],
sep: str = ','
) -> None:
"""Export the evaluation results in a csv format.
Args:
            destination (Union[str, Path, TextIO]): where to write the results
            sep (str): field delimiter (default: ',')
"""
text = ''
result_dict = self.to_dict()
text += 'case' + sep
text += sep.join(result_dict['all_case'].keys()) + '\n'
for case, measures in result_dict.items():
text += CASE2YOMI.get(case, case) + sep
text += sep.join(f'{measure.f1:.6}' for measure in measures.values())
text += '\n'
if isinstance(destination, str) or isinstance(destination, Path):
with Path(destination).open('wt') as writer:
writer.write(text)
elif isinstance(destination, io.TextIOBase):
destination.write(text)
@property
def pas(self):
"""Whether self includes the score of predicate-argument structure analysis."""
return self.measures_pas is not None
@property
def bridging(self):
"""Whether self includes the score of bridging anaphora resolution."""
return self.measures_bridging is not None
@property
def coreference(self):
"""Whether self includes the score of coreference resolution."""
return self.measure_coref is not None
def __add__(self, other: 'ScoreResult') -> 'ScoreResult':
measures_pas = self.measures_pas + other.measures_pas if self.pas else None
measures_bridging = self.measures_bridging + other.measures_bridging if self.bridging else None
measure_coref = self.measure_coref + other.measure_coref if self.coreference else None
return ScoreResult(measures_pas, measures_bridging, measure_coref)
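# Note: ScoreResult supports "+", so results of separate evaluation runs can be
# aggregated before exporting, e.g. (illustrative): total = result_a + result_b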
@dataclass
class Measure:
"""A data class to calculate and represent F-measure"""
denom_pred: int = 0
denom_gold: int = 0
correct: int = 0
def __add__(self, other: 'Measure'):
return Measure(self.denom_pred + other.denom_pred,
self.denom_gold + other.denom_gold,
self.correct + other.correct)
def __eq__(self, other: 'Measure'):
return self.denom_pred == other.denom_pred and \
self.denom_gold == other.denom_gold and \
self.correct == other.correct
@property
def precision(self) -> float:
if self.denom_pred == 0:
return .0
return self.correct / self.denom_pred
@property
def recall(self) -> float:
if self.denom_gold == 0:
return .0
return self.correct / self.denom_gold
@property
def f1(self) -> float:
if self.denom_pred + self.denom_gold == 0:
return .0
return 2 * self.correct / (self.denom_pred + self.denom_gold)
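# A small worked example of how a Measure turns counts into scores (values are illustrative):
#   m = Measure(denom_pred=4, denom_gold=5, correct=3)
#   m.precision  -> 3/4 = 0.75
#   m.recall     -> 3/5 = 0.60
#   m.f1         -> 2*3/(4+5) ~= 0.667, i.e. the harmonic mean of precision and recall
# Measure also supports "+", so counts can be summed across documents before scoring.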
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--prediction-dir', default=None, type=str,
help='path to directory where system output KWDLC files exist (default: None)')
parser.add_argument('--gold-dir', default=None, type=str,
help='path to directory where gold KWDLC files exist (default: None)')
parser.add_argument('--coreference', '--coref', '--cr', action='store_true', default=False,
help='perform coreference resolution')
parser.add_argument('--bridging', '--brg', '--bar', action='store_true', default=False,
help='perform bridging anaphora resolution')
parser.add_argument('--case-string', type=str, default='ガ,ヲ,ニ,ガ2',
help='case strings separated by ","')
parser.add_argument('--exophors', '--exo', type=str, default='著者,読者,不特定:人,不特定:物',
help='exophor strings separated by ","')
parser.add_argument('--read-prediction-from-pas-tag', action='store_true', default=False,
help='use <述語項構造:> tag instead of <rel > tag in prediction files')
parser.add_argument('--pas-target', choices=['', 'pred', 'noun', 'all'], default='pred',
help='PAS analysis evaluation target (pred: verbal predicates, noun: nominal predicates)')
parser.add_argument('--result-html', default=None, type=str,
help='path to html file which prediction result is exported (default: None)')
parser.add_argument('--result-csv', default=None, type=str,
help='path to csv file which prediction result is exported (default: None)')
args = parser.parse_args()
reader_gold = KyotoReader(Path(args.gold_dir), extract_nes=False, use_pas_tag=False)
reader_pred = KyotoReader(
Path(args.prediction_dir),
extract_nes=False,
use_pas_tag=args.read_prediction_from_pas_tag,
)
documents_pred = reader_pred.process_all_documents()
documents_gold = reader_gold.process_all_documents()
assert set(args.case_string.split(',')) <= set(CASE2YOMI.keys())
msg = '"ノ" found in case string. If you want to perform bridging anaphora resolution, specify "--bridging" ' \
'option instead'
assert 'ノ' not in args.case_string.split(','), msg
scorer = Scorer(documents_pred, documents_gold,
target_cases=args.case_string.split(','),
target_exophors=args.exophors.split(','),
coreference=args.coreference,
bridging=args.bridging,
pas_target=args.pas_target)
result = scorer.run()
if args.result_html:
scorer.write_html(Path(args.result_html))
if args.result_csv:
result.export_csv(args.result_csv)
result.export_txt(sys.stdout)
if __name__ == '__main__':
main()
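# Example invocation (the script name and paths below are placeholders, not part of this file):
#   python scorer.py --prediction-dir system_output/ --gold-dir gold_kwdlc/ \
#       --coreference --bridging --result-csv scores.csv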
| 48.222678 | 120 | 0.575342 |
f760cd754822e6c117f6f18b47329bbd5af54f18 | 407 | py | Python | setup.py | ethanmiller/tablesnap | c1573247a21fdcdaa422d795ea31c821c4180e28 | ["BSD-3-Clause"] | null | null | null | setup.py | ethanmiller/tablesnap | c1573247a21fdcdaa422d795ea31c821c4180e28 | ["BSD-3-Clause"] | null | null | null | setup.py | ethanmiller/tablesnap | c1573247a21fdcdaa422d795ea31c821c4180e28 | ["BSD-3-Clause"] | null | null | null |
from setuptools import setup
setup(
name='tablesnap',
version='0.7.2',
author='Jeremy Grosser',
author_email='jeremy@synack.me',
url='https://github.com/JeremyGrosser/tablesnap',
scripts=[
'tablesnap',
'tableslurp',
'tablechop'
],
install_requires=[
'pyinotify',
'boto>=2.6.0',
'argparse',
'python-dateutil',
],
)
| 19.380952 | 53 | 0.555283 |
f7610c48b982c798ed895bc73badd3854b56d97c | 17,389 | py | Python | ost/s1_core/search.py | Scartography/OpenSarToolkit | 53c19e1c6c21260e73518eb9106ce7305539a83d | ["MIT"] | null | null | null | ost/s1_core/search.py | Scartography/OpenSarToolkit | 53c19e1c6c21260e73518eb9106ce7305539a83d | ["MIT"] | null | null | null | ost/s1_core/search.py | Scartography/OpenSarToolkit | 53c19e1c6c21260e73518eb9106ce7305539a83d | ["MIT"] | null | null | null |
'''
Based on a set of search parameters the script will create a query
on www.scihub.copernicus.eu and return the results either
as shapefile, sqlite, or write to a PostGreSQL database.
------------------
Usage
------------------
python3 search.py -a /path/to/aoi-shapefile.shp -b 2018-01-01 -e 2018-12-31
                  -t GRD -p VV -m IW -o /path/to/search.shp
-a defines an ISO3 country code or the path to an ESRI shapefile
-b defines the start date (format: YYYY-MM-DD)*
-e defines the end date for the search (format: YYYY-MM-DD)*
-t defines the product type (i.e. RAW, SLC or GRD)*
-p defines the polarisation mode (VV, VH, HH or HV)*
-m defines the beam mode (IW, EW or SM)*
-o defines the output, which can be a shapefile (ending with .shp),
   a SQLite DB (ending with .sqlite) or a PostGreSQL DB (no suffix)
-user defines the scihub username*
-pass defines the scihub secret password*
* optional, i.e. the script will look for all available products and will ask for
  username and password during execution
'''
import os
import sys
import logging
import datetime
from urllib.error import URLError
import xml.dom.minidom
import dateutil.parser
import geopandas as gpd
from shapely.wkt import dumps, loads
from ost.helpers.db import pgHandler
from ost.helpers import scihub
from ost.errors import EmptySearchError
logger = logging.getLogger(__name__)
def _query_scihub(apihub, opener, query):
"""
Get the data from the scihub catalogue
and write it to a GeoPandas GeoDataFrame
"""
# create empty GDF
columns = [
'identifier', 'polarisationmode', 'orbitdirection',
'acquisitiondate', 'relativeorbitnumber', 'orbitnumber',
'producttype', 'slicenumber', 'size', 'beginposition',
'endposition', 'lastrelativeorbitnumber', 'lastorbitnumber',
'uuid', 'platformidentifier', 'missiondatatakeid',
'swathidentifier', 'ingestiondate', 'sensoroperationalmode',
'footprint'
]
crs = {'init': 'epsg:4326'}
geo_df = gpd.GeoDataFrame(columns=columns,
crs=crs,
geometry='footprint'
)
dom = None
# we need this for the paging
index = 0
rows = 99
next_page = 1
while next_page:
# construct the final url
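        # e.g. on the first page (illustrative only):
        #   https://scihub.copernicus.eu/dhus/search?q=<query>&rows=99&start=0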
url = apihub + query + "&rows={}&start={}".format(rows, index)
try:
# get the request
req = opener.open(url)
response = req.read().decode('utf-8')
if response == '':
raise ConnectionError('No response or page empty!!')
dom = xml.dom.minidom.parseString(response)
except URLError as err:
if hasattr(err, 'reason'):
logger.debug('We failed to connect to the server.')
                logger.debug('Reason: %s', err.reason)
sys.exit()
elif hasattr(err, 'code'):
logger.debug('The server couldn\'t fulfill the request.')
                logger.debug('Error code: %s', err.code)
sys.exit()
if dom is None:
return 'empty'
acq_list = []
        # loop through each entry (with all metadata)
for node in dom.getElementsByTagName('entry'):
# we get all the date entries
dict_date = {
s.getAttribute('name'):
dateutil.parser.parse(s.firstChild.data).astimezone(
dateutil.tz.tzutc()
)
for s in node.getElementsByTagName('date')
}
# we get all the int entries
dict_int = {
s.getAttribute('name'): s.firstChild.data
for s in node.getElementsByTagName('int')
}
            # we get all the str entries
dict_str = {
s.getAttribute('name'): s.firstChild.data
for s in node.getElementsByTagName('str')
}
# merge the dicts and append to the catalogue list
acq = dict(dict_date, **dict_int, **dict_str)
            # fill in empty fields in dict by using identifier
if 'swathidentifier'not in acq.keys():
acq['swathidentifier'] = acq['identifier'].split("_")[1]
if 'producttype'not in acq.keys():
acq['producttype'] = acq['identifier'].split("_")[2]
if 'slicenumber'not in acq.keys():
acq['slicenumber'] = 0
# append all scenes from this page to a list
acq_list.append([acq['identifier'],
acq['polarisationmode'],
acq['orbitdirection'],
acq['beginposition'].strftime('%Y%m%d'),
acq['relativeorbitnumber'],
acq['orbitnumber'],
acq['producttype'],
acq['slicenumber'],
acq['size'],
acq['beginposition'].isoformat(),
acq['endposition'].isoformat(),
acq['lastrelativeorbitnumber'],
acq['lastorbitnumber'],
acq['uuid'],
acq['platformidentifier'],
acq['missiondatatakeid'],
acq['swathidentifier'],
acq['ingestiondate'].isoformat(),
acq['sensoroperationalmode'],
loads(acq['footprint'])
])
        # transform all results from that page to a gdf
gdf = gpd.GeoDataFrame(acq_list,
columns=columns,
crs=crs,
geometry='footprint'
)
# append the gdf to the full gdf
geo_df = geo_df.append(gdf)
# retrieve next page and set index up by 99 entries
next_page = scihub.next_page(dom)
index += rows
return geo_df
def _to_shapefile(gdf, outfile, append=False):
# check if file is there
if os.path.isfile(outfile):
# in case we want to append, we load the old one and add the new one
if append:
columns = [
'id', 'identifier', 'polarisationmode',
'orbitdirection', 'acquisitiondate', 'relativeorbit',
'orbitnumber', 'product_type', 'slicenumber', 'size',
'beginposition', 'endposition',
'lastrelativeorbitnumber', 'lastorbitnumber',
'uuid', 'platformidentifier', 'missiondatatakeid',
'swathidentifier', 'ingestiondate',
'sensoroperationalmode', 'geometry'
]
# get existing geodataframe from file
old_df = gpd.read_file(outfile)
old_df.columns = columns
# drop id
old_df.drop('id', axis=1, inplace=True)
# append new results
gdf.columns = columns[1:]
gdf = old_df.append(gdf)
# remove duplicate entries
gdf.drop_duplicates(subset='identifier', inplace=True)
else:
# remove old file
os.remove(outfile)
os.remove('{}.cpg'.format(outfile[:-4]))
os.remove('{}.prj'.format(outfile[:-4]))
os.remove('{}.shx'.format(outfile[:-4]))
os.remove('{}.dbf'.format(outfile[:-4]))
# calculate new index
gdf.insert(loc=0, column='id', value=range(1, 1 + len(gdf)))
# write to new file
gdf.to_file(outfile)
return outfile
def _to_postgis(gdf, db_connect, outtable):
# check if tablename already exists
db_connect.cursor.execute('SELECT EXISTS (SELECT * FROM '
'information_schema.tables WHERE '
'LOWER(table_name) = '
'LOWER(\'{}\'))'.format(outtable))
result = db_connect.cursor.fetchall()
if result[0][0] is False:
        logger.debug('INFO: Table {} does not exist in the database. '
                     'Creating it...'.format(outtable))
db_connect.pgCreateS1('{}'.format(outtable))
maxid = 1
else:
try:
maxid = db_connect.pgSQL('SELECT max(id) FROM {}'.format(outtable))
maxid = maxid[0][0]
if maxid is None:
maxid = 0
            logger.debug('INFO: Table {} already exists with {} entries. Will add '
                         'all non-existent results to this table.'.format(outtable, maxid))
maxid = maxid + 1
except:
            raise RuntimeError('ERROR: Existent table {} does not seem to be '
                               'compatible with Sentinel-1 data.'.format(outtable))
# add an index as first column
gdf.insert(loc=0, column='id', value=range(maxid, maxid + len(gdf)))
db_connect.pgSQLnoResp('SELECT UpdateGeometrySRID(\'{}\', '
'\'geometry\', 0);'.format(outtable.lower()))
# construct the SQL INSERT line
for _index, row in gdf.iterrows():
        row['geometry'] = dumps(row['footprint'])
        row.drop('footprint', inplace=True)
identifier = row.identifier
uuid = row.uuid
line = tuple(row.tolist())
# first check if scene is already in the table
result = db_connect.pgSQL('SELECT uuid FROM {} WHERE '
'uuid = \'{}\''.format(outtable, uuid))
try:
test_query = result[0][0]
except IndexError:
logger.debug('Inserting scene {} to {}'.format(identifier, outtable))
db_connect.pgInsert(outtable, line)
# apply the dateline correction routine
db_connect.pgDateline(outtable, uuid)
maxid += 1
else:
logger.debug('Scene {} already exists within table {}.'.format(identifier,
outtable)
)
logger.debug('INFO: Inserted {} entries into {}.'.format(len(gdf), outtable))
logger.debug('INFO: Table {} now contains {} entries.'.format(outtable,
maxid - 1)
)
logger.debug('INFO: Optimising database table.')
# drop index if existent
try:
db_connect.pgSQLnoResp('DROP INDEX {}_gix;'.format(outtable.lower()))
except:
pass
# create geometry index and vacuum analyze
db_connect.pgSQLnoResp('SELECT UpdateGeometrySRID(\'{}\', '
'\'geometry\', 4326);'.format(outtable.lower()))
db_connect.pgSQLnoResp('CREATE INDEX {}_gix ON {} USING GIST '
'(geometry);'.format(outtable, outtable.lower()))
db_connect.pgSQLnoResp('VACUUM ANALYZE {};'.format(outtable.lower()))
def check_availability(inventory_gdf, download_dir, data_mount):
'''This function checks if the data is already downloaded or
available through a mount point on DIAS cloud
'''
from ost import Sentinel1Scene
# add download path, or set to None if not found
inventory_gdf['download_path'] = inventory_gdf.identifier.apply(
lambda row: Sentinel1Scene(row).get_path(download_dir, data_mount)
)
return inventory_gdf
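# Hypothetical usage sketch (the directory paths are placeholders):
#   inventory_gdf = check_availability(inventory_gdf,
#                                      download_dir='/data/s1_download',
#                                      data_mount='/eodata')
# Scenes found in neither location end up with download_path set to None.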
def scihub_catalogue(
query_string,
output,
append=False,
uname=None,
pword=None
):
'''This is the main search function on scihub
'''
# retranslate Path object to string
output = str(output)
# get connected to scihub
base_url = 'https://scihub.copernicus.eu/dhus/'
opener = scihub.connect(base_url, uname, pword)
action = 'search?q='
apihub = base_url + action
# get the catalogue in a dict
gdf = _query_scihub(apihub, opener, query_string)
if gdf.empty:
raise EmptySearchError(
'Nothing found, either something is wrong with your credentials'
' or scihub is down, or wrong search are set!'
)
# define output
if output[-7:] == ".sqlite":
logger.debug('INFO: writing to an sqlite file')
# gdfInv2Sqlite(gdf, output)
elif output[-4:] == ".shp":
logger.debug('INFO: writing inventory data to shape file: {}'.format(output))
_to_shapefile(gdf, output, append)
else:
        logger.debug('INFO: writing inventory data to PostGIS table: {}'.format(output))
db_connect = pgHandler()
_to_postgis(gdf, db_connect, output)
return output
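# Minimal usage sketch (credentials, AOI and output path are placeholders, not real values):
#   from ost.helpers import scihub
#   aoi = scihub.create_aoi_str('POLYGON ((...))')
#   toi = scihub.create_toi_str('2018-01-01', '2018-12-31')
#   specs = scihub.create_s1_product_specs('GRD', 'VV', 'IW')
#   query = scihub.create_query('Sentinel-1', aoi, toi, specs)
#   scihub_catalogue(query, '/tmp/search.shp', append=False, uname='me', pword='secret')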
if __name__ == "__main__":
import argparse
from ost.helpers import utils
# get the current date
NOW = datetime.datetime.now()
NOW = NOW.strftime("%Y-%m-%d")
# write a description
DESCRIPT = """
This is a command line client for the inventory of Sentinel-1
data on the Copernicus Scihub server.
Output can be either an:
- exisiting PostGreSQL database
- newly created or existing SqLite database
- ESRI Shapefile
"""
EPILOG = """
Examples:
search.py -a /path/to/aoi-shapefile.shp -b 2018-01-01
              -e 2018-12-31
"""
# create a PARSER
PARSER = argparse.ArgumentParser(description=DESCRIPT, epilog=EPILOG)
# username/password scihub
PARSER.add_argument("-user", "--username",
help="Your username of scihub.copernicus.eu ",
default=None
)
PARSER.add_argument("-pass", "--password",
help="Your secret password of scihub.copernicus.eu ",
default=None
)
PARSER.add_argument("-a", "--areaofinterest",
help=('The Area of Interest as a WKT geometry'),
dest='aoi', default='*',
)
PARSER.add_argument("-b", "--begindate",
help="The Start Date (format: YYYY-MM-DD) ",
default="2014-10-01",
type=lambda x: utils.is_valid_date(PARSER, x)
)
PARSER.add_argument("-e", "--enddate",
help="The End Date (format: YYYY-MM-DD)",
default=NOW,
type=lambda x: utils.is_valid_date(PARSER, x)
)
PARSER.add_argument("-t", "--producttype",
help="The Product Type (RAW, SLC, GRD, *) ",
default='*'
)
PARSER.add_argument("-p", "--polarisation",
help="The Polarisation Mode (VV, VH, HH, HV, *) ",
default='*'
)
PARSER.add_argument("-m", "--beammode",
help="The Beam Mode (IW, EW, SM, *) ",
default='*'
)
# output parameters
PARSER.add_argument("-o", "--output",
help=('Output format/file. Can be a shapefile'
'(ending with .shp), a SQLite file'
'(ending with .sqlite) or a PostGreSQL table'
'(connection needs to be configured). '
),
required=True
)
ARGS = PARSER.parse_args()
# construct the search command (do not change)
AOI = scihub.create_aoi_str(ARGS.aoi)
TOI = scihub.create_toi_str(ARGS.begindate, ARGS.enddate)
PRODUCT_SPECS = scihub.create_s1_product_specs(ARGS.producttype,
ARGS.polarisation,
ARGS.beammode
)
QUERY = scihub.create_query('Sentinel-1', AOI, TOI, PRODUCT_SPECS)
# execute full search
out_destination = scihub_catalogue(QUERY,
ARGS.output,
append=False,
uname=ARGS.username,
pword=ARGS.password
)
print('DONE: Output written to "%s"' % out_destination)
| 39.974713 | 88 | 0.503077 |
f7610cede53dbc3dc053489011b6c970e78ef3d0 | 9,501 | py | Python | dataset.py | vish119/Neural-Network-for-XOR-and-Binary-Image-Classification | 10e27cc5618c2c399826af06aae6a2e86b4b3df2 | ["MIT"] | null | null | null | dataset.py | vish119/Neural-Network-for-XOR-and-Binary-Image-Classification | 10e27cc5618c2c399826af06aae6a2e86b4b3df2 | ["MIT"] | null | null | null | dataset.py | vish119/Neural-Network-for-XOR-and-Binary-Image-Classification | 10e27cc5618c2c399826af06aae6a2e86b4b3df2 | ["MIT"] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.append('../../core')
from imgutils import *
class xor(object):
"""
Class that creates a xor dataset. Note that for the grading of the project, this method
    might be changed, although its output format will not be. This implies we might use other
methods to create data. You must assume that the dataset will be blind and your machine is
capable of running any dataset. Although the dataset will not be changed drastically and will
    hold the XOR style.
"""
def __init__(self):
self.dimensions = 2
self.positive_means = [[-1, -1], [1, 1]]
self.negative_means = [[-1, 1], [1, -1]]
self.covariance = [[0.35, 0.1], [0.1, 0.35]]
def query_data(self, **kwargs):
"""
Once initialized, this method will create more data.
Args:
            samples: number of samples of data needed (optional, default randomly 1000 - 5000)
Returns:
tuple: data a tuple, ``(x,y)``
``x`` is a two dimensional ndarray ordered such that axis 0 is independent
data and data is spread along axis 1.
``y`` is a 1D ndarray it will be of the same length as axis 0 of x.
"""
if 'samples' in kwargs.keys():
samples = kwargs['samples']
else:
samples = np.random.randint(low=1000, high=5000)
# make positive samples
dim1, dim2 = np.random.multivariate_normal(self.positive_means[0],
                                                   self.covariance, samples // 4).T
positive = np.stack((dim1, dim2), axis=1)
dim1, dim2 = np.random.multivariate_normal(self.positive_means[1],
                                                   self.covariance, samples // 4).T
positive = np.concatenate((positive, np.stack((dim1, dim2), axis=1)), axis=0)
labels = np.ones(positive.shape[0])
# make the negative samples
dim1, dim2 = np.random.multivariate_normal(self.negative_means[0],
                                                   self.covariance, samples // 4).T
negative = np.stack((dim1, dim2), axis=1)
dim1, dim2 = np.random.multivariate_normal(self.negative_means[1],
                                                   self.covariance, samples // 4).T
negative = np.concatenate((negative, np.stack((dim1, dim2), axis=1)), axis=0)
labels = np.concatenate((labels, np.zeros(negative.shape[0])), axis=0)
data = np.concatenate((positive, negative), axis=0)
assert data.shape[0] == labels.shape[0]
perm = np.random.permutation(labels.shape[0])
data = data[perm, :]
labels = labels[perm]
return (data, np.asarray(labels, dtype='int'))
def plot(self, data, labels):
"""
This method will plot the data as created by this dataset generator.
Args:
data: as produced by the ``query_data`` method's first element.
labels: as produced by the ``query_data`` method's second element.
"""
positive = data[labels == 1, :]
negative = data[labels == 0, :]
plt.plot(positive[:, 0], positive[:, 1], 'bo', negative[:, 0], negative[:, 1], 'rs')
plt.axis('equal')
plt.title('XOR Dataset')
plt.xlabel('Dimension 1')
plt.ylabel('Dimension 2')
plt.show()
def _demo(self):
"""
This is a demonstration method that will plot a version of the dataset on the screen.
"""
data, labels = self.query_data(samples=5000)
self.plot(data, labels)
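# Minimal usage sketch for the xor dataset (the sample count is arbitrary):
#   dataset = xor()
#   x, y = dataset.query_data(samples=2000)
#   dataset.plot(x, y)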
class waldo(object):
"""
Class that creates the waldo dataset.
Args:
        dimensions: <tuple> the dimensions of the image (optional, default (28, 28))
        noise: controls the variance of the noise being applied.
        img: <tuple> load and use an image that is not default ('waldo.jpg', 'not_waldo.jpg')
"""
def __init__(self, **kwargs):
if 'dimensions' in kwargs.keys():
self.sample_height = kwargs['dimensions'][0]
self.sample_width = kwargs['dimensions'][1]
else:
self.sample_height = 28
self.sample_width = 28
if 'img' in kwargs.keys():
img_waldo, img_not_waldo = kwargs['img']
else:
img_waldo, img_not_waldo = ('waldo.jpg', 'not_waldo.jpg')
if 'noise' in kwargs.keys():
self.var = kwargs['noise']
else:
if self.sample_width < 32 and self.sample_height < 32:
self.var = 0.02
            elif self.sample_width < 64 and self.sample_height < 64:
self.var = 0.07
else:
self.var = 0.1
img = imread(img_waldo) # Load the image
self.waldo = rgb2gray(img) # convert to grayscale
self.waldo = normalize(self.waldo)
img = imread(img_not_waldo)
self.not_waldo = rgb2gray(img)
self.not_waldo = normalize(self.not_waldo)
self.reshape_low_height = np.floor(self.sample_height * 0.35)
self.reshape_high_height = np.floor(self.sample_height * 0.95)
self.reshape_low_width = np.floor(self.sample_width * 0.35)
self.reshape_high_width = np.floor(self.sample_width * 0.95)
def _query_sample(self, img):
"""
This is an internal method that creates a data samples.
Notes:
This creates one sample.
"""
sample = np.random.randint(low=0, high=256,
size=(self.sample_height, self.sample_width))
rshp = (np.random.randint(low=self.reshape_low_height, high=self.reshape_high_height + 1),
np.random.randint(low=self.reshape_low_width, high=self.reshape_high_width + 1))
img_reshaped = imresize(img, size=rshp)
img_sample = imnoise(img_reshaped, mode='gaussian', var=self.var, clip=True)
img_sample = imnoise(img_sample, mode='s&p', clip=True) * 255
current_img_height = img_sample.shape[0]
current_img_width = img_sample.shape[1]
height_low = 1
height_high = self.sample_height - current_img_height - 1
width_low = 1
width_high = self.sample_width - current_img_width - 1
img_x_pos = np.random.randint(low=height_low, high=height_high + 1)
img_y_pos = np.random.randint(low=width_low, high=width_high + 1)
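        # paste the noisy, resized template into the random background as a 70/30 blend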
sample[img_x_pos: img_x_pos + current_img_height,
img_y_pos: img_y_pos + current_img_width] = 0.7 * img_sample + \
0.3 * sample[img_x_pos: img_x_pos + current_img_height,
img_y_pos: img_y_pos + current_img_width]
return np.asarray(sample, dtype='uint8').flatten()
def _query_negative_sample(self):
"""
This is an internal method that creates negative data samples.
Notes:
This creates one sample.
"""
sample = self._query_sample(img=self.not_waldo)
return sample
def _query_positive_sample(self):
"""
This is an internal method that creates positive data samples.
Notes:
This creates one sample.
"""
sample = self._query_sample(img=self.waldo)
return sample
def query_data(self, **kwargs):
"""
Once initialized, this method will create data.
Args:
            samples: number of samples of data needed (optional, default randomly 100 - 500)
Returns:
tuple: data a tuple, ``(x,y)``
``x`` is a two dimensional ndarray ordered such that axis 0 is independent
data and data is spread along axis 1.
            ``y`` is a 1D ndarray; it will be of the same length as axis 0 of x. Will be integer.
"""
if 'samples' in kwargs.keys():
samples = kwargs['samples']
else:
samples = np.random.randint(low=100, high=500)
# Create dummy arrays
data = np.zeros((samples, self.sample_height * self.sample_width))
labels = np.zeros((samples,), dtype='int')
        for sample in range(samples):
labels[sample] = np.random.randint(low=0, high=2)
if labels[sample] == 1:
data[sample] = self._query_positive_sample()
else:
data[sample] = self._query_negative_sample()
return (data, labels)
def _demo(self):
"""
This is a demonstration method that will display a random positive and negative samples.
"""
sample_positive = self._query_positive_sample().reshape(self.sample_height,
self.sample_width)
imshow(sample_positive, window='positive')
sample_negative = self._query_negative_sample().reshape(self.sample_height,
self.sample_width)
imshow(sample_negative, window='negative')
def display_sample(self, sample, title='image'):
"""
This method will display a particular smaple in the dataset generated.
Args:
sample: provide one row of data
title: (optional) title of the window for image display
"""
        imshow(sample.reshape(self.sample_height, self.sample_width), window=title)
| 39.753138 | 107 | 0.581518 |