Dataset schema (one row per source file; ⌀ marks a nullable column):

hexsha: string, length 40
size: int64, 2 to 1.05M
ext: string, 9 classes
lang: string, 1 class
max_stars_repo_path: string, length 4 to 193
max_stars_repo_name: string, length 6 to 109
max_stars_repo_head_hexsha: string, length 40 to 78
max_stars_repo_licenses: sequence
max_stars_count: int64, 1 to 36.6k ⌀
max_stars_repo_stars_event_min_datetime: string, length 24 ⌀
max_stars_repo_stars_event_max_datetime: string, length 24 ⌀
max_issues_repo_path: string, length 4 to 193
max_issues_repo_name: string, length 6 to 109
max_issues_repo_head_hexsha: string, length 40 to 78
max_issues_repo_licenses: sequence
max_issues_count: int64, 1 to 29.8k ⌀
max_issues_repo_issues_event_min_datetime: string, length 24 ⌀
max_issues_repo_issues_event_max_datetime: string, length 24 ⌀
max_forks_repo_path: string, length 4 to 193
max_forks_repo_name: string, length 6 to 109
max_forks_repo_head_hexsha: string, length 40 to 78
max_forks_repo_licenses: sequence
max_forks_count: int64, 1 to 11.2k ⌀
max_forks_repo_forks_event_min_datetime: string, length 24 ⌀
max_forks_repo_forks_event_max_datetime: string, length 24 ⌀
content: string, length 2 to 1.05M
avg_line_length: float64, 1 to 404k
max_line_length: int64, 1 to 1.03M
alphanum_fraction: float64, 0 to 1
f7414e3f469f6d399018e369e8cc8df87c4996fc | 11,287 | py | Python | gorillabot/plugins/link.py | pep7/GorillaBot | b29ae1f05423494f7e122906efe24e9ffcd1421e | ["MIT"] | null | null | null | gorillabot/plugins/link.py | pep7/GorillaBot | b29ae1f05423494f7e122906efe24e9ffcd1421e | ["MIT"] | null | null | null | gorillabot/plugins/link.py | pep7/GorillaBot | b29ae1f05423494f7e122906efe24e9ffcd1421e | ["MIT"] | null | null | null |
# Copyright (c) 2013-2016 Molly White
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from plugins.util import command, get_url
from urllib.parse import quote
from html import unescape
from datetime import datetime
import json
import re
@command()
def link(m, urls=None, wikilinks=None):
"""Retrieve a description of the link, or provide a link to the English Wikipedia article if
formatted as a wikilink."""
#- !link URL
#-
#- ```irc
#- < GorillaWarfare> !link http://molly.github.io/GorillaBot/
#- < GorillaBot> Link: GorillaBot
#- < GorillaWarfare> !link https://www.youtube.com/watch?v=aSarf4-REgk
#- < GorillaBot> Link: "Baby Gorilla Reunites With Mother" (01:43). Uploaded Mar 24,
#- 2014. 164347 views. 513 likes, 32 dislikes.
#- < GorillaWarfare> !link [[Gorilla]]
#- < GorillaBot> https://en.wikipedia.org/wiki/Gorilla
#- ```
#-
#- Provide information about the given link, or provide a link to the English Wikipedia article
#- if formatted as a wikilink.
#-
#- In order to provide rich information about YouTube videos, you must provide a YouTube API
#- key when configuring the bot. You can get an API key by registering a project in the [
#- Google Developer Console](https://console.developers.google.com/). Without a key,
#- the normal linking will be used.
#-
#- #### Settings
#- * `auto` - All links and wikilinks entered in the chat will be parsed, regardless of whether
#- they're prefaced with `!link`.
if not (urls or wikilinks):
urls = re.findall(r'(https?://\S+)', m.body)
wikilinks = re.findall(r'\[{2}(.*?)\]{2}', m.body)
if not (urls or wikilinks):
m.bot.private_message(m.location, "Please provide a link.")
return
for url in urls:
if "youtube.com" in url or "youtu.be" in url:
message = youtube(m, url)
elif "reddit.com" in url:
message = reddit(m, url)
else:
message = generic(m, url)
if message:
m.bot.private_message(m.location, "Link: " + clean(message))
for wikilink in wikilinks:
safe = wikilink.replace(" ", "_")
if safe[-1] == ")":
safe = safe[:-1] + "%29"
m.bot.private_message(m.location, "https://en.wikipedia.org/wiki/" + safe)
@command("relevantxkcd")
def xkcd(m):
"""Get an xkcd comic based on given arguments."""
#- !xkcd [number|query]
#-
#- ```irc
#- < GorillaWarfare> !xkcd batman acne
#- < GorillaBot> xkcd: Complexion: http://xkcd.com/700/
#- < GorillaWarfare> !xkcd 700
#- < GorillaBot> xkcd: Complexion: http://xkcd.com/700/
#- < GorillaWarfare> !xkcd
#- < GorillaBot> xkcd: Telescope Names: http://xkcd.com/1294/
#- ```
#-
#- Without any arguments, this provides a random xkcd comic. When a number is supplied,
#- it tries to return the xkcd comic with that given number. When a query string is supplied,
#- it tries to return the xkcd comic that most closely matches that query.
if len(m.line) == 1:
url = "http://c.xkcd.com/random/comic/"
html = get_url(m, url)
message = xkcd_direct(html)
if message is None:
m.bot.logger.error("Couldn't get random xkcd comic.")
message = "Sorry, I'm broken. Tell GorillaWarfare to fix me."
elif len(m.line) == 2 and m.line[1].isdigit():
url = "http://xkcd.com/{0}/".format(m.line[1])
html = get_url(m, url)
if html is None:
m.bot.private_message(m.location, "There is no xkcd #{0}.".format(m.line[1]))
return
message = xkcd_direct(html, url)
if message is None:
m.bot.logger.error("Couldn't get xkcd comic #{0}.".format(m.line[1]))
message = "Sorry, I'm broken. Tell GorillaWarfare to fix me."
else:
query = " ".join(m.line[1:])
url = 'https://ajax.googleapis.com/ajax/services/search/web?v=1.0&q=site:xkcd.com%20{' \
'0}'.format(quote(query))
html = get_url(m, url)
message = xkcd_google(html)
m.bot.private_message(m.location, message)
def clean(title):
"""Clean the title so entities are unescaped and there's no weird spacing."""
    return unescape(re.sub(r'\s+', ' ', title))
def youtube(m, url):
"""Retrieve information about the YouTube video."""
api_key = m.bot.configuration["youtube"]
if api_key:
        match = re.search(r'youtu(?:be\.com/watch\?v=|\.be/)(.+?)(?:\?|&|\Z)', url)
if match:
video_id = match.group(1)
m.bot.logger.info("Retrieving information from the YouTube API for {}.".format(url))
api_url = "https://www.googleapis.com/youtube/v3/videos?id={id}" \
"&key={key}&part=snippet,contentDetails,statistics"
resp = get_url(m, api_url.format(id=video_id, key=api_key))
if resp:
# Load JSON
blob = json.loads(resp)["items"][0]
# Parse uploaded time
raw_time = datetime.strptime(blob["snippet"]["publishedAt"],
"%Y-%m-%dT%H:%M:%S.%fZ")
pretty_time = raw_time.strftime("%b %d, %Y")
# Parse video duration
dur_match = re.match(r'PT(?:(\d+)H)?(?:(\d+)M)?(\d+)S',
blob["contentDetails"]["duration"])
if dur_match:
hr, min, sec = dur_match.groups()
if hr:
pretty_dur = ":".join([hr, "00" if min is None else min.zfill(2), sec.zfill(2)])
else:
pretty_dur = ":".join(["00" if min is None else min.zfill(2), sec.zfill(2)])
else:
pretty_dur = ""
# Format and return message
return "\"{title}\" ({duration}). Uploaded {date}. {views} views. {likes} likes, " \
"{dislikes} dislikes.".format(title=blob["snippet"]["title"],
duration=pretty_dur, date=pretty_time,
views=blob["statistics"]["viewCount"],
likes=blob["statistics"]["likeCount"],
dislikes=blob["statistics"]["dislikeCount"])
# If there's no API key stored, or the URL is poorly formatted, fall back to generic linking
return generic(m, url)
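# Illustrative sketch added for clarity (not part of the original plugin). It shows how the
# ISO-8601 duration parsing used above is formatted, e.g. "PT1M43S" -> "01:43" and
# "PT1H2M3S" -> "1:02:03". The helper name is hypothetical and is not used elsewhere.
def _format_duration_example(duration="PT1M43S"):
    dur_match = re.match(r'PT(?:(\d+)H)?(?:(\d+)M)?(\d+)S', duration)
    if not dur_match:
        return ""
    hours, minutes, seconds = dur_match.groups()
    if hours:
        return ":".join([hours, "00" if minutes is None else minutes.zfill(2), seconds.zfill(2)])
    return ":".join(["00" if minutes is None else minutes.zfill(2), seconds.zfill(2)])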
def reddit(m, url):
"""Retrieve information about the Reddit link."""
# I'm sorry
reddit_regex = re.compile(r'reddit\.com/(?:u(?:ser)?/(?P<user>.+?)(?:\Z|#|/)|r/(?P<sub>.+?)'
r'(?:/?\Z|/comments/(?P<id>.+?)(?:/?\Z|/(?P<title>.+?)'
r'(?:/?\Z|/(?P<cid>.+?)(?:/|#|\Z)))))')
match = re.search(reddit_regex, url)
if match:
m.bot.logger.info("Retrieving information from the Reddit API for {}.".format(url))
if match.group("user"):
api_url = "http://www.reddit.com/user/{}/about.json"
user = match.group("user")
resp = get_url(m, api_url.format(user))
blob = json.loads(resp)["data"]
return "User {name}: {link_karma} link karma, {comment_karma} comment " \
"karma.".format(**blob)
else:
api_url = "http://www.reddit.com/api/info.json?id={}"
sub, id, cid = match.group("sub"), match.group("id"), match.group("cid")
if cid:
resp = get_url(m, api_url.format("t1_" + cid))
blob = json.loads(resp)["data"]["children"][0]["data"]
parent_resp = get_url(m, api_url.format("t3_" + id))
parent_blob = json.loads(parent_resp)["data"]["children"][0]["data"]
parent_blob["nsfw"] = " \x0304[NSFW]\x03" if parent_blob["over_18"] else ""
return "Comment by {user} on \"{title}\"{nsfw} in /r/{sub}. {up}↑.".format(
user=blob["author"], title=parent_blob["title"], nsfw=parent_blob["nsfw"],
sub=blob["subreddit"], up=blob["ups"])
elif id:
resp = get_url(m, api_url.format("t3_" + id))
blob = json.loads(resp)["data"]["children"][0]["data"]
blob["nsfw"] = "\x0304[NSFW]\x03" if blob["over_18"] else ""
return "\"{title}\" in /r/{subreddit}. {ups}↑. {nsfw}".format(**blob)
else:
api_url = "http://www.reddit.com/r/{}/about.json"
resp = get_url(m, api_url.format(sub))
blob = json.loads(resp)["data"]
blob["nsfw"] = "\x0304[NSFW]\x03" if blob["over18"] else ""
return "/r/{display_name}. {title}. {subscribers} subscribers." \
" {nsfw}".format(**blob)
return generic(m, url)
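# Note added for clarity (not part of the original plugin): the regex above distinguishes four
# kinds of Reddit URLs, and the branches above act on the captured groups accordingly:
#   reddit.com/user/<user>                          -> user karma summary
#   reddit.com/r/<sub>                              -> subreddit summary
#   reddit.com/r/<sub>/comments/<id>/<title>        -> submission summary
#   reddit.com/r/<sub>/comments/<id>/<title>/<cid>  -> single comment summary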
def generic(m, url):
"""Retrieve the title of the webpage."""
m.bot.logger.info("Retrieving link for {}.".format(url))
html = get_url(m, url, True)
if html:
title_regex = re.compile(r'<title>(.+?)</title>', re.DOTALL)
match = re.search(title_regex, html)
if match:
return match.group(1)
else:
m.bot.logger.info("No title element found.")
return None
def xkcd_direct(html, url=None):
"""Try to return a title and link for a direct link to an xkcd comic."""
if not html:
return None
if not url:
url_match = re.search(r'Permanent link to this comic: ([^\s<>]+)', html)
if url_match:
url = url_match.group(1)
else:
return None
match = re.search(r'<title>(.+?)</title>', html)
if match:
return match.group(1) + ": " + url
else:
return None
def xkcd_google(html):
"""Try to pull out the first Google result for an xkcd comic."""
blob = json.loads(html)
results = blob['responseData']['results']
if results == []:
return "Could not retrieve xkcd using this query."
else:
return results[0]['titleNoFormatting'] + ': ' + results[0]['unescapedUrl']
| 44.437008 | 104 | 0.565341 |
f74188f78260ec762de0025163fd477eb97cc78e | 4,892 | py | Python | examples/transfer_bo.py | zhanglei1172/bbobenchmark | 841bffdddc1320ac2676e378d20f8b176a7e6cf7 | ["MIT"] | 2 | 2021-09-06T02:06:22.000Z | 2021-12-09T10:46:56.000Z | examples/transfer_bo.py | zhanglei1172/bbobenchmark | 841bffdddc1320ac2676e378d20f8b176a7e6cf7 | ["MIT"] | null | null | null | examples/transfer_bo.py | zhanglei1172/bbobenchmark | 841bffdddc1320ac2676e378d20f8b176a7e6cf7 | ["MIT"] | null | null | null |
import numpy as np
# import matplotlib.pyplot as plt
from xbbo.configspace.space import DenseConfiguration, DenseConfigurationSpace
from ConfigSpace.hyperparameters import UniformFloatHyperparameter
from ConfigSpace.conditions import LessThanCondition
# from xbbo.search_algorithm.transfer_tst_optimizer import SMBO
# from xbbo.search_algorithm.transfer_taf_optimizer import SMBO
# from xbbo.search_algorithm.transfer_rgpe_mean_optimizer import SMBO
# from xbbo.search_algorithm.transfer_taf_rgpe_optimizer import SMBO
# from xbbo.search_algorithm.transfer_RMoGP_optimizer import SMBO
from xbbo.search_algorithm.transfer_bo_optimizer import SMBO
from xbbo.search_space.offline_hp import Model
from xbbo.utils.constants import MAXINT
from xbbo.surrogate.transfer.base_surrogate import BaseModel
def rosenbrock_2d(x):
""" The 2 dimensional Rosenbrock function as a toy model
The Rosenbrock function is well know in the optimization community and
often serves as a toy problem. It can be defined for arbitrary
dimensions. The minimium is always at x_i = 1 with a function value of
zero. All input parameters are continuous. The search domain for
all x's is the interval [-5, 10].
"""
x1 = x["x0"]
# x2 = x["x1"]
x2 = x.get('x1', x1)
val = 100. * (x2 - x1 ** 2.) ** 2. + (1 - x1) ** 2.
return val
def branin(config):
x1, x2 = config['x1'], config['x2']
y = (x2 - 5.1 / (4 * np.pi ** 2) * x1 ** 2 + 5 / np.pi * x1 - 6) ** 2 \
+ 10 * (1 - 1 / (8 * np.pi)) * np.cos(x1) + 10
return y
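# Quick sanity check added for illustration (not part of the original example): the Branin
# function has a known global minimum value of roughly 0.397887, e.g. at (x1, x2) = (pi, 2.275).
assert abs(branin({'x1': np.pi, 'x2': 2.275}) - 0.397887) < 1e-3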
def build_space(rng):
cs = DenseConfigurationSpace(seed=rng.randint(10000))
x0 = UniformFloatHyperparameter("x0", -5, 10, default_value=-3)
x1 = UniformFloatHyperparameter("x1", -5, 10, default_value=-4)
cs.add_hyperparameters([x0, x1])
con = LessThanCondition(x1, x0, 1.)
cs.add_condition(con)
return cs
def build_branin_space(rng):
cs = DenseConfigurationSpace(seed=rng.randint(10000))
x1 = UniformFloatHyperparameter("x1", -5, 10, default_value=0)
x2 = UniformFloatHyperparameter("x2", 0, 15, default_value=0)
cs.add_hyperparameters([x1, x2])
return cs
if __name__ == "__main__":
MAX_CALL = 30
rng = np.random.RandomState(42)
test_model = Model(None, rng.randint(MAXINT), test_task='a6a', )
cs = DenseConfigurationSpace(seed=rng.randint(MAXINT))
confs = test_model.get_api_config()
for conf in confs:
cs.add_hyperparameter(UniformFloatHyperparameter(conf, confs[conf]['range'][0], confs[conf]['range'][1]))
blackbox_func = test_model.evaluate
base_models = []
for i in range(len(test_model.old_D_x)):
base_models.append(BaseModel(cs, rng=rng,do_optimize=False))
base_models[-1].train(test_model.old_D_x[i], test_model.old_D_y[i])
# use transfer
# hpopt = SMBO(space=cs, seed=rng.randint(10000), total_limit=MAX_CALL, initial_design='sobol', surrogate='gp', acq_func='ei', weight_srategy='kernel', acq_opt='rs', base_models=base_models) # vanila bo
# hpopt = SMBO(space=cs, seed=rng.randint(10000), total_limit=MAX_CALL, initial_design='sobol', surrogate='tst', acq_func='ei', weight_srategy='kernel', acq_opt='rs', base_models=base_models) # TST-R
# hpopt = SMBO(space=cs, seed=rng.randint(10000), total_limit=MAX_CALL, initial_design='sobol', surrogate='gp', acq_func='taf', weight_srategy='kernel', acq_opt='rs', base_models=base_models) # TAF
# hpopt = SMBO(space=cs, seed=rng.randint(10000), total_limit=MAX_CALL, initial_design='sobol', surrogate='tst', acq_func='ei', weight_srategy='rw', acq_opt='rs', base_models=base_models) # RGPE(mean)
# hpopt = SMBO(space=cs, seed=rng.randint(10000), total_limit=MAX_CALL, initial_design='sobol', surrogate='gp', acq_func='taf', weight_srategy='rw', acq_opt='rs', base_models=base_models) # TAF(rw)
hpopt = SMBO(space=cs, seed=rng.randint(10000), total_limit=MAX_CALL, initial_design='sobol', surrogate='gp', acq_func='mogp', weight_srategy='rw', acq_opt='rs', base_models=base_models) # RMoGP
# not use transfer
# hpopt = SMBO(space=cs, seed=rng.randint(10000), total_limit=MAX_CALL, initial_design='sobol', surrogate='gp', acq_opt='rs_ls', base_models=[]])
# Example call of the black-box function
def_value = blackbox_func(cs.get_default_configuration())
print("Default Value: %.2f" % def_value)
# ---- Begin BO-loop ----
for i in range(MAX_CALL):
# suggest
trial_list = hpopt.suggest()
# evaluate
value = blackbox_func(trial_list[0].config_dict)
# observe
trial_list[0].add_observe_value(observe_value=value)
hpopt.observe(trial_list=trial_list)
print(value)
# plt.plot(hpopt.trials.get_history()[0])
# plt.savefig('./out/rosenbrock_bo_gp.png')
# plt.show()
print('find best value:{}'.format(hpopt.trials.get_best()[0]))
| 48.435644 | 206 | 0.705233 |
f741a188fda6bef0c338259d1d5661b648d7d929 | 108 | py | Python | route/__init__.py | 0xwhoami/growtopia-chemsynth-router | 08ad29ccd3ec6b36ddb48662f84073102392979f | ["CC0-1.0"] | null | null | null | route/__init__.py | 0xwhoami/growtopia-chemsynth-router | 08ad29ccd3ec6b36ddb48662f84073102392979f | ["CC0-1.0"] | null | null | null | route/__init__.py | 0xwhoami/growtopia-chemsynth-router | 08ad29ccd3ec6b36ddb48662f84073102392979f | ["CC0-1.0"] | null | null | null |
#!/usr/bin/env python
"""
Copyright (c) 2020-End_Of_Life
See the file 'LICENSE' for copying permission
"""
| 15.428571 | 45 | 0.712963 |
f741b19a862036a25420498abb6c38930096315b | 9,213 | py | Python | src/infi/pyutils/lazy.py | jasonjorge/infi.asi | 78a4c34a421102f99b959a659cf7303804627d9b | ["BSD-3-Clause"] | 1 | 2022-02-12T20:30:55.000Z | 2022-02-12T20:30:55.000Z | src/infi/pyutils/lazy.py | jasonjorge/infi.asi | 78a4c34a421102f99b959a659cf7303804627d9b | ["BSD-3-Clause"] | 5 | 2015-11-08T14:50:42.000Z | 2020-06-23T14:42:33.000Z | src/infi/pyutils/lazy.py | jasonjorge/infi.asi | 78a4c34a421102f99b959a659cf7303804627d9b | ["BSD-3-Clause"] | 4 | 2015-02-22T09:06:59.000Z | 2022-02-12T20:30:55.000Z |
# Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary#Cached_Properties
import itertools
import time
from .decorators import wraps
from .python_compat import iteritems
from logging import getLogger
from types import MethodType, FunctionType
logger = getLogger(__name__)
class cached_property(object):
"""Decorator for read-only properties evaluated only once.
    It can be used to create a cached property like this::

        import random

        # the class containing the property must be a new-style class
        class MyClass(object):
            # create a property whose value is cached after the first access
            @cached_property
            def randint(self):
                # will only be evaluated once per instance
                return random.randint(0, 100)
The value is cached in the '_cache' attribute of the object inst that
has the property getter method wrapped by this decorator. The '_cache'
attribute value is a dictionary which has a key for every property of the
    object which is wrapped by this decorator. Each entry in the cache is
    created only when the property is accessed for the first time and stores
    the computed property value.
To expire a cached property value manually just do::
del inst._cache[<property name>]
"""
def __init__(self, fget, doc=None):
super(cached_property, self).__init__()
self.fget = fget
self.__doc__ = doc or fget.__doc__
self.__name__ = fget.__name__
self.__module__ = fget.__module__
def __get__(self, inst, owner):
try:
value = inst._cache[self.__name__]
except (KeyError, AttributeError):
value = self.fget(inst)
try:
cache = inst._cache
except AttributeError:
cache = inst._cache = {}
cache[self.__name__] = value
return value
_cached_method_id_allocator = itertools.count()
def _get_instancemethod_cache_entry(method_id, *args, **kwargs):
if len(args) + len(kwargs) == 0:
return method_id
try:
kwargs_keys = list(kwargs.keys())
kwargs_keys.sort()
key = (method_id,) + args + tuple([kwargs[key] for key in kwargs_keys])
_ = {key: None}
return key
except TypeError:
return None
def cached_method(func):
"""Decorator that caches a method's return value each time it is called.
If called later with the same arguments, the cached value is returned, and
not re-evaluated.
"""
method_id = next(_cached_method_id_allocator)
@wraps(func)
def callee(inst, *args, **kwargs):
key = _get_instancemethod_cache_entry(method_id, *args, **kwargs)
if key is None:
logger.debug("Passed arguments to {0} are mutable, so the returned value will not be cached".format(func.__name__))
return func(inst, *args, **kwargs)
try:
value = inst._cache[key]
except (KeyError, AttributeError):
value = func(inst, *args, **kwargs)
try:
inst._cache[key] = value
except AttributeError:
inst._cache = {}
inst._cache[key] = value
return value
callee.__cached_method__ = True
callee.__method_id__ = method_id
return callee
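# Illustrative usage sketch added for clarity (not part of the original module). The class
# name _CachedMethodExample is hypothetical; calling the function shows that repeated calls
# with the same arguments are served from the per-instance cache instead of re-running the method.
def _cached_method_example():
    class _CachedMethodExample(object):
        calls = 0

        @cached_method
        def double(self, x):
            self.calls += 1
            return 2 * x

    inst = _CachedMethodExample()
    assert inst.double(3) == 6
    assert inst.double(3) == 6 and inst.calls == 1  # second call hit inst._cache
    return inst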
class cached_method_with_custom_cache(object):
def __init__(self, cache_class=None):
if cache_class is None:
cache_class = dict
self.cache_class = cache_class
def __call__(self, func):
"""Decorator that caches a method's return value each time it is called.
If called later with the same arguments, the cached value is returned, and
not re-evaluated.
        The per-instance cache is created lazily on first use as a CacheData dictionary.
"""
method_id = next(_cached_method_id_allocator)
@wraps(func)
def callee(inst, *args, **kwargs):
key = _get_instancemethod_cache_entry(method_id, *args, **kwargs)
func_name = func.__name__
if key is None:
logger.debug("Passed arguments to {0} are mutable, so the returned value will not be cached".format(func_name))
return func(inst, *args, **kwargs)
try:
return inst._cache[func_name][key]
except (KeyError, AttributeError):
value = func(inst, *args, **kwargs)
if not hasattr(inst, "_cache"):
inst._cache = CacheData()
if inst._cache.get(func_name, None) is None:
#cache class creator returns a dict
inst._cache[func_name] = self.cache_class()
inst._cache[func_name][key] = value
return value
callee.__cached_method__ = True
callee.__method_id__ = method_id
return callee
def _get_function_cache_entry(args, kwargs):
return (tuple(args), frozenset(iteritems(kwargs)))
def cached_function(func):
"""Decorator that caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned, and
not re-evaluated.
"""
@wraps(func)
def callee(*args, **kwargs):
key = _get_function_cache_entry(args, kwargs)
try:
value = func._cache[key]
except (KeyError, AttributeError):
value = func(*args, **kwargs)
if not hasattr(func, '_cache'):
setattr(func, '_cache', {})
func._cache[key] = value
return value
callee._cache = func._cache = dict()
callee.__cached_method__ = True
return callee
def clear_cache(self):
if hasattr(self, '_cache'):
getattr(self, '_cache').clear()
def clear_cached_entry(self, *args, **kwargs):
if isinstance(self, MethodType) and getattr(self, '__cached_method__', False):
method = self
self = getattr(method, 'im_self', getattr(method, '__self__', None))
if self is None:
return
key = _get_instancemethod_cache_entry(method.__method_id__, *args, **kwargs)
elif isinstance(self, FunctionType) and getattr(self, '__cached_method__', False):
key = _get_function_cache_entry(args, kwargs)
else:
return
_ = getattr(self, '_cache', {}).pop(key, None)
def populate_cache(self, attributes_to_skip=[]):
"""this method attempts to get all the lazy cached properties and methods
There are two special cases:
- Some attributes may not be available and raises exceptions.
If you wish to skip these, pass them in the attributes_to_skip list
- The calling of cached methods is done without any arguments, and catches TypeError exceptions
for the case a cached method requires arguments. The exception is logged."""
from inspect import getmembers
for key, value in getmembers(self):
if key in attributes_to_skip:
continue
if hasattr(value, "__cached_method__"):
logger.debug("getting attribute %s from %s", repr(key), repr(self))
try:
_ = value()
except TypeError as e:
logger.exception(e)
class LazyImmutableDict(object):
""" Use this object when you have a list of keys but fetching the values is expensive,
    and you want to do it in a lazy fashion"""
def __init__(self, dict):
self._dict = dict
def __getitem__(self, key):
value = self._dict[key]
if value is None:
value = self._dict[key] = self._create_value(key)
return value
def keys(self):
return self._dict.keys()
def __contains__(self, key):
return self._dict.__contains__(key)
def has_key(self, key):
return key in self._dict
def __len__(self):
return len(self._dict)
def _create_value(self, key):
raise NotImplementedError()
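# Illustrative usage sketch added for clarity (not part of the original module). The subclass
# below is hypothetical: values start out as None and _create_value runs only on the first
# access of each key, which is the "expensive fetch" the docstring above refers to.
class _LazySquaresExample(LazyImmutableDict):
    def _create_value(self, key):
        # stand-in for an expensive lookup
        return key * key
# squares = _LazySquaresExample(dict.fromkeys([2, 3, 4]))
# squares[3]  # computed (9) on first access, returned from the backing dict afterwards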
class CacheData(dict):
def __init__(self):
super(CacheData, self).__init__()
self._is_valid = set()
def __getitem__(self, key):
if key not in self._is_valid:
logger.debug("cache found invalidate., updating cache for {0}".format(key))
raise KeyError
return dict.__getitem__(self, key)
def __setitem__(self, key, value):
ret_val = dict.__setitem__(self, key, value)
self._is_valid.add(key)
return ret_val
def invalidate(self):
logger.debug("Invalidate cache")
self._is_valid = set()
class TimerCacheData(CacheData):
def __init__(self, poll_time):
super(TimerCacheData, self).__init__()
self.poll_time = poll_time
def __getitem__(self, key):
next_poll_time, value = CacheData.__getitem__(self, key)
if time.time() > next_poll_time:
raise KeyError
return value
def __setitem__(self, key, value):
next_poll_time = time.time() + self.poll_time
ret_val = CacheData.__setitem__(self, key, (next_poll_time, value))
return ret_val
| 36.41502 | 127 | 0.635189 |
f741d28c120ea874d240f15469308faf34286a81 | 3,151 | py | Python | src/radical/ensemblemd/kernel_plugins/misc/diff.py | chemlove/radical.ensemblemd | 0ec4b127760d2fee88d4eae1768fecec4bdd6b21 | ["MIT"] | null | null | null | src/radical/ensemblemd/kernel_plugins/misc/diff.py | chemlove/radical.ensemblemd | 0ec4b127760d2fee88d4eae1768fecec4bdd6b21 | ["MIT"] | null | null | null | src/radical/ensemblemd/kernel_plugins/misc/diff.py | chemlove/radical.ensemblemd | 0ec4b127760d2fee88d4eae1768fecec4bdd6b21 | ["MIT"] | null | null | null |
#!/usr/bin/env python
"""A kernel that compares two ASCII files and outputs the differences in a detailed
format.
"""
__author__ = "Ioannis Paraskevakos <i.paraskev@rutgers.edu>"
__copyright__ = "Copyright 2014, http://radical.rutgers.edu"
__license__ = "MIT"
from copy import deepcopy
from radical.ensemblemd.exceptions import ArgumentError
from radical.ensemblemd.exceptions import NoKernelConfigurationError
from radical.ensemblemd.kernel_plugins.kernel_base import KernelBase
# ------------------------------------------------------------------------------
#
_KERNEL_INFO = {
"name": "misc.diff",
"description": "Counts the differences between two ASCII files.",
"arguments": {"--inputfile1=":
{
"mandatory": True,
"description": "The first input ASCII file."
},
"--inputfile2=":
{
"mandatory": True,
"description": "The second input ASCII file."
},
"--outputfile=":
{
"mandatory": True,
"description": "The output file containing the difference count."
},
},
"machine_configs":
{
"*": {
"environment" : None,
"pre_exec" : None,
"executable" : "diff",
"uses_mpi" : False
}
}
}
# ------------------------------------------------------------------------------
#
class Kernel(KernelBase):
# --------------------------------------------------------------------------
#
def __init__(self):
"""Le constructor.
"""
super(Kernel, self).__init__(_KERNEL_INFO)
# --------------------------------------------------------------------------
#
@staticmethod
def get_name():
return _KERNEL_INFO["name"]
# --------------------------------------------------------------------------
#
def _bind_to_resource(self, resource_key):
"""(PRIVATE) Implements parent class method.
"""
if resource_key not in _KERNEL_INFO["machine_configs"]:
if "*" in _KERNEL_INFO["machine_configs"]:
# Fall-back to generic resource key
resource_key = "*"
else:
raise NoKernelConfigurationError(kernel_name=_KERNEL_INFO["name"], resource_key=resource_key)
cfg = _KERNEL_INFO["machine_configs"][resource_key]
executable = "/bin/bash"
arguments = ['-l', '-c', 'diff -U 0 {input1} {input2} | grep ^@ | wc -l > {output}'.format(
input1 = self.get_arg("--inputfile1="),
input2 = self.get_arg("--inputfile2="),
output = self.get_arg("--outputfile="))
]
self._executable = executable
self._arguments = arguments
self._environment = cfg["environment"]
self._uses_mpi = cfg["uses_mpi"]
self._pre_exec = None
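# Note added for clarity (not part of the original kernel): with the arguments
# --inputfile1=a.txt --inputfile2=b.txt --outputfile=diff_count.txt, _bind_to_resource
# builds roughly the following shell invocation, which writes the number of differing
# hunks (one per "@@" header produced by diff -U 0) into the output file:
#
#     /bin/bash -l -c 'diff -U 0 a.txt b.txt | grep ^@ | wc -l > diff_count.txt'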
| 33.88172 | 109 | 0.458267 |
f74252377db9d6fa8eeb07059e4b49ddfc329441 | 1,766 | py | Python | samples/snippets/quickstart/quickstart.py | glaswasser/python-vision | 706c314a86b8f35c313bb3e907ae84317dca1a0b | ["Apache-2.0"] | null | null | null | samples/snippets/quickstart/quickstart.py | glaswasser/python-vision | 706c314a86b8f35c313bb3e907ae84317dca1a0b | ["Apache-2.0"] | null | null | null | samples/snippets/quickstart/quickstart.py | glaswasser/python-vision | 706c314a86b8f35c313bb3e907ae84317dca1a0b | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#$env:GOOGLE_APPLICATION_CREDENTIALS="C:\Users\heinz\OneDrive - Yagora GmbH\Heinz Yagora privat\privat\computer_vision\python-vision\FMCG-Vision-71451fde95cf.json"
def run_quickstart():
# [START vision_quickstart]
import io
import os
# Imports the Google Cloud client library
# [START vision_python_migration_import]
from google.cloud import vision
# [END vision_python_migration_import]
# Instantiates a client
# [START vision_python_migration_client]
client = vision.ImageAnnotatorClient()
# [END vision_python_migration_client]
# The name of the image file to annotate
file_name = os.path.abspath('resources/wakeupcat.jpg')
# Loads the image into memory
with io.open(file_name, 'rb') as image_file:
content = image_file.read()
image = vision.Image(content=content)
# Performs label detection on the image file
response = client.label_detection(image=image)
labels = response.label_annotations
print('Labels:')
for label in labels:
print(label.description)
# [END vision_quickstart]
if __name__ == '__main__':
run_quickstart()
| 32.703704 | 163 | 0.734994 |
f7425d36fe0f6897968f289c5528c4b52be383e4 | 23,070 | py | Python | utils/check_repo.py | slowy07/transformers | 7223844df9738719ee335428a326cd712f506806 | ["Apache-2.0"] | 15 | 2021-08-10T01:10:44.000Z | 2022-01-20T15:23:49.000Z | utils/check_repo.py | 4nalog/transformers | 76cadb7943c8492ec481f4f3925e9e8793a32c9d | ["Apache-2.0"] | 1 | 2020-03-21T21:38:47.000Z | 2020-03-21T21:38:50.000Z | utils/check_repo.py | 4nalog/transformers | 76cadb7943c8492ec481f4f3925e9e8793a32c9d | ["Apache-2.0"] | 1 | 2021-04-02T20:42:10.000Z | 2021-04-02T20:42:10.000Z |
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import inspect
import os
import re
import warnings
from pathlib import Path
from transformers import is_flax_available, is_tf_available, is_torch_available
from transformers.file_utils import ENV_VARS_TRUE_VALUES
from transformers.models.auto import get_values
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_repo.py
PATH_TO_TRANSFORMERS = "src/transformers"
PATH_TO_TESTS = "tests"
PATH_TO_DOC = "docs/source"
# Update this list with models that are supposed to be private.
PRIVATE_MODELS = [
"DPRSpanPredictor",
"T5Stack",
"TFDPRSpanPredictor",
]
# Update this list for models that are not tested with a comment explaining the reason it should not be.
# Being in this list is an exception and should **not** be the rule.
IGNORE_NON_TESTED = PRIVATE_MODELS.copy() + [
# models to ignore for not tested
"BigBirdPegasusEncoder", # Building part of bigger (tested) model.
"BigBirdPegasusDecoder", # Building part of bigger (tested) model.
"BigBirdPegasusDecoderWrapper", # Building part of bigger (tested) model.
"DetrEncoder", # Building part of bigger (tested) model.
"DetrDecoder", # Building part of bigger (tested) model.
"DetrDecoderWrapper", # Building part of bigger (tested) model.
"M2M100Encoder", # Building part of bigger (tested) model.
"M2M100Decoder", # Building part of bigger (tested) model.
"Speech2TextEncoder", # Building part of bigger (tested) model.
"Speech2TextDecoder", # Building part of bigger (tested) model.
"LEDEncoder", # Building part of bigger (tested) model.
"LEDDecoder", # Building part of bigger (tested) model.
"BartDecoderWrapper", # Building part of bigger (tested) model.
"BartEncoder", # Building part of bigger (tested) model.
"BertLMHeadModel", # Needs to be setup as decoder.
"BlenderbotSmallEncoder", # Building part of bigger (tested) model.
"BlenderbotSmallDecoderWrapper", # Building part of bigger (tested) model.
"BlenderbotEncoder", # Building part of bigger (tested) model.
"BlenderbotDecoderWrapper", # Building part of bigger (tested) model.
"MBartEncoder", # Building part of bigger (tested) model.
"MBartDecoderWrapper", # Building part of bigger (tested) model.
"MegatronBertLMHeadModel", # Building part of bigger (tested) model.
"MegatronBertEncoder", # Building part of bigger (tested) model.
"MegatronBertDecoder", # Building part of bigger (tested) model.
"MegatronBertDecoderWrapper", # Building part of bigger (tested) model.
"PegasusEncoder", # Building part of bigger (tested) model.
"PegasusDecoderWrapper", # Building part of bigger (tested) model.
"DPREncoder", # Building part of bigger (tested) model.
"ProphetNetDecoderWrapper", # Building part of bigger (tested) model.
"ReformerForMaskedLM", # Needs to be setup as decoder.
"TFDPREncoder", # Building part of bigger (tested) model.
"TFElectraMainLayer", # Building part of bigger (tested) model (should it be a TFPreTrainedModel ?)
"TFRobertaForMultipleChoice", # TODO: fix
"SeparableConv1D", # Building part of bigger (tested) model.
]
# Update this list with test files that don't have a tester with a `all_model_classes` variable and which don't
# trigger the common tests.
TEST_FILES_WITH_NO_COMMON_TESTS = [
"test_modeling_camembert.py",
"test_modeling_flax_mt5.py",
"test_modeling_mbart.py",
"test_modeling_mt5.py",
"test_modeling_pegasus.py",
"test_modeling_tf_camembert.py",
"test_modeling_tf_mt5.py",
"test_modeling_tf_xlm_roberta.py",
"test_modeling_xlm_prophetnet.py",
"test_modeling_xlm_roberta.py",
]
# Update this list for models that are not in any of the auto MODEL_XXX_MAPPING. Being in this list is an exception and
# should **not** be the rule.
IGNORE_NON_AUTO_CONFIGURED = PRIVATE_MODELS.copy() + [
# models to ignore for model xxx mapping
"BeitForMaskedImageModeling",
"CLIPTextModel",
"CLIPVisionModel",
"FlaxCLIPTextModel",
"FlaxCLIPVisionModel",
"FlaxWav2Vec2ForCTC",
"DetrForSegmentation",
"DPRReader",
"FlaubertForQuestionAnswering",
"GPT2DoubleHeadsModel",
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"OpenAIGPTDoubleHeadsModel",
"RagModel",
"RagSequenceForGeneration",
"RagTokenForGeneration",
"TFDPRReader",
"TFGPT2DoubleHeadsModel",
"TFOpenAIGPTDoubleHeadsModel",
"TFRagModel",
"TFRagSequenceForGeneration",
"TFRagTokenForGeneration",
"Wav2Vec2ForCTC",
"HubertForCTC",
"XLMForQuestionAnswering",
"XLNetForQuestionAnswering",
"SeparableConv1D",
"VisualBertForRegionToPhraseAlignment",
"VisualBertForVisualReasoning",
"VisualBertForQuestionAnswering",
"VisualBertForMultipleChoice",
"TFWav2Vec2ForCTC",
"TFHubertForCTC",
]
# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()
# If some modeling modules should be ignored for all checks, they should be added in the nested list
# _ignore_modules of this function.
def get_model_modules():
"""Get the model modules inside the transformers library."""
_ignore_modules = [
"modeling_auto",
"modeling_encoder_decoder",
"modeling_marian",
"modeling_mmbt",
"modeling_outputs",
"modeling_retribert",
"modeling_utils",
"modeling_flax_auto",
"modeling_flax_utils",
"modeling_transfo_xl_utilities",
"modeling_tf_auto",
"modeling_tf_outputs",
"modeling_tf_pytorch_utils",
"modeling_tf_utils",
"modeling_tf_transfo_xl_utilities",
]
modules = []
for model in dir(transformers.models):
# There are some magic dunder attributes in the dir, we ignore them
if not model.startswith("__"):
model_module = getattr(transformers.models, model)
for submodule in dir(model_module):
if submodule.startswith("modeling") and submodule not in _ignore_modules:
modeling_module = getattr(model_module, submodule)
if inspect.ismodule(modeling_module):
modules.append(modeling_module)
return modules
def get_models(module, include_pretrained=False):
"""Get the objects in module that are models."""
models = []
model_classes = (transformers.PreTrainedModel, transformers.TFPreTrainedModel, transformers.FlaxPreTrainedModel)
for attr_name in dir(module):
if not include_pretrained and ("Pretrained" in attr_name or "PreTrained" in attr_name):
continue
attr = getattr(module, attr_name)
if isinstance(attr, type) and issubclass(attr, model_classes) and attr.__module__ == module.__name__:
models.append((attr_name, attr))
return models
def is_a_private_model(model):
"""Returns True if the model should not be in the main init."""
if model in PRIVATE_MODELS:
return True
# Wrapper, Encoder and Decoder are all privates
if model.endswith("Wrapper"):
return True
if model.endswith("Encoder"):
return True
if model.endswith("Decoder"):
return True
return False
def check_models_are_in_init():
"""Checks all models defined in the library are in the main init."""
models_not_in_init = []
dir_transformers = dir(transformers)
for module in get_model_modules():
models_not_in_init += [
model[0] for model in get_models(module, include_pretrained=True) if model[0] not in dir_transformers
]
# Remove private models
models_not_in_init = [model for model in models_not_in_init if not is_a_private_model(model)]
if len(models_not_in_init) > 0:
raise Exception(f"The following models should be in the main init: {','.join(models_not_in_init)}.")
# If some test_modeling files should be ignored when checking models are all tested, they should be added in the
# nested list _ignore_files of this function.
def get_model_test_files():
"""Get the model test files."""
_ignore_files = [
"test_modeling_common",
"test_modeling_encoder_decoder",
"test_modeling_marian",
"test_modeling_tf_common",
]
test_files = []
for filename in os.listdir(PATH_TO_TESTS):
if (
os.path.isfile(f"{PATH_TO_TESTS}/{filename}")
and filename.startswith("test_modeling")
and not os.path.splitext(filename)[0] in _ignore_files
):
test_files.append(filename)
return test_files
# This is a bit hacky but I didn't find a way to import the test_file as a module and read inside the tester class
# for the all_model_classes variable.
def find_tested_models(test_file):
"""Parse the content of test_file to detect what's in all_model_classes"""
# This is a bit hacky but I didn't find a way to import the test_file as a module and read inside the class
with open(os.path.join(PATH_TO_TESTS, test_file), "r", encoding="utf-8", newline="\n") as f:
content = f.read()
all_models = re.findall(r"all_model_classes\s+=\s+\(\s*\(([^\)]*)\)", content)
# Check with one less parenthesis as well
all_models += re.findall(r"all_model_classes\s+=\s+\(([^\)]*)\)", content)
if len(all_models) > 0:
model_tested = []
for entry in all_models:
for line in entry.split(","):
name = line.strip()
if len(name) > 0:
model_tested.append(name)
return model_tested
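# Note added for clarity (not part of the original script): the two regexes above match test
# files that declare their models with or without the extra tuple, e.g. this hypothetical
# snippet
#
#     all_model_classes = (BertModel, BertForMaskedLM) if is_torch_available() else ()
#
# for which find_tested_models returns ["BertModel", "BertForMaskedLM"].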
def check_models_are_tested(module, test_file):
"""Check models defined in module are tested in test_file."""
# XxxPreTrainedModel are not tested
defined_models = get_models(module)
tested_models = find_tested_models(test_file)
if tested_models is None:
if test_file in TEST_FILES_WITH_NO_COMMON_TESTS:
return
return [
f"{test_file} should define `all_model_classes` to apply common tests to the models it tests. "
+ "If this intentional, add the test filename to `TEST_FILES_WITH_NO_COMMON_TESTS` in the file "
+ "`utils/check_repo.py`."
]
failures = []
for model_name, _ in defined_models:
if model_name not in tested_models and model_name not in IGNORE_NON_TESTED:
failures.append(
f"{model_name} is defined in {module.__name__} but is not tested in "
+ f"{os.path.join(PATH_TO_TESTS, test_file)}. Add it to the all_model_classes in that file."
+ "If common tests should not applied to that model, add its name to `IGNORE_NON_TESTED`"
+ "in the file `utils/check_repo.py`."
)
return failures
def check_all_models_are_tested():
"""Check all models are properly tested."""
modules = get_model_modules()
test_files = get_model_test_files()
failures = []
for module in modules:
test_file = f"test_{module.__name__.split('.')[-1]}.py"
if test_file not in test_files:
failures.append(f"{module.__name__} does not have its corresponding test file {test_file}.")
new_failures = check_models_are_tested(module, test_file)
if new_failures is not None:
failures += new_failures
if len(failures) > 0:
raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures))
def get_all_auto_configured_models():
"""Return the list of all models in at least one auto class."""
result = set() # To avoid duplicates we concatenate all model classes in a set.
if is_torch_available():
for attr_name in dir(transformers.models.auto.modeling_auto):
if attr_name.startswith("MODEL_") and attr_name.endswith("MAPPING_NAMES"):
result = result | set(get_values(getattr(transformers.models.auto.modeling_auto, attr_name)))
if is_tf_available():
for attr_name in dir(transformers.models.auto.modeling_tf_auto):
if attr_name.startswith("TF_MODEL_") and attr_name.endswith("MAPPING_NAMES"):
result = result | set(get_values(getattr(transformers.models.auto.modeling_tf_auto, attr_name)))
if is_flax_available():
for attr_name in dir(transformers.models.auto.modeling_flax_auto):
if attr_name.startswith("FLAX_MODEL_") and attr_name.endswith("MAPPING_NAMES"):
result = result | set(get_values(getattr(transformers.models.auto.modeling_flax_auto, attr_name)))
return [cls for cls in result]
def ignore_unautoclassed(model_name):
"""Rules to determine if `name` should be in an auto class."""
# Special white list
if model_name in IGNORE_NON_AUTO_CONFIGURED:
return True
# Encoder and Decoder should be ignored
if "Encoder" in model_name or "Decoder" in model_name:
return True
return False
def check_models_are_auto_configured(module, all_auto_models):
"""Check models defined in module are each in an auto class."""
defined_models = get_models(module)
failures = []
for model_name, _ in defined_models:
if model_name not in all_auto_models and not ignore_unautoclassed(model_name):
failures.append(
f"{model_name} is defined in {module.__name__} but is not present in any of the auto mapping. "
"If that is intended behavior, add its name to `IGNORE_NON_AUTO_CONFIGURED` in the file "
"`utils/check_repo.py`."
)
return failures
def check_all_models_are_auto_configured():
"""Check all models are each in an auto class."""
missing_backends = []
if not is_torch_available():
missing_backends.append("PyTorch")
if not is_tf_available():
missing_backends.append("TensorFlow")
if not is_flax_available():
missing_backends.append("Flax")
if len(missing_backends) > 0:
missing = ", ".join(missing_backends)
if os.getenv("TRANSFORMERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
raise Exception(
"Full quality checks require all backends to be installed (with `pip install -e .[dev]` in the "
f"Transformers repo, the following are missing: {missing}."
)
else:
warnings.warn(
"Full quality checks require all backends to be installed (with `pip install -e .[dev]` in the "
f"Transformers repo, the following are missing: {missing}. While it's probably fine as long as you "
"didn't make any change in one of those backends modeling files, you should probably execute the "
"command above to be on the safe side."
)
modules = get_model_modules()
all_auto_models = get_all_auto_configured_models()
failures = []
for module in modules:
new_failures = check_models_are_auto_configured(module, all_auto_models)
if new_failures is not None:
failures += new_failures
if len(failures) > 0:
raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures))
_re_decorator = re.compile(r"^\s*@(\S+)\s+$")
def check_decorator_order(filename):
"""Check that in the test file `filename` the slow decorator is always last."""
with open(filename, "r", encoding="utf-8", newline="\n") as f:
lines = f.readlines()
decorator_before = None
errors = []
for i, line in enumerate(lines):
search = _re_decorator.search(line)
if search is not None:
decorator_name = search.groups()[0]
if decorator_before is not None and decorator_name.startswith("parameterized"):
errors.append(i)
decorator_before = decorator_name
elif decorator_before is not None:
decorator_before = None
return errors
def check_all_decorator_order():
"""Check that in all test files, the slow decorator is always last."""
errors = []
for fname in os.listdir(PATH_TO_TESTS):
if fname.endswith(".py"):
filename = os.path.join(PATH_TO_TESTS, fname)
new_errors = check_decorator_order(filename)
errors += [f"- {filename}, line {i}" for i in new_errors]
if len(errors) > 0:
msg = "\n".join(errors)
raise ValueError(
f"The parameterized decorator (and its variants) should always be first, but this is not the case in the following files:\n{msg}"
)
def find_all_documented_objects():
"""Parse the content of all doc files to detect which classes and functions it documents"""
documented_obj = []
for doc_file in Path(PATH_TO_DOC).glob("**/*.rst"):
with open(doc_file, "r", encoding="utf-8", newline="\n") as f:
content = f.read()
raw_doc_objs = re.findall(r"(?:autoclass|autofunction):: transformers.(\S+)\s+", content)
documented_obj += [obj.split(".")[-1] for obj in raw_doc_objs]
return documented_obj
# One good reason for not being documented is to be deprecated. Put in this list deprecated objects.
DEPRECATED_OBJECTS = [
"AutoModelWithLMHead",
"BartPretrainedModel",
"DataCollator",
"DataCollatorForSOP",
"GlueDataset",
"GlueDataTrainingArguments",
"LineByLineTextDataset",
"LineByLineWithRefDataset",
"LineByLineWithSOPTextDataset",
"PretrainedBartModel",
"PretrainedFSMTModel",
"SingleSentenceClassificationProcessor",
"SquadDataTrainingArguments",
"SquadDataset",
"SquadExample",
"SquadFeatures",
"SquadV1Processor",
"SquadV2Processor",
"TFAutoModelWithLMHead",
"TFBartPretrainedModel",
"TextDataset",
"TextDatasetForNextSentencePrediction",
"Wav2Vec2ForMaskedLM",
"Wav2Vec2Tokenizer",
"glue_compute_metrics",
"glue_convert_examples_to_features",
"glue_output_modes",
"glue_processors",
"glue_tasks_num_labels",
"squad_convert_examples_to_features",
"xnli_compute_metrics",
"xnli_output_modes",
"xnli_processors",
"xnli_tasks_num_labels",
]
# Exceptionally, some objects should not be documented after all rules passed.
# ONLY PUT SOMETHING IN THIS LIST AS A LAST RESORT!
UNDOCUMENTED_OBJECTS = [
"AddedToken", # This is a tokenizers class.
"BasicTokenizer", # Internal, should never have been in the main init.
"CharacterTokenizer", # Internal, should never have been in the main init.
"DPRPretrainedReader", # Like an Encoder.
"MecabTokenizer", # Internal, should never have been in the main init.
"ModelCard", # Internal type.
"SqueezeBertModule", # Internal building block (should have been called SqueezeBertLayer)
"TFDPRPretrainedReader", # Like an Encoder.
"TransfoXLCorpus", # Internal type.
"WordpieceTokenizer", # Internal, should never have been in the main init.
"absl", # External module
"add_end_docstrings", # Internal, should never have been in the main init.
"add_start_docstrings", # Internal, should never have been in the main init.
"cached_path", # Internal used for downloading models.
"convert_tf_weight_name_to_pt_weight_name", # Internal used to convert model weights
"logger", # Internal logger
"logging", # External module
"requires_backends", # Internal function
]
# This list should be empty. Objects in it should get their own doc page.
SHOULD_HAVE_THEIR_OWN_PAGE = [
# Benchmarks
"PyTorchBenchmark",
"PyTorchBenchmarkArguments",
"TensorFlowBenchmark",
"TensorFlowBenchmarkArguments",
]
def ignore_undocumented(name):
"""Rules to determine if `name` should be undocumented."""
# NOT DOCUMENTED ON PURPOSE.
# Constants uppercase are not documented.
if name.isupper():
return True
# PreTrainedModels / Encoders / Decoders / Layers / Embeddings / Attention are not documented.
if (
name.endswith("PreTrainedModel")
or name.endswith("Decoder")
or name.endswith("Encoder")
or name.endswith("Layer")
or name.endswith("Embeddings")
or name.endswith("Attention")
):
return True
# Submodules are not documented.
if os.path.isdir(os.path.join(PATH_TO_TRANSFORMERS, name)) or os.path.isfile(
os.path.join(PATH_TO_TRANSFORMERS, f"{name}.py")
):
return True
# All load functions are not documented.
if name.startswith("load_tf") or name.startswith("load_pytorch"):
return True
# is_xxx_available functions are not documented.
if name.startswith("is_") and name.endswith("_available"):
return True
# Deprecated objects are not documented.
if name in DEPRECATED_OBJECTS or name in UNDOCUMENTED_OBJECTS:
return True
# MMBT model does not really work.
if name.startswith("MMBT"):
return True
if name in SHOULD_HAVE_THEIR_OWN_PAGE:
return True
return False
def check_all_objects_are_documented():
"""Check all models are properly documented."""
documented_objs = find_all_documented_objects()
modules = transformers._modules
objects = [c for c in dir(transformers) if c not in modules and not c.startswith("_")]
undocumented_objs = [c for c in objects if c not in documented_objs and not ignore_undocumented(c)]
if len(undocumented_objs) > 0:
raise Exception(
"The following objects are in the public init so should be documented:\n - "
+ "\n - ".join(undocumented_objs)
)
def check_repo_quality():
"""Check all models are properly tested and documented."""
print("Checking all models are public.")
check_models_are_in_init()
print("Checking all models are properly tested.")
check_all_decorator_order()
check_all_models_are_tested()
print("Checking all objects are properly documented.")
check_all_objects_are_documented()
print("Checking all models are in at least one auto class.")
check_all_models_are_auto_configured()
if __name__ == "__main__":
check_repo_quality()
| 40.831858 | 141 | 0.687603 |
f7426ea00976e99a807367df8f057a858b319b91 | 7,958 | py | Python | examples/Bayesian/More advanced examples with FE models - Sfepy/material_homogenization.py | volpatto/UQpy | acbe1d6e655e98917f56b324f019881ea9ccca82 | ["MIT"] | 1 | 2020-09-03T12:10:39.000Z | 2020-09-03T12:10:39.000Z | example/Bayesian/More advanced examples with FE models - Sfepy/material_homogenization.py | volpatto/UQpy | acbe1d6e655e98917f56b324f019881ea9ccca82 | ["MIT"] | null | null | null | example/Bayesian/More advanced examples with FE models - Sfepy/material_homogenization.py | volpatto/UQpy | acbe1d6e655e98917f56b324f019881ea9ccca82 | ["MIT"] | 1 | 2021-04-04T14:17:55.000Z | 2021-04-04T14:17:55.000Z |
#!/usr/bin/env python
# This code was adapted from http://sfepy.org/doc-devel/mat_optim.html.
from __future__ import print_function
from __future__ import absolute_import
import sys
sys.path.append('.')
import matplotlib as mlp
import matplotlib.pyplot as plt
from matplotlib.collections import PolyCollection
from mpl_toolkits.mplot3d.art3d import Poly3DCollection, Line3DCollection
import numpy as np
from sfepy.base.base import Struct, output
from sfepy.base.log import Log
from sfepy import data_dir
class MaterialSimulator(object):
@staticmethod
def create_app(filename, is_homog=False, **kwargs):
from sfepy.base.conf import ProblemConf, get_standard_keywords
from sfepy.homogenization.homogen_app import HomogenizationApp
from sfepy.applications import PDESolverApp
required, other = get_standard_keywords()
if is_homog:
required.remove('equations')
conf = ProblemConf.from_file(filename, required, other,
define_args=kwargs)
options = Struct(output_filename_trunk=None,
save_ebc=False,
save_ebc_nodes=False,
save_regions=False,
save_regions_as_groups=False,
save_field_meshes=False,
solve_not=False,
)
output.set_output(filename='sfepy_log.txt', quiet=True)
if is_homog:
app = HomogenizationApp(conf, options, 'material_opt_micro:')
else:
app = PDESolverApp(conf, options, 'material_opt_macro:')
app.conf.opt_data = {}
opts = conf.options
if hasattr(opts, 'parametric_hook'): # Parametric study.
parametric_hook = conf.get_function(opts.parametric_hook)
app.parametrize(parametric_hook)
return app
def __init__(self, macro_fn, micro_fn, phis, plot_meshes_bool=False):
self.macro_app = self.create_app(macro_fn, is_homog=False, is_opt=True)
self.micro_app = self.create_app(micro_fn, is_homog=True, is_opt=True)
self.phis = phis
self.plot_meshes_bool = plot_meshes_bool
@staticmethod
def rotate_mat(D, angle):
s = np.sin(angle)
c = np.cos(angle)
s2 = s**2
c2 = c**2
sc = s * c
T = np.array([[c2, 0, s2, 0, 2*sc,0],
[0, 1, 0, 0, 0, 0],
[s2, 0, c2, 0, -2*sc, 0],
[0, 0, 0, c, 0, -s],
[-sc, 0, sc, 0, c2 - s2, 0],
[0, 0, 0, s, 0, c]])
return np.dot(np.dot(T, D), T.T)
def plot_meshes(self):
# plot mesh for micro problem
pb = self.micro_app.problem
coors = pb.domain.mesh.coors
#print(set(coors[:,2]))
graph = pb.domain.mesh.get_conn(pb.domain.mesh.descs[0])
graph_slice = np.zeros((graph.shape[0], 4))
for j in range(graph.shape[0]):
graph_slice[j,:] = graph[j,coors[graph[j,:],2] == 0]
cells_matrix = pb.domain.regions['Ym'].get_cells()
cells_fibers = pb.domain.regions['Yf'].get_cells()
fig = plt.figure(figsize = (12, 5))
ax = fig.add_subplot(121)
pc = PolyCollection(verts=coors[graph[cells_matrix,0:4],:2], facecolors='white',
edgecolors='black')
ax.add_collection(pc)
pc = PolyCollection(verts=coors[graph[cells_fibers,0:4],:2], facecolors='gray',
edgecolors='black')
ax.add_collection(pc)
ax.axis('equal')
ax.set_title('2D plot of microstructure')
ax = fig.add_subplot(122, projection='3d')
for e in range(graph.shape[0]):
if e in cells_fibers:
color = 'gray'
else:
color = 'white'
tupleList = coors[graph[e,:],:]
vertices = [[0, 1, 2, 3], [4, 5, 6, 7],
[0, 1, 5, 4], [1, 2, 6, 5], [2, 3, 7, 6], [3, 0, 4, 7]]
verts = [[tupleList[vertices[ix][iy]] for iy in range(len(vertices[0]))]
for ix in range(len(vertices))]
pc3d = Poly3DCollection(verts=verts, facecolors=color,
edgecolors='black', linewidths=1, alpha=0.5)
ax.add_collection3d(pc3d)
ax.set_title('3D plot of microstructure')
plt.show(fig)
# plot mesh for macro problem
pb = self.macro_app.problem
coors = pb.domain.mesh.coors
graph = pb.domain.mesh.get_conn(pb.domain.mesh.descs[0])
fig2 = plt.figure(figsize=(5,6))
ax = fig2.add_subplot(111, projection='3d')
for e in range(graph.shape[0]):
tupleList = coors[graph[e,:],:]
vertices = [[0, 1, 2, 3], [4, 5, 6, 7],
[0, 1, 5, 4], [1, 2, 6, 5], [2, 3, 7, 6], [3, 0, 4, 7]]
verts = [[tupleList[vertices[ix][iy]] for iy in range(len(vertices[0]))]
for ix in range(len(vertices))]
pc3d = Poly3DCollection(verts=verts, facecolors='white',
edgecolors='black', linewidths=1, alpha=0.5)
ax.add_collection3d(pc3d)
ax.set_xlim3d(-0.03, 0.03)
ax.set_ylim3d(-0.01, 0.01)
ax.set_zlim3d(-0.01, 0.1)
ax.set_title('3D plot of macro system')
plt.show(fig2)
return None
def mat_eval(self, x):
mic_od = self.micro_app.conf.opt_data
mac_od = self.macro_app.conf.opt_data
mic_od['coefs'] = {}
mic_od['mat_params'] = x_norm2real(x)
self.micro_app()
D = mic_od['D_homog']
comp_k = []
for phi in self.phis:
#print('phi = %d' % phi)
mac_od['D_homog'] = self.rotate_mat(D, np.deg2rad(phi))
self.macro_app()
comp_k.append(mac_od['k'])
# added by Audrey: get a plot of a slice of the mesh
if self.plot_meshes_bool:
self.plot_meshes()
return comp_k
def bounds():
x_L = [120e9, 0.2, 2e9, 0.2]
x_U = [200e9, 0.45, 8e9, 0.45]
return x_L, x_U
def x_norm2real(x):
x_L, x_U = np.array(bounds())
return x * (x_U - x_L) + x_L
def x_real2norm(x):
x_L, x_U = np.array(bounds())
return (x - x_L) / (x_U - x_L)
micro_filename = data_dir + '/examples/homogenization/' + 'homogenization_opt.py'
macro_filename = data_dir + '/examples/homogenization/' + 'linear_elasticity_opt.py'
def one_simulation(x0, plot_meshes_bool=False):
"""
This function is the main callable here: it takes in as input the parameter vector,
here x0=[E_fiber, nu_fiber, E_matrix, nu_matrix], and returns the simulated output
(here slope of the force-elongation curve obtained during a tensile test), to be compared
with the measured data.
"""
x0 = x0.reshape((-1, ))
phis = [0, 30, 60, 90]
#exp_data = zip([0, 30, 60, 90], [1051140., 197330., 101226., 95474.])
ms = MaterialSimulator(macro_filename, micro_filename,
phis,
plot_meshes_bool=plot_meshes_bool)
qoi = ms.mat_eval(x0)
return qoi
def one_simulation_2params(x0, plot_meshes_bool=False):
x0 = x0.reshape((-1, ))
x0 = np.array([x0[0], 0.45, x0[1], 0.])
phis = [0, 30, 60, 90]
#exp_data = zip([0, 30, 60, 90], [1051140., 197330., 101226., 95474.])
ms = MaterialSimulator(macro_filename, micro_filename,
phis, plot_meshes_bool=plot_meshes_bool)
qoi = ms.mat_eval(x0)
return qoi
def one_simulation_2params_rvs(x0, plot_meshes_bool=False):
x0 = x0.reshape((-1, ))
x0 = np.array([x0[0], 0.45, x0[1], 0.])
phis = [0, 30, 60, 90]
ms = MaterialSimulator(macro_filename, micro_filename,
phis,
plot_meshes_bool=plot_meshes_bool)
qoi = ms.mat_eval(x0)
qoi = np.tile(np.array(qoi), 100)
return qoi
| 36.3379 | 93 | 0.574265 |
f7427e3e091e4b7bc43db2bc2372dfafe57fa435 | 1,335 | py | Python | alipay/aop/api/response/AlipayFundCouponWufuLiveAcceptResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/response/AlipayFundCouponWufuLiveAcceptResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/response/AlipayFundCouponWufuLiveAcceptResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.CommonPrizeModelVo import CommonPrizeModelVo
class AlipayFundCouponWufuLiveAcceptResponse(AlipayResponse):
def __init__(self):
super(AlipayFundCouponWufuLiveAcceptResponse, self).__init__()
self._prize_list = None
self._prized = None
@property
def prize_list(self):
return self._prize_list
@prize_list.setter
def prize_list(self, value):
if isinstance(value, list):
self._prize_list = list()
for i in value:
if isinstance(i, CommonPrizeModelVo):
self._prize_list.append(i)
else:
self._prize_list.append(CommonPrizeModelVo.from_alipay_dict(i))
@property
def prized(self):
return self._prized
@prized.setter
def prized(self, value):
self._prized = value
def parse_response_content(self, response_content):
response = super(AlipayFundCouponWufuLiveAcceptResponse, self).parse_response_content(response_content)
if 'prize_list' in response:
self.prize_list = response['prize_list']
if 'prized' in response:
self.prized = response['prized']
| 31.046512 | 111 | 0.662172 |
f742bfecd3ff1bea6cfcdc0bc20e45e69326710e | 1,523 | py | Python | tests/version_spit_check.py | XIThing/esdl-mapeditor | 9f4cd4a58714ea67aeb532e88e88f0435a87dbd5 | [
"Apache-2.0"
] | null | null | null | tests/version_spit_check.py | XIThing/esdl-mapeditor | 9f4cd4a58714ea67aeb532e88e88f0435a87dbd5 | [
"Apache-2.0"
] | 14 | 2020-09-30T21:16:46.000Z | 2021-11-08T18:54:34.000Z | tests/version_spit_check.py | XIThing/esdl-mapeditor | 9f4cd4a58714ea67aeb532e88e88f0435a87dbd5 | [
"Apache-2.0"
] | 1 | 2020-09-17T12:48:57.000Z | 2020-09-17T12:48:57.000Z | # This work is based on original code developed and copyrighted by TNO 2020.
# Subsequent contributions are licensed to you by the developers of such code and are
# made available to the Project under one or several contributor license agreements.
#
# This work is licensed to you under the Apache License, Version 2.0.
# You may obtain a copy of the license at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Contributors:
# TNO - Initial implementation
# Manager:
# TNO
#version = ''
#import re
#m = re.match(r"\D??(\d{1,2}+)\.?(\d{1,2}+)\.?(\d+)?.*", version)
# splitted = version.split('.')
# major = splitted[0] if splitted[0] is not None and splitted[0] is not '' else 1
# minor = splitted[1] if len(splitted) > 1 and splitted[1] is not None else '0'
# rest = ''.join(splitted[2:]) if len(splitted) > 2 else ''
# # increment
# try:
# import re
# minor = re.split(r"\D", minor)[0]
# if len(splitted) > 1 and splitted[1] is not None:
# minor = int(minor) + 1
# else
# except ValueError:
# minor = 1
# # store
# version_updated = "{}.{}".format(major, minor)
# if rest is not '':
# version_updated = "{}.{}".format(version_updated, rest)
# print(version_updated)
version = 2
version = '' if version is None else str(version)
try:
import re
splitted = re.split(r"\D", version)
print(splitted)
major = splitted[0]
major = int(major) + 1
except ValueError:
major = 1
print(str(major))
| 31.729167 | 87 | 0.620486 |
f742e8a83d8dc2b3ed20891ea8ef42174aca4a4d | 1,831 | py | Python | toontown/suit/DistributedGridGoon.py | LittleNed/toontown-stride | 1252a8f9a8816c1810106006d09c8bdfe6ad1e57 | [
"Apache-2.0"
] | 3 | 2020-01-02T08:43:36.000Z | 2020-07-05T08:59:02.000Z | toontown/suit/DistributedGridGoon.py | NoraTT/Historical-Commits-Project-Altis-Source | fe88e6d07edf418f7de6ad5b3d9ecb3d0d285179 | [
"Apache-2.0"
] | null | null | null | toontown/suit/DistributedGridGoon.py | NoraTT/Historical-Commits-Project-Altis-Source | fe88e6d07edf418f7de6ad5b3d9ecb3d0d285179 | [
"Apache-2.0"
] | 4 | 2019-06-20T23:45:23.000Z | 2020-10-14T20:30:15.000Z | from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from toontown.battle.BattleProps import *
from direct.directnotify import DirectNotifyGlobal
from toontown.suit import DistributedGoon
from toontown.toonbase import ToontownGlobals
from toontown.coghq import MovingPlatform
class DistributedGridGoon(DistributedGoon.DistributedGoon):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedGoon')
def __init__(self, cr, type = 'sg'):
try:
self.DistributedGridGoon_initialized
return
except:
self.DistributedGridGoon_initialized = 1
DistributedGoon.DistributedGoon.__init__(self, cr, type)
def generate(self):
DistributedGoon.DistributedGoon.generate(self)
self.ignore(self.uniqueName('wallHit'))
self.mazeWalkTrack = None
def delete(self):
if self.mazeWalkTrack:
self.mazeWalkTrack.pause()
del self.mazeWalkTrack
DistributedGoon.DistributedGoon.delete(self)
def setH(self, h):
self.h = h
def setPathPts(self, xi, yi, zi, xf, yf, zf):
self.notify.debug('setPathPts')
if self.mazeWalkTrack:
self.mazeWalkTrack.pause()
del self.mazeWalkTrack
self.mazeWalkTrack = None
curPos = Point3(xi, yi, zi)
nextPos = Point3(xf, yf, zf)
distance = Vec3(curPos - nextPos).length()
duration = distance / self.velocity
self.mazeWalkTrack = Sequence(Func(self.headsUp, nextPos[0], nextPos[1], nextPos[2]), LerpPosInterval(self, duration=duration, pos=nextPos, startPos=curPos), name=self.uniqueName('mazeWalkTrack'))
self.mazeWalkTrack.start()
def enterWalk(self, avId = None, ts = 0):
pass
def exitWalk(self):
pass
| 34.54717 | 204 | 0.673949 |
f7430f6ac609fa063a5c93eb798b63904e75a5c0 | 1,066 | py | Python | sdks/python/test/test_AlertingGithubBugtrackerSettings.py | Brantone/appcenter-sdks | eeb063ecf79908b6e341fb00196d2cd9dc8f3262 | [
"MIT"
] | null | null | null | sdks/python/test/test_AlertingGithubBugtrackerSettings.py | Brantone/appcenter-sdks | eeb063ecf79908b6e341fb00196d2cd9dc8f3262 | [
"MIT"
] | 6 | 2019-10-23T06:38:53.000Z | 2022-01-22T07:57:58.000Z | sdks/python/test/test_AlertingGithubBugtrackerSettings.py | Brantone/appcenter-sdks | eeb063ecf79908b6e341fb00196d2cd9dc8f3262 | [
"MIT"
] | 2 | 2019-10-23T06:31:05.000Z | 2021-08-21T17:32:47.000Z | # coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API # noqa: E501
OpenAPI spec version: preview
Contact: benedetto.abbenanti@gmail.com
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
from __future__ import absolute_import
import unittest
import appcenter_sdk
from AlertingGithubBugtrackerSettings.clsAlertingGithubBugtrackerSettings import AlertingGithubBugtrackerSettings # noqa: E501
from appcenter_sdk.rest import ApiException
class TestAlertingGithubBugtrackerSettings(unittest.TestCase):
"""AlertingGithubBugtrackerSettings unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testAlertingGithubBugtrackerSettings(self):
"""Test AlertingGithubBugtrackerSettings"""
# FIXME: construct object with mandatory attributes with example values
# model = appcenter_sdk.models.clsAlertingGithubBugtrackerSettings.AlertingGithubBugtrackerSettings() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 26.65 | 127 | 0.756098 |
f7430f9bf69f737e29287ec59200f7dbfa4334ff | 15,010 | py | Python | storm_kit/mpc/control/control_utils.py | rgap/storm | 5f477d6fa58c6c1ec8d8e2b57c3b21844cae17ac | [
"MIT"
] | null | null | null | storm_kit/mpc/control/control_utils.py | rgap/storm | 5f477d6fa58c6c1ec8d8e2b57c3b21844cae17ac | [
"MIT"
] | null | null | null | storm_kit/mpc/control/control_utils.py | rgap/storm | 5f477d6fa58c6c1ec8d8e2b57c3b21844cae17ac | [
"MIT"
] | null | null | null | #
# MIT License
#
# Copyright (c) 2020-2021 NVIDIA CORPORATION.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.#
import math
import numpy as np
import torch
from torch.distributions.multivariate_normal import MultivariateNormal
import ghalton
def scale_ctrl(ctrl, action_lows, action_highs, squash_fn='clamp'):
if len(ctrl.shape) == 1:
ctrl = ctrl[np.newaxis, :, np.newaxis]
act_half_range = (action_highs - action_lows) / 2.0
act_mid_range = (action_highs + action_lows) / 2.0
if squash_fn == 'clamp':
# ctrl = torch.clamp(ctrl, action_lows[0], action_highs[0])
ctrl = torch.max(torch.min(ctrl, action_highs), action_lows)
return ctrl
elif squash_fn == 'clamp_rescale':
ctrl = torch.clamp(ctrl, -1.0, 1.0)
elif squash_fn == 'tanh':
ctrl = torch.tanh(ctrl)
elif squash_fn == 'identity':
return ctrl
return act_mid_range.unsqueeze(0) + ctrl * act_half_range.unsqueeze(0)
#######################
## STOMP Covariance ##
#######################
def get_stomp_cov(horizon, d_action,
tensor_args={'device':torch.device('cpu'),'dtype':torch.float32},
cov_mode='vel', RETURN_R=False):
""" Computes the covariance matrix following STOMP motion planner
Coefficients from here: https://en.wikipedia.org/wiki/Finite_difference_coefficient
More info here: https://github.com/ros-industrial/stomp_ros/blob/7fe40fbe6ad446459d8d4889916c64e276dbf882/stomp_core/src/utils.cpp#L36
"""
acc_fd_array = [0,-1 / 12, 4 / 3, -5 / 2, 4 / 3, -1 / 12, 0]
#acc_fd_array = [1/90, -3/20, 3/2, -49/18, 3/2 , -3/20, 1/90 ]
#jerk_fd_array = [0, 1 / 12.0, -17 / 12.0, 46 / 12.0, -46 / 12.0, 17 / 12.0, -1 / 12.0]
jerk_fd_array = [1 / 8.0, -1, 13/8, 0 , -13/8, 1, -1/8]
#snap_fd_array = [-1/6, 2.0, -13/2, 28/3, -13/2, 2, -1/6]
snap_fd_array = [0, 1, -4, 6, -4, 1, 0]
#vel_fd_array = [0, 1.0/12.0 , -2.0/3.0 , 0 , 2.0/3.0 , -1.0/12.0 , 0 ]
vel_fd_array = [0, 0 , 1, -2 , 1,0, 0 ]
fd_array = acc_fd_array
A = torch.zeros((d_action * horizon, d_action * horizon), device=tensor_args['device'],dtype=torch.float64)
if(cov_mode == 'vel'):
for k in range(d_action):
for i in range(0, horizon):
for j in range(-3,4):
#print(j)
index = i + j
if(index < 0):
index = 0
continue
if(index >= horizon):
index = horizon - 1
continue
A[k * horizon + i,k * horizon + index] = fd_array[j + 3]
elif(cov_mode == 'acc'):
for k in range(d_action):
for i in range(0, horizon):
for j in range(-3,4):
#print(j)
index = i + j
if(index < 0):
index = 0
continue
if(index >= horizon):
index = horizon - 1
continue
if(index >= horizon/2):
#print(k * horizon + index - horizon//2)
A[k * horizon + i,k * horizon - index - horizon//2 -1] = fd_array[j + 3] #* float((horizon-index) / horizon)
else:
A[k * horizon + i,k * horizon + index] = fd_array[j + 3] #* float(index/horizon)
#plt.imshow(A)
#plt.show()
R = torch.matmul(A.transpose(-2,-1), A)
#print(R[:horizon, :horizon])
#plt.imshow(R)
#plt.show()
#print(R)
#print(torch.det(R))
cov = torch.inverse(R)
cov = cov / torch.max(torch.abs(cov))
#plt.imshow(cov)
#plt.show()
# also compute the cholesky decomposition:
scale_tril = torch.zeros((d_action * horizon, d_action * horizon), **tensor_args)
scale_tril = torch.linalg.cholesky(cov)
'''
k = 0
act_cov_matrix = cov[k * horizon:k * horizon + horizon, k * horizon:k * horizon + horizon]
print(act_cov_matrix.shape)
print(torch.det(act_cov_matrix))
local_cholesky = matrix_cholesky(act_cov_matrix)
for k in range(d_action):
scale_tril[k * horizon:k * horizon + horizon,k * horizon:k * horizon + horizon] = local_cholesky
'''
cov = cov.to(**tensor_args)
scale_tril = scale_tril.to(**tensor_args) #* 0.1
scale_tril = scale_tril / torch.max(scale_tril)
if(RETURN_R):
return cov, scale_tril, R
return cov, scale_tril
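# Usage sketch (illustrative, not part of the original module): the covariance is
# blocked per action dimension, so a flat sample of length d_action*horizon
# reshapes to (d_action, horizon).
#   cov, scale_tril = get_stomp_cov(horizon=30, d_action=7)
#   dist = MultivariateNormal(torch.zeros(30 * 7), scale_tril=scale_tril)
#   eps = dist.sample((500,)).view(500, 7, 30)   # [batch, d_action, horizon]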
#######################
## Gaussian Sampling ##
#######################
def generate_noise(cov, shape, base_seed, filter_coeffs=None, device=torch.device('cpu')):
"""
Generate correlated Gaussian samples using autoregressive process
"""
torch.manual_seed(base_seed)
    N = cov.shape[0]
    m = MultivariateNormal(loc=torch.zeros(N).to(device), covariance_matrix=cov)
    eps = m.sample(sample_shape=shape)
    # eps = np.random.multivariate_normal(mean=np.zeros((N,)), cov = cov, size=shape)
    if filter_coeffs is not None:
        # only unpack the AR(2) smoothing coefficients when a filter is given,
        # so the documented default filter_coeffs=None does not raise
        beta_0, beta_1, beta_2 = filter_coeffs
for i in range(2, eps.shape[1]):
eps[:,i,:] = beta_0*eps[:,i,:] + beta_1*eps[:,i-1,:] + beta_2*eps[:,i-2,:]
return eps
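# Usage sketch (illustrative): with cov of shape [d_action, d_action] and
# shape=(num_rollouts, horizon), `eps` comes back as
# [num_rollouts, horizon, d_action]; the optional (beta_0, beta_1, beta_2)
# coefficients smooth each rollout along the horizon axis.
#   eps = generate_noise(cov, shape=(500, 30), base_seed=0,
#                        filter_coeffs=(0.5, 0.3, 0.2))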
def generate_noise_np(cov, shape, base_seed, filter_coeffs=None):
"""
Generate correlated noisy samples using autoregressive process
"""
np.random.seed(base_seed)
    N = cov.shape[0]
    eps = np.random.multivariate_normal(mean=np.zeros((N,)), cov = cov, size=shape)
    if filter_coeffs is not None:
        beta_0, beta_1, beta_2 = filter_coeffs
for i in range(2, eps.shape[1]):
eps[:,i,:] = beta_0*eps[:,i,:] + beta_1*eps[:,i-1,:] + beta_2*eps[:,i-2,:]
return eps
###########################
## Quasi-Random Sampling ##
###########################
def generate_prime_numbers(num):
def is_prime(n):
for j in range(2, ((n //2) + 1),1):
if n % j == 0:
return False
return True
primes = [0] * num #torch.zeros(num, device=device)
primes[0] = 2
curr_num = 1
for i in range(1, num):
while True:
curr_num += 2
if is_prime(curr_num):
primes[i] = curr_num
break
return primes
def generate_van_der_corput_sample(idx, base):
f, r = 1.0, 0
while idx > 0:
f /= base*1.0
r += f * (idx % base)
idx = idx // base
return r
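# Worked example: the base-2 van der Corput sequence mirrors the binary digits of
# the index about the radix point, so
#   generate_van_der_corput_sample(1, 2) -> 0.5
#   generate_van_der_corput_sample(2, 2) -> 0.25
#   generate_van_der_corput_sample(3, 2) -> 0.75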
def generate_van_der_corput_samples_batch(idx_batch, base):
inp_device = idx_batch.device
batch_size = idx_batch.shape[0]
f = 1.0 #torch.ones(batch_size, device=inp_device)
r = torch.zeros(batch_size, device=inp_device)
while torch.any(idx_batch > 0):
f /= base*1.0
r += f * (idx_batch % base) #* (idx_batch > 0)
idx_batch = idx_batch // base
return r
# def generate_van_der_corput_samples_batch_2(idx_batch, bases):
# inp_device = idx_batch.device
# batch_size = idx_batch.shape[0]
# f = torch.ones(batch_size, device=inp_device)
# r = torch.zeros(batch_size, device=inp_device)
# while torch.any(idx_batch > 0):
# f /= bases*1.0
# r += f * (idx_batch % base) #* (idx_batch > 0)
# idx_batch = idx_batch // base
# return r
def generate_halton_samples(num_samples, ndims, bases=None, use_ghalton=True, seed_val=123, device=torch.device('cpu'), float_dtype=torch.float64):
if not use_ghalton:
samples = torch.zeros(num_samples, ndims, device=device, dtype=float_dtype)
if not bases:
bases = generate_prime_numbers(ndims)
idx_batch = torch.arange(1,num_samples+1, device=device)
for dim in range(ndims):
samples[:, dim] = generate_van_der_corput_samples_batch(idx_batch, bases[dim])
else:
if ndims <= 100:
perms = ghalton.EA_PERMS[:ndims]
sequencer = ghalton.GeneralizedHalton(perms)
else:
sequencer = ghalton.GeneralizedHalton(ndims, seed_val)
samples = torch.tensor(sequencer.get(num_samples), device=device, dtype=float_dtype)
return samples
def generate_gaussian_halton_samples(num_samples, ndims, bases=None, use_ghalton=True, seed_val=123, device=torch.device('cpu'), float_dtype=torch.float64):
uniform_halton_samples = generate_halton_samples(num_samples, ndims, bases, use_ghalton, seed_val, device, float_dtype)
gaussian_halton_samples = torch.sqrt(torch.tensor([2.0],device=device,dtype=float_dtype)) * torch.erfinv(2 * uniform_halton_samples - 1)
return gaussian_halton_samples
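# Usage sketch (illustrative): low-discrepancy "Gaussian" samples, e.g. for
# deterministic sampling of control perturbations.  The uniform Halton points are
# pushed through the standard-normal quantile sqrt(2)*erfinv(2u - 1), so each
# column is approximately N(0, 1) distributed.
#   samples = generate_gaussian_halton_samples(1024, ndims=14,
#                                              device=torch.device('cpu'))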
def generate_gaussian_sobol_samples(num_samples, ndims, seed_val, device=torch.device('cpu'), float_dtype=torch.float64):
soboleng = torch.quasirandom.SobolEngine(dimension=ndims, scramble=True, seed=seed_val)
uniform_sobol_samples = soboleng.draw(num_samples).to(device)
gaussian_sobol_samples = torch.sqrt(torch.tensor([2.0],device=device,dtype=float_dtype)) * torch.erfinv(2 * uniform_sobol_samples - 1)
return gaussian_sobol_samples
########################
## Gaussian Utilities ##
########################
def gaussian_logprob(mean, cov, x, cov_type="full"):
"""
Calculate gaussian log prob for given input batch x
Parameters
----------
mean (np.ndarray): [N x num_samples] batch of means
cov (np.ndarray): [N x N] covariance matrix
x (np.ndarray): [N x num_samples] batch of sample values
Returns
--------
log_prob (np.ndarray): [num_sampls] log probability of each sample
"""
N = cov.shape[0]
if cov_type == "diagonal":
cov_diag = cov.diagonal()
cov_inv = np.diag(1.0 / cov_diag)
cov_logdet = np.sum(np.log(cov_diag))
else:
cov_logdet = np.log(np.linalg.det(cov))
cov_inv = np.linalg.inv(cov)
diff = (x - mean).T
mahalanobis_dist = -0.5 * np.sum((diff @ cov_inv) * diff, axis=1)
const1 = -0.5 * N * np.log(2.0 * np.pi)
const2 = -0.5*cov_logdet
log_prob = mahalanobis_dist + const1 + const2
return log_prob
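# Worked example (illustrative): for a standard 2-D Gaussian evaluated at its
# mean, the log-density is -log(2*pi) ~= -1.8379.
#   mean = np.zeros((2, 1)); cov = np.eye(2); x = np.zeros((2, 1))
#   gaussian_logprob(mean, cov, x)   # -> array([-1.8379...])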
def gaussian_logprobgrad(mean, cov, x, cov_type="full"):
if cov_type == "diagonal":
cov_inv = np.diag(1.0/cov.diagonal())
else:
cov_inv = np.linalg.inv(cov)
diff = (x - mean).T
grad = diff @ cov_inv
return grad
def gaussian_entropy(cov=None, L=None): #, cov_type="full"):
"""
Entropy of multivariate gaussian given either covariance
or cholesky decomposition of covariance
"""
if cov is not None:
inp_device = cov.device
cov_logdet = torch.log(torch.det(cov))
# print(np.linalg.det(cov.cpu().numpy()))
# print(torch.det(cov))
N = cov.shape[0]
else:
inp_device = L.device
cov_logdet = 2.0 * torch.sum(torch.log(torch.diagonal(L)))
N = L.shape[0]
# if cov_type == "diagonal":
# cov_logdet = np.sum(np.log(cov.diagonal()))
# else:
# cov_logdet = np.log(np.linalg.det(cov))
term1 = 0.5 * cov_logdet
# pi = torch.tensor([math.pi], device=inp_device)
# pre-calculate 1.0 + torch.log(2.0*pi) = 2.837877066
term2 = 0.5 * N * 2.837877066
ent = term1 + term2
return ent.to(inp_device)
def gaussian_kl(mean0, cov0, mean1, cov1, cov_type="full"):
"""
KL-divergence between Gaussians given mean and covariance
KL(p||q) = E_{p}[log(p) - log(q)]
"""
N = cov0.shape[0]
if cov_type == "diagonal":
cov1_diag = cov1.diagonal()
cov1_inv = np.diag(1.0 / cov1_diag)
cov0_logdet = np.sum(np.log(cov0.diagonal()))
cov1_logdet = np.sum(np.log(cov1_diag))
else:
cov1_inv = np.linalg.inv(cov1)
cov0_logdet = np.log(np.linalg.det(cov0))
cov1_logdet = np.log(np.linalg.det(cov1))
term1 = 0.5 * np.trace(cov1_inv @ cov0)
diff = (mean1 - mean0).T
mahalanobis_dist = 0.5 * np.sum((diff @ cov1_inv) * diff, axis=1)
term3 = 0.5 * (-1.0*N + cov1_logdet - cov0_logdet)
return term1 + mahalanobis_dist + term3
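# Sanity check (illustrative): the divergence of a Gaussian from itself is zero,
# since term1 = 0.5*N, the Mahalanobis term vanishes and term3 = -0.5*N.
#   gaussian_kl(mean, cov, mean, cov)   # -> array of zeros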
def cost_to_go(cost_seq, gamma_seq):
"""
Calculate (discounted) cost to go for given cost sequence
"""
# if torch.any(gamma_seq == 0):
# return cost_seq
cost_seq = gamma_seq * cost_seq # discounted cost sequence
# cost_seq = torch.cumsum(cost_seq[:, ::-1], axis=-1)[:, ::-1] # cost to go (but scaled by [1 , gamma, gamma*2 and so on])
cost_seq = torch.fliplr(torch.cumsum(torch.fliplr(cost_seq), axis=-1)) # cost to go (but scaled by [1 , gamma, gamma*2 and so on])
cost_seq /= gamma_seq # un-scale it to get true discounted cost to go
return cost_seq
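# Worked example (illustrative): with gamma_seq = [[1, 0.5, 0.25]] and
# cost_seq = [[1, 1, 1]], the discounted cost to go is [[1.75, 1.5, 1.0]],
# i.e. entry t equals sum over t' >= t of gamma^(t'-t) * cost[t'].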
def cost_to_go_np(cost_seq, gamma_seq):
"""
Calculate (discounted) cost to go for given cost sequence
"""
# if np.any(gamma_seq == 0):
# return cost_seq
cost_seq = gamma_seq * cost_seq # discounted reward sequence
cost_seq = np.cumsum(cost_seq[:, ::-1], axis=-1)[:, ::-1] # cost to go (but scaled by [1 , gamma, gamma*2 and so on])
cost_seq /= gamma_seq # un-scale it to get true discounted cost to go
return cost_seq
############
##Cholesky##
############
def matrix_cholesky(A):
L = torch.zeros_like(A)
for i in range(A.shape[-1]):
for j in range(i+1):
s = 0.0
for k in range(j):
s = s + L[i,k] * L[j,k]
L[i,j] = torch.sqrt(A[i,i] - s) if (i == j) else \
(1.0 / L[j,j] * (A[i,j] - s))
return L
# Batched Cholesky decomp
def batch_cholesky(A):
L = torch.zeros_like(A)
for i in range(A.shape[-1]):
for j in range(i+1):
s = 0.0
for k in range(j):
s = s + L[...,i,k] * L[...,j,k]
L[...,i,j] = torch.sqrt(A[...,i,i] - s) if (i == j) else \
(1.0 / L[...,j,j] * (A[...,i,j] - s))
return L
| 36.256039 | 156 | 0.593205 |
f7435cd562697e1cc976e916003e38001a1a1351 | 16,047 | py | Python | seqio/loggers.py | ayushkumar63123/seqio | 23bcb59df59798074d7d5896a131980137c69ec8 | [
"Apache-2.0"
] | 144 | 2021-04-16T00:43:10.000Z | 2022-03-21T08:51:27.000Z | seqio/loggers.py | ayushkumar63123/seqio | 23bcb59df59798074d7d5896a131980137c69ec8 | [
"Apache-2.0"
] | 49 | 2021-04-26T06:28:51.000Z | 2022-03-28T23:55:16.000Z | seqio/loggers.py | ayushkumar63123/seqio | 23bcb59df59798074d7d5896a131980137c69ec8 | [
"Apache-2.0"
] | 13 | 2021-04-27T10:28:27.000Z | 2022-03-10T18:43:37.000Z | # Copyright 2021 The SeqIO Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for logging evaluation metrics and inference results."""
import abc
import base64
import itertools
import json
import os
import time
from typing import Any, Mapping, Optional, Sequence, Type
from absl import logging
import numpy as np
from seqio import metrics as metrics_lib
import tensorflow as tf
import tensorflow_datasets as tfds
class Logger(abc.ABC):
"""Abstract base class for logging.
Attributes:
output_dir: a directory to save the logging results (e.g., TensorBoard
summary) as well as the evaluation results (e.g., "inputs_pretokenized",
"target_pretokenize" and "prediction").
"""
def __init__(self, output_dir):
self.output_dir = output_dir
@abc.abstractmethod
def __call__(self, task_name: str, step: int,
metrics: Mapping[str, metrics_lib.MetricValue],
dataset: tf.data.Dataset, inferences: Mapping[str,
Sequence[Any]],
targets: Sequence[Any]) -> None:
"""Logs the metrics and inferences for each task.
Args:
task_name: The name of the task these datapoints are relevant to.
step: The timestep to place this datapoint at.
metrics: A mapping from series names to numeric datapoints to be added to
that series.
dataset: The Task dataset.
inferences: Mapping from inference type ("predictions", "scores") to the
model outputs, aligned with the dataset.
targets: The postprocessed targets, aligned with the dataset.
"""
...
class PyLoggingLogger(Logger):
"""A logger that writes metrics using the standard Python log."""
def __init__(self, output_dir: str, level: int = logging.INFO):
self._level = level
super().__init__(output_dir)
def __call__(self, task_name: str, step: int,
metrics: Mapping[str, metrics_lib.MetricValue],
dataset: tf.data.Dataset, inferences: Mapping[str,
Sequence[Any]],
targets: Sequence[Any]) -> None:
del dataset
del inferences
del targets
for metric_name, metric_value in metrics.items():
if isinstance(metric_value, metrics_lib.Scalar):
strvalue = f"{metric_value.value:.3f}"
elif isinstance(metric_value, metrics_lib.Text):
strvalue = metric_value.textdata
else:
strvalue = f"unloggable type {type(metric_value)}"
logging.info("%s/%s at step %d: %s", task_name, metric_name, step,
strvalue)
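# Usage sketch (illustrative; the task, dataset and prediction names below are
# made up): loggers are invoked by an evaluation loop once per task and step.
#   logger = PyLoggingLogger(output_dir="/tmp/eval")
#   logger(task_name="my_task", step=1000,
#          metrics={"accuracy": metrics_lib.Scalar(0.93)},
#          dataset=task_ds, inferences={"predictions": preds}, targets=targets)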
class TensorBoardLogger(Logger):
"""A logger that writes metrics to TensorBoard summaries."""
def __init__(self, output_dir: str):
"""TensorBoardLogger initializer.
Args:
output_dir: The base directory where all logs will be written.
"""
super().__init__(output_dir)
self._summary_writers = {}
def _get_summary_writer(self, summary_dir: str) -> tf.summary.SummaryWriter:
"""Get or create a summary writer for a specific task.
Args:
summary_dir: The task we are getting the writer for.
Returns:
The summary writer associated with the directory.
"""
if summary_dir not in self._summary_writers:
self._summary_writers[summary_dir] = tf.summary.create_file_writer(
summary_dir, flush_millis=120)
return self._summary_writers[summary_dir]
def _write_metric(self, tag: str, value: metrics_lib.MetricValue, step: int,
writer: tf.summary.SummaryWriter):
"""Log a metric value to tensorboard, dispatched on value type."""
if isinstance(value, metrics_lib.Scalar):
value: metrics_lib.Scalar = value
value = float(np.array(value.value))
with writer.as_default():
tf.summary.scalar(name=tag, data=value, step=step)
elif isinstance(value, metrics_lib.Image):
value: metrics_lib.Image = value
image = tf.convert_to_tensor(value.image)
with writer.as_default():
tf.summary.image(
name=tag, data=image, step=step, max_outputs=value.max_outputs)
elif isinstance(value, metrics_lib.Audio):
value: metrics_lib.Audio = value
audio = tf.convert_to_tensor(value.audiodata, dtype=tf.float32)
with writer.as_default():
tf.summary.audio(
name=tag,
data=audio,
sample_rate=value.sample_rate,
step=step,
max_outputs=value.max_outputs,
encoding="wav")
elif isinstance(value, metrics_lib.Histogram):
value: metrics_lib.Histogram = value
values = np.array(value.values)
with writer.as_default():
tf.summary.histogram(
name=tag, data=values, step=step, buckets=value.bins)
elif isinstance(value, metrics_lib.Text):
value: metrics_lib.Text = value
if not isinstance(value.textdata, (str, bytes)):
raise ValueError("`textdata` should be of the type `str` or `bytes`.")
with writer.as_default():
tf.summary.text(name=tag, data=tf.constant(value.textdata), step=step)
elif isinstance(value, metrics_lib.Generic):
with writer.as_default():
tf.summary.write(
tag=tag, tensor=value.tensor, metadata=value.metadata, step=step)
else:
raise TypeError(
f"Value type not understood, got '{type(value).__name__}'.")
def __call__(self, task_name: str, step: int,
metrics: Mapping[str, metrics_lib.MetricValue],
dataset: tf.data.Dataset, inferences: Mapping[str,
Sequence[Any]],
targets: Sequence[Any]) -> None:
"""Log metrics to tensorboard.
Args:
task_name: The name of the task these datapoints are relevant to.
step: The timestep to place this datapoint at.
metrics: A mapping from series names to numeric datapoints to be added to
that series.
dataset: The Task dataset, which is unused by this logger.
inferences: The model outputs, which are unused by this logger.
targets: The postprocessed targets, which are unused by this logger.
"""
del dataset
del inferences
del targets
if step is None:
logging.warning("Step number for the logging session is not provided. "
"A dummy value of -1 will be used.")
step = -1
writer = self._get_summary_writer(os.path.join(self.output_dir, task_name))
for metric_name, metric_value in metrics.items():
# We prefix the tag with "eval/" for backward compatibility.
# TODO(adarob): Find a way to remove this or make it an option.
self._write_metric(
tag=f"eval/{metric_name}",
value=metric_value,
step=step,
writer=writer)
writer.flush()
class TensorBoardLoggerV1(Logger):
"""A logger that writes metrics to TensorBoard summaries in TF1."""
def __init__(self, output_dir: str):
"""TensorBoardLoggerV1 initializer.
Args:
output_dir: The base directory where all logs will be written.
"""
super().__init__(output_dir)
self._summary_writers = {}
def _get_summary_writer(self, task_name: str) -> tf.summary.SummaryWriter:
"""Create (if needed) and return a SummaryWriter for a given task."""
if task_name not in self._summary_writers:
with tf.compat.v1.Graph().as_default():
self._summary_writers[task_name] = tf.compat.v1.summary.FileWriter(
os.path.join(self.output_dir, task_name))
return self._summary_writers[task_name]
def __call__(self,
task_name: str,
step: int,
metrics: Mapping[str, metrics_lib.Scalar],
dataset: tf.data.Dataset,
inferences: Mapping[str, Sequence[Any]],
targets: Sequence[Any]) -> None:
"""Log the eval results and optionally write summaries for TensorBoard.
Note:
This is the default implementation using tensorflow v1 operations. This
only supports logging metrics of the Scalar type.
Args:
task_name: The name of the task these datapoints are relevant to.
step: The timestep to place this datapoint at.
metrics: A mapping from series names to numeric datapoints to be added to
that series.
dataset: The Task dataset, which is unused by this logger.
inferences: The model outputs, which are unused by this logger.
targets: The postprocessed targets, which are unused by this logger.
"""
del dataset
del inferences
del targets
if step is None:
logging.warning("Step number for the logging session is not provided. "
"A dummy value of -1 will be used.")
step = -1
summary_writer = self._get_summary_writer(task_name)
for metric_name, metric_value in metrics.items():
if not isinstance(metric_value, metrics_lib.Scalar):
        raise ValueError(f"Value for metric '{metric_name}' should be of "
                         f"type 'Scalar', got '{type(metric_value).__name__}'.")
summary = tf.compat.v1.Summary()
tag = f"eval/{metric_name}"
logging.info("%s at step %d: %.3f", tag, step, metric_value.value)
summary.value.add(tag=tag, simple_value=metric_value.value)
summary_writer.add_summary(summary, step)
summary_writer.flush()
class TensorAndNumpyEncoder(json.JSONEncoder):
"""JSON Encoder to use when encoding dicts with tensors and numpy arrays."""
def __init__(self, *args, max_ndarray_size=32, **kwargs):
self.max_ndarray_size = max_ndarray_size
super().__init__(*args, **kwargs)
def default(self, obj):
if isinstance(obj, tf.Tensor):
if obj.dtype == tf.bfloat16:
# bfloat16 not supported, convert to float32.
obj = tf.cast(obj, tf.float32)
obj = obj.numpy()
if isinstance(obj, np.ndarray):
obj_dtype = obj.dtype
if str(obj.dtype) == "bfloat16":
# bfloat16 not supported, convert to float32.
obj = obj.astype(np.float32)
if obj.size <= self.max_ndarray_size:
return obj.tolist() # Convert arrays to lists of py-native types.
else:
# If the ndarray is larger than allowed, return a summary string
# instead of the entire array.
first_five_str = str(obj.reshape([-1])[:5].tolist())[1:-1]
return (
f"{type(obj).__name__}(shape={obj.shape}, dtype={obj_dtype}); "
f"first: {first_five_str} ...")
elif (np.issubdtype(type(obj), np.number) or
np.issubdtype(type(obj), np.bool_)):
return obj.item() # Convert most primitive np types to py-native types.
elif hasattr(obj, "dtype") and obj.dtype == tf.bfloat16.as_numpy_dtype:
return float(obj)
elif isinstance(obj, bytes):
# JSON doesn't support bytes. First, try to decode using utf-8 in case
# it's text. Otherwise, just base64 encode the bytes.
try:
return obj.decode("utf-8")
except UnicodeDecodeError:
return base64.b64encode(obj)
return json.JSONEncoder.default(self, obj)
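# Usage sketch (illustrative): the encoder lets json.dumps serialize numpy values
# and TF tensors directly, summarizing arrays larger than `max_ndarray_size`.
#   json.dumps({"logits": np.arange(4, dtype=np.float32)},
#              cls=TensorAndNumpyEncoder)
#   # -> '{"logits": [0.0, 1.0, 2.0, 3.0]}'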
class JSONLogger(Logger):
"""A logger that writes metrics and model outputs to JSONL files."""
def __init__(
self,
output_dir: str,
write_n_results: Optional[int] = None,
json_encoder_cls: Type[json.JSONEncoder] = TensorAndNumpyEncoder):
"""JSONLogger constructor.
Args:
output_dir: The base directory where all logs will be written.
write_n_results: number of scores/predictions to be written to the file at
each step. If None, scores and predictions from all examples are
written.
json_encoder_cls: Class to use for serializing JSON to file.
"""
super().__init__(output_dir)
self._write_n_results = write_n_results
self._json_encoder_cls = json_encoder_cls
def __call__(self,
task_name: str,
step: int,
metrics: Mapping[str, metrics_lib.MetricValue],
dataset: tf.data.Dataset,
inferences: Mapping[str, Sequence[Any]],
targets: Sequence[Any]) -> None:
if step is None:
logging.warning("Step number for the logging session is not provided. "
"A dummy value of -1 will be used.")
step = -1
metrics_fname = os.path.join(self.output_dir, f"{task_name}-metrics.jsonl")
serializable_metrics = {}
for metric_name, metric_value in metrics.items():
if isinstance(metric_value, metrics_lib.Scalar):
serializable_metrics[metric_name] = metric_value.value
elif isinstance(metric_value, metrics_lib.Text):
serializable_metrics[metric_name] = metric_value.textdata
else:
logging.warning(
"Skipping JSON logging of non-serializable metric '%s' of type %s.",
metric_name, type(metric_value))
if metrics:
logging.info("Appending metrics to %s", metrics_fname)
# We simulate an atomic append for filesystems that do not suppport
# mode="a".
file_contents = ""
if tf.io.gfile.exists(metrics_fname):
with tf.io.gfile.GFile(metrics_fname, "r") as f:
file_contents = f.read()
with tf.io.gfile.GFile(metrics_fname + ".tmp", "w") as f:
f.write(file_contents)
f.write(
json.dumps({
"step": step,
**serializable_metrics
}, cls=self._json_encoder_cls))
f.write("\n")
tf.io.gfile.rename(metrics_fname + ".tmp", metrics_fname, overwrite=True)
if self._write_n_results == 0:
return
write_tick = time.time()
inferences_fname = os.path.join(self.output_dir,
f"{task_name}-{step:06}.jsonl")
logging.info("Writing inferences to %s", inferences_fname)
with tf.io.gfile.GFile(inferences_fname, "w") as f:
examples_with_scores = itertools.zip_longest(
tfds.as_numpy(dataset), inferences.get("predictions", []),
targets, inferences.get("scores", []))
if self._write_n_results:
examples_with_scores = itertools.islice(
examples_with_scores, 0, self._write_n_results)
for inp, prediction, target, score in examples_with_scores:
# tfds.as_numpy does not convert ragged tensors
for k in inp:
if isinstance(inp[k], tf.RaggedTensor):
inp[k] = inp[k].numpy()
json_dict = {"input": inp}
# Only write `prediction` if it is JSON serializable.
if prediction is not None:
try:
json.dumps(prediction, cls=self._json_encoder_cls)
json_dict["prediction"] = prediction
except TypeError:
logging.warning("`prediction` is not JSON serializable",
exc_info=True)
# Only write `target` if it is JSON serializable.
try:
json.dumps(target, cls=self._json_encoder_cls)
json_dict["target"] = target
except TypeError:
logging.warning("`target` is not JSON serializable", exc_info=True)
if score is not None:
json_dict["score"] = score
json_str = json.dumps(json_dict, cls=self._json_encoder_cls)
f.write(json_str + "\n")
write_time = time.time() - write_tick
logging.info("Writing completed in %02f seconds (%02f examples/sec).",
write_time,
len(inferences) / write_time)
| 38.026066 | 80 | 0.647785 |
f7436947b0127f7e035d1d3e1ed4f83760e00c34 | 2,214 | py | Python | tests/python/pants_test/help/test_help_formatter.py | dturner-tw/pants | 3a04f2e46bf2b8fb0a7999c09e4ffdf9057ed33f | [
"Apache-2.0"
] | null | null | null | tests/python/pants_test/help/test_help_formatter.py | dturner-tw/pants | 3a04f2e46bf2b8fb0a7999c09e4ffdf9057ed33f | [
"Apache-2.0"
] | null | null | null | tests/python/pants_test/help/test_help_formatter.py | dturner-tw/pants | 3a04f2e46bf2b8fb0a7999c09e4ffdf9057ed33f | [
"Apache-2.0"
] | 1 | 2019-06-10T17:24:34.000Z | 2019-06-10T17:24:34.000Z | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import unittest
from pants.help.help_formatter import HelpFormatter
from pants.help.help_info_extracter import OptionHelpInfo
class OptionHelpFormatterTest(unittest.TestCase):
def format_help_for_foo(self, **kwargs):
ohi = OptionHelpInfo(registering_class=type(None), display_args=['--foo'],
scoped_cmd_line_args=['--foo'], unscoped_cmd_line_args=['--foo'],
typ=bool, fromfile=False, default=None, help='help for foo',
deprecated_version=None, deprecated_message=None, deprecated_hint=None,
choices=None)
ohi = ohi._replace(**kwargs)
lines = HelpFormatter(scope='', show_recursive=False, show_advanced=False,
color=False).format_option(ohi)
self.assertEquals(len(lines), 2)
self.assertIn('help for foo', lines[1])
return lines[0]
def test_format_help(self):
line = self.format_help_for_foo(default='MYDEFAULT')
self.assertEquals('--foo (default: MYDEFAULT)', line)
def test_format_help_fromfile(self):
line = self.format_help_for_foo(fromfile=True)
self.assertEquals('--foo (@fromfile value supported) (default: None)', line)
def test_suppress_advanced(self):
args = ['--foo']
kwargs = {'advanced': True}
lines = HelpFormatter(scope='', show_recursive=False, show_advanced=False,
color=False).format_options('', '', [(args, kwargs)])
self.assertEquals(0, len(lines))
lines = HelpFormatter(scope='', show_recursive=True, show_advanced=True,
color=False).format_options('', '', [(args, kwargs)])
print(lines)
self.assertEquals(5, len(lines))
def test_format_help_choices(self):
line = self.format_help_for_foo(typ=str, default='kiwi', choices='apple, banana, kiwi')
self.assertEquals('--foo (one of: [apple, banana, kiwi] default: kiwi)', line)
| 44.28 | 96 | 0.672087 |
f7438d9ecc8eceb41e12585b9b81484d7c841141 | 3,114 | py | Python | pycatia/in_interfaces/files.py | Tian-Jionglu/pycatia | b315aeb3a74846f134ff6b67b3a6334b9d3905fa | [
"MIT"
] | 1 | 2020-04-27T13:59:10.000Z | 2020-04-27T13:59:10.000Z | pycatia/in_interfaces/files.py | Luanee/pycatia | ea5eef8178f73de12404561c00baf7a7ca30da59 | [
"MIT"
] | null | null | null | pycatia/in_interfaces/files.py | Luanee/pycatia | ea5eef8178f73de12404561c00baf7a7ca30da59 | [
"MIT"
] | null | null | null | #! usr/bin/python3.6
"""
Module initially auto generated using V5Automation files from CATIA V5 R28 on 2020-06-11 12:40:47.360445
.. warning::
The notes denoted "CAA V5 Visual Basic Help" are to be used as reference only.
They are there as a guide as to how the visual basic / catscript functions work
and thus help debugging in pycatia.
"""
from pycatia.in_interfaces.file import File
from pycatia.system_interfaces.collection import Collection
class Files(Collection):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)
| System.IUnknown
| System.IDispatch
| System.CATBaseUnknown
| System.CATBaseDispatch
| System.Collection
| Files
|
| A collection of all the file objects in a folder.
| It lists all the files contained in the folder. It allows to retrieve File
| objects.
"""
def __init__(self, com_object):
super().__init__(com_object, child_object=File)
self.files = com_object
def item(self, i_number: int) -> File:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445))
| o Func Item(long iNumber) As File
|
| Returns a file using its index or its name from the file
| collection.
|
| Parameters:
|
| iIndex
| The index or the name of the file to retrieve from the collection
| of files. As a numerics, this index is the rank of the file in the collection.
| The index of the first file in the collection is 1, and the index of the last
| file is Count. As a string, it is the name you assigned to the file using the
|
|
| AnyObject.Name property.
| Returns:
| The retrieved file
| Example:
| This example retrieves in ThisFile the third file, and it ThatFile the
| file named MyFile. in the TestFiles file collection.
|
| Dim ThisFile As File
| Set ThisFile = TestFiles.Item(3)
| Dim ThatFile As File
| Set ThatFile = TestFiles.Item("MyFile")
:param int i_number:
:return: File
:rtype: File
"""
return File(self.files.Item(i_number))
def __getitem__(self, n: int) -> File:
if (n + 1) > self.count:
raise StopIteration
return File(self.files.item(n + 1))
def __repr__(self):
return f'Files(name="{self.name}")'
| 36.209302 | 108 | 0.496146 |
f7438fb7972362af0e6a82e3fe654cfd9d62a05f | 9,310 | py | Python | flask_sqlite_web/sqliteFunctions.py | haibozhucloud/flask_sqlite_web | d0b407b359557bfd3cbb516d365f06c67f8d61d0 | [
"MIT"
] | 3 | 2019-01-15T06:49:34.000Z | 2021-08-06T02:57:20.000Z | flask_sqlite_web/sqliteFunctions.py | haibozhucloud/flask_sqlite_web | d0b407b359557bfd3cbb516d365f06c67f8d61d0 | [
"MIT"
] | null | null | null | flask_sqlite_web/sqliteFunctions.py | haibozhucloud/flask_sqlite_web | d0b407b359557bfd3cbb516d365f06c67f8d61d0 | [
"MIT"
] | null | null | null | #########################################
# Author : Haibo Zhu
# Email : haibo.zhu@hotmail.com
# created : 2018-11-06 13:38
# Last modified : 2018-11-08 14:49
# Filename : sqliteFunctions.py
# Description :
#########################################
import re
from functools import wraps
import types
import sys
import sqlite3
import pprint
import time
'''
reload(sys)
sys.setdefaultencoding("utf-8")
'''
def execRule(i):
def do_assignment(to_func):
to_func.run = i
return to_func
return do_assignment
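# Example (illustrative): @execRule(i) only tags the decorated method with a
# `run` attribute; checkValid() below sorts the rule methods by that attribute so
# they always execute in the same order, e.g.
#   @execRule(1)
#   def validTable(self): ...   # runs before any method tagged @execRule(2)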
class rules:
""" base rules applied to all modifications """
def __init__(self,colData,postData,tables,method):
self.colData = colData
self.postData = postData
self.method = method
self.methods = ['GET','POST','PUT','DELETE']
self.tables = tables
if self.colData['name'] in self.postData:
self.value = self.postData[self.colData['name']]
else:
self.value = '0'
@execRule(1)
def validTable(self):
""" check if table is in object """
if 'table' not in self.postData or self.postData['table'] not in self.tables:
raise ValueError('invalid table `%s`' % self.postData['table'])
@execRule(2)
def validAction(self):
""" check if action is valid """
if self.method is None:
raise ValueError('no method in request')
elif self.method not in self.methods:
raise ValueError('invalid method `%s`' % self.method)
@execRule(3)
def idRequired(self):
""" check if id parameter passed for edit/delete functions """
if self.method == 'put' or self.method == 'delete':
if 'id' not in self.postData:
raise ValueError('Request must include an id')
@execRule(4)
def notNull(self):
""" check if null value passed for not null columns """
if self.colData['name'] not in self.postData or self.value=='':
      if self.colData['notNull'] == 1 and self.colData['primaryKey'] == 0:
raise ValueError('%s field required' % self.colData['name'])
@execRule(5)
def integer(self):
""" check if integer for integer affinity columns """
if self.colData['dataType'].lower() in ['integer','int','tinyint','smallint','mediumint','bigint','unisgned big int','int2','int8']:
try:
int(self.value)
except Exception as e:
raise ValueError('Non integer value `%s` for field %s' % (self.value, self.colData['name']) )
@execRule(6)
def real(self):
""" check if float for real affinity columns """
if self.colData['dataType'].lower() in ['real','float','double','double precision']:
try:
float(self.value)
except Exception as e:
raise ValueError('Non real/float value `%s` for field %s' % (self.value, self.colData['name']) )
class sqliteAdminFunctions:
""" functions for SQLite3 Admin tool """
def __init__(self,con,tables=[],extraRules=[]):
self.db = con
self.extraRules = extraRules
self.tables = self.tableList(tables)
def dict_factory(self,cursor, row):
""" function to return sqlite results in dict """
d = {}
for idx, col in enumerate(cursor.description):
try:
d[col[0]] = row[idx]
except:
d[col[0]] = "invalid byte"
    return d
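  # Usage note (illustrative; `admin` and the database path are made-up names):
  # dict_factory follows the sqlite3 row_factory protocol, so with a raw
  # connection rows can be returned as dicts:
  #   con = sqlite3.connect("app.db")
  #   con.row_factory = admin.dict_factory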
def tableList(self,tables):
if len(tables) > 0:
return tables
else:
c = self.db.session.execute('SELECT name FROM sqlite_master WHERE type = "table"')
tables= c.fetchall()
if ('sqlite_sequence',) in tables:
tables.remove(('sqlite_sequence',))
return [row[0] for row in tables]
def tableContents(self,table,sort,dir,offset):
""" create list of tables for admin """
res = {}
if table in self.tables:
res['schema'] = self.tableSchemas(table)
if res['schema'][0]['primaryKey'] == 1:
res['primaryKey'] = res['schema'][0]['name']
c = self.db.session.execute('select count({}) as c from {}'.format(res['primaryKey'],table))
res['count'] = c.fetchone()['c']
if sort == '': sort = res['primaryKey']
l = self.db.session.execute('select * from {} order by {} {} limit {},50'.format(table,sort,dir,int(offset)*50))
res['contents'] = l.fetchall()
return res
else:
raise ValueError('No primary key for first column in table `%s`' % table)
else:
raise ValueError('invalid table `%s`' % table)
def tableSchemas(self,table):
""" return table schemas by column """
sch = self.db.session.execute('PRAGMA table_info({})'.format(table))
sch_ls = sch
return [{'name':row[1],'dataType':row[2],'notNull':row[3],'primaryKey':row[5]} for row in sch_ls]
def tableCols(self, table):
tb_info = self.db.session.execute('PRAGMA table_info({})'.format(table) )
sch = tb_info.fetchall()
res = []
for row in sch:
c = ''
c += row[1]
c += ' ' + row[2]
if row[5] == 1:
c += ' PRIMARY KEY AUTOINCREMENT'
res.append(c)
return res
def checkValid(self,q,method):
""" validate admin input """
if 'table' not in q:
raise ValueError('no table value provided')
elif q['table'] not in self.tables:
raise ValueError('invalid table `%s`' % q['table'])
else:
for col in self.tableSchemas(q['table']):
# iterate through rules
r = rules(col,q,self.tables,method) # instantiate rules objecy
if len(self.extraRules)>0:
# add extra rules
for i,x in enumerate(self.extraRules):
x.run = 7+i
def add_method(self, method, i):
setattr(self.__class__, 'extraRule%d' % i, method)
add_method(r,x,i)
# perform rule check
funcs = sorted(
[ getattr(r, field) for field in dir(r) if hasattr(getattr(r, field), "run") ],
key = (lambda field: field.run)
)
for func in funcs:
try:
func()
except Exception as e:
raise
def editTables(self,q,method):
""" edit tables """
qString = ''
qParams = []
# validate input
self.checkValid(q,method)
# create copy of request
q2 = q.copy()
del q2['table']
# edit
ret = ''
if method == 'PUT':
del q2['id']
del q2['primaryKey']
qString = 'update %s set %s where %s=?' % (q['table'],', '.join("%s=?" %p for p in q2.keys()),q['primaryKey'])
qParams = [v for k,v in q2.items()]
qParams.append(q[q['primaryKey']])
# add
elif method == 'POST':
del q2['primaryKey']
qString = 'insert into %s (%s) values (%s)' % (q['table'],','.join(q2.keys()),','.join("?" for p in q2.keys()) )
qParams = [v for k,v in q2.items()]
ret = '<a href="" class="alert-link">Refresh Page</a>'
# delete
elif method == 'DELETE':
qString = 'delete from %s where %s=?' % (q['table'],q['primaryKey'])
qParams = [q['id']]
ret = 'Row deleted'
# execute sql
self.db.session.execute(qString,qParams)
self.db.session.commit()
return ret
  # Check whether the column already exists in the table
def checkColumn(self,col,table):
sch = self.tableSchemas(table)
cols = [{'col':row['name']} for row in sch]
if {'col':col} in cols:
return True
return False
def addCol(self,new_col,new_col_type,table):
    # Add a new column (field) to the table; do nothing if it already exists
if self.checkColumn(new_col,table):
return(0)
command = "ALTER TABLE {} ADD {} {};".format(table, new_col, new_col_type)
res = self.db.session.execute(command)
self.db.session.commit()
def delRow(self, table, id):
command = "delete from {} where id={}".format(table,id)
self.db.session.execute(command)
self.db.session.commit()
def saveRow(self, row, table,id):
r = []
for k,v in row.items():
r.append("{}={}".format(k,v))
command = "update {} set {} where id={}".format(table,",".join(r),id)
self.db.session.execute(command)
self.db.session.commit()
def delCol(self, del_col, table):
cols = self.tableCols(table)
deleted = False
for c in cols:
if c.find(del_col) == 0:
cols.remove(c)
deleted = True
break
if deleted == False:
return
cp_cols= [row.split()[0] for row in cols]
try:
command = "drop table if exists 'temp'"
self.db.session.execute(command)
self.db.session.commit()
command = "create table temp({})".format(','.join(cols))
self.db.session.execute(command)
self.db.session.commit()
command = "insert into {}({}) select {} from {}".format('temp', ','.join(cp_cols), ','.join(cp_cols), table)
self.db.session.execute(command)
self.db.session.commit()
command = "drop table if exists {}".format(table)
self.db.session.execute(command)
self.db.session.commit()
command = "alter table temp rename to {}".format(table)
self.db.session.execute(command)
self.db.session.commit()
except self.db.Error as ex:
            print(" !!! Exception: {}".format(ex))
def addRow(self, table, row):
values = []
keys = []
for k,v in row.items():
values.append(v)
keys.append(k)
command = "insert into {}({}) values({})".format(table, ",".join(keys),",".join(values))
self.db.session.execute(command)
self.db.session.commit()
| 30.12945 | 136 | 0.588829 |
f743b9c07354f5c2f9fa0aee2bcab50a14abbd00 | 2,589 | py | Python | src/lyxnotebook/run_lyxnotebook.py | abarker/lyxNotebook | c458b21e1b183b94172414e14dea671e1f3d4b22 | [
"MIT"
] | 12 | 2015-07-16T13:39:04.000Z | 2022-02-14T15:36:10.000Z | src/lyxnotebook/run_lyxnotebook.py | abarker/lyxNotebook | c458b21e1b183b94172414e14dea671e1f3d4b22 | [
"MIT"
] | 4 | 2020-03-11T00:33:50.000Z | 2020-05-21T22:05:13.000Z | src/lyxnotebook/run_lyxnotebook.py | abarker/lyxNotebook | c458b21e1b183b94172414e14dea671e1f3d4b22 | [
"MIT"
] | 3 | 2015-07-16T13:39:06.000Z | 2020-04-15T19:17:45.000Z | #! /usr/bin/python
"""
=========================================================================
This file is part of LyX Notebook, which works with LyX but is an
independent project. License details (MIT) can be found in the file
COPYING.
Copyright (c) 2012 Allen Barker
=========================================================================
This script runs the Lyx Notebook program. It mainly checks to make sure
that there is no other active Lyx Notebook process running, and if not then
it launches an instance of the main `ControllerOfLyxAndInterpreters` class.
"""
import os
import sys
import time
from .config_file_processing import config_dict
from .controller_of_lyx_and_interpreters import ControllerOfLyxAndInterpreters
from . import gui
lyx_user_directory = config_dict["lyx_user_directory"]
lockfile_path = os.path.abspath(os.path.expanduser(
os.path.join(lyx_user_directory, "lyxNotebook.lockfile")))
def main():
"""
Make sure this script is not already running in an existing process.
This method uses a lock file containing the PID, and was modified from code at
http://shoaibmir.wordpress.com/2009/12/14/pid-lock-file-in-python/
"""
# First check if a lock file already exists.
if os.path.exists(lockfile_path):
# If the lockfile is already there then check the PID number in the lock file.
pidfile = open(lockfile_path, "r")
pidfile.seek(0)
old_PID = pidfile.readline()
# Check if the PID from the lock file matches the current process PID.
if os.path.exists(os.path.join("/proc", old_PID)):
msg = ("You already have an instance of LyX Notebook running."
"\nIt is running as process {}. Exiting.".format(old_PID))
print(msg)
gui.text_warning_popup(msg)
time.sleep(4) # For when a new terminal opened, so message can be read.
sys.exit(1)
else:
# Lock file exists but the program is not running... remove it.
os.remove(lockfile_path)
# Put the current PID in the lock file.
pidfile = open(lockfile_path, "w")
pidfile.write(str(os.getpid()))
pidfile.close()
#
# Start the main controller class.
#
print("\n===================================================\n")
print("Starting the LyX Notebook program...")
print("Version from source directory:\n ",
config_dict["lyx_notebook_source_dir"])
controller = ControllerOfLyxAndInterpreters("lyxNotebookClient")
controller.server_notify_loop()
| 36.464789 | 86 | 0.631904 |
f743bcd58d973e63c4a15c53633ff87895a7e899 | 2,122 | py | Python | conans/client/uploader.py | TyRoXx/conan | 644516b5126f78f46275a9f6a01148183c9d149f | [
"MIT"
] | null | null | null | conans/client/uploader.py | TyRoXx/conan | 644516b5126f78f46275a9f6a01148183c9d149f | [
"MIT"
] | null | null | null | conans/client/uploader.py | TyRoXx/conan | 644516b5126f78f46275a9f6a01148183c9d149f | [
"MIT"
] | null | null | null | import os
from conans.errors import ConanException, NotFoundException
from conans.model.ref import PackageReference
class ConanUploader(object):
def __init__(self, paths, user_io, remote_manager, remote):
self._paths = paths
self._user_io = user_io
self._remote_manager = remote_manager
self._remote = remote
def upload_conan(self, conan_ref, force=False, all_packages=False):
"""Uploads the conans identified by conan_ref"""
export_path = self._paths.export(conan_ref)
if os.path.exists(export_path):
if not force:
self._check_package_date(conan_ref)
self._user_io.out.info("Uploading %s" % str(conan_ref))
self._remote_manager.upload_conan(conan_ref, self._remote)
if all_packages:
for index, package_id in enumerate(self._paths.conan_packages(conan_ref)):
total = len(self._paths.conan_packages(conan_ref))
self.upload_package(PackageReference(conan_ref, package_id), index + 1, total)
else:
self._user_io.out.error("There is no local conanfile exported as %s"
% str(conan_ref))
def upload_package(self, package_ref, index=1, total=1):
"""Uploads the package identified by package_id"""
msg = ("Uploading package %d/%d: %s" % (index, total, str(package_ref.package_id)))
self._user_io.out.info(msg)
self._remote_manager.upload_package(package_ref, self._remote)
def _check_package_date(self, conan_ref):
try:
remote_conan_digest = self._remote_manager.get_conan_digest(conan_ref, self._remote)
except NotFoundException:
return # First upload
local_digest = self._paths.load_digest(conan_ref)
if remote_conan_digest.time > local_digest.time:
raise ConanException("Remote conans is newer than local conans: "
"\n Remote date: %s\n Local date: %s" %
(remote_conan_digest.time, local_digest.time))
| 42.44 | 98 | 0.640905 |
f743dd1533365cffd345d7d20e4b9a39695c05d4 | 593 | py | Python | ogr2osm/__init__.py | roelderickx/ogr2pbf | 7ac99488d8daa9452e2a41e40bf2554dc720166d | [
"MIT"
] | null | null | null | ogr2osm/__init__.py | roelderickx/ogr2pbf | 7ac99488d8daa9452e2a41e40bf2554dc720166d | [
"MIT"
] | null | null | null | ogr2osm/__init__.py | roelderickx/ogr2pbf | 7ac99488d8daa9452e2a41e40bf2554dc720166d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Copyright (c) 2012-2021 Roel Derickx, Paul Norman <penorman@mac.com>,
Sebastiaan Couwenberg <sebastic@xs4all.nl>, The University of Vermont
<andrew.guertin@uvm.edu>, github contributors
Released under the MIT license, as given in the file LICENSE, which must
accompany any distribution of this code.
'''
from .translation_base_class import TranslationBase
from .datawriter_base_class import DataWriterBase
from .osm_datawriter import OsmDataWriter
from .pbf_datawriter import PbfDataWriter
from .ogr_datasource import OgrDatasource
from .osm_data import OsmData
| 32.944444 | 72 | 0.806071 |
f743f48c490c683fd697c5d1f8f67b90aef3bce7 | 1,367 | py | Python | knx/text/postagger/default_tagger.py | gofortargets/CNN_brandsafety | 77c9048a7aca9458879585925e98220435c052e9 | [
"Apache-2.0"
] | 2 | 2017-07-27T03:50:37.000Z | 2017-09-28T06:42:52.000Z | knx/text/postagger/default_tagger.py | gofortargets/CNN_brandsafety | 77c9048a7aca9458879585925e98220435c052e9 | [
"Apache-2.0"
] | null | null | null | knx/text/postagger/default_tagger.py | gofortargets/CNN_brandsafety | 77c9048a7aca9458879585925e98220435c052e9 | [
"Apache-2.0"
] | null | null | null | import logging
LOGGER = logging.getLogger(__name__)
try:
# from .import stanford_tagger as postagger
# LOGGER.debug('Use stanford_tagger')
from . import perceptron_tagger as postagger
LOGGER.debug('Use perceptron_tagger')
except:
from . import nltk_tagger as postagger
LOGGER.debug('Use nltk_tagger')
def tag(text):
"""Returns the POS tags of the text, using the default POS tagger (currently PerceptronTagger)
Parameters
----------
text : str or iterable
This is the text to be processed.
If it's a str, it will be sentence tokenized and word tokenized using nltk
If it's an iterable, it will be assumed to be a list of tokens
Returns
-------
tags : list
List of (word, pos) tuples
"""
return postagger.tag(text)
if __name__ == '__main__':
import time
start_time = time.time()
print tag('The horse raced past the barn fell.')
print 'Done tagging in %.3fs' % (time.time() - start_time)
start_time = time.time()
print tag(['The', 'horse', 'raced', 'past', 'the', 'barn', 'fell', '.'])
print 'Done tagging (tokenized) in %.3fs' % (time.time() - start_time)
while True:
sentence = raw_input('Enter a sentence: ')
start_time = time.time()
print tag(sentence)
print 'Done in %.3fs' % (time.time() - start_time)
| 31.790698 | 98 | 0.637893 |
f743fd4aab41716900ee63a6f68f8af1b6a32303 | 1,567 | py | Python | hooks/obsolete_messages.py | mondeja/pre-commit-po-hooks | 6c96941357699b283c46528d34821631e1aff324 | [
"BSD-3-Clause"
] | null | null | null | hooks/obsolete_messages.py | mondeja/pre-commit-po-hooks | 6c96941357699b283c46528d34821631e1aff324 | [
"BSD-3-Clause"
] | 2 | 2021-08-25T04:56:36.000Z | 2022-02-25T16:47:39.000Z | hooks/obsolete_messages.py | mondeja/pre-commit-po-hooks | 6c96941357699b283c46528d34821631e1aff324 | [
"BSD-3-Clause"
] | null | null | null | """Checks for obsolete messages in PO files.
Returns an error code if a PO file has an obsolete message.
"""
import argparse
import sys
def check_obsolete_messages(filenames, quiet=False):
"""Warns about all obsolete messages found in a set of PO files.
Parameters
----------
filenames : list
Set of file names to check.
quiet : bool, optional
Enabled, don't print output to stderr when an obsolete message is found.
Returns
-------
int: 0 if no obsolete messages found, 1 otherwise.
"""
exitcode = 0
for filename in filenames:
with open(filename) as f:
content_lines = f.readlines()
_inside_obsolete_message = False
for i, line in enumerate(content_lines):
if not _inside_obsolete_message and line.startswith("#~ "):
_inside_obsolete_message = True
exitcode = 1
if not quiet:
sys.stderr.write(f"Found obsolete message at {filename}:{i + 1}\n")
elif _inside_obsolete_message and not line.startswith("#~ "):
_inside_obsolete_message = False
return exitcode
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"filenames", nargs="*", help="Filenames to check for obsolete messages"
)
parser.add_argument("-q", "--quiet", action="store_true", help="Supress output")
args = parser.parse_args()
return check_obsolete_messages(args.filenames, quiet=args.quiet)
if __name__ == "__main__":
exit(main())
| 27.017241 | 87 | 0.634333 |
f743fe0f5d383fefb27261a22b64df7f8debdb2b | 1,273 | py | Python | migrations/versions/0122_add_service_letter_contact.py | cds-snc/notifier-api | 90b385ec49efbaee7e607516fc7d9f08991af813 | [
"MIT"
] | 41 | 2019-11-28T16:58:41.000Z | 2022-01-28T21:11:16.000Z | migrations/versions/0122_add_service_letter_contact.py | cds-snc/notification-api | b1c1064f291eb860b494c3fa65ac256ad70bf47c | [
"MIT"
] | 1,083 | 2019-07-08T12:57:24.000Z | 2022-03-08T18:53:40.000Z | migrations/versions/0122_add_service_letter_contact.py | cds-snc/notifier-api | 90b385ec49efbaee7e607516fc7d9f08991af813 | [
"MIT"
] | 9 | 2020-01-24T19:56:43.000Z | 2022-01-27T21:36:53.000Z | """
Revision ID: 0122_add_service_letter_contact
Revises: 0121_nullable_logos
Create Date: 2017-09-21 12:16:02.975120
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
revision = "0122_add_service_letter_contact"
down_revision = "0121_nullable_logos"
def upgrade():
op.create_table(
"service_letter_contacts",
sa.Column("id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("service_id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("contact_block", sa.Text(), nullable=False),
sa.Column("is_default", sa.Boolean(), nullable=False),
sa.Column("created_at", sa.DateTime(), nullable=False),
sa.Column("updated_at", sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(
["service_id"],
["services.id"],
),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(
op.f("ix_service_letter_contact_service_id"),
"service_letter_contacts",
["service_id"],
unique=False,
)
def downgrade():
op.drop_index(
op.f("ix_service_letter_contact_service_id"),
table_name="service_letter_contacts",
)
op.drop_table("service_letter_contacts")
| 28.288889 | 79 | 0.6685 |
f744154f010a7e14fc8b2467e849f89c2f8b44a6 | 2,581 | py | Python | scons/templates/buster_py2_options.py | markendr/esys-escript.github.io | 0023eab09cd71f830ab098cb3a468e6139191e8d | [
"Apache-2.0"
] | null | null | null | scons/templates/buster_py2_options.py | markendr/esys-escript.github.io | 0023eab09cd71f830ab098cb3a468e6139191e8d | [
"Apache-2.0"
] | null | null | null | scons/templates/buster_py2_options.py | markendr/esys-escript.github.io | 0023eab09cd71f830ab098cb3a468e6139191e8d | [
"Apache-2.0"
] | null | null | null |
##############################################################################
#
# Copyright (c) 2003-2020 by The University of Queensland
# http://www.uq.edu.au
#
# Primary Business: Queensland, Australia
# Licensed under the Apache License, version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
# Development 2012-2013 by School of Earth Sciences
# Development from 2014 by Centre for Geoscience Computing (GeoComp)
# Development from 2019 by School of Earth and Environmental Sciences
#
##############################################################################
# This is a template configuration file for escript on Debian/GNU Linux.
# Refer to README_FIRST for usage instructions.
escript_opts_version = 203
#cxx_extra = '-Wno-literal-suffix'
openmp = True
#mpi = 'OPENMPI'
import os
d_mpi_path = '/usr/include/openmpi'
mpi_prefix = os.path.split(os.path.realpath(d_mpi_path))[0]
mpi_libs = ['mpi_cxx', 'mpi']
netcdf = 4
umfpack = True
umfpack_prefix = ['/usr/include/suitesparse', '/usr/lib']
umfpack_libs = ['umfpack', 'blas', 'amd']
lapack_prefix = ['/usr/include/atlas', '/usr/lib/atlas-base']
#silo = True
silo_libs = ['siloh5', 'hdf5_openmpi']
dudley_assemble_flags = '-funroll-loops'
pythoncmd="/usr/bin/python2"
pythonlibname = 'python2.7'
pythonlibpath = '/usr/lib/x86_64-linux-gnu/'
pythonincpath = '/usr/include/python2.7'
import subprocess
import os
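# Ask the linker for its default search directories and scan them for the installed
# libboost_python* shared objects, so the correct boost-python library names can be
# derived for boost_libs below.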
p = subprocess.Popen(["ld","--verbose"], stdout=subprocess.PIPE)
out,err = p.communicate()
spath = [x[13:-3] for x in out.split() if 'SEARCH_DIR' in x]
p2name = ''
p3name = ''
for name in spath:
try:
l=os.listdir(name)
p2res=[x for x in l if x.startswith('libboost_python2') and x.endswith('.so')]
p3res=[x for x in l if x.startswith('libboost_python3') and x.endswith('.so')]
if len(p2name)==0 and len(p2res)>0:
p2name=p2res[-1]
if len(p3name)==0 and len(p3res)>0:
p3name=p3res[-1]
except OSError:
pass
# boost-python library/libraries to link against
boost_libs = [p2name[3:-3]]
# this can be used by options files importing us
boost_py2_libs = [p2name[3:-3]]
boost_py3_libs = [p3name[3:-3]]
from site_init import getdebbuildflags
# Now we add the debian build flags
debstuff = getdebbuildflags()
if len(debstuff) > 0:
print("Building with the following additional flags from debian: "+str(debstuff))
for i in debstuff:
k=i[0]
v=i[1]
try:
exec(k+"+=' "+v+"'")
except NameError:
exec(k+"='"+v+"'")
mathjax_path='/usr/share/javascript/mathjax/MathJax.js'
| 30.72619 | 83 | 0.667183 |
f7441e5310ab0a958ac934c6fcc8198fb0c61d64 | 301 | py | Python | battleforcastile/utils/display_power.py | battleforcastile/battleforcastile | 65223fcb56ecc550f1a7c7b70beadff22c866d85 | [
"MIT"
] | null | null | null | battleforcastile/utils/display_power.py | battleforcastile/battleforcastile | 65223fcb56ecc550f1a7c7b70beadff22c866d85 | [
"MIT"
] | 1 | 2021-08-21T10:16:03.000Z | 2021-08-21T10:16:03.000Z | battleforcastile/utils/display_power.py | battleforcastile/battleforcastile | 65223fcb56ecc550f1a7c7b70beadff22c866d85 | [
"MIT"
] | null | null | null | def display_power(power: dict) -> str:
name = power['name']
invocation_unit = power['invocation']['card_name']
invocation_instances = power['invocation']['num_instances']
cost = power['cost']
return f'{name} (Invocation: {invocation_instances} {invocation_unit}) - Cost: {cost}'
| 33.444444 | 90 | 0.681063 |
f74441db2773a5a826056f981dd9aed9b7cc6d50 | 14,337 | py | Python | openrl/algorithms/imitation/imitation_learning.py | natetsang/open-rl | 426723d0d6759672ce77e02afeb55cbeb68fcfb0 | [
"MIT"
] | 2 | 2021-09-10T18:52:35.000Z | 2022-01-03T19:48:06.000Z | openrl/algorithms/imitation/imitation_learning.py | natetsang/open-rl | 426723d0d6759672ce77e02afeb55cbeb68fcfb0 | [
"MIT"
] | 1 | 2021-12-28T17:42:23.000Z | 2021-12-28T17:42:23.000Z | openrl/algorithms/imitation/imitation_learning.py | natetsang/open-rl | 426723d0d6759672ce77e02afeb55cbeb68fcfb0 | [
"MIT"
] | null | null | null | import gym
import time
import pickle
import argparse
import numpy as np
import tensorflow as tf
from typing import Callable, Union, Tuple, List
from models.models import actor_fc_discrete_network, actor_critic_fc_discrete_network
from algorithms.imitation.utils import plot_training_results
from util.replay_buffer import ReplayBuffer
# Set up
GAMMA = 0.99
LEARNING_RATE = 0.0001
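# Agent covering both vanilla behavioral cloning and DAgger: it fits the policy network to
# expert transitions stored in the replay buffer and, when run_dagger is enabled, relabels
# newly sampled trajectories with the expert policy's actions before each training round.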
class ImitationAgent:
def __init__(self,
environment: gym.Env,
model_fn: Callable[..., tf.keras.Model],
optimizer: tf.keras.optimizers,
run_dagger: bool,
expert_policy,
expert_data_path,
replay_buffer: ReplayBuffer,
model_kwargs: dict = None,
train_kwargs: dict = None,
save_dir: str = None) -> None:
# Env vars
self.env = environment
self.state_dims = model_kwargs.get('state_dims')
self.num_actions = model_kwargs.get('num_actions')
num_hidden_layers = model_kwargs.get("num_hidden_layers")
hidden_size = model_kwargs.get("hidden_size")
# Algorithm
self.run_dagger = run_dagger
# Expert
self.expert_policy = expert_policy
self.expert_data = ImitationAgent.load_expert_data(expert_data_path)
# Actor model
self.model = model_fn(state_dims=self.state_dims,
num_actions=self.num_actions,
num_hidden_layers=num_hidden_layers,
hidden_size=hidden_size)
self.optimizer = optimizer
self.loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False) # Discrete action space only
# Replay buffer
self.replay_buffer = replay_buffer
# Training vars
self.cur_episode = 0
self.total_steps = 0
self.max_ep_len = train_kwargs.get("max_ep_len")
self.batch_size = train_kwargs.get("batch_size") # Batch size of data collection from buffer
self.train_batch_size = train_kwargs.get('train_batch_size') # Batch size for training models
self.eval_batch_size = train_kwargs.get('eval_batch_size') # Batch size for eval
self.num_agent_train_steps_per_iter = train_kwargs.get('num_agent_train_steps_per_iter') # Grad updates per run
# Save directories
self.save_dir = save_dir
def save_models(self) -> None:
self.model.save(self.save_dir)
def load_models(self) -> tf.keras.Model:
self.model = tf.keras.models.load_model(self.save_dir)
return self.model
@staticmethod
def load_expert_data(path):
with open(path, 'rb') as f:
expert_data = pickle.load(f)
return expert_data
def sample_random_trajectory(self) -> Tuple[List[Tuple], Union[int, float]]:
"""
Sample one trajectory using the current policy, capped at self.max_ep_len steps.
:return: (transitions, total_rewards), where transitions is a list of (s, a, r, s', d) tuples
"""
state = tf.expand_dims(tf.convert_to_tensor(self.env.reset()), 0)
num_steps = 0
total_rewards = 0
transitions = [] # transition tuples (s,a,r,s',d)
while True:
num_steps += 1
action_prob = self.model(state)
action = np.random.choice(self.num_actions, p=np.squeeze(action_prob))
next_state, reward, done, _ = self.env.step(action)
next_state = tf.reshape(next_state, [1, self.state_dims])
total_rewards += reward
if done or num_steps > self.max_ep_len:
transitions.append((state, action, reward, next_state, 1))
break
transitions.append((state, action, reward, next_state, 0))
state = next_state
return transitions, total_rewards
def sample_n_trajectories(self) -> Tuple[List, List, int]:
"""
Sample trajectories until at least `self.batch_size` transitions have been collected.
Each trajectory is capped at `self.max_ep_len` steps/transitions. Note that transitions
are different than trajectories! A transition is a tuple (s,a,r,s',d) and a trajectory
is made up of one or more transitions.
:return: (transitions, trajectory_rewards, num_steps_this_batch)
"""
num_steps_this_batch = 0
trajectory_rewards = []
transitions = []
while num_steps_this_batch < self.batch_size:
traj, rews = self.sample_random_trajectory()
num_steps_this_batch += len(traj)
trajectory_rewards.append(rews)
# Note that we're extending, not appending, because we don't care about trajectories, we care about
# the transitions. If we appended, it would be ([[(tran 1), (tran 2)], ..., [(tran n), (tran n+1)]],
# where each sublist is a trajectory. But by extending, it's instead ([(tran 1), ..., (tran n)]
transitions.extend(traj)
return transitions, trajectory_rewards, num_steps_this_batch
def relabel_actions_with_expert(self, transitions: List[Tuple]) -> List[Tuple]:
"""
Given a batch of transition tuples, query the Expert Policy and update the action based on
the Expert. This is the key difference between vanilla behavioral cloning and DAgger. This
step is equivalent to asking a human expert to label our dataset with actions the correct actions.
"""
updated_transitions = []
for transition in transitions:
state, action, reward, next_state, done = transition
action_prob, _ = self.expert_policy(state)
expert_action = np.argmax(np.squeeze(action_prob))
updated_transitions.append((state, expert_action, reward, next_state, done))
return updated_transitions
def train_episode(self) -> List:
# Step 1: Sample trajectories
if self.cur_episode == 0:
# Load expert_data
transitions = self.expert_data
else:
# Or sample trajectories using current policy
transitions, _, _ = self.sample_n_trajectories()
# Step 2: For DAgger only, ask expert policy to label data with actions
if self.run_dagger and self.cur_episode > 0:
transitions = self.relabel_actions_with_expert(transitions)
# Step 3: Store the sampled transitions in the replay buffer
self.replay_buffer.store_transitions_batch(transitions)
# Step 4: Train model!
losses = []
for train_step in range(self.num_agent_train_steps_per_iter):
# Sample a random batch of data from the replay buffer
states, actions, _, _, _ = self.replay_buffer.sample(batch_size=self.train_batch_size)
with tf.GradientTape() as tape:
action_prob = self.model(states)
loss = self.loss_fn(actions, action_prob)
grads = tape.gradient(loss, self.model.trainable_variables)
self.optimizer.apply_gradients(zip(grads, self.model.trainable_variables))
losses.append(loss)
self.cur_episode += 1
return losses
def run_agent(self, render=False) -> Tuple[float, int]:
total_reward, total_steps = 0, 0
state = self.env.reset()
done = False
while not done:
if render:
self.env.render()
# Select action
action_prob = self.model(tf.expand_dims(state, axis=0))
action = np.argmax(np.squeeze(action_prob))
# Interact with environment
state, reward, done, _ = self.env.step(action)
# Bookkeeping
total_reward += reward
total_steps += 1
return total_reward, total_steps
def main() -> None:
# Check input params
if args.run_dagger:
assert args.epochs > 1, "DAgger needs more than 1 iteration of training, where each iter " \
"we query the expert and train"
else:
assert args.epochs == 1, "Vanilla behavior cloning collects expert data only once and does traditional " \
"supervised learning on that dataset."
# Create environment
env = gym.make(args.env)
# Set seeds
if args.seed:
np.random.seed(args.seed)
tf.random.set_seed(args.seed)
env.seed(args.seed)
# Create helper vars for model creation
_state_dims = len(env.observation_space.high)
_action_dims = 1
_num_actions = env.action_space.n
# Create Replay Buffer
buffer = ReplayBuffer(state_dims=_state_dims, action_dims=_action_dims)
# Instantiate optimizer
opt = tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE)
# Instantiate expert policy from file
# TODO >> I think it's a bit cleaner to load the entire model instead of just the weights
# but I'm getting a TF error that I think was fixed in a later version. I should probably
# try updating the version and seeing if it fixes itself.
expert = actor_critic_fc_discrete_network(state_dims=_state_dims,
num_actions=_num_actions,
num_hidden_layers=2,
hidden_size=128)
expert.load_weights(args.expert_policy_file)
# Create agent
agent = ImitationAgent(environment=env,
model_fn=actor_fc_discrete_network,
optimizer=opt,
replay_buffer=buffer,
run_dagger=args.run_dagger,
expert_policy=expert,
expert_data_path=args.expert_data,
model_kwargs=dict(state_dims=_state_dims,
num_actions=_num_actions,
num_hidden_layers=2,
hidden_size=256),
train_kwargs=dict(max_ep_len=args.max_ep_len,
batch_size=args.batch_size,
train_batch_size=args.train_batch_size,
eval_batch_size=args.eval_batch_size,
num_agent_train_steps_per_iter=args.num_agent_train_steps_per_iter)
)
# Run training
ep_mean_rewards_history, ep_max_rewards_history, ep_min_rewards_history = [], [], []
ep_mean_loss_history, ep_max_loss_history, ep_min_loss_history = [], [], []
ep_steps_history = []
ep_wallclock_history = []
start = time.time()
for e in range(args.epochs):
# Run one episode
ep_loss = agent.train_episode()
ep_rew, ep_steps = agent.run_agent()
# Prepare for logging
mean_ep_rew, max_ep_rew, min_ep_rew, std_ep_rew = np.mean(ep_rew), np.max(ep_rew), np.min(ep_rew), np.std(ep_rew)
mean_ep_loss, max_ep_loss, min_ep_loss = np.mean(ep_loss), np.max(ep_loss), np.min(ep_loss)
ep_wallclock_history.append(time.time() - start)
ep_mean_rewards_history.append(mean_ep_rew)
ep_max_rewards_history.append(max_ep_rew)
ep_min_rewards_history.append(min_ep_rew)
ep_mean_loss_history.append(mean_ep_loss)
ep_max_loss_history.append(max_ep_loss)
ep_min_loss_history.append(min_ep_loss)
ep_steps_history.append(ep_steps)
template = "EPISODE {} | mean ep reward: {:.2f} - max ep reward: {:.2f}" \
" - min ep reward: {:.2f} - std ep reward: {:.2f} - mean ep loss {:.2f}"
print(template.format(e, mean_ep_rew, max_ep_rew, min_ep_rew, std_ep_rew, mean_ep_loss))
# Now that we've completed training, let's plot the results
print(f"Training time elapsed (sec): {round(time.time() - start, 2)}")
# Let's evaluate the performance of the trained agent
print("Beginning evaluation of trained agent!")
eval_rew = []
for i in range(50):
ep_rew, ep_steps = agent.run_agent()
eval_rew.append(ep_rew)
print(f"Evaluation rewards: mean - {np.mean(eval_rew)} | min - {np.min(eval_rew)} | max - {np.max(eval_rew)}")
# Plot summary of results
plot_training_results(mean_rewards_history=ep_mean_rewards_history,
max_rew_history=ep_max_rewards_history,
min_rew_history=ep_min_rewards_history,
mean_loss_history=ep_mean_loss_history,
max_loss_history=ep_max_loss_history,
min_loss_history=ep_min_loss_history,
steps_history=ep_steps_history,
wallclock_history=ep_wallclock_history,
save_dir="./results.png")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--seed", type=int, default=1)
parser.add_argument("--env", type=str, default="CartPole-v0")
parser.add_argument('--expert_policy_file', type=str, default='./checkpoints/expert_model_weights')
parser.add_argument('--expert_data', type=str, default='expert_data.pkl')
# parser.add_argument("--run_dagger", action="store_false")
parser.add_argument("--run_dagger", type=bool, default=False)
parser.add_argument("--epochs", type=int, default=1)
parser.add_argument('--max_ep_len', type=int, default=100) # max trajectory length
parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=20) # number of grad updates per iter
parser.add_argument('--batch_size', type=int, default=1000) # num steps/transitions to sample for itr 1+
parser.add_argument('--train_batch_size', type=int, default=512) # training batch size per model
parser.add_argument('--eval_batch_size', type=int, default=400) # steps collected per eval iteration
args = parser.parse_args()
main()
| 43.054054 | 121 | 0.625584 |
f74451751805bace06484342526dd58e1fdcc09d | 7,045 | py | Python | train.py | BXuan694/SOLO-pytorch | aef0ac47ce6989f6633fe4f71070bd6944c39abb | [
"Apache-2.0"
] | 2 | 2022-03-19T12:58:58.000Z | 2022-03-19T15:01:02.000Z | train.py | BXuan694/SOLO-pytorch | aef0ac47ce6989f6633fe4f71070bd6944c39abb | [
"Apache-2.0"
] | 1 | 2022-03-19T15:06:25.000Z | 2022-03-23T01:48:58.000Z | train.py | BXuan694/SOLO-pytorch | aef0ac47ce6989f6633fe4f71070bd6944c39abb | [
"Apache-2.0"
] | null | null | null | from data.config import cfg, process_funcs_dict
from data.coco import CocoDataset
from data.loader import build_dataloader
#from modules.solov1 import SOLOV1 as solo
# from modules.solov2 import SOLOV2 as solo
from modules.solov1d import SOLOV1 as solo
import time
import torch
import numpy as np
# Gradient clipping / balancing
def clip_grads(params_):
params_ = list(filter(lambda p: p.requires_grad and p.grad is not None, params_))
if len(params_) > 0:
return torch.nn.utils.clip_grad.clip_grad_norm_(params_, max_norm=35, norm_type=2)
# Set a new learning rate on the optimizer
def set_lr(optimizer_, newLr_):
for paramGroup_ in optimizer_.param_groups:
paramGroup_['lr'] = newLr_
# Set requires_grad to False
def gradinator(x_):
x_.requires_grad = False
return x_
# Build the processing pipeline
def build_process_pipeline(pipelineConfgs_):
assert isinstance(pipelineConfgs_, list)
process_pipelines = []
for pConfig_ in pipelineConfgs_:
assert isinstance(pConfig_, dict) and 'type' in pConfig_
args = pConfig_.copy()
obj_type = args.pop('type')
if isinstance(obj_type, str):
process_pipelines.append(process_funcs_dict[obj_type](**args))
return process_pipelines
# Compute the warmup learning rate
def get_warmup_lr(curIter_, totalIters_, baseLr_, warmupRatio_, warmUpOption='linear'):
if warmUpOption == 'constant':
warmupLr = baseLr_ * warmupRatio_
elif warmUpOption == 'linear':
k = (1 - curIter_ / totalIters_) * (1 - warmupRatio_)
warmupLr = baseLr_ * (1 - k)
elif warmUpOption == 'exp':
k = warmupRatio_**(1 - curIter_ / totalIters_)
warmupLr = baseLr_ * k
return warmupLr
def train(globalStartEpoch, totalEpoches):
# train process pipelines func
trainTransformsPiplines = build_process_pipeline(cfg.train_pipeline)
print(trainTransformsPiplines)
# build datashet
casiadata = CocoDataset(ann_file=cfg.dataset.train_info,
pipeline = trainTransformsPiplines,
img_prefix = cfg.dataset.trainimg_prefix,
data_root=cfg.dataset.train_prefix)
torchdataLoader = build_dataloader(casiadata, cfg.imgs_per_gpu, cfg.workers_per_gpu, num_gpus=cfg.num_gpus, shuffle=True)
if cfg.resume_from is None:
model = solo(cfg, pretrained=None, mode='train')
print('cfg.resume_from is None')
else:
model = solo(cfg, pretrained=cfg.resume_from, mode='train')
model = model.cuda()
model = model.train()
lrOri = cfg.optimizer['lr']
lrStages = cfg.lr_config["step"]
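# Pre-compute a per-epoch learning-rate table: start from the base LR and divide by 10
# at every milestone epoch listed in cfg.lr_config["step"].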
lrList = np.full(totalEpoches, lrOri)
for ii in range(len(lrStages)):
lrList[lrStages[ii]:]*=0.1
print("starting epoch: ", globalStartEpoch)
print("lr adapting stages: ", end=' ')
for ii in range(len(lrStages)):
print(cfg.lr_config["step"][ii], end=" ")
print("\ntotal training epoches: ", totalEpoches)
optimizer_config = cfg.optimizer
optimizer = torch.optim.SGD(model.parameters(), lr=optimizer_config['lr'], momentum=optimizer_config['momentum'], weight_decay=optimizer_config['weight_decay'])
batchSize = cfg.imgs_per_gpu * cfg.num_gpus
epochSize = len(casiadata) // batchSize
# nums of trained epoches, idx of epoch to start
pastEpoches = globalStartEpoch
# nums of trained iters, idx of iter to start
pastIters = (globalStartEpoch-1) * epochSize
# nums of left epoches
leftEpoches = totalEpoches - pastEpoches + 1
# nums of left iters
leftIters = leftEpoches * epochSize
print('##### begin train ######')
currentIter = 0
for epoch in range(leftEpoches):
currentEpoch = epoch + pastEpoches
# Stop training
if currentEpoch >= totalEpoches:
print("Current epoch exceeds the configured number of epochs; stopping training.")
return
# Only used for logging
loss_sum = 0.0
loss_ins = 0.0
loss_cate = 0.0
for j, data in enumerate(torchdataLoader):
iterStartTime = time.time()
if cfg.lr_config['warmup'] is not None and pastIters < cfg.lr_config['warmup_iters']:
cur_lr = get_warmup_lr(pastIters, cfg.lr_config['warmup_iters'],
optimizer_config['lr'], cfg.lr_config['warmup_ratio'],
cfg.lr_config['warmup'])
else:
cur_lr = lrList[currentEpoch]
set_lr(optimizer, cur_lr)
imgs = gradinator(data['img'].data[0].cuda())
img_meta = data['img_metas'].data[0] # raw metadata about the input images
gt_bboxes = []
for bbox in data['gt_bboxes'].data[0]:
bbox = gradinator(bbox.cuda())
gt_bboxes.append(bbox)
gt_masks = data['gt_masks'].data[0] #cpu numpy data
gt_labels = []
for label in data['gt_labels'].data[0]:
label = gradinator(label.cuda())
gt_labels.append(label)
loss = model.forward(img=imgs,
img_meta=img_meta,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
gt_masks=gt_masks)
losses = loss['loss_ins'] + loss['loss_cate']
loss_sum += losses.cpu().item()
loss_ins += loss['loss_ins'].cpu().item()
loss_cate += loss['loss_cate'].cpu().item()
optimizer.zero_grad()
losses.backward()
if torch.isfinite(losses).item():
grad_norm = clip_grads(model.parameters()) # gradient clipping / balancing
optimizer.step()
else:
raise NotImplementedError("loss type error! Can't backward!")
leftIters -= 1
pastIters += 1
currentIter += 1
showIters = 10
if j%int(showIters) == 0 and j != 0:
iterLastTime = time.time() - iterStartTime
left_seconds = iterLastTime * leftIters
left_minutes = left_seconds / 60.0
left_hours = left_minutes / 60.0
left_days = left_hours // 24
left_hours = left_hours % 24
out_srt = 'epoch:['+str(currentEpoch)+']/['+str(totalEpoches)+'],' # end of epoch of idx currentEpoch
out_srt = out_srt + '['+str(j)+']/'+str(epochSize)+'], left_time: ' + str(left_days)+'days '+format(left_hours,'.2f')+'hours,'
print(out_srt, "loss:", format(loss_sum/showIters,'.4f'), 'loss_ins:', format(loss_ins/showIters,'.4f'), "loss_cate:", format(loss_cate/showIters,'.4f'), "lr:", format(cur_lr,'.8f'))
loss_sum = 0.0
loss_ins = 0.0
loss_cate = 0.0
leftEpoches -= 1
save_name = "./weights/solo1/" + cfg.name + "_epoch_" + str(currentEpoch) + ".pth"
model.save_weights(save_name)
if __name__ == '__main__':
train(globalStartEpoch=cfg.epoch_iters_start, totalEpoches=cfg.total_epoch) # starting epoch for this run is taken from the config
| 37.673797 | 198 | 0.60582 |
f7446ab7e38ca354c89e6ca03d9847a06491b576 | 3,098 | py | Python | chainer/links/cnet/link_cnet_linear.py | asrlabncku/RAP | 11fab37c8d98257ec0aed1b306aa9709a3a51328 | [
"MIT"
] | null | null | null | chainer/links/cnet/link_cnet_linear.py | asrlabncku/RAP | 11fab37c8d98257ec0aed1b306aa9709a3a51328 | [
"MIT"
] | null | null | null | chainer/links/cnet/link_cnet_linear.py | asrlabncku/RAP | 11fab37c8d98257ec0aed1b306aa9709a3a51328 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
import numpy
from chainer import link
from chainer.functions.cnet import function_cnet_linear
import math
from chainer import initializers
from chainer import cuda
class CnetLinear(link.Link):
"""Binary Linear layer (a.k.a. binary fully-connected layer).
This is a link that wraps the :func:`~chainer.functions.linear` function,
and holds a weight matrix ``W`` and optionally a bias vector ``b`` as
parameters.
The weight matrix ``W`` is initialized with i.i.d. Gaussian samples, each
of which has zero mean and deviation :math:`\\sqrt{1/\\text{in_size}}`. The
bias vector ``b`` is of size ``out_size``. Each element is initialized with
the ``bias`` value. If ``nobias`` argument is set to True, then this link
does not hold a bias vector.
Args:
in_size (int): Dimension of input vectors.
out_size (int): Dimension of output vectors.
wscale (float): Scaling factor of the weight matrix.
bias (float): Initial bias value.
nobias (bool): If True, then this function does not use the bias.
initialW (2-D array): Initial weight value. If ``None``, then this
function uses to initialize ``wscale``.
initial_bias (1-D array): Initial bias value. If ``None``, then this
function uses to initialize ``bias``.
.. seealso:: :func:`~chainer.functions.linear`
Attributes:
W (~chainer.Variable): Weight parameter.
b (~chainer.Variable): Bias parameter.
"""
def __init__(self, in_size, out_size, wscale=1, bias=0, nobias=False,
initialW=None, initial_bias=None):
super(CnetLinear, self).__init__()
self.cname = "l_cnet_linear"
# For backward compatibility
self.initialW = initialW
self.wscale = wscale
self.out_size = out_size
# For backward compatibility, the scale of weights is proportional to
# the square root of wscale.
self._W_initializer = initializers._get_initializer(
initialW, math.sqrt(wscale))
if in_size is None:
self.add_uninitialized_param('W')
else:
self._initialize_params(in_size)
if nobias:
self.b = None
else:
if initial_bias is None:
initial_bias = bias
bias_initializer = initializers._get_initializer(initial_bias)
self.add_param('b', out_size, initializer=bias_initializer)
def _initialize_params(self, in_size):
self.add_param('W', (self.out_size, in_size), initializer=self._W_initializer)
def __call__(self, x):
"""Applies the linear layer.
Args:
x (~chainer.Variable): Batch of input vectors.
Returns:
~chainer.Variable: Output of the linear layer.
"""
if self.has_uninitialized_params:
with cuda.get_device(self._device_id):
self._initialize_params(x.size // len(x.data))
return function_cnet_linear.cnet_linear(x, self.W, self.b)
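# Usage note: construction mirrors chainer.links.Linear, e.g. CnetLinear(784, 10) inside a
# Chain; passing in_size=None defers allocating W until the first forward pass (handled via
# has_uninitialized_params above).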
| 34.043956 | 86 | 0.643641 |
f744817e85fdb412c5892b7d65c53e54ebb20095 | 322 | py | Python | django-edx-courseware/urls.py | mjrulesamrat/django-edx-courseware | ec47a67e94b71ff95bb009fa95c47c76c21553da | [
"MIT"
] | null | null | null | django-edx-courseware/urls.py | mjrulesamrat/django-edx-courseware | ec47a67e94b71ff95bb009fa95c47c76c21553da | [
"MIT"
] | null | null | null | django-edx-courseware/urls.py | mjrulesamrat/django-edx-courseware | ec47a67e94b71ff95bb009fa95c47c76c21553da | [
"MIT"
] | null | null | null | __author__ = 'Jay Modi'
from django.conf import settings
from django.conf.urls import patterns, include, url
from .views import course_data
urlpatterns = [
url(
r'^courses-data/{}$'.format(
settings.COURSE_ID_PATTERN,
),
course_data,
name='edx_course_progress',
),
]
| 18.941176 | 51 | 0.63354 |
f7448f164d943f9205f7ff40068c9d8aff1896f8 | 613 | py | Python | apps/sushi/migrations/0023_sushicredentials_last_updated_by.py | remocrevo/celus | 682b13168eb475d7f970502113e756e40a899877 | [
"MIT"
] | null | null | null | apps/sushi/migrations/0023_sushicredentials_last_updated_by.py | remocrevo/celus | 682b13168eb475d7f970502113e756e40a899877 | [
"MIT"
] | null | null | null | apps/sushi/migrations/0023_sushicredentials_last_updated_by.py | remocrevo/celus | 682b13168eb475d7f970502113e756e40a899877 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.5 on 2019-10-21 07:13
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('sushi', '0022_sushi_credentials_locking'),
]
operations = [
migrations.AddField(
model_name='sushicredentials',
name='last_updated_by',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
),
]
| 27.863636 | 122 | 0.690049 |
f74491c60ad7411487d7485ccbfd1015420a2fac | 1,005 | py | Python | the_millionaire/users/admin.py | mrsaemir/the_millionaire | 3fe2c9c114b43648b1c2e039589733bbf778b7f6 | [
"MIT"
] | null | null | null | the_millionaire/users/admin.py | mrsaemir/the_millionaire | 3fe2c9c114b43648b1c2e039589733bbf778b7f6 | [
"MIT"
] | null | null | null | the_millionaire/users/admin.py | mrsaemir/the_millionaire | 3fe2c9c114b43648b1c2e039589733bbf778b7f6 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.contrib.auth import admin as auth_admin
from django.contrib.auth import get_user_model
from django.utils.translation import gettext_lazy as _
from the_millionaire.users.forms import UserChangeForm, UserCreationForm
User = get_user_model()
@admin.register(User)
class UserAdmin(auth_admin.UserAdmin):
form = UserChangeForm
add_form = UserCreationForm
fieldsets = (
(None, {"fields": ("username", "password")}),
(_("Personal info"), {"fields": ("name", "email")}),
(
_("Permissions"),
{
"fields": (
"is_active",
"is_staff",
"is_superuser",
"groups",
"user_permissions",
),
},
),
(_("Important dates"), {"fields": ("last_login", "date_joined")}),
)
list_display = ["username", "name", "is_superuser"]
search_fields = ["name"]
| 28.714286 | 74 | 0.559204 |
f74493fb8ff35c1840480038021304caa1379835 | 6,855 | py | Python | photons.py | mfkoerner/icarus | eb480596be127f760d10531d27569290df3e8ff9 | [
"MIT"
] | 2 | 2018-02-21T23:23:53.000Z | 2019-05-22T11:05:03.000Z | photons.py | mfkoerner/icarus | eb480596be127f760d10531d27569290df3e8ff9 | [
"MIT"
] | null | null | null | photons.py | mfkoerner/icarus | eb480596be127f760d10531d27569290df3e8ff9 | [
"MIT"
] | null | null | null | ########################################
# written for Python 3 #
# by Doug Fabini (fabini@mrl.ucsb.edu) #
########################################
'''
This script requires the following files to be located in 'baseDir':
- IBZKPT (to extract number of k points) POSSIBLY NO LONGER NEEDED
- DOSCAR (to extract bandgap)
- OUTCAR (to extract dielectric properties and energy resolution)
Currently only handles an isotropic equivalent for the dielectric / absorption tensors.
'''
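# Typical entry point, assuming 'baseDir' points at a finished VASP run containing the
# files listed above (see optical() at the bottom of this module):
# E, alpha, eps, N, Eg = optical(baseDir, save=True)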
# import packages, apply stylesheet
import config
import os
from electrons import np, plt, getTotalDOS, bandgap
# ****************** #
# DATA I/O FUNCTIONS #
# ****************** #
def getNkPts(bd):
''' Parse OUTCAR for number of k-points '''
fname = os.path.join(bd, 'OUTCAR')
# print(fname) #debug line
with open(fname, 'r') as f:
for line in f:
if 'irreducible k-points' in line:
# print(line) #debug line
return int(line.split()[1])
def getDielectric(bd, anisotropic=False):
''' Parse OUTCAR for dielectric properties, convert to appropriate form '''
fname = os.path.join(bd, 'OUTCAR')
with open(fname, 'r') as f:
raw = []
lnImag, lnReal = 0, 0
for i, line in enumerate(f):
raw.append(line)
if 'NEDOS' in line: # This and the checks below find the number of points per section and the starting line of each section
NEDOS = int(line.split()[5])
if 'IMAGINARY DIELECTRIC' in line and lnImag == 0: # Selecting the first set of dielectric numbers from VASP
lnImag = i
if 'REAL DIELECTRIC' in line and lnReal == 0:
lnReal = i
EepsRe, EepsIm = [], []
for i in range(lnImag+3,lnImag+NEDOS+3): #All of the imaginary dielectric components (NEDOS components and start point of lnImag+3)
if len(raw[i]) < 5: #Checking for early termination of DIELECTRIC DATA (printing to output)
print('DIELECTRIC DATA TERMINATED AT ONLY {} POINTS'.format(i-lnImag-3))
break
EepsIm.append([float(ri) for ri in raw[i].strip('\n').split()]) #Energy (frequency) then X,Y,Z,XY,YZ,ZX for imaginary component
E = np.array(EepsIm)[:,0] #Energies pulled from first part of EepsIm
for i in range(lnReal+3,lnReal+NEDOS+3):
if len(raw[i]) < 5:
# print('DIELECTRIC DATA TERMINATED AT ONLY {} POINTS'.format(i-lnReal-3))
break
EepsRe.append([float(ri) for ri in raw[i].strip('\n').split()]) #Real part from above
if anisotropic:
epsIm = np.array([row[1:] for row in EepsIm])
epsRe = np.array([row[1:] for row in EepsRe])
else:
epsIm = np.array([isotropic(row[1:]) for row in EepsIm]) #epsIm is the isotropic equivilent values for each energy
epsRe = np.array([isotropic(row[1:]) for row in EepsRe]) #Real part for epsIm, this time is epsRe
return E, epsRe + 1j*epsIm #Returns list of isotropic equivalent values
def saveResults(bd, E, alpha, eps):
''' Store absorption coefficient and dielectric function '''
out = np.hstack((E, alpha, eps.real, eps.imag))
out = np.reshape(out, (-1, 4), order='F')
np.savetxt(os.path.join(bd, 'optical.csv'), out, header='h*nu (eV), alpha_iso (cm^-1), Re[eps_iso] (eps_0), Im[eps_iso] (eps_0)')
def getSolarSpectrum():
''' Get direct+diffuse solar irradiance at global tilt, ASTM G173-03 '''
d = np.loadtxt('data/ASTMG173.dat')
return d[:,0], d[:,2]
# ****************** #
# ANALYSIS FUNCTIONS #
# ****************** #
def nm2eV(lam):
''' Convert wavelength in nm to energy in eV '''
h = 4.136e-15 # Planck constant, eV / s
c = 2.998e8 # speed of light, m / s
return h*c/(lam*1e-9)
def eV2nm(hnu):
''' Convert energy in eV to wavelength in nm '''
h = 4.136e-15 # Planck constant, eV / s
c = 2.998e8 # speed of light, m / s
return h*c/hnu*1e9
def isotropic(sixElements):
''' Returns an isotropic equivalent value for a symmetric 3x3 matrix '''
xx, yy, zz, xy, yz, zx = sixElements
A = np.array([[xx, xy, zx], [xy, yy, yz], [zx, yz, zz]])
eigval, _ = np.linalg.eigh(A)
return np.mean(eigval)
def dielec2optical(hnu, eps):
''' Calculate complex refractive index and absorption coefficient from dielectric function '''
h = 4.136e-15 # Planck constant, eV / s
c = 2.998e8 # speed of light, m / s
N = np.sqrt(eps)
alpha = 4*np.pi/(h*c)*hnu*N.imag/100 # divisor of 100 takes from m-1 to cm-1
return N, alpha
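# Figure of merit: overlap of the ASTM G173 solar spectrum with the material's absorption,
# integrated from 100 nm up to the bandgap wavelength (the last returned value is the
# cumulative integral).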
def FOM(hnu, alpha, Eg):
xx = np.linspace(100, eV2nm(Eg), int(1e4)) #proper range of light to think about (100 nm [13eV] to band gap wavelength)
xSun, ySun = getSolarSpectrum() #xSun -> wavelength of sun, ySun -> intensity of sun
yySun = np.interp(xx, xSun, ySun) #ySun calculated at the points for xx (so that we have the right resolution)
yyMat = np.interp(xx, np.flipud(eV2nm(hnu[1:])), np.flipud(alpha[1:])) #absorption as a function of wavelength
from scipy.integrate import cumtrapz #Trapezoidal numeric integration
return xx, yySun, yyMat, cumtrapz(yySun*yyMat, xx) # FOM is the last value: the integral of solar intensity times absorption over wavelength
# ****************** #
# PLOTTING FUNCTIONS #
# ****************** #
def plotDielectric(ax, E, eps, N, El=(0, 10)):
''' Plot complex dielectric function and complex refractive index '''
ax.plot(E, eps.real, label='$\\epsilon_r\\prime$')
ax.plot(E, eps.imag, label='$\\epsilon_r\\prime\\prime$')
ax.plot(E, N.real, label='$n$')
ax.plot(E, N.imag, label='$k$')
ax.set_xlim(El)
ax.set_xlabel('$h\\nu$ (eV)')
ax.legend()
def plotAbsorption(ax, hnu, alpha, xl=(0, 4), yl=(1e2, 1e7), rel2eg=None, lbl=None, wavelength=False):
''' Plot absorption coefficient '''
if wavelength:
if rel2eg is not None:
raise Exception('Relative to gap option not available when plotting by wavelength')
lh, = ax.semilogy(eV2nm(hnu), alpha, '.-', label=lbl)
ax.set_xlabel('$\\lambda$ (nm)')
elif not wavelength and rel2eg is None:
lh, = ax.semilogy(hnu, alpha, '.-', label=lbl)
ax.set_xlabel('$h\\nu$ (eV)')
else:
lh, = ax.semilogy(hnu-rel2eg, alpha, '.-', label=lbl)
ax.set_xlabel('$h\\nu-E_g$ (eV)')
ax.set_xlim(xl)
ax.set_ylim(yl)
ax.set_ylabel('$\\alpha$ (cm$^{-1}$)')
return lh
# ********** #
# HIGH LEVEL #
# ********** #
def optical(bd, save=False):
''' DESCRIPTION GOES HERE '''
Nk = getNkPts(bd) #Gets number of irreducible kpoints but never uses it :O
E, eps = getDielectric(bd) #Gets lists of E and equivilent eigenvalues (real + i*imag) for dialectric function
N, alpha = dielec2optical(E, eps) #N (dielectric constant) and alpha (absorption coefficient) from dielectric equivilent eigenvalues
Edos, tdos = getTotalDOS(bd) #arrays of len NEDOS with energy and DOS at that energy
Eg = bandgap(Edos, tdos) #Calculates bandgap from DOS data
if save:
saveResults(bd, E, alpha, eps) #Saves Energy, absorption, eigenvalue to basedir/optical.csv
return E, alpha, eps, N, Eg #Returns Energy, absorption, eigenvalue, refractive index, bandgap
| 38.296089 | 144 | 0.65485 |
f7452da095e19f5494b730624c0e1cac04e623b7 | 3,552 | py | Python | etl/jobs/transformation/patient_transformer_job.py | PDXFinder/pdcm-etl | 3caf80938d9e80f1fc7bb0724cba6ae68a8d315e | [
"Apache-2.0"
] | 1 | 2021-05-24T14:54:48.000Z | 2021-05-24T14:54:48.000Z | etl/jobs/transformation/patient_transformer_job.py | PDCMFinder/pdcm-etl | df0006e4ad5ca2ddf9c1387e28a0b7fb24f195de | [
"Apache-2.0"
] | 37 | 2022-02-09T18:19:13.000Z | 2022-03-29T12:14:19.000Z | etl/jobs/transformation/patient_transformer_job.py | PDCMFinder/pdcm-etl | df0006e4ad5ca2ddf9c1387e28a0b7fb24f195de | [
"Apache-2.0"
] | null | null | null | import sys
from pyspark.sql import DataFrame, SparkSession
from etl.constants import Constants
from etl.jobs.util.cleaner import init_cap_and_trim_all
from etl.jobs.util.dataframe_functions import transform_to_fk
from etl.jobs.util.id_assigner import add_id
def main(argv):
"""
Creates a parquet file with patient data.
:param list argv: the list elements should be:
[1]: Parquet file path with raw model data
[2]: Parquet file path with diagnosis data
[3]: Parquet file path with ethnicity data
[4]: Parquet file path with provider group data
[5]: Output file
"""
raw_patient_parquet_path = argv[1]
diagnosis_parquet_path = argv[2]
ethnicity_parquet_path = argv[3]
provider_group_parquet_path = argv[4]
output_path = argv[5]
spark = SparkSession.builder.getOrCreate()
raw_patient_df = spark.read.parquet(raw_patient_parquet_path)
diagnosis_df = spark.read.parquet(diagnosis_parquet_path)
ethnicity_df = spark.read.parquet(ethnicity_parquet_path)
provider_group_df = spark.read.parquet(provider_group_parquet_path)
patient_df = transform_patient(raw_patient_df, diagnosis_df, ethnicity_df, provider_group_df)
# Temporary fix to remove null rows
patient_df = patient_df.where("external_patient_id is not null")
patient_df.write.mode("overwrite").parquet(output_path)
def transform_patient(
raw_patient_df: DataFrame,
diagnosis_df: DataFrame,
ethnicity_df: DataFrame,
provider_group_df: DataFrame) -> DataFrame:
patient_df = clean_data_before_join(raw_patient_df)
patient_df = set_fk_diagnosis(patient_df, diagnosis_df)
patient_df = set_fk_ethnicity(patient_df, ethnicity_df)
patient_df = set_fk_provider_group(patient_df, provider_group_df)
patient_df = set_external_id(patient_df)
patient_df = add_id(patient_df, "id")
patient_df = get_columns_expected_order(patient_df)
return patient_df
def clean_data_before_join(patient_df: DataFrame) -> DataFrame:
patient_df = patient_df.withColumn("ethnicity", init_cap_and_trim_all("ethnicity"))
return patient_df.drop_duplicates()
def set_fk_diagnosis(raw_patient_df: DataFrame, diagnosis_df: DataFrame) -> DataFrame:
patient_df = transform_to_fk(
raw_patient_df, diagnosis_df, "initial_diagnosis", "name", "id", "initial_diagnosis_id")
return patient_df
def set_fk_ethnicity(patient_df: DataFrame, ethnicity_df: DataFrame) -> DataFrame:
patient_df = transform_to_fk(patient_df, ethnicity_df, "ethnicity", "name", "id", "ethnicity_id")
return patient_df
def set_fk_provider_group(patient_df: DataFrame, provider_group_df: DataFrame) -> DataFrame:
provider_group_df = provider_group_df.withColumnRenamed("id", "provider_group_id")
patient_df = patient_df.join(provider_group_df, on=[Constants.DATA_SOURCE_COLUMN], how='left')
return patient_df
def set_external_id(patient_df: DataFrame) -> DataFrame:
return patient_df.withColumnRenamed("patient_id", "external_patient_id")
def get_columns_expected_order(patient_df: DataFrame) -> DataFrame:
return patient_df.select(
"id",
"external_patient_id",
"sex",
"history",
"ethnicity_id",
"ethnicity_assessment_method",
"initial_diagnosis_id",
"age_at_initial_diagnosis",
"provider_group_id",
Constants.DATA_SOURCE_COLUMN)
if __name__ == "__main__":
sys.exit(main(sys.argv))
| 37 | 101 | 0.7317 |
f7454d807cf9346638540e5b9826adcab1db2bb3 | 8,481 | py | Python | neo/io/brainwaredamio.py | deeptimittal12/python-neo | 7409f47b5debd4d2a75bbf0e77ac10562446c97a | [
"BSD-3-Clause"
] | 1 | 2020-06-08T14:00:03.000Z | 2020-06-08T14:00:03.000Z | neo/io/brainwaredamio.py | deeptimittal12/python-neo | 7409f47b5debd4d2a75bbf0e77ac10562446c97a | [
"BSD-3-Clause"
] | null | null | null | neo/io/brainwaredamio.py | deeptimittal12/python-neo | 7409f47b5debd4d2a75bbf0e77ac10562446c97a | [
"BSD-3-Clause"
] | null | null | null | '''
Class for reading from Brainware DAM files
DAM files are binary files for holding raw data. They are broken up into
sequence of Segments, each containing a single raw trace and parameters.
The DAM file does NOT contain a sampling rate, nor can it be reliably
calculated from any of the parameters. You can calculate it from
the "sweep length" attribute if it is present, but it isn't always present.
It is more reliable to get it from the corresponding SRC file or F32 file if
you have one.
The DAM file also does not divide up data into Blocks, so only a single
Block is returned..
Brainware was developed by Dr. Jan Schnupp and is availabe from
Tucker Davis Technologies, Inc.
http://www.tdt.com/downloads.htm
Neither Dr. Jan Schnupp nor Tucker Davis Technologies, Inc. had any part in the
development of this code
The code is implemented with the permission of Dr. Jan Schnupp
Author: Todd Jennings
'''
# import needed core python modules
import os
import os.path
# numpy and quantities are already required by neo
import numpy as np
import quantities as pq
# needed core neo modules
from neo.core import (AnalogSignal, Block,
ChannelIndex, Segment)
# need to subclass BaseIO
from neo.io.baseio import BaseIO
class BrainwareDamIO(BaseIO):
"""
Class for reading Brainware raw data files with the extension '.dam'.
The read_block method returns the first Block of the file. It will
automatically close the file after reading.
The read method is the same as read_block.
Note:
The file format does not contain a sampling rate. The sampling rate
is set to 1 Hz, but this is arbitrary. If you have a corresponding .src
or .f32 file, you can get the sampling rate from that. It may also be
possible to infer it from the attributes, such as "sweep length", if
present.
Usage:
>>> from neo.io.brainwaredamio import BrainwareDamIO
>>> damfile = BrainwareDamIO(filename='multi_500ms_mulitrep_ch1.dam')
>>> blk1 = damfile.read()
>>> blk2 = damfile.read_block()
>>> print(blk1.segments)
>>> print(blk1.segments[0].analogsignals)
>>> print(blk1.units)
>>> print(blk2)
>>> print(blk2[0].segments)
>>> print blk2[0].segments
"""
is_readable = True # This class can only read data
is_writable = False # write is not supported
# This class is able to directly or indirectly handle the following objects
# You can notice that this greatly simplifies the full Neo object hierarchy
supported_objects = [Block, ChannelIndex,
Segment, AnalogSignal]
readable_objects = [Block]
writeable_objects = []
has_header = False
is_streameable = False
# This is for GUI stuff: a definition for parameters when reading.
# This dict should be keyed by object (`Block`). Each entry is a list
# of tuple. The first entry in each tuple is the parameter name. The
# second entry is a dict with keys 'value' (for default value),
# and 'label' (for a descriptive name).
# Note that if the highest-level object requires parameters,
# common_io_test will be skipped.
read_params = {Block: []}
# do not support write so no GUI stuff
write_params = None
name = 'Brainware DAM File'
extensions = ['dam']
mode = 'file'
def __init__(self, filename=None):
'''
Arguments:
filename: the filename
'''
BaseIO.__init__(self)
self._path = filename
self._filename = os.path.basename(filename)
self._fsrc = None
def read(self, lazy=False, **kargs):
'''
Reads raw data file "fname" generated with BrainWare
'''
assert not lazy, 'Do not support lazy'
return self.read_block(lazy=lazy)
def read_block(self, lazy=False, **kargs):
'''
Reads a block from the raw data file "fname" generated
with BrainWare
'''
assert not lazy, 'Do not support lazy'
# there are no keyargs implemented to so far. If someone tries to pass
# them they are expecting them to do something or making a mistake,
# neither of which should pass silently
if kargs:
raise NotImplementedError('This method does not have any '
'arguments implemented yet')
self._fsrc = None
block = Block(file_origin=self._filename)
# create the objects to store other objects
chx = ChannelIndex(file_origin=self._filename,
channel_ids=np.array([1]),
index=np.array([0]),
channel_names=np.array(['Chan1'], dtype='S'))
# load objects into their containers
block.channel_indexes.append(chx)
# open the file
with open(self._path, 'rb') as fobject:
# while the file is not done keep reading segments
while True:
seg = self._read_segment(fobject)
# if there are no more Segments, stop
if not seg:
break
# store the segment and signals
seg.analogsignals[0].channel_index = chx
block.segments.append(seg)
# remove the file object
self._fsrc = None
block.create_many_to_one_relationship()
return block
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# IMPORTANT!!!
# These are private methods implementing the internal reading mechanism.
# Due to the way BrainWare DAM files are structured, they CANNOT be used
# on their own. Calling these manually will almost certainly alter your
# position in the file in an unrecoverable manner, whether they throw
# an exception or not.
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
def _read_segment(self, fobject):
'''
Read a single segment with a single analogsignal
Returns the segment, or False if there are no more segments
'''
try:
# float64 -- start time of the AnalogSignal
t_start = np.fromfile(fobject, dtype=np.float64, count=1)[0]
except IndexError:
# if there are no more Segments, return
return False
# int16 -- index of the stimulus parameters
seg_index = np.fromfile(fobject, dtype=np.int16, count=1)[0].tolist()
# int16 -- number of stimulus parameters
numelements = np.fromfile(fobject, dtype=np.int16, count=1)[0]
# read the name strings for the stimulus parameters
paramnames = []
for _ in range(numelements):
# unit8 -- the number of characters in the string
numchars = np.fromfile(fobject, dtype=np.uint8, count=1)[0]
# char * numchars -- a single name string
name = np.fromfile(fobject, dtype=np.uint8, count=numchars)
# exclude invalid characters
name = str(name[name >= 32].view('c').tostring())
# add the name to the list of names
paramnames.append(name)
# float32 * numelements -- the values for the stimulus parameters
paramvalues = np.fromfile(fobject, dtype=np.float32, count=numelements)
# combine parameter names and the parameters as a dict
params = dict(zip(paramnames, paramvalues))
# int32 -- the number elements in the AnalogSignal
numpts = np.fromfile(fobject, dtype=np.int32, count=1)[0]
# int16 * numpts -- the AnalogSignal itself
signal = np.fromfile(fobject, dtype=np.int16, count=numpts)
sig = AnalogSignal(signal.astype(np.float) * pq.mV,
t_start=t_start * pq.d,
file_origin=self._filename,
sampling_period=1. * pq.s,
copy=False)
# Note: setting the sampling_period to 1 s is arbitrary
# load the AnalogSignal and parameters into a new Segment
seg = Segment(file_origin=self._filename,
index=seg_index,
**params)
seg.analogsignals = [sig]
return seg
| 35.78481 | 79 | 0.607476 |
f7457bda10287c1c64239c9802aa4e6293ce12bf | 15,413 | py | Python | canalystii/device.py | projectgus/python-canalystii | 7ef3a4a369a88976291ea91f93ef2b8c322855b9 | [
"BSD-3-Clause"
] | 3 | 2021-09-09T16:30:20.000Z | 2022-03-11T10:29:19.000Z | canalystii/device.py | projectgus/python-canalystii | 7ef3a4a369a88976291ea91f93ef2b8c322855b9 | [
"BSD-3-Clause"
] | null | null | null | canalystii/device.py | projectgus/python-canalystii | 7ef3a4a369a88976291ea91f93ef2b8c322855b9 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2021 Angus Gratton
#
# SPDX-License-Identifier: Apache-2.0
#
# This module contains device-level interface to Canalyst-II
import ctypes
import logging
import usb.core
import time
from . import protocol
logger = logging.getLogger(__name__)
# "Fast" lookups to go from channel to USB endpoint number
CHANNEL_TO_COMMAND_EP = [2, 4] # Command EP for channels 0,1
CHANNEL_TO_MESSAGE_EP = [1, 3] # CAN Message EP for channels 0, 1
class CanalystDevice(object):
"""Encapsulates a low-level USB interface to a Canalyst-II device.
Constructing an instance of this class will cause pyusb to acquire the
relevant USB interface, and retain it until the object is garbage collected.
:param device_index: if more than one Canalyst-II device is connected, this is
the index to use in the list.
:param usb_device: Optional argument to ignore device_index and provide an instance
of a pyusb device object directly.
:param bitrate: If set, both channels are initialized to the specified bitrate and
started automatically. If unset (default) then the "init" method must be called
before using either channel.
:param timing0: Optional parameter to provide BTR timing directly. Either both or
neither timing0 and timing1 must be set, and setting these arguments is mutually
exclusive with setting bitrate. If set, both channels are initialized and started
automatically.
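Example usage sketch (using only methods defined in this class; values are illustrative):
dev = CanalystDevice(bitrate=500000) # initializes and starts both channels
messages = dev.receive(0) # poll channel 0 for buffered CAN messages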
"""
# Small optimization, build common command packet one time
COMMAND_MESSAGE_STATUS = protocol.SimpleCommand(protocol.COMMAND_MESSAGE_STATUS)
MESSAGE_BUF_LEN = ctypes.sizeof(protocol.MessageBuffer)
def __init__(
self, device_index=0, usb_device=None, bitrate=None, timing0=None, timing1=None
):
"""Constructor function."""
if usb_device is not None:
self._dev = usb_device
else:
devices = list(
usb.core.find(
idVendor=protocol.USB_ID_VENDOR,
idProduct=protocol.USB_ID_PRODUCT,
find_all=True,
)
)
if not devices:
raise ValueError("No Canalyst-II USB device found")
if len(devices) <= device_index:
raise ValueError(
f"Can't open device_index {device_index}, only {len(devices)} devices found."
)
self._dev = devices[device_index]
active_config = self._dev.get_active_configuration()
if active_config is None or active_config.bConfigurationValue != 1:
self._dev.set_configuration(1)
self._initialized = [False, False]
self._started = [False, False]
# Check this looks like the firmware we expect: as this is an unofficial driver,
# we don't know what other versions might be out there.
if self._dev.product != "Chuangxin Tech USBCAN/CANalyst-II":
logger.warning(
f"Unexpected USB product string: {self._dev.product}. Firmware version may be unsupported."
)
interfaces = self._dev.get_active_configuration().interfaces()
if len(interfaces) != 1:
logger.warning(
f"Unexpected interface count {len(interfaces)}. Firmware version may be unsupported."
)
endpoints = interfaces[0].endpoints()
# For whatever reason FW has 6 bidirectional BULK endpoints!
if len(endpoints) != 12:
logger.warning(
f"Unexpected endpoint count {len(endpoints)}. Firmware version may be unsupported."
)
if bitrate is not None or timing0 is not None:
# if not specified, don't initialize yet
self.init(0, bitrate, timing0, timing1)
self.init(1, bitrate, timing0, timing1)
def __del__(self):
# In theory pyusb should manage this, but in order to allow a new device
# object to be created later (in the same process) it seems the device needs to be reset (which
# calls dispose internally)
try:
self._dev.reset()
except AttributeError:
pass
def clear_rx_buffer(self, channel):
"""Clears the device's receive buffer for the specified channel.
Note that this doesn't seem to 100% work in the device firmware, on a busy bus
it's possible to receive a small number of "old" messages even after calling this.
:param channel: Channel (0 or 1) to clear the RX buffer on.
"""
self.send_command(
channel, protocol.SimpleCommand(protocol.COMMAND_CLEAR_RX_BUFFER)
)
def flush_tx_buffer(self, channel, timeout=0):
"""Check if all pending messages have left the hardware TX buffer and optionally keep polling until
this happens or a timeout is reached.
Note that due to hardware limitations, "no messages in TX buffer" doesn't necessarily mean
that the messages were sent successfully - for the default send type 0 (see Message.send_type), the
hardware will attempt bus arbitration multiple times but if it fails then it will still "send" the
message. It also doesn't consider the ACK status of the message.
:param channel: Channel (0 or 1) to flush the TX buffer on.
:param timeout: Optional number of seconds to continue polling for empty TX buffer. If 0 (default),
this function will immediately return the current status of the send buffer.
:return: True if flush is successful (no pending messages to send), False if flushing timed out.
"""
deadline = None
while deadline is None or time.time() < deadline:
if deadline is None and timeout is not None:
deadline = time.time() + timeout
resp = self.send_command(
channel,
self.COMMAND_MESSAGE_STATUS,
protocol.MessageStatusResponse,
)
if resp.tx_pending == 0:
return True
return False # timeout!
def send_command(self, channel, command_packet, response_class=None):
"""Low-level function to send a command packet to the channel and optionally wait for a response.
:param channel: Channel (0 or 1) to flush the TX buffer on.
:param command_packet: Data to send to the channel. Usually this will be a ctypes Structure, but can be
anything that supports a bytes buffer interface.
:param response_class: If None (default) then this function doesn't expect to read anything back from the
device. If not None, should be a ctypes class - 64 bytes will be read into a buffer and returned as an
object of this type.
"""
ep = CHANNEL_TO_COMMAND_EP[channel]
self._dev.write(ep, memoryview(command_packet).cast("B"))
if response_class:
response = self._dev.read(ep | 0x80, 0x40)
if len(response) < ctypes.sizeof(response_class):
raise RuntimeError(
f"Expected response minimum {ctypes.sizeof(response_class)} bytes, got {len(response)} bytes."
)
return response_class.from_buffer(response)
def init(self, channel, bitrate=None, timing0=None, timing1=None, start=True):
"""Initialize channel to a particular baud rate. This can be called more than once to change
the channel bit rate.
:param channel: Channel (0 or 1) to initialize.
        :param bitrate: Bitrate to set for the channel. Either this argument or both
timing0 and timing1 must be set.
:param timing0: Raw BTR0 timing value to determine the bitrate. If this argument is set,
timing1 must also be set and bitrate argument must be unset.
:param timing1: Raw BTR1 timing value to determine the bitrate. If this argument is set,
timing0 must also be set and bitrate argument must be unset.
:param start: If True (default) then the channel is started after being initialized.
If set to False, the channel will not be started until the start function is called
manually.
"""
if bitrate is None and timing0 is None and timing1 is None:
raise ValueError(
"Either bitrate or both timing0/timing1 parameters are required"
)
if bitrate is not None:
if timing0 is not None or timing1 is not None:
raise ValueError(
"If bitrate parameter is set, both timing0 and timing1 parameters should be None"
)
try:
timing0, timing1 = TIMINGS[bitrate]
except KeyError:
raise ValueError(f"Bitrate {bitrate} is not supported")
if timing0 is None or timing1 is None:
raise ValueError(
"To set raw timings, both timing0 and timing1 parameters are required"
)
init_packet = protocol.InitCommand(
command=protocol.COMMAND_INIT,
acc_code=0x1,
acc_mask=0xFFFFFFFF,
filter=0x1, # placeholder
timing0=timing0,
timing1=timing1,
mode=0x0, # placeholder
unknown2=0x1,
) # placeholder
self.send_command(channel, init_packet)
self._initialized[channel] = True
        if start:
            self.start(channel)
def stop(self, channel):
"""Stop this channel. CAN messages won't be sent or received on this channel until it is started again.
:param channel: Channel (0 or 1) to stop. The channel must already be initialized.
"""
if not self._initialized[channel]:
raise RuntimeError(f"Channel {channel} is not initialized.")
self.send_command(channel, protocol.SimpleCommand(protocol.COMMAND_STOP))
self._started[channel] = False
def start(self, channel):
"""Start this channel. This allows CAN messages to be sent and received. The hardware
will buffer received messages until the receive() function is called.
:param channel: Channel (0 or 1) to start. The channel must already be initialized.
"""
if not self._initialized[channel]:
raise RuntimeError(f"Channel {channel} is not initialized.")
self.send_command(channel, protocol.SimpleCommand(protocol.COMMAND_START))
self._started[channel] = True
def receive(self, channel):
"""Poll the hardware for received CAN messages and return them all as a list.
:param channel: Channel (0 or 1) to poll. The channel must be started.
:return: List of Message objects representing received CAN messages, in order.
"""
if not self._initialized[channel]:
raise RuntimeError(f"Channel {channel} is not initialized.")
if not self._started[channel]:
raise RuntimeError(f"Channel {channel} is stopped, can't receive messages.")
status = self.send_command(
channel, self.COMMAND_MESSAGE_STATUS, protocol.MessageStatusResponse
)
if status.rx_pending == 0:
return []
# Calculate how large our read should be, add one buffer to try and avoid issues
# caused by fragmentation (sometimes the RX message is in the next buffer not the
# current one)
rx_buffer_num = (status.rx_pending + 2) // 3 + 1
rx_buffer_size = rx_buffer_num * self.MESSAGE_BUF_LEN
message_ep = CHANNEL_TO_MESSAGE_EP[channel]
rx_data = self._dev.read(message_ep | 0x80, rx_buffer_size)
assert len(rx_data) % self.MESSAGE_BUF_LEN == 0
num_buffers = len(rx_data) // self.MESSAGE_BUF_LEN
# Avoid copying data here, parse the MessageBuffer structures but return
# a list of Message objects all pointing into the original USB data
# buffer. This is a little wasteful of total RAM but should be faster,
# and we assume the caller is going to process these into another format
# anyhow.
result = []
message_bufs = (protocol.MessageBuffer * num_buffers).from_buffer(rx_data)
for buf in message_bufs:
count = buf.count
assert 0 <= count <= 3
result += buf.messages[:count]
return result
def send(self, channel, messages, flush_timeout=None):
"""Send one or more CAN messages to the channel.
:param channel: Channel (0 or 1) to send to. The channel must be started.
:param messages: Either a single Message object, or a list of
Message objects to send.
:param flush_timeout: If set, don't return until TX buffer is flushed or timeout is
reached.
Setting this parameter causes the software to poll the device continuously
for the buffer state. If None (default) then the function returns immediately,
            when some CAN messages may still be waiting to be sent due to CAN bus arbitration.
See flush_tx_buffer() function for details.
:return: None if flush_timeout is None (default). Otherwise True if all messages sent
(or failed), False if timeout reached.
"""
if not self._initialized[channel]:
raise RuntimeError(f"Channel {channel} is not initialized.")
if not self._started[channel]:
raise RuntimeError(f"Channel {channel} is stopped, can't send messages.")
if isinstance(messages, protocol.Message):
messages = [messages]
tx_buffer_num = (len(messages) + 2) // 3
buffers = (protocol.MessageBuffer * tx_buffer_num)()
for idx, msg in enumerate(messages):
buf_idx = idx // 3
buffers[buf_idx].count += 1
buffers[buf_idx].messages[idx % 3] = msg
message_ep = CHANNEL_TO_MESSAGE_EP[channel]
self._dev.write(message_ep, memoryview(buffers).cast("B"))
if flush_timeout is not None:
return self.flush_tx_buffer(channel, flush_timeout)
def get_can_status(self, channel):
"""Return some internal CAN-related values. The actual meaning of these is currently unknown.
:return: Instance of the CANStatusResponse structure. Note the field names may not be accurate.
"""
if not self._initialized[channel]:
logger.warning(
f"Channel {channel} is not initialized, CAN status may be invalid."
)
return self.send_command(
channel,
protocol.SimpleCommand(protocol.COMMAND_CAN_STATUS),
protocol.CANStatusResponse,
)
# Lookup from bitrate to Timing0 (BTR0), Timing1 (BTR1) values
TIMINGS = {
5000: (0xBF, 0xFF),
10000: (0x31, 0x1C),
20000: (0x18, 0x1C),
33330: (0x09, 0x6F),
40000: (0x87, 0xFF),
50000: (0x09, 0x1C),
66660: (0x04, 0x6F),
80000: (0x83, 0xFF),
83330: (0x03, 0x6F),
100000: (0x04, 0x1C),
125000: (0x03, 0x1C),
200000: (0x81, 0xFA),
250000: (0x01, 0x1C),
400000: (0x80, 0xFA),
500000: (0x00, 0x1C),
666000: (0x80, 0xB6),
800000: (0x00, 0x16),
1000000: (0x00, 0x14),
}
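# Usage sketch (illustrative, not part of the original module). It assumes the
# device class defined above is instantiated as `dev` and that `some_message`
# is a protocol.Message instance; only init/send/receive/stop and the TIMINGS
# lookup are taken from the code above, the rest is an assumption.
#
#   dev.init(0, bitrate=500000)              # BTR0/BTR1 looked up from TIMINGS
#   dev.send(0, some_message, flush_timeout=1.0)
#   for msg in dev.receive(0):
#       print(msg)
#   dev.stop(0)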
| 44.417867 | 114 | 0.635957 |
f74581304b1ec7afe6efed1c1a84fed53c39ec0a | 3,460 | py | Python | app/celery/process_ses_receipts_tasks.py | alphagov-mirror/notifications-api | 4a2e47b118c51f0ad45e87c89521f6087b1fcc2f | [
"MIT"
] | null | null | null | app/celery/process_ses_receipts_tasks.py | alphagov-mirror/notifications-api | 4a2e47b118c51f0ad45e87c89521f6087b1fcc2f | [
"MIT"
] | null | null | null | app/celery/process_ses_receipts_tasks.py | alphagov-mirror/notifications-api | 4a2e47b118c51f0ad45e87c89521f6087b1fcc2f | [
"MIT"
] | null | null | null | from datetime import datetime, timedelta
import iso8601
from celery.exceptions import Retry
from flask import current_app, json
from notifications_utils.statsd_decorators import statsd
from sqlalchemy.orm.exc import NoResultFound
from app import notify_celery, statsd_client
from app.clients.email.aws_ses import get_aws_responses
from app.config import QueueNames
from app.dao import notifications_dao
from app.models import NOTIFICATION_PENDING, NOTIFICATION_SENDING
from app.notifications.notifications_ses_callback import (
_check_and_queue_callback_task,
_check_and_queue_complaint_callback_task,
determine_notification_bounce_type,
handle_complaint,
)
@notify_celery.task(bind=True, name="process-ses-result", max_retries=5, default_retry_delay=300)
@statsd(namespace="tasks")
def process_ses_results(self, response):
try:
ses_message = json.loads(response['Message'])
notification_type = ses_message['notificationType']
bounce_message = None
if notification_type == 'Bounce':
notification_type, bounce_message = determine_notification_bounce_type(notification_type, ses_message)
elif notification_type == 'Complaint':
_check_and_queue_complaint_callback_task(*handle_complaint(ses_message))
return True
aws_response_dict = get_aws_responses(notification_type)
notification_status = aws_response_dict['notification_status']
reference = ses_message['mail']['messageId']
try:
notification = notifications_dao.dao_get_notification_or_history_by_reference(reference=reference)
except NoResultFound:
message_time = iso8601.parse_date(ses_message['mail']['timestamp']).replace(tzinfo=None)
if datetime.utcnow() - message_time < timedelta(minutes=5):
current_app.logger.info(
f"notification not found for reference: {reference} (update to {notification_status}). "
f"Callback may have arrived before notification was persisted to the DB. Adding task to retry queue"
)
self.retry(queue=QueueNames.RETRY)
else:
current_app.logger.warning(
f"notification not found for reference: {reference} (update to {notification_status})"
)
return
if bounce_message:
current_app.logger.info(f"SES bounce for notification ID {notification.id}: {bounce_message}")
if notification.status not in [NOTIFICATION_SENDING, NOTIFICATION_PENDING]:
notifications_dao._duplicate_update_warning(
notification=notification,
status=notification_status
)
return
else:
notifications_dao.dao_update_notifications_by_reference(
references=[reference],
update_dict={'status': notification_status}
)
statsd_client.incr('callback.ses.{}'.format(notification_status))
if notification.sent_at:
statsd_client.timing_with_dates('callback.ses.elapsed-time', datetime.utcnow(), notification.sent_at)
_check_and_queue_callback_task(notification)
return True
except Retry:
raise
except Exception as e:
current_app.logger.exception('Error processing SES results: {}'.format(type(e)))
self.retry(queue=QueueNames.RETRY)
| 39.770115 | 120 | 0.697399 |
f745882b7fa076b31cfecf66c9b6f8b109221cd1 | 1,962 | py | Python | download_all_fx_data.py | feilongbk/FX-1-Minute-Data | 83bd7eaef0dee7221e8acfa980ce180571b6ffce | [
"Apache-2.0"
] | 326 | 2017-05-22T07:03:36.000Z | 2022-03-31T21:57:21.000Z | download_all_fx_data.py | feilongbk/FX-1-Minute-Data | 83bd7eaef0dee7221e8acfa980ce180571b6ffce | [
"Apache-2.0"
] | 9 | 2017-12-15T10:30:17.000Z | 2021-06-10T01:07:34.000Z | download_all_fx_data.py | feilongbk/FX-1-Minute-Data | 83bd7eaef0dee7221e8acfa980ce180571b6ffce | [
"Apache-2.0"
] | 140 | 2017-08-07T17:20:48.000Z | 2022-03-29T19:46:23.000Z | import csv
import os
from histdata.api import download_hist_data
def mkdir_p(path):
import errno
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def download_all():
with open('pairs.csv', 'r') as f:
reader = csv.reader(f, delimiter=',')
next(reader, None) # skip the headers
for row in reader:
currency_pair_name, pair, history_first_trading_month = row
year = int(history_first_trading_month[0:4])
print(currency_pair_name)
output_folder = os.path.join('output', pair)
mkdir_p(output_folder)
try:
while True:
could_download_full_year = False
try:
print('-', download_hist_data(year=year,
pair=pair,
output_directory=output_folder,
verbose=False))
could_download_full_year = True
except AssertionError:
pass # lets download it month by month.
month = 1
while not could_download_full_year and month <= 12:
print('-', download_hist_data(year=str(year),
month=str(month),
pair=pair,
output_directory=output_folder,
verbose=False))
month += 1
year += 1
except Exception:
print('[DONE] for currency', currency_pair_name)
if __name__ == '__main__':
download_all()
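# Illustrative pairs.csv layout assumed by download_all() (a sketch derived
# from the column unpacking above; the exact header text and example row are
# assumptions):
#
#   currency_pair_name,pair,history_first_trading_month
#   EUR/USD,eurusd,2000-05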
| 36.333333 | 85 | 0.445464 |
f7458c30cc2fa802624a2a70a9422c6c7e9ff1a0 | 351 | py | Python | BioNetGen-2.3.0/source_Atomizer/stats/wordcloud.py | joseph-hellerstein/RuleBasedProgramming | fb88118ab764035979dc7c2bf8c89a7b484e4472 | [
"MIT"
] | null | null | null | BioNetGen-2.3.0/source_Atomizer/stats/wordcloud.py | joseph-hellerstein/RuleBasedProgramming | fb88118ab764035979dc7c2bf8c89a7b484e4472 | [
"MIT"
] | null | null | null | BioNetGen-2.3.0/source_Atomizer/stats/wordcloud.py | joseph-hellerstein/RuleBasedProgramming | fb88118ab764035979dc7c2bf8c89a7b484e4472 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 27 18:06:08 2013
@author: proto
"""
from pytagcloud import create_tag_image, make_tags
from pytagcloud.lang.counter import get_tag_counts
def cloudText(text,fileName):
tags = make_tags(get_tag_counts(text), maxsize=80)
create_tag_image(tags, fileName, size=(800, 600), fontname='Droid Sans')
| 23.4 | 76 | 0.732194 |
f7459dd9070c8898f768605d0a670f18c376fd61 | 17,023 | py | Python | build/android/run_tests.py | leiferikb/bitpop-private | 4c967307d228e86f07f2576068a169e846c833ca | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 | 2021-11-15T15:17:43.000Z | 2021-11-15T15:17:43.000Z | build/android/run_tests.py | houseoflifeproperty/bitpop-private | 4c967307d228e86f07f2576068a169e846c833ca | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | build/android/run_tests.py | houseoflifeproperty/bitpop-private | 4c967307d228e86f07f2576068a169e846c833ca | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 | 2020-11-04T07:24:02.000Z | 2020-11-04T07:24:02.000Z | #!/usr/bin/env python
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs all the native unit tests.
1. Copy over test binary to /data/local on device.
2. Resources: chrome/unit_tests requires resources (chrome.pak and en-US.pak)
to be deployed to the device. We use the device's $EXTERNAL_STORAGE as the
base dir (which maps to Context.getExternalFilesDir()).
3. Environment:
3.1. chrome/unit_tests requires (via chrome_paths.cc) a directory named:
$EXTERNAL_STORAGE + /chrome/test/data
4. Run the binary in the device and stream the log to the host.
4.1. Optionally, filter specific tests.
4.2. If we're running a single test suite and we have multiple devices
connected, we'll shard the tests.
5. Clean up the device.
Suppressions:
Individual tests in a test binary can be suppressed by listing it in
the gtest_filter directory in a file of the same name as the test binary,
one test per line. Here is an example:
$ cat gtest_filter/base_unittests_disabled
DataPackTest.Load
ReadOnlyFileUtilTest.ContentsEqual
This file is generated by the tests running on devices. If running on emulator,
an additional filter file, which lists the tests that only failed on the emulator, will be
loaded. We don't care about the rare test cases which succeeded on the emulator, but
failed on device.
"""
import copy
import fnmatch
import logging
import optparse
import os
import signal
import subprocess
import sys
import time
import emulator
from pylib import android_commands
from pylib import buildbot_report
from pylib import cmd_helper
from pylib import debug_info
from pylib import ports
from pylib import run_tests_helper
from pylib import test_options_parser
from pylib.base_test_sharder import BaseTestSharder
from pylib.single_test_runner import SingleTestRunner
_TEST_SUITES = ['base_unittests',
'cc_unittests',
'content_unittests',
'gpu_unittests',
'ipc_tests',
'media_unittests',
'net_unittests',
'sql_unittests',
'sync_unit_tests',
'ui_unittests',
'unit_tests',
'webkit_compositor_bindings_unittests',
]
def FullyQualifiedTestSuites(exe, option_test_suite, build_type):
"""Get a list of absolute paths to test suite targets.
Args:
exe: if True, use the executable-based test runner.
option_test_suite: the test_suite specified as an option.
build_type: 'Release' or 'Debug'.
"""
test_suite_dir = os.path.join(cmd_helper.OutDirectory.get(), build_type)
if option_test_suite:
all_test_suites = [option_test_suite]
else:
all_test_suites = _TEST_SUITES
if exe:
qualified_test_suites = [os.path.join(test_suite_dir, t)
for t in all_test_suites]
else:
# out/(Debug|Release)/$SUITE_apk/$SUITE-debug.apk
qualified_test_suites = [os.path.join(test_suite_dir,
t + '_apk',
t + '-debug.apk')
for t in all_test_suites]
for t, q in zip(all_test_suites, qualified_test_suites):
if not os.path.exists(q):
raise Exception('Test suite %s not found in %s.\n'
'Supported test suites:\n %s\n'
'Ensure it has been built.\n' %
(t, q, _TEST_SUITES))
return qualified_test_suites
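# Example (sketch, hypothetical values): with exe=False,
# option_test_suite='base_unittests' and build_type='Release', this returns
# something like ['out/Release/base_unittests_apk/base_unittests-debug.apk'],
# assuming the default output directory.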
class TimeProfile(object):
"""Class for simple profiling of action, with logging of cost."""
def __init__(self, description):
self._description = description
self.Start()
def Start(self):
self._starttime = time.time()
def Stop(self):
"""Stop profiling and dump a log."""
if self._starttime:
stoptime = time.time()
logging.info('%fsec to perform %s',
stoptime - self._starttime, self._description)
self._starttime = None
class Xvfb(object):
"""Class to start and stop Xvfb if relevant. Nop if not Linux."""
def __init__(self):
self._pid = 0
def _IsLinux(self):
"""Return True if on Linux; else False."""
return sys.platform.startswith('linux')
def Start(self):
"""Start Xvfb and set an appropriate DISPLAY environment. Linux only.
Copied from tools/code_coverage/coverage_posix.py
"""
if not self._IsLinux():
return
proc = subprocess.Popen(['Xvfb', ':9', '-screen', '0', '1024x768x24',
'-ac'],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
self._pid = proc.pid
if not self._pid:
raise Exception('Could not start Xvfb')
os.environ['DISPLAY'] = ':9'
# Now confirm, giving a chance for it to start if needed.
for _ in range(10):
proc = subprocess.Popen('xdpyinfo >/dev/null', shell=True)
_, retcode = os.waitpid(proc.pid, 0)
if retcode == 0:
break
time.sleep(0.25)
if retcode != 0:
raise Exception('Could not confirm Xvfb happiness')
def Stop(self):
"""Stop Xvfb if needed. Linux only."""
if self._pid:
try:
os.kill(self._pid, signal.SIGKILL)
except:
pass
del os.environ['DISPLAY']
self._pid = 0
class TestSharder(BaseTestSharder):
"""Responsible for sharding the tests on the connected devices."""
def __init__(self, attached_devices, test_suite, gtest_filter,
test_arguments, timeout, cleanup_test_files, tool,
log_dump_name, fast_and_loose, build_type, in_webkit_checkout):
BaseTestSharder.__init__(self, attached_devices, build_type)
self.test_suite = test_suite
self.test_suite_basename = os.path.basename(test_suite)
self.gtest_filter = gtest_filter or ''
self.test_arguments = test_arguments
self.timeout = timeout
self.cleanup_test_files = cleanup_test_files
self.tool = tool
self.log_dump_name = log_dump_name
self.fast_and_loose = fast_and_loose
self.in_webkit_checkout = in_webkit_checkout
self.all_tests = []
if not self.gtest_filter:
# No filter has been specified, let's add all tests then.
self.all_tests, self.attached_devices = self._GetAllEnabledTests()
self.tests = self.all_tests
def _GetAllEnabledTests(self):
"""Get all enabled tests and available devices.
Obtains a list of enabled tests from the test package on the device,
    then filters it again using the disabled list on the host.
Returns:
Tuple of (all enabled tests, available devices).
Raises Exception if all devices failed.
"""
# TODO(frankf): This method is doing too much in a non-systematic way.
# If the intention is to drop flaky devices, why not go through all devices
    # instead of breaking on the first successful run?
available_devices = list(self.attached_devices)
while available_devices:
try:
return (self._GetTestsFromDevice(available_devices[-1]),
available_devices)
except Exception as e:
logging.warning('Failed obtaining tests from %s %s',
available_devices[-1], e)
available_devices.pop()
raise Exception('No device available to get the list of tests.')
def _GetTestsFromDevice(self, device):
logging.info('Obtaining tests from %s', device)
test_runner = SingleTestRunner(
device,
self.test_suite,
self.gtest_filter,
self.test_arguments,
self.timeout,
self.cleanup_test_files,
self.tool,
0,
not not self.log_dump_name,
self.fast_and_loose,
self.build_type,
self.in_webkit_checkout)
# The executable/apk needs to be copied before we can call GetAllTests.
test_runner.test_package.StripAndCopyExecutable()
all_tests = test_runner.test_package.GetAllTests()
disabled_list = test_runner.GetDisabledTests()
# Only includes tests that do not have any match in the disabled list.
all_tests = filter(lambda t:
not any([fnmatch.fnmatch(t, disabled_pattern)
for disabled_pattern in disabled_list]),
all_tests)
return all_tests
def CreateShardedTestRunner(self, device, index):
"""Creates a suite-specific test runner.
Args:
device: Device serial where this shard will run.
index: Index of this device in the pool.
Returns:
A SingleTestRunner object.
"""
device_num = len(self.attached_devices)
shard_size = (len(self.tests) + device_num - 1) / device_num
shard_test_list = self.tests[index * shard_size : (index + 1) * shard_size]
test_filter = ':'.join(shard_test_list) + self.gtest_filter
return SingleTestRunner(
device,
self.test_suite,
test_filter,
self.test_arguments,
self.timeout,
self.cleanup_test_files, self.tool, index,
not not self.log_dump_name,
self.fast_and_loose,
self.build_type,
self.in_webkit_checkout)
def OnTestsCompleted(self, test_runners, test_results):
"""Notifies that we completed the tests."""
test_results.LogFull('Unit test', os.path.basename(self.test_suite),
self.build_type, self.all_tests)
test_results.PrintAnnotation()
if self.log_dump_name:
# Zip all debug info outputs into a file named by log_dump_name.
debug_info.GTestDebugInfo.ZipAndCleanResults(
os.path.join(
cmd_helper.OutDirectory.get(), self.build_type,
'debug_info_dumps'),
self.log_dump_name)
def _RunATestSuite(options):
"""Run a single test suite.
Helper for Dispatch() to allow stop/restart of the emulator across
test bundles. If using the emulator, we start it on entry and stop
it on exit.
Args:
options: options for running the tests.
Returns:
0 if successful, number of failing tests otherwise.
"""
step_name = os.path.basename(options.test_suite).replace('-debug.apk', '')
buildbot_report.PrintNamedStep(step_name)
attached_devices = []
buildbot_emulators = []
if options.use_emulator:
for n in range(options.emulator_count):
t = TimeProfile('Emulator launch %d' % n)
avd_name = None
if n > 0:
# Creates a temporary AVD for the extra emulators.
avd_name = 'run_tests_avd_%d' % n
buildbot_emulator = emulator.Emulator(avd_name, options.fast_and_loose)
buildbot_emulator.Launch(kill_all_emulators=n == 0)
t.Stop()
buildbot_emulators.append(buildbot_emulator)
attached_devices.append(buildbot_emulator.device)
# Wait for all emulators to boot completed.
map(lambda buildbot_emulator: buildbot_emulator.ConfirmLaunch(True),
buildbot_emulators)
elif options.test_device:
attached_devices = [options.test_device]
else:
attached_devices = android_commands.GetAttachedDevices()
if not attached_devices:
logging.critical('A device must be attached and online.')
buildbot_report.PrintError()
return 1
# Reset the test port allocation. It's important to do it before starting
# to dispatch any tests.
if not ports.ResetTestServerPortAllocation():
raise Exception('Failed to reset test server port.')
if options.gtest_filter:
logging.warning('Sharding is not possible with these configurations.')
attached_devices = [attached_devices[0]]
sharder = TestSharder(
attached_devices,
options.test_suite,
options.gtest_filter,
options.test_arguments,
options.timeout,
options.cleanup_test_files,
options.tool,
options.log_dump,
options.fast_and_loose,
options.build_type,
options.webkit)
test_results = sharder.RunShardedTests()
for buildbot_emulator in buildbot_emulators:
buildbot_emulator.Shutdown()
return len(test_results.failed)
def Dispatch(options):
"""Dispatches the tests, sharding if possible.
If options.use_emulator is True, all tests will be run in new emulator
instance.
Args:
options: options for running the tests.
Returns:
0 if successful, number of failing tests otherwise.
"""
if options.test_suite == 'help':
ListTestSuites()
return 0
if options.use_xvfb:
xvfb = Xvfb()
xvfb.Start()
all_test_suites = FullyQualifiedTestSuites(options.exe, options.test_suite,
options.build_type)
failures = 0
for suite in all_test_suites:
# Give each test suite its own copy of options.
test_options = copy.deepcopy(options)
test_options.test_suite = suite
failures += _RunATestSuite(test_options)
if options.use_xvfb:
xvfb.Stop()
return failures
def ListTestSuites():
"""Display a list of available test suites."""
print 'Available test suites are:'
for test_suite in _TEST_SUITES:
print test_suite
def main(argv):
option_parser = optparse.OptionParser()
test_options_parser.AddTestRunnerOptions(option_parser, default_timeout=0)
option_parser.add_option('-s', '--suite', dest='test_suite',
help='Executable name of the test suite to run '
'(use -s help to list them)')
option_parser.add_option('--out-directory', dest='out_directory',
help='Path to the out/ directory, irrespective of '
'the build type. Only for non-Chromium uses.')
option_parser.add_option('-d', '--device', dest='test_device',
help='Target device the test suite to run ')
option_parser.add_option('-f', '--gtest_filter', dest='gtest_filter',
help='gtest filter')
option_parser.add_option('-a', '--test_arguments', dest='test_arguments',
help='Additional arguments to pass to the test')
option_parser.add_option('-L', dest='log_dump',
help='file name of log dump, which will be put in '
'subfolder debug_info_dumps under the same '
'directory in where the test_suite exists.')
option_parser.add_option('-e', '--emulator', dest='use_emulator',
action='store_true',
help='Run tests in a new instance of emulator')
option_parser.add_option('-n', '--emulator_count',
type='int', default=1,
help='Number of emulators to launch for running the '
'tests.')
option_parser.add_option('-x', '--xvfb', dest='use_xvfb',
action='store_true',
help='Use Xvfb around tests (ignored if not Linux)')
option_parser.add_option('--webkit', action='store_true',
help='Run the tests from a WebKit checkout.')
option_parser.add_option('--fast', '--fast_and_loose', dest='fast_and_loose',
action='store_true',
help='Go faster (but be less stable), '
'for quick testing. Example: when tracking down '
'tests that hang to add to the disabled list, '
'there is no need to redeploy the test binary '
'or data to the device again. '
'Don\'t use on bots by default!')
option_parser.add_option('--repeat', dest='repeat', type='int',
default=2,
help='Repeat count on test timeout')
option_parser.add_option('--exit_code', action='store_true',
help='If set, the exit code will be total number '
'of failures.')
option_parser.add_option('--exe', action='store_true',
help='If set, use the exe test runner instead of '
'the APK.')
options, args = option_parser.parse_args(argv)
if len(args) > 1:
print 'Unknown argument:', args[1:]
option_parser.print_usage()
sys.exit(1)
run_tests_helper.SetLogLevel(options.verbose_count)
if options.out_directory:
cmd_helper.OutDirectory.set(options.out_directory)
if options.use_emulator:
emulator.DeleteAllTempAVDs()
failed_tests_count = Dispatch(options)
# Failures of individual test suites are communicated by printing a
# STEP_FAILURE message.
# Returning a success exit status also prevents the buildbot from incorrectly
# marking the last suite as failed if there were failures in other suites in
# the batch (this happens because the exit status is a sum of all failures
# from all suites, but the buildbot associates the exit status only with the
# most recent step).
if options.exit_code:
return failed_tests_count
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| 35.464583 | 80 | 0.654702 |
f745aabe794cc25ba4b2b6225a3914d2e99e5882 | 9,709 | py | Python | data/params.py | liulisixin/unsupervised-learning-intrinsic-images | 0d4ad151d203885c87122bcc305c787210b28a5c | [
"MIT"
] | 61 | 2018-04-05T21:17:28.000Z | 2019-08-10T08:43:58.000Z | data/params.py | liulisixin/unsupervised-learning-intrinsic-images | 0d4ad151d203885c87122bcc305c787210b28a5c | [
"MIT"
] | 4 | 2018-07-06T13:38:27.000Z | 2019-08-03T09:37:10.000Z | data/params.py | liulisixin/unsupervised-learning-intrinsic-images | 0d4ad151d203885c87122bcc305c787210b28a5c | [
"MIT"
] | 13 | 2019-08-13T02:11:54.000Z | 2021-07-06T09:27:27.000Z | import copy
import json
import random
import hashlib
import numpy as np
class IntrinsicParameters():
""" Global parameter values for the algorithm """
def __init__(self):
#: if True, print progress to the console
self.logging = False
#: if True, use a fixed seed for k-means clustering
self.fixed_seed = False
#: number of iterations for the global loop
self.n_iters = 25
#: number of iterations for the dense CRF
self.n_crf_iters = 10
#: if ``True``, split clusters at the end
self.split_clusters = True
#: Pixels k units apart vertically or horizontally are smoothed.
#: The paper only uses k=1.
self.shading_smooth_k = 1
#: method used to initialize the shading smoothness term:
#: "none": omit this term for the first iteration
#: "image": use the image itself (intensity channel)
#: "constant": constant 0.5
self.shading_blur_init_method = 'none'
#: standard deviation for blurring the shading channel
self.shading_blur_sigma = 0.1
#: exponent by which the blur size decreases each iteration
self.shading_blur_iteration_pow = 1
#: if ``True``, blur in log space. if ``False``, blur in linear
#: space and then convert to log.
self.shading_blur_log = True
#: kmeans initialization: weight given to the intensity channel
self.kmeans_intensity_scale = 0.5
#: kmeans initialization: number of clusters (labels) to use
self.kmeans_n_clusters = 20
#: kmeans initialization: max pixels to consider at once
#: (if the image has more than this, the image is randomly subsampled)
self.kmeans_max_samples = 2000000
#: weight of the absolute reflectance prior
self.abs_reflectance_weight = 0
#: weight of the absolute shading prior
self.abs_shading_weight = 500.0
#: gray-point of absolute shading term
self.abs_shading_gray_point = 0.5
#: if ``True``, compute shading error in log space
self.abs_shading_log = True
#: weight of the shading smoothness unary term
self.shading_target_weight = 20000.0
#: norm used to penalize shading smoothness deviations
self.shading_target_norm = "L2"
#: interpret labels as RGB (intensity with chromaticity), thereby
#: penalizing deviations from grayscale in the shading channel (though
#: the final answer is always grayscale anyway)
self.shading_target_chromaticity = False
#: weight of the chromaticity term: each reflectance intensity is
#: assigned a chromaticity (from the kmeans initialization) and is
#: encouraged to be assigned to image pixels that share the same
#: chromaticity.
self.chromaticity_weight = 0
#: which norm is used for chromaticity
self.chromaticity_norm = "L1"
#: compute reflectance distance in log space for the pairwise terms
self.pairwise_intensity_log = True
#: include chromaticity in pairwise term
self.pairwise_intensity_chromaticity = True
#: weight of the pairwise term
self.pairwise_weight = 10000.0
#: bilateral standard deviation: pairwise pixel distance
self.theta_p = 0.1
#: bilateral standard deviation: intensity
self.theta_l = 0.12
#: bilateral standard deviation: chromaticity
self.theta_c = 0.025
# bilateral standard deviation: Luminance
self.theta_L = 0.025
#: if True, keep the median of all intensities fixed in stage 2. This
#: doesn't really change much, since the solver is damped anyway.
self.stage2_maintain_median_intensity = True
#: which norm to use when minimizing shading differences in stage 2
self.stage2_norm = "L1"
#: if True, interpret labels as RGB instead of intensity
self.stage2_chromaticity = False
#: parameters to be saved/loaded
ALL_PARAMS = [
'n_iters',
'n_crf_iters',
'split_clusters',
'kmeans_n_clusters',
'kmeans_max_samples',
'shading_blur_init_method',
'shading_blur_method',
'shading_blur_log',
'shading_blur_sigma',
'shading_blur_bilateral_sigma_range',
'shading_blur_iteration_pow',
'shading_smooth_k',
'kmeans_intensity_scale',
'abs_reflectance_weight',
'abs_shading_log',
'abs_shading_weight',
'abs_shading_gray_point',
'shading_target_weight',
'shading_target_norm',
'shading_target_chromaticity',
'chromaticity_weight',
'chromaticity_norm',
'pairwise_intensity_log',
'pairwise_intensity_chromaticity',
'pairwise_weight',
'theta_p',
'theta_l',
'theta_c',
'stage2_norm',
'stage2_chromaticity',
'stage2_maintain_median_intensity',
]
#: parameters to be adjusted during training
TRAIN_PARAMS = [
'n_iters',
#'n_crf_iters',
'split_clusters',
'kmeans_intensity_scale',
'kmeans_n_clusters',
'shading_blur_init_method',
#'shading_blur_log',
#'pairwise_intensity_log',
'shading_blur_sigma',
'shading_smooth_k',
'abs_reflectance_weight',
#'abs_shading_log',
'abs_shading_weight',
'abs_shading_gray_point',
'shading_target_weight',
'chromaticity_weight',
'pairwise_weight',
'theta_p',
'theta_l',
'theta_c',
]
#: these parameters are discrete 1-of-N choices
PARAM_CHOICES = {
'shading_blur_init_method': (
"none",
"image",
"constant",
),
}
#: bounds on paramters
PARAM_BOUNDS = {
'n_iters': (1, 30),
'n_crf_iters': (1, 10),
'shading_blur_sigma': (1e-8, 1.0),
'shading_smooth_k': (1, 4),
'kmeans_intensity_scale': (1e-8, 1e10),
'kmeans_n_clusters': (2, 50),
'abs_reflectance_weight': (0, 1e10),
'abs_shading_weight': (0, 1e10),
'abs_shading_gray_point': (0, 1e10),
'shading_target_weight': (0, 1e10),
'chromaticity_weight': (0, 1e10),
'pairwise_weight': (0, 1e16),
'theta_p': (1e-8, 1e10),
'theta_l': (1e-8, 1e10),
'theta_c': (1e-8, 1e10),
}
WEIGHT_PARAMS = [
'abs_reflectance_weight',
'abs_shading_weight',
'shading_target_weight',
'chromaticity_weight',
'pairwise_weight',
]
THETA_PARAMS = [
'theta_p',
'theta_l',
'theta_c',
]
def to_json(self, indent=4, **extra_kwargs):
""" Convert paramters to a JSON-encoded string """
obj = {k: getattr(self, k)
for k in IntrinsicParameters.ALL_PARAMS}
if extra_kwargs:
obj.update(extra_kwargs)
return json.dumps(obj, sort_keys=True, indent=indent)
def __str__(self):
return self.to_json()
def __unicode__(self):
return self.to_json()
@staticmethod
def from_file(filename):
""" Load paramers from ``filename`` (in JSON format) """
return IntrinsicParameters.from_dict(json.load(open(filename)))
@staticmethod
def from_dict(d):
""" Load paramers from a dictionary """
ret = IntrinsicParameters()
for k, v in d.iteritems():
if not k.startswith('_') and k not in IntrinsicParameters.ALL_PARAMS:
raise ValueError("Invalid parameter: %s" % k)
setattr(ret, k, d[k])
return ret
def md5(self):
dump = self.to_json()
m = hashlib.md5()
m.update(dump)
return m.hexdigest()
def save(self, filename, **extra_kwargs):
""" Save paramers to ``filename`` (in JSON format) """
with open(filename, 'w') as f:
f.write(self.to_json(**extra_kwargs))
def clip(self):
""" Clip parameters to be within bounds """
for k, bounds in IntrinsicParameters.PARAM_BOUNDS.iteritems():
v = getattr(self, k)
t = type(v)
setattr(self, k, t(np.clip(v, bounds[0], bounds[1])))
def random_perterbation(
self, mean_num_params=8, std_delta=0.5, seed=None):
""" Return a new set of parameters with a random perterbation. The
number of variables modified is Poisson-distributed with mean
``mean_num_params`` , and each changed variable is multiplied by exp(x)
where x is normally distributed with mean 0 and standard deviation
``std_delta`` """
if seed is not None:
random.seed(seed)
np.random.seed(seed)
# choose a random subset to modify
num_params = len(IntrinsicParameters.TRAIN_PARAMS)
n = np.clip(np.random.poisson(mean_num_params), 1, num_params)
keys = random.sample(IntrinsicParameters.TRAIN_PARAMS, n)
# modify the subset
ret = copy.deepcopy(self)
for k in keys:
v = getattr(ret, k)
t = type(v)
if k in IntrinsicParameters.PARAM_CHOICES:
v = random.choice(IntrinsicParameters.PARAM_CHOICES[k])
elif t == bool:
v = random.choice((False, True))
else:
v *= np.exp(random.normalvariate(0, std_delta))
if t in (int, long):
v = round(v)
setattr(ret, k, t(v))
ret.clip()
return ret
| 31.21865 | 81 | 0.604697 |
f745c68cb1628be5bb8685f132df4cbb3d3c320f | 6,432 | py | Python | utils/video.py | gosticks/body-pose-animation | eb1b5876a845f277d43bfc18dcd48c4a9c694c06 | [
"MIT"
] | null | null | null | utils/video.py | gosticks/body-pose-animation | eb1b5876a845f277d43bfc18dcd48c4a9c694c06 | [
"MIT"
] | null | null | null | utils/video.py | gosticks/body-pose-animation | eb1b5876a845f277d43bfc18dcd48c4a9c694c06 | [
"MIT"
] | null | null | null | from dataset import SMPLyDataset
import pickle
from typing import Tuple
from model import SMPLyModel
from renderer import DefaultRenderer
import cv2
from tqdm import tqdm
import numpy as np
from scipy import interpolate
def make_video(images, video_name: str, fps=30, ext: str = "mp4", post_process_frame=None):
images = np.array(images)
width = images.shape[2]
height = images.shape[1]
fourcc = 0
if ext == "mp4":
fourcc = cv2.VideoWriter_fourcc(*'MP4V')
video_name = video_name + "." + ext
video = cv2.VideoWriter(
video_name, fourcc, fps, (width, height), True)
for idx in tqdm(range(len(images))):
img = images[idx]
im_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
if post_process_frame is not None:
            im_rgb = post_process_frame(img=im_rgb, idx=idx)
video.write(im_rgb)
video.release()
print("video saved to:", video_name)
def video_from_pkl(filename, video_name, config, ext: str = "mp4"):
with open(filename, "rb") as fp:
model_outs = pickle.load(fp)
save_to_video(model_outs, video_name, config)
def save_to_video(
sample_output: Tuple,
video_name: str,
config: object,
fps=30,
include_thumbnail=True,
thumbnail_size=0.2,
start_frame_offset=0,
dataset: SMPLyDataset = None,
interpolation_target=None
):
"""
Renders a video from pose, camera tuples. Additionally interpolation can be used to smooth out the animation
Args:
sample_output (Tuple): A tuple of body pose vertices and a camera transformation
video_name (str): name for the resulting video file (can also be a path)
config (object): general run config
fps (int, optional): animation base fps. Defaults to 30.
        interpolation_target (int, optional): expand animation fps via interpolation to this target. Defaults to None.
"""
r = DefaultRenderer(
offscreen=True
)
r.start()
model_anim = SMPLyModel.model_from_conf(config)
if interpolation_target is not None:
if interpolation_target % fps != 0:
print("[error] interpolation target must be a multiple of fps")
return
inter_ratio = int(interpolation_target / fps)
num_intermediate = inter_ratio - 1
sample_output = interpolate_poses(sample_output, num_intermediate)
else:
sample_output = [
(
out.vertices.detach().cpu().numpy()[0],
cam
) for out, cam in sample_output]
frames = []
print("[export] rendering animation frames...", sample_output[0][0].shape)
# just use the first transform
cam_transform = sample_output[0][1]
for vertices, cam_trans in tqdm(sample_output):
r.render_model_geometry(
faces=model_anim.faces,
vertices=vertices,
pose=cam_trans # cam_transform,
)
frames.append(r.get_snapshot())
target_fps = fps
if interpolation_target is not None:
target_fps = interpolation_target
def post_process_frame(img, idx: int):
if not include_thumbnail:
return img
# account for start from frames not zero
idx = start_frame_offset + idx
frame_idx = idx
if interpolation_target is not None:
# account for possible interpolation
frame_idx = int(idx / inter_ratio)
img_path = dataset.get_image_path(frame_idx)
overlay = cv2.imread(img_path)
if overlay is None:
print("[error] image could not be ", img_path)
return img
overlay = cv2.resize(
overlay,
dsize=(
int(overlay.shape[1] * thumbnail_size),
int(overlay.shape[0] * thumbnail_size)
))
img[0:overlay.shape[0], 0:overlay.shape[1]] = overlay
return img
make_video(frames, video_name, target_fps,
post_process_frame=post_process_frame)
def make_video_with_pip(frames, pip_image_path, video_name: str, fps=30, ext: str = "mp4", image_size=0.2):
"""renders a video with a pip frame in the corner
"""
def post_process_frame(img, idx: int):
overlay = cv2.imread(pip_image_path)
if overlay is None:
print("[error] image could not be ", pip_image_path)
return img
overlay = cv2.resize(
overlay,
dsize=(
int(overlay.shape[1] * image_size),
int(overlay.shape[0] * image_size)
))
img[0:overlay.shape[0], 0:overlay.shape[1]] = overlay
return img
make_video(frames, video_name, fps,
post_process_frame=post_process_frame)
def interpolate_poses(poses, num_intermediate=5):
"""
Interpolate vertices and cameras between pairs of frames by adding intermediate results
:param poses: optimized poses
:param num_intermediate: amount of intermediate results to insert between each pair of frames
:return: interpolated poses, list of tuples (body_pose, camera_pose)
"""
new_poses = []
for i in range(len(poses) - 1):
if len(poses) < 2:
return poses
else:
# Shape of one matrix of vertices = torch.Size([1, 10475, 3])
pose_1 = poses[i][0].vertices.detach().cpu().numpy()
pose_2 = poses[i + 1][0].vertices.detach().cpu().numpy()
poses_pair = np.concatenate((pose_1, pose_2), axis=0)
camera_1 = np.expand_dims(poses[i][1], axis=0)
camera_2 = np.expand_dims(poses[i + 1][1], axis=0)
camera_pair = np.concatenate((camera_1, camera_2), axis=0)
x = np.arange(poses_pair.shape[0])
f1 = interpolate.interp1d(x, poses_pair, axis=0)
f2 = interpolate.interp1d(x, camera_pair, axis=0)
evenly_spaced_points = np.linspace(
x[0], x[-1], (poses_pair.shape[0] - 1) * (num_intermediate + 1) + 1)
new_frames = f1(evenly_spaced_points)
new_cameras = f2(evenly_spaced_points)
arr = [(new_frames[i], new_cameras[i])
for i in range(new_frames.shape[0])]
if 0 < i < len(poses) - 1:
# remove first frame that was already added in the last interpolation
arr.pop(0)
new_poses += arr
return new_poses
| 32.649746 | 116 | 0.619403 |
f745d61facecf176819f6e0b1334bf6b5d46de14 | 29,170 | py | Python | tests/unittests/test_network_install.py | cloudify-cosmo/cloudify-nsx-plugin | d7b2abbe384e55aaf47b2c8474ab07f622eb83b5 | [
"Apache-2.0"
] | 2 | 2017-03-08T21:44:54.000Z | 2019-01-16T06:00:16.000Z | tests/unittests/test_network_install.py | cloudify-cosmo/cloudify-nsx-plugin | d7b2abbe384e55aaf47b2c8474ab07f622eb83b5 | [
"Apache-2.0"
] | 3 | 2017-01-26T13:26:31.000Z | 2017-02-03T14:51:45.000Z | tests/unittests/test_network_install.py | cloudify-cosmo/cloudify-nsx-plugin | d7b2abbe384e55aaf47b2c8474ab07f622eb83b5 | [
"Apache-2.0"
] | 5 | 2016-12-28T15:26:02.000Z | 2017-01-30T08:46:10.000Z | # Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import library.test_nsx_base as test_nsx_base
import pytest
import mock
import copy
import cloudify_nsx.network.dhcp_bind as dhcp_bind
import cloudify_nsx.network.dhcp_pool as dhcp_pool
import cloudify_nsx.network.dlr_dgw as dlr_dgw
import cloudify_nsx.network.bgp_neighbour_filter as bgp_neighbour_filter
import cloudify_nsx.network.esg_firewall as esg_firewall
import cloudify_nsx.network.esg_gateway as esg_gateway
import cloudify_nsx.network.esg_interface as esg_interface
import cloudify_nsx.network.esg_nat as esg_nat
import cloudify_nsx.network.dlr_bgp_neighbour as dlr_bgp_neighbour
import cloudify_nsx.network.dlr_interface as dlr_interface
import cloudify_nsx.network.lswitch as lswitch
import cloudify_nsx.network.ospf_area as ospf_area
import cloudify_nsx.network.ospf_interface as ospf_interface
import cloudify_nsx.network.esg_route as esg_route
import cloudify_nsx.network.relay as relay
import cloudify_nsx.network.routing_ip_prefix as routing_ip_prefix
import cloudify_nsx.network.routing_redistribution as routing_redistribution
from cloudify.state import current_ctx
class NetworkInstallTest(test_nsx_base.NSXBaseTest):
def setUp(self):
super(NetworkInstallTest, self).setUp()
self._regen_ctx()
def tearDown(self):
current_ctx.clear()
super(NetworkInstallTest, self).tearDown()
@pytest.mark.internal
@pytest.mark.unit
def test_esg_firewall_install(self):
"""Check create esg firewall rule"""
# everything by default
self._common_install_create(
"esg_id|id", esg_firewall.create,
{"rule": {"esg_id": "esg_id", "action": "deny"}},
create_args=['firewallRules'],
create_kwargs={
"request_body_dict": {
'firewallRules': {
'firewallRule': {
'direction': None,
'name': None,
'application': None,
'loggingEnabled': 'false',
'matchTranslated': 'false',
'destination': None,
'enabled': 'true',
'source': None,
'action': 'deny'
}
}
},
"uri_parameters": {'edgeId': 'esg_id'}
},
create_response=test_nsx_base.SUCCESS_RESPONSE_ID
)
# Additional values(non default)
self._common_install_create(
"other_esg_id|id", esg_firewall.create,
{"rule": {
"esg_id": "other_esg_id",
"action": "accept",
'loggingEnabled': True,
'matchTranslated': True,
'enabled': False,
'ruleTag': 42,
'description': 'Some Rule',
'source': 'any',
'direction': 'in',
'destination': '8.8.8.8',
'name': 'rule'
}},
create_args=['firewallRules'],
create_kwargs={
"request_body_dict": {
'firewallRules': {
'firewallRule': {
'direction': 'in',
'name': 'rule',
'application': None,
'loggingEnabled': 'true',
'matchTranslated': 'true',
'destination': '8.8.8.8',
'enabled': 'false',
'source': 'any',
'action': 'accept',
'ruleTag': '42',
'description': 'Some Rule'
}
}
},
"uri_parameters": {'edgeId': 'other_esg_id'}
},
create_response=test_nsx_base.SUCCESS_RESPONSE_ID
)
@pytest.mark.internal
@pytest.mark.unit
def test_esg_interface_install(self):
"""Check create esg interface"""
self._common_install_extract_or_read_and_update(
'id|esg_id',
esg_interface.create,
{'interface': {
"esg_id": "esg_id",
"ifindex": "id",
"portgroup_id": "portgroup_id"
}},
read_args=['vnic'], read_kwargs={
'uri_parameters': {'index': 'id', 'edgeId': 'esg_id'}
},
read_response={
'status': 204,
'body': test_nsx_base.EDGE_INTERFACE_BEFORE
},
update_args=['vnic'],
update_kwargs={
'request_body_dict': test_nsx_base.EDGE_INTERFACE_AFTER,
'uri_parameters': {'index': 'id', 'edgeId': 'esg_id'}
},
update_response=test_nsx_base.SUCCESS_RESPONSE
)
@pytest.mark.internal
@pytest.mark.unit
def test_esg_interface_install_all_fields(self):
"""Check create esg interface"""
self._common_install_extract_or_read_and_update(
'id|esg_id',
esg_interface.create,
{'interface': {
"esg_id": "esg_id",
"ifindex": "id",
"name": "name",
"netmask": "255.255.255.0",
"ipaddr": "192.168.3.127",
"secondary_ips": "192.168.3.128",
'prefixlen': "24",
'enable_send_redirects': 'true',
'is_connected': 'true',
'enable_proxy_arp': 'true',
"portgroup_id": "portgroup_id"
}},
read_args=['vnic'], read_kwargs={
'uri_parameters': {'index': 'id', 'edgeId': 'esg_id'}
},
read_response={
'status': 204,
'body': test_nsx_base.EDGE_INTERFACE_BEFORE
},
update_args=['vnic'],
update_kwargs={
'request_body_dict': {
'vnic': {
'portgroupId': 'portgroup_id',
'portgroupName': None,
'type': 'internal',
'enableProxyArp': 'true',
'name': 'name',
'addressGroups': {
'addressGroup': {
'secondaryAddresses': {
'ipAddress': '192.168.3.128'
},
'primaryAddress': '192.168.3.127',
'subnetMask': '255.255.255.0',
'subnetPrefixLength': '24'
}
},
'isConnected': 'true',
'enableSendRedirects': 'true',
'mtu': 1500
}
},
'uri_parameters': {'index': 'id', 'edgeId': 'esg_id'}
},
update_response=test_nsx_base.SUCCESS_RESPONSE
)
@pytest.mark.internal
@pytest.mark.unit
def test_esg_nat_install(self):
"""Check create esg nat rule"""
self._common_install_extract_or_read_and_update(
'esg_id|id', esg_nat.create,
{'rule': {
"esg_id": "esg_id",
"action": "action",
"originalAddress": "originalAddress",
"translatedAddress": "translatedAddress"
}},
extract_args=['edgeNatRules', 'create'], extract_kwargs={},
extract_response={
'natRules': {
'natRule': {}
}
},
create_args=['edgeNatRules'],
create_kwargs={
'uri_parameters': {'edgeId': "esg_id"},
'request_body_dict': test_nsx_base.ESG_NAT_AFTER
},
create_response=test_nsx_base.SUCCESS_RESPONSE_ID
)
self._common_install_extract_or_read_and_update(
'esg_id|id', esg_nat.create,
{'rule': {
"esg_id": "esg_id",
"action": "action",
"originalAddress": "originalAddress",
"translatedAddress": "translatedAddress",
"description": "3",
'vnic': '1',
'ruleTag': '2',
"loggingEnabled": 'true',
'enabled': 'false'
}},
extract_args=['edgeNatRules', 'create'], extract_kwargs={},
extract_response={
'natRules': {
'natRule': {}
}
},
create_args=['edgeNatRules'],
create_kwargs={
'uri_parameters': {'edgeId': "esg_id"},
'request_body_dict': {
'natRules': {
'natRule': {
'translatedPort': 'any',
'action': 'action',
'originalAddress': 'originalAddress',
'translatedAddress': 'translatedAddress',
'vnic': '1',
'ruleTag': '2',
'description': '3',
'enabled': 'false',
'protocol': 'any',
'originalPort': 'any',
'loggingEnabled': 'true'
}
}
}
},
create_response=test_nsx_base.SUCCESS_RESPONSE_ID
)
@pytest.mark.unit
def test_dlr_bgp_neighbour_dlr_install(self):
"""Check define dlr bgp neighbour"""
self._common_use_existing_without_run(
'some_id',
dlr_bgp_neighbour.create_dlr,
{'neighbour': {"dlr_id": "dlr_id",
"ipAddress": "ipAddress",
'remoteAS': 'remoteAS',
'protocolAddress': 'protocolAddress',
'forwardingAddress': 'forwardingAddress'}})
self._common_install_extract_or_read_and_update(
'dlr_id|ip|remoteAS|protocolIp|forwardingIp',
dlr_bgp_neighbour.create_dlr,
{'neighbour': {"dlr_id": "dlr_id",
"ipAddress": "ip",
'remoteAS': 'remoteAS',
'forwardingAddress': 'forwardingIp',
'protocolAddress': 'protocolIp'}},
read_args=['routingBGP'],
read_kwargs={'uri_parameters': {'edgeId': 'dlr_id'}},
read_response={
'body': test_nsx_base.DLR_BGP_NEIGHBOUR_BEFORE,
'status': 204
},
update_args=['routingBGP'],
update_kwargs={
'request_body_dict': test_nsx_base.DLR_BGP_NEIGHBOUR_AFTER,
'uri_parameters': {'edgeId': 'dlr_id'}
},
update_response=test_nsx_base.SUCCESS_RESPONSE_ID
)
@pytest.mark.internal
@pytest.mark.unit
def test_dlr_bgp_neighbour_esg_install(self):
"""Check define esg bgp neighbour"""
self._common_use_existing_without_run(
'esg_id|ip|remoteAS||',
dlr_bgp_neighbour.create_esg,
{'neighbour': {"dlr_id": "dlr_id",
"ipAddress": "ipAddress",
'remoteAS': 'remoteAS'}})
self._common_install_extract_or_read_and_update(
'esg_id|ip|remoteAS||',
dlr_bgp_neighbour.create_esg,
{'neighbour': {"dlr_id": "esg_id",
"ipAddress": "ip",
'remoteAS': 'remoteAS'}},
read_args=['routingBGP'],
read_kwargs={'uri_parameters': {'edgeId': 'esg_id'}},
read_response={
'body': test_nsx_base.EDGE_BGP_NEIGHBOUR_BEFORE,
'status': 204
},
update_args=['routingBGP'],
update_kwargs={
'request_body_dict': test_nsx_base.EDGE_BGP_NEIGHBOUR_AFTER,
'uri_parameters': {'edgeId': 'esg_id'}
},
update_response=test_nsx_base.SUCCESS_RESPONSE_ID
)
@pytest.mark.internal
@pytest.mark.unit
def test_dlr_dgw_install(self):
"""Check create dlr dgw"""
self._common_install_extract_or_read_and_update(
'dlr_id', dlr_dgw.create,
{'gateway': {"dlr_id": "dlr_id", "address": "address"}},
extract_args=['routingConfig', 'update'], extract_kwargs={},
extract_response=test_nsx_base.ROUTING_CONFIG_UPDATE_EXTRACT,
update_args=['routingConfig'],
update_kwargs={
'uri_parameters': {'edgeId': "dlr_id"},
'request_body_dict': {
'routing': test_nsx_base.EDG_STATIC_ROUTING_GATEWAY_AFTER
}
},
update_response=test_nsx_base.SUCCESS_RESPONSE_ID
)
@pytest.mark.internal
@pytest.mark.unit
def test_dhcp_pool_install(self):
"""Check create dhcp pool"""
self._common_use_existing_without_run(
'some_id', dhcp_pool.create,
{'pool': {'esg_id': 'esg_id',
'ip_range': 'ip_range'}})
self._common_install_create(
'esg_id|id', dhcp_pool.create,
{'pool': {'esg_id': 'esg_id',
'ip_range': '192.168.5.128-192.168.5.250',
'default_gateway': '192.168.5.1',
'subnet_mask': '255.255.255.0',
'domain_name': 'internal.test',
'dns_server_1': '8.8.8.8',
'dns_server_2': '192.168.5.1',
'lease_time': 'infinite',
'auto_dns': 'true'}},
create_args=['dhcpPool'],
create_kwargs={
'request_body_dict': {
'ipPool': {
'domainName': 'internal.test',
'leaseTime': 'infinite',
'primaryNameServer': '8.8.8.8',
'secondaryNameServer': '192.168.5.1',
'autoConfigureDNS': 'true',
'subnetMask': '255.255.255.0',
'ipRange': '192.168.5.128-192.168.5.250',
'defaultGateway': '192.168.5.1'
}
},
'uri_parameters': {'edgeId': 'esg_id'}
},
create_response=test_nsx_base.SUCCESS_RESPONSE_ID
)
@pytest.mark.internal
@pytest.mark.unit
def test_dhcp_bind_install(self):
"""Check insert binding rule to dhcp ip"""
self._common_use_existing_without_run(
"some_id", dhcp_bind.create,
{'bind': {"esg_id": "esg_id",
"hostname": "hostname",
"ip": "ip"}})
self._common_install(
"some_id", dhcp_bind.create,
{'bind': {"esg_id": "esg_id",
"hostname": "hostname",
"ip": "ip"}})
@pytest.mark.internal
@pytest.mark.unit
def test_dhcp_bind_install_mac(self):
"""Check insert binding rule to dhcp ip"""
self._common_install_create(
'esg_id|id', dhcp_bind.create,
{'bind': {'esg_id': 'esg_id',
'mac': '11:22:33:44:55:66',
'hostname': 'secret.server',
'ip': '192.168.5.251',
'default_gateway': '192.168.5.1',
'subnet_mask': '255.255.255.0',
'domain_name': 'secret.internal.test',
'dns_server_1': '8.8.8.8',
'dns_server_2': '192.168.5.1',
'lease_time': 'infinite',
'auto_dns': 'true'}},
create_args=['dhcpStaticBinding'],
create_kwargs={
'request_body_dict': {
'staticBinding': {
'subnetMask': '255.255.255.0',
'domainName': 'secret.internal.test',
'primaryNameServer': '8.8.8.8',
'macAddress': '11:22:33:44:55:66',
'leaseTime': 'infinite',
'secondaryNameServer': '192.168.5.1',
'hostname': 'secret.server',
'defaultGateway': '192.168.5.1',
'ipAddress': '192.168.5.251',
'autoConfigureDNS': 'true'
}
},
'uri_parameters': {'edgeId': 'esg_id'}
},
create_response=test_nsx_base.SUCCESS_RESPONSE_ID
)
@pytest.mark.internal
@pytest.mark.unit
def test_dhcp_bind_install_vm(self):
"""Check insert binding rule to dhcp ip"""
self._common_install_create(
'esg_id|id', dhcp_bind.create,
{'bind': {'esg_id': 'esg_id',
'vm_id': 'vm_id',
'vnic_id': 'vnic_id',
'hostname': 'secret.server',
'ip': '192.168.5.251',
'default_gateway': '192.168.5.1',
'subnet_mask': '255.255.255.0',
'domain_name': 'secret.internal.test',
'dns_server_1': '8.8.8.8',
'dns_server_2': '192.168.5.1',
'lease_time': 'infinite',
'auto_dns': 'true'}},
create_args=['dhcpStaticBinding'],
create_kwargs={
'request_body_dict': {
'staticBinding': {
'subnetMask': '255.255.255.0',
'domainName': 'secret.internal.test',
'primaryNameServer': '8.8.8.8',
'vnicId': 'vnic_id',
'vmId': 'vm_id',
'secondaryNameServer': '192.168.5.1',
'hostname': 'secret.server',
'ipAddress': '192.168.5.251',
'defaultGateway': '192.168.5.1',
'leaseTime': 'infinite',
'autoConfigureDNS': 'true'
}
},
'uri_parameters': {'edgeId': 'esg_id'}
},
create_response=test_nsx_base.SUCCESS_RESPONSE_ID
)
@pytest.mark.internal
@pytest.mark.unit
def test_bgp_neighbour_filter_install(self):
"""Check create bgp_neighbour_filter"""
self._common_use_existing_without_run(
'net|esg_id|ip|remoteAS|protocolIp|forwardingIp',
bgp_neighbour_filter.create,
{'filter': {
"neighbour_id": "neighbour_id",
"action": "deny",
"direction": "in",
"network": "network"}})
self._common_install_extract_or_read_and_update(
'net|esg_id|ip|remoteAS|protocolIp|forwardingIp',
bgp_neighbour_filter.create,
{'filter': {
"neighbour_id": "esg_id|ip|remoteAS|protocolIp|forwardingIp",
"action": "deny",
"direction": "in",
"network": "net",
"ipPrefixGe": "ipPrefixGe",
"ipPrefixLe": "ipPrefixLe"
}},
# read
read_args=['routingBGP'],
read_kwargs={'uri_parameters': {'edgeId': 'esg_id'}},
read_response={
'body': test_nsx_base.DLR_BGP_NEIGHBOUR_WITH_FILTER_BEFORE,
'status': 204
},
# update
update_args=['routingBGP'],
update_kwargs={
'request_body_dict':
test_nsx_base.DLR_BGP_NEIGHBOUR_WITH_FILTER_AFTER,
'uri_parameters': {'edgeId': 'esg_id'}
},
update_response=test_nsx_base.SUCCESS_RESPONSE_ID
)
@pytest.mark.internal
@pytest.mark.unit
def test_dlr_interface_install(self):
"""Check create dlr interface"""
self._common_use_existing_without_run(
'id|dlr_id',
dlr_interface.create,
{'interface': {
"dlr_id": "dlr_id",
"interface_ls_id": "interface_ls_id",
"interface_ip": "interface_ip",
"interface_subnet": "interface_subnet"
}})
self._common_install_extract_or_read_and_update(
'id|dlr_id',
dlr_interface.create,
{'interface': {
"dlr_id": "dlr_id",
"interface_ls_id": "interface_ls_id",
"interface_ip": "interface_ip",
"interface_subnet": "interface_subnet"
}},
extract_args=['interfaces', 'create'], extract_kwargs={},
extract_response={
'interfaces': {
'interface': {
'addressGroups': {
'addressGroup': {
'primaryAddress': {
}
}
}
}
}
},
create_args=['interfaces'],
create_kwargs={
'query_parameters_dict': {'action': 'patch'},
'request_body_dict': test_nsx_base.DLR_INTERFACE_CREATE,
'uri_parameters': {'edgeId': 'dlr_id'}
},
create_response={
'status': 204,
'body': test_nsx_base.DLR_INTERFACE_CREATE_RESPONSE
}
)
@pytest.mark.internal
@pytest.mark.unit
def test_lswitch_install(self):
"""Check create logical swicth"""
fake_client, _, kwargs = self._kwargs_regen_client(
"id", {
"switch": {
"name": "name",
"transport_zone": "transport_zone"
}
}
)
fake_client.read = mock.Mock(
return_value=copy.deepcopy({
'status': 204,
'body': test_nsx_base.LSWITCH
})
)
with mock.patch(
'cloudify_nsx.library.nsx_common.NsxClient',
mock.MagicMock(return_value=fake_client)
):
lswitch.create(**kwargs)
self.assertEqual(
self.fake_ctx.instance.runtime_properties['vsphere_network_id'],
"some_port_id"
)
@pytest.mark.internal
@pytest.mark.unit
def test_dhcp_relay_install(self):
"""Check create dhcp relay(dlr)"""
self._common_install(
"some_id", relay.create,
{
'relay': {
"dlr_id": "dlr_id"
}
}
)
# without resource_id
self._regen_ctx()
fake_client, _, kwargs = self._kwargs_regen_client(
None, {
'relay': {
"dlr_id": "dlr_id"
}
}
)
fake_dlr_esg = mock.Mock()
fake_dlr_esg.update_dhcp_relay = mock.MagicMock()
with mock.patch(
'cloudify_nsx.library.nsx_common.NsxClient',
mock.MagicMock(return_value=fake_client)
):
with mock.patch(
'cloudify_nsx.network.relay.cfy_dlr',
fake_dlr_esg
):
relay.create(**kwargs)
fake_dlr_esg.update_dhcp_relay.assert_called_with(
fake_client, 'dlr_id', {}, {}
)
@pytest.mark.internal
@pytest.mark.unit
def test_esg_route_install(self):
"""Check create esg route"""
self._common_install_extract_or_read_and_update(
"esg_id|network|192.168.3.10", esg_route.create,
{'route': {"esg_id": "esg_id", "network": "network",
"next_hop": "192.168.3.10"}},
# read
read_args=['routingConfigStatic'],
read_kwargs={'uri_parameters': {'edgeId': "esg_id"}},
read_response={
'status': 204,
'body': test_nsx_base.EDG_STATIC_ROUTING_BEFORE
},
# update
update_args=['routingConfigStatic'],
update_kwargs={
'uri_parameters': {'edgeId': "esg_id"},
'request_body_dict': {
'staticRouting': {
'staticRoutes': {
'route': [{
'description': None,
'network': 'network',
'mtu': '1500',
'vnic': None,
'nextHop': "192.168.3.10",
'adminDistance': '1'
}]
},
'defaultRoute': {
'vnic': None,
'gatewayAddress': 'address',
'description': None,
'mtu': None
}
}
}
},
update_response=test_nsx_base.SUCCESS_RESPONSE_ID
)
@pytest.mark.internal
@pytest.mark.unit
def test_esg_gateway_install(self):
"""Check create esg gateway"""
self._common_install_extract_or_read_and_update(
"esg_id|dgw_ip", esg_gateway.create,
{"gateway": {"esg_id": "esg_id", "dgw_ip": "dgw_ip"}},
# read
read_args=['routingConfigStatic'],
read_kwargs={'uri_parameters': {'edgeId': "esg_id"}},
read_response={
'status': 204,
'body': test_nsx_base.EDG_STATIC_ROUTING_BEFORE
},
# update
update_args=['routingConfigStatic'],
update_kwargs={
'uri_parameters': {'edgeId': "esg_id"},
'request_body_dict': {
'staticRouting': {
'staticRoutes': {},
'defaultRoute': {
'mtu': '1500',
'vnic': None,
'adminDistance': '1',
'gatewayAddress': 'dgw_ip'
}
}
}
},
update_response=test_nsx_base.SUCCESS_RESPONSE_ID
)
@pytest.mark.internal
@pytest.mark.unit
def test_ospf_interface_install(self):
"""Check create ospf interface"""
self._common_install(
"some_id", ospf_interface.create,
{'interface': {"dlr_id": "dlr_id",
"areaId": "areaId",
"vnic": "vnic"}})
@pytest.mark.internal
@pytest.mark.unit
def test_ospf_area_install(self):
"""Check create ospf area"""
self._common_install(
"some_id", ospf_area.create,
{"area": {"dlr_id": "dlr_id",
"areaId": "areaId",
"type": "nssa"}})
@pytest.mark.internal
@pytest.mark.unit
def test_routing_ip_install(self):
"""Check create routing ip prefix"""
self._common_install(
"some_id", routing_ip_prefix.create,
{
'prefix': {
"dlr_id": "dlr_id",
"name": "name",
"ipAddress": "ipAddress"
}
}
)
@pytest.mark.internal
@pytest.mark.unit
def test_routing_redistribution_install(self):
"""Check create routing redistribution rule"""
self._common_install(
"some_id", routing_redistribution.create,
{
'rule': {
"action": "deny",
"type": "bgp",
"dlr_id": "dlr_id"
}
}
)
if __name__ == '__main__':
unittest.main()
| 37.63871 | 77 | 0.472335 |
f745e6f3a37289bf61f2bd949c9ce1767d1c69a0 | 665 | py | Python | manage.py | KamarulAdha/DRF-ML | 2036ac6b3b3eb593d28ab89c948bf4ad2a4aa6dd | [
"MIT"
] | null | null | null | manage.py | KamarulAdha/DRF-ML | 2036ac6b3b3eb593d28ab89c948bf4ad2a4aa6dd | [
"MIT"
] | null | null | null | manage.py | KamarulAdha/DRF-ML | 2036ac6b3b3eb593d28ab89c948bf4ad2a4aa6dd | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'deploy_ml.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.913043 | 73 | 0.679699 |
f745fd33ee13aea7516b847e8a0906cf09e91960 | 3,265 | py | Python | PyFunceble/converter/internal_url.py | NeolithEra/PyFunceble | 58db861d36224f279a460f4959aaa2e140ce749f | [
"MIT"
] | null | null | null | PyFunceble/converter/internal_url.py | NeolithEra/PyFunceble | 58db861d36224f279a460f4959aaa2e140ce749f | [
"MIT"
] | null | null | null | PyFunceble/converter/internal_url.py | NeolithEra/PyFunceble | 58db861d36224f279a460f4959aaa2e140ce749f | [
"MIT"
] | null | null | null | """
The tool to check the availability or syntax of domains, IPv4, IPv6 or URL.
::
██████╗ ██╗ ██╗███████╗██╗ ██╗███╗ ██╗ ██████╗███████╗██████╗ ██╗ ███████╗
██╔══██╗╚██╗ ██╔╝██╔════╝██║ ██║████╗ ██║██╔════╝██╔════╝██╔══██╗██║ ██╔════╝
██████╔╝ ╚████╔╝ █████╗ ██║ ██║██╔██╗ ██║██║ █████╗ ██████╔╝██║ █████╗
██╔═══╝ ╚██╔╝ ██╔══╝ ██║ ██║██║╚██╗██║██║ ██╔══╝ ██╔══██╗██║ ██╔══╝
██║ ██║ ██║ ╚██████╔╝██║ ╚████║╚██████╗███████╗██████╔╝███████╗███████╗
╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝ ╚═════╝╚══════╝╚═════╝ ╚══════╝╚══════╝
Provides a way to convert our internal URL.
Author:
Nissar Chababy, @funilrys, contactTATAfunilrysTODTODcom
Special thanks:
https://pyfunceble.github.io/special-thanks.html
Contributors:
https://pyfunceble.github.io/contributors.html
Project link:
https://github.com/funilrys/PyFunceble
Project documentation:
https://pyfunceble.readthedocs.io///en/master/
Project homepage:
https://pyfunceble.github.io/
License:
::
MIT License
Copyright (c) 2017, 2018, 2019, 2020 Nissar Chababy
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from PyFunceble.abstracts import Version
from PyFunceble.exceptions import WrongParameterType
from .base import ConverterBase
class InternalUrl(ConverterBase):
"""
Converter of the internal URLs.
.. note::
        The internal URLs are actually the URLs that have nothing to do
        with what we are going to test.
They are only relevant for the software itself.
:param str data_to_convert: The data to convert
"""
def __init__(self, data_to_convert):
if not isinstance(data_to_convert, str):
raise WrongParameterType(
f"<data_to_convert> should be {str}, {type(data_to_convert)} given."
)
super().__init__(data_to_convert)
self.converted_data = self.to_right_url()
def to_right_url(self):
"""
Process the conversion to the right URL.
"""
if Version.is_local_dev():
return self.data_to_convert.replace("master", "dev")
return self.data_to_convert.replace("dev", "master")
| 33.316327 | 88 | 0.596631 |
f746028ed415eb45b51d72258c9ab10b0953f241 | 14,479 | py | Python | rule_based_decision_making.py | QianLabUSC/cognitively-enhanced-decision-framework | 1797ddd41edcbfbfafca5b599ff7ab70f5fdc37f | [
"MIT"
] | null | null | null | rule_based_decision_making.py | QianLabUSC/cognitively-enhanced-decision-framework | 1797ddd41edcbfbfafca5b599ff7ab70f5fdc37f | [
"MIT"
] | 3 | 2021-06-28T15:35:21.000Z | 2021-07-01T06:02:57.000Z | rule_based_decision_making.py | QianLabUSC/cognitively-enhanced-decision-framework | 1797ddd41edcbfbfafca5b599ff7ab70f5fdc37f | [
"MIT"
] | null | null | null | # This FILE is part of multi-legged robot field exploration model
# rule_based_decision_making.py - rule-based decision making over data from the env wrapper
#
# This program is explained by roboLAND at the University of Southern California.
# Please notify the source if you use it
#
# Copyright(c) 2021-2025 Ryoma Liu
# Email: 1196075299@qq.com
from env_wrapper import *
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import optimize
import random
import matplotlib.pylab as pylab
import numpy as np
from PIL import Image
from math import *
class rule_state_machine:
def __init__(self):
'''Initial env info and parameters for decision making
'''
self.states = ['Initial', 'Exploration', 'Verification']
self.current_state = 0
self.env = ENV()
self.hypo_locations = (['No','Feature_low','Feature_middle',
'Feature_high'])
self.hypo_location = 0
self.hypo_samples = (['No','Feature_low', 'Feature_middle',
'Feature_high'])
self.hypo_sample = 0
self.information_matrix = []
self.accuracy_matrix = []
self.fitting_error_matrix = []
def set_init_hypo(self, hypo_location, hypo_sample):
self.hypo_location = hypo_location
self.hypo_sample = hypo_sample
def choose_initial_template(self):
'''choose initial template
        According to the initial knowledge and hypotheses, a human will select an
        experience-based data sample distribution.
Args:
            self.hypo_location: initial hypo about data location feature
self.hypo_sample : initial hypo about data sample feature
Returns:
change the initial template in env wrapper
'''
if(self.hypo_location == 0):
location_index = [1,9,13,21]
elif(self.hypo_location == 1):
location_index = [1,4,7,11,16,21]
elif(self.hypo_location == 2):
location_index = [1,5,9,12,15,21]
elif(self.hypo_location == 3):
location_index = [1,6,11,14,17,20]
if(self.hypo_sample == 0):
sample_index = [3,3,3,3]
elif(self.hypo_sample == 1):
sample_index = [5,5,3,3,3,3]
elif(self.hypo_sample == 2):
sample_index = [3,3,5,5,3,3]
elif(self.hypo_sample == 3):
sample_index = [3,3,3,3,5,5]
initial_action = [location_index, sample_index]
self.env.initiate_template(initial_action)
def handle_information_coverage(self):
sample_state = self.env.get_state()
sample_loc = np.array(sample_state[0])
sample_number = np.array(sample_state[1])
sort_index = np.argsort(sample_loc)
sample_loc = sample_loc[sort_index]
sample_number = sample_number[sort_index]
unique_index = np.unique(sample_loc, return_index = True)
sample_loc = sample_loc[unique_index[1]]
sample_number = sample_number[unique_index[1]]
sample_state = [sample_loc, sample_number]
print(sample_state)
self.information_matrix = np.zeros(22) #information matrix in location
self.variable_coverage = np.zeros(20)
for i in range(len(sample_state[0])):
scale = 0.1 * sample_state[1][i] + 1
locs = sample_state[0][i] + 1
self.information_matrix += gauss(locs, scale)
# print(self.information_matrix)
# print(gauss(locs, scale))
# self.plot_line('cool green', np.linspace(1,22,22), gauss(locs, scale), 'test'+str(i))
# print("coverage_matrix: ", self.information_matrix)
mm, erodi = self.env.get_data_state()
mm_mean = np.mean(mm, axis=0)
mm_nonzero = mm[np.nonzero(mm)]
mm_mean_nonzero = mm_mean[np.nonzero(mm_mean)]
        start = 0 # left endpoint of the intervals
        number_of_interval = 20 # number of intervals
        length = 1 # length of each interval
        intervals = {'{}~{}'.format(length*x+start, length*(x+1)+start): 0 for x in range(number_of_interval)} # generate the intervals
result = np.array(interval_statistics(mm_nonzero, intervals))
self.variable_coverage = len(result[(np.nonzero(result))])/len(result)
result_number = np.linspace(0, 19, 20)
variable_information = np.zeros(20)
for i in range(len(result_number)):
single_converage = gauss_variable(result_number[i] +0.5, result[i])
variable_information += single_converage
# feed the variable coverage into the previous belief
self.variable_information = variable_information
# print(mm_mean_nonzero)
# print(sample_state[0])
# p , e = optimize.curve_fit(piecewise_linear_moisture, np.array(sample_state[0])+1, mm_mean_nonzero)
# xloc = np.linspace(1, 22, 22)
# xmoisture = piecewise_linear_moisture(xloc, *p)
# self.mapping_value = []
# for emoisture in xmoisture:
# self.mapping_value.append(variable_information[int(emoisture)])
# print(variable_information)
# print(self.mapping_value)
# plt.plot(xloc,xmoisture )
# plt.show()
def handle_information_accuracy(self):
accuracy_matrix = []
mm, data_state = self.env.get_data_state()
loc_state = self.env.get_state()
# error_cost = np.std(data_state, axis=0)
for col in range(data_state.shape[1]):
if col in loc_state[0]:
effective_data = data_state[:,col][np.nonzero(data_state[:,col])]
# print(effective_data)
median = np.median(effective_data)
k1 = 1.4826
mad = k1 * np.median(np.abs(effective_data-median))
lower_limit = median - (3*mad)
upper_limit = median + (3*mad)
                # outliers are points outside the 3*MAD band around the median
                outlier_data_num = (len(effective_data[(effective_data>
                    upper_limit) | (effective_data<lower_limit)]))
data_samples = len(effective_data)
if(data_samples == 0):
total_cost = 0
elif(data_samples > 0):
total_cost = 1 - 1/(1+ (data_samples - 0.99)/(3*outlier_data_num + 1))
accuracy_matrix.append(total_cost)
else:
accuracy_matrix.append(0)
self.accuracy_matrix = accuracy_matrix
# print('accuracy_matrix: ', self.accuracy_matrix)
def handle_feature_point_detection(self):
loc_state = self.env.get_state()[0]
#print(self.env.get_state())
self.fitting_error_matrix = np.zeros(22)
mm, erodi = self.env.get_data_state()
mm_mean = np.mean(mm, axis=0)
mm_nonzeroindex = (mm_mean != 0)
erodi_mean = np.mean(erodi, axis=0)
self.loc_index = np.linspace(1,22,22)[mm_nonzeroindex]
data_index = mm_mean[mm_nonzeroindex]
data_mean = erodi_mean[mm_nonzeroindex]
p , e = optimize.curve_fit(piecewise_linear, data_index, data_mean)
# print('dfadfaaf', p)
xd = np.linspace(0, np.max(data_index), 22)
fit_curve = piecewise_linear(xd, *p)
fitting_results = piecewise_linear(data_index, *p)
self.fitting_results = fitting_results
fitting_error = fitting_results - data_mean
mm_mean[mm_nonzeroindex] = fitting_error
self.data_index = data_index
self.fitting_error_matrix[mm_nonzeroindex] = fitting_error
# print(data_mean)
nonzero_data_mean = data_mean[np.nonzero(data_mean != 0)]
rmse_data = (sqrt(np.sum(np.power(nonzero_data_mean, 2))/
np.size(nonzero_data_mean)))
# print(rmse_data)
self.rmse_data = rmse_data
# plt.plot(xd, fit_curve)
# plt.plot(data_index, data_mean, "o")
# plt.plot(data_index, fitting_results, "*")
# #plt.plot(data_index, fitting_error, "+")
# plt.show()
# plt.savefig('123.png')
# find the feature point location
array = np.asarray(data_index)
idx = (np.abs(array - p[0])).argmin()
loc_indx = loc_state[idx]
saturation_estimated = int(loc_indx * (p[0]/array[idx]))
self.saturation_selection = np.arange(saturation_estimated - 2, saturation_estimated + 3, 1)
def confidence_model(self):
non_zero_matrix = (self.fitting_error_matrix[np.nonzero
(self.fitting_error_matrix != 0)])
rmse = (sqrt(np.sum(np.power(non_zero_matrix, 2))/
np.size(non_zero_matrix)))
# print(rmse)
# print(self.fitting_error_matrix)
# print(non_zero_matrix)
whole_rmse_percentage = rmse/self.rmse_data
# print(whole_rmse_percentage)
confindence = (0.04 - whole_rmse_percentage) * 30 * self.coverage_criteria
# print(confindence)
def handle_state_judge(self):
if(self.current_state == 0):
self.current_state = 1
elif(self.current_state == 1):
if(np.min(self.accuracy_matrix) > 0.7 and
len(self.information_matrix[self.information_matrix > 0.8]) > 15):
self.current_state = 2
else:
self.current_state = 1
elif(self.current_state == 2):
if(len(self.fitting_error_matrix[self.fitting_error_matrix > 0.8]) > 0):
self.current_state = 1
            else:
self.current_state = 2
def information_model(self):
self.coverage_criteria = (len(self.information_matrix[self.information_matrix
> 0.3]) / 22)
accuracy_matrix = np.array(self.accuracy_matrix)
# print(accuracy_matrix)
self.accuracy_criteria = (len(accuracy_matrix[(accuracy_matrix > 0.6) & (accuracy_matrix != 0)]) /
len(accuracy_matrix[accuracy_matrix != 0]))
# print('accuracy_value:', self.accuracy_criteria) # percentage of locs which the accuracy is lower than 0.6
# print('coverage_value:', self.coverage_criteria) # percentage of locs which the information is lower than 0.8
def take_action(self):
if(self.current_state == 0):
self.choose_initial_template()
elif(self.current_state == 1):
action_loc = np.argmin(self.information_matrix)
self.env.set_action([action_loc],[3])
accuracy_loc = np.where(self.accuracy_matrix < 0.7)
accuracy_samples = np.ones(len(accuracy_loc))
self.env.set_action(accuracy_loc,accuracy_samples)
elif(self.current_state == 2):
fitting_error_loc = np.where(self.fitting_error_matrix > 0.8)
add_loc = []
add_samples = []
current_state = self.env.get_state()
for i in fitting_error_loc:
if not i+1 in current_state[0]:
add_loc.append(i+1)
add_samples.append(3)
if not i-1 in current_state[0]:
add_loc.append(i-1)
add_samples.append(3)
self.env.set_action(add_loc, add_samples)
def plot(self, color, name):
myparams = {
'axes.labelsize': '10',
'xtick.labelsize': '10',
'ytick.labelsize': '10',
'lines.linewidth': 1,
'legend.fontsize': '3',
'font.family': 'Times New Roman',
            'figure.figsize': '9, 5' # figure size
        }
        pylab.rcParams.update(myparams) # apply the custom settings
        # line_styles=['ro-','b^-','gs-','ro--','b^--','gs--'] # line style presets
fig1 = plt.figure(1)
a = plt.plot(self.coverage_criteria, self.accuracy_criteria ,marker='o', color=sns.xkcd_rgb[color],
markersize=5)
plt.legend(loc="lower right") #图例位置 右下角
plt.ylabel('accuracy')
plt.xlabel('coverage ')
plt.xlim((0, 1.1))
plt.ylim((0, 1.1))
plt.axvline(x=1, c="b", ls="--", lw=1)
plt.axhline(y=1, c="b", ls="--", lw=1)
plt.savefig(name)
        # Note: .show() opens a blank figure by default afterwards; saving at that point easily produces a pure white image, so save the figure before calling show().
# plt.show()
def interval_statistics(data, intervals):
if len(data) == 0:
return
for num in data:
for interval in intervals:
lr = tuple(interval.split('~'))
left, right = float(lr[0]), float(lr[1])
if left <= num <= right:
intervals[interval] += 1
results = []
for key, value in intervals.items():
#print("%10s" % key, end='') # 借助 end=''可以不换行
# print("%10s" % value, end='') # "%10s" 右对齐
#print('%16s' % '{:.3%}'.format(value * 1.0 / len(data)))
results.append(value)
return results
def piecewise_linear(x, x0, y0, k1):
# x<x0 ⇒ lambda x: k1*x + y0 - k1*x0
# x>=x0 ⇒ lambda x: k2*x + y0 - k2*x0
return np.piecewise(x, [x < x0, x >= x0], [lambda x:k1*x + y0-k1*x0,
lambda x: y0])
def piecewise_linear_moisture(x, x0, y0, k1, k2):
# x<x0 ⇒ lambda x: k1*x + y0 - k1*x0
# x>=x0 ⇒ lambda x: k2*x + y0 - k2*x0
return np.piecewise(x, [x < x0, x >= x0], [lambda x:k1*x + y0-k1*x0,
lambda x: k2*x + y0 - k2*x0])
def gauss(mean, scale, x=np.linspace(1,22,22), sigma=1):
return scale * np.exp(-np.square(x - mean) / (2 * sigma ** 2))
def gauss_variable(mean, scale, x=np.linspace(0,19,20), sigma=1):
return scale * np.exp(-np.square(x - mean) / (2 * sigma ** 2))
if __name__ == "__main__":
DM = rule_state_machine()
DM.choose_initial_template()
# x = np.linspace(1,22,22)
# information_matrix = gauss(1,0.1).reshape(22,1)
# print(information_matrix)
# sns.set()
# ax = sns.heatmap(information_matrix, vmin=0, vmax=1)
# plt.title('Information Matrix')
# plt.savefig("test.png")
DM.handle_information_accuracy()
DM.handle_information_coverage()
DM.information_model()
DM.plot('cool green','test')
DM.handle_feature_point_detection()
DM.confidence_model()
| 41.133523 | 123 | 0.57794 |
f74624c2e736bef6cbbf662d661e7836effdb8bc | 936 | py | Python | zerver/migrations/0015_attachment.py | dehnert/zulip | f5935e81c7cf2f11ff4ccfcd31d2a1061b8d7ff5 | [
"Apache-2.0"
] | 1 | 2017-07-27T19:49:12.000Z | 2017-07-27T19:49:12.000Z | zerver/migrations/0015_attachment.py | dehnert/zulip | f5935e81c7cf2f11ff4ccfcd31d2a1061b8d7ff5 | [
"Apache-2.0"
] | 9 | 2021-02-08T20:22:36.000Z | 2022-03-11T23:22:45.000Z | zerver/migrations/0015_attachment.py | tobby2002/zulip | 66e7c455759f9368bae16b9a604cf63f8e3524cd | [
"Apache-2.0"
] | 1 | 2021-04-09T05:50:23.000Z | 2021-04-09T05:50:23.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('zerver', '0014_realm_emoji_url_length'),
]
operations = [
migrations.CreateModel(
name='Attachment',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('file_name', models.CharField(max_length=100, db_index=True)),
('path_id', models.TextField(db_index=True)),
('create_time', models.DateTimeField(default=django.utils.timezone.now, db_index=True)),
('messages', models.ManyToManyField(to='zerver.Message')),
('owner', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
]
| 33.428571 | 114 | 0.621795 |
f74632da87bc118e8ef70accfd01c8d085a9cbd9 | 2,299 | py | Python | modspectra/tests/test_spectrum_creation.py | Deech08/modspectra | 4af177418f9ac3e1ff30bf99968251ac143a96bc | [
"BSD-3-Clause"
] | 2 | 2020-06-04T13:09:50.000Z | 2020-06-04T13:10:03.000Z | modspectra/tests/test_spectrum_creation.py | Deech08/modspectra | 4af177418f9ac3e1ff30bf99968251ac143a96bc | [
"BSD-3-Clause"
] | 1 | 2020-10-29T19:55:57.000Z | 2020-10-29T19:55:57.000Z | modspectra/tests/test_spectrum_creation.py | Deech08/modspectra | 4af177418f9ac3e1ff30bf99968251ac143a96bc | [
"BSD-3-Clause"
] | null | null | null | import pytest
from numpy.random import randn
from numpy.random import random
import numpy as np
def test_non_detection():
from ..cube import EmissionCube
from astropy.coordinates import SkyCoord
import astropy.units as u
'''
Test that an anti-center pointing returns zero emission
'''
l = 180. + randn()*130.
b = 0. + randn()*20.
while (l > 340.) | (l < 20.): # Ensure actual non-detection
l = 180. + randn()*130.
c = SkyCoord(l = l*u.deg, b = b*u.deg, frame = 'galactic', galcen_distance = 8.127*u.kpc)
spec = EmissionCube.create_DK19_spectrum(c, 0.5 * u.deg, redden = False)
assert np.allclose(spec.value, np.zeros_like(spec.value))
def test_coordinate_error():
from ..cube import EmissionCube
import astropy.units as u
'''
Ensure that a SkyCoord Object is required
'''
l = 0. + randn()*5.
b = 0. + randn()*3.
try:
spec = EmissionCube.create_DK19_spectrum((l,b), 0.5 * u.deg, redden = False)
except TypeError:
assert True
else:
assert False
def test_galcen_distance():
from ..cube import EmissionCube
import astropy.units as u
from astropy.coordinates import SkyCoord
'''
    Ensure that a default galcen_distance is adopted
'''
l = 0. + randn()*5.
b = 0. + randn()*3.
c = SkyCoord(l = l*u.deg, b = b*u.deg, frame = 'galactic')
c2 = SkyCoord(l = l*u.deg, b = b*u.deg, frame = 'galactic', galcen_distance = 8.127*u.kpc)
spec = EmissionCube.create_DK19_spectrum(c, 0.5 * u.deg, redden = False)
spec2 = EmissionCube.create_DK19_spectrum(c2, 0.5 * u.deg, redden = False)
assert np.allclose(spec.value, spec2.value)
def test_radius_degrees():
from ..cube import EmissionCube
import astropy.units as u
from astropy.coordinates import SkyCoord
'''
    Ensure that a radius given without units is interpreted as degrees
'''
l = 0. + randn()*5.
b = 0. + randn()*3.
c = SkyCoord(l = l*u.deg, b = b*u.deg, frame = 'galactic', galcen_distance = 8.127*u.kpc)
r1 = np.abs( randn()*1000.) * u.arcmin
r2 = r1.to(u.deg).value
spec = EmissionCube.create_DK19_spectrum(c, r1, redden = False)
spec2 = EmissionCube.create_DK19_spectrum(c, r2, redden = False)
assert np.allclose(spec.value, spec2.value)
| 32.842857 | 94 | 0.638104 |
f746c16fea3027a482c679020427437ced86395f | 589 | py | Python | examples/camera.py | cloudmesh/cloudmesh.pi | bdf706b3763031341c41b811749064c293e73c14 | [
"Apache-2.0"
] | 2 | 2017-09-18T00:56:36.000Z | 2018-06-01T23:41:23.000Z | examples/camera.py | cloudmesh/cloudmesh-pi | bdf706b3763031341c41b811749064c293e73c14 | [
"Apache-2.0"
] | 1 | 2018-04-16T18:37:17.000Z | 2018-04-16T18:37:17.000Z | examples/camera.py | cloudmesh/cloudmesh.pi | bdf706b3763031341c41b811749064c293e73c14 | [
"Apache-2.0"
] | 3 | 2017-09-20T11:13:54.000Z | 2017-11-30T23:48:37.000Z | ###
# if you don't have picamera installed, run: sudo apt-get install python-picamera
# and enable the camera in the Raspberry Pi configuration
#
#
# this program uses the Raspberry Pi onboard camera to capture an image and save it to a file
#
# wait for 5 seconds
# capture the image
# save the image to the image.jpg file
#
# for more information see
# https://www.raspberrypi.org/documentation/usage/camera/python/README.md
###
import picamera
import time
camera = picamera.PiCamera()
count = 5
while count >0:
print "capturing image in : ", count
count -= 1
time.sleep(1)
camera.capture("image.jpg")
| 21.035714 | 88 | 0.741935 |
f746ea3097b7b326f2fa01df7c61da4f5e6ea7f9 | 15,135 | py | Python | test/test_serialization.py | jkulhanek/torchdata | 2e8b9f613a13c74b424651649f317c7b322131d6 | [
"BSD-3-Clause"
] | null | null | null | test/test_serialization.py | jkulhanek/torchdata | 2e8b9f613a13c74b424651649f317c7b322131d6 | [
"BSD-3-Clause"
] | null | null | null | test/test_serialization.py | jkulhanek/torchdata | 2e8b9f613a13c74b424651649f317c7b322131d6 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import pickle
import unittest
import warnings
from functools import partial
from io import StringIO
from operator import itemgetter
from typing import List
import expecttest
import torchdata.datapipes.iter as iterdp
from _utils._common_utils_for_test import create_temp_dir, create_temp_files
from torch.utils.data.datapipes.utils.common import DILL_AVAILABLE
from torchdata.datapipes.iter import IterableWrapper
from torchdata.datapipes.map import SequenceWrapper
if DILL_AVAILABLE:
import dill
dill.extend(use_dill=False)
try:
import fsspec
except ImportError:
fsspec = None
try:
import iopath
except ImportError:
iopath = None
try:
import subprocess
import rarfile
try:
rarfile.tool_setup()
subprocess.run(("rar", "-?"), check=True)
except (rarfile.RarCannotExec, subprocess.CalledProcessError):
rarfile = None
except (ModuleNotFoundError, FileNotFoundError):
rarfile = None
try:
import torcharrow
import torcharrow.dtypes as dt
DTYPE = dt.Struct([dt.Field("Values", dt.int32)])
except ImportError:
torcharrow = None
dt = None
DTYPE = None
def _fake_batch_fn(batch):
return [d + 1 for d in batch]
def _fake_fn_ls(x):
return [x, x]
def _filepath_fn(name: str, dir) -> str:
return os.path.join(dir, os.path.basename(name))
def _filter_by_module_availability(datapipes):
filter_set = set()
if fsspec is None:
filter_set.update([iterdp.FSSpecFileLister, iterdp.FSSpecFileOpener, iterdp.FSSpecSaver])
if iopath is None:
filter_set.update([iterdp.IoPathFileLister, iterdp.IoPathFileOpener, iterdp.IoPathSaver])
if rarfile is None:
filter_set.update([iterdp.RarArchiveLoader])
if torcharrow is None or not DILL_AVAILABLE:
filter_set.update([iterdp.DataFrameMaker, iterdp.ParquetDataFrameLoader])
return [dp for dp in datapipes if dp[0] not in filter_set]
class TestIterDataPipeSerialization(expecttest.TestCase):
def setUp(self):
self.temp_dir = create_temp_dir()
self.temp_files = create_temp_files(self.temp_dir)
self.temp_sub_dir = create_temp_dir(self.temp_dir.name)
self.temp_sub_files = create_temp_files(self.temp_sub_dir, 4, False)
def tearDown(self):
try:
self.temp_sub_dir.cleanup()
self.temp_dir.cleanup()
except Exception as e:
warnings.warn(f"TestIterDataPipeSerialization was not able to cleanup temp dir due to {e}")
def _serialization_test_helper(self, datapipe):
serialized_dp = pickle.dumps(datapipe)
deserialized_dp = pickle.loads(serialized_dp)
try:
self.assertEqual(list(datapipe), list(deserialized_dp))
except AssertionError as e:
print(f"{datapipe} is failing.")
raise e
def _serialization_dataframe_test_helper(self, datapipe):
serialized_dp = pickle.dumps(datapipe)
deserialized_dp = pickle.loads(serialized_dp)
for df1, df2 in zip(datapipe, deserialized_dp):
for exp, act in zip(df1, df2):
self.assertEqual(exp, act)
def _serialization_test_for_single_dp(self, dp, is_dataframe=False):
test_helper_fn = self._serialization_dataframe_test_helper if is_dataframe else self._serialization_test_helper
# 1. Testing for serialization before any iteration starts
test_helper_fn(dp)
        # 2. Testing for serialization after DataPipe is partially read
it = iter(dp)
_ = next(it)
test_helper_fn(dp)
# 3. Testing for serialization after DataPipe is fully read
_ = list(it)
test_helper_fn(dp)
def _serialization_test_for_dp_with_children(self, dp1, dp2):
# 1. Testing for serialization before any iteration starts
self._serialization_test_helper(dp1)
self._serialization_test_helper(dp2)
# 2. Testing for serialization after DataPipe is partially read
it1, it2 = iter(dp1), iter(dp2)
_, _ = next(it1), next(it2)
self._serialization_test_helper(dp1)
self._serialization_test_helper(dp2)
# 2.5. Testing for serialization after one child DataPipe is fully read
# (Only for DataPipes with children DataPipes)
_ = list(it1) # fully read one child
self._serialization_test_helper(dp1)
self._serialization_test_helper(dp2)
# 3. Testing for serialization after DataPipe is fully read
_ = list(it2) # fully read the other child
self._serialization_test_helper(dp1)
self._serialization_test_helper(dp2)
def test_serializable(self):
picklable_datapipes: List = [
(iterdp.BatchMapper, IterableWrapper([(0, 0), (0, 0), (0, 0), (0, 0)]), (_fake_batch_fn, 2, 1), {}),
(iterdp.BucketBatcher, IterableWrapper([0, 0, 0, 0, 0, 0, 0]), (5,), {}),
(iterdp.Bz2FileLoader, None, (), {}),
(
iterdp.CSVDictParser,
IterableWrapper(
[("f1", StringIO("Label,1,1\nLabel,2,2\nLabel,3,3")), ("f2", StringIO("L,1,1\r\nL,2,2\r\nL,3,3"))]
),
(),
{},
),
(
iterdp.CSVParser,
IterableWrapper(
[("f1", StringIO("Label,1,1\nLabel,2,2\nLabel,3,3")), ("f2", StringIO("L,1,1\r\nL,2,2\r\nL,3,3"))]
),
(),
{},
),
(iterdp.Cycler, None, (2,), {}),
(iterdp.DataFrameMaker, IterableWrapper([(i,) for i in range(3)]), (), {"dtype": DTYPE}),
(iterdp.Decompressor, None, (), {}),
(iterdp.Enumerator, None, (2,), {}),
(iterdp.FlatMapper, None, (_fake_fn_ls,), {}),
(iterdp.FSSpecFileLister, ".", (), {}),
(iterdp.FSSpecFileOpener, None, (), {}),
(
iterdp.FSSpecSaver,
IterableWrapper([("1.txt", b"DATA1"), ("2.txt", b"DATA2"), ("3.txt", b"DATA3")]),
(),
{"mode": "wb", "filepath_fn": partial(_filepath_fn, dir=self.temp_dir.name)},
),
(iterdp.GDriveReader, None, (), {}),
(iterdp.HashChecker, None, ({},), {}),
(iterdp.Header, None, (3,), {}),
(iterdp.HttpReader, None, (), {}),
# TODO (ejguan): Deterministic serialization is required
# (iterdp.InBatchShuffler, IterableWrapper(range(10)).batch(3), (), {}),
(iterdp.InMemoryCacheHolder, None, (), {}),
(iterdp.IndexAdder, IterableWrapper([{"a": 1, "b": 2}, {"c": 3, "a": 1}]), ("label",), {}),
(iterdp.IoPathFileLister, ".", (), {}),
(iterdp.IoPathFileOpener, None, (), {}),
(
iterdp.IoPathSaver,
IterableWrapper([("1.txt", b"DATA1"), ("2.txt", b"DATA2"), ("3.txt", b"DATA3")]),
(),
{"mode": "wb", "filepath_fn": partial(_filepath_fn, dir=self.temp_dir.name)},
),
(
iterdp.IterKeyZipper,
IterableWrapper([("a", 100), ("b", 200), ("c", 300)]),
(IterableWrapper([("a", 1), ("b", 2), ("c", 3)]), itemgetter(0), itemgetter(0)),
{},
),
(
iterdp.JsonParser,
IterableWrapper(
[
("1.json", StringIO('["fo", {"ba":["baz", null, 1.0, 2]}]')),
("2.json", StringIO('{"__cx__": true, "r": 1, "i": 2}')),
]
),
(),
{},
),
(
iterdp.LineReader,
IterableWrapper(
[("file1", StringIO("Line1\nLine2")), ("file2", StringIO("Line2,1\r\nLine2,2\r\nLine2,3"))]
),
(),
{},
),
(
iterdp.MaxTokenBucketizer,
IterableWrapper(["1", "22", "1", "4444", "333", "1", "22", "22", "333"]),
(4,),
{},
),
(
iterdp.MapKeyZipper,
IterableWrapper([("a", 1), ("b", 2), ("c", 3)]),
(SequenceWrapper({"a": 100, "b": 200, "c": 300}), itemgetter(0)),
{},
),
(iterdp.OnDiskCacheHolder, None, (), {}),
(iterdp.OnlineReader, None, (), {}),
(
iterdp.ParagraphAggregator,
IterableWrapper([("f1", "L1"), ("f1", "L2"), ("f2", "21"), ("f2", "22")]),
(),
{},
),
(iterdp.ParquetDataFrameLoader, None, (), {"dtype": DTYPE}),
(iterdp.RarArchiveLoader, None, (), {}),
(
iterdp.Rows2Columnar,
IterableWrapper([[{"a": 1}, {"b": 2, "a": 1}], [{"a": 1, "b": 200}, {"c": 3}]]),
(),
{},
),
(iterdp.SampleMultiplexer, {IterableWrapper([0] * 10): 0.5, IterableWrapper([1] * 10): 0.5}, (), {}),
(
iterdp.Saver,
IterableWrapper([("1.txt", b"DATA1"), ("2.txt", b"DATA2"), ("3.txt", b"DATA3")]),
(),
{"mode": "wb", "filepath_fn": partial(_filepath_fn, dir=self.temp_dir.name)},
),
(iterdp.TarArchiveLoader, None, (), {}),
(iterdp.TFRecordLoader, None, (), {}),
(iterdp.UnZipper, IterableWrapper([(i, i + 10) for i in range(10)]), (), {"sequence_length": 2}),
(iterdp.XzFileLoader, None, (), {}),
(iterdp.ZipArchiveLoader, None, (), {}),
]
picklable_datapipes = _filter_by_module_availability(picklable_datapipes)
# Skipping value comparison for these DataPipes
# Most of them return streams not comparable by `self.assertEqual`
# Others are similar to caching where the outputs depend on other DataPipes
dp_skip_comparison = {
iterdp.Bz2FileLoader,
iterdp.Decompressor,
iterdp.FileOpener,
iterdp.FSSpecFileOpener,
iterdp.GDriveReader,
iterdp.IoPathFileOpener,
iterdp.HashChecker,
iterdp.HttpReader,
iterdp.OnDiskCacheHolder,
iterdp.OnlineReader,
iterdp.ParquetDataFrameLoader,
iterdp.SampleMultiplexer,
iterdp.RarArchiveLoader,
iterdp.TarArchiveLoader,
iterdp.TFRecordLoader,
iterdp.XzFileLoader,
iterdp.ZipArchiveLoader,
}
# These DataPipes produce multiple DataPipes as outputs and those should be compared
dp_compare_children = {iterdp.UnZipper}
for dpipe, custom_input, dp_args, dp_kwargs in picklable_datapipes:
try:
# Creating input (usually a DataPipe) for the specific dpipe being tested
if custom_input is None:
custom_input = IterableWrapper(range(10))
if dpipe in dp_skip_comparison: # Mke sure they are picklable and loadable (no value comparison)
datapipe = dpipe(custom_input, *dp_args, **dp_kwargs) # type: ignore[call-arg]
serialized_dp = pickle.dumps(datapipe)
_ = pickle.loads(serialized_dp)
elif dpipe in dp_compare_children: # DataPipes that have children
dp1, dp2 = dpipe(custom_input, *dp_args, **dp_kwargs) # type: ignore[call-arg]
self._serialization_test_for_dp_with_children(dp1, dp2)
else: # Single DataPipe that requires comparison
datapipe = dpipe(custom_input, *dp_args, **dp_kwargs) # type: ignore[call-arg]
is_dataframe = issubclass(dpipe, (iterdp.DataFrameMaker, iterdp.ParquetDataFrameLoader))
self._serialization_test_for_single_dp(datapipe, is_dataframe=is_dataframe)
except Exception as e:
print(f"{dpipe} is failing.")
raise e
def test_serializable_with_dill(self):
"""Only for DataPipes that take in a function as argument"""
input_dp = IterableWrapper(range(10))
ref_idp = IterableWrapper(range(10))
ref_mdp = SequenceWrapper(range(10))
unpicklable_datapipes: List = [
(iterdp.BatchMapper, (lambda batch: [d + 1 for d in batch], 2), {}),
(iterdp.FlatMapper, (lambda x: [x, x],), {}),
(iterdp.IterKeyZipper, (ref_idp, lambda x: x, None, True, 100), {}),
(iterdp.MapKeyZipper, (ref_mdp, lambda x: x), {}),
(iterdp.OnDiskCacheHolder, (lambda x: x,), {}),
(iterdp.ParagraphAggregator, (lambda x: x,), {}),
]
# Skipping value comparison for these DataPipes
dp_skip_comparison = {iterdp.OnDiskCacheHolder, iterdp.ParagraphAggregator}
for dpipe, dp_args, dp_kwargs in unpicklable_datapipes:
if DILL_AVAILABLE:
try:
if dpipe in dp_skip_comparison: # Make sure they are picklable/loadable (no value comparison)
datapipe = dpipe(input_dp, *dp_args, **dp_kwargs) # type: ignore[call-arg]
serialized_dp = dill.dumps(datapipe)
_ = dill.loads(serialized_dp)
else:
datapipe = dpipe(input_dp, *dp_args, **dp_kwargs) # type: ignore[call-arg]
self._serialization_test_for_single_dp(datapipe)
except Exception as e:
print(f"{dpipe} is failing.")
raise e
else:
dp_no_attribute_error = (iterdp.OnDiskCacheHolder,)
try:
with warnings.catch_warnings(record=True) as wa:
datapipe = dpipe(input_dp, *dp_args, **dp_kwargs) # type: ignore[call-arg]
self.assertEqual(len(wa), 1)
self.assertRegex(str(wa[0].message), r"^Lambda function is not supported for pickle")
if isinstance(datapipe, dp_no_attribute_error):
_ = pickle.dumps(datapipe)
else:
with self.assertRaises(AttributeError):
_ = pickle.dumps(datapipe)
except Exception as e:
print(f"{dpipe} is failing.")
raise e
class TestMapDataPipeSerialization(expecttest.TestCase):
def test_serializable(self):
pass
def test_serializable_with_dill(self):
"""Only for DataPipes that take in a function as argument"""
pass
if __name__ == "__main__":
unittest.main()
| 40.795148 | 119 | 0.557582 |
f74722dded4b701ea739cfeabcf1fc83a44f0203 | 9,770 | py | Python | mdrsl/rule_generation/association_rule_mining/apyori_impl/mine_mt_rules_from_dataframe_with_apyori.py | joschout/Multi-Directional-Rule-Set-Learning | ef0620b115f4e0fd7fba3e752d238a8020c1ca6b | [
"Apache-2.0"
] | 3 | 2020-08-03T19:25:44.000Z | 2021-06-27T22:25:55.000Z | mdrsl/rule_generation/association_rule_mining/apyori_impl/mine_mt_rules_from_dataframe_with_apyori.py | joschout/Multi-Directional-Rule-Set-Learning | ef0620b115f4e0fd7fba3e752d238a8020c1ca6b | [
"Apache-2.0"
] | null | null | null | mdrsl/rule_generation/association_rule_mining/apyori_impl/mine_mt_rules_from_dataframe_with_apyori.py | joschout/Multi-Directional-Rule-Set-Learning | ef0620b115f4e0fd7fba3e752d238a8020c1ca6b | [
"Apache-2.0"
] | 2 | 2020-08-07T22:54:28.000Z | 2021-02-18T06:11:01.000Z | import random
import numpy as np
from typing import List, Optional, Dict
import pandas as pd
import time
from mdrsl.rule_generation.association_rule_mining.apyori_impl.mine_mt_rules_from_transactions_with_apyori import (
mine_MCARs_from_transactions_using_apyori)
from mdrsl.rule_generation.association_rule_mining.frequent_itemset_mining import (
dataframe_to_list_of_transactions, run_fim_apriori, dataframe_to_list_of_transactions_with_encoding)
from mdrsl.data_structures.rules.multi_target_class_association_rule import MCAR
def mine_MCARs_from_df_using_apyori(df,
min_support: float = 0.1, min_confidence: float = 0.0, min_lift=0.0,
max_length=None) -> List[MCAR]:
transactions = dataframe_to_list_of_transactions(df)
return mine_MCARs_from_transactions_using_apyori(
transactions,
min_support=min_support, min_confidence=min_confidence,
min_lift=min_lift, max_length=max_length)
def mine_MCARs_from_df_using_apyori_with_encodings(df,
min_support: float = 0.1, min_confidence: float = 0.0, min_lift=0.0,
max_length=None) -> List[MCAR]:
transactions, item_encoder = dataframe_to_list_of_transactions_with_encoding(df)
return mine_MCARs_from_transactions_using_apyori(
transactions,
min_support=min_support, min_confidence=min_confidence,
min_lift=min_lift, max_length=max_length, item_encoder=item_encoder)
def mine_MCARs(df, rule_cutoff: int,
sample=False, random_seed=None,
verbose: bool = True,
**top_rules_kwargs) -> List[MCAR]:
transactions: List[List[str]] = dataframe_to_list_of_transactions(df)
mcars: Optional[List[MCAR]] = _top_rules_MIDS(transactions,
target_rule_count=rule_cutoff,
verbose=verbose)
if mcars is None:
raise Exception("no MCARs found as input for MIDS")
if len(mcars) > rule_cutoff:
if sample:
if random_seed is not None:
random.seed(random_seed)
mcars_subset = random.sample(mcars, rule_cutoff)
else:
mcars_subset = mcars[:rule_cutoff]
else:
mcars_subset = mcars
return mcars_subset
if __name__ == '__main__':
df_total = pd.DataFrame({
'A': np.array([1] * 4, dtype='float32'),
'B': np.array([2] * 4, dtype='float32'),
'C': np.array([3] * 4, dtype='float32'),
'D': np.array([4] * 4, dtype='float32')
})
print(df_total)
itemsets = dataframe_to_list_of_transactions(df_total)
support_threshold = 0.1
dataset_transactions = dataframe_to_list_of_transactions(df_total) # type: List[List[str]]
cars = mine_MCARs_from_transactions_using_apyori(dataset_transactions, min_support=support_threshold)
for car in cars:
print(car)
print("---")
fim_frequent_itemsets = run_fim_apriori(df_total, support_threshold)
print(fim_frequent_itemsets)
def _top_rules_MIDS(transactions: List[List[str]],
appearance: Optional[Dict] = None,
target_rule_count: int = 1000,
init_support: float = 0.05,
init_confidence: float = 0.5,
confidence_step: float = 0.05,
support_step: float = 0.05,
min_length: int = 2,
init_max_length: int = 3,
total_timeout: float = 100.0, # max time in seconds
max_iterations: int = 30,
verbose: bool = True
):
"""
Function for finding the best n (target_rule_count) rules from transaction list.
PROBLEM: how to define 'best'?
Iteratively:
Search for the rules under the current mining parameters.
Check the properties of the found rules.
If there is still room for improvement,
Then update the mining parameters,
STOP if:
- max nb of iterations is reached (default: 30).
- the current nb of rules is more than the nb of rules we are looking for.
        - the time out is reached
FIND all rules with as constraints:
- min_support
- min_confidence
- max_length
Parameters
----------
:param transactions : 2D array of strings, e.g. [["a:=:1", "b:=:3"], ["a:=:4", "b:=:2"]]
:param appearance : dict - dictionary specifying rule appearance
:param target_rule_count : int - target number of rules to mine
:param init_support : float - support from which to start mining
:param init_confidence : float - confidence from which to start mining
:param confidence_step : float
:param support_step : float
:param min_length : int - minimum len of rules to mine
:param init_max_length : int - maximum len from which to start mining
:param total_timeout : float - maximum execution time of the function
:param max_iterations : int - maximum iterations to try before stopping execution
:param verbose : bool
Returns
-------
list of mined rules. The rules are not ordered.
"""
if appearance is None:
appearance = {}
start_time: float = time.time()
# the length of a rule is at most the length of a transaction. (All transactions have the same length.)
# max_rule_length_wanted = 10
# MAX_RULE_LEN: int = min(len(transactions[0]), max_rule_length_wanted)
MAX_RULE_LEN: int = len(transactions[0])
current_support: float = init_support
current_confidence: float = init_confidence
current_max_length: int = init_max_length
keep_mining: bool = True
is_max_length_decreased_due_timeout = False
current_iteration = 0
last_rule_count = -1
rules: Optional[List[MCAR]] = None
if verbose:
print("STARTING top_rules")
while keep_mining:
current_iteration += 1
if current_iteration > max_iterations:
if verbose:
print("Max iterations reached")
break
if verbose:
print(f"--- iteration {current_iteration} ---")
print((f"Running apriori with setting: "
f"confidence={current_confidence}, "
f"support={current_support}, "
f"min_length={min_length}, "
f"max_length={current_max_length}, "
f"MAX_RULE_LEN={MAX_RULE_LEN}"
))
current_rules: List[MCAR] = mine_MCARs_from_transactions_using_apyori(
transactions, min_support=current_support, min_confidence=current_confidence, max_length=current_max_length)
# rules_current = fim.arules(transactions, supp=support, conf=conf, mode="o", report="sc", appear=appearance,
# zmax=maxlen, zmin=minlen)
current_nb_of_rules = len(current_rules)
# assign
rules = current_rules
if verbose:
print(f"Rule count: {current_nb_of_rules}, Iteration: {current_iteration}")
if current_nb_of_rules >= target_rule_count:
keep_mining = False
if verbose:
print(f"\tTarget rule count satisfied: {target_rule_count}")
else:
current_execution_time = time.time() - start_time
# if timeout limit exceeded
if current_execution_time > total_timeout:
if verbose:
print("Execution time exceeded:", total_timeout)
keep_mining = False
# if we can still increase our rule length AND
# the number of rules found has changed (increased?) since last time AND
            # there has been no max-length decrease due to a timeout
elif current_max_length < MAX_RULE_LEN and last_rule_count != current_nb_of_rules and not is_max_length_decreased_due_timeout:
current_max_length += 1
last_rule_count = current_nb_of_rules
if verbose:
print(f"\tIncreasing max_length {current_max_length}")
# if we can still increase our rule length AND
            # the max length was previously decreased due to a timeout AND
# we can still increase our support
# THEN:
# increase our support
# increment our max length
elif current_max_length < MAX_RULE_LEN and is_max_length_decreased_due_timeout and current_support <= 1 - support_step:
current_support += support_step
current_max_length += 1
last_rule_count = current_nb_of_rules
is_max_length_decreased_due_timeout = False
if verbose:
print(f"\tIncreasing maxlen to {current_max_length}")
print(f"\tIncreasing minsup to {current_support}")
# IF we can still decrease our confidence
# THEN decrease our confidence
elif current_confidence > confidence_step:
current_confidence -= confidence_step
if verbose:
print(f"\tDecreasing confidence to {current_confidence}")
else:
if verbose:
print("\tAll options exhausted")
keep_mining = False
if verbose:
end_of_current_iteration_message = f"--- end iteration {current_iteration} ---"
print(end_of_current_iteration_message)
print("-" * len(end_of_current_iteration_message))
if verbose:
print(f"FINISHED top_rules after {current_iteration} iterations")
return rules
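# Minimal usage sketch (hypothetical parameter values, assuming a discretized
# DataFrame such as df_total from the __main__ block above):
#
#   selected_rules = mine_MCARs(df_total, rule_cutoff=50, sample=True, random_seed=42)
#
# mine_MCARs converts the frame to transactions and delegates to _top_rules_MIDS, which
# keeps adjusting support, confidence and rule length until roughly rule_cutoff rules
# are found, the iteration limit is hit, or the timeout expires.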
| 37.722008 | 138 | 0.621085 |
f7472b67caaccda28cf0edd2bff43417639ea849 | 4,783 | py | Python | facebook_business/test/async_ad_docs.py | pasha-r/facebook-python-ads-sdk | 76feadd77baed839516b53297628e7a254c8c3c0 | [
"CNRI-Python"
] | null | null | null | facebook_business/test/async_ad_docs.py | pasha-r/facebook-python-ads-sdk | 76feadd77baed839516b53297628e7a254c8c3c0 | [
"CNRI-Python"
] | null | null | null | facebook_business/test/async_ad_docs.py | pasha-r/facebook-python-ads-sdk | 76feadd77baed839516b53297628e7a254c8c3c0 | [
"CNRI-Python"
] | 1 | 2018-09-24T14:04:48.000Z | 2018-09-24T14:04:48.000Z | # Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
'''
Unit tests for the Python Facebook Ads API SDK.
How to run:
python -m facebook_business.test.async_ad_docs
'''
import sys
from facebook_business.asyncapi import FacebookAdsAsyncApi
from facebook_business.objects import Insights
from facebook_business.test.async_docs_utils import *
from facebook_business.utils.httpretries import retry_policy
class AdDocsTestCase(AsyncDocsTestCase):
def setUp(self):
campaign = self.create_campaign(1)
adset = self.create_adset(1, campaign)
creative = self.create_creative_leads(1)
ad = self.create_ad(1, adset, creative)
AsyncDocsDataStore.set('ad_id', ad.get_id())
def test_get_insights(self):
ad = Ad(AsyncDocsDataStore.get('ad_id'))
insights = ad.get_insights(fields=[
Insights.Field.ad_id,
Insights.Field.unique_clicks,
Insights.Field.impressions,
], params={
'level': Insights.Level.ad,
'date_preset': Insights.Preset.last_week,
})
self.store_response(insights)
def test_get_ad_creatives(self):
ad = Ad(AsyncDocsDataStore.get('ad_id'))
creatives = ad.get_ad_creatives_aio(fields=[AdCreative.Field.name])
self.store_response(creatives)
def test_get_targeting_description(self):
ad = Ad(AsyncDocsDataStore.get('ad_id'))
targeting_desc = ad.get_targeting_description_aio(fields=[
'targetingsentencelines',
])
self.store_response(targeting_desc)
def test_get_keyword_stats(self):
ad = Ad(AsyncDocsDataStore.get('ad_id'))
keywords = ad.get_keyword_stats_aio()
self.store_response(keywords)
def test_get_ad_preview(self):
ad = Ad(AsyncDocsDataStore.get('ad_id'))
ad_preview = ad.get_ad_preview_aio(params={
'ad_format': 'RIGHT_COLUMN_STANDARD',
})
self.store_response(ad_preview)
def test_get_reach_estimate(self):
ad = Ad(AsyncDocsDataStore.get('ad_id'))
reach_estimate = ad.get_reach_estimate_aio()
self.store_response(reach_estimate)
def test_get_click_tracking_tag(self):
ad = Ad(AsyncDocsDataStore.get('ad_id'))
tag = ad.get_click_tracking_tag_aio()
self.store_response(tag)
def test_get_leads(self):
ad = Ad(AsyncDocsDataStore.get('ad_id'))
leads = ad.get_leads_aio()
self.store_response(leads)
if __name__ == '__main__':
handle = open(AsyncDocsDataStore.get('filename'), 'w')
handle.write('')
handle.close()
try:
config_file = open('./autogen_docs_config.json')
except IOError:
print("No config file found, skipping docs tests")
sys.exit()
config = json.load(config_file)
config_file.close()
FacebookAdsAsyncApi.init(
config['app_id'],
config['app_secret'],
config['access_token'],
config['adaccount_id'],
pool_maxsize=10,
max_retries=retry_policy()
)
AsyncDocsDataStore.set('adaccount_id', config['adaccount_id'])
AsyncDocsDataStore.set('adaccount_id_int', config['adaccount_id_int'])
AsyncDocsDataStore.set('business_id', config['business_id'])
AsyncDocsDataStore.set('ca_id', config['ca_id'])
AsyncDocsDataStore.set('dpa_catalog_id', config['dpa_catalog_id'])
AsyncDocsDataStore.set('dpa_set_id', config['dpa_set_id'])
AsyncDocsDataStore.set('dpa_feed_id', config['dpa_feed_id'])
AsyncDocsDataStore.set('dpa_upload_id', config['dpa_upload_id'])
AsyncDocsDataStore.set('as_user_id', config['as_user_id'])
AsyncDocsDataStore.set('page_id', config['page_id'])
AsyncDocsDataStore.set('pixel_id', config['pixel_id'])
unittest.main()
| 36.51145 | 76 | 0.701861 |
f747350ed2fe7ea42b0ccce6f38156b781455d8b | 1,003 | py | Python | taxamo/models/emailInvoiceIn.py | taxamo/taxamo-python | 190bdda68963860c131d2b1e9d31cd88de10d694 | [
"Apache-2.0"
] | 4 | 2016-03-14T03:59:08.000Z | 2020-06-21T07:58:38.000Z | taxamo/models/emailInvoiceIn.py | taxamo/taxamo-python | 190bdda68963860c131d2b1e9d31cd88de10d694 | [
"Apache-2.0"
] | 2 | 2016-03-07T13:41:23.000Z | 2017-07-11T13:39:44.000Z | taxamo/models/emailInvoiceIn.py | taxamo/taxamo-python | 190bdda68963860c131d2b1e9d31cd88de10d694 | [
"Apache-2.0"
] | 8 | 2016-01-13T14:32:19.000Z | 2021-08-16T11:14:06.000Z | #!/usr/bin/env python
"""
Copyright 2014-2021 by Taxamo
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class EmailInvoiceIn:
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
def __init__(self):
self.swaggerTypes = {
'buyer_email': 'str'
}
#Email to send the invoice. If not provided, transaction.buyer_email will be used.
self.buyer_email = None # str
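# Minimal usage sketch (hypothetical value; follows the buyer_email comment above):
#
#   invoice_email = EmailInvoiceIn()
#   invoice_email.buyer_email = "buyer@example.com"
#
# Leaving buyer_email as None means the transaction's buyer_email is used instead.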
| 31.34375 | 90 | 0.690927 |
f7473613deb0766b99315c742b58237acf9fe7aa | 3,944 | py | Python | tests/test_PolyViz.py | kobejohn/polymaze | c7ac6049bef547f032c353c5c50e8ead95954a33 | [
"MIT"
] | 21 | 2015-02-09T17:36:36.000Z | 2021-06-01T19:58:59.000Z | tests/test_PolyViz.py | kobejohn/polymaze | c7ac6049bef547f032c353c5c50e8ead95954a33 | [
"MIT"
] | 1 | 2017-01-12T13:52:07.000Z | 2017-01-12T13:52:07.000Z | tests/test_PolyViz.py | kobejohn/polymaze | c7ac6049bef547f032c353c5c50e8ead95954a33 | [
"MIT"
] | 4 | 2017-12-07T15:18:19.000Z | 2021-06-01T19:59:20.000Z | import unittest
from tests.test_PolyGrid import generic_grid
from polymaze.polygrid import PolyGrid, PolyViz
# silly workaround to allow tests to work in py2 or py3
try:
_assertCountEqual = unittest.TestCase.assertCountEqual # py3
from unittest import mock
except (AttributeError, ImportError):
_assertCountEqual = unittest.TestCase.assertItemsEqual # py2
import mock
class TestPolyViz(unittest.TestCase):
def test_provides_reference_to_related_grid(self):
grid = generic_grid()
viz = generic_viz(grid)
self.assertIs(viz.grid, grid)
def test_can_create_new_part_styles(self):
viz = generic_viz()
any_name = 'asdf'
any_color = (0, 0, 0, 0)
# confirm styles don't exist
self.assertNotIn(any_name, viz._shape_styles)
self.assertNotIn(any_name, viz._edge_styles)
# make the new styles
viz.new_shape_style(any_name, any_color)
viz.new_edge_style(any_name, any_color)
# confirm they were added
self.assertIn(any_name, viz._shape_styles)
self.assertIn(any_name, viz._edge_styles)
def test_uses_get_x_style_when_drawing_each_part(self):
viz = generic_viz()
get_style_names = ('get_shape_style', 'get_edge_style')
for get_style_name in get_style_names:
with mock.patch.object(viz, get_style_name) as m_getstyle:
# make the call
try:
viz.image()
except Exception:
pass # it will probably die. that's fine.
# confirm that the style was retrieved appropriately
self.assertTrue(m_getstyle.called)
def test_get_x_style_returns_default_when_part_has_no_style_setting(self):
viz = generic_viz(generic_grid(neighborhood_center_index=(0, 0)))
edge_default_spec = viz._edge_styles['default']
shape_default_spec = viz._shape_styles['default']
# get one of each type of part
shape = next(iter(viz.grid.shapes()))
edge = next(iter(viz.grid.edges()))
# confirm no style setting
self.assertIsNone(shape.viz_style)
self.assertIsNone(edge.viz_style)
# confirm the lookup matches the specification
self.assertIs(viz.get_shape_style(shape), shape_default_spec)
self.assertIs(viz.get_edge_style(edge), edge_default_spec)
def test_get_x_style_returns_named_style_setting_when_part_has_it(self):
viz = generic_viz()
# create some named styles
shape_style_name = '<<shape>>'
edge_style_name = '<<edge>>'
any_color = (1, 2, 3, 4)
viz.new_shape_style(shape_style_name, color=any_color)
viz.new_edge_style(edge_style_name, color=any_color)
# apply style to some parts
shape = next(iter(viz.grid.shapes()))
edge = next(iter(viz.grid.edges()))
shape.viz_style = shape_style_name
edge.viz_style = edge_style_name
# get the style container that should be returned
shape_style_spec = viz._shape_styles[shape_style_name]
edge_style_spec = viz._edge_styles[edge_style_name]
# confirm correct style returned
self.assertIs(viz.get_shape_style(shape), shape_style_spec)
self.assertIs(viz.get_edge_style(edge), edge_style_spec)
def test_image_returns_a_PIL_image(self):
viz = generic_viz()
im = viz.image()
# confirm it has an image method
self.assertTrue(hasattr(im, 'crop'))
def test_image_returns_None_for_empty_grid(self):
# make and confirm an empty grid
empty_grid = PolyGrid()
self.assertEqual(len(tuple(empty_grid.shapes())), 0)
# confirm image is None
viz = PolyViz(empty_grid)
self.assertIsNone(viz.image())
def generic_viz(grid=None):
grid = grid or generic_grid(neighborhood_center_index=(0,0))
return PolyViz(grid) | 39.838384 | 78 | 0.672667 |

app/lib/db_query/tweets/categories.py | MichaelCurrin/twitterverse | MIT | 10 stars

"""
Category queries application file.
"""
from lib import database as db
def printAvailableCategories():
"""
Iterate through Categories in db to print out name and Profile count
for each.
:return: None
"""
print(" Category | Profiles")
print("-------------------------------+---------")
categoryResult = db.Category.select()
for i, v in enumerate(categoryResult):
print(
"{index:3d}. {category:25s} | {profCnt:7,d}".format(
index=i + 1, category=v.name, profCnt=v.profiles.count()
)
)
print()
def printCategoriesAndProfiles():
"""
Iterate through Categories in db to print out the name and list of
the Profiles in each.
:return: None
"""
for i, cat in enumerate(db.Category.select()):
profiles = list(cat.profiles.orderBy("screen_name"))
print(
"{index:d}. {name:15s} {profCnt:,d} profiles".format(
index=i + 1, name=cat.name, profCnt=len(profiles)
)
)
for p in profiles:
print(
" - @{screenName:20} | {name}".format(
screenName=p.screenName, name=p.name
)
)
print()
def printUnassignedProfiles():
"""
Iterate through Profiles in db to print out those in no Categories.
Output may be very long for large datasets of Profiles.
TODO: Add filters such as top N recently created profiles or most
followers. And find a way to make this more useful, considering that
the influencer category and a specific influencer category could be assigned
    on fetch_profiles.py running, but it has to be clear that no industry is
    assigned yet.
:return: None
"""
for profileRec in db.Profile.select(orderBy="screen_name"):
if not profileRec.categories.count():
print(
"@{screenName} | {name} | {followers:,d} followers".format(
screenName=profileRec.screenName,
name=profileRec.name,
followers=profileRec.followersCount,
)
)
print(profileRec.getFlatDescription())
print()
if __name__ == "__main__":
    print("Available categories")
print("====================")
printAvailableCategories()
print()
print("Profiles")
print("========")
printCategoriesAndProfiles()

example/logo_7_8.py | nikteliy/python-snap7 | MIT | 412 stars

import logging
import snap7
# for setup the Logo connection please follow this link
# http://snap7.sourceforge.net/logo.html
logging.basicConfig(level=logging.INFO)
# Siemens LOGO devices Logo 8 is the default
Logo_7 = True
logger = logging.getLogger(__name__)
plc = snap7.logo.Logo()
plc.connect("192.168.0.41",0x1000,0x2000)
if plc.get_connected():
logger.info("connected")
# read I1 from logo
    vm_address = "V923.0" if Logo_7 else "V1024.0"
print (f"I1: {str(plc.read(vm_address))}")
# write some values in VM addresses between 0 and 100
value_1 = 0b10110001
value_2 = 480
print("write 0b10110001 to V10")
plc.write("V10", value_1)
print(f"read V10.0 must be 1 - check: {str(plc.read('V10.0'))}")
print(f"read V10.3 must be 0 - check: {str(plc.read('V10.3'))}")
print(f"read V10.7 must be 1 - check: {str(plc.read('V10.7'))}")
print("write 480 analog value to VW20")
plc.write("VW20", value_2)
print(f"read VW20 must be 480 - check: {str(plc.read('VW20'))}")
print("trigger V10.2")
plc.write("V10.2", 0)
plc.write("V10.2", 1)
plc.write("V10.2", 0)
else:
logger.error("Conncetion failed")
plc.disconnect()
logger.info("Disconnected")
plc.destroy()
| 23.685185 | 68 | 0.645817 |

supplychainpy/distribution/air_frieght_transport.py | luisccalves/supplychainpy | BSD-3-Clause | 231 stars

class FreightPlane:
def __init__(self):
        pass

elit/datasets/ner/resume.py | emorynlp/levi-graph-amr-parser | Apache-2.0 | 9 stars

# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-06-08 12:10
from elit.common.dataset import TransformableDataset
from elit.utils.io_util import get_resource, generate_words_tags_from_tsv
_RESUME_NER_HOME = 'https://github.com/jiesutd/LatticeLSTM/archive/master.zip#'
RESUME_NER_TRAIN = _RESUME_NER_HOME + 'ResumeNER/train.char.bmes'
'''Training set of Resume in char level.'''
RESUME_NER_DEV = _RESUME_NER_HOME + 'ResumeNER/dev.char.bmes'
'''Dev set of Resume in char level.'''
RESUME_NER_TEST = _RESUME_NER_HOME + 'ResumeNER/test.char.bmes'
'''Test set of Resume in char level.'''
| 34.764706 | 79 | 0.764805 |

PythonProjects/10-Sequences/src/m7_summary.py | much2mutch/csse120-public | MIT

"""
This module lets you practice two forms of the ACCUMULATOR pattern:
-- SUMMING
-- COUNTING
where the accumulation is done via ITERATING (i.e., looping)
through a SEQUENCE.
It also demonstrates the distinction between:
-- an INDEX of the sequence (e.g., -5 is at index 1 in [0, -5, 12, -6]) and
-- the item AT an index (e.g., the item at index 3 in [0, -5, 12, -6] is -6).
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Mark Hays, Amanda Stouder, Derek Whitley, their colleagues,
and Seth Mutchler.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
import testing_helper
import time
def main():
""" Calls the TEST functions in this module. """
run_test_summary1a()
    run_test_summary1b()
run_test_summary1c()
###############################################################################
# DONE: 2. READ the green doc-string for the:
# - is_prime
# function defined below. You do NOT need to understand its
# implementation, just its specification (per the doc-string).
# You should ** CALL ** this function as needed in implementing the
# other functions. After you have READ this, change its _TODO_ to DONE.
###############################################################################
def is_prime(n):
"""
What comes in: An integer n >= 2.
What goes out:
-- Returns True if the given integer is prime,
else returns False.
Side effects: None.
Examples:
-- is_prime(11) returns True
-- is_prime(12) returns False
-- is_prime(2) returns True
Note: The algorithm used here is simple and clear but slow.
"""
for k in range(2, (n // 2) + 1):
if n % k == 0:
return False
return True
# -------------------------------------------------------------------------
# Students:
# Do NOT touch the above is_prime function - it has no _TODO_.
# Do NOT copy code from this function.
#
# Instead, ** CALL ** this function as needed in the problems below.
# -------------------------------------------------------------------------
def run_test_summary1a():
""" Tests the summary1a function. """
print()
print('--------------------------------------------------')
print('Testing the summary1a function:')
print('--------------------------------------------------')
format_string = ' summary1a( {} )'
test_results = [0, 0] # Number of tests passed, failed.
# Test 1:
expected = 4
sequence = (20, 23, 29, 30, 33, 29, 100, 2, 4)
print_expected_result_of_test([sequence], expected, test_results,
format_string)
actual = summary1a(sequence)
print_actual_result_of_test(expected, actual, test_results)
# Test 2:
expected = 4
sequence = (23, 29, 30, 33, 29, 100, 2)
print_expected_result_of_test([sequence], expected, test_results,
format_string)
actual = summary1a(sequence)
print_actual_result_of_test(expected, actual, test_results)
# Test 3:
expected = 3
sequence = (20, 29, 30, 33, 29, 100, 100, 99, 40, 30, 30, 2)
print_expected_result_of_test([sequence], expected, test_results,
format_string)
actual = summary1a(sequence)
print_actual_result_of_test(expected, actual, test_results)
# Test 4:
expected = 3
sequence = (29, 29, 30, 33, 29, 100, 100, 99, 40, 30, 30)
print_expected_result_of_test([sequence], expected, test_results,
format_string)
actual = summary1a(sequence)
print_actual_result_of_test(expected, actual, test_results)
# Test 5:
expected = 2
sequence = (30, 33, 29, 17, 100, 99, 40, 30, 30)
print_expected_result_of_test([sequence], expected, test_results,
format_string)
actual = summary1a(sequence)
print_actual_result_of_test(expected, actual, test_results)
# Test 6:
expected = 1
sequence = (30, 33, 13, 100, 99, 40, 30, 30)
print_expected_result_of_test([sequence], expected, test_results,
format_string)
actual = summary1a(sequence)
print_actual_result_of_test(expected, actual, test_results)
# Test 7:
expected = 0
sequence = (30, 33, 4, 10, 21, 100, 99, 40, 30, 30)
print_expected_result_of_test([sequence], expected, test_results,
format_string)
actual = summary1a(sequence)
print_actual_result_of_test(expected, actual, test_results)
# Test 8:
expected = 3
sequence = (5, 3, 3)
print_expected_result_of_test([sequence], expected, test_results,
format_string)
actual = summary1a(sequence)
print_actual_result_of_test(expected, actual, test_results)
# Test 9:
expected = 2
sequence = (5, 3)
print_expected_result_of_test([sequence], expected, test_results,
format_string)
actual = summary1a(sequence)
print_actual_result_of_test(expected, actual, test_results)
# Test 10:
expected = 1
sequence = (5,)
print_expected_result_of_test([sequence], expected, test_results,
format_string)
actual = summary1a(sequence)
print_actual_result_of_test(expected, actual, test_results)
# Test 11:
expected = 0
sequence = ()
print_expected_result_of_test([sequence], expected, test_results,
format_string)
actual = summary1a(sequence)
print_actual_result_of_test(expected, actual, test_results)
print_summary_of_test_results(test_results)
def summary1a(sequence):
"""
What comes in: A sequence of integers, all >= 2.
What goes out:
-- Returns the number of items in the sequence that are prime.
Side effects: None.
Examples:
-- If the given sequence is [20, 23, 29, 30, 33, 29, 100, 2, 4],
then the returned value is 4, since 23, 29, 29, and 2
are the 4 primes in the sequence.
"""
count = 0
for k in range(len(sequence)):
if is_prime(sequence[k]):
count += 1
return count
# -------------------------------------------------------------------------
# DONE: 3. Implement and test this function.
# -------------------------------------------------------------------------
def run_test_summary1b():
""" Tests the summary1b function. """
print()
print('--------------------------------------------------')
print('Testing the summary1b function:')
print('--------------------------------------------------')
format_string = ' summary1b( {} )'
test_results = [0, 0] # Number of tests passed, failed.
# Test 1:
expected = 83
sequence = (20, 23, 29, 30, 33, 29, 100, 2, 4)
print_expected_result_of_test([sequence], expected, test_results,
format_string)
actual = summary1b(sequence)
print_actual_result_of_test(expected, actual, test_results)
# Test 2:
expected = 83
sequence = (23, 29, 30, 33, 29, 100, 2)
print_expected_result_of_test([sequence], expected, test_results,
format_string)
actual = summary1b(sequence)
print_actual_result_of_test(expected, actual, test_results)
# Test 3:
expected = 60
sequence = (20, 29, 30, 33, 29, 100, 100, 99, 40, 30, 30, 2)
print_expected_result_of_test([sequence], expected, test_results,
format_string)
actual = summary1b(sequence)
print_actual_result_of_test(expected, actual, test_results)
# Test 4:
expected = 87
sequence = (29, 29, 30, 33, 29, 100, 100, 99, 40, 30, 30)
print_expected_result_of_test([sequence], expected, test_results,
format_string)
actual = summary1b(sequence)
print_actual_result_of_test(expected, actual, test_results)
# Test 5:
expected = 46
sequence = (30, 33, 29, 17, 100, 99, 40, 30, 30)
print_expected_result_of_test([sequence], expected, test_results,
format_string)
actual = summary1b(sequence)
print_actual_result_of_test(expected, actual, test_results)
# Test 6:
expected = 13
sequence = (30, 33, 13, 100, 99, 40, 30, 30)
print_expected_result_of_test([sequence], expected, test_results,
format_string)
actual = summary1b(sequence)
print_actual_result_of_test(expected, actual, test_results)
# Test 7:
expected = 0
sequence = (30, 33, 4, 10, 21, 100, 99, 40, 30, 30)
print_expected_result_of_test([sequence], expected, test_results,
format_string)
actual = summary1b(sequence)
print_actual_result_of_test(expected, actual, test_results)
# Test 8:
expected = 11
sequence = (5, 3, 3)
print_expected_result_of_test([sequence], expected, test_results,
format_string)
actual = summary1b(sequence)
print_actual_result_of_test(expected, actual, test_results)
# Test 9:
expected = 8
sequence = (5, 3)
print_expected_result_of_test([sequence], expected, test_results,
format_string)
actual = summary1b(sequence)
print_actual_result_of_test(expected, actual, test_results)
# Test 10:
expected = 5
sequence = (5,)
print_expected_result_of_test([sequence], expected, test_results,
format_string)
actual = summary1b(sequence)
print_actual_result_of_test(expected, actual, test_results)
# Test 11:
expected = 0
sequence = ()
print_expected_result_of_test([sequence], expected, test_results,
format_string)
actual = summary1b(sequence)
print_actual_result_of_test(expected, actual, test_results)
print_summary_of_test_results(test_results)
def summary1b(sequence):
"""
What comes in: A sequence of integers, all >= 2.
What goes out:
-- Returns the sum of items in the sequence that are prime.
Side effects: None.
Examples:
-- If the given sequence is [20, 23, 29, 30, 33, 29, 100, 2, 4],
then the returned value is 83, since the primes in the sequence
are 23, 29, 29, and 2, and 23 + 29 + 29 + 2 = 83.
"""
total = 0
for k in range(len(sequence)):
if is_prime(sequence[k]):
total += sequence[k]
return total
# -------------------------------------------------------------------------
# DONE: 4. Implement and test this function.
# -------------------------------------------------------------------------
def run_test_summary1c():
""" Tests the summary1c function. """
print()
print('--------------------------------------------------')
print('Testing the summary1c function:')
print('--------------------------------------------------')
format_string = ' summary1c( {} )'
test_results = [0, 0] # Number of tests passed, failed.
# Test 1:
expected = 1 + 2 + 5 + 7 # which is 15
sequence = (20, 23, 29, 30, 33, 29, 100, 2, 4)
print_expected_result_of_test([sequence], expected, test_results,
format_string)
actual = summary1c(sequence)
print_actual_result_of_test(expected, actual, test_results)
# Test 2:
expected = 1 + 4 + 6 # which is 11
sequence = (23, 29, 30, 33, 29, 100, 2)
print_expected_result_of_test([sequence], expected, test_results,
format_string)
actual = summary1c(sequence)
print_actual_result_of_test(expected, actual, test_results)
# Test 3:
expected = 16
sequence = (20, 29, 30, 33, 29, 100, 100, 99, 40, 30, 30, 2)
print_expected_result_of_test([sequence], expected, test_results,
format_string)
actual = summary1c(sequence)
print_actual_result_of_test(expected, actual, test_results)
# Test 4:
expected = 5
sequence = (29, 29, 30, 33, 29, 100, 100, 99, 40, 30, 30)
print_expected_result_of_test([sequence], expected, test_results,
format_string)
actual = summary1c(sequence)
print_actual_result_of_test(expected, actual, test_results)
# Test 5:
expected = 5
sequence = (30, 33, 29, 17, 100, 99, 40, 30, 30)
print_expected_result_of_test([sequence], expected, test_results,
format_string)
actual = summary1c(sequence)
print_actual_result_of_test(expected, actual, test_results)
# Test 6:
expected = 2
sequence = (30, 33, 13, 100, 99, 40, 30, 30)
print_expected_result_of_test([sequence], expected, test_results,
format_string)
actual = summary1c(sequence)
print_actual_result_of_test(expected, actual, test_results)
# Test 7:
expected = 0
sequence = (30, 33, 4, 10, 21, 100, 99, 40, 30, 30)
print_expected_result_of_test([sequence], expected, test_results,
format_string)
actual = summary1c(sequence)
print_actual_result_of_test(expected, actual, test_results)
# Test 8:
expected = 3
sequence = (5, 3, 3)
print_expected_result_of_test([sequence], expected, test_results,
format_string)
actual = summary1c(sequence)
print_actual_result_of_test(expected, actual, test_results)
# Test 9:
expected = 1
sequence = (5, 3)
print_expected_result_of_test([sequence], expected, test_results,
format_string)
actual = summary1c(sequence)
print_actual_result_of_test(expected, actual, test_results)
# Test 10:
expected = 0
sequence = (5,)
print_expected_result_of_test([sequence], expected, test_results,
format_string)
actual = summary1c(sequence)
print_actual_result_of_test(expected, actual, test_results)
# Test 11:
expected = 0
sequence = ()
print_expected_result_of_test([sequence], expected, test_results,
format_string)
actual = summary1c(sequence)
print_actual_result_of_test(expected, actual, test_results)
# Test 12:
expected = 0
sequence = (4,)
print_expected_result_of_test([sequence], expected, test_results,
format_string)
actual = summary1c(sequence)
print_actual_result_of_test(expected, actual, test_results)
print_summary_of_test_results(test_results)
def summary1c(sequence):
"""
What comes in: A sequence of integers, all >= 2.
What goes out:
-- Returns the sum of INDICES of the items in the sequence
that are prime.
Side effects: None.
Examples:
-- If the given sequence is [20, 23, 29, 30, 33, 29, 100, 2, 4],
then the returned value is 15, since the primes in the sequence
are at INDICES 1, 2, 5 and 7, and 1 + 2 + 5 + 7 = 15.
"""
total = 0
for k in range(len(sequence)):
if is_prime(sequence[k]):
total += k
return total
# -------------------------------------------------------------------------
# DONE: 5. Implement and test this function.
# -------------------------------------------------------------------------
###############################################################################
# Our tests use the following to print error messages in red.
# Do NOT change it. You do NOT have to do anything with it.
###############################################################################
def print_expected_result_of_test(arguments, expected,
test_results, format_string, suffix=''):
testing_helper.print_expected_result_of_test(arguments, expected,
test_results, format_string,
suffix)
def print_actual_result_of_test(expected, actual, test_results,
precision=None):
testing_helper.print_actual_result_of_test(expected, actual,
test_results, precision)
def print_summary_of_test_results(test_results):
testing_helper.print_summary_of_test_results(test_results)
# To allow color-coding the output to the console:
USE_COLORING = True # Change to False to revert to OLD style coloring
testing_helper.USE_COLORING = USE_COLORING
if USE_COLORING:
# noinspection PyShadowingBuiltins
print = testing_helper.print_colored
else:
# noinspection PyShadowingBuiltins
print = testing_helper.print_uncolored
# -----------------------------------------------------------------------------
# Calls main to start the ball rolling.
# The try .. except prevents error messages on the console from being
# intermingled with ordinary output to the console.
# -----------------------------------------------------------------------------
try:
main()
except Exception:
print('ERROR - While running this test,', color='red')
print('your code raised the following exception:', color='red')
print()
time.sleep(1)
raise
| 35.489837 | 79 | 0.573335 |

src/rubrix/sdk/models/text_classification_annotation.py | sakares/rubrix | Apache-2.0

# coding=utf-8
# Copyright 2021-present, the Recognai S.L. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, Type, TypeVar
import attr
from ..models.class_prediction import ClassPrediction
T = TypeVar("T", bound="TextClassificationAnnotation")
@attr.s(auto_attribs=True)
class TextClassificationAnnotation:
"""Annotation class for text classification tasks
Attributes:
-----------
labels: List[LabelPrediction]
list of annotated labels with score"""
agent: str
labels: List[ClassPrediction]
additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)
def to_dict(self) -> Dict[str, Any]:
agent = self.agent
labels = []
for labels_item_data in self.labels:
labels_item = labels_item_data.to_dict()
labels.append(labels_item)
field_dict: Dict[str, Any] = {}
field_dict.update(self.additional_properties)
field_dict.update(
{
"agent": agent,
"labels": labels,
}
)
return field_dict
@classmethod
def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
d = src_dict.copy()
agent = d.pop("agent")
labels = []
_labels = d.pop("labels")
for labels_item_data in _labels:
labels_item = ClassPrediction.from_dict(labels_item_data)
labels.append(labels_item)
text_classification_annotation = cls(
agent=agent,
labels=labels,
)
text_classification_annotation.additional_properties = d
return text_classification_annotation
@property
def additional_keys(self) -> List[str]:
return list(self.additional_properties.keys())
def __getitem__(self, key: str) -> Any:
return self.additional_properties[key]
def __setitem__(self, key: str, value: Any) -> None:
self.additional_properties[key] = value
def __delitem__(self, key: str) -> None:
del self.additional_properties[key]
def __contains__(self, key: str) -> bool:
return key in self.additional_properties
| 29.086022 | 77 | 0.655083 |

src/wai/annotations/adams/base/component/__init__.py | waikato-ufdl/wai-annotations-adams | Apache-2.0

from ._ADAMSBaseReader import ADAMSBaseReader
from ._ADAMSBaseWriter import ADAMSBaseWriter
from ._ADAMSFilenameSource import ADAMSFilenameSource
| 36.5 | 53 | 0.89726 |

dvc/ignore.py | amisev/dvc | Apache-2.0

from __future__ import unicode_literals
import os
from dulwich.ignore import match_pattern, read_ignore_patterns
from dvc.utils import relpath
from dvc.utils.compat import cast_bytes
from dvc.utils.fs import get_parent_dirs_up_to
class DvcIgnoreFileHandler(object):
def __init__(self, tree):
self.tree = tree
def read_patterns(self, path):
with self.tree.open(path, binary=True) as stream:
return self._read_patterns(stream)
def get_repo_root(self):
return self.tree.tree_root
def _read_patterns(self, binary_stream):
negate_patterns = []
patterns = []
for pattern in read_ignore_patterns(binary_stream):
if pattern.lstrip().startswith(b"!"):
negate_patterns.append(pattern)
else:
patterns.append(pattern)
return negate_patterns, patterns
class DvcIgnore(object):
DVCIGNORE_FILE = ".dvcignore"
def __call__(self, root, dirs, files):
raise NotImplementedError
class DvcIgnoreFromFile(DvcIgnore):
def __init__(self, ignore_file_path, ignore_handler):
self.ignore_file_path = ignore_file_path
self.dirname = os.path.normpath(os.path.dirname(ignore_file_path))
self.patterns = []
self.negate_patterns = []
self.negate_patterns, self.patterns = ignore_handler.read_patterns(
ignore_file_path
)
def __call__(self, root, dirs, files):
files = [f for f in files if not self.matches(root, f)]
dirs = [d for d in dirs if not self.matches(root, d)]
return dirs, files
def get_match(self, abs_path):
relative_path = relpath(abs_path, self.dirname)
if os.name == "nt":
relative_path = relative_path.replace("\\", "/")
relative_path = cast_bytes(relative_path, "utf-8")
for pattern in self.patterns:
if match_pattern(
relative_path, pattern
) and self._no_negate_pattern_matches(relative_path):
return (abs_path, pattern, self.ignore_file_path)
return None
def matches(self, dirname, basename):
if self.get_match(os.path.join(dirname, basename)):
return True
return False
def _no_negate_pattern_matches(self, path):
return all([not match_pattern(path, p) for p in self.negate_patterns])
def __hash__(self):
return hash(self.ignore_file_path)
class DvcIgnoreConstant(DvcIgnore):
def __init__(self, basename):
self.basename = basename
class DvcIgnoreDir(DvcIgnoreConstant):
def __call__(self, root, dirs, files):
dirs = [d for d in dirs if not d == self.basename]
return dirs, files
class DvcIgnoreFile(DvcIgnoreConstant):
def __call__(self, root, dirs, files):
files = [f for f in files if not f == self.basename]
return dirs, files
class DvcIgnoreFilter(object):
def __init__(self, wdir, ignore_file_handler=None):
self.ignores = [
DvcIgnoreDir(".git"),
DvcIgnoreDir(".hg"),
DvcIgnoreDir(".dvc"),
DvcIgnoreFile(".dvcignore"),
]
self.ignore_file_handler = ignore_file_handler
self._process_ignores_in_parent_dirs(wdir)
def _process_ignores_in_parent_dirs(self, wdir):
if self.ignore_file_handler:
wdir = os.path.normpath(os.path.abspath(wdir))
ignore_search_end_dir = self.ignore_file_handler.get_repo_root()
parent_dirs = get_parent_dirs_up_to(wdir, ignore_search_end_dir)
for d in parent_dirs:
self.update(d)
def update(self, wdir):
ignore_file_path = os.path.join(wdir, DvcIgnore.DVCIGNORE_FILE)
if os.path.exists(ignore_file_path):
file_ignore = DvcIgnoreFromFile(
ignore_file_path, ignore_handler=self.ignore_file_handler
)
self.ignores.append(file_ignore)
def __call__(self, root, dirs, files):
if self.ignore_file_handler:
self.update(root)
for ignore in self.ignores:
dirs, files = ignore(root, dirs, files)
return dirs, files
| 30.5 | 78 | 0.647897 |

python_solutions/CHAIN_INSPECTION/CHAIN_INSPECTION.py | joelstanner/codeeval | MIT

"""
CHAIN INSPECTION.
https://www.codeeval.com/open_challenges/119/
We use special generator to produce chains of elements. Don't ask why we need
them, we're not sure that somebody knows the answer for this question. A
chain is represented by a string of name-address pairs. So the first element
is the name of a pair and the second one (address) is pointing to the name of
a next pair. E.g.
BEGIN-3;4-2;3-4;2-END # GOOD
77-END;BEGIN-8;8-11;11-77 # GOOD
In examples above we can pass trough the chains from the 'BEGIN' to the 'END'
without missing any single pair. In the first case we moved from 'BEGIN' to 3,
from 3 to 4, from 4 to 2, from 2 to 'END'. In the second case we moved from
'BEGIN' to 8, from 8 to 11, from 11 to 77, from 77 to 'END'.
Our generator was producing only good chains, but something went wrong and
now it generates random chains and we are not sure if it's a good chain or a
bad one. E.g.
BEGIN-3;4-3;3-4;2-END # BAD
77-END;BEGIN-8;8-77;11-11 # BAD
In the first case the 'END' is unreachable because we have a loop between
3 and 4.
In the second case we can reach the 'END' but we missed one pair (11-11).
We know that for a BAD chain the generator first produces a GOOD chain but
after that it may replace existing address in some pairs with an address from
another pair. It never replaces an address in a pair to an addresses which
isn't present in original chain.
You can help us by writing a program that investigates the input and finds
GOOD and BAD chains.
INPUT SAMPLE:
Your program should accept as its first argument a path to a filename. Each
string in this file is a chain. The pairs are separating by semicolon, the
names and the address are separating by dash. E.g.
4-2;BEGIN-3;3-4;2-END
4-2;BEGIN-3;3-4;2-3
OUTPUT SAMPLE:
For each line of input print out the chain status. E.g.
GOOD
BAD
Constraints:
The number of pairs in a chain is in range [1, 500]
The addresses are integers in range [2, 10000]
-------------------------------------------------------
process:
Read a line from the input file
split the chain parts into pairs
create a dictionary of chain connections
determine if chain is good or bad:
Trace from BEGIN
Find END
Detect loops
Detect missed chain links
"""
from sys import argv
def make_links(line):
"""Split a line into parts, return a dictionary of chain links."""
link_parts = line.split(";")
chain_dict = {
k: v for k, v in tuple([x.split('-') for x in link_parts])
}
return chain_dict
def inspect_chain(chain):
"""Return whether a chain is 'GOOD' or 'BAD'."""
next_key = chain.pop('BEGIN')
while True:
try:
next_key = chain.pop(next_key)
if next_key == "END":
break
except KeyError:
return "BAD"
if len(chain) > 0:
return "BAD"
return "GOOD"
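
# Added illustrative self-check (not part of the original solution): it exercises
# make_links() and inspect_chain() on the sample chains quoted in the module docstring.
# It is only a helper and is never called automatically, so the command-line behaviour
# of the script is unchanged.
def demo_chain_inspection():
    """Verify the helpers against the documented GOOD and BAD sample chains."""
    good_chain = "BEGIN-3;4-2;3-4;2-END"    # reaches END and uses every pair
    bad_loop = "BEGIN-3;4-3;3-4;2-END"      # loops between 3 and 4, END unreachable
    bad_missed = "77-END;BEGIN-8;8-77;11-11"  # reaches END but the pair 11-11 is never used
    assert inspect_chain(make_links(good_chain)) == "GOOD"
    assert inspect_chain(make_links(bad_loop)) == "BAD"
    assert inspect_chain(make_links(bad_missed)) == "BAD"
    print("all documented sample chains verified")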
def main(input_file):
"""Run the process as described in the challenge description."""
with open(input_file, "r") as file:
for line in file:
line = line.rstrip()
# split the chain parts into pairs
# create a dictionary of chain connections
links = make_links(line)
chain_state = inspect_chain(links)
print(chain_state)
if __name__ == "__main__":
main(argv[1])
| 29.858407 | 78 | 0.667161 |

domain-messages/domain_messages/LFMMarketResult/lfmmarketresult.py | simcesplatform/static-time-series-resource-forecaster | MIT

# -*- coding: utf-8 -*-
# Copyright 2021 Tampere University and VTT Technical Research Centre of Finland
# This software was developed as a part of the ProCemPlus project: https://www.senecc.fi/projects/procemplus
# This source code is licensed under the MIT license. See LICENSE in the repository root directory.
# Author(s): Amir Safdarian <amir.safdarian@vtt.fi>
# Kalle Ruuth (TAU) <kalle.ruuth@tuni.fi>
# Keski-Koukkari Antti <antti.keski-koukkari@vtt.fi>
# Md Tanjimuddin <md.tanjimuddin@tuni.fi>
# Olli Suominen <olli.suominen@tuni.fi>
# Otto Hylli <otto.hylli@tuni.fi>
# Tanjim <tanjim0023@gmail.com>
# Ville Heikkilä <ville.heikkila@tuni.fi>
# Ville Mörsky (TAU) <ville.morsky@tuni.fi>
"""This module contains the message class for the simulation platform LFMMarketResult messages."""
from __future__ import annotations
from typing import Union, Dict, Any, List
from tools.exceptions.messages import MessageValueError
from tools.message.abstract import AbstractResultMessage
from tools.tools import FullLogger
import datetime
from tools.datetime_tools import to_iso_format_datetime_string
from tools.message.block import QuantityBlock, TimeSeriesBlock
LOGGER = FullLogger(__name__)
# Example:
# TimeSeriesBlock for RealPower:
#
# time_tmp = datetime.datetime.now()
# time_index_list = [to_iso_format_datetime_string(time_tmp),
# to_iso_format_datetime_string(time_tmp + datetime.timedelta(hours=1))]
# time_series_block_tmp = TimeSeriesBlock(time_index_list,
# {"Regulation": ValueArrayBlock([2.0, 3.0],
# "kW")})
#
# newMessage2 = LFMMarketResultMessage(**{
# "Type": "LFMMarketResult",
# "SimulationId": to_iso_format_datetime_string(datetime.datetime.now()),
# "SourceProcessId": "source1",
# "MessageId": "messageid1",
# "EpochNumber": 1,
# "TriggeringMessageIds": ["messageid1.1", "messageid1.2"],
# "ActivationTime": to_iso_format_datetime_string(datetime.datetime.now()),
# "Duration": 1.0,
# "Direction": "upregulation",
# "RealPower": time_series_block_tmp,
# "Price": 2.0,
# "CongestionId": "congestionId1",
# "OfferId": "offerid1",
# "ResultCount": 1,
# "CustomerIds": ["Customer1", "Customer2"]
# })
#
# NOTE! - ResultCount is only value that requires a value
# Others can be left out or set to None (outside what AbstractResultMessage requires)
# Use this if ResultCount == 0
# (No checks on this)
class LFMMarketResultMessage(AbstractResultMessage):
"""Class containing all the attributes for an LFMMarketResult message."""
# message type for these messages
CLASS_MESSAGE_TYPE = "LFMMarketResult"
MESSAGE_TYPE_CHECK = True
# Mapping from message JSON attributes to class attributes
MESSAGE_ATTRIBUTES = {
"ActivationTime": "activation_time",
"Duration": "duration",
"Direction": "direction",
"RealPower": "real_power",
"Price": "price",
"CongestionId": "congestion_id",
"OfferId": "offer_id",
"ResultCount": "result_count",
"CustomerIds": "customerids"
}
OPTIONAL_ATTRIBUTES = []
# Values accepted for direction
ALLOWED_DIRECTION_VALUES = ["upregulation", "downregulation"]
# Units accepted for price
ALLOWED_PRICE_UNIT = "EUR"
# RealPower units:
REAL_POWER_UNIT = "kW"
# Attribute names
ATTRIBUTE_REALPOWER = "RealPower"
ATTRIBUTE_PRICE = "Price"
ATTRIBUTE_DURATION = "Duration"
# attributes whose value should be a QuantityBlock and the expected unit of measure.
QUANTITY_BLOCK_ATTRIBUTES = {
ATTRIBUTE_DURATION: "Minute",
ATTRIBUTE_PRICE: "EUR"
}
# attributes whose value should be a Array Block.
QUANTITY_ARRAY_BLOCK_ATTRIBUTES = {}
# attributes whose value should be a Timeseries Block.
TIMESERIES_BLOCK_ATTRIBUTES = [ATTRIBUTE_REALPOWER]
MESSAGE_ATTRIBUTES_FULL = {
**AbstractResultMessage.MESSAGE_ATTRIBUTES_FULL,
**MESSAGE_ATTRIBUTES
}
OPTIONAL_ATTRIBUTES_FULL = AbstractResultMessage.OPTIONAL_ATTRIBUTES_FULL + OPTIONAL_ATTRIBUTES
QUANTITY_BLOCK_ATTRIBUTES_FULL = {
**AbstractResultMessage.QUANTITY_BLOCK_ATTRIBUTES_FULL,
**QUANTITY_BLOCK_ATTRIBUTES
}
QUANTITY_ARRAY_BLOCK_ATTRIBUTES_FULL = {
**AbstractResultMessage.QUANTITY_ARRAY_BLOCK_ATTRIBUTES_FULL,
**QUANTITY_ARRAY_BLOCK_ATTRIBUTES
}
TIMESERIES_BLOCK_ATTRIBUTES_FULL = (
AbstractResultMessage.TIMESERIES_BLOCK_ATTRIBUTES_FULL +
TIMESERIES_BLOCK_ATTRIBUTES
)
def __eq__(self, other: Any) -> bool:
"""Check that two LFMMarketResultMessages represent the same message."""
return (
super().__eq__(other) and
isinstance(other, LFMMarketResultMessage) and
self.activation_time == other.activation_time and
self.duration == other.duration and
self.direction == other.direction and
self.real_power == other.real_power and
self.price == other.price and
self.congestion_id == other.congestion_id and
self.offer_id == other.offer_id and
self.result_count == other.result_count and
self.customerids == other.customerids
)
@property
def activation_time(self) -> Union[str, None]:
"""The activation time in ISO 8601 format"""
return self.__activation_time
@activation_time.setter
def activation_time(self, activation_time: Union[None, str, datetime.datetime]):
if activation_time is None:
self.__activation_time = activation_time
return
if self._check_activation_time(activation_time):
iso_format_string = to_iso_format_datetime_string(activation_time)
if isinstance(iso_format_string, str):
self.__activation_time = iso_format_string
return
raise MessageValueError("'{:s}' is an invalid ActivationTime".format(str(activation_time)))
@classmethod
def _check_activation_time(cls, activation_time: Union[None, str, datetime.datetime]) -> bool:
return activation_time is None or cls._check_datetime(activation_time)
@property
def duration(self) -> Union[QuantityBlock, None]:
"""The duration of the request"""
return self.__duration
@duration.setter
def duration(self, duration: Union[str, float, int, Dict[str, Any], QuantityBlock, None]):
if isinstance(duration, int):
duration = float(duration)
if self._check_duration(duration):
self._set_quantity_block_value(self.ATTRIBUTE_DURATION, duration)
return
raise MessageValueError("'{:s}' is an invalid value for {}.".format(
str(duration), self.ATTRIBUTE_DURATION))
@classmethod
def _check_duration(cls, duration: Union[str, float, int, Dict[str, Any], QuantityBlock, None]) -> bool:
return cls._check_quantity_block(
value=duration,
unit=cls.QUANTITY_BLOCK_ATTRIBUTES_FULL[cls.ATTRIBUTE_DURATION],
can_be_none=True,
float_value_check=lambda value: value >= 0.0
)
@property
def direction(self) -> Union[str, None]:
"""The direction of the request"""
return self.__direction
@direction.setter
def direction(self, direction: Union[str, None]):
if self._check_direction(direction):
self.__direction = direction
return
raise MessageValueError("'{:s}' is an invalid value for Direction".format(str(direction)))
@classmethod
def _check_direction(cls, direction: Union[str, None]) -> bool:
if direction is None or (isinstance(direction, str) and direction in cls.ALLOWED_DIRECTION_VALUES):
return True
return False
@property
def real_power(self) -> Union[TimeSeriesBlock, None]:
"""Offered regulation as a TimeSeriesBlock"""
return self.__real_power
@real_power.setter
def real_power(self, real_power: Union[TimeSeriesBlock, Dict[str, Any], None]):
if self._check_real_power(real_power):
self._set_timeseries_block_value(self.ATTRIBUTE_REALPOWER, real_power)
return
raise MessageValueError("'{:s}' is an invalid value for {}".format(str(real_power),
self.ATTRIBUTE_REALPOWER))
@classmethod
def _check_real_power(cls, real_power: Union[TimeSeriesBlock, Dict[str, Any], None]) -> bool:
return cls._check_timeseries_block(value=real_power,
can_be_none=True,
block_check=cls._check_real_power_block)
@classmethod
def _check_real_power_block(cls, real_power_block: TimeSeriesBlock) -> bool:
block_series = real_power_block.series
if len(block_series) < 1 or len(real_power_block.time_index) < 1:
return False
for value_array in block_series.values():
if value_array.unit_of_measure != cls.REAL_POWER_UNIT:
return False
return True
@property
def price(self) -> Union[QuantityBlock, None]:
"""
Price of the offered regulation.
Units EUR
"""
return self.__price
@price.setter
def price(self, price: Union[str, float, int, QuantityBlock, None]):
"""
Sets the price for the offer. Sets the input price to a QuantityBlock.
If the price is given as a QuantityBlock, uses its unit of measure.
"""
if isinstance(price, int):
price = float(price)
if self._check_price(price):
self._set_quantity_block_value(message_attribute=self.ATTRIBUTE_PRICE,
quantity_value=price)
if isinstance(price, QuantityBlock) and isinstance(self.__price, QuantityBlock):
self.__price.unit_of_measure = price.unit_of_measure
return
raise MessageValueError("'{:s}' is an invalid value for {}".format(str(price),
self.ATTRIBUTE_PRICE))
@classmethod
def _check_price(cls, price) -> bool:
if isinstance(price, QuantityBlock):
            if price.unit_of_measure != cls.ALLOWED_PRICE_UNIT:
return False
return cls._check_quantity_block(value=price.value,
unit=price.unit_of_measure,
can_be_none=True,
float_value_check=lambda value: value >= 0.0)
return cls._check_quantity_block(value=price,
unit=cls.ALLOWED_PRICE_UNIT,
can_be_none=True,
float_value_check=lambda value: value >= 0.0)
@property
def congestion_id(self) -> Union[str, None]:
"""Identifier for the congestion area / specific congestion problem"""
return self.__congestion_id
@congestion_id.setter
def congestion_id(self, congestion_id: Union[str, None]):
if self._check_congestion_id(congestion_id):
self.__congestion_id = congestion_id
return
raise MessageValueError("'{:s}' is an invalid value for CongestionId".format(str(congestion_id)))
@classmethod
def _check_congestion_id(cls, congestion_id: Union[str, None]) -> bool:
return congestion_id is None or (isinstance(congestion_id, str) and len(congestion_id) > 0)
@property
def offer_id(self) -> Union[str, None]:
"""Identifier for this specific offer"""
return self.__offer_id
@offer_id.setter
def offer_id(self, offer_id: Union[str, None]):
if self._check_offer_id(offer_id):
self.__offer_id = offer_id
return
raise MessageValueError("'{:s}' is an invalid value for OfferId".format(str(offer_id)))
@classmethod
def _check_offer_id(cls, offer_id: Union[str, None]) -> bool:
return offer_id is None or (isinstance(offer_id, str) and len(offer_id) > 0)
@property
def result_count(self) -> int:
"""
Total number of accepted offers in place the provider is going to send to the running epoch related to this
congestion id.
"""
return self.__result_count
@result_count.setter
def result_count(self, result_count: Union[str, float, int]):
if self._check_result_count(result_count):
self.__result_count = int(result_count)
return
raise MessageValueError("'{:s}' is an invalid value for ResultCount".format(str(result_count)))
@classmethod
def _check_result_count(cls, result_count: Union[str, float, int]) -> bool:
if result_count is None or not isinstance(result_count, (str, float, int)):
return False
if isinstance(result_count, str):
try:
result_count = float(result_count)
except ValueError:
return False
return result_count >= 0 and (isinstance(result_count, int) or result_count.is_integer())
@property
def customerids(self) -> Union[List[str], None]:
return self.__customerids
@customerids.setter
def customerids(self, customerids: Union[List[str], None]):
if self._check_customerids(customerids):
self.__customerids = customerids
return
raise MessageValueError("'{:s}' is an invalid value for CustomerIds".format(str(customerids)))
@classmethod
def _check_customerids(cls, customerids: Union[List[str], None]) -> bool:
if customerids is None:
return True
if isinstance(customerids, list):
if len(customerids) > 0:
for customer in customerids:
if not isinstance(customer, str):
return False
return True
return False
@classmethod
def from_json(cls, json_message: Dict[str, Any]) -> Union[LFMMarketResultMessage, None]:
if cls.validate_json(json_message):
return LFMMarketResultMessage(**json_message)
return None
LFMMarketResultMessage.register_to_factory()
| 39.382749 | 115 | 0.642666 |

autolrn/encoding/labelenc.py | SimonCarozza/autolrn | MIT

"""Label encode and One-Hot encode dataframes."""
from sklearn.preprocessing import LabelEncoder
from pandas import get_dummies
from pandas import DataFrame
from pandas import Series
from pandas import concat
from pandas import merge
import numpy as np
# Auto encodes any dataframe column of type category or object.
def dummy_encode(df):
"""
Encode any dataframe column of type category or object.
---
df: pandas dataframe
"""
columnsToEncode = list(
df.select_dtypes(include=['category', 'object']))
df1 = df.copy()
for feature in columnsToEncode:
le = LabelEncoder()
try:
df1[feature] = le.fit_transform(df[feature].astype(str))
except Exception as e:
print(e)
print('Error encoding ' + feature)
return df1
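
# Added illustrative example (not part of the original module): dummy_encode replaces the
# text values of every object/category column with integer label codes and leaves numeric
# columns untouched. The toy frame below is invented purely for demonstration and the
# helper is never called automatically.
def _demo_dummy_encode():
    """Tiny demonstration of dummy_encode on a toy frame."""
    toy = DataFrame({"color": ["red", "blue", "red"], "size": [1, 2, 3]})
    encoded = dummy_encode(toy)
    # 'color' is now an integer column of label codes; 'size' is unchanged.
    print(encoded.dtypes)
    print(encoded)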
#######
def le_encode_column(column):
"""Label-encode pandas DataFrame column or Series"""
le = LabelEncoder()
le_column = le.fit_transform(column.astype(str))
if isinstance(column, DataFrame):
le_column = Series(le_column).to_frame(column.name)
# equivalent to:
# le_column = DataFrame(le_column, columns=[df_column.name])
elif isinstance(column, Series):
le_column = Series(le_column)
else:
raise TypeError(
"'column' should be of type pandas.DataFrame/Series")
return le_column
def encode_df_column(df_column):
"""Convert dataframe column 'df_column' into df w dummy variables."""
# print("column name: ", df_column.name)
# if df_column.isin([' ']):
# df_column = df_column.str.replace(' ', '_')
try:
enc_df_column = get_dummies(
df_column, prefix=df_column.name, prefix_sep='_')
except MemoryError as me:
print(me)
print("MemoryError! Column: " + df_column.name)
print("Proceed to label-encoding")
enc_df_column = le_encode_column(df_column)
except KeyError as ke:
print(ke)
print("KeyError! Column: " + df_column.name)
except ValueError as ve:
print(ve)
print("ValueError! Column: " + df_column.name)
except Exception:
print('Error encoding feature ' + df_column.name)
# print("column head", enc_df_column.head(1))
assert (len(enc_df_column) == len(df_column)), \
"Ouch! Encoded column's different length than original's!"
return enc_df_column
def get_date_features(df, freqs=None):
"""
Get dates objects from dataframe.
---
df: pandas Dataframe
freqs: frequencies of datetime objects
"""
new_df = DataFrame()
if freqs is None:
freqs = ['Year', 'Month', 'Day', 'Week', 'hour', 'min']
else:
for f in freqs:
if f not in ('Year', 'Month', 'Day', 'Week', 'hour', 'min'):
raise ValueError(
"'%s' is not a valid value. Valid values are:"
"['Year', 'Month', 'Day', 'hour', 'min']"
% f)
for feature in df.columns:
if df[feature].dtype == 'datetime64[ns]':
for f in freqs:
try:
if f == 'Year':
new_df[f] = df[feature].dt.year
elif f == 'Month':
new_df[f] = df[feature].dt.month
elif f == 'Day':
new_df[f] = df[feature].dt.day
elif f == 'Week':
new_df[f] = df[feature].dt.week
elif f == 'hour':
new_df[f] = df[feature].dt.hour
else:
new_df[f] = df[feature].dt.minute
except KeyError as ke:
print(ke)
print("KeyError! Column: " + feature)
except ValueError as ve:
print(ve)
print("ValueError! Column: " + feature)
except Exception as e:
raise e
else:
new_df[feature] = df[feature]
assert (len(new_df.index) == len(df.index)), \
"Ouch, encoded dataframe's different length than original's!"
# remove 0-columns
new_df = new_df.loc[:, (new_df != 0).any(axis=0)]
return new_df
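
# Added illustrative example (not part of the original module): get_date_features expands a
# datetime64 column into the requested calendar components and copies other columns through
# unchanged. The column names and values below are invented for the demo, and the helper is
# never called automatically.
def _demo_get_date_features():
    """Tiny demonstration of get_date_features on a two-row frame."""
    from pandas import to_datetime  # local import, only needed for this demo
    toy = DataFrame({
        "timestamp": to_datetime(["2020-01-31 10:30", "2020-06-15 22:05"]),
        "value": [1.5, 2.5],
    })
    expanded = get_date_features(toy, freqs=["Year", "Month", "Day"])
    # Expected columns: Year, Month, Day plus the untouched 'value' column.
    print(expanded)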
def get_dummies_or_label_encode(df, target=None, dummy_cols=10, ohe_dates=False):
"""
Label or One-Hot encode columns.
---
df: pandas Dataframe
ohe_dates: enable one-hot encoding of eventual date features
"""
df.reset_index(drop=True, inplace=True)
if target is None:
new_df = DataFrame()
cols = df.columns
else:
new_df = df[target].to_frame(name=target)
cols = df.drop([target], axis=1).columns
# print()
# print("New df's columns:\n", cols)
# print()
original_length = len(df.index)
columns_to_encode = list(
df.select_dtypes(include=['category', 'object', 'int64']))
for feature in cols:
col = df[feature]
if df[feature].dtype in (np.int64, 'object'):
col = col.astype('category')
nr_uniques = len(col.unique())
# print("%s's nr_uniques:" % feature, nr_uniques)
# if df[feature].dtype.name in column_types:
if feature in columns_to_encode:
try:
if new_df.empty:
# print("new_df is empty")
if ohe_dates:
if feature in ('Year', 'Month', 'Day', 'Week', 'hour', 'min'):
new_df = encode_df_column(col)
else:
new_df = le_encode_column(col).to_frame(feature)
else:
if nr_uniques < dummy_cols:
new_df = encode_df_column(col)
else:
new_df = le_encode_column(col)
if isinstance(new_df, Series):
new_df = new_df.to_frame(feature) # you forgot this
else:
# merge() more efficient than concat
if ohe_dates:
if feature in ('Year', 'Month', 'Day', 'Week', 'hour', 'min'):
new_df = merge(
new_df, encode_df_column(col), left_index=True,
right_index=True)
else:
new_df = merge(
new_df, le_encode_column(col).to_frame(feature),
left_index=True, right_index=True)
else:
new_df = merge(
new_df,
encode_df_column(col) if len(col.unique()) < dummy_cols \
else le_encode_column(col).to_frame(feature),
left_index=True, right_index=True)
# new_df = concat([
# new_df,
# encode_df_column(col) if len(col.unique()) < dummy_cols \
# else le_encode_column(col).to_frame(feature)], axis=1)
except KeyError as ke:
print(ke)
print("KeyError! Column: " + feature)
except ValueError as ve:
print(ve)
print("ValueError! Column: " + feature)
except Exception as e:
raise e
else:
if new_df.empty:
# print("new_df is empty")
new_df = col.to_frame(feature)
else:
# more efficient than concat
new_df = merge(
new_df, col.to_frame(feature),
left_index=True, right_index=True)
# new_df = concat(
# [new_df, col.to_frame(feature)], axis=1)
# print("New df's head:\n", new_df.head())
# print("New df's length:", len(new_df.index))
assert (len(new_df.index) == original_length), \
"Ouch, encoded dataframe's different length than original's!"
print()
print("New final df's head:\n", new_df.head(3))
print("New df's tail:\n", new_df.tail(3))
# print("New df's columns:", list(new_df.columns))
# print("New df's length:", len(new_df.index))
# print("Old df's length:", original_length)
return new_df
| 34.206478 | 87 | 0.519233 |

pybo/config.py | mikkokotila/pybo | Apache-2.0

# coding: utf-8
"""Configuration file to set up Pybo
"""
from pathlib import Path
import yaml
default_config = '''tokenizer:
trie_files:
- &part 'particles.txt'
- &ancient ancient.txt
- &except exceptions.txt
- &uncomp uncompound_lexicon.txt
- &tsikchen tsikchen.txt
- &oral0 oral_corpus_0.txt
- &oral1 oral_corpus_1.txt
- &oral2 oral_corpus_2.txt
- &oral3 oral_corpus_3.txt
- &record recordings_4.txt
- &mgd mgd.txt
- &verb verbs.txt
skrt_files:
- &skrt ~ssanskrit.txt
pos_files:
- &tibdict ~pTibetan.DICT
freq_files:
- &freq_mgd ~fmgd.txt
Profile:
empty: []
pytib: [*ancient, *except, *uncomp, *tsikchen, *tibdict, *part]
POS: [*ancient, *except, *uncomp, *tsikchen, *tibdict, *part]
PP: [*part]
GMD: [*ancient, *except, *uncomp, *tsikchen, *mgd, *verb, *tibdict, *skrt, *freq_mgd, *part]
pipeline:
basic:
pre: pre_basic
tok: spaces
proc: spaces_fulltext
frm: plaintext
pybo_raw_content:
pre: pre_basic
tok: pybo
pybo_profile: GMD
proc: pybo_raw_content
frm: plaintext
pybo_raw_lines:
pre: pre_basic_lines
tok: pybo
pybo_profile: GMD
proc: pybo_raw_content
frm: plaintext
syls:
pre: pre_basic
tok: syls
proc: spaces_fulltext
frm: plaintext
pybo_raw_types:
pre: pre_basic
tok: pybo
pybo_profile: GMD
proc: pybo_raw_types
frm: types'''
class Config:
"""Configuration class
Attributes :
filename: Complete filename of the configuration file
config : Dictionary object containing all the configuration elements
"""
def __init__(self, filename):
"""Initialize the class
        Converting the configuration file into a Python dictionary object which
        contains all the necessary parameters to set up Pybo properly.
The text file has to respect the YAML writing rules.
For more information: 'https://pyyaml.org/wiki/PyYAML'
:param filename: Filename of the file with its extension
"""
self.filename = Path(filename).resolve()
if self.filename.suffix != ".yaml":
raise Exception("Unrecognised file extension. It only supports .yaml files")
# if the file doesn't exist, write it with the default values
if not self.filename.is_file():
with self.filename.open('w', encoding='utf-8-sig') as f:
f.write(default_config)
with self.filename.open('r', encoding='utf-8-sig') as g:
self.config = yaml.load(g.read())
def get_tokenizer_profile(self, profile):
"""Get the profile configuration list
Each profile has a list of files which can be collected by this function.
:param profile: the profile name
:return: the list of files of the selected profile
"""
return self.config["tokenizer"]["Profile"][profile]
def get_pipeline_profile(self, profile):
return self.config["pipeline"][profile]
def add_pipeline_profile(self, profile):
print('ok')
args_list = ['pre', 'tok', 'proc', 'frm', # components
'pybo_profile', # pybo
'left', 'right', # concs
'filename'] # others
key = list(profile.keys())
assert len(key) == 1
key = key[0]
parts = profile[key]
component_keys = list(parts.keys())
assert len(component_keys) >= 4
for c in component_keys:
assert c in args_list
self.config['pipeline'][key] = parts
def reset_default(self):
"""Resets the configuration file to the default values"""
with self.filename.open('w', encoding='utf-8-sig') as f:
f.write(default_config)
if __name__ == '__main__':
config = Config("pybo.yaml")
config.add_pipeline_profile({'test': {'pre': 'test', 'tok': 'test1', 'proc': 'test2', 'frm': 'test3'}})
config.reset_default()
print(config.get_tokenizer_profile('POS'))
| 29.29078 | 107 | 0.610654 |

tests/aquisition/test_measure.py | jvdoorn/SpectrumAnalyzer | MIT | 1 star

import unittest
import numpy as np
from specc.analysis.analyzer import CircuitTester
from specc.aquisition.daq import DataAcquisitionInterface
from specc.data.results import SignalResponse
from tests.utils import ACCEPTABLE_ERROR, TEST_AMPLITUDE, TEST_DF, TEST_FREQUENCY, TEST_SAMPLES, TEST_SAMPLE_RATE
class TestDAQSampleRate(unittest.TestCase):
def test_sample_rate_limits(self):
minimum_sample_rate = 10
maximum_sample_rate = 100
class DAQInterfaceMock(DataAcquisitionInterface):
MINIMUM_SAMPLE_RATE = minimum_sample_rate
MAXIMUM_SAMPLE_RATE = maximum_sample_rate
self.assertRaises(AssertionError, lambda: DAQInterfaceMock(minimum_sample_rate - 1))
self.assertRaises(AssertionError, lambda: DAQInterfaceMock(maximum_sample_rate + 1))
def test_sample_rate_value(self):
initalization_sample_rate = 10
updated_sample_rate = 20
daq = DataAcquisitionInterface(initalization_sample_rate)
self.assertEqual(daq.sample_rate, initalization_sample_rate)
daq.sample_rate = updated_sample_rate
self.assertEqual(daq.sample_rate, updated_sample_rate)
class TestDAQTimeArray(unittest.TestCase):
def setUp(self) -> None:
self.daq = DataAcquisitionInterface(TEST_SAMPLE_RATE)
self.time_array = self.daq.calculate_time_array(TEST_SAMPLES)
def test_time_array_dimensions(self):
self.assertEqual(1, len(self.time_array.shape), "The time array is not a 1D-ndarray.")
self.assertEqual((TEST_SAMPLES,), self.time_array.shape)
def test_time_array_values(self):
expected_end_time = TEST_SAMPLES / TEST_SAMPLE_RATE
self.assertEqual(0, self.time_array[0], "Time array did not start at 0.")
self.assertEqual(expected_end_time, self.time_array[-1], f"Time array did not end at {expected_end_time}.")
class TestDAQAnalyzerRead(unittest.TestCase):
def setUp(self):
class DAQMock(DataAcquisitionInterface):
MOCK_OUTPUT_PHASE = np.pi / 4
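# Note: this mock stands in for real DAQ hardware. For one channel it returns a
# plain sine at TEST_FREQUENCY; for two channels it returns a (2, samples) array
# whose second row is shifted by MOCK_OUTPUT_PHASE, so the analyzer under test can
# be checked against a known relative phase.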
def read(self, channels: np.ndarray, samples: int) -> np.ndarray:
end_time = samples / self.sample_rate
time_array = np.linspace(0, end_time, samples)
if len(channels) == 1:
signal = TEST_AMPLITUDE * np.sin(2 * np.pi * TEST_FREQUENCY * time_array)
elif len(channels) == 2:
signal = np.asarray([
TEST_AMPLITUDE * np.sin(2 * np.pi * TEST_FREQUENCY * time_array),
TEST_AMPLITUDE * np.sin(2 * np.pi * TEST_FREQUENCY * time_array + self.MOCK_OUTPUT_PHASE),
])
else:
raise NotImplementedError
return signal
self.daq = DAQMock(TEST_SAMPLE_RATE)
self.analyzer = CircuitTester(self.daq, 'empty', 'empty')
def test_measuring_single(self):
response = self.analyzer.measure_single(TEST_SAMPLES)
self.assertTrue(isinstance(response, SignalResponse))
self.assertAlmostEqual(self.daq.MOCK_OUTPUT_PHASE, response.relative_phase(TEST_FREQUENCY),
delta=ACCEPTABLE_ERROR)
self.assertAlmostEqual(1, response.relative_intensity(TEST_FREQUENCY, TEST_DF), delta=ACCEPTABLE_ERROR)
if __name__ == '__main__':
unittest.main()
| 38.712644 | 115 | 0.684976 |
f7486473166589d1446909259e7709fb68e2a5df | 3,373 | py | Python | src/structure/process_pdb.py | igordub/enm-research-project | c5c52f2a6b415bd871800bcf725fda23cb3fd542 | [
"MIT"
] | null | null | null | src/structure/process_pdb.py | igordub/enm-research-project | c5c52f2a6b415bd871800bcf725fda23cb3fd542 | [
"MIT"
] | null | null | null | src/structure/process_pdb.py | igordub/enm-research-project | c5c52f2a6b415bd871800bcf725fda23cb3fd542 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import click
import logging
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
import os
import pandas as pd
from biopandas.pdb import PandasPdb
from copy import deepcopy
import src.utilities as utils
@click.command()
@click.argument('input_dir', type=click.Path(exists=True))
@click.argument('output_dir', type=click.Path())
def main_commandline(input_dir, output_dir):
""" Proccesses interim PDB structures (from pdb/interim/) and creates PDB
structural forms for simulations (saved in pdb/processed/).
"""
logger = logging.getLogger(__name__)
logger.info('making processed PDB forms from interim PDB structures')
main(input_dir, output_dir)
def main(input_dir, output_dir):
""" Proccesses interim PDB structures (from pdb/interim/) and creates PDB
structural forms for simulations (saved in pdb/processed/).
"""
config = utils.read_config()
pdb_code = config['pdb']['id']
# Data import
pdb_struct = load_structure(pdb_code, input_dir, file_extension="pdb")
# Data processing
# Delete residues 1 and 216
# pdb_struct.df['ATOM'] = pdb_struct.df['ATOM'][(pdb_struct.df['ATOM']['residue_number'] != 1) \
# & (pdb_struct.df['ATOM']['residue_number'] != 216)]
# Create structural forms
pdb_0 = create_form(pdb_struct, form_idx=0)
pdb_1 = create_form(pdb_struct, form_idx=1)
pdb_2 = create_form(pdb_struct, form_idx=2)
# Save processed data
save_structure(pdb_0, 0, output_dir)
save_structure(pdb_1, 1, output_dir)
save_structure(pdb_2, 2, output_dir)
def load_structure(pdb_code, input_dir, file_extension="pdb"):
""" Loads PDB file inot BioPandas object.
"""
pdb_filepath = os.path.join(input_dir, "{}.{}".format(pdb_code, file_extension))
return PandasPdb().read_pdb(pdb_filepath)
def create_form(data, form_idx=0):
""" Creates PDB structure forms.
form_idx = 0 is apo; 1 - holo1; and 2 - holo2
Note: Only works for homodimers.
"""
# Make a deep copy of BioPandas object to make changes
data_out = deepcopy(data)
# If form_idx == 2 that's holo2 already
if form_idx == 1:
hetatm_record_len = data_out.df['HETATM'].shape[0]
# Keep only one ligand
data_out.df['HETATM'] = data_out.df['HETATM'][:int(hetatm_record_len/2)]
elif form_idx == 0:
# Delete all 'HETATM' records
data_out.df['HETATM'] = pd.DataFrame(columns=data_out.df['HETATM'].columns)
return data_out
def save_structure(data, form_idx, output_dir):
""" Save BioPandas object as a PDB record file.
"""
output_filename = "{}.pdb".format(form_idx)
data.to_pdb(path=os.path.join(output_dir, output_filename),
records=['ATOM', 'HETATM'],
gz=False,
append_newline=True)
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
# not used in this stub but often useful for finding various files
project_dir = Path(__file__).resolve().parents[2]
# find .env automagically by walking up directories until it's found, then
# load up the .env entries as environment variables
load_dotenv(find_dotenv())
main_commandline()
| 34.070707 | 100 | 0.673881 |
f748b1172f3e0ca2af960e6acfaccec75a395009 | 1,051 | py | Python | src/compas_cem/elements/trail.py | dylau/compas_cem | 643c7d60cb9d6a9999cb96d39c44175cc8662a25 | [
"MIT"
] | 15 | 2020-12-11T14:35:19.000Z | 2022-03-31T03:12:30.000Z | src/compas_cem/elements/trail.py | dylau/compas_cem | 643c7d60cb9d6a9999cb96d39c44175cc8662a25 | [
"MIT"
] | 8 | 2020-10-09T00:08:05.000Z | 2021-11-18T00:24:20.000Z | src/compas_cem/elements/trail.py | dylau/compas_cem | 643c7d60cb9d6a9999cb96d39c44175cc8662a25 | [
"MIT"
] | 2 | 2021-12-28T02:29:47.000Z | 2022-03-15T14:04:23.000Z | from compas_cem.elements import Edge
class TrailEdge(Edge):
"""
A trail edge.
Notes
-----
If a plane is defined, it will override the absolute length of the trail edge.
However, the sign of the length (i.e. the combinatorial state) is preserved.
TODO: add explicit combinatorial state to the signature of the constructor.
"""
def __init__(self, u, v, length, plane=None):
super(TrailEdge, self).__init__(u, v)
# TODO
# self.attributes = {"length": length, "state": state, type": "trail", "plane": plane}
self.attributes = {"length": length, "type": "trail", "plane": plane}
def __repr__(self):
"""
"""
st = "{0}(length={1!r}, plane={2!r})"
return st.format(self.__class__.__name__, self.attributes["length"], self.attributes["plane"])
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
pass
| 30.911765 | 102 | 0.519505 |
f748f56f4e1a24811310c06c94386e264b91bd57 | 12,063 | py | Python | schemes/parse_scheme_table.py | gold2718/ccpp-framework | 66f1a069b6b15748e08adbe940b8ceb9b39619ab | [
"Apache-2.0"
] | null | null | null | schemes/parse_scheme_table.py | gold2718/ccpp-framework | 66f1a069b6b15748e08adbe940b8ceb9b39619ab | [
"Apache-2.0"
] | 39 | 2019-01-25T21:50:33.000Z | 2021-09-03T16:57:43.000Z | schemes/parse_scheme_table.py | gold2718/ccpp-framework | 66f1a069b6b15748e08adbe940b8ceb9b39619ab | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Usage: ./parse_scheme_table.py filename1 [filename2 filename3 ...]
# Input: fortran filenames with doxygen-compliant and CCPP-compliant physics schemes; the argument tables should have the following format:
# !! \section arg_table_schemename_run
# !! | local_name | standard_name | long_name | units | rank | type | kind | intent | optional |
# !! |----------------|-------------------------------------------------------|------------------------------------|---------|------|---------|-----------|--------|----------|
# !! | im | horizontal_loop_extent | horizontal loop extent, start at 1 | index | 0 | integer | | in | F |
# !! | ix | horizontal_dimension | horizontal dimension | index | 0 | integer | | in | F |
# !! | ... | ... | | | | | | | |
# !!
# Notes on the input format:
# - the "\section arg_table_SubroutineName" command denotes the start of the table; SubroutineName must match the name of the subroutine that the argument table describes
# - each line of the table must begin with the doxygen-delimiter '!!'
# - table headers are the first row; right now, the only ones parsed into XML (only ones required) are 'local var name' => id, 'longname' => name, units => units, rank => rank, type => type
# - the second row must have the |---|-----| format
# - after the last row of the table, there should be a blank doxygen line (only '!!') to denote the end of the table
# Output: for each filename specified, this routine converts the argument tables for all subroutines (*_init, *_run, *_finalize) into an XML file suitable to be used with mkcap.py (which generates the fortran code for the scheme cap)
# - the script generates a separate file for each module within the given files
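# Illustrative sketch of the generated XML (element names follow the mapping used in
# parse_scheme_tables() below; the attribute values shown here are placeholders):
# <scheme module="scheme_A">
#   <subroutine name="scheme_A_run">
#     <var>
#       <name>horizontal_loop_extent</name> <units>index</units> <id>im</id>
#       <rank>0</rank> <type>integer</type> <description>...</description>
#       <kind></kind> <intent>in</intent> <optional>F</optional>
#     </var>
#   </subroutine>
# </scheme>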
import argparse #needed for command line argument filenames
from xml.etree import ElementTree as ET #needed for writing out XML
#subroutine for writing "pretty" XML; copied from http://effbot.org/zone/element-lib.htm#prettyprint
def indent(elem, level=0):
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
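# Typical use (as done in parse_scheme_tables() below): call indent(top) on the root
# element just before ET.ElementTree(top).write(...) so the output XML is indented.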
#set up the command line argument parser
parser = argparse.ArgumentParser()
#the only arguments are a list of filenames to parse
parser.add_argument('file', help='paths to fortran source code to parse for generating CCPP scheme XML files', nargs='*')
args = parser.parse_args()
def parse_scheme_tables(files):
xmlfiles = []
#for each filename provided, parse it and output one XML file
for i in range(len(files)):
filename = files[i]
#read all lines of the file at once
with (open(filename, 'r')) as file:
file_lines = file.readlines()
#find all modules within the file, and save the start and end lines
module_names = []
mod_begin_lines = []
mod_end_lines = []
line_counter = 0
for line in file_lines:
words = line.split()
for j in range(len(words)):
#check for the word 'module', that it is the first word in the line, and that a module name exists afterward
if words[j].lower() == 'module' and j+1 <= len(words)-1 and j == 0:
mod_begin_lines.append(line_counter)
module_names.append(words[j+1].lower().strip())
if line.lower().find('end module') >= 0:
mod_end_lines.append(line_counter)
line_counter += 1
#for each module within the file, create a separate XML file for the "scheme"
for l in range(len(module_names)):
#find the *_init, *_run, *_finalize, etc. subroutines, save their location within the file and their names
line_counter = 0
sub_lines = []
sub_names = []
scheme_names = []
#only look at the lines in the current module
for line in file_lines[mod_begin_lines[l]:mod_end_lines[l]]:
words = line.split()
for j in range(len(words)):
#check for the word 'subroutine', that it is the first word in the line, and that a subroutine name exists afterward
if words[j].lower() == 'subroutine' and j+1 <= len(words)-1 and j == 0:
#consider the last substring separated by a '_' of the subroutine name as a 'postfix'
sub_name = words[j+1].split('(')[0].strip()
if sub_name.find('_') >= 0:
#ignore subroutines that have no postfix
if sub_name.find('init') >= 0 or sub_name.find('run') >= 0 or sub_name.find('finalize') >= 0:
#ignore subroutines that have postfixes other than init, run, finalize
sub_lines.append(line_counter)
#DH* case sensitive? sub_names.append(sub_name)
sub_names.append(sub_name.lower())
scheme_names.append(sub_names[-1][0:sub_names[-1].rfind('_')])
line_counter += 1
#check that all the subroutine "root" names in the current module are the same
if scheme_names.count(scheme_names[0]) == len(scheme_names):
scheme_name = scheme_names[0]
else:
message = 'Please check that all of the subroutines have the same root name:\n'
message += ' i.e. scheme_A_init, scheme_A_run, scheme_A_finalize\n'
message += 'Here is a list of the subroutine names:\n'
message += str(sub_names) + '\n'
message += 'Here is a list of the scheme names (parsed from the subroutine names):\n'
message += str(scheme_names)
raise Exception(message)
table_header_sets = []
var_data_sets = []
for j in range(len(sub_names)):
#find the argument table corresponding to each subroutine by searching
#"upward" from the subroutine definition line for the "arg_table_SubroutineName" section
table_found = False
for k in range(mod_begin_lines[l] + sub_lines[j], -1, -1):
line = file_lines[k]
words = line.split()
for word in words:
if 'arg_table_' + sub_names[j] in word.lower():
# DH* case sensitive? if 'arg_table_' + sub_names[j] in word:
table_found = True
header_line = k + 1
break
if table_found:
break
#if an argument table is found, parse it
if table_found:
#separate the table headers
table_headers = file_lines[header_line].split('|')
#check for blank table
if(len(table_headers) > 1):
table_headers = table_headers[1:-1]
table_header_sets.append([x.strip() for x in table_headers])
#get all of the variable information
end_of_table = False
k = header_line + 2
var_data = []
while not end_of_table:
line = file_lines[k]
words = line.split()
if len(words) == 1:
end_of_table = True
else:
var_items = line.split('|')[1:-1]
var_items = [x.strip() for x in var_items]
var_data.append(var_items)
k += 1
var_data_sets.append(var_data)
else:
table_header_sets.append([])
var_data_sets.append([])
else:
#if no table is found, just append an empty list
table_header_sets.append([])
var_data_sets.append([])
#write out the XML in the format that mkcap.py wants
top = ET.Element('scheme')
top.set('module', scheme_name)
for j in range(len(sub_names)):
sub_sub = ET.SubElement(top, 'subroutine')
sub_sub.set('name', sub_names[j])
#right now, the mapping from the tables to the XML is
# 'local var name' => id, 'longname' => name,
# units => units, rank => rank, type => type,
# description => description, kind => kind,
# intent => intent, optional => optional
#this can be generalized and updated in the future using the table header information ####
if len(var_data_sets[j]) > 0:
for k in range(len(var_data_sets[j])):
sub_var = ET.SubElement(sub_sub, 'var')
var_name = ET.SubElement(sub_var, 'name')
try:
var_name.text = var_data_sets[j][k][1]
except IndexError:
raise IndexError('This can be caused by the argument table missing an empty (!!) line in {0}'.format(filename))
var_units = ET.SubElement(sub_var, 'units')
var_units.text = var_data_sets[j][k][3]
var_id = ET.SubElement(sub_var, 'id')
var_id.text = var_data_sets[j][k][0]
var_rank = ET.SubElement(sub_var, 'rank')
var_rank.text = var_data_sets[j][k][4]
var_type = ET.SubElement(sub_var, 'type')
var_type.text = var_data_sets[j][k][5]
var_description = ET.SubElement(sub_var, 'description')
var_description.text = var_data_sets[j][k][2]
var_kind = ET.SubElement(sub_var, 'kind')
var_kind.text = var_data_sets[j][k][6]
var_intent = ET.SubElement(sub_var, 'intent')
var_intent.text = var_data_sets[j][k][7]
var_optional = ET.SubElement(sub_var, 'optional')
var_optional.text = var_data_sets[j][k][8]
indent(top)
tree = ET.ElementTree(top)
xmlfile = scheme_name + '.xml'
tree.write(xmlfile, xml_declaration=True, encoding='utf-8', method="xml")
print('Parsed tables for ' + ", ".join([str(x) for x in sub_names]) + ' in module '
+ module_names[l] + '; output => ' + xmlfile)
xmlfiles.append(xmlfile)
return xmlfiles
def main():
#set up the command line argument parser
parser = argparse.ArgumentParser()
#the only arguments are a list of filenames to parse
parser.add_argument('file', help='paths to fortran source code to parse for generating CCPP scheme XML files', nargs='*')
args = parser.parse_args()
#parse scheme tables in all files
parse_scheme_tables(args.file)
if __name__ == '__main__':
main()
| 54.09417 | 233 | 0.521346 |
f74907b936eb00941f148a9f06560e3d56a0302c | 715 | py | Python | products/models.py | benoitboyer/DjangoBio | 415e048e7207f4abc6ac9b6bde7b7c7043aab78a | [
"MIT"
] | null | null | null | products/models.py | benoitboyer/DjangoBio | 415e048e7207f4abc6ac9b6bde7b7c7043aab78a | [
"MIT"
] | null | null | null | products/models.py | benoitboyer/DjangoBio | 415e048e7207f4abc6ac9b6bde7b7c7043aab78a | [
"MIT"
] | null | null | null | from django.db import models
# Create your models here.
class Product(models.Model):
product_brand = models.CharField(max_length=50)
product_article_code = models.CharField(max_length=20)
product_code = models.IntegerField()
product_name = models.CharField(max_length=150)
product_unit_packaging_number = models.IntegerField(default=1)
product_price = models.DecimalField(max_digits=8, decimal_places=2)
product_selected = models.BooleanField(default=False)
def __str__(self):
return self.product_name
def product_was_selected(self):
return self.product_selected
def get_price(self):
return self.product_price
def get_selected_product():
return Product.objects.filter(product_selected=True)
| 29.791667 | 67 | 0.80979 |
f74922ca39a253d92a9c87dae1b6068ed0dd843c | 4,766 | py | Python | eland/ml/_model_serializer.py | mesejo/eland | d1444f8e094ef11ce4fa6713a521245b68a842d7 | [
"Apache-2.0"
] | null | null | null | eland/ml/_model_serializer.py | mesejo/eland | d1444f8e094ef11ce4fa6713a521245b68a842d7 | [
"Apache-2.0"
] | null | null | null | eland/ml/_model_serializer.py | mesejo/eland | d1444f8e094ef11ce4fa6713a521245b68a842d7 | [
"Apache-2.0"
] | null | null | null | # Licensed to Elasticsearch B.V under one or more agreements.
# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information
import base64
import gzip
import json
from abc import ABC
from typing import Sequence, Dict, Any, Optional
def add_if_exists(d: Dict[str, Any], k: str, v: Any) -> None:
if v is not None:
d[k] = v
class ModelSerializer(ABC):
def __init__(
self,
feature_names: Sequence[str],
target_type: Optional[str] = None,
classification_labels: Optional[Sequence[str]] = None,
):
self._target_type = target_type
self._feature_names = feature_names
self._classification_labels = classification_labels
def to_dict(self) -> Dict[str, Any]:
d: Dict[str, Any] = {}
add_if_exists(d, "target_type", self._target_type)
add_if_exists(d, "feature_names", self._feature_names)
add_if_exists(d, "classification_labels", self._classification_labels)
return d
@property
def feature_names(self) -> Sequence[str]:
return self._feature_names
def serialize_model(self) -> Dict[str, Any]:
return {"trained_model": self.to_dict()}
def serialize_and_compress_model(self) -> str:
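# The serialized model is dumped to compact JSON, gzip-compressed, then base64-encoded.
# (Assumption, not stated in this file: this string is what gets uploaded to
# Elasticsearch as the compressed model definition.)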
json_string = json.dumps(self.serialize_model(), separators=(",", ":"))
return base64.b64encode(gzip.compress(json_string.encode("utf-8"))).decode(
"ascii"
)
class TreeNode:
def __init__(
self,
node_idx: int,
default_left: Optional[bool] = None,
decision_type: Optional[str] = None,
left_child: Optional[int] = None,
right_child: Optional[int] = None,
split_feature: Optional[int] = None,
threshold: Optional[float] = None,
leaf_value: Optional[float] = None,
):
self._node_idx = node_idx
self._decision_type = decision_type
self._left_child = left_child
self._right_child = right_child
self._split_feature = split_feature
self._threshold = threshold
self._leaf_value = leaf_value
self._default_left = default_left
def to_dict(self) -> Dict[str, Any]:
d: Dict[str, Any] = {}
add_if_exists(d, "node_index", self._node_idx)
add_if_exists(d, "decision_type", self._decision_type)
if self._leaf_value is None:
add_if_exists(d, "left_child", self._left_child)
add_if_exists(d, "right_child", self._right_child)
add_if_exists(d, "split_feature", self._split_feature)
add_if_exists(d, "threshold", self._threshold)
else:
add_if_exists(d, "leaf_value", self._leaf_value)
return d
class Tree(ModelSerializer):
def __init__(
self,
feature_names: Sequence[str],
target_type: Optional[str] = None,
tree_structure: Optional[Sequence[TreeNode]] = None,
classification_labels: Optional[Sequence[str]] = None,
):
super().__init__(
feature_names=feature_names,
target_type=target_type,
classification_labels=classification_labels,
)
if target_type == "regression" and classification_labels:
raise ValueError("regression does not support classification_labels")
self._tree_structure = tree_structure or []
def to_dict(self) -> Dict[str, Any]:
d = super().to_dict()
add_if_exists(d, "tree_structure", [t.to_dict() for t in self._tree_structure])
return {"tree": d}
class Ensemble(ModelSerializer):
def __init__(
self,
feature_names: Sequence[str],
trained_models: Sequence[ModelSerializer],
output_aggregator: Dict[str, Any],
target_type: Optional[str] = None,
classification_labels: Optional[Sequence[str]] = None,
classification_weights: Optional[Sequence[float]] = None,
):
super().__init__(
feature_names=feature_names,
target_type=target_type,
classification_labels=classification_labels,
)
self._trained_models = trained_models
self._classification_weights = classification_weights
self._output_aggregator = output_aggregator
def to_dict(self) -> Dict[str, Any]:
d = super().to_dict()
trained_models = None
if self._trained_models:
trained_models = [t.to_dict() for t in self._trained_models]
add_if_exists(d, "trained_models", trained_models)
add_if_exists(d, "classification_weights", self._classification_weights)
add_if_exists(d, "aggregate_output", self._output_aggregator)
return {"ensemble": d}
| 35.303704 | 87 | 0.648342 |
f74940774759d64a85f79cb06fe04419f5dcf521 | 27,717 | py | Python | autotest/gdrivers/netcdf_cf.py | chambbj/gdal | 3d56aecb5b8e9890dae8f560acd099992e707d12 | [
"MIT"
] | 1 | 2015-02-16T16:51:38.000Z | 2015-02-16T16:51:38.000Z | autotest/gdrivers/netcdf_cf.py | theduckylittle/gdal | 61be261cae524582ba28bceebb027cc1e967e0ab | [
"MIT"
] | null | null | null | autotest/gdrivers/netcdf_cf.py | theduckylittle/gdal | 61be261cae524582ba28bceebb027cc1e967e0ab | [
"MIT"
] | null | null | null | #!/usr/bin/env python
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test NetCDF driver support.
# Author: Frank Warmerdam <warmerdam@pobox.com>
#
###############################################################################
# Copyright (c) 2007, Frank Warmerdam <warmerdam@pobox.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import sys
from osgeo import gdal
from osgeo import osr
from gdalconst import *
sys.path.append( '../pymod' )
import gdaltest
import imp # for netcdf_cf_setup()
import netcdf
from netcdf import netcdf_setup, netcdf_test_copy
###############################################################################
# Netcdf CF compliance Functions
###############################################################################
###############################################################################
#check for necessary files and software
def netcdf_cf_setup():
#global vars
gdaltest.netcdf_cf_method = None
gdaltest.netcdf_cf_files = None
gdaltest.netcdf_cf_check_error = ''
#if netcdf is not supported, skip detection
if gdaltest.netcdf_drv is None:
return 'skip'
#skip if on windows
if os.name != 'posix':
print('NOTICE: will skip CF checks because OS is not posix!')
return 'skip'
#try local method
cdms2_installed = False
try:
imp.find_module( 'cdms2' )
cdms2_installed = True
except ImportError:
print( 'NOTICE: cdms2 not installed!' )
print( ' see installation notes at http://pypi.python.org/pypi/cfchecker' )
pass
if cdms2_installed:
xml_dir = './data/netcdf_cf_xml'
tmp_dir = './tmp/cache'
files = dict()
files['a'] = xml_dir+'/area-type-table.xml'
files['s'] = tmp_dir+'/cf-standard-name-table-v18.xml'
#either find udunits path in UDUNITS_PATH, or based on location of udunits app, or copy all .xml files to data
#opt_u = '/home/soft/share/udunits/udunits2.xml'
files['u'] = xml_dir+'/udunits2.xml'
#look for xml files
if not ( os.path.exists(files['a']) and os.path.exists(files['s']) and os.path.exists(files['u']) ):
print('NOTICE: cdms2 installed, but necessary xml files are not found!')
print(' the following files must exist:')
print(' '+xml_dir+'/area-type-table.xml from http://cf-pcmdi.llnl.gov/documents/cf-standard-names/area-type-table/1/area-type-table.xml')
print(' '+tmp_dir+'/cf-standard-name-table-v18.xml - http://cf-pcmdi.llnl.gov/documents/cf-standard-names/standard-name-table/18/cf-standard-name-table.xml')
print(' '+xml_dir+'/udunits2*.xml from a UDUNITS2 install')
#try to get cf-standard-name-table
if not os.path.exists(files['s']):
#print ' downloading cf-standard-name-table.xml (v18) from http://cf-pcmdi.llnl.gov ...'
if not gdaltest.download_file('http://cf-pcmdi.llnl.gov/documents/cf-standard-names/standard-name-table/18/cf-standard-name-table.xml',
'cf-standard-name-table-v18.xml'):
print(' Failed to download, please get it and try again.')
if os.path.exists(files['a']) and os.path.exists(files['s']) and os.path.exists(files['u']):
gdaltest.netcdf_cf_method = 'local'
gdaltest.netcdf_cf_files = files
print('NOTICE: netcdf CF compliance checks: using local checker script')
return 'success'
#skip http method if GDAL_DOWNLOAD_TEST_DATA and GDAL_RUN_SLOW_TESTS are not defined
if not 'GDAL_DOWNLOAD_TEST_DATA' in os.environ:
print('NOTICE: skipping netcdf CF compliance checks')
print('to enable remote http checker script, define GDAL_DOWNLOAD_TEST_DATA')
return 'success'
if not gdaltest.run_slow_tests():
print('NOTICE: skipping netcdf CF compliance checks')
return 'success'
#http method with curl; a python module would be preferable, but curl is easier for now
success = False
try:
(ret, err) = gdaltest.runexternal_out_and_err('curl')
except :
print('no curl executable')
else:
#make sure script is responding
handle = gdaltest.gdalurlopen("http://puma.nerc.ac.uk/cgi-bin/cf-checker.pl")
if handle is not None:
success = True
else:
print('script not responding')
if success:
gdaltest.netcdf_cf_method = 'http'
print('NOTICE: netcdf CF compliance checks: using remote http checker script, consider installing cdms2 locally')
return 'success'
if gdaltest.netcdf_cf_method is None:
print('NOTICE: skipping netcdf CF compliance checks')
return 'success'
###############################################################################
#build a command used to check ifile
def netcdf_cf_get_command(ifile, version='auto'):
command = ''
#fetch method obtained previously
method = gdaltest.netcdf_cf_method
if method is not None:
if method == 'local':
command = './netcdf_cfchecks.py -a ' + gdaltest.netcdf_cf_files['a'] \
+ ' -s ' + gdaltest.netcdf_cf_files['s'] \
+ ' -u ' + gdaltest.netcdf_cf_files['u'] \
+ ' -v ' + version +' ' + ifile
elif method == 'http':
#command = shlex.split( 'curl --form cfversion="1.5" --form upload=@' + ifile + ' --form submit=\"Check file\" "http://puma.nerc.ac.uk/cgi-bin/cf-checker.pl"' )
#switch to 1.5 as driver now supports, and auto when it becomes available
version = '1.5'
command = 'curl --form cfversion=' + version + ' --form upload=@' + ifile + ' --form submit=\"Check file\" "http://puma.nerc.ac.uk/cgi-bin/cf-checker.pl"'
return command
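# Example of a command this builds for the 'local' method (paths follow the `files`
# dict set up in netcdf_cf_setup(); the input netCDF file name is a placeholder):
#   ./netcdf_cfchecks.py -a ./data/netcdf_cf_xml/area-type-table.xml \
#     -s ./tmp/cache/cf-standard-name-table-v18.xml \
#     -u ./data/netcdf_cf_xml/udunits2.xml -v auto some_file.nc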
###############################################################################
# Check a file for CF compliance
def netcdf_cf_check_file(ifile,version='auto', silent=True):
#if not silent:
# print 'checking file ' + ifile
gdaltest.netcdf_cf_check_error = ''
if ( not os.path.exists(ifile) ):
return 'skip'
output_all = ''
command = netcdf_cf_get_command(ifile, version='auto')
if command is None or command=='':
gdaltest.post_reason('no suitable method found, skipping')
return 'skip'
try:
if gdaltest.netcdf_cf_method == 'http':
print('calling ' + command)
(ret, err) = gdaltest.runexternal_out_and_err(command)
except :
gdaltest.post_reason('ERROR with command - ' + command)
return 'fail'
output_all = ret
output_err = ''
output_warn = ''
for line in output_all.splitlines( ):
#optimize this with regex
if 'ERROR' in line and not 'ERRORS' in line:
output_err = output_err + '\n' + line
elif 'WARNING' in line and not 'WARNINGS' in line:
output_warn = output_warn + '\n' + line
result = 'success'
if output_err != '':
result = 'fail'
if output_err != '':
gdaltest.netcdf_cf_check_error += output_err.strip()
if not silent:
print('=> CF check ERRORS for file ' + ifile + ' : ' + output_err)
if output_warn != '':
if not silent:
print('CF check WARNINGS for file ' + ifile + ' : ' + output_warn)
return result
###############################################################################
# Netcdf CF projection Functions and data
###############################################################################
###############################################################################
# Definitions to test projections that are supported by CF
# Tuple structure:
# 0: Short code (eg AEA) - (no GDAL significance, just for filenames etc)
# 1: official name from CF-1 conventions
# 2: EPSG code, or WKT, to tell GDAL to do reprojection
# 3: Actual attribute official name of grid mapping
# 4: List of required attributes to define projection
# 5: List of required coordinate variable standard name attributes
netcdf_cfproj_tuples = [
("AEA", "Albers Equal Area", "EPSG:3577", "albers_conical_equal_area",
['standard_parallel', 'longitude_of_central_meridian',
'latitude_of_projection_origin', 'false_easting', 'false_northing'],
['projection_x_coordinate','projection_y_coordinate']),
("AZE", "Azimuthal Equidistant",
#Didn't have EPSG suitable for AU
"+proj=aeqd +lat_0=-37 +lon_0=145 +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m +no_defs",
"azimuthal_equidistant",
['longitude_of_projection_origin',
'latitude_of_projection_origin', 'false_easting', 'false_northing'],
['projection_x_coordinate','projection_y_coordinate']),
("LAZEA", "Lambert azimuthal equal area",
#Specify proj4 since no approp LAZEA for AU
#"+proj=laea +lat_0=0 +lon_0=134 +x_0=0 +y_0=0 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs",
"+proj=laea +lat_0=-37 +lon_0=145 +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m +no_defs",
"lambert_azimuthal_equal_area",
['longitude_of_projection_origin',
'latitude_of_projection_origin', 'false_easting', 'false_northing'],
['projection_x_coordinate','projection_y_coordinate']),
("LC_2SP", "Lambert conformal", "EPSG:3112", "lambert_conformal_conic",
['standard_parallel',
'longitude_of_central_meridian',
'latitude_of_projection_origin', 'false_easting', 'false_northing'],
['projection_x_coordinate','projection_y_coordinate']),
# TODO: Test LCC with 1SP
("LCEA", "Lambert Cylindrical Equal Area",
"+proj=cea +lat_ts=-37 +lon_0=145 +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m +no_defs",
"lambert_cylindrical_equal_area",
['longitude_of_central_meridian',
'standard_parallel', # TODO: OR 'scale_factor_at_projection_origin'
'false_easting', 'false_northing'],
['projection_x_coordinate','projection_y_coordinate']),
# 2 entries for Mercator, since attribs different for 1SP or 2SP
("M-1SP", "Mercator",
"+proj=merc +lon_0=145 +k_0=1 +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m +no_defs",
"mercator",
['longitude_of_projection_origin',
'scale_factor_at_projection_origin',
'false_easting', 'false_northing'],
['projection_x_coordinate','projection_y_coordinate']),
# Commented out as it seems GDAL itself's support of Mercator with 2SP
# is a bit dodgy
("M-2SP", "Mercator",
"+proj=merc +lat_ts=-37 +lon_0=145 +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m +no_defs",
# Trying with full WKT:
#"""PROJCS["unnamed", GEOGCS["WGS 84", DATUM["WGS_1984", SPHEROID["WGS 84",6378137,298.257223563, AUTHORITY["EPSG","7030"]], AUTHORITY["EPSG","6326"]], PRIMEM["Greenwich",0], UNIT["degree",0.0174532925199433], AUTHORITY["EPSG","4326"]], PROJECTION["Mercator_2SP"], PARAMETER["central_meridian",146], PARAMETER["standard_parallel_1",-37], PARAMETER["latitude_of_origin",0], PARAMETER["false_easting",0], PARAMETER["false_northing",0], UNIT["metre",1, AUTHORITY["EPSG","9001"]]]""",
"mercator",
['longitude_of_projection_origin',
'standard_parallel',
'false_easting', 'false_northing'],
['projection_x_coordinate','projection_y_coordinate']),
("Ortho", "Orthographic",
"+proj=ortho +lat_0=-37 +lon_0=145 +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m +no_defs",
"orthographic",
['longitude_of_projection_origin',
'latitude_of_projection_origin',
'false_easting', 'false_northing'],
['projection_x_coordinate', 'projection_y_coordinate']),
# Seems GDAL may have problems with Polar stereographic, as it
# considers these "local coordinate systems"
("PSt", "Polar stereographic",
"+proj=stere +lat_ts=-37 +lat_0=-90 +lon_0=145 +k_0=1.0 +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m +no_defs",
"polar_stereographic",
['straight_vertical_longitude_from_pole',
'latitude_of_projection_origin',
'standard_parallel',
'false_easting', 'false_northing'],
['projection_x_coordinate', 'projection_y_coordinate']),
("St", "Stereographic",
"+proj=stere +lat_0=-37 +lon_0=145 +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m +no_defs",
#'PROJCS["unnamed", GEOGCS["WGS 84", DATUM["WGS_1984", SPHEROID["WGS 84",6378137,298.257223563, AUTHORITY["EPSG","7030"]], AUTHORITY["EPSG","6326"]], PRIMEM["Greenwich",0], UNIT["degree",0.0174532925199433], AUTHORITY["EPSG","4326"]], PROJECTION["Stereographic"], PARAMETER["latitude_of_origin",-37.5], PARAMETER["central_meridian",145], PARAMETER["scale_factor",1], PARAMETER["false_easting",0], PARAMETER["false_northing",0], UNIT["metre",1, AUTHORITY["EPSG","9001"]]]',
"stereographic",
['longitude_of_projection_origin',
'latitude_of_projection_origin',
'scale_factor_at_projection_origin',
'false_easting', 'false_northing'],
['projection_x_coordinate', 'projection_y_coordinate']),
#Note: Rotated Pole not in this list, as seems not GDAL-supported
("TM", "Transverse Mercator", "EPSG:32655", #UTM Zone 55N
"transverse_mercator",
[
'scale_factor_at_central_meridian',
'longitude_of_central_meridian',
'latitude_of_projection_origin',
'false_easting', 'false_northing'],
['projection_x_coordinate','projection_y_coordinate'])
]
#By default, we will use GeoTiff as the 'intermediate' raster format
# for gdalwarp'ing into before gdal_translate to NetCDF.
# But since GeoTiff can't act as a storage format for certain projections
# (eg Mercator-2SP), we will choose other intermediate formats for certain
# projections.
# The following dict maps a projection short code to the driver format to use.
netcdf_cfproj_def_int_format = "GTiff"
netcdf_cfproj_int_fmt_maps = {
"M-2SP":'HFA'
}
netcdf_cfproj_format_fnames = {"HFA":"img", "GTiff":"tif", "NITF":"nitf",
"ERS":"ers"}
###############################################################################
# Check support for given projection tuple definitions
# For each projection, warp the original file and then create a netcdf
def netcdf_cfproj_testcopy(projTuples, origTiff, interFormats, inPath, outPath,
resFilename):
"""Test a Geotiff file can be converted to NetCDF, and projection in
CF-1 conventions can be successfully maintained. Save results to file.
:arg: projTuples - list of tuples
:arg: interFormats - dict of intermediate format overrides
:arg: outPath - path to save output
:arg: resFilename - results filename to write to.
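Example (as invoked by netcdf_cf_4 below):
netcdf_cfproj_testcopy(netcdf_cfproj_tuples, 'melb-small.tif',
netcdf_cfproj_int_fmt_maps, 'data', 'tmp', 'translate_results.txt')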
"""
silent = True
gdaltest.netcdf_drv_silent = True
bWriteGdalTags="YES"
#silent = False
gdaltest.netcdf_drv_silent = False
# bWriteGdalTags="NO"
result = 'success'
# Test if ncdump is available
try:
(ret, err) = gdaltest.runexternal_out_and_err('ncdump -h')
except:
#nothing is supported as ncdump not found
print('NOTICE: netcdf version not found')
return 'skip'
i = err.find('netcdf library version ')
#version not found
if i == -1:
print('NOTICE: netcdf version not found')
return 'skip'
if not os.path.exists(outPath):
os.makedirs(outPath)
resFile = open(os.path.join(outPath, resFilename), "w")
if not os.path.exists(outPath):
os.makedirs(outPath)
heading = "Testing GDAL translation results to NetCDF\n"
resFile.write(heading)
resFile.write(len(heading)*"="+"\n")
# now = datetime.datetime.now()
# resFile.write("*Date/time:* %s\n" % (now.strftime("%Y-%m-%d %H:%M")))
resFile.write("\n")
resPerProj = {}
dsTiff = gdal.Open( os.path.join(inPath, origTiff), GA_ReadOnly );
s_srs_wkt = dsTiff.GetProjection()
#objects to hold the various tests
i_t = 0
tst = {}
tst_res = {}
for proj in projTuples:
try:
intFmt = interFormats[proj[0]]
except KeyError:
intFmt = netcdf_cfproj_def_int_format
intExt = netcdf_cfproj_format_fnames[intFmt]
# Our little results data structures
if not silent:
print("")
print("Testing %s (%s) translation:" % (proj[0], proj[1]))
if not silent:
print("About to create raster in chosen SRS")
projVrt = os.path.join(outPath, "%s_%s.vrt" % \
(origTiff.rstrip('.tif'), proj[0] ))
projRaster = os.path.join(outPath, "%s_%s.%s" % \
(origTiff.rstrip('.tif'), proj[0], intExt ))
srs = osr.SpatialReference()
srs.SetFromUserInput(proj[2])
t_srs_wkt = srs.ExportToWkt()
if not silent:
print("going to warp file "+origTiff+"\n" + s_srs_wkt + "\ninto file "+projRaster + "\n" + t_srs_wkt)
dswarp = gdal.AutoCreateWarpedVRT( dsTiff, s_srs_wkt, t_srs_wkt, GRA_NearestNeighbour, 0 );
drv_inter = gdal.GetDriverByName(intFmt);
drv_netcdf = gdal.GetDriverByName("netcdf");
dsw = drv_inter.CreateCopy(projRaster, dswarp, 0)
if not silent:
print("Warped %s to %s" % (proj[0], projRaster))
projNc = os.path.join(outPath, "%s_%s.nc" % \
(origTiff.rstrip('.tif'), proj[0] ))
#Force GDAL tags to be written to make testing easier, with preserved datum etc
ncCoOpts = "-co WRITE_GDAL_TAGS=yes"
if not silent:
print("About to translate to NetCDF")
dst = drv_netcdf.CreateCopy(projNc, dsw, 0, [ 'WRITE_GDAL_TAGS='+bWriteGdalTags ])
#For drivers like HFA, the line below is ESSENTIAL so that all info is
# saved to new raster file - which we'll reopen later and want
# to be fully updated.
dsw = None
dst = None
if not silent:
print("Translated to %s" % (projNc))
transWorked, resDetails = netcdf_cfproj_test_cf(proj, projNc)
resPerProj[proj[0]] = resDetails
resFile.write("%s (%s): " % (proj[0], proj[1]))
if transWorked:
resFile.write("OK\n")
else:
resFile.write("BAD\n")
if 'missingProjName' in resPerProj[proj[0]]:
resFile.write("\tMissing proj name '%s'\n" % \
(resPerProj[proj[0]]['missingProjName']))
for attrib in resPerProj[proj[0]]['missingAttrs']:
resFile.write("\tMissing attrib '%s'\n" % (attrib))
for cVarStdName in resPerProj[proj[0]]['missingCoordVarStdNames']:
resFile.write("\tMissing coord var with std name '%s'\n" \
% (cVarStdName))
if 'cfcheck_error' in resPerProj[proj[0]]:
resFile.write("\tFailed cf check: %s\n" % \
(resPerProj[proj[0]]['cfcheck_error']))
# test file copy
# We now copy to a new file, just to be safe
projNc2 = projNc.rstrip('.nc') + '2.nc'
projRaster2 = os.path.join(outPath, "%s_%s2.%s" % \
(origTiff.rstrip('.tif'), proj[0], intExt ))
tst_res[i_t+1] = netcdf_test_copy( projRaster, 1, None, projNc2, [], 'NETCDF' )
tst_res[i_t+2] = netcdf_test_copy( projNc2, 1, None, projRaster2, [], intFmt )
if tst_res[i_t+1] == 'fail' or tst_res[i_t+2] == 'fail':
result = 'fail'
i_t = i_t + 2
resFile.close()
if not silent:
print("\n" + "*" * 80)
print("Saved results to file %s" % (os.path.join(outPath, resFilename)))
#result = 'success'
resFile = open(os.path.join(outPath, resFilename), "r")
resStr = resFile.read()
if resStr.find('BAD') != -1:
print('\nCF projection tests failed, here is the output (stored in file %s)\n' % \
(os.path.join(outPath, resFilename)))
print(resStr)
result = 'fail'
return result
###############################################################################
# Test an NC file has valid conventions according to passed-in proj tuple
# Note: current testing strategy is a fairly simple attribute search.
# this could use gdal netcdf driver for getting attribs instead...
def netcdf_cfproj_test_cf(proj, projNc):
transWorked = True
command = 'ncdump -h ' + projNc
(ret, err) = gdaltest.runexternal_out_and_err(command)
if err != '':
print(err)
dumpStr = ret
resDetails = {}
resDetails['missingAttrs'] = []
resDetails['missingCoordVarStdNames'] = []
if (':grid_mapping_name = "%s"' % (proj[3])) not in dumpStr:
transWorked = False
resDetails['missingProjName'] = proj[3]
# Check attributes in the projection are included
for attrib in proj[4]:
# The ':' prefix and ' ' suffix is to help check for exact name,
# eg to catch the standard_parallel_1 and 2 issue.
if (":"+attrib+" ") not in dumpStr:
transWorked = False
resDetails['missingAttrs'].append(attrib)
# print "**Error for proj '%s': CF-1 attrib '%s' not found.**" % \
# (proj[0], attrib)
# Now we check the required X and Y attributes are included (e.g. Rotated Pole
# has special names required here).
for coordVarStdName in proj[5]:
if coordVarStdName not in dumpStr:
transWorked = False
resDetails['missingCoordVarStdNames'].append(coordVarStdName)
#Final check use the cf-checker
result_cf = netcdf_cf_check_file( projNc,'auto',True )
if result_cf == 'fail':
resDetails['cfcheck_error'] = gdaltest.netcdf_cf_check_error
transWorked = False
return transWorked, resDetails
###############################################################################
# Netcdf CF Tests
###############################################################################
###############################################################################
#test copy and CF compliance for lat/lon (no datum, no GEOGCS) file, tif->nc->tif
def netcdf_cf_1():
#setup netcdf and netcdf_cf environment
netcdf_setup()
netcdf_cf_setup()
if gdaltest.netcdf_drv is None:
return 'skip'
#tst1 = gdaltest.GDALTest( 'NETCDF', 'trmm.tif', 1, 14 )
#result = tst1.testCreateCopy(check_gt=1, check_srs=1, new_filename='tmp/netcdf_cf_1.nc', delete_copy = 0)
result = netcdf_test_copy( 'data/trmm.nc', 1, 14, 'tmp/netcdf_cf_1.nc' )
if result != 'fail':
#tst2 = gdaltest.GDALTest( 'GTIFF', '../tmp/netcdf_cf_1.nc', 1, 14 )
#result = tst2.testCreateCopy(check_gt=1, check_srs=1, new_filename='tmp/netcdf_cf_1.tiff', delete_copy = 0)
result = netcdf_test_copy( 'tmp/netcdf_cf_1.nc', 1, 14, 'tmp/netcdf_cf_1.tif', [], 'GTIFF' )
result_cf = 'success'
if gdaltest.netcdf_cf_method is not None:
result_cf = netcdf_cf_check_file( 'tmp/netcdf_18.nc','auto',False )
if result != 'fail' and result_cf != 'fail':
return 'success'
else:
return 'fail'
###############################################################################
#test copy and CF compliance for lat/lon (no datum, no GEOGCS) file, nc->nc
def netcdf_cf_2():
if gdaltest.netcdf_drv is None:
return 'skip'
result = netcdf_test_copy( 'data/trmm.nc', 1, 14, 'tmp/netcdf_cf_2.nc' )
result_cf = 'success'
if gdaltest.netcdf_cf_method is not None:
result_cf = netcdf_cf_check_file( 'tmp/netcdf_cf_2.nc','auto',False )
if result != 'fail' and result_cf != 'fail':
return 'success'
else:
return 'fail'
###############################################################################
#test copy and CF compliance for lat/lon (WGS84) file, tif->nc->tif
# note: this test fails in trunk (before r23246)
def netcdf_cf_3():
if gdaltest.netcdf_drv is None:
return 'skip'
result = 'success'
result_cf = 'success'
result = netcdf_test_copy( 'data/trmm-wgs84.tif', 1, 14, 'tmp/netcdf_cf_3.nc' )
if result == 'success':
#tst = gdaltest.GDALTest( 'GTIFF', '../tmp/netcdf_cf_3.nc', 1, 14 )
#result = tst.testCreateCopy(check_gt=1, check_srs=1, new_filename='tmp/netcdf_cf_3.tif', delete_copy = 0)
result = netcdf_test_copy( 'tmp/netcdf_cf_3.nc', 1, 14, 'tmp/netcdf_cf_3.tif', [], 'GTIFF' )
result_cf = 'success'
if gdaltest.netcdf_cf_method is not None:
result_cf = netcdf_cf_check_file( 'tmp/netcdf_cf_3.nc','auto',False )
if result != 'fail' and result_cf != 'fail':
return 'success'
else:
return 'fail'
###############################################################################
#test support for various CF projections
def netcdf_cf_4():
result = netcdf_cfproj_testcopy(netcdf_cfproj_tuples, 'melb-small.tif',
netcdf_cfproj_int_fmt_maps,
'data', 'tmp', 'translate_results.txt')
# result = netcdf_cfproj_testcopy(netcdf_cfproj_tuples1, 'melb-small.tif', \
# 'data', 'tmp', 'translate_results.txt')
return result
###############################################################################
#test support for PS variants (bug #2893)
def netcdf_cf_5():
if gdaltest.netcdf_drv is None:
return 'skip'
ifiles = [ 'NETCDF:data/orog_CRCM1.nc:orog', 'NETCDF:data/orog_CRCM2.nc:orog' ]
for ifile in ifiles:
ds = gdal.Open( ifile )
prj = ds.GetProjection()
sr = osr.SpatialReference( )
sr.ImportFromWkt( prj )
lat_origin = sr.GetProjParm( 'latitude_of_origin' )
if lat_origin != 60:
gdaltest.post_reason( 'Latitude of origin in %s does not match expected: %f'
% (ifile, lat_origin) )
return 'fail'
return 'success'
###############################################################################
gdaltest_list = [
netcdf_cf_1,
netcdf_cf_2,
netcdf_cf_3,
netcdf_cf_4,
netcdf_cf_5,
None ]
if __name__ == '__main__':
gdaltest.setup_run( 'netcdf_cf' )
gdaltest.run_tests( gdaltest_list )
#make sure we cleanup
gdaltest.clean_tmp()
gdaltest.summarize()
| 41.001479 | 488 | 0.599307 |
f74959f1794d63d86a99d0bbf4f7c523d22281a1 | 5,114 | py | Python | v3io_gputils/mpijob.py | sahare92/v3io-gputils | 8d18c6b5b8d9051967295b75836708ffa645df81 | [
"Apache-2.0"
] | null | null | null | v3io_gputils/mpijob.py | sahare92/v3io-gputils | 8d18c6b5b8d9051967295b75836708ffa645df81 | [
"Apache-2.0"
] | null | null | null | v3io_gputils/mpijob.py | sahare92/v3io-gputils | 8d18c6b5b8d9051967295b75836708ffa645df81 | [
"Apache-2.0"
] | null | null | null | from copy import deepcopy
from os import environ
from pprint import pprint
import yaml
from kubernetes import client, config
from kubernetes.client.rest import ApiException
_mpijob_template = {
'apiVersion': 'kubeflow.org/v1alpha1',
'kind': 'MPIJob',
'metadata': {
'name': '',
'namespace': 'default-tenant'
},
'spec': {
'replicas': 1,
'template': {
'spec': {
'containers': [{
'image': 'iguaziodocker/horovod:0.1.1',
'name': '',
'command': [],
'volumeMounts': [{'name': 'v3io', 'mountPath': '/User'}],
'workingDir': '/User',
'securityContext': {
'capabilities': {'add': ['IPC_LOCK']}},
'resources': {
'limits': {'nvidia.com/gpu': 1}}}],
'volumes': [{
'name': 'v3io',
'flexVolume': {
'driver': 'v3io/fuse',
'options': {
'container': 'users',
'subPath': '',
'accessKey': '',
}
}}]
}}}}
class MpiJob:
"""
A wrapper over Kubernetes MPIJob (Horovod).
Example:
from mpijob import MpiJob
job = MpiJob('myname', 'img', ['a','b'])
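Optionally chain configuration (these methods are defined below and return self;
the volume path here is a placeholder):
job.volume(volpath='~/mydir').gpus(4).replicas(2)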
print(job.to_yaml())
job.submit()
"""
group = 'kubeflow.org'
version = 'v1alpha1'
plural = 'mpijobs'
def __init__(self, name, image=None, command=None,
replicas=1, namespace='default-tenant'):
self.api_instance = None
self.name = name
self.namespace = namespace
self._struct = deepcopy(_mpijob_template)
self._struct['metadata'] = {'name': name, 'namespace': namespace}
self._update_container('name', name)
if image:
self._update_container('image', image)
if command:
self._update_container('command', ['mpirun','python'] + command)
if replicas:
self._struct['spec']['replicas'] = replicas
self._update_access_token(environ.get('V3IO_ACCESS_KEY',''))
self._update_running_user(environ.get('V3IO_USERNAME',''))
def _update_container(self, key, value):
self._struct['spec']['template']['spec']['containers'][0][key] = value
def _update_access_token(self, token):
self._struct['spec']['template']['spec']['volumes'][0]['flexVolume']['options']['accessKey'] = token
def _update_running_user(self, username):
self._struct['spec']['template']['spec']['volumes'][0]['flexVolume']['options']['subPath'] = '/' + username
def volume(self, mount='/User', volpath='~/', access_key=''):
self._update_container('volumeMounts', [{'name': 'v3io', 'mountPath': mount}])
if volpath.startswith('~/'):
v3io_home = environ.get('V3IO_HOME', '')
volpath = v3io_home + volpath[1:]
container, subpath = split_path(volpath)
access_key = access_key or environ.get('V3IO_ACCESS_KEY','')
vol = {'name': 'v3io', 'flexVolume': {
'driver': 'v3io/fuse',
'options': {
'container': container,
'subPath': subpath,
'accessKey': access_key,
}
}}
self._struct['spec']['template']['spec']['volumes'] = [vol]
return self
def gpus(self, num, gpu_type='nvidia.com/gpu'):
self._update_container('resources', {'limits' : {gpu_type: num}})
return self
def replicas(self, replicas_num):
self._struct['spec']['replicas'] = replicas_num
return self
def working_dir(self, working_dir):
self._update_container('workingDir', working_dir)
return self
def to_dict(self):
return self._struct
def to_yaml(self):
return yaml.dump(self.to_dict(), default_flow_style=False, sort_keys=False)
def submit(self):
config.load_incluster_config()
self.api_instance = client.CustomObjectsApi()
try:
api_response = self.api_instance.create_namespaced_custom_object(
MpiJob.group, MpiJob.version, self.namespace, 'mpijobs', self.to_dict())
pprint(api_response)
except ApiException as e:
print("Exception when creating MPIJob: %s" % e)
def delete(self):
try:
# delete the mpi job
body = client.V1DeleteOptions()
api_response = self.api_instance.delete_namespaced_custom_object(
MpiJob.group, MpiJob.version, self.namespace, MpiJob.plural, self.name, body)
pprint(api_response)
except ApiException as e:
print("Exception when calling CustomObjectsApi->delete_namespaced_custom_object: %s\\n" % e)
def split_path(mntpath=''):
if mntpath[0] == '/':
mntpath = mntpath[1:]
paths = mntpath.split('/')
container = paths[0]
subpath = ''
if len(paths) > 1:
subpath = mntpath[len(container):]
return container, subpath
| 32.993548 | 115 | 0.555534 |
f7497600d642a7db6b364f43f0e0a50efd519232 | 3,453 | py | Python | appengine/components/tools/log_since.py | stefb965/luci-py | e0a8a5640c4104e5c90781d833168aa8a8d1f24d | [
"Apache-2.0"
] | 1 | 2017-10-30T15:08:10.000Z | 2017-10-30T15:08:10.000Z | appengine/components/tools/log_since.py | stefb965/luci-py | e0a8a5640c4104e5c90781d833168aa8a8d1f24d | [
"Apache-2.0"
] | null | null | null | appengine/components/tools/log_since.py | stefb965/luci-py | e0a8a5640c4104e5c90781d833168aa8a8d1f24d | [
"Apache-2.0"
] | 1 | 2020-07-05T19:54:40.000Z | 2020-07-05T19:54:40.000Z | #!/usr/bin/env python
# Copyright 2014 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Prints a short log from HEAD (or [end]) to a pseudo revision number."""
__version__ = '1.0'
import optparse
import subprocess
import sys
import calculate_version # pylint: disable=W0403
def get_logs(root, pseudo_revision, mergebase, start, end):
start_ref = '%s~%d' % (mergebase, pseudo_revision - start)
end_ref = mergebase
if end is not None:
end_ref += '~%d' % (pseudo_revision - end)
refspec = '%s..%s' % (start_ref, end_ref)
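# Illustrative arithmetic (made-up numbers): with pseudo_revision=120, start=100 and
# end=None, start_ref is '<mergebase>~20' and end_ref is the mergebase itself, so the
# refspec covers the 20 commits pushed since pseudo revision 100.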
cmd = ['git', 'log', refspec, '--date=short', '--format=%ad %ae %s']
nb_commits = (end or pseudo_revision) - start
try:
log = subprocess.check_output(cmd, cwd=root)
except subprocess.CalledProcessError:
print >> sys.stderr, (
'\nFailed to retrieve the log of last %d commits.' % nb_commits)
return 1
maxlen = 0
lines = []
for l in log.rstrip().splitlines():
parts = l.split(' ', 2)
parts[1] = parts[1].split('@', 1)[0]
maxlen = max(maxlen, len(parts[1]))
lines.append(parts)
out = '\n'.join(
'%s %-*s %s' % (parts[0], maxlen, parts[1], parts[2])
for parts in lines)
return out, refspec
def main():
root = calculate_version.checkout_root('.')
pseudo_revision, mergebase = calculate_version.get_head_pseudo_revision(
root, 'origin/master')
is_pristine = calculate_version.is_pristine(root, mergebase)
parser = optparse.OptionParser(
usage='%prog [options] <start> [end]',
version=__version__,
description=sys.modules[__name__].__doc__)
parser.add_option(
'-f', '--force', action='store_true',
help='Run even if not pristine checkout, e.g. HEAD != origin/master')
parser.add_option(
'-F', '--files', action='store_true', help='List all modified files')
options, args = parser.parse_args()
print >> sys.stderr, (
'Current version: %s @ %s\n' % (pseudo_revision, mergebase))
if not args:
parser.error('Specify the pseudo-revision number of the last push.')
start = int(args[0])
end = None
if len(args) == 2:
end = int(args[1])
if len(args) > 2:
parser.error('Too many arguments.')
if start >= pseudo_revision:
parser.error(
'%d >= %d, you specified \'start\' that was not committed yet?'
% (start, pseudo_revision))
if end is not None:
if start >= end:
parser.error('%d >= %d, did you reverse start and end?' % (start, end))
if end > pseudo_revision:
parser.error(
'%d >= %d, you specified \'end\' that was not committed yet?'
% (end, pseudo_revision))
nb_commits = (end or pseudo_revision) - start
if not is_pristine:
if not options.force:
parser.error(
'Make sure to sync to what was committed and uploaded first.')
print >> sys.stderr, (
'Warning: --force was specified, continuing even if not pristine.\n')
out, refspec = get_logs(root, pseudo_revision, mergebase, start, end)
print(out)
if options.files:
print('')
cmd = ['git', 'diff', refspec, '--stat', '-C', '-C']
try:
subprocess.check_call(cmd, cwd=root)
except subprocess.CalledProcessError:
print >> sys.stderr, (
'\nFailed to list files of last %d commits.' % nb_commits)
return 1
return 0
if __name__ == '__main__':
sys.exit(main())
| 31.108108 | 77 | 0.640023 |
f7497d8712fd029a25bf3621c210603d647a77d2 | 31,698 | py | Python | rioxarray/_io.py | aerisweather/rioxarray | 1755f90ed827ea66477a235677c1c5ecd245833d | [
"ECL-2.0",
"Apache-2.0"
] | 269 | 2019-04-16T15:02:31.000Z | 2022-03-31T08:10:13.000Z | rioxarray/_io.py | aerisweather/rioxarray | 1755f90ed827ea66477a235677c1c5ecd245833d | [
"ECL-2.0",
"Apache-2.0"
] | 287 | 2019-04-17T02:51:12.000Z | 2022-03-30T14:04:49.000Z | rioxarray/_io.py | aerisweather/rioxarray | 1755f90ed827ea66477a235677c1c5ecd245833d | [
"ECL-2.0",
"Apache-2.0"
] | 43 | 2019-04-19T20:24:21.000Z | 2022-03-25T15:36:56.000Z | """
Credits:
This file was adopted from: https://github.com/pydata/xarray # noqa
Source file: https://github.com/pydata/xarray/blob/1d7bcbdc75b6d556c04e2c7d7a042e4379e15303/xarray/backends/rasterio_.py # noqa
"""
import contextlib
import os
import re
import threading
import warnings
import numpy as np
import rasterio
from packaging import version
from rasterio.errors import NotGeoreferencedWarning
from rasterio.vrt import WarpedVRT
from xarray import Dataset, IndexVariable
from xarray.backends.common import BackendArray
from xarray.backends.file_manager import CachingFileManager, FileManager
from xarray.backends.locks import SerializableLock
from xarray.coding import times, variables
from xarray.core import indexing
from xarray.core.dataarray import DataArray
from xarray.core.dtypes import maybe_promote
from xarray.core.utils import is_scalar
from xarray.core.variable import as_variable
from rioxarray.exceptions import RioXarrayError
from rioxarray.rioxarray import _generate_spatial_coords
# TODO: should this be GDAL_LOCK instead?
RASTERIO_LOCK = SerializableLock()
NO_LOCK = contextlib.nullcontext()
class FileHandleLocal(threading.local):
"""
This contains the thread local ThreadURIManager
"""
def __init__(self): # pylint: disable=super-init-not-called
self.thread_manager = None # Initialises in each thread
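# Design note: URIManager (below) keeps one ThreadURIManager per thread via
# FileHandleLocal, so each thread opens and owns its own rasterio dataset handle.
# That is what allows reads without holding RASTERIO_LOCK (the "lockless reading"
# mentioned in the URIManager docstring).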
class ThreadURIManager:
"""
This handles opening & closing file handles in each thread.
"""
def __init__(
self,
opener,
*args,
mode="r",
kwargs=None,
):
self._opener = opener
self._args = args
self._mode = mode
self._kwargs = {} if kwargs is None else dict(kwargs)
self._file_handle = None
@property
def file_handle(self):
"""
File handle returned by the opener.
"""
if self._file_handle is not None:
return self._file_handle
self._file_handle = self._opener(*self._args, mode=self._mode, **self._kwargs)
return self._file_handle
def close(self):
"""
Close file handle.
"""
if self._file_handle is not None:
self._file_handle.close()
self._file_handle = None
def __del__(self):
self.close()
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
self.close()
class URIManager(FileManager):
"""
The URI manager is used for lockless reading
"""
def __init__(
self,
opener,
*args,
mode="r",
kwargs=None,
):
self._opener = opener
self._args = args
self._mode = mode
self._kwargs = {} if kwargs is None else dict(kwargs)
self._local = FileHandleLocal()
def acquire(self, needs_lock=True):
if self._local.thread_manager is None:
self._local.thread_manager = ThreadURIManager(
self._opener, *self._args, mode=self._mode, kwargs=self._kwargs
)
return self._local.thread_manager.file_handle
@contextlib.contextmanager
def acquire_context(self, needs_lock=True):
try:
yield self.acquire(needs_lock=needs_lock)
except Exception:
self.close(needs_lock=needs_lock)
raise
def close(self, needs_lock=True):
if self._local.thread_manager is not None:
self._local.thread_manager.close()
self._local.thread_manager = None
def __del__(self):
self.close(needs_lock=False)
def __getstate__(self):
"""State for pickling."""
return (self._opener, self._args, self._mode, self._kwargs)
def __setstate__(self, state):
"""Restore from a pickle."""
opener, args, mode, kwargs = state
self.__init__(opener, *args, mode=mode, kwargs=kwargs)
class RasterioArrayWrapper(BackendArray):
"""A wrapper around rasterio dataset objects"""
# pylint: disable=too-many-instance-attributes
def __init__(
self,
manager,
lock,
name,
vrt_params=None,
masked=False,
mask_and_scale=False,
unsigned=False,
):
self.manager = manager
self.lock = lock
self.masked = masked or mask_and_scale
self.mask_and_scale = mask_and_scale
# cannot save riods as an attribute: this would break pickleability
riods = manager.acquire()
if vrt_params is not None:
riods = WarpedVRT(riods, **vrt_params)
self.vrt_params = vrt_params
self._shape = (riods.count, riods.height, riods.width)
self._dtype = None
dtypes = riods.dtypes
if not np.all(np.asarray(dtypes) == dtypes[0]):
raise ValueError("All bands should have the same dtype")
dtype = _rasterio_to_numpy_dtype(dtypes)
# handle unsigned case
if mask_and_scale and unsigned and dtype.kind == "i":
self._dtype = np.dtype(f"u{dtype.itemsize}")
elif mask_and_scale and unsigned:
warnings.warn(
f"variable {name!r} has _Unsigned attribute but is not "
"of integer type. Ignoring attribute.",
variables.SerializationWarning,
stacklevel=3,
)
self._fill_value = riods.nodata
if self._dtype is None:
if self.masked:
self._dtype, self._fill_value = maybe_promote(dtype)
else:
self._dtype = dtype
@property
def dtype(self):
"""
Data type of the array
"""
return self._dtype
@property
def fill_value(self):
"""
Fill value of the array
"""
return self._fill_value
@property
def shape(self):
"""
Shape of the array
"""
return self._shape
def _get_indexer(self, key):
"""Get indexer for rasterio array.
        Parameters
        ----------
key: tuple of int
Returns
-------
band_key: an indexer for the 1st dimension
window: two tuples. Each consists of (start, stop).
squeeze_axis: axes to be squeezed
np_ind: indexer for loaded numpy array
See also
--------
indexing.decompose_indexer
"""
if len(key) != 3:
raise RioXarrayError("rasterio datasets should always be 3D")
# bands cannot be windowed but they can be listed
band_key = key[0]
np_inds = []
# bands (axis=0) cannot be windowed but they can be listed
if isinstance(band_key, slice):
start, stop, step = band_key.indices(self.shape[0])
band_key = np.arange(start, stop, step)
# be sure we give out a list
band_key = (np.asarray(band_key) + 1).tolist()
if isinstance(band_key, list): # if band_key is not a scalar
np_inds.append(slice(None))
# but other dims can only be windowed
window = []
squeeze_axis = []
for iii, (ikey, size) in enumerate(zip(key[1:], self.shape[1:])):
if isinstance(ikey, slice):
# step is always positive. see indexing.decompose_indexer
start, stop, step = ikey.indices(size)
np_inds.append(slice(None, None, step))
elif is_scalar(ikey):
# windowed operations will always return an array
# we will have to squeeze it later
squeeze_axis.append(-(2 - iii))
start = ikey
stop = ikey + 1
else:
start, stop = np.min(ikey), np.max(ikey) + 1
np_inds.append(ikey - start)
window.append((start, stop))
if isinstance(key[1], np.ndarray) and isinstance(key[2], np.ndarray):
# do outer-style indexing
np_inds[-2:] = np.ix_(*np_inds[-2:])
return band_key, tuple(window), tuple(squeeze_axis), tuple(np_inds)
def _getitem(self, key):
band_key, window, squeeze_axis, np_inds = self._get_indexer(key)
if not band_key or any(start == stop for (start, stop) in window):
# no need to do IO
shape = (len(band_key),) + tuple(stop - start for (start, stop) in window)
out = np.zeros(shape, dtype=self.dtype)
else:
with self.lock:
riods = self.manager.acquire(needs_lock=False)
if self.vrt_params is not None:
riods = WarpedVRT(riods, **self.vrt_params)
out = riods.read(band_key, window=window, masked=self.masked)
if self.masked:
out = np.ma.filled(out.astype(self.dtype), self.fill_value)
if self.mask_and_scale:
for iii, band_iii in enumerate(np.atleast_1d(band_key) - 1):
out[iii] = (
out[iii] * riods.scales[band_iii] + riods.offsets[band_iii]
)
if squeeze_axis:
out = np.squeeze(out, axis=squeeze_axis)
return out[np_inds]
def __getitem__(self, key):
return indexing.explicit_indexing_adapter(
key, self.shape, indexing.IndexingSupport.OUTER, self._getitem
)
def _parse_envi(meta):
"""Parse ENVI metadata into Python data structures.
See the link for information on the ENVI header file format:
http://www.harrisgeospatial.com/docs/enviheaderfiles.html
Parameters
----------
meta : dict
Dictionary of keys and str values to parse, as returned by the rasterio
tags(ns='ENVI') call.
Returns
-------
parsed_meta : dict
Dictionary containing the original keys and the parsed values
"""
def parsevec(value):
return np.fromstring(value.strip("{}"), dtype="float", sep=",")
def default(value):
return value.strip("{}")
parse = {"wavelength": parsevec, "fwhm": parsevec}
parsed_meta = {key: parse.get(key, default)(value) for key, value in meta.items()}
return parsed_meta
def _rasterio_to_numpy_dtype(dtypes):
"""Numpy dtype from first entry of rasterio dataset.dtypes"""
# rasterio has some special dtype names (complex_int16 -> np.complex64)
if dtypes[0] == "complex_int16":
dtype = np.dtype("complex64")
else:
dtype = np.dtype(dtypes[0])
return dtype
def _to_numeric(value):
"""
Convert the value to a number
"""
try:
value = int(value)
except (TypeError, ValueError):
try:
value = float(value)
except (TypeError, ValueError):
pass
return value
def _parse_tag(key, value):
# NC_GLOBAL is appended to tags with netcdf driver and is not really needed
key = key.split("NC_GLOBAL#")[-1]
if value.startswith("{") and value.endswith("}"):
try:
new_val = np.fromstring(value.strip("{}"), dtype="float", sep=",")
# pylint: disable=len-as-condition
value = new_val if len(new_val) else _to_numeric(value)
except ValueError:
value = _to_numeric(value)
else:
value = _to_numeric(value)
return key, value
def _parse_tags(tags):
parsed_tags = {}
for key, value in tags.items():
key, value = _parse_tag(key, value)
parsed_tags[key] = value
return parsed_tags
NETCDF_DTYPE_MAP = {
0: object, # NC_NAT
1: np.byte, # NC_BYTE
2: np.char, # NC_CHAR
3: np.short, # NC_SHORT
4: np.int_, # NC_INT, NC_LONG
5: float, # NC_FLOAT
6: np.double, # NC_DOUBLE
7: np.ubyte, # NC_UBYTE
8: np.ushort, # NC_USHORT
9: np.uint, # NC_UINT
10: np.int64, # NC_INT64
11: np.uint64, # NC_UINT64
12: object, # NC_STRING
}
def _load_netcdf_attrs(tags, data_array):
"""
Loads the netCDF attributes into the data array
Attributes stored in this format:
- variable_name#attr_name: attr_value
"""
for key, value in tags.items():
key, value = _parse_tag(key, value)
key_split = key.split("#")
if len(key_split) != 2:
continue
variable_name, attr_name = key_split
if variable_name in data_array.coords:
data_array.coords[variable_name].attrs.update({attr_name: value})
def _load_netcdf_1d_coords(tags):
"""
Dimension information:
- NETCDF_DIM_EXTRA: '{time}' (comma separated list of dim names)
- NETCDF_DIM_time_DEF: '{2,6}' (dim size, dim dtype)
- NETCDF_DIM_time_VALUES: '{0,872712.659688}' (comma separated list of data)
"""
dim_names = tags.get("NETCDF_DIM_EXTRA")
if not dim_names:
return {}
dim_names = dim_names.strip("{}").split(",")
coords = {}
for dim_name in dim_names:
dim_def = tags.get(f"NETCDF_DIM_{dim_name}_DEF")
if not dim_def:
continue
# pylint: disable=unused-variable
dim_size, dim_dtype = dim_def.strip("{}").split(",")
dim_dtype = NETCDF_DTYPE_MAP.get(int(dim_dtype), object)
dim_values = tags[f"NETCDF_DIM_{dim_name}_VALUES"].strip("{}")
coords[dim_name] = IndexVariable(
dim_name, np.fromstring(dim_values, dtype=dim_dtype, sep=",")
)
return coords
def build_subdataset_filter(group_names, variable_names):
"""
Example::
'HDF4_EOS:EOS_GRID:"./modis/MOD09GQ.A2017290.h11v04.006.NRT.hdf":
MODIS_Grid_2D:sur_refl_b01_1'
Parameters
----------
group_names: str or list or tuple
Name or names of netCDF groups to filter by.
variable_names: str or list or tuple
Name or names of netCDF variables to filter by.
Returns
-------
re.SRE_Pattern: output of re.compile()
"""
variable_query = r"\w+"
if variable_names is not None:
if not isinstance(variable_names, (tuple, list)):
variable_names = [variable_names]
variable_names = [re.escape(variable_name) for variable_name in variable_names]
variable_query = rf"(?:{'|'.join(variable_names)})"
if group_names is not None:
if not isinstance(group_names, (tuple, list)):
group_names = [group_names]
group_names = [re.escape(group_name) for group_name in group_names]
group_query = rf"(?:{'|'.join(group_names)})"
else:
return re.compile(r"".join([r".*(?:\:/|\:)(/+)?", variable_query, r"$"]))
return re.compile(
r"".join(
[r".*(?:\:/|\:)(/+)?", group_query, r"[:/](/+)?", variable_query, r"$"]
)
)
def _rio_transform(riods):
"""
Get the transform from a rasterio dataset
    regardless of rasterio version.
"""
try:
return riods.transform
except AttributeError:
return riods.affine # rasterio < 1.0
def _get_rasterio_attrs(riods):
"""
Get rasterio specific attributes
"""
# pylint: disable=too-many-branches
# Add rasterio attributes
attrs = _parse_tags(riods.tags(1))
if hasattr(riods, "nodata") and riods.nodata is not None:
# The nodata values for the raster bands
attrs["_FillValue"] = riods.nodata
if hasattr(riods, "scales"):
# The scale values for the raster bands
if len(set(riods.scales)) > 1:
attrs["scales"] = riods.scales
warnings.warn(
"Offsets differ across bands. The 'scale_factor' attribute will "
"not be added. See the 'scales' attribute."
)
else:
attrs["scale_factor"] = riods.scales[0]
if hasattr(riods, "offsets"):
# The offset values for the raster bands
if len(set(riods.offsets)) > 1:
attrs["offsets"] = riods.offsets
warnings.warn(
"Offsets differ across bands. The 'add_offset' attribute will "
"not be added. See the 'offsets' attribute."
)
else:
attrs["add_offset"] = riods.offsets[0]
if hasattr(riods, "descriptions") and any(riods.descriptions):
if len(set(riods.descriptions)) == 1:
attrs["long_name"] = riods.descriptions[0]
else:
# Descriptions for each dataset band
attrs["long_name"] = riods.descriptions
if hasattr(riods, "units") and any(riods.units):
# A list of units string for each dataset band
if len(riods.units) == 1:
attrs["units"] = riods.units[0]
else:
attrs["units"] = riods.units
return attrs
def _decode_datetime_cf(data_array, decode_times, decode_timedelta):
"""
Decide the datetime based on CF conventions
"""
if decode_timedelta is None:
decode_timedelta = decode_times
for coord in data_array.coords:
time_var = None
if decode_times and "since" in data_array[coord].attrs.get("units", ""):
time_var = times.CFDatetimeCoder(use_cftime=True).decode(
as_variable(data_array[coord]), name=coord
)
elif (
decode_timedelta
and data_array[coord].attrs.get("units") in times.TIME_UNITS
):
time_var = times.CFTimedeltaCoder().decode(
as_variable(data_array[coord]), name=coord
)
if time_var is not None:
dimensions, data, attributes, encoding = variables.unpack_for_decoding(
time_var
)
data_array = data_array.assign_coords(
{
coord: IndexVariable(
dims=dimensions,
data=data,
attrs=attributes,
encoding=encoding,
)
}
)
return data_array
def _parse_driver_tags(riods, attrs, coords):
# Parse extra metadata from tags, if supported
parsers = {"ENVI": _parse_envi}
driver = riods.driver
if driver in parsers:
meta = parsers[driver](riods.tags(ns=driver))
for key, value in meta.items():
# Add values as coordinates if they match the band count,
# as attributes otherwise
if isinstance(value, (list, np.ndarray)) and len(value) == riods.count:
coords[key] = ("band", np.asarray(value))
else:
attrs[key] = value
def _load_subdatasets(
riods,
group,
variable,
parse_coordinates,
chunks,
cache,
lock,
masked,
mask_and_scale,
decode_times,
decode_timedelta,
**open_kwargs,
):
"""
Load in rasterio subdatasets
"""
base_tags = _parse_tags(riods.tags())
dim_groups = {}
subdataset_filter = None
if any((group, variable)):
subdataset_filter = build_subdataset_filter(group, variable)
for subdataset in riods.subdatasets:
if subdataset_filter is not None and not subdataset_filter.match(subdataset):
continue
with rasterio.open(subdataset) as rds:
shape = rds.shape
rioda = open_rasterio(
subdataset,
parse_coordinates=shape not in dim_groups and parse_coordinates,
chunks=chunks,
cache=cache,
lock=lock,
masked=masked,
mask_and_scale=mask_and_scale,
default_name=subdataset.split(":")[-1].lstrip("/").replace("/", "_"),
decode_times=decode_times,
decode_timedelta=decode_timedelta,
**open_kwargs,
)
if shape not in dim_groups:
dim_groups[shape] = {rioda.name: rioda}
else:
dim_groups[shape][rioda.name] = rioda
if len(dim_groups) > 1:
dataset = [
Dataset(dim_group, attrs=base_tags) for dim_group in dim_groups.values()
]
elif not dim_groups:
dataset = Dataset(attrs=base_tags)
else:
dataset = Dataset(list(dim_groups.values())[0], attrs=base_tags)
return dataset
def _prepare_dask(result, riods, filename, chunks):
"""
Prepare the data for dask computations
"""
# pylint: disable=import-outside-toplevel
from dask.base import tokenize
# augment the token with the file modification time
try:
mtime = os.path.getmtime(filename)
except OSError:
# the filename is probably an s3 bucket rather than a regular file
mtime = None
if chunks in (True, "auto"):
import dask
from dask.array.core import normalize_chunks
if version.parse(dask.__version__) < version.parse("0.18.0"):
msg = (
"Automatic chunking requires dask.__version__ >= 0.18.0 . "
f"You currently have version {dask.__version__}"
)
raise NotImplementedError(msg)
block_shape = (1,) + riods.block_shapes[0]
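        # Base the automatic chunk sizes on the dataset's native block layout so
        # that dask chunks line up with the on-disk tiling.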
chunks = normalize_chunks(
chunks=(1, "auto", "auto"),
shape=(riods.count, riods.height, riods.width),
dtype=riods.dtypes[0],
previous_chunks=tuple((c,) for c in block_shape),
)
token = tokenize(filename, mtime, chunks)
name_prefix = f"open_rasterio-{token}"
return result.chunk(chunks, name_prefix=name_prefix, token=token)
def _handle_encoding(result, mask_and_scale, masked, da_name):
"""
Make sure encoding handled properly
"""
if "grid_mapping" in result.attrs:
variables.pop_to(result.attrs, result.encoding, "grid_mapping", name=da_name)
if mask_and_scale:
if "scale_factor" in result.attrs:
variables.pop_to(
result.attrs, result.encoding, "scale_factor", name=da_name
)
if "add_offset" in result.attrs:
variables.pop_to(result.attrs, result.encoding, "add_offset", name=da_name)
if masked:
if "_FillValue" in result.attrs:
variables.pop_to(result.attrs, result.encoding, "_FillValue", name=da_name)
if "missing_value" in result.attrs:
variables.pop_to(
result.attrs, result.encoding, "missing_value", name=da_name
)
def open_rasterio(
filename,
parse_coordinates=None,
chunks=None,
cache=None,
lock=None,
masked=False,
mask_and_scale=False,
variable=None,
group=None,
default_name=None,
decode_times=True,
decode_timedelta=None,
**open_kwargs,
):
# pylint: disable=too-many-statements,too-many-locals,too-many-branches
"""Open a file with rasterio (experimental).
This should work with any file that rasterio can open (most often:
geoTIFF). The x and y coordinates are generated automatically from the
file's geoinformation, shifted to the center of each pixel (see
`"PixelIsArea" Raster Space
<http://web.archive.org/web/20160326194152/http://remotesensing.org/geotiff/spec/geotiff2.5.html#2.5.2>`_
for more information).
Parameters
----------
filename: str, rasterio.io.DatasetReader, or rasterio.vrt.WarpedVRT
Path to the file to open. Or already open rasterio dataset.
parse_coordinates: bool, optional
Whether to parse the x and y coordinates out of the file's
``transform`` attribute or not. The default is to automatically
parse the coordinates only if they are rectilinear (1D).
It can be useful to set ``parse_coordinates=False``
if your files are very large or if you don't need the coordinates.
chunks: int, tuple or dict, optional
Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or
        ``{'x': 5, 'y': 5}``. If chunks is provided, it is used to load the new
DataArray into a dask array. Chunks can also be set to
``True`` or ``"auto"`` to choose sensible chunk sizes according to
``dask.config.get("array.chunk-size")``.
cache: bool, optional
If True, cache data loaded from the underlying datastore in memory as
NumPy arrays when accessed to avoid reading from the underlying data-
store multiple times. Defaults to True unless you specify the `chunks`
argument to use dask, in which case it defaults to False.
lock: bool or dask.utils.SerializableLock, optional
If chunks is provided, this argument is used to ensure that only one
thread per process is reading from a rasterio file object at a time.
By default and when a lock instance is provided,
a :class:`xarray.backends.CachingFileManager` is used to cache File objects.
Since rasterio also caches some data, this will make repeated reads from the
same object fast.
When ``lock=False``, no lock is used, allowing for completely parallel reads
from multiple threads or processes. However, a new file handle is opened on
each request.
masked: bool, optional
If True, read the mask and set values to NaN. Defaults to False.
mask_and_scale: bool, optional
Lazily scale (using the `scales` and `offsets` from rasterio) and mask.
If the _Unsigned attribute is present treat integer arrays as unsigned.
variable: str or list or tuple, optional
Variable name or names to use to filter loading.
group: str or list or tuple, optional
Group name or names to use to filter loading.
default_name: str, optional
The name of the data array if none exists. Default is None.
decode_times: bool, optional
If True, decode times encoded in the standard NetCDF datetime format
into datetime objects. Otherwise, leave them encoded as numbers.
decode_timedelta: bool, optional
If True, decode variables and coordinates with time units in
{“days”, “hours”, “minutes”, “seconds”, “milliseconds”, “microseconds”}
into timedelta objects. If False, leave them encoded as numbers.
        If None (default), assume the same value as decode_times.
**open_kwargs: kwargs, optional
Optional keyword arguments to pass into rasterio.open().
Returns
-------
:obj:`xarray.Dataset` | :obj:`xarray.DataArray` | List[:obj:`xarray.Dataset`]:
The newly created dataset(s).
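    Examples
    --------
    A minimal usage sketch; ``example.tif`` is only a placeholder path for any
    raster readable by rasterio, and a simple single-band GeoTIFF is assumed:
    >>> rds = open_rasterio("example.tif", masked=True)  # doctest: +SKIP
    >>> rds.isel(band=0)  # doctest: +SKIP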
"""
parse_coordinates = True if parse_coordinates is None else parse_coordinates
masked = masked or mask_and_scale
vrt_params = None
if isinstance(filename, rasterio.io.DatasetReader):
filename = filename.name
elif isinstance(filename, rasterio.vrt.WarpedVRT):
vrt = filename
filename = vrt.src_dataset.name
vrt_params = dict(
src_crs=vrt.src_crs.to_string() if vrt.src_crs else None,
crs=vrt.crs.to_string() if vrt.crs else None,
resampling=vrt.resampling,
tolerance=vrt.tolerance,
src_nodata=vrt.src_nodata,
nodata=vrt.nodata,
width=vrt.width,
height=vrt.height,
src_transform=vrt.src_transform,
transform=vrt.transform,
dtype=vrt.working_dtype,
warp_extras=vrt.warp_extras,
)
if lock in (True, None):
lock = RASTERIO_LOCK
elif lock is False:
lock = NO_LOCK
# ensure default for sharing is False
# ref https://github.com/mapbox/rasterio/issues/1504
open_kwargs["sharing"] = open_kwargs.get("sharing", False)
with warnings.catch_warnings(record=True) as rio_warnings:
if lock is not NO_LOCK:
manager = CachingFileManager(
rasterio.open, filename, lock=lock, mode="r", kwargs=open_kwargs
)
else:
manager = URIManager(rasterio.open, filename, mode="r", kwargs=open_kwargs)
riods = manager.acquire()
captured_warnings = rio_warnings.copy()
# raise the NotGeoreferencedWarning if applicable
for rio_warning in captured_warnings:
if not riods.subdatasets or not isinstance(
rio_warning.message, NotGeoreferencedWarning
):
warnings.warn(str(rio_warning.message), type(rio_warning.message))
# open the subdatasets if they exist
if riods.subdatasets:
return _load_subdatasets(
riods=riods,
group=group,
variable=variable,
parse_coordinates=parse_coordinates,
chunks=chunks,
cache=cache,
lock=lock,
masked=masked,
mask_and_scale=mask_and_scale,
decode_times=decode_times,
decode_timedelta=decode_timedelta,
**open_kwargs,
)
if vrt_params is not None:
riods = WarpedVRT(riods, **vrt_params)
if cache is None:
cache = chunks is None
# Get bands
if riods.count < 1:
raise ValueError("Unknown dims")
# parse tags & load alternate coords
attrs = _get_rasterio_attrs(riods=riods)
coords = _load_netcdf_1d_coords(riods.tags())
_parse_driver_tags(riods=riods, attrs=attrs, coords=coords)
for coord in coords:
if f"NETCDF_DIM_{coord}" in attrs:
coord_name = coord
attrs.pop(f"NETCDF_DIM_{coord}")
break
else:
coord_name = "band"
coords[coord_name] = np.asarray(riods.indexes)
# Get geospatial coordinates
if parse_coordinates:
coords.update(
_generate_spatial_coords(_rio_transform(riods), riods.width, riods.height)
)
unsigned = False
encoding = {}
if mask_and_scale and "_Unsigned" in attrs:
unsigned = variables.pop_to(attrs, encoding, "_Unsigned") == "true"
if masked:
encoding["dtype"] = str(_rasterio_to_numpy_dtype(riods.dtypes))
da_name = attrs.pop("NETCDF_VARNAME", default_name)
data = indexing.LazilyOuterIndexedArray(
RasterioArrayWrapper(
manager,
lock,
name=da_name,
vrt_params=vrt_params,
masked=masked,
mask_and_scale=mask_and_scale,
unsigned=unsigned,
)
)
# this lets you write arrays loaded with rasterio
data = indexing.CopyOnWriteArray(data)
if cache and chunks is None:
data = indexing.MemoryCachedArray(data)
result = DataArray(
data=data, dims=(coord_name, "y", "x"), coords=coords, attrs=attrs, name=da_name
)
result.encoding = encoding
    # update attributes from NetCDF attributes
_load_netcdf_attrs(riods.tags(), result)
result = _decode_datetime_cf(
result, decode_times=decode_times, decode_timedelta=decode_timedelta
)
# make sure the _FillValue is correct dtype
if "_FillValue" in attrs:
attrs["_FillValue"] = result.dtype.type(attrs["_FillValue"])
# handle encoding
_handle_encoding(result, mask_and_scale, masked, da_name)
# Affine transformation matrix (always available)
# This describes coefficients mapping pixel coordinates to CRS
# For serialization store as tuple of 6 floats, the last row being
# always (0, 0, 1) per definition (see
# https://github.com/sgillies/affine)
result.rio.write_transform(_rio_transform(riods), inplace=True)
if hasattr(riods, "crs") and riods.crs:
result.rio.write_crs(riods.crs, inplace=True)
if chunks is not None:
result = _prepare_dask(result, riods, filename, chunks)
# Make the file closeable
result.set_close(manager.close)
result.rio._manager = manager
# add file path to encoding
result.encoding["source"] = riods.name
result.encoding["rasterio_dtype"] = str(riods.dtypes[0])
return result
| 33.33123 | 127 | 0.618146 |
f7497f1f4bb2a67d60b34325c900ce8e14e8d00d | 3,202 | py | Python | alipay/aop/api/request/AlipayCommerceEducateCampusExamineQueryRequest.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/request/AlipayCommerceEducateCampusExamineQueryRequest.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/request/AlipayCommerceEducateCampusExamineQueryRequest.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
class AlipayCommerceEducateCampusExamineQueryRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.commerce.educate.campus.examine.query'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
| 25.015625 | 142 | 0.637726 |
f749833b519eca60c2ae70fd72c38e6decefd258 | 9,769 | py | Python | opacus/privacy_analysis.py | nhsx-mirror/SynthVAE | 64c00dff1b9cb1fe22b4b25e585b17ca5c7b9651 | [
"MIT"
] | 958 | 2020-08-28T15:34:15.000Z | 2022-03-29T20:58:14.000Z | opacus/privacy_analysis.py | nhsx-mirror/SynthVAE | 64c00dff1b9cb1fe22b4b25e585b17ca5c7b9651 | [
"MIT"
] | 330 | 2020-08-28T07:11:02.000Z | 2022-03-31T19:16:10.000Z | opacus/privacy_analysis.py | nhsx-mirror/SynthVAE | 64c00dff1b9cb1fe22b4b25e585b17ca5c7b9651 | [
"MIT"
] | 161 | 2020-08-28T06:12:10.000Z | 2022-03-31T07:47:04.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
r"""
*Based on Google's TF Privacy:* https://github.com/tensorflow/privacy/blob/master/tensorflow_privacy/privacy/analysis/rdp_accountant.py.
*Here, we update this code to Python 3, and optimize dependencies.*
Functionality for computing Renyi Differential Privacy (RDP) of an additive
Sampled Gaussian Mechanism (SGM).
Example:
Suppose that we have run an SGM applied to a function with L2-sensitivity of 1.
Its parameters are given as a list of tuples
``[(q_1, sigma_1, steps_1), ..., (q_k, sigma_k, steps_k)],``
and we wish to compute epsilon for a given target delta.
The example code would be:
>>> max_order = 32
>>> orders = range(2, max_order + 1)
>>> rdp = np.zeros_like(orders, dtype=float)
>>> for q, sigma, steps in parameters:
>>> rdp += privacy_analysis.compute_rdp(q, sigma, steps, orders)
>>> epsilon, opt_order = privacy_analysis.get_privacy_spent(orders, rdp, delta)
"""
import math
from typing import List, Tuple, Union
import numpy as np
from scipy import special
########################
# LOG-SPACE ARITHMETIC #
########################
def _log_add(logx: float, logy: float) -> float:
r"""Adds two numbers in the log space.
Args:
logx: First term in log space.
logy: Second term in log space.
Returns:
Sum of numbers in log space.
"""
a, b = min(logx, logy), max(logx, logy)
if a == -np.inf: # adding 0
return b
# Use exp(a) + exp(b) = (exp(a - b) + 1) * exp(b)
return math.log1p(math.exp(a - b)) + b # log1p(x) = log(x + 1)
def _log_sub(logx: float, logy: float) -> float:
r"""Subtracts two numbers in the log space.
Args:
logx: First term in log space. Expected to be greater than the second term.
logy: First term in log space. Expected to be less than the first term.
Returns:
Difference of numbers in log space.
Raises:
ValueError
If the result is negative.
"""
if logx < logy:
raise ValueError("The result of subtraction must be non-negative.")
if logy == -np.inf: # subtracting 0
return logx
if logx == logy:
return -np.inf # 0 is represented as -np.inf in the log space.
try:
# Use exp(x) - exp(y) = (exp(x - y) - 1) * exp(y).
return math.log(math.expm1(logx - logy)) + logy # expm1(x) = exp(x) - 1
except OverflowError:
return logx
def _compute_log_a_for_int_alpha(q: float, sigma: float, alpha: int) -> float:
r"""Computes :math:`log(A_\alpha)` for integer ``alpha``.
Notes:
Note that
:math:`A_\alpha` is real valued function of ``alpha`` and ``q``,
and that 0 < ``q`` < 1.
Refer to Section 3.3 of https://arxiv.org/pdf/1908.10530.pdf for details.
Args:
q: Sampling rate of SGM.
sigma: The standard deviation of the additive Gaussian noise.
alpha: The order at which RDP is computed.
Returns:
:math:`log(A_\alpha)` as defined in Section 3.3 of
https://arxiv.org/pdf/1908.10530.pdf.
"""
# Initialize with 0 in the log space.
log_a = -np.inf
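    # Accumulate, in log space, the binomial sum from Section 3.3 of the paper:
    # A_alpha = sum_{i=0}^{alpha} C(alpha, i) q^i (1-q)^(alpha-i) * exp((i^2 - i) / (2 sigma^2)).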
for i in range(alpha + 1):
log_coef_i = (
math.log(special.binom(alpha, i))
+ i * math.log(q)
+ (alpha - i) * math.log(1 - q)
)
s = log_coef_i + (i * i - i) / (2 * (sigma ** 2))
log_a = _log_add(log_a, s)
return float(log_a)
def _compute_log_a_for_frac_alpha(q: float, sigma: float, alpha: float) -> float:
r"""Computes :math:`log(A_\alpha)` for fractional ``alpha``.
Notes:
Note that
:math:`A_\alpha` is real valued function of ``alpha`` and ``q``,
and that 0 < ``q`` < 1.
Refer to Section 3.3 of https://arxiv.org/pdf/1908.10530.pdf for details.
Args:
q: Sampling rate of SGM.
sigma: The standard deviation of the additive Gaussian noise.
alpha: The order at which RDP is computed.
Returns:
:math:`log(A_\alpha)` as defined in Section 3.3 of
https://arxiv.org/pdf/1908.10530.pdf.
"""
# The two parts of A_alpha, integrals over (-inf,z0] and [z0, +inf), are
# initialized to 0 in the log space:
log_a0, log_a1 = -np.inf, -np.inf
i = 0
z0 = sigma ** 2 * math.log(1 / q - 1) + 0.5
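    # z0 is the crossover point that splits the integral defining A_alpha into the
    # (-inf, z0] and [z0, +inf) pieces accumulated in log_a0 and log_a1 below.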
while True: # do ... until loop
coef = special.binom(alpha, i)
log_coef = math.log(abs(coef))
j = alpha - i
log_t0 = log_coef + i * math.log(q) + j * math.log(1 - q)
log_t1 = log_coef + j * math.log(q) + i * math.log(1 - q)
log_e0 = math.log(0.5) + _log_erfc((i - z0) / (math.sqrt(2) * sigma))
log_e1 = math.log(0.5) + _log_erfc((z0 - j) / (math.sqrt(2) * sigma))
log_s0 = log_t0 + (i * i - i) / (2 * (sigma ** 2)) + log_e0
log_s1 = log_t1 + (j * j - j) / (2 * (sigma ** 2)) + log_e1
if coef > 0:
log_a0 = _log_add(log_a0, log_s0)
log_a1 = _log_add(log_a1, log_s1)
else:
log_a0 = _log_sub(log_a0, log_s0)
log_a1 = _log_sub(log_a1, log_s1)
i += 1
if max(log_s0, log_s1) < -30:
break
return _log_add(log_a0, log_a1)
def _compute_log_a(q: float, sigma: float, alpha: float) -> float:
r"""Computes :math:`log(A_\alpha)` for any positive finite ``alpha``.
Notes:
Note that
:math:`A_\alpha` is real valued function of ``alpha`` and ``q``,
and that 0 < ``q`` < 1.
Refer to Section 3.3 of https://arxiv.org/pdf/1908.10530.pdf
for details.
Args:
q: Sampling rate of SGM.
sigma: The standard deviation of the additive Gaussian noise.
alpha: The order at which RDP is computed.
Returns:
:math:`log(A_\alpha)` as defined in the paper mentioned above.
"""
if float(alpha).is_integer():
return _compute_log_a_for_int_alpha(q, sigma, int(alpha))
else:
return _compute_log_a_for_frac_alpha(q, sigma, alpha)
def _log_erfc(x: float) -> float:
r"""Computes :math:`log(erfc(x))` with high accuracy for large ``x``.
Helper function used in computation of :math:`log(A_\alpha)`
for a fractional alpha.
Args:
x: The input to the function
Returns:
:math:`log(erfc(x))`
"""
return math.log(2) + special.log_ndtr(-x * 2 ** 0.5)
def _compute_rdp(q: float, sigma: float, alpha: float) -> float:
r"""Computes RDP of the Sampled Gaussian Mechanism at order ``alpha``.
Args:
q: Sampling rate of SGM.
sigma: The standard deviation of the additive Gaussian noise.
alpha: The order at which RDP is computed.
Returns:
RDP at order ``alpha``; can be np.inf.
"""
if q == 0:
return 0
# no privacy
if sigma == 0:
return np.inf
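    # Full-batch sampling (q = 1) reduces the SGM to the plain Gaussian mechanism,
    # whose RDP at order alpha is alpha / (2 * sigma^2).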
if q == 1.0:
return alpha / (2 * sigma ** 2)
if np.isinf(alpha):
return np.inf
return _compute_log_a(q, sigma, alpha) / (alpha - 1)
def compute_rdp(
q: float, noise_multiplier: float, steps: int, orders: Union[List[float], float]
) -> Union[List[float], float]:
r"""Computes Renyi Differential Privacy (RDP) guarantees of the
Sampled Gaussian Mechanism (SGM) iterated ``steps`` times.
Args:
q: Sampling rate of SGM.
noise_multiplier: The ratio of the standard deviation of the
additive Gaussian noise to the L2-sensitivity of the function
to which it is added. Note that this is same as the standard
deviation of the additive Gaussian noise when the L2-sensitivity
of the function is 1.
steps: The number of iterations of the mechanism.
orders: An array (or a scalar) of RDP orders.
Returns:
The RDP guarantees at all orders; can be ``np.inf``.
"""
if isinstance(orders, float):
rdp = _compute_rdp(q, noise_multiplier, orders)
else:
rdp = np.array([_compute_rdp(q, noise_multiplier, order) for order in orders])
return rdp * steps
def get_privacy_spent(
orders: Union[List[float], float], rdp: Union[List[float], float], delta: float
) -> Tuple[float, float]:
r"""Computes epsilon given a list of Renyi Differential Privacy (RDP) values at
multiple RDP orders and target ``delta``.
    The computation of epsilon, i.e. conversion from RDP to (eps, delta)-DP,
is based on the theorem presented in the following work:
Borja Balle et al. "Hypothesis testing interpretations and Renyi differential privacy."
International Conference on Artificial Intelligence and Statistics. PMLR, 2020.
    Particularly, Theorem 21 in the arXiv version https://arxiv.org/abs/1905.09982.
Args:
orders: An array (or a scalar) of orders (alphas).
rdp: A list (or a scalar) of RDP guarantees.
delta: The target delta.
Returns:
Pair of epsilon and optimal order alpha.
Raises:
ValueError
If the lengths of ``orders`` and ``rdp`` are not equal.
"""
orders_vec = np.atleast_1d(orders)
rdp_vec = np.atleast_1d(rdp)
if len(orders_vec) != len(rdp_vec):
raise ValueError(
f"Input lists must have the same length.\n"
f"\torders_vec = {orders_vec}\n"
f"\trdp_vec = {rdp_vec}\n"
)
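    # RDP -> (eps, delta)-DP conversion (Theorem 21 of the paper cited above):
    # eps(alpha) = rdp(alpha) - (log(delta) + log(alpha)) / (alpha - 1) + log((alpha - 1) / alpha),
    # evaluated at every order; the smallest eps and its order are returned below.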
eps = (
rdp_vec
- (np.log(delta) + np.log(orders_vec)) / (orders_vec - 1)
+ np.log((orders_vec - 1) / orders_vec)
)
# special case when there is no privacy
if np.isnan(eps).all():
return np.inf, np.nan
idx_opt = np.nanargmin(eps) # Ignore NaNs
return eps[idx_opt], orders_vec[idx_opt]
| 31.310897 | 136 | 0.604463 |
f749b1e973e1bf8ca1d6b889e5ab6ae31308500a | 54 | py | Python | webservices/env.py | adborden/openFEC | 53a0a2b1a56292c5ca8e7a3185832baaed4a63d9 | [
"CC0-1.0"
] | 246 | 2015-01-07T16:59:42.000Z | 2020-01-18T20:35:05.000Z | webservices/env.py | adborden/openFEC | 53a0a2b1a56292c5ca8e7a3185832baaed4a63d9 | [
"CC0-1.0"
] | 2,532 | 2015-01-02T16:22:46.000Z | 2018-03-08T17:30:53.000Z | webservices/env.py | 18F/openFEC | ee7b7368e0934f50c391789fb55444f811c1a2f7 | [
"CC0-1.0"
] | 75 | 2015-02-01T00:46:56.000Z | 2021-02-14T10:51:34.000Z | import cfenv
env = cfenv.AppEnv()
__all__ = ['env']
| 9 | 20 | 0.648148 |
f749b7cb3e9cf99c7c9924870973047ee91ad75a | 76 | py | Python | tests/CONSTANTS.py | gousteris/git-diff-conditional-buildkite-plugin | 517442ca9ebe21021e1078debf88c231ba8e8dff | [
"MIT"
] | 19 | 2020-03-27T11:53:11.000Z | 2021-04-14T23:11:27.000Z | tests/CONSTANTS.py | segmentio/git-diff-conditional-buildkite-plugin | 62a276000149da0bbf4e152634211c75966a8558 | [
"MIT"
] | 7 | 2020-03-27T11:59:51.000Z | 2021-08-06T14:01:42.000Z | tests/CONSTANTS.py | segmentio/git-diff-conditional-buildkite-plugin | 62a276000149da0bbf4e152634211c75966a8558 | [
"MIT"
] | 6 | 2020-03-28T20:49:49.000Z | 2022-01-19T17:48:02.000Z | LOGGER_NAME = "cli"
PLUGIN_PREFIX = "BUILDKITE_PLUGIN_GIT_DIFF_CONDITIONAL"
| 25.333333 | 55 | 0.842105 |
f749cbea49751db5546b18ea9fe69eec26724adb | 15,583 | py | Python | pylib/zeus/runner.py | xkmato/py77 | 9c44d8f8924f47a7331c29fd0287a4bb9416d316 | [
"MIT"
] | null | null | null | pylib/zeus/runner.py | xkmato/py77 | 9c44d8f8924f47a7331c29fd0287a4bb9416d316 | [
"MIT"
] | null | null | null | pylib/zeus/runner.py | xkmato/py77 | 9c44d8f8924f47a7331c29fd0287a4bb9416d316 | [
"MIT"
] | 2 | 2018-07-16T19:14:11.000Z | 2020-10-15T08:48:32.000Z | #!/usr/bin/env python
"""Handles run."""
__author__ = 'pramodg@room77.com (Pramod Gupta)'
__copyright__ = 'Copyright 2012 Room77, Inc.'
import itertools
import json
import os
import re
import sys
import time
from pylib.base.flags import Flags
from pylib.base.exec_utils import ExecUtils
from pylib.base.term_color import TermColor
from pylib.file.file_utils import FileUtils
from pylib.util.mail.mailer import Mailer
from pylib.zeus.pipeline_cmd_base import PipelineCmdBase
from pylib.zeus.pipeline_config import PipelineConfig
from pylib.zeus.pipeline_utils import PipelineUtils
class Runner(PipelineCmdBase):
"""Class to handle run."""
# The different exit codes that can be returned after running a task.
EXITCODE = {
'_LOWEST':-1, # Internal use.
'SUCCESS': 0,
'ALLOW_FAIL': 1,
'FAILURE': 2,
'ABORT_FAIL': 3,
}
EXITCODE_DESCRIPTION = {
0: 'SUCCESS',
1: 'ALLOW_FAIL',
2: 'FAILURE',
3: 'ABORT_FAIL',
}
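  # Name of the status marker file written into each task's out dir for a given
  # exit code (see _WriteOutDirsStatus).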
EXITCODE_FILE = {
0: 'SUCCESS',
1: 'SUCCESS',
2: 'FAILURE',
3: 'ABORT',
}
TASK_OPTIONS = {
# Default option. Task will run regardless of if earlier tasks in the directory
# were successful or not. Task will not run if any task across the pipeline was
# marked as `abort_fail`.
'NORMAL': 0,
# If this step fails, prevent any subsequent steps across the entire pipeline from
# running. The pipeline will be aborted. All currently running tasks will finish,
# but no further tasks will be run. To enable this option, add `.abort_fail` to the
# task name.
'ABORT_FAIL': 1,
# If this step fails, do not mark the out directory as failed. Mark it as successful
# if all other tasks at this level have succeeded. This will allow publishing of a
# task directory even if the only steps that failed were marked as `allow_fail`. To
# enable this option, add `.allow_fail` to the task name.
'ALLOW_FAIL': 2,
# If any earlier tasks located in the same directory as this task failed, prevent
# this task from running. This task will also be marked as failed. To enable this
# option, add `.require_dir_success` to the task name.
'REQUIRE_DIR_SUCCESS': 3,
}
@classmethod
def Init(cls, parser):
super(Runner, cls).Init(parser)
parser.add_argument('-t', '--timeout', type=float, default=86400,
help='Timeout for each task in seconds.')
parser.add_argument('--pool_size', type=int, default=0,
help='The pool size for parallelization.')
parser.add_argument('--detailed_success_mail', action='store_true', default=False,
help='Sends a detailed mail even on success. Useful for debugging.')
parser.add_argument('--success_mail', type=str, default='',
help='The mail to use to send info in case of success.')
parser.add_argument('--failure_mail', type=str, default='',
                        help='The mail to use to send info in case of failure.')
parser.add_argument('--mail_domain', type=str, default='corp.room77.com',
help='The domain to use when sending automated '
'pipeline mail.')
@classmethod
def WorkHorse(cls, tasks):
"""Runs the workhorse for the command.
Args:
tasks: OrderedDict {int, set(string)}: Dict from priority to set of tasks to execute at the
priority. Note: the dict is ordered by priority.
Return:
(list, list): Returns a tuple of list in the form
(successful_tasks, failed_tasks) specifying tasks that succeeded and
ones that failed.
"""
# All our binaries assume they will be run from the source root.
start = time.time()
os.chdir(FileUtils.GetSrcRoot())
cls._CreateDirsForTasks(tasks)
successful_run = []; failed_run = []
aborted_task = None
# NOTE(stephen): Storing task dir status and task out dir status separately since
# pipelines do not always have an out dir defined.
dirs_status = {}
out_dirs_status = {}
for set_tasks in tasks.values():
if aborted_task:
failed_run += set_tasks
continue
tasks_to_run = []
for task in set_tasks:
task_options = cls.__GetTaskOptions(task)
# Check if this task requires all previous tasks in the same directory to be
# successful.
if task_options[Runner.TASK_OPTIONS['REQUIRE_DIR_SUCCESS']]:
task_dir = PipelineUtils.TaskDirName(task)
cur_dir_status = dirs_status.get(task_dir)
# If any previous tasks have been run in this directory, check to ensure all
# of them were successful.
if cur_dir_status and cur_dir_status != Runner.EXITCODE['SUCCESS']:
failed_run += [task]
task_display_name = PipelineUtils.TaskDisplayName(task)
TermColor.Info('Skipped %s' % task_display_name)
TermColor.Failure(
'Skipped Task: %s due to earlier failures in task dir' % task_display_name
)
continue
tasks_to_run.append(task)
# It is possible for all steps at this priority level to be skipped due to the
# task options selected.
if set_tasks and not tasks_to_run:
continue
# Run all the tasks at the same priority in parallel.
args = zip(itertools.repeat(cls), itertools.repeat('_RunSingeTask'),
tasks_to_run)
task_res = ExecUtils.ExecuteParallel(args, Flags.ARGS.pool_size)
# task_res = []
# for task in tasks_to_run: task_res += [cls._RunSingeTask(task)]
if not task_res:
TermColor.Error('Could not process: %s' % tasks_to_run)
failed_run += tasks_to_run
continue
for (res, task) in task_res:
if res == Runner.EXITCODE['SUCCESS']:
successful_run += [task]
elif res == Runner.EXITCODE['FAILURE']:
failed_run += [task]
elif res == Runner.EXITCODE['ALLOW_FAIL']:
failed_run += [task]
elif res == Runner.EXITCODE['ABORT_FAIL']:
failed_run += [task]
aborted_task = task
else:
TermColor.Fatal('Invalid return %d code for %s' % (res, task))
# Update the current status of all tasks in the same directory.
task_dir = PipelineUtils.TaskDirName(task)
dirs_status[task_dir] = max(
dirs_status.get(task_dir, Runner.EXITCODE['_LOWEST']), res,
)
# Update the out dir status.
out_dir = PipelineUtils.GetOutDirForTask(task)
if out_dir:
out_dirs_status[out_dir] = max(
out_dirs_status.get(out_dir, Runner.EXITCODE['_LOWEST']), res,
)
# Write the status files to the dirs.
cls._WriteOutDirsStatus(out_dirs_status)
# Send the final status mail.
time_taken = time.time() - start
cls._SendFinalStatusMail(successful_run, failed_run, aborted_task, time_taken)
if aborted_task:
TermColor.Failure('Aborted by task: %s' % aborted_task)
return (successful_run, failed_run)
@classmethod
def _CreateDirsForTasks(cls, tasks):
"""Creates the relevant dirs for tasks.
Args:
tasks: OrderedDict {int, set(string)}: Dict from priority to set of tasks to execute at the
priority. Note: the dict is ordered by priority.
"""
for set_tasks in tasks.values():
for task in set_tasks:
rel_path = PipelineUtils.GetTaskOutputRelativeDir(task)
PipelineConfig.Instance().CreateAllSubDirsForPath(rel_path)
@classmethod
def _RunSingeTask(cls, task):
"""Runs a Single Task.
Args:
task: string: The task to run.
Return:
(EXITCODE, string): Returns a tuple of the result status and the task.
"""
TermColor.Info('Executing %s' % PipelineUtils.TaskDisplayName(task))
task_vars = cls.__GetEnvVarsForTask(task)
TermColor.VInfo(4, 'VARS: \n%s' % task_vars)
task_cmd = task
pipe_output = True
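    # If the task has a dedicated log file, redirect its stdout/stderr there;
    # otherwise capture the output in-process so it can be included in status mail.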
log_file = PipelineUtils.GetLogFileForTask(task)
if log_file:
task_cmd += ' > ' + PipelineUtils.GetLogFileForTask(task) + ' 2>&1'
pipe_output = False
timeout = cls.__GetTimeOutForTask(task)
start = time.time()
(status, out) = ExecUtils.RunCmd(task_cmd, timeout, pipe_output, task_vars)
time_taken = time.time() - start
TermColor.Info('Executed %s. Took %.2fs' % (PipelineUtils.TaskDisplayName(task), time_taken))
if status:
TermColor.Failure('Failed Task: %s' % PipelineUtils.TaskDisplayName(task))
if task_vars.get('PIPELINE_TASK_ABORT_FAIL', None):
status_code = Runner.EXITCODE['ABORT_FAIL']
elif task_vars.get('PIPELINE_TASK_ALLOW_FAIL', None):
status_code = Runner.EXITCODE['ALLOW_FAIL']
else:
status_code = Runner.EXITCODE['FAILURE']
else:
status_code = Runner.EXITCODE['SUCCESS']
cls._SendMailForTask(task, status_code, time_taken, log_file, out)
# Everything done. Mark the task as successful.
return (status_code, task)
@classmethod
def __GetEnvVarsForTask(cls, task):
"""Returns the env vars for the task.
Args:
task: string: The task for which the envvar should be prepared.
Returns:
dict {string, string}: The dictionary of IDS to values.
"""
rel_path = PipelineUtils.GetTaskOutputRelativeDir(task)
vars = {}
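    # For every output subdir ID, also expose <ID>_PREV pointing at the previous
    # dated run of that dir (falling back to the current dir when none exists).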
for k, v in PipelineConfig.Instance().GetAllSubDirsForPath(rel_path).items():
vars[k] = v
prev_dir = FileUtils.GetPreviousDatedDir(v)
if not prev_dir: prev_dir = v
vars[k + '_PREV'] = prev_dir
vars.update(PipelineConfig.Instance().GetAllENVVars())
# Check if the task is critical or not.
task_options = cls.__GetTaskOptions(task)
if task_options[Runner.TASK_OPTIONS['ABORT_FAIL']]:
vars['PIPELINE_TASK_ABORT_FAIL'] = '1'
if task_options[Runner.TASK_OPTIONS['ALLOW_FAIL']]:
vars['PIPELINE_TASK_ALLOW_FAIL'] = '1'
return vars
@classmethod
def __GetTimeOutForTask(cls, task):
"""Returns the timeout for the task.
Args:
task: string: The task for which the timeout should be prepared.
Returns:
int: The timeout in seconds.
"""
timeout = FileUtils.FileContents(task + '.timeout')
if not timeout:
timeout = FileUtils.FileContents(os.path.join(PipelineUtils.TaskDirName(task), 'timeout'))
if not timeout: return Flags.ARGS.timeout
timeout = re.sub('\s*', '', timeout)
timeout_parts = re.split('(\d+)', timeout)
if len(timeout_parts) < 3:
TermColor.Warning('Ignoring invalid timeout [%s] for task: %s' % (timeout, task))
return Flags.ARGS.timeout
timeout = float(timeout_parts[1])
annotation = timeout_parts[2]
if not annotation: return timeout
elif annotation == 'd': timeout *= 86400
elif annotation == 'h': timeout *= 3600
elif annotation == 'm': timeout *= 60
elif annotation == 'ms': timeout *= 0.001
elif annotation == 'us': timeout *= 0.000001
return timeout
@classmethod
def __GetTaskOptions(cls, task):
rel_task = PipelineUtils.TaskRelativeName(task)
options = {
v: False
for v in Runner.TASK_OPTIONS.values()
}
if '.abort_fail' in rel_task:
options[Runner.TASK_OPTIONS['ABORT_FAIL']] = True
if '.allow_fail' in rel_task:
options[Runner.TASK_OPTIONS['ALLOW_FAIL']] = True
if '.require_dir_success' in rel_task:
options[Runner.TASK_OPTIONS['REQUIRE_DIR_SUCCESS']] = True
if not options:
options[Runner.TASK_OPTIONS['NORMAL']] = True
return options
@classmethod
def _SendMailForTask(cls, task, status_code, time_taken, log_file, msg):
"""Sends the mail if required for the task.
Args:
      task: string: The task for which the mail should be sent.
status_code: EXITCODE: The exit code for the task.
time_taken: float: Time taken in seconds.
log_file: string: The log file containing the output of the task.
msg: string: The output message piped directly. Note only one of msg or log_file will be
present at any time.
"""
if status_code == Runner.EXITCODE['SUCCESS']:
if not Flags.ARGS.detailed_success_mail: return
receiver = Flags.ARGS.success_mail
else: receiver = Flags.ARGS.failure_mail
# Check if there is no receiver for the mail.
if not receiver: return
mail_domain = Flags.ARGS.mail_domain
status_description = Runner.EXITCODE_DESCRIPTION[status_code]
subject = "[%s:%s] %s : %s" % (PipelineConfig.Instance().pipeline_id(),
PipelineConfig.Instance().pipeline_date(),
status_description, PipelineUtils.TaskDisplayName(task))
body = 'Executed task: %s. \nStatus:%s \nTime: %.2fs.' % (task, status_description, time_taken)
if msg:
body += '\n%s' % msg
Mailer().send_simple_message(
PipelineUtils.ZeusEmailId(mail_domain), [receiver], subject, body)
else:
Mailer().send_message_from_files(
PipelineUtils.ZeusEmailId(mail_domain), [receiver], subject, [log_file],
body)
@classmethod
def _WriteOutDirsStatus(cls, out_dirs_status):
"""Writes the status for each of the dirs in the dict.
Args:
out_dirs_status: dict {string, EXITCODE}: Dict of dir -> exit status.
"""
for k, v in out_dirs_status.items():
FileUtils.RemoveFiles([os.path.join(k, x) for x in Runner.EXITCODE_FILE.values()])
status_file = Runner.EXITCODE_FILE.get(v, '')
if not status_file: continue
FileUtils.CreateFileWithData(os.path.join(k, status_file))
@classmethod
def _SendFinalStatusMail(cls, successful_run, failed_run, aborted_task, time_taken):
"""Sends the final status mail if required.
Args:
      successful_run: list: Tasks that ran successfully.
      failed_run: list: Tasks that failed.
      aborted_task: string: The task that aborted the pipeline, if any.
time_taken: float: Time taken in seconds.
"""
if not successful_run and not failed_run: return
if not failed_run:
receiver = Flags.ARGS.success_mail
status_description = 'SUCCESS'
else:
receiver = Flags.ARGS.failure_mail
status_description = 'ABORT FAIL' if aborted_task else 'FAIL'
# Check if there is no receiver for the mail.
if not receiver: return
mail_domain = Flags.ARGS.mail_domain
subject = "[%s:%s] Final Status: %s" % (PipelineConfig.Instance().pipeline_id(),
PipelineConfig.Instance().pipeline_date(),
status_description)
body = 'Aborted by: %s\n\n' % aborted_task if aborted_task else ''
body += ('Successful tasks: %d\n%s\n\n'
'Failed tasks: %d\n%s\n\n'
'Total Time: %.2fs.\n'
'\n%s\n\n' %
(len(successful_run), json.dumps(successful_run, indent=2),
len(failed_run), json.dumps(failed_run, indent=2),
time_taken,
PipelineConfig.Instance().GetConfigString()))
Mailer().send_simple_message(
PipelineUtils.ZeusEmailId(mail_domain), [receiver], subject, body)
def main():
try:
Runner.Init(Flags.PARSER)
Flags.InitArgs()
return Runner.Run()
except KeyboardInterrupt as e:
TermColor.Warning('KeyboardInterrupt')
return 1
if __name__ == '__main__':
sys.exit(main())
| 35.988453 | 99 | 0.656549 |
f74a1d08e0902583b5cd19cd039aea131ec0c4a4 | 5,506 | py | Python | jenkinsapi_utils/jenkins_launcher.py | ifwe/jenkinsapi | 31a7fbb07efcd48e226f7dcf643fd2a2625416c0 | [
"MIT"
] | 1 | 2015-01-12T14:15:59.000Z | 2015-01-12T14:15:59.000Z | jenkinsapi_utils/jenkins_launcher.py | moustuk/jenkinsapi-1 | d18c1e669965c209093763f3295f79c9d3ccdeea | [
"MIT"
] | null | null | null | jenkinsapi_utils/jenkins_launcher.py | moustuk/jenkinsapi-1 | d18c1e669965c209093763f3295f79c9d3ccdeea | [
"MIT"
] | null | null | null | import os
import time
import Queue
import random
import shutil
import logging
import datetime
import tempfile
import requests
import threading
import subprocess
import pkg_resources
from jenkinsapi.jenkins import Jenkins
from jenkinsapi.custom_exceptions import JenkinsAPIException
log = logging.getLogger(__name__)
class FailedToStart(Exception):
pass
class TimeOut(Exception):
pass
class StreamThread(threading.Thread):
def __init__(self, name, q, stream, fn_log):
threading.Thread.__init__(self)
self.name = name
self.q = q
self.stream = stream
self.fn_log = fn_log
def run(self):
log.info("Starting %s", self.name)
while True:
line = self.stream.readline()
if line:
self.fn_log(line.rstrip())
self.q.put((self.name, line))
else:
break
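        # A (name, None) pair tells the consumer that this stream has ended.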
self.q.put((self.name, None))
class JenkinsLancher(object):
"""
Launch jenkins
"""
JENKINS_WAR_URL = "http://mirrors.jenkins-ci.org/war/latest/jenkins.war"
def __init__(self, war_path, plugin_urls=None):
self.war_path = war_path
self.war_directory, self.war_filename = os.path.split(self.war_path)
self.jenkins_home = tempfile.mkdtemp(prefix='jenkins-home-')
self.jenkins_process = None
self.q = Queue.Queue()
self.plugin_urls = plugin_urls or []
self.http_port = random.randint(9000, 10000)
def update_war(self):
os.chdir(self.war_directory)
if os.path.exists(self.war_path):
log.info("We already have the War file...")
else:
log.info("Redownloading Jenkins")
subprocess.check_call('./get-jenkins-war.sh')
def update_config(self):
config_dest = os.path.join(self.jenkins_home, 'config.xml')
config_dest_file = open(config_dest, 'w')
config_source = pkg_resources.resource_string('jenkinsapi_tests.systests', 'config.xml')
config_dest_file.write(config_source.encode('UTF-8'))
def install_plugins(self):
for i, url in enumerate(self.plugin_urls):
self.install_plugin(url, i)
def install_plugin(self, hpi_url, i):
plugin_dir = os.path.join(self.jenkins_home, 'plugins')
if not os.path.exists(plugin_dir):
os.mkdir(plugin_dir)
log.info("Downloading %s", hpi_url)
log.info("Plugins will be installed in '%s'" % plugin_dir)
# FIXME: This is kinda ugly but works
filename = "plugin_%s.hpi" % i
plugin_path = os.path.join(plugin_dir, filename)
with open(plugin_path, 'wb') as h:
request = requests.get(hpi_url)
h.write(request.content)
def stop(self):
log.info("Shutting down jenkins.")
self.jenkins_process.terminate()
self.jenkins_process.wait()
shutil.rmtree(self.jenkins_home)
def block_until_jenkins_ready(self, timeout):
start_time = datetime.datetime.now()
timeout_time = start_time + datetime.timedelta(seconds=timeout)
while True:
try:
                Jenkins('http://localhost:%d' % self.http_port)
                log.info('Jenkins is finally ready for use.')
                return
except JenkinsAPIException:
log.info('Jenkins is not yet ready...')
if datetime.datetime.now() > timeout_time:
raise TimeOut('Took too long for Jenkins to become ready...')
time.sleep(5)
def start(self, timeout=60):
self.update_war()
self.update_config()
self.install_plugins()
os.environ['JENKINS_HOME'] = self.jenkins_home
os.chdir(self.war_directory)
jenkins_command = ['java', '-jar', self.war_filename,
'--httpPort=%d' % self.http_port]
log.info("About to start Jenkins...")
log.info("%s> %s", os.getcwd(), " ".join(jenkins_command))
self.jenkins_process = subprocess.Popen(
jenkins_command, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
threads = [
StreamThread('out', self.q, self.jenkins_process.stdout, log.info),
StreamThread('err', self.q, self.jenkins_process.stderr, log.warn)
]
# Start the threads
for t in threads:
t.start()
while True:
try:
streamName, line = self.q.get(block=True, timeout=timeout)
except Queue.Empty:
log.warn("Input ended unexpectedly")
break
else:
if line:
if 'Failed to initialize Jenkins' in line:
raise FailedToStart(line)
if 'Invalid or corrupt jarfile' in line:
raise FailedToStart(line)
if 'is fully up and running' in line:
log.info(line)
return
else:
log.warn('Stream %s has terminated', streamName)
self.block_until_jenkins_ready(timeout)
if __name__ == '__main__':
logging.basicConfig()
logging.getLogger('').setLevel(logging.INFO)
log.info("Hello!")
jl = JenkinsLancher(
'/home/sal/workspace/jenkinsapi/src/jenkinsapi_tests/systests/jenkins.war'
)
jl.start()
log.info("Jenkins was launched...")
time.sleep(30)
log.info("...now to shut it down!")
jl.stop()
| 30.087432 | 96 | 0.599346 |
f74a2896a1d27cb39eb9be8b7f90b3a49f353807 | 38,296 | py | Python | dbio/dss/__init__.py | DataBiosphere/data-store-cli | d83776ac68d3c6c86de1714487803bbdf705fddf | [
"MIT"
] | null | null | null | dbio/dss/__init__.py | DataBiosphere/data-store-cli | d83776ac68d3c6c86de1714487803bbdf705fddf | [
"MIT"
] | 41 | 2020-01-13T22:19:19.000Z | 2020-03-16T23:11:37.000Z | dbio/dss/__init__.py | DataBiosphere/data-store-cli | d83776ac68d3c6c86de1714487803bbdf705fddf | [
"MIT"
] | null | null | null | """
Data Storage System
*******************
"""
import errno
import functools
import json
from collections import defaultdict, namedtuple
import concurrent.futures
from datetime import datetime
from fnmatch import fnmatchcase
import hashlib
import os
import re
import tempfile
import time
import uuid
from io import open
import requests
from atomicwrites import atomic_write
from requests.exceptions import ChunkedEncodingError, ConnectionError, ReadTimeout
from dbio.dss.util import iter_paths, object_name_builder, hardlink, atomic_overwrite
from glob import escape as glob_escape
from dbio.util import tsv
from ..util import SwaggerClient, DEFAULT_THREAD_COUNT
from ..util.exceptions import SwaggerAPIException
from .. import logger
from .upload_to_cloud import upload_to_cloud
class DSSFile(namedtuple('DSSFile', ['name', 'uuid', 'version', 'sha256', 'size', 'indexed', 'replica'])):
"""
Local representation of a file on the DSS
"""
@classmethod
def from_manifest_row(cls, row, replica):
return cls(name=row['file_name'],
uuid=row['file_uuid'],
version=row['file_version'],
sha256=row['file_sha256'],
size=row['file_size'],
indexed=False,
replica=replica)
@classmethod
def from_dss_bundle_response(cls, file_dict, replica):
return cls(name=file_dict['name'],
uuid=file_dict['uuid'],
version=file_dict['version'],
sha256=file_dict['sha256'],
size=file_dict['size'],
indexed=file_dict['indexed'],
replica=replica)
@classmethod
def for_bundle_manifest(cls, manifest_bytes, bundle_uuid, version, replica):
"""
Even though the bundle manifest is not a DSS file, we need to wrap its info in a DSSFile object for consistency
and logging purposes.
"""
return cls(name='bundle.json',
uuid=bundle_uuid,
version=version,
sha256=hashlib.sha256(manifest_bytes).hexdigest(),
size=len(manifest_bytes),
indexed=False,
replica=replica)
class DSSClient(SwaggerClient):
"""
Client for the Data Storage Service API
"""
# Note: there are more API methods available than are defined here.
# See docstring in ``dbio/util/__init__.py``.
UPLOAD_BACKOFF_FACTOR = 1.618
threads = DEFAULT_THREAD_COUNT
def __init__(self, *args, **kwargs):
super(DSSClient, self).__init__(*args, **kwargs)
self.commands += [self.upload, self.download, self.download_manifest, self.create_version,
self.download_collection]
def create_version(self):
"""
Prints a timestamp that can be used for versioning
"""
print(self._create_version())
def _create_version(self):
return datetime.utcnow().strftime("%Y-%m-%dT%H%M%S.%fZ")
def upload(self, src_dir, replica, staging_bucket, timeout_seconds=1200, no_progress=False,
bundle_uuid=None):
"""
Upload a directory of files from the local filesystem and create a bundle containing the uploaded files.
:param str src_dir: file path to a directory of files to upload to the replica.
:param str replica: the replica to upload to. The supported replicas are: `aws` for Amazon Web Services, and
`gcp` for Google Cloud Platform. [aws, gcp]
:param str staging_bucket: a client controlled AWS S3 storage bucket to upload from.
:param int timeout_seconds: the time to wait for a file to upload to replica.
:param bool no_progress: if set, will not report upload progress. Note that even if this flag
is not set, progress will not be reported if the logging level is higher
than INFO or if the session is not interactive.
Upload a directory of files from the local filesystem and create a bundle containing the uploaded files.
This method requires the use of a client-controlled object storage bucket to stage the data for upload.
"""
bundle_uuid = bundle_uuid if bundle_uuid else str(uuid.uuid4())
version = datetime.utcnow().strftime("%Y-%m-%dT%H%M%S.%fZ")
files_to_upload, files_uploaded = [], []
for filename in iter_paths(src_dir):
full_file_name = filename.path
files_to_upload.append(open(full_file_name, "rb"))
logger.info("Uploading %i files from %s to %s", len(files_to_upload), src_dir, staging_bucket)
file_uuids, uploaded_keys, abs_file_paths = upload_to_cloud(files_to_upload, staging_bucket=staging_bucket,
replica=replica, from_cloud=False,
log_progress=not no_progress)
for file_handle in files_to_upload:
file_handle.close()
filenames = [object_name_builder(p, src_dir) for p in abs_file_paths]
filename_key_list = list(zip(filenames, file_uuids, uploaded_keys))
for filename, file_uuid, key in filename_key_list:
filename = filename.replace('\\', '/') # for windows paths
if filename.startswith('/'):
filename = filename.lstrip('/')
logger.info("File %s: registering...", filename)
# Generating file data
creator_uid = self.config.get("creator_uid", 0)
source_url = "s3://{}/{}".format(staging_bucket, key)
logger.info("File %s: registering from %s -> uuid %s", filename, source_url, file_uuid)
response = self.put_file._request(dict(
uuid=file_uuid,
bundle_uuid=bundle_uuid,
version=version,
creator_uid=creator_uid,
source_url=source_url
))
files_uploaded.append(dict(name=filename, version=version, uuid=file_uuid, creator_uid=creator_uid))
if response.status_code in (requests.codes.ok, requests.codes.created):
logger.info("File %s: Sync copy -> %s", filename, version)
else:
assert response.status_code == requests.codes.accepted
logger.info("File %s: Async copy -> %s", filename, version)
timeout = time.time() + timeout_seconds
wait = 1.0
while time.time() < timeout:
try:
self.head_file(uuid=file_uuid, replica="aws", version=version)
break
except SwaggerAPIException as e:
if e.code != requests.codes.not_found:
msg = "File {}: Unexpected server response during registration"
req_id = 'X-AWS-REQUEST-ID: {}'.format(response.headers.get("X-AWS-REQUEST-ID"))
raise RuntimeError(msg.format(filename), req_id)
time.sleep(wait)
wait = min(60.0, wait * self.UPLOAD_BACKOFF_FACTOR)
else:
# timed out. :(
req_id = 'X-AWS-REQUEST-ID: {}'.format(response.headers.get("X-AWS-REQUEST-ID"))
raise RuntimeError("File {}: registration FAILED".format(filename), req_id)
logger.debug("Successfully uploaded file")
file_args = [{'indexed': file_["name"].endswith(".json"),
'name': file_['name'],
'version': file_['version'],
'uuid': file_['uuid']} for file_ in files_uploaded]
logger.info("%s", "Bundle {}: Registering...".format(bundle_uuid))
response = self.put_bundle(uuid=bundle_uuid,
version=version,
replica=replica,
creator_uid=creator_uid,
files=file_args)
logger.info("%s", "Bundle {}: Registered successfully".format(bundle_uuid))
return {
"bundle_uuid": bundle_uuid,
"creator_uid": creator_uid,
"replica": replica,
"version": response["version"],
"files": files_uploaded
}
def download(self,
bundle_uuid,
replica,
version="",
download_dir="",
metadata_filter=('*',),
data_filter=('*',),
no_metadata=False,
no_data=False,
num_retries=10,
min_delay_seconds=0.25):
"""
Download a bundle and save it to the local filesystem as a directory.
:param str bundle_uuid: The uuid of the bundle to download
:param str replica: the replica to download from. The supported replicas are: `aws` for Amazon Web Services, and
`gcp` for Google Cloud Platform. [aws, gcp]
:param str version: The version to download, else if not specified, download the latest. The version is a
timestamp of bundle creation in RFC3339
:param str download_dir: The directory into which to download
:param iterable metadata_filter: One or more shell patterns against which all metadata files in the bundle will
be matched case-sensitively. A file is considered a metadata file if the
`indexed` property in the manifest is set. If and only if a metadata file
matches any of the patterns in `metadata_filter` will it be downloaded.
:param iterable data_filter: One or more shell patterns against which all data files in the bundle will be
matched case-sensitively. A file is considered a data file if the `indexed` property
in the manifest is not set. If and only if a data file matches any of the patterns in
`data_filter` will it be downloaded.
:param no_metadata: Exclude metadata files. Cannot be set when --metadata-filter is also set.
:param no_data: Exclude data files. Cannot be set when --data-filter is also set.
:param int num_retries: The initial quota of download failures to accept before exiting due to failures.
The number of retries increases and decreases as file chunks succeed and fail.
:param float min_delay_seconds: The minimum number of seconds to wait in between retries.
Download a bundle and save it to the local filesystem as a directory.
By default, all data and metadata files are downloaded. To disable the downloading of data, use the
`--no-data` flag if using the CLI or pass the `no_data=True` argument if calling the `download()` API method.
Likewise, to disable the downloading of metadata, use the `--no-metadata` flag for the CLI or pass the
`no_metadata=True` argument if calling the `download()` API method.
If a retryable exception occurs, we wait a bit and retry again. The delay increases each time we fail and
decreases each time we successfully read a block. We set a quota for the number of failures that goes up with
every successful block read and down with each failure.
"""
if no_metadata:
if metadata_filter != ('*',):
raise ValueError('--metadata-filter and --no-metadata are mutually exclusive options.')
metadata_filter = ('',)
if no_data:
if data_filter != ('*',):
raise ValueError('--data-filter and --no-data are mutually exclusive options.')
data_filter = ('',)
context = DownloadContext(download_dir=download_dir,
dss_client=self,
replica=replica,
num_retries=num_retries,
min_delay_seconds=min_delay_seconds)
with context.runner:
context.download_bundle(bundle_uuid, version, metadata_filter, data_filter)
def download_manifest(self,
manifest,
replica,
layout='none',
no_metadata=False,
no_data=False,
num_retries=10,
min_delay_seconds=0.25,
download_dir=''):
"""
Process the given manifest file in TSV (tab-separated values) format and download the files referenced by it.
:param str layout: The layout of the downloaded files. Currently two options are supported, 'none' (the
default), and 'bundle'.
:param str manifest: The path to a TSV (tab-separated values) file listing files to download. If the directory
for download already contains the manifest, the manifest will be overwritten to include a column with paths
into the filestore.
:param str replica: The replica from which to download. The supported replicas are: `aws` for Amazon Web
Services, and `gcp` for Google Cloud Platform. [aws, gcp]
:param no_metadata: Exclude metadata files. Cannot be set when --metadata-filter is also set.
:param no_data: Exclude data files. Cannot be set when --data-filter is also set.
:param int num_retries: The initial quota of download failures to accept before exiting due to
failures. The number of retries increases and decreases as file chunks succeed and fail.
:param float min_delay_seconds: The minimum number of seconds to wait in between retries for downloading any
file
:param str download_dir: The directory into which to download
Files are always downloaded to a cache / filestore directory called '.dbio'. This directory is created in the
current directory where download is initiated. A copy of the manifest used is also written to the current
directory. This manifest has an added column that lists the paths of the files within the '.dbio' filestore.
The default layout is **none**. In this layout all of the files are downloaded to the filestore and the
recommended way of accessing the files in by parsing the manifest copy that's written to the download
directory.
The bundle layout still downloads all of the files to the filestore. For each bundle mentioned in the
manifest a directory is created. All relevant metadata files for each bundle are linked into these
directories in addition to relevant data files mentioned in the manifest.
Each row in the manifest represents one file in DSS. The manifest must have a header row. The header row
must declare the following columns:
- `bundle_uuid` - the UUID of the bundle containing the file in DSS.
- `bundle_version` - the version of the bundle containing the file in DSS.
- `file_name` - the name of the file as specified in the bundle.
- `file_uuid` - the UUID of the file in the DSS.
- `file_sha256` - the SHA-256 hash of the file.
- `file_size` - the size of the file.
The TSV may have additional columns. Those columns will be ignored. The ordering of the columns is
insignificant because the TSV is required to have a header row.
This download format will serve as the main storage format for downloaded files. If a user specifies a different
format for download (coming in the future) the files will first be downloaded in this format, then hard-linked
to the user's preferred format.
"""
context = ManifestDownloadContext(manifest=manifest,
download_dir=download_dir,
dss_client=self,
replica=replica,
num_retries=num_retries,
min_delay_seconds=min_delay_seconds)
if layout == 'none':
if no_metadata or no_data:
raise ValueError("--no-metadata and --no-data are only compatible with the 'bundle' layout")
context.download_manifest()
elif layout == 'bundle':
context.download_manifest_bundle_layout(no_metadata, no_data)
else:
raise ValueError('Invalid layout {} not one of [none, bundle]'.format(layout))
def _serialize_col_to_manifest(self, uuid, replica, version):
"""
Given a collection UUID, uses GET `/collection/{uuid}` to
serialize the collection into a set of dicts that can be
used to generate a manifest file.
Most of the heavy lifting is handled by
:meth:`DSSClient.download_manifest`.
:param uuid: uuid of the collection to serialize
:param replica: replica to query against
:param version: version of the specified collection
"""
errors = 0
rows = []
seen = []
col = self.get_collection(uuid=uuid, replica=replica, version=version)['contents']
context = DownloadContext(download_dir=None, dss_client=self, replica=replica,
num_retries=0, min_delay_seconds=0)
while col:
obj = col.pop()
if obj['type'] == 'file':
# Currently cannot download files not associated with a
# bundle. This is a limitation of :meth:`download_manifest`
errors += 1
logger.warning("Failed to download file %s version %s",
obj['uuid'], obj['version'])
elif obj['type'] == 'collection':
if (obj['uuid'], obj['version']) in seen:
logger.info("Ignoring already-seen collection %s version %s",
obj['uuid'], obj['version'])
continue
seen.append((obj['uuid'], obj['version']))
col.extend(self.get_collection(uuid=obj['uuid'], replica=replica,
version=obj.get('version', ''))['contents'])
elif obj['type'] == 'bundle':
bundle = context._get_full_bundle_manifest(bundle_uuid=obj['uuid'],
version=obj['version'])
rows.extend(({
'bundle_uuid': obj['uuid'],
'bundle_version': obj.get('version', None),
'file_name': f['name'],
'file_sha256': f['sha256'],
'file_uuid': f['uuid'],
'file_size': f['size'],
'file_version': f['version']} for f in bundle['bundle']['files']))
else:
errors += 1
logger.warning("Failed to download file %s version %s",
obj['uuid'], obj['version'])
if errors:
raise RuntimeError("%d download failure(s)..." % errors)
return rows
def download_collection(self, uuid, replica, version=None, download_dir=''):
"""
Download a collection and save it to the local filesystem as a directory.
:param str uuid: The uuid of the collection to download
:param str replica: the replica to download from. The supported
replicas are: `aws` for Amazon Web Services, and `gcp` for
Google Cloud Platform. [aws, gcp]
:param str version: The version to download, else if not specified,
download the latest. The version is a timestamp of bundle creation
in RFC3339
:param str download_dir: The directory into which to download
Download a collection and save it to the local filesystem as a directory.
"""
collection = self._serialize_col_to_manifest(uuid, replica, version)
# Explicitly declare mode `w` (default `w+b`) for Python 3 string compat
with tempfile.NamedTemporaryFile(mode='w') as manifest:
writer = tsv.DictWriter(manifest,
fieldnames=('bundle_uuid',
'bundle_version',
'file_name',
'file_sha256',
'file_uuid',
'file_version',
'file_size'))
writer.writeheader()
writer.writerows(collection)
# Flushing the I/O buffer here is preferable to closing the file
# handle and deleting the temporary file later because within the
# context manager there is a guarantee that the temporary file
# will be deleted when we are done
manifest.flush()
self.download_manifest(manifest=manifest.name, replica=replica,
download_dir=download_dir, layout='bundle')
class TaskRunner(object):
"""
A wrapper for ThreadPoolExecutor that tracks futures for you and allows
dynamic submission of tasks.
"""
def __init__(self, threads=DEFAULT_THREAD_COUNT):
self._executor = concurrent.futures.ThreadPoolExecutor(max_workers=threads)
self._futures = set()
self._errors = 0
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.wait_for_futures()
self._executor.__exit__(exc_type, exc_val, exc_tb)
self.raise_if_errors()
return False
def submit(self, info, task, *args, **kwargs):
"""
Add task to be run.
Should only be called from the main thread or from tasks submitted by this method.
:param info: Something printable
:param task: A callable
"""
future = self._executor.submit(task, *args, **kwargs)
self._futures.add(future)
def process_future(f):
e = f.exception()
if e:
self._errors += 1
logger.warning('Download task failed: %r', info, exc_info=e)
future.add_done_callback(process_future)
def wait_for_futures(self):
"""
Wait for all submitted futures to finish.
Should only be called from the main thread.
"""
# This loop is necessary because futures are being dynamically added
while self._futures:
completed_futures = concurrent.futures.as_completed(self._futures)
self._futures.difference_update(completed_futures)
def raise_if_errors(self):
if self._errors:
raise RuntimeError('{} download task(s) failed.'.format(self._errors))
class DownloadContext(object):
# This variable is the configuration for download_manifest_v2. It specifies the length of the names of nested
# directories for downloaded files.
DIRECTORY_NAME_LENGTHS = [2, 4]
def __init__(self, download_dir, dss_client, replica, num_retries, min_delay_seconds):
self.runner = TaskRunner()
self.download_dir = download_dir
self.dss_client = dss_client
self.replica = replica
self.num_retries = num_retries
self.min_delay_seconds = min_delay_seconds
def download_bundle(self, bundle_uuid, version="", metadata_filter=('*',), data_filter=('*',)):
"""
Submits a download task for each file in the bundle to the task runner.
Note that this method can only be used once per instantiation of the context.
"""
logger.info('Downloading bundle %s version %s ...', bundle_uuid, version)
manifest = self._get_full_bundle_manifest(bundle_uuid, version)
bundle_version = manifest['bundle']['version']
bundle_fqid = bundle_uuid + '.' + bundle_version
bundle_dir = os.path.join(self.download_dir, bundle_fqid)
# Download bundle.json (manifest for bundle as a file)
manifest_bytes = json.dumps(manifest, indent=4, sort_keys=True).encode()
manifest_dss_file = DSSFile.for_bundle_manifest(manifest_bytes, bundle_uuid, bundle_version, self.replica)
task = functools.partial(self._download_bundle_manifest,
manifest_bytes,
bundle_dir,
manifest_dss_file)
self.runner.submit(manifest_dss_file, task)
for file_ in manifest['bundle']['files']:
dss_file = DSSFile.from_dss_bundle_response(file_, self.replica)
filename = file_.get("name", dss_file.uuid)
walking_dir = bundle_dir
globs = metadata_filter if file_['indexed'] else data_filter
if not any(fnmatchcase(filename, glob) for glob in globs):
continue
intermediate_path, filename_base = os.path.split(filename)
if intermediate_path:
walking_dir = os.path.join(walking_dir, intermediate_path)
logger.info("File %s: Retrieving...", filename)
file_path = os.path.join(walking_dir, filename_base)
task = functools.partial(self._download_and_link_to_filestore, dss_file, file_path)
self.runner.submit(dss_file, task)
def _download_bundle_manifest(self, manifest_bytes, bundle_dir, dss_file):
dest_path = self._file_path(dss_file.sha256, self.download_dir)
if os.path.exists(dest_path):
logger.info("Skipping download of '%s' because it already exists at '%s'.", dss_file.name, dest_path)
else:
self._make_dirs_if_necessary(dest_path)
with atomic_overwrite(dest_path, mode="wb") as fh:
fh.write(manifest_bytes)
file_path = os.path.join(bundle_dir, dss_file.name)
self._make_dirs_if_necessary(file_path)
hardlink(dest_path, file_path)
def _get_full_bundle_manifest(self, bundle_uuid, version):
"""
Takes care of paging through the bundle and checks for name collisions.
"""
pages = self.dss_client.get_bundle.paginate(uuid=bundle_uuid,
version=version if version else None,
replica=self.replica)
files = {}
ordered_files = []
for page in pages:
ordered_files += page['bundle']['files']
for file_ in page['bundle']['files']:
# The file name collision check is case-insensitive even if the local file system we're running on is
# case-sensitive. We do this in order to get consistent download behavior on all operating systems and
# file systems. The case of file names downloaded to a case-sensitive system will still match exactly
# what's specified in the bundle manifest. We just don't want a bundle with files 'Foo' and 'foo' to
# create two files on one system and one file on the other. Allowing this to happen would, in the best
# case, overwrite Foo with foo locally. A resumed download could produce a local file called foo that
# contains a mix of data from Foo and foo.
filename = file_.get("name", file_["uuid"]).lower()
if files.setdefault(filename, file_) is not file_:
raise ValueError("Bundle {bundle_uuid} version {version} contains multiple files named "
"'{filename}' or a case derivation thereof"
.format(filename=filename, bundle_uuid=bundle_uuid, version=version))
manifest = page
# there will always be one page (or else we would have gotten a 404)
# noinspection PyUnboundLocalVariable
manifest['bundle']['files'] = ordered_files
return manifest
def _download_to_filestore(self, dss_file):
"""
Attempt to download the data and save it in the 'filestore' location dictated by self._file_path()
"""
dest_path = self._file_path(dss_file.sha256, self.download_dir)
if os.path.exists(dest_path):
logger.info("Skipping download of '%s' because it already exists at '%s'.", dss_file.name, dest_path)
else:
logger.debug("Downloading '%s' to '%s'.", dss_file.name, dest_path)
self._download_file(dss_file, dest_path)
logger.info("Download '%s' to '%s'.", dss_file.name, dest_path)
return dest_path
def _download_and_link_to_filestore(self, dss_file, file_path):
file_store_path = self._download_to_filestore(dss_file)
self._make_dirs_if_necessary(file_path)
hardlink(file_store_path, file_path)
def _download_file(self, dss_file, dest_path):
"""
Attempt to download the data. If a retryable exception occurs, we wait a bit and retry again. The delay
increases each time we fail and decreases each time we successfully read a block. We set a quota for the
number of failures that goes up with every successful block read and down with each failure.
If we can, we will attempt HTTP resume. However, we verify that the server supports HTTP resume. If the
ranged get doesn't yield the correct header, then we start over.
"""
self._make_dirs_if_necessary(dest_path)
with atomic_overwrite(dest_path, mode="wb") as fh:
if dss_file.size == 0:
return
download_hash = self._do_download_file(dss_file, fh)
if download_hash.lower() != dss_file.sha256.lower():
# No need to delete what's been written. atomic_overwrite ensures we're cleaned up
logger.error("%s", "File {}: GET FAILED. Checksum mismatch.".format(dss_file.uuid))
raise ValueError("Expected sha256 {} Received sha256 {}".format(
dss_file.sha256.lower(), download_hash.lower()))
@classmethod
def _make_dirs_if_necessary(cls, dest_path):
directory, _ = os.path.split(dest_path)
if directory:
try:
os.makedirs(directory)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def _do_download_file(self, dss_file, fh):
"""
Abstracts away complications for downloading a file, handles retries and delays, and computes its hash
"""
hasher = hashlib.sha256()
delay = self.min_delay_seconds
retries_left = self.num_retries
while True:
try:
response = self.dss_client.get_file._request(
dict(uuid=dss_file.uuid, version=dss_file.version, replica=dss_file.replica),
stream=True,
headers={
'Range': "bytes={}-".format(fh.tell())
},
)
try:
if not response.ok:
logger.error("%s", "File {}: GET FAILED.".format(dss_file.uuid))
logger.error("%s", "Response: {}".format(response.text))
break
consume_bytes = int(fh.tell())
server_start = 0
content_range_header = response.headers.get('Content-Range', None)
if content_range_header is not None:
cre = re.compile(r"bytes (\d+)-(\d+)")
mo = cre.search(content_range_header)
if mo is not None:
server_start = int(mo.group(1))
consume_bytes -= server_start
assert consume_bytes >= 0
if server_start > 0 and consume_bytes == 0:
logger.info("%s", "File {}: Resuming at {}.".format(
dss_file.uuid, server_start))
elif consume_bytes > 0:
logger.info("%s", "File {}: Resuming at {}. Dropping {} bytes to match".format(
dss_file.uuid, server_start, consume_bytes))
while consume_bytes > 0:
bytes_to_read = min(consume_bytes, 1024 * 1024)
content = response.iter_content(chunk_size=bytes_to_read)
chunk = next(content)
if chunk:
consume_bytes -= len(chunk)
for chunk in response.iter_content(chunk_size=1024 * 1024):
if chunk:
fh.write(chunk)
hasher.update(chunk)
retries_left = min(retries_left + 1, self.num_retries)
delay = max(delay / 2, self.min_delay_seconds)
break
finally:
response.close()
except (ChunkedEncodingError, ConnectionError, ReadTimeout):
if retries_left > 0:
logger.info("%s", "File {}: GET FAILED. Attempting to resume.".format(dss_file.uuid))
time.sleep(delay)
delay *= 2
retries_left -= 1
continue
raise
return hasher.hexdigest()
@classmethod
def _file_path(cls, checksum, download_dir):
"""
returns a file's relative local path based on the nesting parameters and the files hash
:param checksum: a string checksum
:param download_dir: root directory for filestore
:return: relative Path object
"""
checksum = checksum.lower()
file_prefix = '_'.join(['files'] + list(map(str, cls.DIRECTORY_NAME_LENGTHS)))
path_pieces = [download_dir, '.dbio', 'v2', file_prefix]
checksum_index = 0
assert(sum(cls.DIRECTORY_NAME_LENGTHS) <= len(checksum))
for prefix_length in cls.DIRECTORY_NAME_LENGTHS:
path_pieces.append(checksum[checksum_index:(checksum_index + prefix_length)])
checksum_index += prefix_length
path_pieces.append(checksum)
return os.path.join(*path_pieces)
class ManifestDownloadContext(DownloadContext):
def __init__(self, manifest, *args, **kwargs):
super(ManifestDownloadContext, self).__init__(*args, **kwargs)
self.manifest = manifest
def download_manifest(self):
"""
Download the manifest
Note that this method can only be used once per instantiation of context.
"""
fieldnames, rows = self._parse_manifest(self.manifest)
with self.runner:
for row in rows:
dss_file = DSSFile.from_manifest_row(row, self.replica)
self.runner.submit(dss_file, self._download_to_filestore, dss_file)
self._write_output_manifest()
def download_manifest_bundle_layout(self, no_metadata, no_data):
"""
Download the manifest, fetching files into the filestore and linking them into per-bundle directories.
Note that this method can only be used once per instantiation of context.
"""
with self.runner:
self._download_manifest_tasks(no_metadata, no_data)
self._write_output_manifest()
logger.info('Primary copies of the files have been downloaded to `.dbio` and linked '
'into per-bundle subdirectories of the current directory.')
def _download_manifest_tasks(self, no_metadata, no_data):
with open(self.manifest) as f:
bundles = defaultdict(set)
# unicode_literals is on so all strings are unicode. CSV wants a str so we need to jump through a hoop.
reader = tsv.DictReader(f)
for row in reader:
bundles[(row['bundle_uuid'], row['bundle_version'])].add(row['file_name'])
for (bundle_uuid, bundle_version), data_files in bundles.items():
if no_data:
data_filter = ('',)
else:
data_filter = tuple(glob_escape(file_name) for file_name in data_files if file_name)
if no_metadata:
metadata_filter = ('',)
else:
metadata_filter = ('*',)
task = functools.partial(self.download_bundle, bundle_uuid,
data_filter=data_filter, metadata_filter=metadata_filter)
self.runner.submit(bundle_uuid, task)
def _write_output_manifest(self):
"""
Adds the file path column to the manifest and writes the copy to the current directory. If the original manifest
is in the current directory it is overwritten with a warning.
"""
output = os.path.basename(self.manifest)
fieldnames, source_manifest = self._parse_manifest(self.manifest)
if 'file_path' not in fieldnames:
fieldnames.append('file_path')
with atomic_write(output, overwrite=True, newline='') as f:
writer = tsv.DictWriter(f, fieldnames)
writer.writeheader()
for row in source_manifest:
row['file_path'] = self._file_path(row['file_sha256'], self.download_dir)
writer.writerow(row)
if os.path.isfile(output):
logger.warning('Overwriting manifest %s', output)
logger.info('Rewrote manifest %s with additional column containing path to downloaded files.', output)
@classmethod
def _parse_manifest(cls, manifest):
with open(manifest) as f:
reader = tsv.DictReader(f)
return reader.fieldnames, list(reader)
| 49.160462 | 120 | 0.59283 |
f74a2c02757d56d4e71b63b64ba38781b6ff5ae8 | 966 | py | Python | tests/test_irl.py | sethmlarson/irl | f28cc40faa8ef7991a4116fa38d1ea2be21ec081 | [
"MIT"
] | 7 | 2019-07-10T10:21:33.000Z | 2020-09-22T09:05:09.000Z | tests/test_irl.py | python-http/yaul | f28cc40faa8ef7991a4116fa38d1ea2be21ec081 | [
"MIT"
] | 2 | 2019-09-25T17:26:03.000Z | 2019-11-15T06:18:18.000Z | tests/test_irl.py | python-http/yaul | f28cc40faa8ef7991a4116fa38d1ea2be21ec081 | [
"MIT"
] | null | null | null | import pytest
import irl
def test_equality_on_normalize():
url1 = irl.URL.parse("http://ヒ.example.com/abc%af?ヒq%CC#%dE")
url2 = irl.URL.parse("HTTP://xn--pdk.eXaMpLe.CoM/abc%AF?%E3%83%92q%cc#%De")
assert url1 == url2
@pytest.mark.parametrize(
["url", "addr"],
[
("http://example.com", ("example.com", 80)),
("https://example.com", ("example.com", 443)),
("https://example.com:1337", ("example.com", 1337)),
("http://[::1]:1", ("::1", 1)),
("http://[ffff::1%eth0]:443", ("ffff::1%eth0", 443)),
("http://[ffff::1%25eth0]:80", ("ffff::1%eth0", 80)),
],
)
def test_url_to_address(url, addr):
assert irl.URL.parse(url).address() == addr
@pytest.mark.parametrize(
"url", ["httpq://example.com/", "/google.com", "http+unix://%2Ftmp%2Fdocker.sock"]
)
def test_unknown_host_or_port_on_address(url):
url = irl.URL.parse(url)
with pytest.raises(irl.URLError):
url.address()
| 28.411765 | 86 | 0.582816 |
f74a2e37020b9b9e0702331ff6e7a49c2a6f4eb8 | 3,222 | py | Python | data/process_data.py | shahad-bit/Disaster-Response-Pipeline | 76a86db14845c8d8ba8d87c81112580c96b2b0d4 | [
"CNRI-Python"
] | null | null | null | data/process_data.py | shahad-bit/Disaster-Response-Pipeline | 76a86db14845c8d8ba8d87c81112580c96b2b0d4 | [
"CNRI-Python"
] | 14 | 2020-01-10T22:26:06.000Z | 2022-02-10T01:16:49.000Z | data/process_data.py | shahad-bit/Disaster-Response-Pipeline | 76a86db14845c8d8ba8d87c81112580c96b2b0d4 | [
"CNRI-Python"
] | null | null | null | import sys
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
"""Load disaster messages and categories from csv files.
Arguments:
messages_filepath {String} -- disaster message file path
categories_filepath {String} -- disaster categories file path
Returns:
pandas dataframe -- merged disaster data
"""
messages = pd.read_csv(messages_filepath)
categories = pd.read_csv(categories_filepath)
df = messages.merge(categories, on='id')
return df
def clean_data(df):
"""Preprocess data
Arguments:
df {pandas dataframe} -- disaster data
"""
# create a dataframe of the 36 individual category columns
categories = df['categories'].str.split(';', expand=True)
# select the first row of the categories dataframe
row = categories.iloc[0]
# use this row to extract a list of new column names for categories.
# one way is to apply a lambda function that takes everything
# up to the second to last character of each string with slicing
category_colnames = [val.split('-')[0] for val in row]
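# e.g. (illustrative) a raw cell like "related-1;request-0;offer-0" expands to three columns
# whose names become ['related', 'request', 'offer'] here.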
print(category_colnames)
# rename the columns of `categories`
categories.columns = category_colnames
for column in categories:
# set each value to be the last character of the string
categories[column] = categories[column].str[-1]
# convert column from string to numeric
categories[column] = categories[column].astype(int)
# drop the original categories column from `df`
df.drop(['categories'], axis=1, inplace=True)
# concatenate the original dataframe with the new `categories` dataframe
df = pd.concat([df, categories], axis=1)
# drop duplicates
df.drop_duplicates(inplace=True)
return df
def save_data(df, database_filename):
"""Store the data in mysql db.
Arguments:
df {pandas dataframe} -- disaster data
database_filename {String} -- path to the db
"""
engine = create_engine('sqlite:///{}'.format(database_filename))
df.to_sql('disaster_response', engine, index=False)
def main():
if len(sys.argv) == 4:
messages_filepath, categories_filepath, database_filepath = sys.argv[1:]
print('Loading data...\n MESSAGES: {}\n CATEGORIES: {}'
.format(messages_filepath, categories_filepath))
df = load_data(messages_filepath, categories_filepath)
print('Cleaning data...')
df = clean_data(df)
print('Saving data...\n DATABASE: {}'.format(database_filepath))
save_data(df, database_filepath)
print('Cleaned data saved to database!')
else:
print('Please provide the filepaths of the messages and categories '\
'datasets as the first and second argument respectively, as '\
'well as the filepath of the database to save the cleaned data '\
'to as the third argument. \n\nExample: python process_data.py '\
'disaster_messages.csv disaster_categories.csv '\
'DisasterResponse.db')
if __name__ == '__main__':
main() | 32.877551 | 80 | 0.658287 |
f74a4e0e253e95f9c690d7cec093a817cbba1f5e | 24,765 | py | Python | crosshair/simplestructs.py | Rik-de-Kort/CrossHair | e0a06969d571aebcc582359afdedf25f38630c0d | [
"MIT"
] | null | null | null | crosshair/simplestructs.py | Rik-de-Kort/CrossHair | e0a06969d571aebcc582359afdedf25f38630c0d | [
"MIT"
] | null | null | null | crosshair/simplestructs.py | Rik-de-Kort/CrossHair | e0a06969d571aebcc582359afdedf25f38630c0d | [
"MIT"
] | null | null | null | import collections.abc
import dataclasses
import functools
import itertools
import numbers
import operator
import sys
from typing import *
from crosshair.util import is_iterable
from crosshair.util import is_hashable
from crosshair.util import name_of_type
class MapBase(collections.abc.MutableMapping):
def __eq__(self, other):
# Make our own __eq__ because the one in abc will hash all of our keys.
if not isinstance(other, collections.abc.Mapping):
return NotImplemented
if len(self) != len(other):
return False
for (k, self_value) in self.items():
found = False
# We do a slow nested loop search because we don't want to hash the key.
for (other_key, other_value) in other.items():
if other_key != k:
continue
if self_value == other_value:
found = True
break
else:
return False
if not found:
return False
return True
def copy(self):
raise NotImplementedError
def __ch_pytype__(self):
return dict
if sys.version_info >= (3, 9):
def __or__(self, other: Mapping) -> Mapping:
union_map = self.copy()
union_map.update(other)
return union_map
__ror__ = __or__
_MISSING = object()
class SimpleDict(MapBase):
"""
Provide a pure Python implementation of a dictionary.
#inv: set(self.keys()) == set(dict(self.items()).keys())
>>> d = SimpleDict([(1, 'one'), (2, 'two')])
>>> d
{1: 'one', 2: 'two'}
>>> d[3] = 'three'
>>> len(d)
3
>>> d[2] = 'cat'
>>> d[2]
'cat'
>>> del d[1]
>>> list(d.keys())
[2, 3]
"""
contents_: MutableSequence
def __init__(self, contents: MutableSequence):
"""
Initialize with the given value.
``contents`` is assumed to not have duplicate keys.
"""
self.contents_ = contents
def __getitem__(self, key, default=_MISSING):
if not is_hashable(key):
raise TypeError("unhashable type")
for (k, v) in self.contents_:
if k == key:
return v
if default is _MISSING:
raise KeyError(key)
return default
def __setitem__(self, key, value):
if not is_hashable(key):
raise TypeError("unhashable type")
for (i, (k, v)) in enumerate(self.contents_):
if k == key:
self.contents_[i] = (k, value)
return
self.contents_.append((key, value))
def __delitem__(self, key):
if not is_hashable(key):
raise TypeError("unhashable type")
for (i, (k, v)) in enumerate(self.contents_):
if k == key:
del self.contents_[i]
return
raise KeyError(key)
def __iter__(self):
return (k for (k, v) in self.contents_)
def __reversed__(self):
return (k for (k, v) in reversed(self.contents_))
def __bool__(self):
return (len(self.contents_) > 0).__bool__()
def __len__(self):
return self.contents_.__len__()
def __repr__(self):
return repr(dict(self.items()))
def items(self):
return self.contents_
def popitem(self):
if not self.contents_:
raise KeyError
(k, v) = self.contents_.pop()
return (k, v)
def copy(self):
return SimpleDict(self.contents_[:])
_DELETED = object()
_NOT_FOUND = object()
class ShellMutableMap(MapBase, collections.abc.MutableMapping):
def __init__(self, inner: Mapping):
self._mutations: MutableMapping = SimpleDict([])
self._inner = inner
self._len = inner.__len__()
def __getitem__(self, key):
ret = self._mutations.get(key, _NOT_FOUND)
if ret is _DELETED:
raise KeyError(key)
elif ret is _NOT_FOUND:
return self._inner.__getitem__(key)
else:
return ret
if sys.version_info >= (3, 8):
def __reversed__(self):
return self._reversed()
def _reversed(self):
deleted = []
mutations = self._mutations
for k in reversed(mutations):
if mutations[k] is _DELETED:
deleted.append(k)
continue
else:
yield k
inner = self._inner
last = None
for k in reversed(inner):
if k in deleted:
continue
else:
yield k
def __iter__(self):
mutations = self._mutations
suppress = list(mutations.keys()) # check against list to avoid hash
for k in self._inner:
if k not in suppress:
yield k
for k, v in self._mutations.items():
if v is not _DELETED:
yield k
def __eq__(self, other):
if not self._mutations:
return self._inner.__eq__(other)
if not isinstance(other, collections.abc.Mapping):
return False
if len(self) != len(other):
return False
for k, v in other.items():
if k not in self or self[k] != v:
return False
return True
def __bool__(self):
return bool(self._len > 0)
def __len__(self):
return self._len
def __setitem__(self, key, val):
if key not in self:
self._len += 1
self._mutations[key] = val
def __delitem__(self, key):
first_hit = self._mutations.get(key, _NOT_FOUND)
if first_hit is _DELETED:
raise KeyError(key)
if first_hit is _NOT_FOUND:
if key not in self._inner:
raise KeyError(key)
self._mutations[key] = _DELETED
self._len -= 1
def __repr__(self):
return repr(dict(self.items()))
def _lastitem(self):
raise KeyError
def pop(self, key, default=_MISSING):
# CPython checks the empty case before attempting to hash the key.
# So this must happen before the hash-ability check:
if self._len == 0:
raise KeyError(key)
try:
value = self[key]
except KeyError:
if default is _MISSING:
raise
return default
else:
del self[key]
return value
def popitem(self):
for key in self._reversed():
val = self.__getitem__(key)
self.__delitem__(key)
return (key, val)
raise KeyError
def copy(self):
m = ShellMutableMap(self._inner)
m._mutations = self._mutations.copy()
return m
def normalize_idx(idx: Any, container_len: int) -> int:
if (idx is not None) and (not hasattr(idx, "__index__")):
raise TypeError("indices must be integers or slices")
if idx < 0:
return idx + container_len
return idx
def check_idx(idx: Any, container_len: int) -> int:
if not hasattr(idx, "__index__"):
raise TypeError("indices must be integers or slices, not str")
normalized_idx = normalize_idx(idx, container_len)
if 0 <= normalized_idx < container_len:
return normalized_idx
raise IndexError(f'index "{idx}" is out of range')
def clamp_slice(s: slice, container_len: int) -> slice:
if s.step < 0:
if s.start < 0 or s.stop >= container_len - 1:
return slice(0, 0, s.step)
def clamper(i: int) -> Optional[int]:
if i < 0:
return None
if i >= container_len:
return container_len - 1
return i
else:
def clamper(i: int) -> Optional[int]:
if i < 0:
return 0
if i > container_len:
return container_len
return i
return slice(clamper(s.start), clamper(s.stop), s.step)
def offset_slice(s: slice, offset: int) -> slice:
return slice(s.start + offset, s.stop + offset, s.step)
def cut_slice(start: int, stop: int, step: int, cut: int) -> Tuple[slice, slice]:
backwards = step < 0
if backwards:
start, stop, step, cut = -start, -stop, -step, -cut
# Modulous with negatives is super hard to reason about, shift everything >= 0:
delta = -min(start, stop, cut)
start, stop, cut = start + delta, stop + delta, cut + delta
if cut < start:
lstart, lstop = cut, cut
rstart, rstop = start, stop
elif cut > stop:
lstart, lstop = start, stop
rstart, rstop = cut, cut
else:
mid = min(cut, stop)
lstart, lstop = start, mid
empties_at_tail = mid % step
if empties_at_tail > 0:
mid += step - empties_at_tail
rstart = mid
rstop = stop
lstart, lstop = lstart - delta, lstop - delta
rstart, rstop = rstart - delta, rstop - delta
if backwards:
lstart, lstop = -lstart, -lstop
rstart, rstop = -rstart, -rstop
step = -step
return (slice(lstart, lstop, step), slice(rstart, rstop, step))
def indices(s: slice, container_len: int) -> Tuple[int, int, int]:
"""
Mimic ``slice.indices``.
This is a pure Python version of ``slice.indices()`` that doesn't force integers
into existence.
"""
start, stop, step = s.start, s.stop, s.step
if (step is not None) and (not hasattr(step, "__index__")):
raise TypeError(
"slice indices must be integers or None or have an __index__ method"
)
if step is None:
step = 1
elif step <= 0:
# fallback to python implementation (this will realize values)
return s.indices(container_len)
return (
0 if start is None else normalize_idx(start, container_len),
container_len if stop is None else normalize_idx(stop, container_len),
step,
)
@functools.total_ordering
class SeqBase:
def __hash__(self):
return hash(tuple(self))
def __eq__(self, other):
if self is other:
return True
if not is_iterable(other):
return False
if len(self) != len(other):
return False
for myval, otherval in zip(self, other):
if myval is otherval:
continue
if myval != otherval:
return False
return True
def __lt__(self, other):
# NOTE: subclasses will need further type restrictions.
# For example, `[1,2] <= (1,2)` raises a TypeError.
if not is_iterable(other):
return NotImplemented
for v1, v2 in zip(self, other):
if v1 == v2:
continue
return v1 < v2
return len(self) < len(other)
def __bool__(self):
return bool(self.__len__() > 0)
def __add__(self, other):
if isinstance(other, collections.abc.Sequence):
return SequenceConcatenation(self, other)
raise TypeError(f"unsupported operand type(s) for +")
def __radd__(self, other):
if isinstance(other, collections.abc.Sequence):
return SequenceConcatenation(other, self)
raise TypeError(f"unsupported operand type(s) for +")
def __mul__(self, other):
if not isinstance(other, int):
raise TypeError("can't multiply by non-int xx")
if other <= 0:
# A trick to get an empty thing of the same type!:
return self[0:0]
ret = self
for idx in range(1, other):
ret = self.__add__(ret)
return ret
def __rmul__(self, other):
return self.__mul__(other)
@dataclasses.dataclass(eq=False)
class SequenceConcatenation(collections.abc.Sequence, SeqBase):
_first: Sequence
_second: Sequence
_len: Optional[int] = None
def __getitem__(self, i: Union[int, slice]):
"""
Get the item from the concatenation.
raises: IndexError
post: _ == (self._first + self._second)[i]
"""
first, second = self._first, self._second
firstlen, secondlen = len(first), len(second)
totallen = firstlen + secondlen
if isinstance(i, int):
i = check_idx(i, totallen)
return first[i] if i < firstlen else second[i - firstlen]
else:
start, stop, step = i.indices(totallen)
cutpoint = firstlen if step > 0 else firstlen - 1
slice1, slice2 = cut_slice(start, stop, step, cutpoint)
if step > 0:
slice1 = clamp_slice(slice1, firstlen)
slice2 = clamp_slice(offset_slice(slice2, -firstlen), secondlen)
return SequenceConcatenation(first[slice1], second[slice2])
else:
slice1 = clamp_slice(offset_slice(slice1, -firstlen), secondlen)
slice2 = clamp_slice(slice2, firstlen)
return SequenceConcatenation(second[slice1], first[slice2])
def __contains__(self, item):
return self._first.__contains__(item) or self._second.__contains__(item)
def __iter__(self):
return itertools.chain(self._first, self._second)
def __len__(self):
if self._len is None:
self._len = len(self._first) + len(self._second)
return self._len
@dataclasses.dataclass(init=False, eq=False) # type: ignore # (https://github.com/python/mypy/issues/5374)
class SliceView(collections.abc.Sequence, SeqBase):
seq: Sequence
start: int
stop: int
def __init__(self, seq: Sequence, start: int, stop: int):
seqlen = seq.__len__()
if start < 0:
start = 0
if stop > seqlen:
stop = seqlen
if stop < start:
stop = start
self.seq = seq
self.start = start
self.stop = stop
def __getitem__(self, key):
mylen = self.stop - self.start
if type(key) is slice:
start, stop, step = indices(key, mylen)
if step == 1:
# Move truncation into indices helper to avoid the nesting of slices here
return SliceView(self, start, stop)
else:
return list(self)[key]
else:
key = self.start + check_idx(key, mylen)
return self.seq[key]
def __len__(self) -> int:
return self.stop - self.start
def __iter__(self):
for i in range(self.start, self.stop):
yield self.seq[i]
@dataclasses.dataclass(eq=False)
class ShellMutableSequence(collections.abc.MutableSequence, SeqBase):
"""
Wrap a sequence and provide mutating operations without modifying the original.
It reuses portions of the original list as best it can.
"""
inner: Sequence
__hash__ = None # type: ignore
def _spawn(self, items: Sequence) -> "ShellMutableSequence":
# For overriding in subclasses.
return ShellMutableSequence(items)
def __setitem__(self, k, v):
inner = self.inner
if hasattr(inner, "__setitem__"):
inner.__setitem__(k, v)
return
old_len = len(inner)
if isinstance(k, slice):
if not isinstance(v, collections.abc.Iterable):
raise TypeError("can only assign an iterable")
if getattr(v, "__hash__", None) is None:
# Make a copy if the argument is a mutable container.
v = list(v)
start, stop, step = indices(k, old_len)
if step != 1:
# abort cleverness:
newinner = list(inner)
newinner[k] = v
self.inner = newinner
return
else:
newinner = v
elif isinstance(k, numbers.Integral):
k = check_idx(k, old_len)
start, stop = k, k + 1
newinner = [v]
else:
raise TypeError(
f'indices must be integers or slices, not "{name_of_type(k)}"'
)
if stop < start:
stop = start
# At this point, `stop` >= `start`
if start > 0:
newinner = SequenceConcatenation(inner[:start], newinner)
elif stop <= 0:
stop = 0
# At this point, `stop` must be >= 0
if stop < old_len:
newinner = SequenceConcatenation(newinner, inner[stop:])
self.inner = newinner
def __delitem__(self, k):
if isinstance(k, slice):
self.__setitem__(k, [])
else:
mylen = self.inner.__len__()
idx = check_idx(k, mylen)
self.__setitem__(slice(idx, idx + 1, 1), [])
def __add__(self, other):
if isinstance(other, collections.abc.Sequence):
return self._spawn(SequenceConcatenation(self, other))
raise TypeError(f"unsupported operand type(s) for +")
def __radd__(self, other):
if isinstance(other, collections.abc.Sequence):
return self._spawn(SequenceConcatenation(other, self))
raise TypeError(f"unsupported operand type(s) for +")
def __imul__(self, other):
return self._spawn(self * other)
def append(self, item):
inner = self.inner
if hasattr(inner, "append"):
inner.append(item)
else:
self.inner = SequenceConcatenation(inner, [item])
def extend(self, other):
if not isinstance(other, collections.abc.Iterable):
raise TypeError("object is not iterable")
self.inner = SequenceConcatenation(self.inner, other)
def sort(self, key=None, reverse=False):
self.inner = sorted(self.inner, key=key, reverse=reverse)
def copy(self):
return self[:]
def __len__(self):
return self.inner.__len__()
def insert(self, index, item):
self.__setitem__(slice(index, index, 1), [item])
def __getitem__(self, key):
if isinstance(key, slice):
return self._spawn(self.inner.__getitem__(key))
else:
return self.inner.__getitem__(key)
def __repr__(self):
return repr(list(self.__iter__()))
def __contains__(self, other):
return self.inner.__contains__(other)
def __iter__(self):
return self.inner.__iter__()
def reverse(self):
self.inner = list(reversed(self.inner))
AbcSet = collections.abc.Set
class SetBase:
def __repr__(self):
return set(self).__repr__()
def __hash__(self):
return hash(frozenset(self))
def __and__(self, x):
if not isinstance(x, AbcSet):
return NotImplemented
return AbcSet.__and__(self, x)
def __or__(self, x):
if not isinstance(x, AbcSet):
return NotImplemented
return AbcSet.__or__(self, x)
def __xor__(self, x):
if not isinstance(x, AbcSet):
return NotImplemented
return AbcSet.__xor__(self, x)
def __sub__(self, x):
if not isinstance(x, AbcSet):
return NotImplemented
return AbcSet.__sub__(self, x)
def __rsub__(self, x):
if not isinstance(x, AbcSet):
return NotImplemented
return AbcSet.__rsub__(self, x)
class SingletonSet(SetBase, AbcSet):
# Primarily this exists to avoid hashing values.
def __init__(self, item):
self._item = item
def __contains__(self, x):
return x == self._item
def __iter__(self):
yield self._item
def __len__(self):
return 1
class LazySetCombination(SetBase, AbcSet):
"""
Provide a view over two sets and a logical operation in-between.
The view itself is an immutable set.
>>> a = {2, 4, 6 }
>>> b = { 4, 5, 6, 7}
>>> s = LazySetCombination(lambda a,b: (a and b), a, b)
>>> sorted(s)
[4, 6]
>>> a.add(5)
>>> sorted(s)
[4, 5, 6]
"""
def __init__(self, op: Callable[[bool, bool], bool], a: Set, b: Set):
self._op = op
self._a = a
self._b = b
def __contains__(self, x):
ina = self._a.__contains__(x)
inb = self._b.__contains__(x)
return self._op(ina, inb)
def __iter__(self):
op, a, b = self._op, self._a, self._b
def afilter(a_item):
return op(True, a_item in b)
def bfilter(b_item):
ina = b_item in a
if ina:
# We've already seen this item and would have returned it
# while traversing a, if we were supposed to.
return False
return op(ina, True)
return itertools.chain(filter(afilter, a), filter(bfilter, b))
def __len__(self):
return sum(1 for i in self.__iter__())
class ShellMutableSet(SetBase, collections.abc.MutableSet):
"""
Provide a view over an immutable set.
The view give the immutable set mutating operations that replace the underlying
data structure entirely.
This set also attempts to preserve insertion order of the set,
assuming the underlying set(s) do so as well.
"""
_inner: Set
def __init__(self, inner=frozenset()):
if isinstance(inner, AbcSet):
self._inner = inner
elif is_iterable(inner):
# Piggyback on ordered-ness of dictionaries:
self._inner = {k: None for k in inner}.keys()
# TODO: this hashes the elements;
# we likely want a dedicated ordered set class.
else:
raise TypeError
def __ch_pytype__(self):
return set
def __deepcopy__(self, memo):
import copy
return ShellMutableSet(copy.deepcopy(self._inner))
# methods that just defer to _inner
def __contains__(self, x):
return self._inner.__contains__(x)
def __iter__(self):
return self._inner.__iter__()
def __len__(self):
return self._inner.__len__()
def __le__(self, x):
return self._inner.__le__(x)
def __lt__(self, x):
return self._inner.__lt__(x)
def __eq__(self, x):
return self._inner.__eq__(x)
def __ne__(self, x):
return self._inner.__ne__(x)
def __gt__(self, x):
return self._inner.__gt__(x)
def __ge__(self, x):
return self._inner.__ge__(x)
def isdisjoint(self, x):
return self._inner.isdisjoint(x)
# mutation operations
def add(self, x):
self.__ior__(SingletonSet(x))
def clear(self):
self._inner = frozenset()
def pop(self):
if self:
x = next(iter(self))
self.remove(x)
return x
else:
raise KeyError
def discard(self, x):
self.__isub__(SingletonSet(x))
def remove(self, x):
if x not in self:
raise KeyError
self.discard(x)
def __or__(self, x):
if not isinstance(x, AbcSet):
return NotImplemented
return ShellMutableSet(LazySetCombination(operator.or_, self._inner, x))
__ror__ = __or__
def __and__(self, x):
if not isinstance(x, AbcSet):
return NotImplemented
return ShellMutableSet(LazySetCombination(operator.and_, self._inner, x))
__rand__ = __and__
def __xor__(self, x):
if not isinstance(x, AbcSet):
return NotImplemented
return ShellMutableSet(LazySetCombination(operator.xor, self._inner, x))
__rxor__ = __xor__
def __sub__(self, x):
if not isinstance(x, AbcSet):
return NotImplemented
return ShellMutableSet(
LazySetCombination(lambda x, y: (x and not y), self._inner, x)
)
def __rsub__(self, x):
if not isinstance(x, AbcSet):
return NotImplemented
return ShellMutableSet(
LazySetCombination(lambda x, y: (y and not x), self._inner, x)
)
def __ior__(self, x):
if not isinstance(x, AbcSet):
return NotImplemented
self._inner = LazySetCombination(operator.or_, self._inner, x)
return self
def __iand__(self, x):
if not isinstance(x, AbcSet):
return NotImplemented
self._inner = LazySetCombination(operator.and_, self._inner, x)
return self
def __ixor__(self, x):
if not isinstance(x, AbcSet):
return NotImplemented
self._inner = LazySetCombination(operator.xor, self._inner, x)
return self
def __isub__(self, x):
if not isinstance(x, AbcSet):
return NotImplemented
self._inner = LazySetCombination(lambda x, y: (x and not y), self._inner, x)
return self
| 28.696408 | 107 | 0.574399 |
f74a63828ceedda845b30d95cc6687889e0430b1 | 8,201 | py | Python | src/LDPC/pyldpc/ldpcmatrices.py | Horacehxw/Multi-label | 76095c72327e9aa379eaa653dbbb775ca638e6db | [
"MIT"
] | 1 | 2019-04-24T15:24:48.000Z | 2019-04-24T15:24:48.000Z | src/LDPC/pyldpc/ldpcmatrices.py | Horacehxw/Multi-label | 76095c72327e9aa379eaa653dbbb775ca638e6db | [
"MIT"
] | null | null | null | src/LDPC/pyldpc/ldpcmatrices.py | Horacehxw/Multi-label | 76095c72327e9aa379eaa653dbbb775ca638e6db | [
"MIT"
] | null | null | null | import numpy as np
from scipy.sparse import csr_matrix
from .ldpcalgebra import*
__all__ = ['BinaryProduct', 'InCode', 'BinaryRank','RegularH','CodingMatrix','CodingMatrix_systematic','HtG']
def RegularH(n,d_v,d_c):
""" ------------------------------------------------------------------------------
Builds a regular Parity-Check Matrix H (n,d_v,d_c) following Gallager's algorithm:
----------------------------------------------------------------------------------
Parameters:
n: Number of columns (Same as number of coding bits)
d_v: number of ones per column (number of parity-check equations including a certain variable)
d_c: number of ones per row (number of variables participating in a certain parity-check equation);
----------------------------------------------------------------------------------
Errors:
The number of ones in the matrix is the same no matter how we calculate it (rows or columns), therefore, if m is
the number of rows in the matrix:
m*d_c = n*d_v with m < n (because H is a decoding matrix) => Parameters must verify:
0 - all integer parameters
1 - d_v < d_c
2 - d_c divides n
---------------------------------------------------------------------------------------
Returns: 2D-array (shape = (m,n))
"""
if n%d_c:
raise ValueError('d_c must divide n. Help(RegularH) for more info.')
if d_c <= d_v:
raise ValueError('d_c must be greater than d_v. Help(RegularH) for more info.')
m = (n*d_v)// d_c
Set=np.zeros((m//d_v,n),dtype=int)
a=m//d_v
# Filling the first set with consecutive ones in each row of the set
for i in range(a):
for j in range(i*d_c,(i+1)*d_c):
Set[i,j]=1
#Create list of Sets and append the first reference set
Sets=[]
Sets.append(Set.tolist())
#Create remaining sets by permutations of the first set's columns:
i=1
for i in range(1,d_v):
newSet = np.transpose(np.random.permutation(np.transpose(Set))).tolist()
Sets.append(newSet)
# Return the concatenated list of sets:
H = np.concatenate(Sets)
return H
def CodingMatrix(MATRIX,use_sparse=1):
"""
CAUTION: RETURNS tG TRANSPOSED CODING MATRIX.
Function Applies GaussJordan Algorithm on Columns and rows of MATRIX in order
to permute Basis Change matrix using Matrix Equivalence.
Let A be the treated Matrix. refAref the double row reduced echelon Matrix.
refAref has the form:
(e.g) : |1 0 0 0 0 0 ... 0 0 0 0|
|0 1 0 0 0 0 ... 0 0 0 0|
|0 0 0 0 0 0 ... 0 0 0 0|
|0 0 0 1 0 0 ... 0 0 0 0|
|0 0 0 0 0 0 ... 0 0 0 0|
|0 0 0 0 0 0 ... 0 0 0 0|
First, let P1 Q1 invertible matrices: P1.A.Q1 = refAref
We would like to calculate:
P,Q are the square invertible matrices of the appropriate size so that:
P.A.Q = J. Where J is the matrix of the form (having MATRIX's shape):
| I_p O | where p is MATRIX's rank and I_p Identity matrix of size p.
| 0 0 |
Therfore, we perform permuations of rows and columns in refAref (same changes
are applied to Q1 in order to get final Q matrix)
NOTE: P IS NOT RETURNED BECAUSE WE DO NOT NEED IT TO SOLVE H.G' = 0
P IS INVERTIBLE, SO WE SIMPLY GET RID OF IT.
The function then
solves: inv(P).J.inv(Q).G' = 0 (1) where inv(P) = P^(-1) and
P.H.Q = J. Help(PJQ) for more info.
Let Y = inv(Q).G', equation becomes J.Y = 0 (2) whilst:
J = | I_p O | where p is H's rank and I_p Identity matrix of size p.
| 0 0 |
Knowing that G must have full rank, a solution of (2) is Y = [0 ; I_k] where k = n-p,
i.e. a zero block stacked on top of the k x k identity, because of the rank-nullity theorem.
-----------------
parameters:
H: Parity check matrix.
use_sparse: (optional, default True): use scipy.sparse format to speed up calculations
---------------
returns:
tG: Transposed Coding Matrix.
"""
H = np.copy(MATRIX)
m,n = H.shape
if m > n:
raise ValueError('MATRIX must have more rows than columns (a parity check matrix)')
if n > 500 and use_sparse:
sparse = 1
else:
sparse = 0
##### DOUBLE GAUSS-JORDAN:
Href_colonnes,tQ = GaussJordan(np.transpose(H),1)
Href_diag = GaussJordan(np.transpose(Href_colonnes))
Q=np.transpose(tQ)
k = n - sum(Href_diag.reshape(m*n))
Y = np.zeros(shape=(n,k)).astype(int)
Y[n-k:,:] = np.identity(k)
if sparse:
Q = csr_matrix(Q)
Y = csr_matrix(Y)
tG = BinaryProduct(Q,Y)
return tG
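# Illustrative sketch (hypothetical values): CodingMatrix solves H.G' = 0, so the
# GF(2) product of H with the returned tG should vanish. BinaryProduct is the
# modulo-2 matrix product defined earlier in this module.
#
#   H = RegularH(n=15, d_v=4, d_c=5)
#   tG = CodingMatrix(H)                      # transposed coding matrix
#   assert not BinaryProduct(H, tG).any()     # H.tG == 0 over GF(2)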
def CodingMatrix_systematic(MATRIX,use_sparse = 1):
"""
Description:
Solves H.G' = 0 and finds the coding matrix G in the systematic form : [I_k A] by applying permutations on MATRIX.
CAUTION: RETURNS TUPLE (Hp,tGS) WHERE Hp IS A MODIFIED VERSION OF THE GIVEN PARITY CHECK MATRIX, tGS THE TRANSPOSED
SYSTEMATIC CODING MATRIX ASSOCIATED TO Hp. YOU MUST USE THE RETURNED TUPLE IN CODING AND DECODING, RATHER THAN THE UNCHANGED
PARITY-CHECK MATRIX H.
-------------------------------------------------
Parameters:
MATRIX: 2D-Array. Parity-check matrix.
use_sparse: (optional, default True): use scipy.sparse matrices to speed up calculations if n>100.
------------------------------------------------
>>> Returns Tuple of 2D-arrays (Hp,GS):
Hp: Modified H: permutation of columns (The code doesn't change)
tGS: Transposed Systematic Coding matrix associated to Hp.
"""
H = np.copy(MATRIX)
m,n = H.shape
if n>100 and use_sparse:
sparse = 1
else:
sparse = 0
P1 = np.identity(n,dtype=int)
Hrowreduced = GaussJordan(H)
k = n - sum([a.any() for a in Hrowreduced ])
## After this loop, Hrowreduced will have the form H_ss : | I_(n-k) A |
permut = np.array(list(range(n)))
while(True):
zeros = [i for i in range(min(m,n)) if not Hrowreduced[i,i]]
if len(zeros)==0:
break
indice_colonne_a = min(zeros)
list_ones = [j for j in range(indice_colonne_a+1,n) if Hrowreduced[indice_colonne_a,j] ]
if not len(list_ones):
break
indice_colonne_b = min(list_ones)
aux = np.copy(Hrowreduced[:,indice_colonne_a])
Hrowreduced[:,indice_colonne_a] = Hrowreduced[:,indice_colonne_b]
Hrowreduced[:,indice_colonne_b] = aux
aux = np.copy(P1[:,indice_colonne_a])
P1[:,indice_colonne_a] = P1[:,indice_colonne_b]
P1[:,indice_colonne_b] = aux
############ NOW, Hrowreduced has the form: | I_(n-k) A | , the permutation above makes it look like :
########### |A I_(n-k)|
P1 = P1.T
identity = list(range(n))
sigma = identity[n-k:]+identity[:n-k]
P2 = np.zeros(shape=(n,n),dtype=int)
P2[identity,sigma] = np.ones(n)
if sparse:
P1 = csr_matrix(P1)
P2 = csr_matrix(P2)
H = csr_matrix(H)
P = BinaryProduct(P2,P1)
if sparse:
P = csr_matrix(P)
Hp = BinaryProduct(H,np.transpose(P))
GS = np.zeros((k,n),dtype=int)
GS[:,:k] = np.identity(k)
GS[:,k:] = np.transpose(Hrowreduced[:n-k,n-k:])
return Hp,np.transpose(GS)
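# Illustrative sketch (hypothetical values): the systematic variant returns the
# pair (Hp, tGS); it is this pair, not the original H, that must be used together
# for coding and decoding.
#
#   H = RegularH(n=15, d_v=4, d_c=5)
#   Hp, tGS = CodingMatrix_systematic(H)
#   assert not BinaryProduct(Hp, tGS).any()   # Hp.tGS == 0 over GF(2)
#   k = tGS.shape[1]                          # number of information bits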
def HtG(invrate,k,systematic=True):
"""
Constructs tuple H,tG using approximate rate (k/n) and k.
Parameters:
- invrate= 1/rate must be > 2
- k must be > 1
- systematic (Boolean optional, default = True) Construction method of transposed coding matrix tG.
returns tuple: H,tG
"""
if invrate < 3:
raise ValueError('invrate must be > 2')
if k < 2:
raise ValueError('k must be > 1')
d_c = invrate
d_v = invrate-1
n = invrate*k - (d_c-2)*d_c
H = RegularH(n,d_v,d_c)
if systematic:
H,tG = CodingMatrix_systematic(H)
else:
tG = CodingMatrix(H)
return H,tG
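# Illustrative end-to-end sketch (hypothetical parameters): HtG builds a code of
# approximate rate 1/invrate; a random message encoded with tG then lies in the
# null space of the returned H.
#
#   H, tG = HtG(invrate=3, k=8)                  # rate ~ 1/3
#   v = np.random.randint(2, size=tG.shape[1])   # random information word
#   x = BinaryProduct(tG, v)                     # codeword x = G'.v over GF(2)
#   assert not BinaryProduct(H, x).any()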
| 27.428094 | 129 | 0.559932 |
f74a6be17d44b1c99d379a8d7d7f5df13ddd7afb | 106 | py | Python | src/djangoSrc/dropbox_listener/apps.py | dighr/nethope_audio | 8571bd6f621920f3fea085be3879cab15ccfc1e6 | [
"MIT"
] | null | null | null | src/djangoSrc/dropbox_listener/apps.py | dighr/nethope_audio | 8571bd6f621920f3fea085be3879cab15ccfc1e6 | [
"MIT"
] | 9 | 2021-03-09T21:01:14.000Z | 2022-03-02T06:01:00.000Z | src/djangoSrc/dropbox_listener/apps.py | nethopeorg/nethope_audio | 8571bd6f621920f3fea085be3879cab15ccfc1e6 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class DropboxListenerConfig(AppConfig):
name = 'dropbox_listener'
| 17.666667 | 39 | 0.792453 |
f74a73ed235fee6f6be7792ebdcaa8762726590d | 905 | py | Python | setup.py | paparent/pyforrst | 70147a33b333c68fd8a72b5b5c502c3783b62c43 | [
"MIT"
] | 2 | 2016-03-01T22:16:45.000Z | 2016-07-17T18:07:27.000Z | setup.py | paparent/pyforrst | 70147a33b333c68fd8a72b5b5c502c3783b62c43 | [
"MIT"
] | null | null | null | setup.py | paparent/pyforrst | 70147a33b333c68fd8a72b5b5c502c3783b62c43 | [
"MIT"
] | null | null | null | import os
from setuptools import setup
from pyforrst import __version__
setup(
name='pyforrst',
version=__version__,
author='PA Parent',
author_email='paparent@paparent.me',
description='Python interface to Forrst API',
long_description=file(
os.path.join(
os.path.dirname(__file__),
'README'
)
).read(),
license="MIT",
url="http://github.com/paparent/pyforrst",
py_modules=['pyforrst'],
classifiers=[
"Development Status :: 3 - Alpha",
"Programming Language :: Python",
"Intended Audience :: Developers",
"Topic :: Software Development :: Libraries :: Python Modules",
"License :: OSI Approved :: MIT License",
],
zip_safe=False,
include_package_data=True,
exclude_package_data={'': ['*.pyc']},
install_requires=['setuptools'],
test_suite = 'tests',
)
| 26.617647 | 71 | 0.61768 |
f74ab10f5ec32efdaea5c3f1b1f1bdf486d2e48b | 1,283 | py | Python | generic/time-calculator/time_calculator.py | dalamilla/programming-python | bf30ae752afff810d5cc0dda07694518a510a913 | [
"MIT"
] | null | null | null | generic/time-calculator/time_calculator.py | dalamilla/programming-python | bf30ae752afff810d5cc0dda07694518a510a913 | [
"MIT"
] | null | null | null | generic/time-calculator/time_calculator.py | dalamilla/programming-python | bf30ae752afff810d5cc0dda07694518a510a913 | [
"MIT"
] | null | null | null | def add_time(start, duration, day=""):
start_time = start.split()
dur_time = duration.split()
arr_days = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
star_arr = start_time[0].split(":")
dur_arr = dur_time[0].split(":")
hour = int(star_arr[0]) + int(dur_arr[0])
min = int(star_arr[1]) + int(dur_arr[1])
day = day.lower().title()
if min > 59:
hour += 1
min = min - 60
if len(str(min)) < 2:
min = "0" + str(min)
count_days = 0
while hour > 12:
hour = hour - 12
if start_time[1] == "PM":
count_days += 1
start_time[1] = "AM"
else:
start_time[1] = "PM"
if hour == 12:
if start_time[1] == "PM":
start_time[1] = "AM"
count_days += 1
else:
start_time[1] = "PM"
if day == "":
new_time = f'{hour}:{min} {start_time[1]}'
else:
check_day = count_days
index = arr_days.index(day)
while check_day:
index += 1
if index >= len(arr_days):
index = 0
day = arr_days[index]
check_day -= 1
new_time = f'{hour}:{min} {start_time[1]}, {day}'
if count_days > 1:
new_time = new_time + f' ({count_days} days later)'
if count_days == 1:
new_time = new_time + f' (next day)'
return new_time
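# A few illustrative calls (expected outputs inferred from the logic above):
#
#   add_time("3:00 PM", "3:10")               -> "6:10 PM"
#   add_time("11:30 AM", "2:32", "Monday")    -> "2:02 PM, Monday"
#   add_time("10:10 PM", "3:30")              -> "1:40 AM (next day)"
#   add_time("11:43 PM", "24:20", "tueSday")  -> "12:03 AM, Thursday (2 days later)"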
| 22.910714 | 91 | 0.55339 |
f74ae3fe033f7ab85beb25972ec28e355ac8796b | 10,965 | py | Python | Models/Electricity_Market/Tiers/V001/model.py | schmocker/Pyjamas | 52a72d6e8b915f77a2194d4e7d53c46d0ec28c17 | [
"MIT"
] | 2 | 2018-05-31T15:02:08.000Z | 2018-07-11T11:02:44.000Z | Models/Electricity_Market/Tiers/V001/model.py | schmocker/Pyjamas | 52a72d6e8b915f77a2194d4e7d53c46d0ec28c17 | [
"MIT"
] | null | null | null | Models/Electricity_Market/Tiers/V001/model.py | schmocker/Pyjamas | 52a72d6e8b915f77a2194d4e7d53c46d0ec28c17 | [
"MIT"
] | null | null | null | from pyjamas_core import Supermodel
from pyjamas_core.util import Input, Output, Property
from datetime import datetime, timedelta
from Models._utils.time import datetime2utc_time, utc_time2datetime
import numpy as np
from pytz import timezone
import json
from scipy.interpolate import griddata
import pandas as pd
import requests
import os
# define the model class and inherit from class "Supermodel"
class Model(Supermodel):
# model constructor
def __init__(self, id, name: str):
# instantiate supermodel
super(Model, self).__init__(id, name)
# define inputs
self.inputs['stock_ex_price'] = Input(name='Stock exchange price', unit='€/J', info="stock exchange price")
self.inputs['distnet_costs'] = Input(name='Distribution network cost', unit='{-, €/J}', info="distribution network cost")
self.inputs['service_cost'] = Input(name='Service cost', unit='€/J', info="service cost")
self.inputs['taxes'] = Input(name='Taxes', unit='€/J', info="taxes")
self.inputs['futures'] = Input(name='Futures', unit='s', info="Futures")
# define outputs
self.outputs['el_rate'] = Output(name='Electricity rate', unit='€/J', info='electricity rate')
self.outputs['times'] = Output(name='Times', unit='s', info='Times')
self.outputs['y_scaling'] = Output(name='Scaling of y axis', unit='', info='Scaling of y axis')
self.outputs['y_unit'] = Output(name='Unit of y axis', unit='', info='Unit of y axis')
self.outputs['y_label'] = Output(name='y label', unit='', info='Label of y axis')
# define properties
ET_def = {"location": ["Baden"],
"border": [[-1.0, -0.5, -0.2, 0.0, 0.2, 0.5, 1.0]],
"weight": [[-0.3, -0.6, -0.8, 1.1, 1.3, 1.5]]}
NT_def = {"location": ["Baden"],
"border": [[-1.0, -0.8, -0.5, 0.0, 0.4, 0.8, 1.0]],
"weight": [[-0.5, -0.6, -0.8, 1.2, 1.5, 1.8]]}
ET_def = json.dumps(ET_def)
NT_def = json.dumps(NT_def)
self.properties['weight_ET'] = Property(default=ET_def, data_type=str, name='energy tiers', unit='-',
info='borders and weights of energy tiers', example=ET_def)
self.properties['weight_NT'] = Property(default=NT_def, data_type=str, name='net tiers', unit='-',
info='borders and weights of net tiers', example=NT_def)
self.properties["scaling"] = Property(default=1, data_type=float, name='Scaling factor', unit='-',
info='Scaling factor for y axis', example='3.6e9')
self.properties["y_unit"] = Property(default='€/MWh', data_type=str, name='unit of y label', unit='-',
info='Unit of label for y axis', example='[€/MWh]')
self.properties["y_labeling"] = Property(default='Price', data_type=str, name='y label', unit='-',
info='Label for y axis', example='Price [€/MWh]')
# define persistent variables
self.weight_ET = None
self.weight_NT = None
self.y_scaling = None
self.y_unit = None
self.y_labeling = None
async def func_birth(self):
pass
async def func_amend(self, keys=[]):
if 'weight_ET' in keys:
weight_ET_i = self.get_property('weight_ET')
self.weight_ET = json.loads(weight_ET_i)
if 'weight_NT' in keys:
weight_NT_i = self.get_property('weight_NT')
self.weight_NT = json.loads(weight_NT_i)
if 'scaling' in keys:
self.y_scaling = self.get_property("scaling")
if 'y_unit' in keys:
self.y_unit = self.get_property("y_unit")
if 'y_labeling' in keys:
self.y_labeling = self.get_property("y_labeling")
async def func_peri(self, prep_to_peri=None):
# locations information
loc_tiers = self.weight_ET['location']
# read prices
stock_prices_input = await self.get_input('stock_ex_price')
# read distribution costs
dn_costs_input = await self.get_input('distnet_costs')
loc_distnet = dn_costs_input['distribution_networks']
len_loc_distnet = len(loc_distnet)
        # service costs (DLK)
DLK_val = await self.get_input('service_cost')
        # taxes (Abgaben)
abgaben_val = await self.get_input('taxes')
# electricity rate
el_rate = []
border_tiers = []
border_val = []
ET_val = []
NT_val = []
MP_val = []
for nt in range(0, len_loc_distnet):
# compare location of distribution network with tiers locations, in it?
if loc_distnet[nt] in loc_tiers:
idx = loc_tiers.index(loc_distnet[nt])
else: # if not in list, take default values
idx = 0
# distribution cost
dist_costs = dn_costs_input['costs'][nt]
# read and determine borders and tiers
border_tiers_i = self.det_border_tiers(idx)
# stock prices
stock_prices = stock_prices_input['prices'][nt]
el_rate_i = []
for i_mt in range(0, len(stock_prices)):
# stock price
mt = stock_prices[i_mt]
# electricity rate
el_rate_ii = np.multiply(mt, border_tiers_i['ET_tiers']) + np.multiply(dist_costs, border_tiers_i['NT_tiers']) + DLK_val + abgaben_val
el_rate_ii = el_rate_ii.tolist()
el_rate_i.append(el_rate_ii)
el_rate.append(el_rate_i)
border_tiers.append(border_tiers_i)
border_val_i = []
ET_val_i = []
NT_val_i = []
border_i = border_tiers[nt]
len_border_i = len(border_i["borders"])
for ni in range(0, len_border_i):
if ni == 0:
border_val_i.append(border_i["borders"][ni])
ET_val_i.append(border_i["ET_tiers"][ni])
NT_val_i.append(border_i["NT_tiers"][ni])
if ni == (len_border_i-1):
border_val_i.append(border_i["borders"][ni])
ET_val_i.append(border_i["ET_tiers"][ni-1])
NT_val_i.append(border_i["NT_tiers"][ni-1])
if ((ni>0) & (ni<(len_border_i-1))):
border_val_i.append(border_i["borders"][ni])
ET_val_i.append(border_i["ET_tiers"][ni-1])
NT_val_i.append(border_i["NT_tiers"][ni-1])
border_val_i.append(border_i["borders"][ni])
ET_val_i.append(border_i["ET_tiers"][ni])
NT_val_i.append(border_i["NT_tiers"][ni])
border_val.append(border_val_i)
ET_val.append(ET_val_i)
NT_val.append(NT_val_i)
MP_val_data = []
            for mi in range(0, len(el_rate_i)):
                MP_val_i = []
for ni in range(0, len_border_i):
erate_i = el_rate_i[mi]
if ni == 0:
MP_val_i.append(erate_i[ni])
if ni == (len_border_i - 1):
MP_val_i.append(erate_i[ni-1])
if ((ni > 0) & (ni < (len_border_i - 1))):
MP_val_i.append(erate_i[ni-1])
MP_val_i.append(erate_i[ni])
MP_val_data.append(MP_val_i)
MP_val.append(MP_val_data)
tier_val = [ET_val, NT_val]
border_lines = {"borders": border_val,
"tiers": ["ET Tiers", "NT Tiers"],
"tier_values": tier_val,
"prices": MP_val}
output = {'Stao_ID': loc_distnet,
'values': el_rate,
'borders': border_tiers,
'border_lines': border_lines
}
# set output
self.set_output("el_rate", output)
self.set_output("times", await self.get_input('futures'))
self.set_output("y_scaling", self.y_scaling)
self.set_output("y_unit", self.y_unit)
self.set_output("y_label", self.y_labeling)
def det_border_tiers(self, it):
# read borders
ET_border = self.weight_ET["border"][it]
NT_border = self.weight_NT["border"][it]
ET_border = np.array(ET_border)
NT_border = np.array(NT_border)
# merge
borders = np.append(ET_border, NT_border)
borders = np.unique(borders)
# read tiers
ET_tiers_orig = self.weight_ET["weight"][it]
NT_tiers_orig = self.weight_NT["weight"][it]
# create tiers corresponding to border
ind_ET = 0
ind_NT = 0
ET_tiers = np.array(ET_tiers_orig[ind_ET])
NT_tiers = np.array(NT_tiers_orig[ind_NT])
for it in range(1, len(borders) - 1):
# ET
if ET_border[ind_ET+1] <= borders[it]:
ind_ET = ind_ET + 1
ET_tiers = np.append(ET_tiers, ET_tiers_orig[ind_ET])
else:
ET_tiers = np.append(ET_tiers, ET_tiers_orig[ind_ET])
# NT
if NT_border[ind_NT+1] <= borders[it]:
ind_NT = ind_NT + 1
NT_tiers = np.append(NT_tiers, NT_tiers_orig[ind_NT])
else:
NT_tiers = np.append(NT_tiers, NT_tiers_orig[ind_NT])
#print(it)
# return dict
border_tiers = {'borders': borders.tolist(),
'ET_tiers': ET_tiers.tolist(),
'NT_tiers': NT_tiers.tolist()}
return border_tiers
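    # Illustrative sketch of the merge performed above (hypothetical numbers):
    # with ET borders [-1, 0, 1] / weights [0.8, 1.2] and NT borders
    # [-1, 0.5, 1] / weights [0.9, 1.1], the merged borders are [-1, 0, 0.5, 1]
    # and the per-interval weights become ET_tiers = [0.8, 1.2, 1.2] and
    # NT_tiers = [0.9, 0.9, 1.1].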
if __name__ == "__main__":
# input
stock_ex_price = {'distribution_networks': ['Baden', 'Brugg'],
'prices': [[1, 2, 3], [1.1, 2.2, 3.3]]}
distnet_costs = {'distribution_networks': stock_ex_price['distribution_networks'],
'costs': [100, 111]}
DLK = [0.5]
abgaben = [0.25]
# properties
ET = {"location": ['Baden', 'Brugg'],
"border": [[-1., -0.8, -0.3, 0., 0.3, 0.8, 1.], [-1., -0.85, -0.35, 0., 0.35, 0.85, 1.]],
"weight": [[-2., -1.25, -0.75, 0.75, 1.25, 2.], [-2., -1.3, -0.8, 0.8, 1.3, 2.]]}
NT = {"location": ['Baden', 'Brugg'],
"border": [[-1., -0.7, -0.4, 0., 0.4, 0.7, 1.], [-1., -0.75, -0.45, 0., 0.45, 0.75, 1.]],
"weight": [[-1.75, -1., -0.5, 0.5, 1., 1.75], [-1.8, -1.05, -0.55, 0.55, 1.05, 1.8]]}
ET = json.dumps(ET)
NT = json.dumps(NT)
inputs = {'stock_ex_price': stock_ex_price,
'distnet_costs': distnet_costs,
'service_cost': DLK,
'taxes': abgaben}
props = {'weight_ET': ET,
'weight_NT': NT}
outputs = Model.test(inputs, props)
| 39.584838 | 150 | 0.540264 |
f74ae6b9925457538b16177136de3888adde735b | 1,635 | py | Python | tensorflow_datasets/scripts/cli/main.py | rushabh-v/datasets | eccdf9dd2b8741e1d2500ae7edb0cb7f5cd59da4 | [
"Apache-2.0"
] | 2 | 2020-10-12T07:09:38.000Z | 2021-03-05T12:48:23.000Z | tensorflow_datasets/scripts/cli/main.py | rushabh-v/datasets | eccdf9dd2b8741e1d2500ae7edb0cb7f5cd59da4 | [
"Apache-2.0"
] | null | null | null | tensorflow_datasets/scripts/cli/main.py | rushabh-v/datasets | eccdf9dd2b8741e1d2500ae7edb0cb7f5cd59da4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""CLI for Tensorflow Datasets.
TFDS CLI to help creates and build datasets (e.g. `tfds new my_dataset`,
`tfds build`,...)
"""
import argparse
from typing import List
from absl import app
from absl.flags import argparse_flags
import tensorflow_datasets.public_api as tfds
def _parse_flags(argv: List[str]) -> argparse.Namespace:
"""Command lines flag parsing."""
parser = argparse_flags.ArgumentParser(
description='Tensorflow Datasets CLI tool',
)
parser.add_argument(
'--version',
action='version',
version='TensorFlow Datasets: ' + tfds.__version__
)
return parser.parse_args(argv[1:])
def main(args: argparse.Namespace) -> None:
del args # Unused for now
def launch_cli() -> None:
"""Parse arguments and launch the CLI main function."""
app.run(main, flags_parser=_parse_flags)
if __name__ == '__main__':
# Entry-points in `setup.py` launch the `launch_cli()` function directly, so
# the code in `if __name__ == '__main__'` is never executed.
launch_cli()
| 28.189655 | 78 | 0.729052 |
f74ae9c9e53bc414e5245b8a46d222773bdd1843 | 4,936 | py | Python | aoide/make_sky_mask.py | granttremblay/aoide | ea25bdf92013f7dc3b254e261039c43e697ee901 | [
"MIT"
] | 1 | 2018-06-26T12:28:39.000Z | 2018-06-26T12:28:39.000Z | aoide/make_sky_mask.py | granttremblay/Aoide | ea25bdf92013f7dc3b254e261039c43e697ee901 | [
"MIT"
] | null | null | null | aoide/make_sky_mask.py | granttremblay/Aoide | ea25bdf92013f7dc3b254e261039c43e697ee901 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
'''
Aoide | Reduction & Analysis of MUSE observations
-------------------------------------------------
Dr. Grant R. Tremblay | Harvard-Smithsonian Center for Astrophysics
grant.tremblay @ cfa.harvard.edu
See the README associated with this repository for documentation & examples.
'''
from __future__ import print_function
import os
import sys
import matplotlib.pyplot as plt
import numpy as np
from astropy.io import fits as pyfits
"""
Interactive Masking of a FITS-file. The FITS-file must be provided upon
creating a new instance. If no mask is provided, the routine will create one
from scratch. Otherwise, the supplied mask will be modified.
The masking porcess is carried out using the mouse: An area to mask is selected
by moving the mouse over it while pressing the left button. To unmask an area,
use the right button. The cuts might be changed by clicking on the wheel.
Note that plt.show() must be called after an instance of MaskFrame has been
created!
"""
class MaskFrame:
"""
Initiate an instance
"""
def __init__(self, image, mask_name, cuts=(0, 10), extension=0):
fits_ima = pyfits.open(image)
self.true_arr = fits_ima[extension].data
if len(self.true_arr.shape) == 3:
self.true_arr = self.true_arr[0, :]
fits_ima.close()
self.mask_name = mask_name
self.extension = extension
if os.path.exists(mask_name):
self.in_mask = pyfits.open(mask_name, mode='update')
self.mask = self.in_mask[0].data
else:
self.in_mask = None
self.mask = np.zeros(self.true_arr.shape, dtype='Int16')
self.plot_arr = self.true_arr + (self.mask * 1e9)
self.lo_cut = cuts[0]
self.hi_cut = cuts[1]
self.fig = plt.figure(figsize=(8,8))
self.ax = self.fig.add_subplot(111)
self.ax.set_title('LEFT: Mask | RIGHT: Unmask | Wheel: Change cuts')
self.im = self.ax.imshow(
self.true_arr, origin='lower', interpolation='nearest', cmap='magma')
self.update()
self.xM = []
self.yM = []
self._connect()
"""
Connect the button_***_events to the corresponding methods
"""
def _connect(self):
self.ax.figure.canvas.mpl_connect('button_press_event', self.__on)
self.ax.figure.canvas.mpl_connect('button_release_event', self.__off)
"""
The actions that are carried out when a mouse button is pressed:
"""
def __on(self, event):
if event.button == 2:
print('Current cut levels are: {}, {}'.format(
self.lo_cut, self.hi_cut))
new_c = input('Enter new cut levels as low,high e.g. 0,20: ')
self.lo_cut = float(new_c.split(',')[0])
self.hi_cut = float(new_c.split(',')[1])
self.update()
else:
if event.inaxes != self.ax.axes:
print('Out of bounds!')
return
self.xM.append(int(round(event.xdata)))
self.yM.append(int(round(event.ydata)))
"""
The actions that are carried out when a mouse button is released.
"""
def __off(self, event):
if event.inaxes != self.ax.axes:
print('Out of bounds!')
return
else:
self.xM.append(int(round(event.xdata)))
self.yM.append(int(round(event.ydata)))
if len(self.xM) == 2:
if event.button == 1:
self.mask[min(self.yM):max(self.yM) + 1,
min(self.xM):max(self.xM) + 1] = 1
elif event.button == 3:
self.mask[min(self.yM):max(self.yM) + 1,
min(self.xM):max(self.xM) + 1] = 0
self.plot_arr = self.true_arr + (self.mask * 1e9)
self.update()
self.xM = []
self.yM = []
"""
This method updates the graphical interface:
"""
def update(self):
self.im.set_data(self.plot_arr[:, :])
self.im.set_clim(vmin=self.lo_cut, vmax=self.hi_cut)
self.im.axes.figure.canvas.draw()
"""
Save the mask under the filename specified in FrameMask.__init__
    Note that unlike the other methods, this method must be called explicitly
"""
def save_mask(self):
extension = self.extension
        if self.in_mask is None:
maskHDU = pyfits.PrimaryHDU(self.mask)
maskHDU.writeto(self.mask_name, overwrite=True)
else:
self.in_mask[0].data = self.mask
self.in_mask.flush()
def main():
if len(sys.argv) == 3:
make_mask = MaskFrame(sys.argv[1], sys.argv[2])
elif len(sys.argv) == 4:
make_mask = MaskFrame(
sys.argv[1], sys.argv[2], extension=int(sys.argv[3]))
plt.show()
make_mask.save_mask()
if __name__ == '__main__':
main()
| 30.097561 | 81 | 0.586507 |
f74af63f5ccfd54b66af7b96b0177756056c6157 | 240 | py | Python | iterations/enumerate.py | SeanSyue/PythonReferences | 103b2d6934a33e56a5d8fbb14d95282f572b3af7 | [
"MIT"
] | null | null | null | iterations/enumerate.py | SeanSyue/PythonReferences | 103b2d6934a33e56a5d8fbb14d95282f572b3af7 | [
"MIT"
] | null | null | null | iterations/enumerate.py | SeanSyue/PythonReferences | 103b2d6934a33e56a5d8fbb14d95282f572b3af7 | [
"MIT"
] | null | null | null | seasons = ['Spring', 'Summer', 'Fall', 'Winter']
enum = enumerate(seasons)
print(enum)
print(type(enum))
print(list(enum))
my_list = ['apple', 'banana', 'grapes', 'pear']
for c, value in enumerate(my_list, 1):
print(c, value)
| 24 | 49 | 0.629167 |
f74b26a31c7f63843b71cd0c4907c2f9a1f87f70 | 241 | py | Python | pyspeckit/spectrum/models/OH.py | migueldvb/pyspeckit | fa7d875da7c684c8f6aaa3ba206ef3ff2e196652 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | pyspeckit/spectrum/models/OH.py | migueldvb/pyspeckit | fa7d875da7c684c8f6aaa3ba206ef3ff2e196652 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | pyspeckit/spectrum/models/OH.py | migueldvb/pyspeckit | fa7d875da7c684c8f6aaa3ba206ef3ff2e196652 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | """
OH line fitter
"""
import redshiftedgroup
freq_dict={
'OH12':1.61223e9,
'OH11':1.66540e9,
'OH22':1.66736e9,
'OH21':1.72053e9,
}
OH = redshiftedgroup.redshiftedgroup(freq_dict)
OHfitter = OH.fitter
OHvheightfitter = OH.vheight_fitter
| 13.388889 | 47 | 0.738589 |
f74b33abf61540f34fa5672e5d8e0dabd0402b36 | 4,377 | py | Python | oscar/lib/python2.7/site-packages/django/contrib/gis/db/backends/mysql/operations.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | oscar/lib/python2.7/site-packages/django/contrib/gis/db/backends/mysql/operations.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | oscar/lib/python2.7/site-packages/django/contrib/gis/db/backends/mysql/operations.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | from django.contrib.gis.db.backends.base.adapter import WKTAdapter
from django.contrib.gis.db.backends.base.operations import (
BaseSpatialOperations,
)
from django.contrib.gis.db.backends.utils import SpatialOperator
from django.contrib.gis.db.models import GeometryField, aggregates
from django.db.backends.mysql.operations import DatabaseOperations
from django.utils.functional import cached_property
class MySQLOperations(BaseSpatialOperations, DatabaseOperations):
mysql = True
name = 'mysql'
Adapter = WKTAdapter
@cached_property
def geom_func_prefix(self):
return '' if self.is_mysql_5_5 else 'ST_'
@cached_property
def is_mysql_5_5(self):
return self.connection.mysql_version < (5, 6, 1)
@cached_property
def is_mysql_5_6(self):
return self.connection.mysql_version < (5, 7, 6)
@cached_property
def uses_invalid_empty_geometry_collection(self):
return self.connection.mysql_version >= (5, 7, 5)
@cached_property
def select(self):
return self.geom_func_prefix + 'AsText(%s)'
@cached_property
def from_wkb(self):
return self.geom_func_prefix + 'GeomFromWKB'
@cached_property
def from_text(self):
return self.geom_func_prefix + 'GeomFromText'
@cached_property
def gis_operators(self):
MBREquals = 'MBREqual' if self.is_mysql_5_6 else 'MBREquals'
return {
'bbcontains': SpatialOperator(func='MBRContains'), # For consistency w/PostGIS API
'bboverlaps': SpatialOperator(func='MBROverlaps'), # ...
'contained': SpatialOperator(func='MBRWithin'), # ...
'contains': SpatialOperator(func='MBRContains'),
'disjoint': SpatialOperator(func='MBRDisjoint'),
'equals': SpatialOperator(func=MBREquals),
'exact': SpatialOperator(func=MBREquals),
'intersects': SpatialOperator(func='MBRIntersects'),
'overlaps': SpatialOperator(func='MBROverlaps'),
'same_as': SpatialOperator(func=MBREquals),
'touches': SpatialOperator(func='MBRTouches'),
'within': SpatialOperator(func='MBRWithin'),
}
@cached_property
def function_names(self):
return {'Length': 'GLength'} if self.is_mysql_5_5 else {}
disallowed_aggregates = (
aggregates.Collect, aggregates.Extent, aggregates.Extent3D,
aggregates.MakeLine, aggregates.Union,
)
@cached_property
def unsupported_functions(self):
unsupported = {
'AsGeoJSON', 'AsGML', 'AsKML', 'AsSVG', 'BoundingCircle',
'ForceRHR', 'GeoHash', 'IsValid', 'MakeValid', 'MemSize',
'Perimeter', 'PointOnSurface', 'Reverse', 'Scale', 'SnapToGrid',
'Transform', 'Translate',
}
if self.is_mysql_5_5:
unsupported.update({'Difference', 'Distance', 'Intersection', 'SymDifference', 'Union'})
return unsupported
def geo_db_type(self, f):
return f.geom_type
def get_geom_placeholder(self, f, value, compiler):
"""
The placeholder here has to include MySQL's WKT constructor. Because
MySQL does not support spatial transformations, there is no need to
modify the placeholder based on the contents of the given value.
"""
if hasattr(value, 'as_sql'):
placeholder, _ = compiler.compile(value)
else:
placeholder = '%s(%%s)' % self.from_text
return placeholder
def get_db_converters(self, expression):
converters = super(MySQLOperations, self).get_db_converters(expression)
if isinstance(expression.output_field, GeometryField) and self.uses_invalid_empty_geometry_collection:
converters.append(self.convert_invalid_empty_geometry_collection)
return converters
# https://dev.mysql.com/doc/refman/en/spatial-function-argument-handling.html
# MySQL 5.7.5 adds support for the empty geometry collections, but they are represented with invalid WKT.
def convert_invalid_empty_geometry_collection(self, value, expression, connection, context):
if value == b'GEOMETRYCOLLECTION()':
return b'GEOMETRYCOLLECTION EMPTY'
return value
| 39.080357 | 111 | 0.656614 |
f74b35d573a55d5e5d807e6f63998295e6f2ac66 | 202 | py | Python | code_examples/python_with/with_simple.py | emilgaripov/emilgaripov.github.io | b0e4bf353cca894ec0199a73e71dc4f963e559a8 | [
"MIT"
] | 18 | 2017-02-19T15:58:54.000Z | 2022-02-13T22:15:19.000Z | code_examples/python_with/with_simple.py | emilgaripov/emilgaripov.github.io | b0e4bf353cca894ec0199a73e71dc4f963e559a8 | [
"MIT"
] | 3 | 2020-02-26T14:42:54.000Z | 2021-09-28T00:32:23.000Z | code_examples/python_with/with_simple.py | emilgaripov/emilgaripov.github.io | b0e4bf353cca894ec0199a73e71dc4f963e559a8 | [
"MIT"
] | 27 | 2017-05-03T15:38:41.000Z | 2022-02-08T02:53:38.000Z | import contextlib
@contextlib.contextmanager
def lines():
print('-'*10, 'START', '-'*10)
yield
print('-'*11, 'END', '-'*11)
with lines():
print('inside with block')
print('outside')
| 14.428571 | 34 | 0.594059 |
f74b93f2ba91c860ce8e8dca9c307f3dd4f8e782 | 464 | py | Python | toontown/coghq/PaintMixer.py | LittleNed/toontown-stride | 1252a8f9a8816c1810106006d09c8bdfe6ad1e57 | [
"Apache-2.0"
] | 3 | 2020-01-02T08:43:36.000Z | 2020-07-05T08:59:02.000Z | toontown/coghq/PaintMixer.py | NoraTT/Historical-Commits-Project-Altis-Source | fe88e6d07edf418f7de6ad5b3d9ecb3d0d285179 | [
"Apache-2.0"
] | null | null | null | toontown/coghq/PaintMixer.py | NoraTT/Historical-Commits-Project-Altis-Source | fe88e6d07edf418f7de6ad5b3d9ecb3d0d285179 | [
"Apache-2.0"
] | 4 | 2019-06-20T23:45:23.000Z | 2020-10-14T20:30:15.000Z | from toontown.coghq import PlatformEntity
class PaintMixer(PlatformEntity.PlatformEntity):
def start(self):
PlatformEntity.PlatformEntity.start(self)
model = self.platform.model
shaft = model.find('**/PaintMixerBase1')
shaft.setSz(self.shaftScale)
shaft.node().setPreserveTransform(0)
shaftChild = shaft.find('**/PaintMixerBase')
shaftChild.node().setPreserveTransform(0)
model.flattenMedium()
| 33.142857 | 52 | 0.689655 |
f74ba7b4d3a3b75a573444f558a4d790e954d12d | 726 | py | Python | mmdet/datasets/__init__.py | maktu6/mmdetection | 4a0a42d4ab1f00732997e58da70c8145b9751bb0 | [
"Apache-2.0"
] | null | null | null | mmdet/datasets/__init__.py | maktu6/mmdetection | 4a0a42d4ab1f00732997e58da70c8145b9751bb0 | [
"Apache-2.0"
] | null | null | null | mmdet/datasets/__init__.py | maktu6/mmdetection | 4a0a42d4ab1f00732997e58da70c8145b9751bb0 | [
"Apache-2.0"
] | null | null | null | from .custom import CustomDataset
from .xml_style import XMLDataset
from .coco import CocoDataset
from .voc import VOCDataset
from .loader import GroupSampler, DistributedGroupSampler, build_dataloader
from .utils import to_tensor, random_scale, show_ann, get_dataset
from .concat_dataset import ConcatDataset
from .repeat_dataset import RepeatDataset
from .extra_aug import ExtraAugmentation
from .imaterialist import iMaterialistDataset
__all__ = [
'CustomDataset', 'XMLDataset', 'CocoDataset', 'VOCDataset', 'GroupSampler',
'DistributedGroupSampler', 'build_dataloader', 'to_tensor', 'random_scale',
'show_ann', 'get_dataset', 'ConcatDataset', 'RepeatDataset',
'ExtraAugmentation', 'iMaterialistDataset'
]
| 40.333333 | 79 | 0.80303 |
f74bc584fa3f7f842ca2988f55f612e5139f9b02 | 7,270 | py | Python | runtime/test/specs/V1_2/unidirectional_sequence_lstm_1step.mod.py | riscv-android-src/platform-packages-modules-NeuralNetworks | 32a7fbe0cec3a17f9cdd8c6f11d94ae77e30add5 | [
"Apache-2.0"
] | null | null | null | runtime/test/specs/V1_2/unidirectional_sequence_lstm_1step.mod.py | riscv-android-src/platform-packages-modules-NeuralNetworks | 32a7fbe0cec3a17f9cdd8c6f11d94ae77e30add5 | [
"Apache-2.0"
] | null | null | null | runtime/test/specs/V1_2/unidirectional_sequence_lstm_1step.mod.py | riscv-android-src/platform-packages-modules-NeuralNetworks | 32a7fbe0cec3a17f9cdd8c6f11d94ae77e30add5 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (C) 2019 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Unidirectional Sequence LSTM Test:
# 1 Time Step, Layer Normalization, No Cifg, Peephole, Projection, and No Clipping.
import copy
model = Model()
max_time = 1
n_batch = 2
n_input = 5
# n_cell and n_output have the same size when there is no projection.
n_cell = 4
n_output = 3
input = Input("input", "TENSOR_FLOAT32", "{%d, %d, %d}" % (max_time, n_batch, n_input))
input_to_input_weights = Input("input_to_input_weights", "TENSOR_FLOAT32",
"{%d, %d}" % (n_cell, n_input))
input_to_forget_weights = Input("input_to_forget_weights", "TENSOR_FLOAT32",
"{%d, %d}" % (n_cell, n_input))
input_to_cell_weights = Input("input_to_cell_weights", "TENSOR_FLOAT32",
"{%d, %d}" % (n_cell, n_input))
input_to_output_weights = Input("input_to_output_weights", "TENSOR_FLOAT32",
"{%d, %d}" % (n_cell, n_input))
recurrent_to_input_weights = Input("recurrent_to_input_weights",
"TENSOR_FLOAT32",
"{%d, %d}" % (n_cell, n_output))
recurrent_to_forget_weights = Input("recurrent_to_forget_weights",
"TENSOR_FLOAT32",
"{%d, %d}" % (n_cell, n_output))
recurrent_to_cell_weights = Input("recurrent_to_cell_weights", "TENSOR_FLOAT32",
"{%d, %d}" % (n_cell, n_output))
recurrent_to_output_weights = Input("recurrent_to_output_weights",
"TENSOR_FLOAT32",
"{%d, %d}" % (n_cell, n_output))
cell_to_input_weights = Input("cell_to_input_weights", "TENSOR_FLOAT32",
"{%d}" % (n_cell))
cell_to_forget_weights = Input("cell_to_forget_weights", "TENSOR_FLOAT32",
"{%d}" % (n_cell))
cell_to_output_weights = Input("cell_to_output_weights", "TENSOR_FLOAT32",
"{%d}" % (n_cell))
input_gate_bias = Input("input_gate_bias", "TENSOR_FLOAT32", "{%d}" % (n_cell))
forget_gate_bias = Input("forget_gate_bias", "TENSOR_FLOAT32",
"{%d}" % (n_cell))
cell_gate_bias = Input("cell_gate_bias", "TENSOR_FLOAT32", "{%d}" % (n_cell))
output_gate_bias = Input("output_gate_bias", "TENSOR_FLOAT32",
"{%d}" % (n_cell))
projection_weights = Input("projection_weights", "TENSOR_FLOAT32",
"{%d,%d}" % (n_output, n_cell))
projection_bias = Input("projection_bias", "TENSOR_FLOAT32", "{0}")
output_state_in = Input("output_state_in", "TENSOR_FLOAT32",
"{%d, %d}" % (n_batch, n_output))
cell_state_in = Input("cell_state_in", "TENSOR_FLOAT32",
"{%d, %d}" % (n_batch, n_cell))
activation_param = Int32Scalar("activation_param", 4) # Tanh
cell_clip_param = Float32Scalar("cell_clip_param", 0.)
proj_clip_param = Float32Scalar("proj_clip_param", 0.)
time_major_param = BoolScalar("time_major_param", True)
input_layer_norm_weights = Input("input_layer_norm_weights", "TENSOR_FLOAT32",
"{%d}" % n_cell)
forget_layer_norm_weights = Input("forget_layer_norm_weights", "TENSOR_FLOAT32",
"{%d}" % n_cell)
cell_layer_norm_weights = Input("cell_layer_norm_weights", "TENSOR_FLOAT32",
"{%d}" % n_cell)
output_layer_norm_weights = Input("output_layer_norm_weights", "TENSOR_FLOAT32",
"{%d}" % n_cell)
output = Output("output", "TENSOR_FLOAT32", "{%d, %d, %d}" % (max_time, n_batch, n_output))
model = model.Operation(
"UNIDIRECTIONAL_SEQUENCE_LSTM", input, input_to_input_weights, input_to_forget_weights,
input_to_cell_weights, input_to_output_weights, recurrent_to_input_weights,
recurrent_to_forget_weights, recurrent_to_cell_weights,
recurrent_to_output_weights, cell_to_input_weights, cell_to_forget_weights,
cell_to_output_weights, input_gate_bias, forget_gate_bias, cell_gate_bias,
output_gate_bias, projection_weights, projection_bias, output_state_in,
cell_state_in, activation_param, cell_clip_param, proj_clip_param, time_major_param,
input_layer_norm_weights, forget_layer_norm_weights,
cell_layer_norm_weights, output_layer_norm_weights).To([output])
# Example 1. Input in operand 0,
input0 = {
input_to_input_weights: [
0.5, 0.6, 0.7, -0.8, -0.9, 0.1, 0.2, 0.3, -0.4, 0.5, -0.8, 0.7, -0.6,
0.5, -0.4, -0.5, -0.4, -0.3, -0.2, -0.1
],
input_to_forget_weights: [
-0.6, -0.1, 0.3, 0.2, 0.9, -0.5, -0.2, -0.4, 0.3, -0.8, -0.4, 0.3, -0.5,
-0.4, -0.6, 0.3, -0.4, -0.6, -0.5, -0.5
],
input_to_cell_weights: [
-0.4, -0.3, -0.2, -0.1, -0.5, 0.5, -0.2, -0.3, -0.2, -0.6, 0.6, -0.1,
-0.4, -0.3, -0.7, 0.7, -0.9, -0.5, 0.8, 0.6
],
input_to_output_weights: [
-0.8, -0.4, -0.2, -0.9, -0.1, -0.7, 0.3, -0.3, -0.8, -0.2, 0.6, -0.2,
0.4, -0.7, -0.3, -0.5, 0.1, 0.5, -0.6, -0.4
],
input_gate_bias: [0.03, 0.15, 0.22, 0.38],
forget_gate_bias: [0.1, -0.3, -0.2, 0.1],
cell_gate_bias: [-0.05, 0.72, 0.25, 0.08],
output_gate_bias: [0.05, -0.01, 0.2, 0.1],
recurrent_to_input_weights: [
-0.2, -0.3, 0.4, 0.1, -0.5, 0.9, -0.2, -0.3, -0.7, 0.05, -0.2, -0.6
],
recurrent_to_cell_weights: [
-0.3, 0.2, 0.1, -0.3, 0.8, -0.08, -0.2, 0.3, 0.8, -0.6, -0.1, 0.2
],
recurrent_to_forget_weights: [
-0.5, -0.3, -0.5, -0.2, 0.6, 0.4, 0.9, 0.3, -0.1, 0.2, 0.5, 0.2
],
recurrent_to_output_weights: [
0.3, -0.1, 0.1, -0.2, -0.5, -0.7, -0.2, -0.6, -0.1, -0.4, -0.7, -0.2
],
cell_to_input_weights: [0.05, 0.1, 0.25, 0.15],
cell_to_forget_weights: [-0.02, -0.15, -0.25, -0.03],
cell_to_output_weights: [0.1, -0.1, -0.5, 0.05],
projection_weights: [
-0.1, 0.2, 0.01, -0.2, 0.1, 0.5, 0.3, 0.08, 0.07, 0.2, -0.4, 0.2
],
projection_bias: [],
input_layer_norm_weights: [0.1, 0.2, 0.3, 0.5],
forget_layer_norm_weights: [0.2, 0.2, 0.4, 0.3],
cell_layer_norm_weights: [0.7, 0.2, 0.3, 0.8],
output_layer_norm_weights: [0.6, 0.2, 0.2, 0.5]
}
test_input = [0.7, 0.8, 0.1, 0.2, 0.3, 0.3, 0.2, 0.9, 0.8, 0.1]
golden_output = [
0.024407668039203, 0.128027379512787, -0.001709178090096,
-0.006924282759428, 0.084874063730240, 0.063444979488850
]
output0 = {
output: golden_output,
}
input0[input] = test_input
input0[output_state_in] = [ 0 for _ in range(n_batch * n_output) ]
input0[cell_state_in] = [ 0 for _ in range(n_batch * n_cell) ]
Example((input0, output0))
| 43.795181 | 91 | 0.601376 |
f74bdb6e8649c00dbb2ee80660d7a80dbe509e45 | 13,273 | py | Python | pycatia/drafting_interfaces/drawing_dimensions.py | Tian-Jionglu/pycatia | b315aeb3a74846f134ff6b67b3a6334b9d3905fa | [
"MIT"
] | 1 | 2020-04-27T13:59:10.000Z | 2020-04-27T13:59:10.000Z | pycatia/drafting_interfaces/drawing_dimensions.py | Luanee/pycatia | ea5eef8178f73de12404561c00baf7a7ca30da59 | [
"MIT"
] | null | null | null | pycatia/drafting_interfaces/drawing_dimensions.py | Luanee/pycatia | ea5eef8178f73de12404561c00baf7a7ca30da59 | [
"MIT"
] | null | null | null | #! usr/bin/python3.6
"""
Module initially auto generated using V5Automation files from CATIA V5 R28 on 2020-06-11 12:40:47.360445
.. warning::
The notes denoted "CAA V5 Visual Basic Help" are to be used as reference only.
They are there as a guide as to how the visual basic / catscript functions work
and thus help debugging in pycatia.
"""
from pycatia.drafting_interfaces.drawing_dimension import DrawingDimension
from pycatia.system_interfaces.collection import Collection
from pycatia.types import cat_variant
class DrawingDimensions(Collection):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)
| System.IUnknown
| System.IDispatch
| System.CATBaseUnknown
| System.CATBaseDispatch
| System.Collection
| DrawingDimensions
|
| A collection of all the drawing dimensions currently managed by a drawing view
| of drawing sheet in a drawing document.
"""
def __init__(self, com_object):
super().__init__(com_object, child_object=DrawingDimension)
self.drawing_dimensions = com_object
def add(self, i_type_dim: int, i_geom_elem: tuple, i_pt_coord_elem: tuple, i_line_rep: int) -> DrawingDimension:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445))
| o Func Add(CatDimType iTypeDim,
| CATSafeArrayVariant iGeomElem,
| CATSafeArrayVariant iPtCoordElem,
| CatDimLineRep iLineRep) As DrawingDimension
|
| Creates a drawing dimension and adds it to the DrawingDimensions
| collection.
|
| Parameters:
|
| iTypeDim
| Dimension type
| iGeomElem
| Parent geometrical element(s) of dimension
| iPtCoordElem
| Array of pointers on the selection points of each element of
| iGeomElem
| iLineRep
| Basic representation mode
|
| Returns:
| The created drawing dimension
|
| Example:
| The following example creates a drawing angle dimension between two lines
| and a partial curvilinear length dimension on an ellipse and retrieved in
| MyDimension1 and MyDimension2 in the drawing view collection of the MyView
| drawing view. This view belongs to the drawing view collection of the drawing
| sheet
|
| Dim MyView As DrawingView
| Set MyView = MySheet.Views.ActiveView
| Dim Fact2D As Factory2D
| Set Fact2D = MyView.Factory2D
| Dim Line1 As Line2D
| Dim Line2 As Line2D
| Set Line1 = Fact2D.CreateLine(50, 10, 150, 10)
| Set Line2 = Fact2D.CreateLine(50, 10, 120, 100)
| Dim Ellipse1 As Ellipse2D
| Set Ellipse1 = Fact2D.CreateEllipse(-40, 100, 120, 180,120,90,0, 3)
| Dim Point1 As Point2D
| Dim Point2 As Point2D
| Set Point1 = Fact2D.CreatePoint(-10,190)
| Set Point2 = Fact2D.CreatePoint(-120,90)
| Dim iType As catDimType
| iType = catDimAngle
| Dim myElements1(1)
| myElements1(1) = Array(Line1,Line2)
| Dim selpoints(3)
| selpoints(3) = Array(150, 10, 120, 100)
| Dim MyDimension1 As DrawingDimension
| Set MyDimension1 = MyView.Dimensions.Add(iType, myElements1(1), selpoints(3),catDimAuto)
| iType = catDimLengthCurvilinear
| Dim myElements2(2)
| myElements2(2) = Array(Point1,Point2,Ellipse1)
| selpoints(3) = Array(0, 0, 0, 0)
| Dim MyDimension2 As DrawingDimension
| Set MyDimension2 = MyView.Dimensions.Add(iType, myElements2(1), selpoints(3),catDimOffset)
:param int i_type_dim:
:param tuple i_geom_elem:
:param tuple i_pt_coord_elem:
:param int i_line_rep:
:return: DrawingDimension
:rtype: DrawingDimension
"""
return DrawingDimension(
self.drawing_dimensions.Add(
i_type_dim,
i_geom_elem,
i_pt_coord_elem,
i_line_rep)
)
def add2(self,
i_type_dim: int,
i_geom_elem: tuple,
i_pt_coord_elem: tuple,
i_ldc_ref_elem: cat_variant,
i_ldc_ref_angle: int) -> DrawingDimension:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445))
| o Func Add2(CatDimType iTypeDim,
| CATSafeArrayVariant iGeomElem,
| CATSafeArrayVariant iPtCoordElem,
| CATVariant iLDCRefElem,
| long iLDCRefAngle) As DrawingDimension
|
| Creates a drawing dimension along a direction and adds it to the
| DrawingDimensions collection.
|
| Parameters:
|
| iTypeDim
| Dimension type (available types : catDimDistance, catDimLength, catDimRadiusTangent and
| catDimDiameterTangent)
| iGeomElem
| Parent geometrical element(s) of dimension
| iPtCoordElem
| Array of pointers on the selection points of each element of
| iGeomElem
| iLDCRefElem
| Reference geometrical element for the direction of the dimension
| line .iLDCRefElem can be null: in this case, the view is the reference element
|
| iLDCRefAngle
| Angle between the reference element and the direction of the
| dimension line
|
| Returns:
| The created drawing dimension (The property CATDimLineRep of the
| dimension line of the created dimension is set to catDimUserDefined)
|
|
| Example:
| The following example creates a drawing distance dimension between two
| points along the direction of a line and retrieved in MyDimension in the
| drawing view collection of the MyView drawing view. This view belongs to the
| drawing view collection of the drawing sheet
|
| Dim MyView As DrawingView
| Set MyView = MySheet.Views.ActiveView
| Dim Fact2D As Factory2D
| Set Fact2D = MyView.Factory2D
| Dim Point1 As Point2D
| Dim Point2 As Point2D
| Set Point1 = Fact2D.CreatePoint(40, 230)
| Set Point2 = Fact2D.CreatePoint(80, 210)
| Dim Line1 As Line2D
| Set Line1 = Fact2D.CreateLine(50, 10, 150, 10)
| Dim iType As catDimType
| iType = catDimDistance
| Dim myElements(1)
| myElements(1) = Array(Point1,Point2)
| Dim selpoints(3)
| selpoints(3) = Array(0, 0, 0, 0)
| Dim MyDimension As DrawingDimension
| Set MyDimension = MyView.Dimensions.Add2(iType, myElements(1), selpoints(3), Line1, 0)
:param int i_type_dim:
:param tuple i_geom_elem:
:param tuple i_pt_coord_elem:
:param CATVariant i_ldc_ref_elem:
:param int i_ldc_ref_angle:
:return: DrawingDimension
:rtype: DrawingDimension
"""
return DrawingDimension(
self.drawing_dimensions.Add2(
i_type_dim,
i_geom_elem,
i_pt_coord_elem,
i_ldc_ref_elem,
i_ldc_ref_angle)
)
def item(self, i_index: cat_variant) -> DrawingDimension:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445))
| o Func Item(CATVariant iIndex) As DrawingDimension
|
| Returns a drawing dimension using its index or its name from the
| DrawingDimensions collection.
|
| Parameters:
|
| iIndex
| The index or the name of the drawing dimension to retrieve from the
| collection of drawing dimensions. As a numerics, this index is the rank of the
| drawing dimension in the collection. The index of the first drawing dimension
| in the collection is 1, and the index of the last drawing dimension is Count.
| As a string, it is the name you assigned to the drawing dimension using the
|
|
| AnyObject.Name property or when creating it using the Add method.
|
| Returns:
| The retrieved drawing dimension
| Example:
|
| This example retrieves in ThisDrawingDimension the second drawing
| dimension,
| and in ThatDrawingDimension the drawing dimension
| named
| MyDimension in the drawing dimension collection of the active
| view.
|
|
| Dim MyView As DrawingView
| Set MyView = MySheet.Views.ActiveView
| Dim ThisDrawingDimension As DrawingDimension
| Set ThisDrawingDimension = MyView.Dimensions.Item(2)
| Dim ThatDrawingDimension As DrawingDimension
| Set ThatDrawingDimension = MyView.Dimensions.Item("MyDimension")
:param cat_variant i_index:
:return: DrawingDimension
:rtype: DrawingDimension
"""
return DrawingDimension(self.drawing_dimensions.Item(i_index))
def remove(self, i_index: cat_variant) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445))
| o Sub Remove(CATVariant iIndex)
|
| Removes a drawing dimension from the DrawingDimensions
| collection.
|
| Parameters:
|
| iIndex
| The index of the drawing dimension to remove from the collection of
| drawing dimensions. As a numerics, this index is the rank of the drawing
| dimension in the collection. The index of the first drawing dimension in the
| collection is 1, and the index of the last drawing dimension is Count.
|
|
| Example:
| The following example removes the third drawing dimension in the
| drawing dimension collection of the active view of the active document,
| supposed to be a drawing document.
|
| Dim MyView As DrawingView
| Set MyView = MySheet.Views.ActiveView
| MyView.Dimensions.Remove(3)
:param cat_variant i_index:
:return: None
:rtype: None
"""
return self.drawing_dimensions.Remove(i_index)
def __getitem__(self, n: int) -> DrawingDimension:
if (n + 1) > self.count:
raise StopIteration
return DrawingDimension(self.drawing_dimensions.item(n + 1))
def __repr__(self):
return f'DrawingDimensions(name="{self.name}")'
| 44.841216 | 117 | 0.497401 |