code (stringlengths 22-1.05M) | apis (sequencelengths 1-3.31k) | extract_api (stringlengths 75-3.25M) |
---|---|---|
from sage.misc.lazy_import import lazy_import
lazy_import('sage.geometry.polyhedron.constructor', 'Polyhedron')
lazy_import('sage.geometry.polyhedron.library', 'polytopes')
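# lazy_import binds proxy objects that defer the actual import until first use,
# which keeps module start-up cheap.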
| [
"sage.misc.lazy_import.lazy_import"
] | [((47, 112), 'sage.misc.lazy_import.lazy_import', 'lazy_import', (['"""sage.geometry.polyhedron.constructor"""', '"""Polyhedron"""'], {}), "('sage.geometry.polyhedron.constructor', 'Polyhedron')\n", (58, 112), False, 'from sage.misc.lazy_import import lazy_import\n'), ((113, 173), 'sage.misc.lazy_import.lazy_import', 'lazy_import', (['"""sage.geometry.polyhedron.library"""', '"""polytopes"""'], {}), "('sage.geometry.polyhedron.library', 'polytopes')\n", (124, 173), False, 'from sage.misc.lazy_import import lazy_import\n')] |
from Position import Position
from Sheep import Sheep
from Grass import Grass
if __name__ == '__main__':
grass = Grass(position=Position(10, 10))
sheep = Sheep(position=Position(-10, -10))
for i in range(0,10):
print('-- Iteration {0} --'.format(i))
grass.move()
print(grass)
sheep.move()
print(sheep) | [
"Position.Position"
] | [((133, 149), 'Position.Position', 'Position', (['(10)', '(10)'], {}), '(10, 10)\n', (141, 149), False, 'from Position import Position\n'), ((178, 196), 'Position.Position', 'Position', (['(-10)', '(-10)'], {}), '(-10, -10)\n', (186, 196), False, 'from Position import Position\n')] |
from django.views.generic import ListView, DetailView
from django.shortcuts import render, get_object_or_404
from analytics.mixins import ObjectViewedMixin
from carts.models import Cart
from .models import Product
from django.http import Http404
class ProductFeaturedListView(ListView):
template_name = "products/list.html"
def get_queryset(self, *args, **kwargs):
request = self.request
return Product.objects.featured()
class ProductFeaturedDetailView(ObjectViewedMixin, DetailView):
queryset = Product.objects.featured()
template_name = "products/featured-detail.html"
class ProductListView(ListView):
queryset = Product.objects.all()
template_name = "products/list.html"
def get_context_data(self,*args, **kwargs):
context = super(ProductListView, self).get_context_data(*args, **kwargs)
cart_obj , new_obj = Cart.objects.new_or_get(self.request)
context['cart'] = cart_obj
return context
def product_list_view(request):
queryset = Product.objects.all()
context = {
'object_list': queryset
}
return render(request, "products/list.html", context)
class ProductDetailView(ObjectViewedMixin, DetailView):
queryset = Product.objects.all()
template_name = "products/detail.html"
class ProductDetailSlugView(ObjectViewedMixin, DetailView):
queryset = Product.objects.all()
template_name = "products/detail.html"
def get_context_data(self,*args, **kwargs):
context = super(ProductDetailSlugView, self).get_context_data(*args, **kwargs)
cart_obj , new_obj = Cart.objects.new_or_get(self.request)
context['cart'] = cart_obj
return context
# def get_object(self, *args, **kwargs):
# request = self.request
# slug = self.kwargs.get('slug')
# try:
# instance = Product.objects.get(slug=slug, active=True)
# except Product.DoesNotExist:
# raise Http404("Not Found..")
# except Product.MultipleObjectsReturned:
# qs = Product.objects.get(slug=slug, active=True)
# instance = qs.first()
# except:
# raise Http404("Uhmm")
# return instance
def product_detail_view(request, pk):
# instance = get_object_or_404(Product, pk=pk)
# try:
# instance = Product.objects.filter(pk=pk)
# except Product.DoesNotExist:
# print('no product here')
# raise Http404("Product doesn't exist.")
# except:
# print("Huh?")
#
# qs = Product.objects.filter(pk=pk)
# if qs.exists() and qs.count == 1:
# instance = qs.first()
# else:
# raise Http404("Product doesn't exist.")
instance = Product.objects.get_by_id(pk)
if instance is None:
raise Http404("Product doesn't exist.")
context = {
'object': instance
}
return render(request, "products/detail.html", context)
| [
"django.shortcuts.render",
"carts.models.Cart.objects.new_or_get",
"django.http.Http404"
] | [((1116, 1162), 'django.shortcuts.render', 'render', (['request', '"""products/list.html"""', 'context'], {}), "(request, 'products/list.html', context)\n", (1122, 1162), False, 'from django.shortcuts import render, get_object_or_404\n'), ((2888, 2936), 'django.shortcuts.render', 'render', (['request', '"""products/detail.html"""', 'context'], {}), "(request, 'products/detail.html', context)\n", (2894, 2936), False, 'from django.shortcuts import render, get_object_or_404\n'), ((884, 921), 'carts.models.Cart.objects.new_or_get', 'Cart.objects.new_or_get', (['self.request'], {}), '(self.request)\n', (907, 921), False, 'from carts.models import Cart\n'), ((1608, 1645), 'carts.models.Cart.objects.new_or_get', 'Cart.objects.new_or_get', (['self.request'], {}), '(self.request)\n', (1631, 1645), False, 'from carts.models import Cart\n'), ((2794, 2827), 'django.http.Http404', 'Http404', (['"""Product doesn\'t exist."""'], {}), '("Product doesn\'t exist.")\n', (2801, 2827), False, 'from django.http import Http404\n')] |
import sys
if sys.version_info < (3, 0):
from nose.plugins.skip import SkipTest
raise SkipTest
from mlxtend.text import generalize_names
def test_generalize_names():
assert(generalize_names("<NAME>") == 'etoo s')
assert(generalize_names("Eto'o, Samuel") == 'etoo s')
assert(generalize_names("Eto'o, Samuel") == 'etoo s')
assert(generalize_names('Xavi') == 'xavi')
assert(generalize_names('<NAME>') == 'toure y')
assert(generalize_names('<NAME>') == 'pozo j')
assert(generalize_names('<NAME>') == 'pozo j')
assert(generalize_names('<NAME>') == 'pozo j')
assert(generalize_names('<NAME>') == 'pozo j')
assert(generalize_names('<NAME>', firstname_output_letters=2) ==
'pozo jo')
assert(generalize_names("<NAME>", firstname_output_letters=2) ==
'etoo sa')
assert(generalize_names("Eto'o, Samuel", firstname_output_letters=0) ==
'etoo')
assert(generalize_names("Eto'o, Samuel", output_sep=', ') == 'etoo, s')
assert(generalize_names("Eto'o, Samuel", output_sep=', ') == 'etoo, s')
assert(generalize_names("<NAME>, Robin", output_sep=', ') ==
'vanpersie, r')
assert(generalize_names("<NAME>", output_sep=', ') ==
'vanpersie, r')
assert(generalize_names("<NAME>", output_sep=', ') ==
'vandervaart, r')
assert(generalize_names("<NAME>, Rafael", output_sep=', ') ==
'vandervaart, r')
assert(generalize_names("<NAME>") == 'hamer b')
| [
"mlxtend.text.generalize_names"
] | [((190, 216), 'mlxtend.text.generalize_names', 'generalize_names', (['"""<NAME>"""'], {}), "('<NAME>')\n", (206, 216), False, 'from mlxtend.text import generalize_names\n'), ((241, 274), 'mlxtend.text.generalize_names', 'generalize_names', (['"""Eto\'o, Samuel"""'], {}), '("Eto\'o, Samuel")\n', (257, 274), False, 'from mlxtend.text import generalize_names\n'), ((299, 332), 'mlxtend.text.generalize_names', 'generalize_names', (['"""Eto\'o, Samuel"""'], {}), '("Eto\'o, Samuel")\n', (315, 332), False, 'from mlxtend.text import generalize_names\n'), ((357, 381), 'mlxtend.text.generalize_names', 'generalize_names', (['"""Xavi"""'], {}), "('Xavi')\n", (373, 381), False, 'from mlxtend.text import generalize_names\n'), ((404, 430), 'mlxtend.text.generalize_names', 'generalize_names', (['"""<NAME>"""'], {}), "('<NAME>')\n", (420, 430), False, 'from mlxtend.text import generalize_names\n'), ((456, 482), 'mlxtend.text.generalize_names', 'generalize_names', (['"""<NAME>"""'], {}), "('<NAME>')\n", (472, 482), False, 'from mlxtend.text import generalize_names\n'), ((507, 533), 'mlxtend.text.generalize_names', 'generalize_names', (['"""<NAME>"""'], {}), "('<NAME>')\n", (523, 533), False, 'from mlxtend.text import generalize_names\n'), ((558, 584), 'mlxtend.text.generalize_names', 'generalize_names', (['"""<NAME>"""'], {}), "('<NAME>')\n", (574, 584), False, 'from mlxtend.text import generalize_names\n'), ((609, 635), 'mlxtend.text.generalize_names', 'generalize_names', (['"""<NAME>"""'], {}), "('<NAME>')\n", (625, 635), False, 'from mlxtend.text import generalize_names\n'), ((660, 714), 'mlxtend.text.generalize_names', 'generalize_names', (['"""<NAME>"""'], {'firstname_output_letters': '(2)'}), "('<NAME>', firstname_output_letters=2)\n", (676, 714), False, 'from mlxtend.text import generalize_names\n'), ((751, 805), 'mlxtend.text.generalize_names', 'generalize_names', (['"""<NAME>"""'], {'firstname_output_letters': '(2)'}), "('<NAME>', firstname_output_letters=2)\n", (767, 805), False, 'from mlxtend.text import generalize_names\n'), ((842, 903), 'mlxtend.text.generalize_names', 'generalize_names', (['"""Eto\'o, Samuel"""'], {'firstname_output_letters': '(0)'}), '("Eto\'o, Samuel", firstname_output_letters=0)\n', (858, 903), False, 'from mlxtend.text import generalize_names\n'), ((937, 987), 'mlxtend.text.generalize_names', 'generalize_names', (['"""Eto\'o, Samuel"""'], {'output_sep': '""", """'}), '("Eto\'o, Samuel", output_sep=\', \')\n', (953, 987), False, 'from mlxtend.text import generalize_names\n'), ((1013, 1063), 'mlxtend.text.generalize_names', 'generalize_names', (['"""Eto\'o, Samuel"""'], {'output_sep': '""", """'}), '("Eto\'o, Samuel", output_sep=\', \')\n', (1029, 1063), False, 'from mlxtend.text import generalize_names\n'), ((1090, 1140), 'mlxtend.text.generalize_names', 'generalize_names', (['"""<NAME>, Robin"""'], {'output_sep': '""", """'}), "('<NAME>, Robin', output_sep=', ')\n", (1106, 1140), False, 'from mlxtend.text import generalize_names\n'), ((1182, 1225), 'mlxtend.text.generalize_names', 'generalize_names', (['"""<NAME>"""'], {'output_sep': '""", """'}), "('<NAME>', output_sep=', ')\n", (1198, 1225), False, 'from mlxtend.text import generalize_names\n'), ((1267, 1310), 'mlxtend.text.generalize_names', 'generalize_names', (['"""<NAME>"""'], {'output_sep': '""", """'}), "('<NAME>', output_sep=', ')\n", (1283, 1310), False, 'from mlxtend.text import generalize_names\n'), ((1354, 1405), 'mlxtend.text.generalize_names', 'generalize_names', (['"""<NAME>, Rafael"""'], {'output_sep': 
'""", """'}), "('<NAME>, Rafael', output_sep=', ')\n", (1370, 1405), False, 'from mlxtend.text import generalize_names\n'), ((1449, 1475), 'mlxtend.text.generalize_names', 'generalize_names', (['"""<NAME>"""'], {}), "('<NAME>')\n", (1465, 1475), False, 'from mlxtend.text import generalize_names\n')] |
from nanoget import get_input
from argparse import ArgumentParser
from nanoplot import utils
from .version import __version__
from nanoplotter import check_valid_time_and_sort, Plot
from os import path
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
def main():
args = get_args()
merged_df = get_input(source="summary", files=args.summary).set_index("readIDs") \
.merge(right=get_input(source="bam", files=args.bam).set_index("readIDs"),
how="left",
left_index=True,
right_index=True)
plot_retrotect(df=merged_df,
path=path.join(args.outdir, args.prefix),
figformat=args.format,
title=args.title,
hours=args.hours)
merged_df.dropna(axis="index", how="any").sort_values(by="start_time").to_csv(
path_or_buf=path.join(args.outdir, args.prefix) + "Retrotect_details.txt.gz",
sep="\t",
columns=["start_time"],
compression='gzip')
def get_args():
epilog = """"""
parser = ArgumentParser(
description="Get detection curve of nanopore experiment.",
epilog=epilog,
formatter_class=utils.custom_formatter,
add_help=False)
general = parser.add_argument_group(
title='General options')
general.add_argument("-h", "--help",
action="help",
help="show the help and exit")
general.add_argument("-v", "--version",
help="Print version and exit.",
action="version",
version='NanoComp {}'.format(__version__))
general.add_argument("-t", "--threads",
help="Set the allowed number of threads to be used by the script",
default=4,
type=int)
general.add_argument("-o", "--outdir",
help="Specify directory in which output has to be created.",
default=".")
general.add_argument("-p", "--prefix",
help="Specify an optional prefix to be used for the output files.",
default="",
type=str)
general.add_argument("--verbose",
help="Write log messages also to terminal.",
action="store_true")
visual = parser.add_argument_group(
title='Options for customizing the plots created')
visual.add_argument("-f", "--format",
help="Specify the output format of the plots.",
default="png",
type=str,
choices=['eps', 'jpeg', 'jpg', 'pdf', 'pgf', 'png', 'ps',
'raw', 'rgba', 'svg', 'svgz', 'tif', 'tiff'])
visual.add_argument("--title",
help="Add a title to all plots, requires quoting if using spaces",
type=str,
default=None)
visual.add_argument("--hours",
help="How many hours to plot in the graph",
type=int,
default=8)
target = parser.add_argument_group(
title="Input data sources, requires a bam and a summary file.")
target.add_argument("--summary",
help="Data is a summary file generated by albacore.",
nargs='+',
metavar="files",
required=True)
target.add_argument("--bam",
help="Data as a sorted bam file.",
nargs='+',
metavar="files",
required=True)
return parser.parse_args()
def plot_retrotect(df, path, figformat="png", title=None, hours=8):
dfs = check_valid_time_and_sort(
df=df,
timescol="start_time",
days=hours / 24,
warning=False)
dfs["start_time"] = dfs["start_time"].astype('timedelta64[m]') # ?! dtype float64
cum_yield_reads = Plot(
path=path + "CumulativeYieldPlot_NumberOfReads." + figformat,
title="Cumulative yield")
ax = sns.regplot(
x=dfs['start_time'],
y=np.log10(dfs['index'] + 1),
x_ci=None,
fit_reg=False,
color="blue",
scatter_kws={"s": 1})
aligned_df = dfs.drop('index', axis=1) \
.dropna(axis="index", how="any") \
.reset_index(drop=True) \
.reset_index()
ax = sns.regplot(
x=aligned_df['start_time'],
y=np.log10(aligned_df["index"] + 1),
x_ci=None,
fit_reg=False,
color="red",
scatter_kws={"s": 1},
ax=ax)
yticks = [10**i for i in range(10) if not 10**i > 10 * dfs["index"].max()]
ax.set(
xlabel='Run time (minutes)',
yticks=np.log10(yticks),
yticklabels=yticks,
ylabel='Cumulative yield in log transformed number of reads',
title=title or cum_yield_reads.title)
fig = ax.get_figure()
cum_yield_reads.fig = fig
fig.savefig(cum_yield_reads.path, format=figformat, dpi=100, bbox_inches="tight")
plt.close("all")
if __name__ == '__main__':
main()
| [
"argparse.ArgumentParser",
"matplotlib.pyplot.close",
"nanoplotter.Plot",
"nanoplotter.check_valid_time_and_sort",
"numpy.log10",
"nanoget.get_input",
"os.path.join"
] | [((1081, 1229), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Get detection curve of nanopore experiment."""', 'epilog': 'epilog', 'formatter_class': 'utils.custom_formatter', 'add_help': '(False)'}), "(description='Get detection curve of nanopore experiment.',\n epilog=epilog, formatter_class=utils.custom_formatter, add_help=False)\n", (1095, 1229), False, 'from argparse import ArgumentParser\n'), ((3895, 3986), 'nanoplotter.check_valid_time_and_sort', 'check_valid_time_and_sort', ([], {'df': 'df', 'timescol': '"""start_time"""', 'days': '(hours / 24)', 'warning': '(False)'}), "(df=df, timescol='start_time', days=hours / 24,\n warning=False)\n", (3920, 3986), False, 'from nanoplotter import check_valid_time_and_sort, Plot\n'), ((4126, 4223), 'nanoplotter.Plot', 'Plot', ([], {'path': "(path + 'CumulativeYieldPlot_NumberOfReads.' + figformat)", 'title': '"""Cumulative yield"""'}), "(path=path + 'CumulativeYieldPlot_NumberOfReads.' + figformat, title=\n 'Cumulative yield')\n", (4130, 4223), False, 'from nanoplotter import check_valid_time_and_sort, Plot\n'), ((5226, 5242), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (5235, 5242), True, 'import matplotlib.pyplot as plt\n'), ((630, 665), 'os.path.join', 'path.join', (['args.outdir', 'args.prefix'], {}), '(args.outdir, args.prefix)\n', (639, 665), False, 'from os import path\n'), ((4297, 4323), 'numpy.log10', 'np.log10', (["(dfs['index'] + 1)"], {}), "(dfs['index'] + 1)\n", (4305, 4323), True, 'import numpy as np\n'), ((4632, 4665), 'numpy.log10', 'np.log10', (["(aligned_df['index'] + 1)"], {}), "(aligned_df['index'] + 1)\n", (4640, 4665), True, 'import numpy as np\n'), ((4918, 4934), 'numpy.log10', 'np.log10', (['yticks'], {}), '(yticks)\n', (4926, 4934), True, 'import numpy as np\n'), ((886, 921), 'os.path.join', 'path.join', (['args.outdir', 'args.prefix'], {}), '(args.outdir, args.prefix)\n', (895, 921), False, 'from os import path\n'), ((327, 374), 'nanoget.get_input', 'get_input', ([], {'source': '"""summary"""', 'files': 'args.summary'}), "(source='summary', files=args.summary)\n", (336, 374), False, 'from nanoget import get_input\n'), ((419, 458), 'nanoget.get_input', 'get_input', ([], {'source': '"""bam"""', 'files': 'args.bam'}), "(source='bam', files=args.bam)\n", (428, 458), False, 'from nanoget import get_input\n')] |
# -*- coding:utf-8 -*-
import ymz294
import mml
import time
class Sequencer:
# initialize.
# @param psgplayer ymz294.PSGPlayer instance
def __init__(self, psgplayer):
self.psgplayer = psgplayer
# play sound by MML string
# @param chA_MML a MML string for PSG channel A
# @param chB_MML a MML string for PSG channel B
# @param chC_MML a MML string for PSG channel C
# @param core_freq frequency of the octave 4's A
def playMML(self, chA_MML, chB_MML="", chC_MML="", core_freq=440):
parser = mml.Parser(core_freq)
chA_seq = parser.parse(chA_MML)
chB_seq = parser.parse(chB_MML)
chC_seq = parser.parse(chC_MML)
wait_a = 0
index_a = 0
wait_b = 0
index_b = 0
wait_c = 0
index_c = 0
eom = 0 #End of mml
while(index_a < len(chA_seq) or index_b < len(chB_seq) or index_c < len(chC_seq)):
if wait_a <= 0:
if index_a < len(chA_seq):
seq = chA_seq[index_a]
wait_a = seq["duration"]
self.__play_tone__(ymz294.PSGPlayer.CHANNEL_A, seq)
index_a += 1
else:
self.psgplayer.setMute(True, ymz294.PSGPlayer.CHANNEL_A)
eom |= 1
if wait_b <= 0:
if index_b < len(chB_seq):
seq = chB_seq[index_b]
wait_b = seq["duration"]
self.__play_tone__(ymz294.PSGPlayer.CHANNEL_B, seq)
index_b += 1
else:
self.psgplayer.setMute(True, ymz294.PSGPlayer.CHANNEL_B)
eom |= 2
if wait_c <= 0:
if index_c < len(chC_seq):
seq = chC_seq[index_c]
wait_c = seq["duration"]
self.__play_tone__(ymz294.PSGPlayer.CHANNEL_C, seq)
index_c += 1
else:
self.psgplayer.setMute(True, ymz294.PSGPlayer.CHANNEL_C)
eom |= 4
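            # Sleep until the next event on any channel; channels that already hit
            # the end of their MML (eom bit set) get +10 s so they don't win the min().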
wait = min(wait_a + ((eom & 1) == 1) * 10, wait_b + ((eom & 2) == 2) * 10, wait_c + ((eom & 4) == 4) * 10)
time.sleep(wait)
if wait_a > 0: wait_a -= wait
if wait_b > 0: wait_b -= wait
if wait_c > 0: wait_c -= wait
time.sleep(max(wait_a, wait_b, wait_c))
def __play_tone__(self, channel, seq):
if seq["freq"] != 0:
self.psgplayer.setMute(False, channel)
self.psgplayer.playSound(channel, seq["freq"])
#print seq["freq"]
else:
#mute
self.psgplayer.setMute(True, channel)
#self.psgplayer.playSound(channel, 20000)
return
if seq["tie_slur"] == False:
env = self.psgplayer.getEnvelopType()
if env is not None and channel == ymz294.PSGPlayer.CHANNEL_A:
self.psgplayer.setEnvelopType(env)
| [
"mml.Parser",
"time.sleep"
] | [((549, 570), 'mml.Parser', 'mml.Parser', (['core_freq'], {}), '(core_freq)\n', (559, 570), False, 'import mml\n'), ((2331, 2347), 'time.sleep', 'time.sleep', (['wait'], {}), '(wait)\n', (2341, 2347), False, 'import time\n')] |
import random
import arcade
from ant import Ant
from colony import Colony
# TODO
# - Make food blobs 2x as big
# - Drop food blobs with the mouse
# - Food blob coordinate is always the central coordinate
# - Draw a line when backtracking
from settings import settings
class Arena(arcade.Window):
def __init__(self, width, height, title, generation_callback=None):
super().__init__(width, height, title)
self.wall_list = arcade.SpriteList(is_static=True, use_spatial_hash=True)
self.food_list = arcade.SpriteList(is_static=True, use_spatial_hash=True)
self.ant_list = arcade.SpriteList(use_spatial_hash=False)
self.physics_engine = None
if settings.MAX_FPS:
self.set_update_rate(1 / settings.MAX_FPS)
self.actual_fps = settings.MAX_FPS # Initializse to something
self.generation = 0
self.generation_callback = generation_callback # For testing purposes
def setup(self):
if settings.DRAW_BASE:
self.create_base()
for _ in range(settings.NUM_WALLS):
self.create_wall()
for _ in range(settings.NUM_FOOD_BLOBS):
self.create_food_blob(settings.FOOD_BLOB_SIZE)
self.colony = Colony()
for _ in range(settings.NUM_ANTS):
ant = Ant(
settings.SCREEN_WIDTH / 2, 0, self, self.colony, scale=settings.SCALE
)
self.ant_list.append(ant)
arcade.set_background_color(settings.FIELD_COLOR)
if self.generation_callback:
self.generation_callback(self.generation, self)
def create_base(self):
x = settings.SCREEN_WIDTH / 2
for y in range(0, round(20 * settings.SCALE), settings.WALL_THICKNESS()):
block = arcade.SpriteSolidColor(
settings.WALL_THICKNESS(),
settings.WALL_THICKNESS(),
settings.BASE_COLOR,
)
block.center_x = x - 8 * settings.SCALE
block.center_y = y
self.wall_list.append(block)
block = arcade.SpriteSolidColor(
settings.WALL_THICKNESS(),
settings.WALL_THICKNESS(),
settings.BASE_COLOR,
)
block.center_x = x + 8 * settings.SCALE
block.center_y = y
self.wall_list.append(block)
def create_wall(self):
def block_at(x, y):
block = arcade.SpriteSolidColor(
settings.WALL_THICKNESS(),
settings.WALL_THICKNESS(),
settings.WALL_COLOR,
)
block.center_x = x
block.center_y = y
wally.append(block)
while True:
wally = []
length = random.randint(settings.WALL_MIN(), settings.WALL_MAX())
if random.random() < 0.5:
# Horizontal
start_x = random.randint(0, settings.SCREEN_WIDTH - length)
y = random.randint(0, settings.SCREEN_HEIGHT)
for x in range(start_x, start_x + length, settings.WALL_THICKNESS()):
block_at(x, y)
else:
# Vertical
start_y = random.randint(0, settings.SCREEN_HEIGHT - length)
x = random.randint(0, settings.SCREEN_WIDTH)
for y in range(start_y, start_y + length, settings.WALL_THICKNESS()):
block_at(x, y)
for block in wally:
if arcade.check_for_collision_with_list(block, self.wall_list):
break # Oops, break it off, try a new wall
else:
for block in wally:
self.wall_list.append(block)
return
def create_food_blob(self, size=10, start_coo=None):
scale = settings.SCALE * 3
if start_coo:
start_x, start_y = start_coo
else:
start_x = random.randint(0, settings.SCREEN_WIDTH - size * scale)
start_y = random.randint(0, settings.SCREEN_HEIGHT - size * scale)
for x in range(start_x, start_x + size * scale, scale):
for y in range(start_y, start_y + size * scale, scale):
block = arcade.SpriteSolidColor(scale, scale, settings.FOOD_COLOR)
block.center_x = x
block.center_y = y
if not arcade.check_for_collision_with_list(block, self.wall_list):
self.food_list.append(block)
def on_draw(self):
# This command has to happen before we start drawing
arcade.start_render()
# Draw all the sprites.
self.wall_list.draw()
self.food_list.draw()
for ant in self.ant_list:
ant.draw()
# ant.draw_hit_box((255,0,0))
# def on_key_press(self, key, modifiers):
# """Called whenever a key is pressed. """
#
# if key == arcade.key.UP:
# self.player_sprite.change_y = MOVEMENT_SPEED
# elif key == arcade.key.DOWN:
# self.player_sprite.change_y = -MOVEMENT_SPEED
# elif key == arcade.key.LEFT:
# self.player_sprite.change_x = -MOVEMENT_SPEED
# elif key == arcade.key.RIGHT:
# self.player_sprite.change_x = MOVEMENT_SPEED
#
# def on_key_release(self, key, modifiers):
# """Called when the user releases a key. """
#
# if key == arcade.key.UP or key == arcade.key.DOWN:
# self.player_sprite.change_y = 0
# elif key == arcade.key.LEFT or key == arcade.key.RIGHT:
# self.player_sprite.change_x = 0
def on_update(self, delta_time):
self.colony.tick()
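        # Exponential moving average of the frame rate (roughly the last 100 frames).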
self.actual_fps = (99 * self.actual_fps + 1 / delta_time) / 100
food_per_100_turns = self.colony.food_per_turn() * 100
self.set_caption(
f"{settings.SCREEN_TITLE} - {self.actual_fps:0.0f} fps, {food_per_100_turns:0.0f} food per 100 turns - {self.generation}"
)
arcade.start_render()
for ant in self.ant_list:
ant.move()
        self.generation += 1  #!! Duplicate alongside colony.tick()
if self.generation_callback:
self.generation_callback(self.generation, self)
if __name__ == "__main__":
window = Arena(settings.SCREEN_WIDTH, settings.SCREEN_HEIGHT, settings.SCREEN_TITLE)
window.setup()
arcade.run()
| [
"ant.Ant",
"random.randint",
"arcade.run",
"arcade.SpriteSolidColor",
"settings.settings.WALL_MIN",
"arcade.start_render",
"settings.settings.WALL_MAX",
"random.random",
"arcade.check_for_collision_with_list",
"arcade.set_background_color",
"settings.settings.WALL_THICKNESS",
"arcade.SpriteList",
"colony.Colony"
] | [((6368, 6380), 'arcade.run', 'arcade.run', ([], {}), '()\n', (6378, 6380), False, 'import arcade\n'), ((418, 474), 'arcade.SpriteList', 'arcade.SpriteList', ([], {'is_static': '(True)', 'use_spatial_hash': '(True)'}), '(is_static=True, use_spatial_hash=True)\n', (435, 474), False, 'import arcade\n'), ((500, 556), 'arcade.SpriteList', 'arcade.SpriteList', ([], {'is_static': '(True)', 'use_spatial_hash': '(True)'}), '(is_static=True, use_spatial_hash=True)\n', (517, 556), False, 'import arcade\n'), ((581, 622), 'arcade.SpriteList', 'arcade.SpriteList', ([], {'use_spatial_hash': '(False)'}), '(use_spatial_hash=False)\n', (598, 622), False, 'import arcade\n'), ((1214, 1222), 'colony.Colony', 'Colony', ([], {}), '()\n', (1220, 1222), False, 'from colony import Colony\n'), ((1437, 1486), 'arcade.set_background_color', 'arcade.set_background_color', (['settings.FIELD_COLOR'], {}), '(settings.FIELD_COLOR)\n', (1464, 1486), False, 'import arcade\n'), ((4570, 4591), 'arcade.start_render', 'arcade.start_render', ([], {}), '()\n', (4589, 4591), False, 'import arcade\n'), ((5990, 6011), 'arcade.start_render', 'arcade.start_render', ([], {}), '()\n', (6009, 6011), False, 'import arcade\n'), ((1285, 1359), 'ant.Ant', 'Ant', (['(settings.SCREEN_WIDTH / 2)', '(0)', 'self', 'self.colony'], {'scale': 'settings.SCALE'}), '(settings.SCREEN_WIDTH / 2, 0, self, self.colony, scale=settings.SCALE)\n', (1288, 1359), False, 'from ant import Ant\n'), ((1705, 1730), 'settings.settings.WALL_THICKNESS', 'settings.WALL_THICKNESS', ([], {}), '()\n', (1728, 1730), False, 'from settings import settings\n'), ((3923, 3978), 'random.randint', 'random.randint', (['(0)', '(settings.SCREEN_WIDTH - size * scale)'], {}), '(0, settings.SCREEN_WIDTH - size * scale)\n', (3937, 3978), False, 'import random\n'), ((4001, 4057), 'random.randint', 'random.randint', (['(0)', '(settings.SCREEN_HEIGHT - size * scale)'], {}), '(0, settings.SCREEN_HEIGHT - size * scale)\n', (4015, 4057), False, 'import random\n'), ((1794, 1819), 'settings.settings.WALL_THICKNESS', 'settings.WALL_THICKNESS', ([], {}), '()\n', (1817, 1819), False, 'from settings import settings\n'), ((1837, 1862), 'settings.settings.WALL_THICKNESS', 'settings.WALL_THICKNESS', ([], {}), '()\n', (1860, 1862), False, 'from settings import settings\n'), ((2100, 2125), 'settings.settings.WALL_THICKNESS', 'settings.WALL_THICKNESS', ([], {}), '()\n', (2123, 2125), False, 'from settings import settings\n'), ((2143, 2168), 'settings.settings.WALL_THICKNESS', 'settings.WALL_THICKNESS', ([], {}), '()\n', (2166, 2168), False, 'from settings import settings\n'), ((2462, 2487), 'settings.settings.WALL_THICKNESS', 'settings.WALL_THICKNESS', ([], {}), '()\n', (2485, 2487), False, 'from settings import settings\n'), ((2505, 2530), 'settings.settings.WALL_THICKNESS', 'settings.WALL_THICKNESS', ([], {}), '()\n', (2528, 2530), False, 'from settings import settings\n'), ((2757, 2776), 'settings.settings.WALL_MIN', 'settings.WALL_MIN', ([], {}), '()\n', (2774, 2776), False, 'from settings import settings\n'), ((2778, 2797), 'settings.settings.WALL_MAX', 'settings.WALL_MAX', ([], {}), '()\n', (2795, 2797), False, 'from settings import settings\n'), ((2814, 2829), 'random.random', 'random.random', ([], {}), '()\n', (2827, 2829), False, 'import random\n'), ((2892, 2941), 'random.randint', 'random.randint', (['(0)', '(settings.SCREEN_WIDTH - length)'], {}), '(0, settings.SCREEN_WIDTH - length)\n', (2906, 2941), False, 'import random\n'), ((2962, 3003), 'random.randint', 'random.randint', (['(0)', 
'settings.SCREEN_HEIGHT'], {}), '(0, settings.SCREEN_HEIGHT)\n', (2976, 3003), False, 'import random\n'), ((3196, 3246), 'random.randint', 'random.randint', (['(0)', '(settings.SCREEN_HEIGHT - length)'], {}), '(0, settings.SCREEN_HEIGHT - length)\n', (3210, 3246), False, 'import random\n'), ((3267, 3307), 'random.randint', 'random.randint', (['(0)', 'settings.SCREEN_WIDTH'], {}), '(0, settings.SCREEN_WIDTH)\n', (3281, 3307), False, 'import random\n'), ((3480, 3539), 'arcade.check_for_collision_with_list', 'arcade.check_for_collision_with_list', (['block', 'self.wall_list'], {}), '(block, self.wall_list)\n', (3516, 3539), False, 'import arcade\n'), ((4215, 4273), 'arcade.SpriteSolidColor', 'arcade.SpriteSolidColor', (['scale', 'scale', 'settings.FOOD_COLOR'], {}), '(scale, scale, settings.FOOD_COLOR)\n', (4238, 4273), False, 'import arcade\n'), ((3062, 3087), 'settings.settings.WALL_THICKNESS', 'settings.WALL_THICKNESS', ([], {}), '()\n', (3085, 3087), False, 'from settings import settings\n'), ((3366, 3391), 'settings.settings.WALL_THICKNESS', 'settings.WALL_THICKNESS', ([], {}), '()\n', (3389, 3391), False, 'from settings import settings\n'), ((4367, 4426), 'arcade.check_for_collision_with_list', 'arcade.check_for_collision_with_list', (['block', 'self.wall_list'], {}), '(block, self.wall_list)\n', (4403, 4426), False, 'import arcade\n')] |
import tkinter
from tkinter import messagebox
from tkinter import Button
window = tkinter.Tk()
HEIGHT = window.winfo_height()
WIDTH = window.winfo_width()
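# Note: until the window has been drawn (update()/mainloop), winfo_height() and
# winfo_width() report the default 1x1 size, so this prints "Height: 1, Width: 1".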
print(f'Height: {HEIGHT}, Width: {WIDTH}')
def click_button():
msg = messagebox.showinfo("Hello!", "You clicked a button!")
# initializing button
button_widget = Button(
window,
text='Click me!',
command=click_button
)
# placing a button to center of the window
button_widget.place(
relx=0.5,
rely=0.5,
anchor=tkinter.CENTER
)
window.mainloop()
| [
"tkinter.Button",
"tkinter.messagebox.showinfo",
"tkinter.Tk"
] | [((83, 95), 'tkinter.Tk', 'tkinter.Tk', ([], {}), '()\n', (93, 95), False, 'import tkinter\n'), ((325, 379), 'tkinter.Button', 'Button', (['window'], {'text': '"""Click me!"""', 'command': 'click_button'}), "(window, text='Click me!', command=click_button)\n", (331, 379), False, 'from tkinter import Button\n'), ((231, 285), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""Hello!"""', '"""You clicked a button!"""'], {}), "('Hello!', 'You clicked a button!')\n", (250, 285), False, 'from tkinter import messagebox\n')] |
import numpy as np
import matplotlib.pyplot as plt
from IPython.core.debugger import set_trace
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import glob
import os
from skimage.io import imread
from skimage.transform import resize
from torch.utils import data
import os
from config import Config
import pandas as pd
from utils.rotate_fcns import rotate_2d,rotate_3d,flip_2d
class DataLoader2D(data.Dataset):
def __init__(self, split,path_to_data):
self.split=split
self.path=path_to_data
data = pd.read_csv("utils/rot_dict_unique.csv")
self.rots_table=data.loc[:,:].to_numpy()
xl_file = pd.ExcelFile(self.path + os.sep+'ListOfData.xlsx')
data = pd.read_excel(xl_file,header=None)
folders=data.loc[:,0].tolist()
names=data.loc[:,1].tolist()
file_names=[]
for folder,name in zip(folders,names):
file_names.append((self.path + os.sep + folder.split('\\')[-1] + os.sep + name).replace('.mhd',''))
if self.split=='training':
file_names=file_names[:int(len(file_names)*0.8)]
elif self.split=='testing':
file_names=file_names[int(len(file_names)*0.8):-20]
self.file_names=[]
self.vec=[]
self.flip=[]
self.lbls=[]
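        # Build one sample per (volume, flip state, unique rotation); the row index
        # into the rotation table doubles as the classification label.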
for file in file_names:
for flip in [0,1]:
for unique_rot_num in range(self.rots_table.shape[0]):
self.file_names.append(file)
self.vec.append(self.rots_table[unique_rot_num,:])
self.flip.append(flip)
self.lbls.append(unique_rot_num)
def __len__(self):
return len(self.file_names)
def __getitem__(self, index):
file_name=self.file_names[index]
r=self.vec[index][0:3]
flip=self.flip[index]
flip=np.array([flip])
img_list=[]
folders=['mean','max','std']
for folder in folders:
for k in range(3):
tmp=imread(file_name + '_' + folder + '_'+ str(k+1) +'.png' )
tmp=tmp.astype(np.float32)/255-0.5
img_list.append(tmp)
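        # img_list now holds nine single-channel projections (mean/max/std x three
        # views), scaled to roughly [-0.5, 0.5]; they are stacked channel-wise below.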
# if self.split=='training':
# max_mult_change=0.3
# for k in range(len(img_list)):
# mult_change=1+torch.rand(1).numpy()[0]*2*max_mult_change-max_mult_change
# img_list[k]=img_list[k]*mult_change
# max_add_change=0.3
# for k in range(len(img_list)):
# add_change=torch.rand(1).numpy()[0]*2*max_add_change-max_add_change
# img_list[k]=img_list[k]+add_change
imgs=np.stack(img_list,axis=2)
for k in range(0,9,3):
if flip==1:
imgs[:,:,k:k+3]=flip_2d(imgs[:,:,k:k+3])
imgs[:,:,k:k+3]=rotate_2d(imgs[:,:,k:k+3],r)
imgs=torch.from_numpy(imgs.copy())
imgs=imgs.permute(2,0,1)
lbl=self.lbls[index]
lbl2=np.zeros(self.rots_table.shape[0]).astype(np.float32)
lbl2[lbl]=1
lbl=torch.from_numpy(lbl2)
return imgs,lbl
| [
"numpy.stack",
"utils.rotate_fcns.rotate_2d",
"pandas.read_csv",
"pandas.ExcelFile",
"numpy.zeros",
"pandas.read_excel",
"numpy.array",
"utils.rotate_fcns.flip_2d",
"torch.from_numpy"
] | [((599, 639), 'pandas.read_csv', 'pd.read_csv', (['"""utils/rot_dict_unique.csv"""'], {}), "('utils/rot_dict_unique.csv')\n", (610, 639), True, 'import pandas as pd\n'), ((717, 769), 'pandas.ExcelFile', 'pd.ExcelFile', (["(self.path + os.sep + 'ListOfData.xlsx')"], {}), "(self.path + os.sep + 'ListOfData.xlsx')\n", (729, 769), True, 'import pandas as pd\n'), ((783, 818), 'pandas.read_excel', 'pd.read_excel', (['xl_file'], {'header': 'None'}), '(xl_file, header=None)\n', (796, 818), True, 'import pandas as pd\n'), ((2093, 2109), 'numpy.array', 'np.array', (['[flip]'], {}), '([flip])\n', (2101, 2109), True, 'import numpy as np\n'), ((3102, 3128), 'numpy.stack', 'np.stack', (['img_list'], {'axis': '(2)'}), '(img_list, axis=2)\n', (3110, 3128), True, 'import numpy as np\n'), ((3553, 3575), 'torch.from_numpy', 'torch.from_numpy', (['lbl2'], {}), '(lbl2)\n', (3569, 3575), False, 'import torch\n'), ((3281, 3314), 'utils.rotate_fcns.rotate_2d', 'rotate_2d', (['imgs[:, :, k:k + 3]', 'r'], {}), '(imgs[:, :, k:k + 3], r)\n', (3290, 3314), False, 'from utils.rotate_fcns import rotate_2d, rotate_3d, flip_2d\n'), ((3215, 3243), 'utils.rotate_fcns.flip_2d', 'flip_2d', (['imgs[:, :, k:k + 3]'], {}), '(imgs[:, :, k:k + 3])\n', (3222, 3243), False, 'from utils.rotate_fcns import rotate_2d, rotate_3d, flip_2d\n'), ((3458, 3492), 'numpy.zeros', 'np.zeros', (['self.rots_table.shape[0]'], {}), '(self.rots_table.shape[0])\n', (3466, 3492), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
from aws_cdk import (core as cdk)
from core.monorepo_stack import MonorepoStack
from core.pipelines_stack import PipelineStack
app = cdk.App()
core = MonorepoStack(app, "MonoRepoStack")
PipelineStack(app, "PipelinesStack", core.exported_monorepo)
app.synth()
| [
"aws_cdk.core.App",
"core.monorepo_stack.MonorepoStack",
"core.pipelines_stack.PipelineStack"
] | [((158, 167), 'aws_cdk.core.App', 'cdk.App', ([], {}), '()\n', (165, 167), True, 'from aws_cdk import core as cdk\n'), ((175, 210), 'core.monorepo_stack.MonorepoStack', 'MonorepoStack', (['app', '"""MonoRepoStack"""'], {}), "(app, 'MonoRepoStack')\n", (188, 210), False, 'from core.monorepo_stack import MonorepoStack\n'), ((211, 271), 'core.pipelines_stack.PipelineStack', 'PipelineStack', (['app', '"""PipelinesStack"""', 'core.exported_monorepo'], {}), "(app, 'PipelinesStack', core.exported_monorepo)\n", (224, 271), False, 'from core.pipelines_stack import PipelineStack\n')] |
import datetime
import uuid
from django.contrib.auth import get_user_model
from django.db import models
from django.urls import reverse
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from taggit.managers import TaggableManager
from taggit.models import GenericUUIDTaggedItemBase, TaggedItemBase
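# Through model so taggit can tag objects whose primary key is a UUID
# (the stock TaggedItem assumes integer object ids).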
class UUIDTaggedItem(GenericUUIDTaggedItemBase, TaggedItemBase):
class Meta:
verbose_name = _("Tag")
verbose_name_plural = _("Tags")
class PublishedManager(models.Manager):
def get_queryset(self):
return super(PublishedManager, self).get_queryset().filter(status="published")
class Question(models.Model):
STATUS_CHOICES = (
("draft", "Draft"),
("published", "Published"),
)
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
question_text = models.CharField(max_length=200)
slug = models.SlugField(max_length=250, unique_for_date='pub_date')
pub_date = models.DateTimeField('date published', default=timezone.now)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
status = models.CharField(max_length=10, choices=STATUS_CHOICES, default="draft")
created_by = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)
objects = models.Manager()
published = PublishedManager()
tags = TaggableManager(through=UUIDTaggedItem)
class Meta:
ordering = ('-pub_date',)
def __str__(self):
return self.question_text
def get_absolute_url(self):
return reverse('polls:question_detail', args=[self.id])
def was_published_recently(self):
return self.pub_date >= timezone.now() - datetime.timedelta(days=1)
def get_update_url(self):
return reverse('polls:question_update', args=[self.id])
def get_delete_url(self):
return reverse('polls:question_delete', args=[self.id])
def can_update(self, user):
return user.is_superuser or self.created_by == user
def can_delete(self, user):
return user.is_superuser or self.created_by == user
class Choice(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
created_at = models.DateTimeField(auto_now_add=True)
question = models.ForeignKey(Question, on_delete=models.CASCADE, related_name='choices')
def __str__(self):
return self.choice_text
def get_absolute_url(self):
        return reverse('choice_detail', args=[str(self.id)])
| [
"django.db.models.CharField",
"django.db.models.DateTimeField",
"django.db.models.ForeignKey",
"django.utils.timezone.now",
"django.contrib.auth.get_user_model",
"django.db.models.Manager",
"django.db.models.SlugField",
"django.urls.reverse",
"django.db.models.IntegerField",
"taggit.managers.TaggableManager",
"datetime.timedelta",
"django.db.models.UUIDField",
"django.utils.translation.ugettext_lazy"
] | [((786, 856), 'django.db.models.UUIDField', 'models.UUIDField', ([], {'primary_key': '(True)', 'default': 'uuid.uuid4', 'editable': '(False)'}), '(primary_key=True, default=uuid.uuid4, editable=False)\n', (802, 856), False, 'from django.db import models\n'), ((877, 909), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (893, 909), False, 'from django.db import models\n'), ((921, 981), 'django.db.models.SlugField', 'models.SlugField', ([], {'max_length': '(250)', 'unique_for_date': '"""pub_date"""'}), "(max_length=250, unique_for_date='pub_date')\n", (937, 981), False, 'from django.db import models\n'), ((997, 1057), 'django.db.models.DateTimeField', 'models.DateTimeField', (['"""date published"""'], {'default': 'timezone.now'}), "('date published', default=timezone.now)\n", (1017, 1057), False, 'from django.db import models\n'), ((1075, 1114), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1095, 1114), False, 'from django.db import models\n'), ((1132, 1167), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (1152, 1167), False, 'from django.db import models\n'), ((1181, 1253), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'choices': 'STATUS_CHOICES', 'default': '"""draft"""'}), "(max_length=10, choices=STATUS_CHOICES, default='draft')\n", (1197, 1253), False, 'from django.db import models\n'), ((1349, 1365), 'django.db.models.Manager', 'models.Manager', ([], {}), '()\n', (1363, 1365), False, 'from django.db import models\n'), ((1412, 1451), 'taggit.managers.TaggableManager', 'TaggableManager', ([], {'through': 'UUIDTaggedItem'}), '(through=UUIDTaggedItem)\n', (1427, 1451), False, 'from taggit.managers import TaggableManager\n'), ((2188, 2258), 'django.db.models.UUIDField', 'models.UUIDField', ([], {'primary_key': '(True)', 'default': 'uuid.uuid4', 'editable': '(False)'}), '(primary_key=True, default=uuid.uuid4, editable=False)\n', (2204, 2258), False, 'from django.db import models\n'), ((2277, 2309), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (2293, 2309), False, 'from django.db import models\n'), ((2322, 2352), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (2341, 2352), False, 'from django.db import models\n'), ((2370, 2409), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (2390, 2409), False, 'from django.db import models\n'), ((2426, 2503), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Question'], {'on_delete': 'models.CASCADE', 'related_name': '"""choices"""'}), "(Question, on_delete=models.CASCADE, related_name='choices')\n", (2443, 2503), False, 'from django.db import models\n'), ((445, 453), 'django.utils.translation.ugettext_lazy', '_', (['"""Tag"""'], {}), "('Tag')\n", (446, 453), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((484, 493), 'django.utils.translation.ugettext_lazy', '_', (['"""Tags"""'], {}), "('Tags')\n", (485, 493), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1290, 1306), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (1304, 1306), False, 'from django.contrib.auth import get_user_model\n'), ((1609, 1657), 'django.urls.reverse', 'reverse', (['"""polls:question_detail"""'], {'args': 
'[self.id]'}), "('polls:question_detail', args=[self.id])\n", (1616, 1657), False, 'from django.urls import reverse\n'), ((1819, 1867), 'django.urls.reverse', 'reverse', (['"""polls:question_update"""'], {'args': '[self.id]'}), "('polls:question_update', args=[self.id])\n", (1826, 1867), False, 'from django.urls import reverse\n'), ((1914, 1962), 'django.urls.reverse', 'reverse', (['"""polls:question_delete"""'], {'args': '[self.id]'}), "('polls:question_delete', args=[self.id])\n", (1921, 1962), False, 'from django.urls import reverse\n'), ((1729, 1743), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (1741, 1743), False, 'from django.utils import timezone\n'), ((1746, 1772), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (1764, 1772), False, 'import datetime\n')] |
import RPi.GPIO as GPIO
class Pin:
def __init__(self, pin_id):
self.pin_id = pin_id
        self.state = GPIO.LOW
GPIO.setmode(GPIO.BCM)
def sense(self):
GPIO.setup(self.pin_id, GPIO.IN)
output = GPIO.input(self.pin_id)
self.state = output
return output
def set(self, state):
GPIO.setup(self.pin_id, GPIO.OUT)
GPIO.output(self.pin_id, state)
self.state = state
def get(self):
return self.state
@classmethod
def cleanup(cls):
GPIO.cleanup()
| [
"RPi.GPIO.setmode",
"RPi.GPIO.cleanup",
"RPi.GPIO.setup",
"RPi.GPIO.input",
"RPi.GPIO.output"
] | [((131, 153), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BCM'], {}), '(GPIO.BCM)\n', (143, 153), True, 'import RPi.GPIO as GPIO\n'), ((184, 216), 'RPi.GPIO.setup', 'GPIO.setup', (['self.pin_id', 'GPIO.IN'], {}), '(self.pin_id, GPIO.IN)\n', (194, 216), True, 'import RPi.GPIO as GPIO\n'), ((234, 257), 'RPi.GPIO.input', 'GPIO.input', (['self.pin_id'], {}), '(self.pin_id)\n', (244, 257), True, 'import RPi.GPIO as GPIO\n'), ((343, 376), 'RPi.GPIO.setup', 'GPIO.setup', (['self.pin_id', 'GPIO.OUT'], {}), '(self.pin_id, GPIO.OUT)\n', (353, 376), True, 'import RPi.GPIO as GPIO\n'), ((385, 416), 'RPi.GPIO.output', 'GPIO.output', (['self.pin_id', 'state'], {}), '(self.pin_id, state)\n', (396, 416), True, 'import RPi.GPIO as GPIO\n'), ((538, 552), 'RPi.GPIO.cleanup', 'GPIO.cleanup', ([], {}), '()\n', (550, 552), True, 'import RPi.GPIO as GPIO\n')] |
# Shared helper functions used by several files are collected here
from skimage.measure import label
import numpy as np
import copy
# If the largest connected component is smaller than 2000 px, treat the segmentation as wrong and return an empty mask; otherwise keep the largest component, and also keep the second-largest one if its area is close to the largest
def refine_output(output):
refine = np.zeros((1280, 2440), dtype=np.uint8)
if len(np.where(output > 0)[0]) > 0:
output = label(output)
top = output.max()
area_list = []
for i in range(1, top + 1):
area = len(np.where(output == i)[0])
area_list.append(area)
max_area = max(area_list)
max_index = area_list.index(max_area)
if max_area < 2000:
return refine
else:
refine[output == max_index + 1] = 1
if top > 1:
temp_list = copy.deepcopy(area_list)
del temp_list[max_index]
second_max_area = max(temp_list)
second_max_index = area_list.index(second_max_area)
if (max_area / second_max_area) < 1.2:
refine[output == second_max_index + 1] = 1
return refine
else:
return refine
else:
return refine
else:
return refine
# If two teeth's segmentation results overlap by more than 40% of one tooth's area, assume that tooth was segmented onto another tooth by mistake and drop its result
def judge_overlap(id, output_all):
ids = [11, 12, 13, 14, 15, 16, 17, 18, 21, 22, 23, 24, 25, 26, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 41, 42, 43,
44, 45, 46, 47, 48]
index = ids.index(id)
    output_id = output_all[:, :, index].reshape(1, -1)  # each channel stores one tooth's segmentation result
output_id_area = output_id.sum(1) + 0.001
refine = output_all
if index <= 29:
end = index + 3
    elif index == 30:  # the second-to-last tooth has only one tooth in front of it
end = index + 2
else:
        end = index + 1  # the last tooth needs no overlap check
    for i in range(index + 1, end):  # compare each tooth with the two teeth in front of it, since a tooth may be missing; hence two
output_other = output_all[:, :, i].reshape(1, -1)
output_other_area = output_other.sum(1) + 0.001
inter = (output_id * output_other).sum(1) + 0.001
if (inter / output_id_area) >= 0.4:
refine[:, :, index] = 0
if (inter / output_other_area) >= 0.4:
refine[:, :, i] = 0
return refine
# Given a model, print its parameter counts
def get_model_params(net):
total_params = sum(p.numel() for p in net.parameters())
print(f'{total_params:,} total parameters.')
total_trainable_params = sum(p.numel() for p in net.parameters() if p.requires_grad)
print(f'{total_trainable_params:,} training parameters.')
print()
| [
"copy.deepcopy",
"skimage.measure.label",
"numpy.where",
"numpy.zeros"
] | [((201, 239), 'numpy.zeros', 'np.zeros', (['(1280, 2440)'], {'dtype': 'np.uint8'}), '((1280, 2440), dtype=np.uint8)\n', (209, 239), True, 'import numpy as np\n'), ((298, 311), 'skimage.measure.label', 'label', (['output'], {}), '(output)\n', (303, 311), False, 'from skimage.measure import label\n'), ((251, 271), 'numpy.where', 'np.where', (['(output > 0)'], {}), '(output > 0)\n', (259, 271), True, 'import numpy as np\n'), ((730, 754), 'copy.deepcopy', 'copy.deepcopy', (['area_list'], {}), '(area_list)\n', (743, 754), False, 'import copy\n'), ((421, 442), 'numpy.where', 'np.where', (['(output == i)'], {}), '(output == i)\n', (429, 442), True, 'import numpy as np\n')] |
from signature import Structure, add_signature
class SpamTheOldWay:
def __init__(self, name, price):
self.name = name
self.price = price
@add_signature("name", "price")
class Spam(Structure):
pass
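# add_signature presumably attaches an __init__/__signature__ built from the given
# field names to Structure subclasses (the signature module itself is not shown here).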
if __name__ == "__main__":
spam_0 = Spam(price=0.618, name="wexort")
print(spam_0.name, spam_0.price)
spam_1 = Spam("hughluo", 42)
print(spam_1.name, spam_1.price)
| [
"signature.add_signature"
] | [((162, 192), 'signature.add_signature', 'add_signature', (['"""name"""', '"""price"""'], {}), "('name', 'price')\n", (175, 192), False, 'from signature import Structure, add_signature\n')] |
import os
import shutil
import arrow
import glob
def get_date_range(start, end):
"""get the date range of the used months"""
start = start[:4] + '-' + start[4:]
startdate = arrow.get(start)
end = end[:4] + '-' + end[4:]
enddate = arrow.get(end)
return arrow.Arrow.range('month', startdate, enddate)
def get_train(date, quan_name):
"""get the file name of the training data"""
date0 = date[:4] + '-' + date[4:]
first = arrow.get(date0)
quan = quan_name.split("m_")[0]
m = -1 * int(quan)
second = first.shift(months=-1)
second = second.format("YYYYMM")
first = first.shift(months=m)
first = first.format('YYYYMM')
ret = first + '-' + second + '_train.csv'
return ret
def get_test(date):
"""get the file name of the test data"""
ret = date + 'pred.csv'
return ret
startDate = '201805'
endDate = '201805'
rootDir = 'D:/rongshidata'
# dataInfo = 'experiment_data_1'
dataInfo = 'experiment_data_2'
periodInfo = 'monthly'
usedQuantile = []
usedQuantile.extend(['6m_1_16', '6m_3_18'])
usedQuantile.extend(['12m_1_16', '12m_3_18'])
usedQuantile.extend(['3m_1_31', '3m_3_33'])
usedQuantile.extend(['24m_1_13', '24m_3_15'])
usedQuantile.extend(['36m_1_11', '36m_3_13'])
dir1st = 'D:/copy{0}_{1}'.format(startDate, endDate)
if not os.path.exists(dir1st):
os.mkdir(dir1st)
closePriceFile = '{0}/{1}/close.txt'.format(rootDir, dataInfo)
shutil.copy(closePriceFile, dir1st)
featureDir = '{0}/{1}/{2}/end/feature_v*'.format(rootDir, dataInfo, periodInfo)
featureList = glob.glob(featureDir)
for feature in featureList:
featureName = os.path.basename(feature)
for Date in get_date_range(startDate, endDate):
Date = Date.format('YYYYMM')
testDataDir = '{0}/{1}/end/{2}/testing'.format(dir1st, periodInfo, featureName)
if not os.path.exists(testDataDir):
os.makedirs(testDataDir)
testFile = feature + '/testing/' + get_test(Date)
shutil.copy(testFile, testDataDir)
trainDataList = glob.glob(feature + '/training/*m_*_*')
for quantile in trainDataList:
quantileName = os.path.basename(quantile)
if quantileName not in usedQuantile:
continue
trainDataDir = '{0}/{1}/end/{2}/training/{3}'.format(dir1st, periodInfo, featureName, quantileName)
if not os.path.exists(trainDataDir):
os.makedirs(trainDataDir)
trainFile = quantile + '/' + get_train(Date, quantileName)
shutil.copy(trainFile, trainDataDir)
print(quantile, 'DONE')
| [
"os.mkdir",
"arrow.get",
"os.makedirs",
"os.path.basename",
"os.path.exists",
"arrow.Arrow.range",
"glob.glob",
"shutil.copy"
] | [((1417, 1452), 'shutil.copy', 'shutil.copy', (['closePriceFile', 'dir1st'], {}), '(closePriceFile, dir1st)\n', (1428, 1452), False, 'import shutil\n'), ((1548, 1569), 'glob.glob', 'glob.glob', (['featureDir'], {}), '(featureDir)\n', (1557, 1569), False, 'import glob\n'), ((187, 203), 'arrow.get', 'arrow.get', (['start'], {}), '(start)\n', (196, 203), False, 'import arrow\n'), ((252, 266), 'arrow.get', 'arrow.get', (['end'], {}), '(end)\n', (261, 266), False, 'import arrow\n'), ((278, 324), 'arrow.Arrow.range', 'arrow.Arrow.range', (['"""month"""', 'startdate', 'enddate'], {}), "('month', startdate, enddate)\n", (295, 324), False, 'import arrow\n'), ((458, 474), 'arrow.get', 'arrow.get', (['date0'], {}), '(date0)\n', (467, 474), False, 'import arrow\n'), ((1308, 1330), 'os.path.exists', 'os.path.exists', (['dir1st'], {}), '(dir1st)\n', (1322, 1330), False, 'import os\n'), ((1336, 1352), 'os.mkdir', 'os.mkdir', (['dir1st'], {}), '(dir1st)\n', (1344, 1352), False, 'import os\n'), ((1616, 1641), 'os.path.basename', 'os.path.basename', (['feature'], {}), '(feature)\n', (1632, 1641), False, 'import os\n'), ((1967, 2001), 'shutil.copy', 'shutil.copy', (['testFile', 'testDataDir'], {}), '(testFile, testDataDir)\n', (1978, 2001), False, 'import shutil\n'), ((2027, 2066), 'glob.glob', 'glob.glob', (["(feature + '/training/*m_*_*')"], {}), "(feature + '/training/*m_*_*')\n", (2036, 2066), False, 'import glob\n'), ((1834, 1861), 'os.path.exists', 'os.path.exists', (['testDataDir'], {}), '(testDataDir)\n', (1848, 1861), False, 'import os\n'), ((1875, 1899), 'os.makedirs', 'os.makedirs', (['testDataDir'], {}), '(testDataDir)\n', (1886, 1899), False, 'import os\n'), ((2133, 2159), 'os.path.basename', 'os.path.basename', (['quantile'], {}), '(quantile)\n', (2149, 2159), False, 'import os\n'), ((2522, 2558), 'shutil.copy', 'shutil.copy', (['trainFile', 'trainDataDir'], {}), '(trainFile, trainDataDir)\n', (2533, 2558), False, 'import shutil\n'), ((2366, 2394), 'os.path.exists', 'os.path.exists', (['trainDataDir'], {}), '(trainDataDir)\n', (2380, 2394), False, 'import os\n'), ((2412, 2437), 'os.makedirs', 'os.makedirs', (['trainDataDir'], {}), '(trainDataDir)\n', (2423, 2437), False, 'import os\n')] |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
"""Module to access site configuration in siteconfig.ini."""
from ConfigParser import ConfigParser
from flask import g
FILENAME = '/srv/oclubs/siteconfig.ini'
def _done(commit=True):
if g.get('siteconfigParser', None):
if commit:
if g.get('siteconfigHasWrites', False):
with open(FILENAME, 'w') as configfile:
g.siteconfigParser.write(configfile)
g.siteconfigParser = None
del g.siteconfigParser
g.siteconfigHasWrites = None
del g.siteconfigHasWrites
def _get_parser():
if g.get('siteconfigParser', None):
return g.siteconfigParser
g.siteconfigParser = ConfigParser()
g.siteconfigParser.read(FILENAME)
return g.siteconfigParser
def get_config(name):
"""
Get a site configuration boolean.
:param basestring name: name of site configuration
:returns: value of site configuration
:rtype: bool
"""
return _get_parser().getboolean('siteconfig', name)
def set_config(name, value):
"""
Set a site configuration boolean.
:param basestring name: name of site configuration
:param bool value: new value of site configuration
"""
# ConfigParser stores bool in memory, and getboolean expects string
_get_parser().set('siteconfig', name, str(int(value)))
g.siteconfigHasWrites = True
| [
"ConfigParser.ConfigParser",
"flask.g.siteconfigParser.read",
"flask.g.get",
"flask.g.siteconfigParser.write"
] | [((245, 276), 'flask.g.get', 'g.get', (['"""siteconfigParser"""', 'None'], {}), "('siteconfigParser', None)\n", (250, 276), False, 'from flask import g\n'), ((626, 657), 'flask.g.get', 'g.get', (['"""siteconfigParser"""', 'None'], {}), "('siteconfigParser', None)\n", (631, 657), False, 'from flask import g\n'), ((719, 733), 'ConfigParser.ConfigParser', 'ConfigParser', ([], {}), '()\n', (731, 733), False, 'from ConfigParser import ConfigParser\n'), ((738, 771), 'flask.g.siteconfigParser.read', 'g.siteconfigParser.read', (['FILENAME'], {}), '(FILENAME)\n', (761, 771), False, 'from flask import g\n'), ((312, 347), 'flask.g.get', 'g.get', (['"""siteconfigHasWrites"""', '(False)'], {}), "('siteconfigHasWrites', False)\n", (317, 347), False, 'from flask import g\n'), ((425, 461), 'flask.g.siteconfigParser.write', 'g.siteconfigParser.write', (['configfile'], {}), '(configfile)\n', (449, 461), False, 'from flask import g\n')] |
import unittest
import library
NUM_CORPUS = '''
On the 5th of May every year, Mexicans celebrate Cinco de Mayo. This tradition
began in 1845 (the twenty-second anniversary of the Mexican Revolution), and
is the 1st example of a national independence holiday becoming popular in the
Western Hemisphere. (The Fourth of July didn't see regular celebration in the
US until 15-20 years later.) It is celebrated by 77.9% of the population--
trending toward 80.
'''
class TestCase(unittest.TestCase):
# Helper function
def assert_extract(self, text, extractors, *expected):
actual = [x[1].group(0) for x in library.scan(text, extractors)]
self.assertEquals(str(actual), str([x for x in expected]))
# First unit test; prove that if we scan NUM_CORPUS looking for mixed_ordinals,
# we find "5th" and "1st".
def test_mixed_ordinals(self):
self.assert_extract(NUM_CORPUS, library.mixed_ordinals, '5th', '1st')
# Second unit test; prove that if we look for integers, we find four of them.
def test_integers(self):
self.assert_extract(NUM_CORPUS, library.integers, '1845', '15', '20', '80')
# Third unit test; prove that if we look for integers where there are none, we get no results.
def test_no_integers(self):
self.assert_extract("no integers", library.integers)
def test_dates(self):
self.assert_extract('I was born on 2015-12-31.', library.dates_iso8601, '2015-12-31')
def test_dates_no_integers(self):
self.assert_extract("I was born on 2015-12-31", library.dates_iso8601)
def test_dates_fmt2(self):
self.assert_extract('I was born on 25 Jan 2017.', library.dates_fmt2, '25 Jan 2017')
# Checks for the iso date format with full Date 2018-06-21 15:54:14.87Z
def test_dates_1(self):
self.assert_extract(' 2018-06-21 15:54:14.876 ', library.dates_newiso8601, '2018-06-21 15:54:14.876')
# Checks only for the date
def test_dates_2(self):
self.assert_extract(' 2018-06-21 ', library.dates_newiso8601, '2018-06-21')
# Checks with hours and min
def test_dates_3(self):
self.assert_extract(' 2018-06-21 15:54', library.dates_newiso8601, '2018-06-21 15:54')
# Checks with hours and min with seconds
def test_dates_4(self):
self.assert_extract(' 2018-06-21 15:54:00 ', library.dates_newiso8601, '2018-06-21 15:54:00')
# Checks with hours and min with seconds with milliseconds
def test_dates_5(self):
self.assert_extract(' 2018-06-21 15:54:00.123 ', library.dates_newiso8601, '2018-06-21 15:54:00.123')
# Checks with hours and min with seconds with milliseconds and timezone(Z)
def test_dates_6(self):
self.assert_extract(' 2018-06-21 15:54:00.123Z ', library.dates_newiso8601, '2018-06-21 15:54:00.123Z')
# Checks with hours and min with seconds with milliseconds and timezone offset -0800
def test_dates_7(self):
self.assert_extract(' 2018-06-21 15:54:00.123-0800 ', library.dates_newiso8601, '2018-06-21 15:54:00.123-0800')
# Checks with hours and min with seconds with milliseconds and timezone offset -0800
def test_dates_8(self):
self.assert_extract(' 2018-06-21 15:54:00.123-0800 ', library.dates_newiso8601, '2018-06-21 15:54:00.123-0800')
# Checks for date format and , after the month
def test_dates_fmt3(self):
self.assert_extract(' 21 Jun, 2018 ', library.dates_fmt3, '21 Jun, 2018')
# Checks for date format - regular
def test_dates_fmt31(self):
self.assert_extract(' 21 Jun 2018 ', library.dates_fmt3, '21 Jun 2018')
    # Support comma-separated grouping
def test_numbers(self):
self.assert_extract(' 123,456,789 ', library.comma_seperator, '123,456,789')
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"library.scan"
] | [((3867, 3882), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3880, 3882), False, 'import unittest\n'), ((683, 713), 'library.scan', 'library.scan', (['text', 'extractors'], {}), '(text, extractors)\n', (695, 713), False, 'import library\n')] |
#!/usr/bin/env python
# Copyright 2020-2022 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rospy
import torch
import cv2
from vision_msgs.msg import Detection2DArray
from sensor_msgs.msg import Image as ROS_Image
from opendr_bridge import ROSBridge
from opendr.perception.pose_estimation import get_bbox
from opendr.perception.pose_estimation import LightweightOpenPoseLearner
from opendr.perception.fall_detection import FallDetectorLearner
from opendr.engine.data import Image
from opendr.engine.target import BoundingBox, BoundingBoxList
class FallDetectionNode:
def __init__(self, input_image_topic="/usb_cam/image_raw", output_image_topic="/opendr/image_fall_annotated",
fall_annotations_topic="/opendr/falls", device="cuda"):
"""
Creates a ROS Node for fall detection
:param input_image_topic: Topic from which we are reading the input image
:type input_image_topic: str
:param output_image_topic: Topic to which we are publishing the annotated image (if None, we are not publishing
annotated image)
:type output_image_topic: str
:param fall_annotations_topic: Topic to which we are publishing the annotations (if None, we are not publishing
annotated fall annotations)
:type fall_annotations_topic: str
:param device: device on which we are running inference ('cpu' or 'cuda')
:type device: str
"""
if output_image_topic is not None:
self.image_publisher = rospy.Publisher(output_image_topic, ROS_Image, queue_size=10)
else:
self.image_publisher = None
if fall_annotations_topic is not None:
self.fall_publisher = rospy.Publisher(fall_annotations_topic, Detection2DArray, queue_size=10)
else:
self.fall_publisher = None
self.input_image_topic = input_image_topic
self.bridge = ROSBridge()
# Initialize the pose estimation
self.pose_estimator = LightweightOpenPoseLearner(device=device, num_refinement_stages=2,
mobilenet_use_stride=False,
half_precision=False)
self.pose_estimator.download(path=".", verbose=True)
self.pose_estimator.load("openpose_default")
self.fall_detector = FallDetectorLearner(self.pose_estimator)
def listen(self):
"""
Start the node and begin processing input data
"""
rospy.init_node('opendr_fall_detection', anonymous=True)
rospy.Subscriber(self.input_image_topic, ROS_Image, self.callback)
rospy.loginfo("Fall detection node started!")
rospy.spin()
def callback(self, data):
"""
        Callback that processes the input data and publishes to the corresponding topics
:param data: input message
:type data: sensor_msgs.msg.Image
"""
# Convert sensor_msgs.msg.Image into OpenDR Image
image = self.bridge.from_ros_image(data, encoding='bgr8')
# Run fall detection
detections = self.fall_detector.infer(image)
# Get an OpenCV image back
image = image.opencv()
bboxes = BoundingBoxList([])
for detection in detections:
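            # each detection is assumed to carry the fallen/not-fallen category at index 0 and the matching pose at index 2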
fallen = detection[0].data
pose = detection[2]
if fallen == 1:
color = (0, 0, 255)
x, y, w, h = get_bbox(pose)
bbox = BoundingBox(left=x, top=y, width=w, height=h, name=0)
bboxes.data.append(bbox)
cv2.rectangle(image, (x, y), (x + w, y + h), color, 2)
cv2.putText(image, "Detected fallen person", (5, 55), cv2.FONT_HERSHEY_SIMPLEX,
0.75, color, 1, cv2.LINE_AA)
# Convert detected boxes to ROS type and publish
ros_boxes = self.bridge.to_ros_boxes(bboxes)
if self.fall_publisher is not None:
self.fall_publisher.publish(ros_boxes)
if self.image_publisher is not None:
message = self.bridge.to_ros_image(Image(image), encoding='bgr8')
self.image_publisher.publish(message)
if __name__ == '__main__':
    # Select the device (GPU if available, otherwise CPU) for running inference
try:
if torch.cuda.is_available():
print("GPU found.")
device = 'cuda'
else:
print("GPU not found. Using CPU instead.")
device = 'cpu'
except:
device = 'cpu'
fall_detection_node = FallDetectionNode(device=device)
fall_detection_node.listen()
| [
"rospy.Subscriber",
"opendr_bridge.ROSBridge",
"cv2.putText",
"opendr.engine.target.BoundingBox",
"opendr.perception.pose_estimation.get_bbox",
"opendr.engine.target.BoundingBoxList",
"rospy.spin",
"opendr.perception.pose_estimation.LightweightOpenPoseLearner",
"rospy.Publisher",
"rospy.loginfo",
"torch.cuda.is_available",
"rospy.init_node",
"opendr.engine.data.Image",
"cv2.rectangle",
"opendr.perception.fall_detection.FallDetectorLearner"
] | [((2444, 2455), 'opendr_bridge.ROSBridge', 'ROSBridge', ([], {}), '()\n', (2453, 2455), False, 'from opendr_bridge import ROSBridge\n'), ((2528, 2648), 'opendr.perception.pose_estimation.LightweightOpenPoseLearner', 'LightweightOpenPoseLearner', ([], {'device': 'device', 'num_refinement_stages': '(2)', 'mobilenet_use_stride': '(False)', 'half_precision': '(False)'}), '(device=device, num_refinement_stages=2,\n mobilenet_use_stride=False, half_precision=False)\n', (2554, 2648), False, 'from opendr.perception.pose_estimation import LightweightOpenPoseLearner\n'), ((2903, 2943), 'opendr.perception.fall_detection.FallDetectorLearner', 'FallDetectorLearner', (['self.pose_estimator'], {}), '(self.pose_estimator)\n', (2922, 2943), False, 'from opendr.perception.fall_detection import FallDetectorLearner\n'), ((3054, 3110), 'rospy.init_node', 'rospy.init_node', (['"""opendr_fall_detection"""'], {'anonymous': '(True)'}), "('opendr_fall_detection', anonymous=True)\n", (3069, 3110), False, 'import rospy\n'), ((3119, 3185), 'rospy.Subscriber', 'rospy.Subscriber', (['self.input_image_topic', 'ROS_Image', 'self.callback'], {}), '(self.input_image_topic, ROS_Image, self.callback)\n', (3135, 3185), False, 'import rospy\n'), ((3194, 3239), 'rospy.loginfo', 'rospy.loginfo', (['"""Fall detection node started!"""'], {}), "('Fall detection node started!')\n", (3207, 3239), False, 'import rospy\n'), ((3248, 3260), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (3258, 3260), False, 'import rospy\n'), ((3773, 3792), 'opendr.engine.target.BoundingBoxList', 'BoundingBoxList', (['[]'], {}), '([])\n', (3788, 3792), False, 'from opendr.engine.target import BoundingBox, BoundingBoxList\n'), ((4854, 4879), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4877, 4879), False, 'import torch\n'), ((2045, 2106), 'rospy.Publisher', 'rospy.Publisher', (['output_image_topic', 'ROS_Image'], {'queue_size': '(10)'}), '(output_image_topic, ROS_Image, queue_size=10)\n', (2060, 2106), False, 'import rospy\n'), ((2243, 2315), 'rospy.Publisher', 'rospy.Publisher', (['fall_annotations_topic', 'Detection2DArray'], {'queue_size': '(10)'}), '(fall_annotations_topic, Detection2DArray, queue_size=10)\n', (2258, 2315), False, 'import rospy\n'), ((3995, 4009), 'opendr.perception.pose_estimation.get_bbox', 'get_bbox', (['pose'], {}), '(pose)\n', (4003, 4009), False, 'from opendr.perception.pose_estimation import get_bbox\n'), ((4033, 4086), 'opendr.engine.target.BoundingBox', 'BoundingBox', ([], {'left': 'x', 'top': 'y', 'width': 'w', 'height': 'h', 'name': '(0)'}), '(left=x, top=y, width=w, height=h, name=0)\n', (4044, 4086), False, 'from opendr.engine.target import BoundingBox, BoundingBoxList\n'), ((4145, 4199), 'cv2.rectangle', 'cv2.rectangle', (['image', '(x, y)', '(x + w, y + h)', 'color', '(2)'], {}), '(image, (x, y), (x + w, y + h), color, 2)\n', (4158, 4199), False, 'import cv2\n'), ((4216, 4329), 'cv2.putText', 'cv2.putText', (['image', '"""Detected fallen person"""', '(5, 55)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.75)', 'color', '(1)', 'cv2.LINE_AA'], {}), "(image, 'Detected fallen person', (5, 55), cv2.\n FONT_HERSHEY_SIMPLEX, 0.75, color, 1, cv2.LINE_AA)\n", (4227, 4329), False, 'import cv2\n'), ((4684, 4696), 'opendr.engine.data.Image', 'Image', (['image'], {}), '(image)\n', (4689, 4696), False, 'from opendr.engine.data import Image\n')] |
import pytest
import subprocess
import testinfra
import pprint
# scope='session' uses the same container for all the tests;
# scope='function' uses a new container per test function.
@pytest.fixture(scope='session')
def host(request):
# build local ./Dockerfile
subprocess.check_call(['docker', 'build', '-t', 'web', '.'])
# run a container
docker_id = subprocess.check_output(
['docker', 'run', '-d', 'web']).decode().strip()
# return a testinfra connection to the container
yield testinfra.get_host("docker://" + docker_id)
# at the end of the test suite, destroy the container
subprocess.check_call(['docker', 'rm', '-f', docker_id])
@pytest.mark.parametrize('name,version', [
('python3', '3.6.4'),
])
def test_container_version(host, name, version):
pkg = host.package(name)
assert pkg.is_installed
assert pkg.version.startswith(version)
@pytest.mark.parametrize('name,version', [
('Flask', '1.0.2'),
])
def test_pip_version(host, name, version):
pkgs = host.pip_package.get_packages()
pkg = pkgs[name]
assert pkg
assert pkg['version'] == version
def test_sshd_disabled(host):
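    # sshd may not exist inside the container at all; any lookup failure is treated as "not running"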
try:
sshd = host.service('sshd')
assert not sshd.is_running
return
except:
return
    pytest.fail('sshd should not be running')
| [
"subprocess.check_output",
"pytest.fail",
"pytest.fixture",
"pytest.mark.parametrize",
"testinfra.get_host",
"subprocess.check_call"
] | [((185, 216), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (199, 216), False, 'import pytest\n'), ((660, 723), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""name,version"""', "[('python3', '3.6.4')]"], {}), "('name,version', [('python3', '3.6.4')])\n", (683, 723), False, 'import pytest\n'), ((874, 935), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""name,version"""', "[('Flask', '1.0.2')]"], {}), "('name,version', [('Flask', '1.0.2')])\n", (897, 935), False, 'import pytest\n'), ((267, 327), 'subprocess.check_call', 'subprocess.check_call', (["['docker', 'build', '-t', 'web', '.']"], {}), "(['docker', 'build', '-t', 'web', '.'])\n", (288, 327), False, 'import subprocess\n'), ((601, 657), 'subprocess.check_call', 'subprocess.check_call', (["['docker', 'rm', '-f', docker_id]"], {}), "(['docker', 'rm', '-f', docker_id])\n", (622, 657), False, 'import subprocess\n'), ((1230, 1271), 'pytest.fail', 'pytest.fail', (['"""sshd should not be running"""'], {}), "('sshd should not be running')\n", (1241, 1271), False, 'import pytest\n'), ((499, 542), 'testinfra.get_host', 'testinfra.get_host', (["('docker://' + docker_id)"], {}), "('docker://' + docker_id)\n", (517, 542), False, 'import testinfra\n'), ((362, 417), 'subprocess.check_output', 'subprocess.check_output', (["['docker', 'run', '-d', 'web']"], {}), "(['docker', 'run', '-d', 'web'])\n", (385, 417), False, 'import subprocess\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Chemical RDF converter & fixer.
Version 2.3 (Dec 28, 14:25:00 2021)
Added mol sanitization and try/catch
run by calling
rdf_fixer.fix(filename or path)
(optional: returns list of new filenames)
@author: <NAME> (DocMinus)
license: MIT License
Copyright (c) 2021 DocMinus
"""
import os
import re
import pandas as pd
from collections import OrderedDict
import rdkit.Chem as rdc
from rdkit.Chem.MolStandardize import rdMolStandardize
from rdkit import RDLogger
# Important, or else waaaay too many RDkit details in output
RDLogger.logger().setLevel(RDLogger.CRITICAL)
def fix(RDF_IN: str) -> "zipped":
"""Retrieving all .RDF files in a subdirectory recursively.
Then submit to conversion (i.e. fixing)
Parts of os.walk snippet originated on Reddit somewhere, forgot where though.
Args:
RDF_IN = filename, alt. directory and subdirectories to scan
Returns:
zipped List of the new file names
Order: input_file; fixed_file; csv_file
"""
file_list_in = []
file_list_ok = []
file_list_csv = []
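    # accept either a single .rdf file or a directory tree that is walked recursively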
if os.path.isfile(RDF_IN):
if RDF_IN.endswith(("rdf", "RDF")):
file_list_in.append(os.path.join(RDF_IN))
file_list_ok.append(os.path.splitext(RDF_IN)[0] + "_fixed.rdf")
file_list_csv.append(os.path.splitext(RDF_IN)[0] + ".csv")
elif os.path.isdir(RDF_IN):
for subdir, dirs, files in os.walk(RDF_IN):
for file in files:
if file.endswith(("rdf", "RDF")):
file_list_in.append(os.path.join(subdir, file))
file_list_ok.append(
os.path.join(subdir, os.path.splitext(file)[0] + "_fixed.rdf")
)
file_list_csv.append(
os.path.join(subdir, os.path.splitext(file)[0] + ".csv")
)
zipped = zip(file_list_in, file_list_ok, file_list_csv)
# note: zip gets unpacked upon usage and disappears
for file_in, file_ok, file_csv in zipped:
print("Converting file: ", file_in)
convert(file_in, file_ok, file_csv)
return zip(file_list_in, file_list_ok, file_list_csv)
def convert(RDF_IN_FILE: str, RDF_OK_FILE: str, RDF_CSV_FILE: str):
"""original script with single file usage wrapped into this 'convert' function
Args:
RDF_IN_FILE: original input RDF file including path
RDF_OK_FILE: new RDF file with corrections (if any)
RDF_CSV_FILE: resulting CSV file (incl. path)
Returns:
None - output are the new files.
"""
##############################################################
# Fix erroneous entries (empty mols) by deleting those entries
with open(RDF_IN_FILE) as file_in:
seed_line = file_in.readline()
previous_line = seed_line # get first line as "seed" for upcoming loop
# seed_line is later reused again
with open(RDF_OK_FILE, "w") as file_out:
write_to_file = True
for current_line in open(RDF_IN_FILE):
# prevent first line from being written twice
if current_line.startswith("$RDFILE") and previous_line.startswith(
"$RDFILE"
):
continue
# correct molecule block
# True
write_to_file = current_line.startswith(
"$RXN"
) and previous_line.startswith("$RFMT")
# else for empty molecule block
write_to_file = not (
current_line.startswith("$DTYPE") and previous_line.startswith("$RFMT")
)
if write_to_file:
file_out.write(previous_line)
previous_line = current_line
file_out.write(previous_line)
# the last line is not caught in the loop, hence written out here.
# end of fix section
####################
def scifi_or_reax(in_file: str) -> str:
"""Determine if Scifinder or Reaxys rdf file
(Scifinder contains 'SCHEME' in the enumeration)
Returned string is multiple string.replace() methods,
to render script independent of source
Args:
in_file (str): filename of the corrected file (in principle,
the original one would work as well;
alt even global variable possible instead)
Returns:
SCI_REAX (str): "RXN:" (scifinder) or string "ROOT:" (reaxys)
"""
f = open(in_file)
NUMBER_OF_LINES = 3
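        # read down to the third line, which carries the scheme/root enumeration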
for i in range(NUMBER_OF_LINES):
line_three = f.readline()
return "RXN:" if re.match(".+SCHEME", line_three) else "ROOT:"
def build_empty_table(in_file: str, SCI_REAX: str):
"""Scans file three times to build a pandas df used as main table
Args:
in_file (str): filename of the corrected file: RDF_OK_FILE
SCI_REAX (str): "RXN:" (scifinder) or string "ROOT:" (reaxys) used in replacements
Returns:
da_table (object): the (empty) pandas df working table
max_reagents (int): number for later positioning of reagents smiles in table
max_products (int): <> (products)
"""
# get the IDs and use as row index
list_of_IDs = [] # i.e. rows
for line in open(in_file):
if line.startswith("$RFMT"):
list_of_IDs.append(line.strip().split(" ")[2])
# determine max no of reagents/products
flag = 0
max_reagents = 0
max_products = 0
for line in open(in_file):
if line.startswith("$RXN") | flag == 1:
flag = 1
if re.match("\s\s[0-9]\s\s[0-9]\n", line):
# analyse the " y z" line.
# implies: y reactants, z products.
x = line.strip().split(" ")
number_reagents = int(x[0])
number_products = int(x[1])
if number_reagents > max_reagents:
max_reagents = number_reagents
if number_products > max_products:
max_products = number_products
flag = 0
# build the column headers
fields = []
for i in range(max_reagents):
tmp_name = "Reagent" + str(i)
fields.append(tmp_name)
for i in range(max_products):
tmp_name = "Product" + str(i)
fields.append(tmp_name)
for line in open(in_file):
if line.startswith("$DTYPE"):
fields.append((line.strip().split(" ")[1]).replace(SCI_REAX, ""))
# finally, build the table
da_table = pd.DataFrame(
index=list_of_IDs, columns=list(OrderedDict.fromkeys(fields))
)
return da_table, max_reagents, max_products
##############################################################
# Initialize Table and diverse variables
# get string replacement variable depending on source
SCI_REAX = scifi_or_reax(RDF_OK_FILE)
# build table according to files specs. get max no of reagents & products at the same time.
my_table, max_reagents, max_products = build_empty_table(RDF_OK_FILE, SCI_REAX)
####################################################################
# Here comes the actual data extraction and addition to pandas table
#
############### GET MOLECULES #############
# (structure same for Reaxys and Scifinder)
#
flag = 0
# 0 = generic
# 1 = start of reaction block
# 2 = single MOL (molecules)
# 9 = skip
molecule = []
number_reagents = 0
number_products = 0
number_molecules = 0
iterate_molecules = 0
mol_string = ""
rxn_id = ""
multiple_row_text = ""
# get first line as "seed" for upcoming loop
previous_line = seed_line
for line in open(RDF_OK_FILE):
current_line = line
# get reaction ID
if current_line.startswith("$RFMT"):
rxn_id = str(current_line.strip().split(" ")[2])
flag = 0
continue
# start of a new reaction block
if current_line.startswith("$RXN") | flag == 1:
flag = 1
if re.match("\s\s[0-9]\s\s[0-9]\n", current_line):
# analyse the " y z" line. Not hard-coding this since it might change?
# implies: y reactants, z product.
x = current_line.strip().split(" ")
number_reagents = int(x[0])
number_products = int(x[1])
number_molecules = number_reagents + number_products
# create fresh list of max no of molecules, for use in $MOL block
# yes, always same size within a *given file*, can change from file to file(!)
for i in range(number_molecules):
molecule.append([])
if current_line == "\n" or re.match("\s\s[0-9]\s\s[0-9]\n", current_line):
# checks for empty lines and the number of molecules lines and skips them
continue
# after determining a block, find the molecules within the block
if (current_line == "$MOL\n") | (flag == 2):
flag = 2
if current_line != "$MOL\n" and (iterate_molecules < number_molecules):
molecule[iterate_molecules].append(current_line)
if current_line == "M END\n":
iterate_molecules += 1
# end of the complete reaction block
if current_line.startswith("$D") & (previous_line == "M END\n"):
flag = 9 # could just use flag = 0(?)
# rebuild the string of a molecule
counter_reagents = 0
counter_products = 0
num_mols_this_instance = len(molecule)
# should always be max_mol now, so doesn't matter
for mol in range(num_mols_this_instance):
mol_string = "".join(molecule[mol])
if mol_string == "":
smiles = ""
else:
mol = rdc.MolFromMolBlock(mol_string, sanitize=False)
if mol is None:
continue
try:
rdc.SanitizeMol(mol)
except ValueError as _e:
print("Error: ", _e)
continue
mol.UpdatePropertyCache(strict=False)
rdc.SanitizeMol(
mol,
sanitizeOps=(
rdc.SANITIZE_ALL
^ rdc.SANITIZE_CLEANUP
^ rdc.SANITIZE_PROPERTIES
),
)
mol = rdMolStandardize.Normalize(mol)
smiles = rdc.MolToSmiles(mol)
# some mols might be empty, this if/else positions reagents/products accordingly
if counter_reagents + 1 <= number_reagents:
my_table.loc[
rxn_id, my_table.columns[counter_reagents]
] = smiles
counter_reagents += 1
else:
my_table.loc[
rxn_id, my_table.columns[counter_products + max_reagents]
] = smiles
counter_products += 1
# reset variables
iterate_molecules = 0
molecule = []
mol_string = ""
previous_line = current_line
################################
#
######### GET single line data ##########
#
# Nota bene: this will write first line of multiline columns as well
# but doesn't matter since those will be correctly overwritten later on
rxn_id = ""
previous_line = seed_line
for line in open(RDF_OK_FILE):
current_line = line
# get reaction ID
if current_line.startswith("$RFMT"):
rxn_id = str(current_line.strip().split(" ")[2])
# flag = 0
continue
if previous_line.startswith("$DTYPE") and current_line.startswith("$DATUM"):
current_column = previous_line.strip().split(" ")[1].replace(SCI_REAX, "")
row_text = current_line.replace("\n", " ")
# flag = 1
my_table.loc[rxn_id, current_column] = row_text.replace("$DATUM ", "")
previous_line = current_line
################################
#
### Extract Experimental Procedure ###
# Multiline, both,
# Reaxys and Scifinder
#
flag = 0
# 0 = generic
# 5 = exp procedure text over multiple lines
# 9 = skip
rxn_id = ""
multiple_row_text = ""
previous_line = seed_line
for line in open(RDF_OK_FILE):
current_line = line
# get reaction ID
if current_line.startswith("$RFMT"):
rxn_id = str(current_line.strip().split(" ")[2])
flag = 0
continue
# get experimental section
if SCI_REAX == "RXN:":
if re.match(".+EXP_PROC", previous_line) or flag == 5:
# start of the experimental section. spans over multiple line
if re.match(".+EXP_PROC", previous_line):
current_column = (
previous_line.strip().split(" ")[1].replace(SCI_REAX, "")
)
if re.match(".+NOTES", current_line) or re.match(
".+REFERENCE.+", current_line
):
# this is the end of experimental block
flag = 9
my_table.loc[rxn_id, current_column] = multiple_row_text.replace(
"$DATUM ", ""
)
multiple_row_text = ""
else:
multiple_row_text += current_line.replace("\n", " ")
flag = 5
else: # Reaxys
if re.match(".+TXT", previous_line) or flag == 5:
# start of the experimental section. spans over multiple line
if re.match(".+TXT", previous_line):
current_column = (
previous_line.strip().split(" ")[1].replace(SCI_REAX, "")
)
if re.match(".+STP", current_line):
# this is the end of experimental block
flag = 9
my_table.loc[rxn_id, current_column] = multiple_row_text.replace(
"$DATUM ", ""
)
multiple_row_text = ""
else:
multiple_row_text += current_line.replace("\n", " ")
flag = 5
previous_line = current_line
################################
#
######## Extract Notes ########
# (only Scifinder)
#
flag = 0
# 0 = generic
# 6 = notes, text potentially over multiple lines
# 9 = skip
rxn_id = ""
multiple_row_text = ""
previous_line = seed_line
for line in open(RDF_OK_FILE):
current_line = line
# get reaction ID
if current_line.startswith("$RFMT"):
rxn_id = str(current_line.strip().split(" ")[2])
flag = 0
continue
# Get Notes
if re.match(".+NOTES", previous_line) or flag == 6:
flag = 6
# start of the Notes section. might span over multiple line
if re.match(".+NOTES", previous_line):
current_column = (
previous_line.strip().split(" ")[1].replace(SCI_REAX, "")
)
if current_line.startswith("$DTYPE"):
# this is the end of Notes block
flag = 9
my_table.loc[rxn_id, current_column] = multiple_row_text.replace(
"$DATUM ", ""
)
multiple_row_text = ""
else:
multiple_row_text += current_line.replace("\n", " ")
flag = 6
previous_line = current_line
################################
#
######## Extract title ########
# (only Scifinder)
#
flag = 0
# 0 = generic
# 7 = title
# 9 = skip
rxn_id = ""
multiple_row_text = ""
previous_line = seed_line
for line in open(RDF_OK_FILE):
current_line = line
# get reaction ID
if current_line.startswith("$RFMT"):
rxn_id = str(current_line.strip().split(" ")[2])
flag = 0
continue
# Get Title
if re.match(".+TITLE", previous_line) or flag == 7:
flag = 7
# start of the Title section. might span over multiple line
if re.match(".+TITLE", previous_line):
current_column = (
previous_line.strip().split(" ")[1].replace(SCI_REAX, "")
)
if current_line.startswith("$DTYPE"):
# this is the end of title block
flag = 9
my_table.loc[rxn_id, current_column] = multiple_row_text.replace(
"$DATUM ", ""
)
multiple_row_text = ""
else:
multiple_row_text += current_line.replace("\n", " ")
flag = 7
previous_line = current_line
################################
#
####### Extract authors ########
# (only Scifinder)
#
flag = 0
# 0 = generic
# 8 = authors
# 9 = skip
rxn_id = ""
multiple_row_text = ""
previous_line = seed_line
for line in open(RDF_OK_FILE):
current_line = line
# get reaction ID
if current_line.startswith("$RFMT"):
rxn_id = str(current_line.strip().split(" ")[2])
flag = 0
continue
# Get Authors
if re.match(".+AUTHOR", previous_line) or flag == 8:
flag = 8
if re.match(".+AUTHOR", previous_line):
current_column = (
previous_line.strip().split(" ")[1].replace(SCI_REAX, "")
)
if current_line.startswith("$DTYPE"):
# this is the end of author block
flag = 9
my_table.loc[rxn_id, current_column] = multiple_row_text.replace(
"$DATUM ", ""
)
multiple_row_text = ""
else:
multiple_row_text += current_line.replace("\n", " ")
flag = 8
previous_line = current_line
################################
#
### Extract citation (i.e. source) ###
#
# This is done last, since for Scifinder
# this is the last entry in a file
# not necessary for reaxys, but it will go through it anyway
# (less ifs and doesn't screw anything up)
#
flag = 0
# 0 = generic
# 9 = skip
# 4 = citation
rxn_id = ""
multiple_row_text = ""
previous_line = seed_line
for line in open(RDF_OK_FILE):
current_line = line
# get reaction ID
if current_line.startswith("$RFMT"):
rxn_id = str(current_line.strip().split(" ")[2])
flag = 0
continue
# Get Citation
if re.match(".+CITATION", previous_line) or flag == 4:
flag = 4
if re.match(".+CITATION", previous_line):
current_column = (
previous_line.strip().split(" ")[1].replace(SCI_REAX, "")
)
if current_line.startswith("$DTYPE"):
# this is the end of citation block
flag = 9
my_table.loc[rxn_id, current_column] = multiple_row_text.replace(
"$DATUM ", ""
)
multiple_row_text = ""
else:
multiple_row_text += current_line.replace("\n", " ")
flag = 4
previous_line = current_line
################################
# End of file scanning #
############################################
# Finish table for export to csv file format
my_table = my_table.replace(pd.np.nan, "", regex=True) # need to remove NaN
my_table.drop(
list(my_table.filter(regex="COPYRIGHT")), axis=1, inplace=True
) # skip the copyright (optional)
my_table.to_csv(RDF_CSV_FILE, sep="\t", header=True, index=True)
# end of script
# one could add a return value for better error handling.
return None
| [
"os.path.isdir",
"os.walk",
"re.match",
"rdkit.Chem.MolStandardize.rdMolStandardize.Normalize",
"rdkit.Chem.SanitizeMol",
"collections.OrderedDict.fromkeys",
"os.path.isfile",
"rdkit.RDLogger.logger",
"os.path.splitext",
"rdkit.Chem.MolToSmiles",
"os.path.join",
"rdkit.Chem.MolFromMolBlock"
] | [((1116, 1138), 'os.path.isfile', 'os.path.isfile', (['RDF_IN'], {}), '(RDF_IN)\n', (1130, 1138), False, 'import os\n'), ((578, 595), 'rdkit.RDLogger.logger', 'RDLogger.logger', ([], {}), '()\n', (593, 595), False, 'from rdkit import RDLogger\n'), ((1395, 1416), 'os.path.isdir', 'os.path.isdir', (['RDF_IN'], {}), '(RDF_IN)\n', (1408, 1416), False, 'import os\n'), ((1453, 1468), 'os.walk', 'os.walk', (['RDF_IN'], {}), '(RDF_IN)\n', (1460, 1468), False, 'import os\n'), ((4675, 4707), 're.match', 're.match', (['""".+SCHEME"""', 'line_three'], {}), "('.+SCHEME', line_three)\n", (4683, 4707), False, 'import re\n'), ((8322, 8372), 're.match', 're.match', (['"""\\\\s\\\\s[0-9]\\\\s\\\\s[0-9]\n"""', 'current_line'], {}), "('\\\\s\\\\s[0-9]\\\\s\\\\s[0-9]\\n', current_line)\n", (8330, 8372), False, 'import re\n'), ((15681, 15715), 're.match', 're.match', (['""".+NOTES"""', 'previous_line'], {}), "('.+NOTES', previous_line)\n", (15689, 15715), False, 'import re\n'), ((15838, 15872), 're.match', 're.match', (['""".+NOTES"""', 'previous_line'], {}), "('.+NOTES', previous_line)\n", (15846, 15872), False, 'import re\n'), ((16974, 17008), 're.match', 're.match', (['""".+TITLE"""', 'previous_line'], {}), "('.+TITLE', previous_line)\n", (16982, 17008), False, 'import re\n'), ((17131, 17165), 're.match', 're.match', (['""".+TITLE"""', 'previous_line'], {}), "('.+TITLE', previous_line)\n", (17139, 17165), False, 'import re\n'), ((18272, 18307), 're.match', 're.match', (['""".+AUTHOR"""', 'previous_line'], {}), "('.+AUTHOR', previous_line)\n", (18280, 18307), False, 'import re\n'), ((18358, 18393), 're.match', 're.match', (['""".+AUTHOR"""', 'previous_line'], {}), "('.+AUTHOR', previous_line)\n", (18366, 18393), False, 'import re\n'), ((19688, 19725), 're.match', 're.match', (['""".+CITATION"""', 'previous_line'], {}), "('.+CITATION', previous_line)\n", (19696, 19725), False, 'import re\n'), ((19776, 19813), 're.match', 're.match', (['""".+CITATION"""', 'previous_line'], {}), "('.+CITATION', previous_line)\n", (19784, 19813), False, 'import re\n'), ((1216, 1236), 'os.path.join', 'os.path.join', (['RDF_IN'], {}), '(RDF_IN)\n', (1228, 1236), False, 'import os\n'), ((5731, 5773), 're.match', 're.match', (['"""\\\\s\\\\s[0-9]\\\\s\\\\s[0-9]\n"""', 'line'], {}), "('\\\\s\\\\s[0-9]\\\\s\\\\s[0-9]\\n', line)\n", (5739, 5773), False, 'import re\n'), ((9027, 9077), 're.match', 're.match', (['"""\\\\s\\\\s[0-9]\\\\s\\\\s[0-9]\n"""', 'current_line'], {}), "('\\\\s\\\\s[0-9]\\\\s\\\\s[0-9]\\n', current_line)\n", (9035, 9077), False, 'import re\n'), ((13400, 13437), 're.match', 're.match', (['""".+EXP_PROC"""', 'previous_line'], {}), "('.+EXP_PROC', previous_line)\n", (13408, 13437), False, 'import re\n'), ((13549, 13586), 're.match', 're.match', (['""".+EXP_PROC"""', 'previous_line'], {}), "('.+EXP_PROC', previous_line)\n", (13557, 13586), False, 'import re\n'), ((14308, 14340), 're.match', 're.match', (['""".+TXT"""', 'previous_line'], {}), "('.+TXT', previous_line)\n", (14316, 14340), False, 'import re\n'), ((14452, 14484), 're.match', 're.match', (['""".+TXT"""', 'previous_line'], {}), "('.+TXT', previous_line)\n", (14460, 14484), False, 'import re\n'), ((14649, 14680), 're.match', 're.match', (['""".+STP"""', 'current_line'], {}), "('.+STP', current_line)\n", (14657, 14680), False, 'import re\n'), ((6832, 6860), 'collections.OrderedDict.fromkeys', 'OrderedDict.fromkeys', (['fields'], {}), '(fields)\n', (6852, 6860), False, 'from collections import OrderedDict\n'), ((13751, 13784), 're.match', 're.match', 
(['""".+NOTES"""', 'current_line'], {}), "('.+NOTES', current_line)\n", (13759, 13784), False, 'import re\n'), ((13788, 13827), 're.match', 're.match', (['""".+REFERENCE.+"""', 'current_line'], {}), "('.+REFERENCE.+', current_line)\n", (13796, 13827), False, 'import re\n'), ((1270, 1294), 'os.path.splitext', 'os.path.splitext', (['RDF_IN'], {}), '(RDF_IN)\n', (1286, 1294), False, 'import os\n'), ((1347, 1371), 'os.path.splitext', 'os.path.splitext', (['RDF_IN'], {}), '(RDF_IN)\n', (1363, 1371), False, 'import os\n'), ((10248, 10295), 'rdkit.Chem.MolFromMolBlock', 'rdc.MolFromMolBlock', (['mol_string'], {'sanitize': '(False)'}), '(mol_string, sanitize=False)\n', (10267, 10295), True, 'import rdkit.Chem as rdc\n'), ((10674, 10777), 'rdkit.Chem.SanitizeMol', 'rdc.SanitizeMol', (['mol'], {'sanitizeOps': '(rdc.SANITIZE_ALL ^ rdc.SANITIZE_CLEANUP ^ rdc.SANITIZE_PROPERTIES)'}), '(mol, sanitizeOps=rdc.SANITIZE_ALL ^ rdc.SANITIZE_CLEANUP ^\n rdc.SANITIZE_PROPERTIES)\n', (10689, 10777), True, 'import rdkit.Chem as rdc\n'), ((11015, 11046), 'rdkit.Chem.MolStandardize.rdMolStandardize.Normalize', 'rdMolStandardize.Normalize', (['mol'], {}), '(mol)\n', (11041, 11046), False, 'from rdkit.Chem.MolStandardize import rdMolStandardize\n'), ((11080, 11100), 'rdkit.Chem.MolToSmiles', 'rdc.MolToSmiles', (['mol'], {}), '(mol)\n', (11095, 11100), True, 'import rdkit.Chem as rdc\n'), ((1591, 1617), 'os.path.join', 'os.path.join', (['subdir', 'file'], {}), '(subdir, file)\n', (1603, 1617), False, 'import os\n'), ((10431, 10451), 'rdkit.Chem.SanitizeMol', 'rdc.SanitizeMol', (['mol'], {}), '(mol)\n', (10446, 10451), True, 'import rdkit.Chem as rdc\n'), ((1705, 1727), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (1721, 1727), False, 'import os\n'), ((1856, 1878), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (1872, 1878), False, 'import os\n')] |
#!/usr/bin/env python3
from subprocess import run
from sys import argv, exit
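# the Python version to build and test against is passed as the first CLI argument, e.g. "3.9"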
PYVER = argv[1]
IMAGE = f"ruterstop:python{PYVER}"
print("Building", IMAGE)
run(
[
"docker",
"build",
"--network=host",
"--file=.deploy/Dockerfile",
f"--build-arg=PYTHON_VERSION={PYVER}",
f"--build-arg=POETRY_VERSION=1.1.5",
f"--tag=ruterstop:python{PYVER}",
".",
],
check=True,
)
print("Running unit-tests", IMAGE)
run(
[
"docker",
"run",
"--network=host",
"--rm",
IMAGE,
]
+ ["unittest"],
check=True,
)
print("Running livetest", IMAGE)
run(
[
"docker",
"run",
"--network=host",
"--rm",
IMAGE,
]
+ ["ruterstop", "--stop-id=6013"],
check=True,
)
print("Success!")
| [
"subprocess.run"
] | [((155, 365), 'subprocess.run', 'run', (["['docker', 'build', '--network=host', '--file=.deploy/Dockerfile',\n f'--build-arg=PYTHON_VERSION={PYVER}',\n f'--build-arg=POETRY_VERSION=1.1.5', f'--tag=ruterstop:python{PYVER}', '.']"], {'check': '(True)'}), "(['docker', 'build', '--network=host', '--file=.deploy/Dockerfile',\n f'--build-arg=PYTHON_VERSION={PYVER}',\n f'--build-arg=POETRY_VERSION=1.1.5', f'--tag=ruterstop:python{PYVER}',\n '.'], check=True)\n", (158, 365), False, 'from subprocess import run\n'), ((472, 558), 'subprocess.run', 'run', (["(['docker', 'run', '--network=host', '--rm', IMAGE] + ['unittest'])"], {'check': '(True)'}), "(['docker', 'run', '--network=host', '--rm', IMAGE] + ['unittest'],\n check=True)\n", (475, 558), False, 'from subprocess import run\n'), ((651, 756), 'subprocess.run', 'run', (["(['docker', 'run', '--network=host', '--rm', IMAGE] + ['ruterstop',\n '--stop-id=6013'])"], {'check': '(True)'}), "(['docker', 'run', '--network=host', '--rm', IMAGE] + ['ruterstop',\n '--stop-id=6013'], check=True)\n", (654, 756), False, 'from subprocess import run\n')] |
import bayesian_irl
import mdp_worlds
import utils
import mdp
import numpy as np
import scipy
import random
import generate_efficient_frontier
import matplotlib.pyplot as plt
def generate_reward_sample():
#rewards for no-op are gamma distributed
r_noop = []
locs = 1/2
scales = [20, 40, 80,190]
for i in range(4):
r_noop.append(-np.random.gamma(locs, scales[i], 1)[0])
r_noop = np.array(r_noop)
    #rewards for repair are -N(100,1) for each of the four states
r_repair = -100 + -1 * np.random.randn(4)
return np.concatenate((r_noop, r_repair))
def generate_posterior_samples(num_samples):
print("samples")
all_samples = []
for i in range(num_samples):
r_sample = generate_reward_sample()
all_samples.append(r_sample)
print("mean of posterior from samples")
print(np.mean(all_samples, axis=0))
posterior = np.array(all_samples)
return posterior.transpose() #each column is a reward sample
if __name__=="__main__":
seed = 1234
np.random.seed(seed)
scipy.random.seed(seed)
random.seed(seed)
num_states = 4
num_samples = 2000
gamma = 0.95
alpha = 0.99
lamda = 0.9
posterior = generate_posterior_samples(num_samples)
r_sa = np.mean(posterior, axis=1)
init_distribution = np.ones(num_states)/num_states #uniform distribution
mdp_env = mdp.MachineReplacementMDP(num_states, r_sa, gamma, init_distribution)
print("---MDP solution for expectation---")
print("mean MDP reward", r_sa)
u_sa = mdp.solve_mdp_lp(mdp_env, debug=True)
print("mean policy from posterior")
utils.print_stochastic_policy_action_probs(u_sa, mdp_env)
print("MAP/Mean policy from posterior")
utils.print_policy_from_occupancies(u_sa, mdp_env)
print("rewards")
print(mdp_env.r_sa)
print("expected value = ", np.dot(u_sa, r_sa))
stoch_pi = utils.get_optimal_policy_from_usa(u_sa, mdp_env)
print("expected return", mdp.get_policy_expected_return(stoch_pi, mdp_env))
print("values", mdp.get_state_values(u_sa, mdp_env))
print('q-values', mdp.get_q_values(u_sa, mdp_env))
#run CVaR optimization, maybe just the robust version for now
u_expert = np.zeros(mdp_env.num_actions * mdp_env.num_states)
# print("solving for CVaR optimal policy")
posterior_probs = np.ones(num_samples) / num_samples #uniform dist since samples from MCMC
#generate efficient frontier
lambda_range = [0.0, 0.3, 0.5, 0.75, 0.95,0.99, 1.0]
#generate_efficient_frontier.calc_frontier(mdp_env, u_expert, posterior, posterior_probs, lambda_range, alpha, debug=False)
alpha = 0.99
print("calculating optimal policy for alpha = {} over lambda = {}".format(alpha, lambda_range))
cvar_rets = generate_efficient_frontier.calc_frontier(mdp_env, u_expert, posterior, posterior_probs, lambda_range, alpha, debug=False)
cvar_rets_array = np.array(cvar_rets)
plt.figure()
plt.plot(cvar_rets_array[:,0], cvar_rets_array[:,1], '-o')
#go through and label the points in the figure with the corresponding lambda values
unique_pts_lambdas = []
unique_pts = []
for i,pt in enumerate(cvar_rets_array):
unique = True
for upt in unique_pts:
if np.linalg.norm(upt - pt) < 0.00001:
unique = False
break
if unique:
unique_pts_lambdas.append((pt[0], pt[1], lambda_range[i]))
unique_pts.append(np.array(pt))
#calculate offset
offsetx = (np.max(cvar_rets_array[:,0]) - np.min(cvar_rets_array[:,0]))/30
offsety = (np.max(cvar_rets_array[:,1]) - np.min(cvar_rets_array[:,1]))/17
for i,pt in enumerate(unique_pts_lambdas):
if i in [0,1,2,4]:
plt.text(pt[0] - 6.2*offsetx, pt[1] , r"$\lambda = {}$".format(str(pt[2])), fontsize=19, fontweight='bold')
elif i in [3]:
plt.text(pt[0] - 6.2*offsetx, pt[1] - 1.2*offsety , r"$\lambda = {}$".format(str(pt[2])), fontsize=19, fontweight='bold')
elif i in [5]:
plt.text(pt[0] - 5.5*offsetx, pt[1] - 1.5*offsety, r"$\lambda = {}$".format(str(pt[2])), fontsize=19, fontweight='bold')
else:
plt.text(pt[0]-offsetx, pt[1] - 1.5*offsety, r"$\lambda = {}$".format(str(pt[2])), fontsize=19, fontweight='bold')
plt.xticks(fontsize=18)
plt.yticks(fontsize=18)
plt.xlabel("Robustness (CVaR)", fontsize=20)
plt.ylabel("Expected Return", fontsize=20)
plt.tight_layout()
plt.savefig('./figs/machine_replacement/efficient_frontier_machine_replacement.png')
plt.show()
| [
"numpy.random.seed",
"numpy.ones",
"numpy.random.gamma",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.linalg.norm",
"mdp.get_policy_expected_return",
"mdp.MachineReplacementMDP",
"matplotlib.pyplot.tight_layout",
"numpy.random.randn",
"matplotlib.pyplot.yticks",
"numpy.max",
"random.seed",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show",
"mdp.get_state_values",
"scipy.random.seed",
"generate_efficient_frontier.calc_frontier",
"numpy.min",
"utils.print_policy_from_occupancies",
"matplotlib.pyplot.ylabel",
"numpy.dot",
"numpy.concatenate",
"matplotlib.pyplot.plot",
"numpy.zeros",
"mdp.solve_mdp_lp",
"utils.print_stochastic_policy_action_probs",
"numpy.array",
"mdp.get_q_values",
"utils.get_optimal_policy_from_usa",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] | [((418, 434), 'numpy.array', 'np.array', (['r_noop'], {}), '(r_noop)\n', (426, 434), True, 'import numpy as np\n'), ((582, 616), 'numpy.concatenate', 'np.concatenate', (['(r_noop, r_repair)'], {}), '((r_noop, r_repair))\n', (596, 616), True, 'import numpy as np\n'), ((924, 945), 'numpy.array', 'np.array', (['all_samples'], {}), '(all_samples)\n', (932, 945), True, 'import numpy as np\n'), ((1060, 1080), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1074, 1080), True, 'import numpy as np\n'), ((1085, 1108), 'scipy.random.seed', 'scipy.random.seed', (['seed'], {}), '(seed)\n', (1102, 1108), False, 'import scipy\n'), ((1113, 1130), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (1124, 1130), False, 'import random\n'), ((1292, 1318), 'numpy.mean', 'np.mean', (['posterior'], {'axis': '(1)'}), '(posterior, axis=1)\n', (1299, 1318), True, 'import numpy as np\n'), ((1412, 1481), 'mdp.MachineReplacementMDP', 'mdp.MachineReplacementMDP', (['num_states', 'r_sa', 'gamma', 'init_distribution'], {}), '(num_states, r_sa, gamma, init_distribution)\n', (1437, 1481), False, 'import mdp\n'), ((1577, 1614), 'mdp.solve_mdp_lp', 'mdp.solve_mdp_lp', (['mdp_env'], {'debug': '(True)'}), '(mdp_env, debug=True)\n', (1593, 1614), False, 'import mdp\n'), ((1659, 1716), 'utils.print_stochastic_policy_action_probs', 'utils.print_stochastic_policy_action_probs', (['u_sa', 'mdp_env'], {}), '(u_sa, mdp_env)\n', (1701, 1716), False, 'import utils\n'), ((1765, 1815), 'utils.print_policy_from_occupancies', 'utils.print_policy_from_occupancies', (['u_sa', 'mdp_env'], {}), '(u_sa, mdp_env)\n', (1800, 1815), False, 'import utils\n'), ((1928, 1976), 'utils.get_optimal_policy_from_usa', 'utils.get_optimal_policy_from_usa', (['u_sa', 'mdp_env'], {}), '(u_sa, mdp_env)\n', (1961, 1976), False, 'import utils\n'), ((2259, 2309), 'numpy.zeros', 'np.zeros', (['(mdp_env.num_actions * mdp_env.num_states)'], {}), '(mdp_env.num_actions * mdp_env.num_states)\n', (2267, 2309), True, 'import numpy as np\n'), ((2820, 2946), 'generate_efficient_frontier.calc_frontier', 'generate_efficient_frontier.calc_frontier', (['mdp_env', 'u_expert', 'posterior', 'posterior_probs', 'lambda_range', 'alpha'], {'debug': '(False)'}), '(mdp_env, u_expert, posterior,\n posterior_probs, lambda_range, alpha, debug=False)\n', (2861, 2946), False, 'import generate_efficient_frontier\n'), ((2970, 2989), 'numpy.array', 'np.array', (['cvar_rets'], {}), '(cvar_rets)\n', (2978, 2989), True, 'import numpy as np\n'), ((2994, 3006), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3004, 3006), True, 'import matplotlib.pyplot as plt\n'), ((3011, 3071), 'matplotlib.pyplot.plot', 'plt.plot', (['cvar_rets_array[:, 0]', 'cvar_rets_array[:, 1]', '"""-o"""'], {}), "(cvar_rets_array[:, 0], cvar_rets_array[:, 1], '-o')\n", (3019, 3071), True, 'import matplotlib.pyplot as plt\n'), ((4381, 4404), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(18)'}), '(fontsize=18)\n', (4391, 4404), True, 'import matplotlib.pyplot as plt\n'), ((4410, 4433), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(18)'}), '(fontsize=18)\n', (4420, 4433), True, 'import matplotlib.pyplot as plt\n'), ((4439, 4483), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Robustness (CVaR)"""'], {'fontsize': '(20)'}), "('Robustness (CVaR)', fontsize=20)\n", (4449, 4483), True, 'import matplotlib.pyplot as plt\n'), ((4488, 4530), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Expected Return"""'], {'fontsize': '(20)'}), "('Expected Return', 
fontsize=20)\n", (4498, 4530), True, 'import matplotlib.pyplot as plt\n'), ((4540, 4558), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4556, 4558), True, 'import matplotlib.pyplot as plt\n'), ((4563, 4652), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./figs/machine_replacement/efficient_frontier_machine_replacement.png"""'], {}), "(\n './figs/machine_replacement/efficient_frontier_machine_replacement.png')\n", (4574, 4652), True, 'import matplotlib.pyplot as plt\n'), ((4653, 4663), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4661, 4663), True, 'import matplotlib.pyplot as plt\n'), ((876, 904), 'numpy.mean', 'np.mean', (['all_samples'], {'axis': '(0)'}), '(all_samples, axis=0)\n', (883, 904), True, 'import numpy as np\n'), ((1344, 1363), 'numpy.ones', 'np.ones', (['num_states'], {}), '(num_states)\n', (1351, 1363), True, 'import numpy as np\n'), ((1893, 1911), 'numpy.dot', 'np.dot', (['u_sa', 'r_sa'], {}), '(u_sa, r_sa)\n', (1899, 1911), True, 'import numpy as np\n'), ((2006, 2055), 'mdp.get_policy_expected_return', 'mdp.get_policy_expected_return', (['stoch_pi', 'mdp_env'], {}), '(stoch_pi, mdp_env)\n', (2036, 2055), False, 'import mdp\n'), ((2077, 2112), 'mdp.get_state_values', 'mdp.get_state_values', (['u_sa', 'mdp_env'], {}), '(u_sa, mdp_env)\n', (2097, 2112), False, 'import mdp\n'), ((2136, 2167), 'mdp.get_q_values', 'mdp.get_q_values', (['u_sa', 'mdp_env'], {}), '(u_sa, mdp_env)\n', (2152, 2167), False, 'import mdp\n'), ((2384, 2404), 'numpy.ones', 'np.ones', (['num_samples'], {}), '(num_samples)\n', (2391, 2404), True, 'import numpy as np\n'), ((551, 569), 'numpy.random.randn', 'np.random.randn', (['(4)'], {}), '(4)\n', (566, 569), True, 'import numpy as np\n'), ((3580, 3609), 'numpy.max', 'np.max', (['cvar_rets_array[:, 0]'], {}), '(cvar_rets_array[:, 0])\n', (3586, 3609), True, 'import numpy as np\n'), ((3611, 3640), 'numpy.min', 'np.min', (['cvar_rets_array[:, 0]'], {}), '(cvar_rets_array[:, 0])\n', (3617, 3640), True, 'import numpy as np\n'), ((3659, 3688), 'numpy.max', 'np.max', (['cvar_rets_array[:, 1]'], {}), '(cvar_rets_array[:, 1])\n', (3665, 3688), True, 'import numpy as np\n'), ((3690, 3719), 'numpy.min', 'np.min', (['cvar_rets_array[:, 1]'], {}), '(cvar_rets_array[:, 1])\n', (3696, 3719), True, 'import numpy as np\n'), ((3319, 3343), 'numpy.linalg.norm', 'np.linalg.norm', (['(upt - pt)'], {}), '(upt - pt)\n', (3333, 3343), True, 'import numpy as np\n'), ((3528, 3540), 'numpy.array', 'np.array', (['pt'], {}), '(pt)\n', (3536, 3540), True, 'import numpy as np\n'), ((365, 400), 'numpy.random.gamma', 'np.random.gamma', (['locs', 'scales[i]', '(1)'], {}), '(locs, scales[i], 1)\n', (380, 400), True, 'import numpy as np\n')] |
import time
import math
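# @profile is injected by line_profiler's kernprof (run via "kernprof -l <script>"); it is undefined when the file is run directly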
@profile
def primes(n):
start = time.time()
prime1 = [2]
sn=int(math.sqrt(n))
for attempt in range(3,sn+1,2):
if all((attempt % prime != 0 and n%attempt==0) for prime in prime1):
prime1.append(attempt)
end = time.time()
print(end - start)
return prime1
n=primes(600851475143)
print(max(n))
| [
"math.sqrt",
"time.time"
] | [((64, 75), 'time.time', 'time.time', ([], {}), '()\n', (73, 75), False, 'import time\n'), ((282, 293), 'time.time', 'time.time', ([], {}), '()\n', (291, 293), False, 'import time\n'), ((106, 118), 'math.sqrt', 'math.sqrt', (['n'], {}), '(n)\n', (115, 118), False, 'import math\n')] |
# Copyright 2021 Condenser Author All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from dataclasses import dataclass
from typing import List, Dict
import torch
from torch.utils.data import Dataset
from transformers import DataCollatorForWholeWordMask
@dataclass
class CondenserCollator(DataCollatorForWholeWordMask):
max_seq_length: int = 512
def __post_init__(self):
super(CondenserCollator, self).__post_init__()
from transformers import BertTokenizer, BertTokenizerFast
from transformers import RobertaTokenizer, RobertaTokenizerFast
if isinstance(self.tokenizer, (BertTokenizer, BertTokenizerFast)):
self.whole_word_cand_indexes = self._whole_word_cand_indexes_bert
elif isinstance(self.tokenizer, (RobertaTokenizer, RobertaTokenizerFast)):
            self.whole_word_cand_indexes = self._whole_word_cand_indexes_roberta
else:
raise NotImplementedError(f'{type(self.tokenizer)} collator not supported yet')
self.specials = self.tokenizer.all_special_tokens
def _whole_word_cand_indexes_bert(self, input_tokens: List[str]):
cand_indexes = []
for (i, token) in enumerate(input_tokens):
if token in self.specials:
continue
if len(cand_indexes) >= 1 and token.startswith("##"):
cand_indexes[-1].append(i)
else:
cand_indexes.append([i])
return cand_indexes
def _whole_word_cand_indexes_roberta(self, input_tokens: List[str]):
cand_indexes = []
for (i, token) in enumerate(input_tokens):
if token in self.specials:
raise ValueError('We expect only raw input for roberta for current implementation')
if i == 0:
cand_indexes.append([0])
elif not token.startswith('\u0120'):
cand_indexes[-1].append(i)
else:
cand_indexes.append([i])
return cand_indexes
def _whole_word_mask(self, input_tokens: List[str], max_predictions=512):
"""
Get 0/1 labels for masked tokens with whole word mask proxy
"""
cand_indexes = self._whole_word_cand_indexes_bert(input_tokens)
random.shuffle(cand_indexes)
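        # greedily mask whole words in random order until roughly mlm_probability of the tokens are covered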
num_to_predict = min(max_predictions, max(1, int(round(len(input_tokens) * self.mlm_probability))))
masked_lms = []
covered_indexes = set()
for index_set in cand_indexes:
if len(masked_lms) >= num_to_predict:
break
# If adding a whole-word mask would exceed the maximum number of
# predictions, then just skip this candidate.
if len(masked_lms) + len(index_set) > num_to_predict:
continue
is_any_index_covered = False
for index in index_set:
if index in covered_indexes:
is_any_index_covered = True
break
if is_any_index_covered:
continue
for index in index_set:
covered_indexes.add(index)
masked_lms.append(index)
assert len(covered_indexes) == len(masked_lms)
mask_labels = [1 if i in covered_indexes else 0 for i in range(len(input_tokens))]
return mask_labels
def _truncate(self, example: List[int]):
tgt_len = self.max_seq_length - self.tokenizer.num_special_tokens_to_add(False)
if len(example) <= tgt_len:
return example
trunc = len(example) - tgt_len
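        # split the excess length randomly between the left and right ends of the sequence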
trunc_left = random.randint(0, trunc)
trunc_right = trunc - trunc_left
truncated = example[trunc_left:]
if trunc_right > 0:
truncated = truncated[:-trunc_right]
if not len(truncated) == tgt_len:
print(len(example), len(truncated), trunc_left, trunc_right, tgt_len, flush=True)
raise ValueError
return truncated
def _pad(self, seq, val=0):
tgt_len = self.max_seq_length
assert len(seq) <= tgt_len
return seq + [val for _ in range(tgt_len - len(seq))]
def __call__(self, examples: List[Dict[str, List[int]]]):
encoded_examples = []
masks = []
mlm_masks = []
for e in examples:
e_trunc = self._truncate(e['text'])
tokens = [self.tokenizer._convert_id_to_token(tid) for tid in e_trunc]
mlm_mask = self._whole_word_mask(tokens)
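            # prepend a 0 so the mask lines up with the special token encode_plus adds at the start, then pad to max_seq_length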
mlm_mask = self._pad([0] + mlm_mask)
mlm_masks.append(mlm_mask)
encoded = self.tokenizer.encode_plus(
self._truncate(e['text']),
add_special_tokens=True,
max_length=self.max_seq_length,
padding="max_length",
truncation=True,
return_token_type_ids=False,
)
masks.append(encoded['attention_mask'])
encoded_examples.append(encoded['input_ids'])
inputs, labels = self.mask_tokens(
torch.tensor(encoded_examples, dtype=torch.long),
torch.tensor(mlm_masks, dtype=torch.long)
)
batch = {
"input_ids": inputs,
"labels": labels,
"attention_mask": torch.tensor(masks),
}
return batch
@dataclass
class CoCondenserCollator(CondenserCollator):
def __call__(self, examples):
examples = sum(examples, [])
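        # flatten the per-document span lists into a single batch of raw text spans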
examples = [{'text': e} for e in examples]
return super(CoCondenserCollator, self).__call__(examples)
class CoCondenserDataset(Dataset):
def __init__(self, dataset, data_args):
self.dataset = dataset
self.data_args = data_args
def __len__(self):
return len(self.dataset)
def __getitem__(self, item):
spans = self.dataset[item]['spans']
return random.sample(spans, 2)
| [
"random.sample",
"random.shuffle",
"random.randint",
"torch.tensor"
] | [((2786, 2814), 'random.shuffle', 'random.shuffle', (['cand_indexes'], {}), '(cand_indexes)\n', (2800, 2814), False, 'import random\n'), ((4125, 4149), 'random.randint', 'random.randint', (['(0)', 'trunc'], {}), '(0, trunc)\n', (4139, 4149), False, 'import random\n'), ((6408, 6431), 'random.sample', 'random.sample', (['spans', '(2)'], {}), '(spans, 2)\n', (6421, 6431), False, 'import random\n'), ((5583, 5631), 'torch.tensor', 'torch.tensor', (['encoded_examples'], {'dtype': 'torch.long'}), '(encoded_examples, dtype=torch.long)\n', (5595, 5631), False, 'import torch\n'), ((5645, 5686), 'torch.tensor', 'torch.tensor', (['mlm_masks'], {'dtype': 'torch.long'}), '(mlm_masks, dtype=torch.long)\n', (5657, 5686), False, 'import torch\n'), ((5809, 5828), 'torch.tensor', 'torch.tensor', (['masks'], {}), '(masks)\n', (5821, 5828), False, 'import torch\n')] |
from rest_framework.exceptions import APIException
from core.exceptions import common_exception_handler
def test_common_exception_handler_if_error_without_detail(mocker):
exp = APIException({'data': 'test'})
response = common_exception_handler(exp, mocker.Mock())
assert response.data['service_name'] == 'unittest.mock.Mock:'
assert response.data['error_name'] == 'APIException'
assert response.data['detail'] == {'data': 'test'}
def test_common_exception_handler_if_error_is_string(mocker):
exp = APIException(['testing error'])
response = common_exception_handler(exp, mocker.Mock())
assert response.data['service_name'] == 'unittest.mock.Mock:'
assert response.data['error_name'] == 'APIException'
assert response.data['detail'] == ['testing error']
| [
"rest_framework.exceptions.APIException"
] | [((184, 214), 'rest_framework.exceptions.APIException', 'APIException', (["{'data': 'test'}"], {}), "({'data': 'test'})\n", (196, 214), False, 'from rest_framework.exceptions import APIException\n'), ((527, 558), 'rest_framework.exceptions.APIException', 'APIException', (["['testing error']"], {}), "(['testing error'])\n", (539, 558), False, 'from rest_framework.exceptions import APIException\n')] |
"""Models representing the data modifying payloads."""
from h.h_api.enums import DataType
from h.h_api.model.json_api import JSONAPIData
from h.h_api.schema import Schema
class UpsertBody(JSONAPIData):
data_type = None
query_fields = []
@classmethod
def create(cls, attributes, id_reference):
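        # pull the query fields out of the attributes; they identify which record the upsert targets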
query = {field: attributes.pop(field, None) for field in cls.query_fields}
return super().create(
data_type=cls.data_type,
attributes=attributes,
meta={"query": query},
id_reference=id_reference,
)
@property
def query(self):
"""The query used to select which item to update."""
return self.meta["query"]
class UpsertUser(UpsertBody):
"""The data to upsert a user."""
validator = Schema.get_validator("bulk_api/command/upsert_user.json")
data_type = DataType.USER
query_fields = ["authority", "username"]
class UpsertGroup(UpsertBody):
"""The data to upsert a group."""
validator = Schema.get_validator("bulk_api/command/upsert_group.json")
data_type = DataType.GROUP
query_fields = ["authority", "authority_provided_id"]
class CreateGroupMembership(JSONAPIData):
"""The data to add a user to a group."""
validator = Schema.get_validator("bulk_api/command/create_group_membership.json")
@classmethod
def create(cls, user_ref, group_ref):
"""
Create a create group membership body for adding users to groups.
:param user_ref: Custom user reference
:param group_ref: Custom group reference
:return:
"""
return super().create(
DataType.GROUP_MEMBERSHIP,
relationships={
"member": {
"data": {"type": DataType.USER.value, "id": {"$ref": user_ref}}
},
"group": {
"data": {"type": DataType.GROUP.value, "id": {"$ref": group_ref}}
},
},
)
@property
def member(self):
"""The user which is a member of this group.
:return: A value object with `id` and `ref` properties.
"""
return _IdRef(self.relationships["member"]["data"]["id"])
@property
def group(self):
"""The group which this user is a member of.
:return: A value object with `id` and `ref` properties.
"""
return _IdRef(self.relationships["group"]["data"]["id"])
class _IdRef:
"""A value object which represents an id reference or concrete id."""
def __init__(self, value):
if isinstance(value, dict):
self.id, self.ref = None, value.get("$ref")
else:
self.id, self.ref = value, None
| [
"h.h_api.schema.Schema.get_validator"
] | [((802, 859), 'h.h_api.schema.Schema.get_validator', 'Schema.get_validator', (['"""bulk_api/command/upsert_user.json"""'], {}), "('bulk_api/command/upsert_user.json')\n", (822, 859), False, 'from h.h_api.schema import Schema\n'), ((1023, 1081), 'h.h_api.schema.Schema.get_validator', 'Schema.get_validator', (['"""bulk_api/command/upsert_group.json"""'], {}), "('bulk_api/command/upsert_group.json')\n", (1043, 1081), False, 'from h.h_api.schema import Schema\n'), ((1277, 1346), 'h.h_api.schema.Schema.get_validator', 'Schema.get_validator', (['"""bulk_api/command/create_group_membership.json"""'], {}), "('bulk_api/command/create_group_membership.json')\n", (1297, 1346), False, 'from h.h_api.schema import Schema\n')] |
#!/usr/bin/env python
"""Provides ways to join distinct graphs."""
from GArDen.transform.contraction import Minor
from sklearn.base import BaseEstimator, TransformerMixin
import networkx as nx
import logging
logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
class Flatten(BaseEstimator, TransformerMixin):
"""DisjointUnion."""
def __init__(self):
"""Construct."""
pass
def transform(self, graphs_list):
"""transform."""
try:
for graphs in graphs_list:
for graph in graphs:
yield graph
except Exception as e:
logger.debug('Failed iteration. Reason: %s' % e)
logger.debug('Exception', exc_info=True)
# ------------------------------------------------------------------------------
class DisjointUnion(BaseEstimator, TransformerMixin):
"""DisjointUnion."""
def __init__(self):
"""Construct."""
pass
def transform(self, graphs_list):
"""transform."""
try:
for graphs in graphs_list:
transformed_graph = self._disjoint_union(graphs)
yield transformed_graph
except Exception as e:
logger.debug('Failed iteration. Reason: %s' % e)
logger.debug('Exception', exc_info=True)
def _disjoint_union(self, graphs):
# make the disjoint union of all graphs
graph_global = nx.Graph()
for graph in graphs:
graph_global = nx.disjoint_union(graph_global, graph)
return graph_global
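    # Note (added): nx.disjoint_union relabels nodes to consecutive integers,
    # so the merged graph cannot have node-name collisions between the inputs.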
# ------------------------------------------------------------------------------
class Union(BaseEstimator, TransformerMixin):
"""Union."""
def __init__(self, attribute='position'):
"""Construct."""
self.attribute = attribute
def transform(self, graphs_list):
"""transform."""
try:
minor = Minor()
graphs = self._union_list(graphs_list)
return minor.transform(graphs)
except Exception as e:
logger.debug('Failed iteration. Reason: %s' % e)
logger.debug('Exception', exc_info=True)
def _union_list(self, graphs_list):
for graphs in graphs_list:
transformed_graph = self._union(graphs)
yield transformed_graph
def _union(self, graphs):
graph_global = nx.Graph()
for graph in graphs:
graph_global = nx.disjoint_union(graph_global, graph)
for n in graph_global.nodes():
if self.attribute in graph_global.node[n]:
graph_global.node[n]['part_id'] = \
[graph_global.node[n][self.attribute]]
graph_global.node[n]['part_name'] = \
[graph_global.node[n]['label']]
return graph_global
| [
"networkx.disjoint_union",
"networkx.Graph",
"logging.getLogger",
"GArDen.transform.contraction.Minor"
] | [((219, 246), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (236, 246), False, 'import logging\n'), ((1502, 1512), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (1510, 1512), True, 'import networkx as nx\n'), ((2452, 2462), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (2460, 2462), True, 'import networkx as nx\n'), ((1569, 1607), 'networkx.disjoint_union', 'nx.disjoint_union', (['graph_global', 'graph'], {}), '(graph_global, graph)\n', (1586, 1607), True, 'import networkx as nx\n'), ((1987, 1994), 'GArDen.transform.contraction.Minor', 'Minor', ([], {}), '()\n', (1992, 1994), False, 'from GArDen.transform.contraction import Minor\n'), ((2519, 2557), 'networkx.disjoint_union', 'nx.disjoint_union', (['graph_global', 'graph'], {}), '(graph_global, graph)\n', (2536, 2557), True, 'import networkx as nx\n')] |
from SentimentAnalysis.creat_data.config import tencent
import pandas as pd
import numpy as np
import requests
import json
import time
import random
import hashlib
from urllib import parse
from collections import OrderedDict
AppID = tencent['account']['id_1']['APP_ID']
AppKey = tencent['account']['id_1']['AppKey']
def cal_sign(params_raw,AppKey=AppKey):
    # The official docs only provide a PHP example; this is the Python version.
# params_raw = {'app_id': '10000',
# 'time_stamp': '1493449657',
# 'nonce_str': '20e3408a79',
# 'key1': '腾讯AI开放平台',
# 'key2': '示例仅供参考',
# 'sign': ''}
# AppKey = '<KEY>'
# cal_sign(params_raw=params_raw,
# AppKey=AppKey)
# 返回:BE918C28827E0783D1E5F8E6D7C37A61
params = OrderedDict()
for i in sorted(params_raw):
if params_raw[i] != '':
params[i] = params_raw[i]
newurl = parse.urlencode(params)
newurl += ('&app_key=' + AppKey)
sign = hashlib.md5(newurl.encode("latin1")).hexdigest().upper()
return sign
def creat_label(texts,
AppID=AppID,
AppKey=AppKey):
'''
    :param texts: list of documents to be labelled
    :param AppID: Tencent AI account info; defaults to id_1 in the config file
    :param AppKey: Tencent AI account info; defaults to id_1 in the config file
    :return: list of labelled results: original text, polarity label, confidence, return code and message
'''
url = tencent['api']['nlp_textpolar']['url']
results = []
    # call the API for each text in turn
count_i=0
for one_text in texts:
params = {'app_id': AppID,
'time_stamp': int(time.time()),
'nonce_str': ''.join([random.choice('1234567890abcdefghijklmnopqrstuvwxyz') for i in range(10)]),
'sign': '',
'text': one_text}
        params['sign'] = cal_sign(params_raw=params,
                                  AppKey=AppKey)  # compute the request signature
        r = requests.post(url=url,
                          params=params)  # call the sentiment API and get the result
result = json.loads(r.text)
# print(result)
results.append([one_text,
result['data']['polar'],
result['data']['confd'],
result['ret'],
result['msg']
])
r.close()
count_i += 1
if count_i % 50 == 0:
print('tencent finish:%d' % (count_i))
return results
if __name__ == '__main__':
results = creat_label(texts=['价格便宜啦,比原来优惠多了',
'壁挂效果差,果然一分价钱一分货',
'东西一般般,诶呀',
'讨厌你',
'一般'])
results = pd.DataFrame(results, columns=['evaluation',
'label',
'confidence',
'ret',
'msg'])
results['label'] = np.where(results['label'] == 1, '正面',
np.where(results['label'] == 0, '中性', '负面'))
print(results)
| [
"pandas.DataFrame",
"json.loads",
"urllib.parse.urlencode",
"random.choice",
"time.time",
"numpy.where",
"collections.OrderedDict",
"requests.post"
] | [((776, 789), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (787, 789), False, 'from collections import OrderedDict\n'), ((906, 929), 'urllib.parse.urlencode', 'parse.urlencode', (['params'], {}), '(params)\n', (921, 929), False, 'from urllib import parse\n'), ((2557, 2643), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {'columns': "['evaluation', 'label', 'confidence', 'ret', 'msg']"}), "(results, columns=['evaluation', 'label', 'confidence', 'ret',\n 'msg'])\n", (2569, 2643), True, 'import pandas as pd\n'), ((1818, 1855), 'requests.post', 'requests.post', ([], {'url': 'url', 'params': 'params'}), '(url=url, params=params)\n', (1831, 1855), False, 'import requests\n'), ((1909, 1927), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (1919, 1927), False, 'import json\n'), ((2913, 2956), 'numpy.where', 'np.where', (["(results['label'] == 0)", '"""中性"""', '"""负面"""'], {}), "(results['label'] == 0, '中性', '负面')\n", (2921, 2956), True, 'import numpy as np\n'), ((1498, 1509), 'time.time', 'time.time', ([], {}), '()\n', (1507, 1509), False, 'import time\n'), ((1552, 1605), 'random.choice', 'random.choice', (['"""1234567890abcdefghijklmnopqrstuvwxyz"""'], {}), "('1234567890abcdefghijklmnopqrstuvwxyz')\n", (1565, 1605), False, 'import random\n')] |
# -*- coding: utf-8 -*-
'''
#-------------------------------------------------------------------------------
# NATIONAL UNIVERSITY OF SINGAPORE - NUS
# SINGAPORE INSTITUTE FOR NEUROTECHNOLOGY - SINAPSE
# Singapore
# URL: http://www.sinapseinstitute.org
#-------------------------------------------------------------------------------
# Neuromorphic Engineering Group
# Author: <NAME>, PhD
# Contact:
#-------------------------------------------------------------------------------
# Description: defines classes for processing tactile data to be used for
# braille recognition.
# The 'Braille' class stores the SVM model used to recognize braille characters.
# this class abstracts the process of data processing, meaning that it only deals
# with the data ready for training and/or classification procedures.
# For handling data, the class 'BrailleHandler' should be used instead
#-------------------------------------------------------------------------------
'''
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
#LIBRARIES
import os, os.path, sys
sys.path.append('../general')
import numpy as np
import scipy as sp
from sklearn.svm import SVC
from sklearn.externals import joblib
from dataprocessing import * #import the detect_peaks method
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
#Feature extraction for SVM-based braille classification
class BrailleHandler():
#---------------------------------------------------------------------------
#read a file and return the data
def loadFile(filepath):
if os.path.isfile(filepath):
#return the data contained in the data
return np.loadtxt(filepath)
else:
return False #file not found
def convert2vector(data):
return np.transpose(data)
#convert the data from a file into a vector
def oldconvert2vector(data,nrows,ncols):
#first convert to 3D matrix
datamat = BrailleHandler.oldconvert2frames(data,nrows,ncols)
numsamples = np.size(datamat,2) #number of samples or frames
dataVector = np.zeros((nrows*ncols,numsamples))
taxelCounter = 0
for i in range(nrows):
for j in range(ncols):
dataVector[taxelCounter] = datamat[i,j,:]
taxelCounter+=1
return dataVector #return the dataVector
#convert data from the file that are arranged
#in a 2D array (every line contains reading from all rows for one column)
#into a 3D array (row,col,frame)
def oldconvert2frames(data,nrows,ncols):
datamat = np.zeros((nrows,ncols,np.int(np.floor(np.divide(np.size(data,0),nrows)))),dtype=int)
c = 0
for ii in range(0,(np.size(data,0)-nrows),nrows):
datamat[:,:,c] = data[ii:ii+nrows,:]
c = c+1
return datamat #return the 3D matrix
#---------------------------------------------------------------------------
#find the number of peaks in every single taxel
def countPeaks(inputMatrix,threshold):
if len(inputMatrix.shape) == 3: #3D matrix
nrows = inputMatrix.shape[0] #number of rows
ncols = inputMatrix.shape[1] #number of columns
nsamples = inputMatrix.shape[2] #number of samples
#feature vector containing the number of peaks for
#each taxel of the tactile sensor
featureVector = np.zeros(nrows*ncols)
#matrix M*NxT where each row corresponds to a taxel and the
#columns to the time series signal
tactileSignal = np.zeros((nrows*ncols,nsamples))
#counter for the index of the tactileSignal matrix
counter = 0
#loop through the rows
for k in range(nrows):
#loop through the columns
for w in range(ncols):
#get a single taxel signal
tactileSignal[counter] = inputMatrix[k,w,:]
#count the number of peaks in the signal
#and built the feature vector
#find the peaks
tmppeaks = detect_peaks(tactileSignal[counter],mph=threshold,mpd=20,show=False)
#number of peaks is the length of 'tmppeaks'
featureVector[counter] = len(tmppeaks)
#increment the counter
counter+=1
#list of list, every element of the list corresponds to
#the time series of a single taxel
else:
#find the total number of taxels in the tactile array
numberTaxels = len(inputMatrix)
#feature vector containing the number of peaks for
#each taxel of the tactile sensor
featureVector = np.zeros(numberTaxels)
#scan all the taxels
for k in range(numberTaxels):
#find the peaks
tmppeaks = detect_peaks(inputMatrix[k],mph=threshold,mpd=20,show=False)
#number of peaks is the length of 'tmppeaks'
featureVector[k] = len(tmppeaks)
#return the feature vector
return featureVector
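    # Illustrative note (added): for the 4x4 tactile array used below this
    # returns a 16-element vector with one peak count per taxel, in row-major order.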
#-------------------------------------------------------------------------------
#create the training data based on the list of the text files to be loaded
#and the labels corresponding for each text data
def createTrainingData(dataFiles,nrows,ncols,filt=False):
for k in range(len(dataFiles)):
#get the filename
filename = dataFiles[k]
#load the data
datafile = BrailleHandler.loadFile(filename)
#convert to vector
#datavector = BrailleHandler.oldconvert2vector(datafile,nrows,ncols)
datavector = BrailleHandler.convert2vector(datafile)
#if data should be filtered
if filt == True:
#for every taxel
for i in range(np.size(datavector,0)):
mva = MovingAverage() #window size = 10, sampfreq = 100 Hz
#for every sample, get the moving average response
for z in range(np.size(datavector,1)):
datavector[i,z] = mva.getSample(datavector[i,z])
#find the number of peaks
peakTh = 0.05 #threshold for peak detection
#create the feature vector
featurevector = BrailleHandler.countPeaks(datavector,peakTh)
#if it is the first iteration, create the training data
if k != 0:
trainingData = np.vstack((trainingData,featurevector))
else:
trainingData = featurevector
return trainingData
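    # Usage sketch (file names are hypothetical):
    # BrailleHandler.createTrainingData(['BRC_A1.txt', 'BRC_B1.txt'], 4, 4)
    # stacks one peak-count feature vector per file into a training matrix.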
#-------------------------------------------------------------------------------
#Braille Recognition Class
class Braille():
def __init__(self):
#labels for every class
#dictionary to associate label names and values
self.classes = dict()
#SVM model
self.modelSVM = None
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
#load a pre-trained SVM model from a file
def load(self,filepath):
#checks if the file exists
if os.path.isfile(filepath):
self.modelSVM = joblib.load(filepath) #loads the SVM model
return True #load ok
else:
return False #file not found
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
#save a new SVM model
def save(self,filename):
#saving
joblib.dump(self.modelSVM,filename+'.pkl')
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
#train a SVM model
def train(self,trainingData,labels):
#create a new SVM model
self.modelSVM = SVC()
#pass the training data and the labels for training
self.modelSVM.fit(trainingData,labels)
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
#classification
#features should be a feature vector following the same pattern
#that was used for training
def classify(self,features):
#check if there is a SVM model to classify the data
if self.modelSVM is not None:
#classify based on the input features
svmResp = self.modelSVM.predict(features)
#return the output of the classifier
return svmResp
else:
return False
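    # Usage sketch (names are illustrative): clf = Braille();
    # clf.train(trainingData, labels); clf.save('braille_svm')
    # and later clf.load('braille_svm.pkl'); clf.classify(features).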
#---------------------------------------------------------------------------
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
if __name__=='__main__':
#---------------------------------------------------------------------------
import numpy as np #numpy
import matplotlib.pyplot as plt #matplotlib
NROWS = 4 #number of columns in the tactile array
NCOLS = 4 #number of lines in the tactile array
peakTh = 300 #threshold for detecting peaks
#load the braille data from file
#2D matrix
datafile = np.loadtxt('NewData_BRC/BRC_B1.txt')
#convert data to a 3D matrix
tactileData = BrailleHandler.oldconvert2frames(datafile,NROWS,NCOLS)
#feature vector containing the number of peaks for each taxel
features = BrailleHandler.countPeaks(tactileData,peakTh)
#---------------------------------------------------------------------------
#feature extraction with 2D array
#moving average of the 2D matrix
#create a moving average object
#default parameters, windowsize = 10, sampfreq = 100 Hz
mva = MovingAverage()
tactileVector = BrailleHandler.oldconvert2vector(datafile,NROWS,NCOLS)
numsamples = np.size(tactileData,2) #total number of samples
tactileMVA = np.zeros((NROWS*NCOLS,numsamples))
counter = 0 #taxel counter
for k in range(NROWS*NCOLS): #scan all the columns
for z in range(numsamples): #filtering the signal sample by sample
tactileMVA[counter,z] = mva.getSample(tactileVector[k,z])
counter+=1 #increment the taxel counter
#with the filtered data, count peaks again
filtFeatures = BrailleHandler.countPeaks(tactileMVA,peakTh)
#print the filtered feature vector
print(filtFeatures) | [
"sys.path.append",
"numpy.size",
"sklearn.externals.joblib.dump",
"numpy.zeros",
"numpy.transpose",
"os.path.isfile",
"sklearn.externals.joblib.load",
"numpy.loadtxt",
"sklearn.svm.SVC",
"numpy.vstack"
] | [((1187, 1216), 'sys.path.append', 'sys.path.append', (['"""../general"""'], {}), "('../general')\n", (1202, 1216), False, 'import os, os.path, sys\n'), ((9841, 9877), 'numpy.loadtxt', 'np.loadtxt', (['"""NewData_BRC/BRC_B1.txt"""'], {}), "('NewData_BRC/BRC_B1.txt')\n", (9851, 9877), True, 'import numpy as np\n'), ((10493, 10516), 'numpy.size', 'np.size', (['tactileData', '(2)'], {}), '(tactileData, 2)\n', (10500, 10516), True, 'import numpy as np\n'), ((10559, 10596), 'numpy.zeros', 'np.zeros', (['(NROWS * NCOLS, numsamples)'], {}), '((NROWS * NCOLS, numsamples))\n', (10567, 10596), True, 'import numpy as np\n'), ((1794, 1818), 'os.path.isfile', 'os.path.isfile', (['filepath'], {}), '(filepath)\n', (1808, 1818), False, 'import os, os.path, sys\n'), ((2019, 2037), 'numpy.transpose', 'np.transpose', (['data'], {}), '(data)\n', (2031, 2037), True, 'import numpy as np\n'), ((2264, 2283), 'numpy.size', 'np.size', (['datamat', '(2)'], {}), '(datamat, 2)\n', (2271, 2283), True, 'import numpy as np\n'), ((2334, 2371), 'numpy.zeros', 'np.zeros', (['(nrows * ncols, numsamples)'], {}), '((nrows * ncols, numsamples))\n', (2342, 2371), True, 'import numpy as np\n'), ((7652, 7676), 'os.path.isfile', 'os.path.isfile', (['filepath'], {}), '(filepath)\n', (7666, 7676), False, 'import os, os.path, sys\n'), ((8088, 8133), 'sklearn.externals.joblib.dump', 'joblib.dump', (['self.modelSVM', "(filename + '.pkl')"], {}), "(self.modelSVM, filename + '.pkl')\n", (8099, 8133), False, 'from sklearn.externals import joblib\n'), ((8419, 8424), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (8422, 8424), False, 'from sklearn.svm import SVC\n'), ((1892, 1912), 'numpy.loadtxt', 'np.loadtxt', (['filepath'], {}), '(filepath)\n', (1902, 1912), True, 'import numpy as np\n'), ((3670, 3693), 'numpy.zeros', 'np.zeros', (['(nrows * ncols)'], {}), '(nrows * ncols)\n', (3678, 3693), True, 'import numpy as np\n'), ((3842, 3877), 'numpy.zeros', 'np.zeros', (['(nrows * ncols, nsamples)'], {}), '((nrows * ncols, nsamples))\n', (3850, 3877), True, 'import numpy as np\n'), ((5061, 5083), 'numpy.zeros', 'np.zeros', (['numberTaxels'], {}), '(numberTaxels)\n', (5069, 5083), True, 'import numpy as np\n'), ((7707, 7728), 'sklearn.externals.joblib.load', 'joblib.load', (['filepath'], {}), '(filepath)\n', (7718, 7728), False, 'from sklearn.externals import joblib\n'), ((2968, 2984), 'numpy.size', 'np.size', (['data', '(0)'], {}), '(data, 0)\n', (2975, 2984), True, 'import numpy as np\n'), ((6905, 6945), 'numpy.vstack', 'np.vstack', (['(trainingData, featurevector)'], {}), '((trainingData, featurevector))\n', (6914, 6945), True, 'import numpy as np\n'), ((6260, 6282), 'numpy.size', 'np.size', (['datavector', '(0)'], {}), '(datavector, 0)\n', (6267, 6282), True, 'import numpy as np\n'), ((6472, 6494), 'numpy.size', 'np.size', (['datavector', '(1)'], {}), '(datavector, 1)\n', (6479, 6494), True, 'import numpy as np\n'), ((2888, 2904), 'numpy.size', 'np.size', (['data', '(0)'], {}), '(data, 0)\n', (2895, 2904), True, 'import numpy as np\n')] |
from algorithm import Algorithm
from tkinter import *
from tkinter import ttk
class Gradient_Boosting_Classifier(Algorithm):
def __init__(self, frame):
self.frame = frame
self.name = "Gradient Boosting Classifier"
#Options for the loss criteria.
self.Loss_Label = ttk.Label(frame, text="Loss Function:")
self.Loss = StringVar()
self.Loss.set('deviance')
self.Loss_Deviance = ttk.Radiobutton(frame, text='Deviance', variable=self.Loss, value='deviance')
self.Loss_Exponential = ttk.Radiobutton(frame, text='Exponential', variable=self.Loss, value='exponential')
#Options for the learning rate.
self.LearningRate_Label = ttk.Label(frame, text="Learning Rate:")
self.LearningRate = StringVar()
self.LearningRate.set('0.1')
self.LearningRate_Box = Spinbox(frame, textvariable=self.LearningRate, from_=0.0, to=1.0, increment=0.01, width=5)
#Options for the number of boosting stages.
self.Estimators_Label = ttk.Label(frame, text='# of Stages:')
self.Estimators = StringVar()
self.Estimators.set('100')
self.Estimators_Box = ttk.Entry(frame, textvariable=self.Estimators, width=7)
#Options for the max depth
self.MaxDepth_Label = ttk.Label(frame, text='Max Depth:')
self.MaxDepth = StringVar()
self.MaxDepth.set('0')
self.MaxDepth_Box = ttk.Entry(frame, textvariable=self.MaxDepth, width=7)
#Options for the minimum number of samples before an internal node is split.
self.MinSamplesSplit_Label = ttk.Label(frame, text='Min Samples to Split:')
self.MinSamplesSplit = StringVar()
self.MinSamplesSplit.set('2')
self.MinSamplesSplit_Box = ttk.Entry(frame, textvariable=self.MinSamplesSplit, width=7)
#Options for the minimum number of leaf nodes
self.MinSamplesLeaf_Label = ttk.Label(frame, text='Min # of Leaf Nodes:')
self.MinSamplesLeaf = StringVar()
self.MinSamplesLeaf.set('1')
self.MinSamplesLeaf_Box = ttk.Entry(frame, textvariable=self.MinSamplesLeaf, width=7)
#Options for the minimum fraction of leaf nodes
self.MinFractionLeaf_Label = ttk.Label(frame, text='Min % of Leaf Nodes:')
self.MinFractionLeaf = StringVar()
self.MinFractionLeaf.set('0.0')
self.MinFractionLeaf_Box = ttk.Entry(frame, textvariable=self.MinFractionLeaf, width=7)
#Options for batch size
self.Subsample_Label = ttk.Label(frame, text='Batch Size:')
self.Subsample = StringVar()
self.Subsample.set('1.0')
self.Subsample_Box = Spinbox(frame, from_=0.0, to=1.0, increment=0.01, textvariable=self.Subsample, width=5)
#Options for max features.
self.MaxFeatures_Label = ttk.Label(frame, text='Max Features:')
self.MaxFeatures = StringVar()
self.MaxFeatures.set('none')
self.MaxFeatures_Integer = StringVar()
self.MaxFeatures_Float = StringVar()
self.MaxFeatures_Float.set('0.1')
self.MaxFeatures_None = ttk.Radiobutton(frame, text='None', variable=self.MaxFeatures, value='none')
self.MaxFeatures_Integer_Button = ttk.Radiobutton(frame, text='Number:', variable=self.MaxFeatures, value='integer')
self.MaxFeatures_Integer_Box = ttk.Entry(frame, textvariable=self.MaxFeatures_Integer, width=7)
self.MaxFeatures_Float_Button = ttk.Radiobutton(frame, text='Percentage:', variable=self.MaxFeatures, value='float')
self.MaxFeatures_Float_Box = Spinbox(frame, from_=0.0, to=1.0, textvariable=self.MaxFeatures_Float, width=5, increment=0.01)
self.MaxFeatures_Auto = ttk.Radiobutton(frame, text='Auto', variable=self.MaxFeatures, value='auto')
self.MaxFeatures_Log2 = ttk.Radiobutton(frame, text='Log2', variable=self.MaxFeatures, value='log2')
#Options for the max # of leaf nodes
self.MaxLeafNodes_Label = ttk.Label(frame, text='Max Leaf Nodes:')
self.MaxLeafNodes = StringVar()
self.MaxLeafNodes.set('none')
self.MaxLeafNodes_None = ttk.Radiobutton(frame, text='None', variable=self.MaxLeafNodes, value='none')
self.MaxLeafNodes_Integer = StringVar()
self.MaxLeafNodes_Integer.set('0')
self.MaxLeafNodes_Integer_Button = ttk.Radiobutton(frame, text='Number:', variable=self.MaxLeafNodes, value='integer')
self.MaxLeafNodes_Integer_Box = ttk.Entry(frame, textvariable=self.MaxLeafNodes_Integer, width=7)
#Options for verbosity
self.Verbose_Label = ttk.Label(frame, text='Verbose Level:')
self.Verbose = StringVar()
self.Verbose.set('0')
self.Verbose_Box = ttk.Entry(frame, textvariable=self.Verbose, width=7)
def Display_Options(self): #Display options for the Decision Tree Classifier.
self.clear_frame(self.frame)
#Insert the options into the frame.
self.Loss_Label.grid(column=0,row=0, sticky=(W))
self.Loss_Deviance.grid(column=1, row=0, sticky=(W))
self.Loss_Exponential.grid(column=2, row=0, sticky=(W))
self.LearningRate_Label.grid(column=0, row=1, sticky=(W))
self.LearningRate_Box.grid(column=1, row=1, sticky=(W))
self.Estimators_Label.grid(column=0, row=2, sticky=(W))
self.Estimators_Box.grid(column=1, row=2, sticky=(W))
self.MaxDepth_Label.grid(column=0, row=3, sticky=(W))
self.MaxDepth_Box.grid(column=1, row=3, sticky=(W))
self.MinSamplesSplit_Label.grid(column=0, columnspan=2, row=4, sticky=(W))
self.MinSamplesSplit_Box.grid(column=2, row=4, sticky=(W))
self.MinSamplesLeaf_Label.grid(column=0, columnspan=2, row=5, sticky=(W))
self.MinSamplesLeaf_Box.grid(column=2, row=5, sticky=(W))
self.MinFractionLeaf_Label.grid(column=0, columnspan=2, row=6, sticky=(W))
self.MinFractionLeaf_Box.grid(column=2, row=6, sticky=(W))
self.Subsample_Label.grid(column=0, row=7, sticky=(W))
self.Subsample_Box.grid(column=1, row=7, sticky=(W))
self.MaxFeatures_Label.grid(column=0, row=8, sticky=(W))
self.MaxFeatures_None.grid(column=0, row=9, sticky=(W))
self.MaxFeatures_Integer_Button.grid(column=0, row=10, sticky=(W))
self.MaxFeatures_Integer_Box.grid(column=1, row=10, sticky=(W))
self.MaxFeatures_Float_Button.grid(column=0, row=11, sticky=(W))
self.MaxFeatures_Float_Box.grid(column=1, row=11, sticky=(W))
self.MaxFeatures_Auto.grid(column=0, row=12, sticky=(W))
        self.MaxFeatures_Log2.grid(column=1, row=12, sticky=(W))  # column 1 so it does not overlap the 'Auto' button
self.MaxLeafNodes_Label.grid(column=0, row=13, sticky=(W))
self.MaxLeafNodes_None.grid(column=0, row=14, sticky=(W))
self.MaxLeafNodes_Integer_Button.grid(column=0, row=15, sticky=(W))
self.MaxLeafNodes_Integer_Box.grid(column=1, row=15, sticky=(W))
self.Verbose_Label.grid(column=0, row=16, sticky=(W))
self.Verbose_Box.grid(column=1, row=16, sticky=(W))
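# Usage sketch (assumes a Tk root window and the clear_frame helper from the
# Algorithm base class): frame = ttk.Frame(root);
# opts = Gradient_Boosting_Classifier(frame); opts.Display_Options()
# lays out the widgets; the selected values can then be read from the
# StringVar fields (e.g. opts.LearningRate.get()).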
| [
"tkinter.ttk.Label",
"tkinter.ttk.Radiobutton",
"tkinter.ttk.Entry"
] | [((289, 328), 'tkinter.ttk.Label', 'ttk.Label', (['frame'], {'text': '"""Loss Function:"""'}), "(frame, text='Loss Function:')\n", (298, 328), False, 'from tkinter import ttk\n'), ((409, 486), 'tkinter.ttk.Radiobutton', 'ttk.Radiobutton', (['frame'], {'text': '"""Deviance"""', 'variable': 'self.Loss', 'value': '"""deviance"""'}), "(frame, text='Deviance', variable=self.Loss, value='deviance')\n", (424, 486), False, 'from tkinter import ttk\n'), ((514, 602), 'tkinter.ttk.Radiobutton', 'ttk.Radiobutton', (['frame'], {'text': '"""Exponential"""', 'variable': 'self.Loss', 'value': '"""exponential"""'}), "(frame, text='Exponential', variable=self.Loss, value=\n 'exponential')\n", (529, 602), False, 'from tkinter import ttk\n'), ((666, 705), 'tkinter.ttk.Label', 'ttk.Label', (['frame'], {'text': '"""Learning Rate:"""'}), "(frame, text='Learning Rate:')\n", (675, 705), False, 'from tkinter import ttk\n'), ((969, 1006), 'tkinter.ttk.Label', 'ttk.Label', (['frame'], {'text': '"""# of Stages:"""'}), "(frame, text='# of Stages:')\n", (978, 1006), False, 'from tkinter import ttk\n'), ((1095, 1150), 'tkinter.ttk.Entry', 'ttk.Entry', (['frame'], {'textvariable': 'self.Estimators', 'width': '(7)'}), '(frame, textvariable=self.Estimators, width=7)\n', (1104, 1150), False, 'from tkinter import ttk\n'), ((1210, 1245), 'tkinter.ttk.Label', 'ttk.Label', (['frame'], {'text': '"""Max Depth:"""'}), "(frame, text='Max Depth:')\n", (1219, 1245), False, 'from tkinter import ttk\n'), ((1326, 1379), 'tkinter.ttk.Entry', 'ttk.Entry', (['frame'], {'textvariable': 'self.MaxDepth', 'width': '(7)'}), '(frame, textvariable=self.MaxDepth, width=7)\n', (1335, 1379), False, 'from tkinter import ttk\n'), ((1496, 1542), 'tkinter.ttk.Label', 'ttk.Label', (['frame'], {'text': '"""Min Samples to Split:"""'}), "(frame, text='Min Samples to Split:')\n", (1505, 1542), False, 'from tkinter import ttk\n'), ((1644, 1704), 'tkinter.ttk.Entry', 'ttk.Entry', (['frame'], {'textvariable': 'self.MinSamplesSplit', 'width': '(7)'}), '(frame, textvariable=self.MinSamplesSplit, width=7)\n', (1653, 1704), False, 'from tkinter import ttk\n'), ((1789, 1834), 'tkinter.ttk.Label', 'ttk.Label', (['frame'], {'text': '"""Min # of Leaf Nodes:"""'}), "(frame, text='Min # of Leaf Nodes:')\n", (1798, 1834), False, 'from tkinter import ttk\n'), ((1933, 1992), 'tkinter.ttk.Entry', 'ttk.Entry', (['frame'], {'textvariable': 'self.MinSamplesLeaf', 'width': '(7)'}), '(frame, textvariable=self.MinSamplesLeaf, width=7)\n', (1942, 1992), False, 'from tkinter import ttk\n'), ((2080, 2125), 'tkinter.ttk.Label', 'ttk.Label', (['frame'], {'text': '"""Min % of Leaf Nodes:"""'}), "(frame, text='Min % of Leaf Nodes:')\n", (2089, 2125), False, 'from tkinter import ttk\n'), ((2229, 2289), 'tkinter.ttk.Entry', 'ttk.Entry', (['frame'], {'textvariable': 'self.MinFractionLeaf', 'width': '(7)'}), '(frame, textvariable=self.MinFractionLeaf, width=7)\n', (2238, 2289), False, 'from tkinter import ttk\n'), ((2347, 2383), 'tkinter.ttk.Label', 'ttk.Label', (['frame'], {'text': '"""Batch Size:"""'}), "(frame, text='Batch Size:')\n", (2356, 2383), False, 'from tkinter import ttk\n'), ((2619, 2657), 'tkinter.ttk.Label', 'ttk.Label', (['frame'], {'text': '"""Max Features:"""'}), "(frame, text='Max Features:')\n", (2628, 2657), False, 'from tkinter import ttk\n'), ((2870, 2946), 'tkinter.ttk.Radiobutton', 'ttk.Radiobutton', (['frame'], {'text': '"""None"""', 'variable': 'self.MaxFeatures', 'value': '"""none"""'}), "(frame, text='None', variable=self.MaxFeatures, value='none')\n", (2885, 
2946), False, 'from tkinter import ttk\n'), ((2984, 3071), 'tkinter.ttk.Radiobutton', 'ttk.Radiobutton', (['frame'], {'text': '"""Number:"""', 'variable': 'self.MaxFeatures', 'value': '"""integer"""'}), "(frame, text='Number:', variable=self.MaxFeatures, value=\n 'integer')\n", (2999, 3071), False, 'from tkinter import ttk\n'), ((3101, 3165), 'tkinter.ttk.Entry', 'ttk.Entry', (['frame'], {'textvariable': 'self.MaxFeatures_Integer', 'width': '(7)'}), '(frame, textvariable=self.MaxFeatures_Integer, width=7)\n', (3110, 3165), False, 'from tkinter import ttk\n'), ((3201, 3290), 'tkinter.ttk.Radiobutton', 'ttk.Radiobutton', (['frame'], {'text': '"""Percentage:"""', 'variable': 'self.MaxFeatures', 'value': '"""float"""'}), "(frame, text='Percentage:', variable=self.MaxFeatures, value\n ='float')\n", (3216, 3290), False, 'from tkinter import ttk\n'), ((3441, 3517), 'tkinter.ttk.Radiobutton', 'ttk.Radiobutton', (['frame'], {'text': '"""Auto"""', 'variable': 'self.MaxFeatures', 'value': '"""auto"""'}), "(frame, text='Auto', variable=self.MaxFeatures, value='auto')\n", (3456, 3517), False, 'from tkinter import ttk\n'), ((3545, 3621), 'tkinter.ttk.Radiobutton', 'ttk.Radiobutton', (['frame'], {'text': '"""Log2"""', 'variable': 'self.MaxFeatures', 'value': '"""log2"""'}), "(frame, text='Log2', variable=self.MaxFeatures, value='log2')\n", (3560, 3621), False, 'from tkinter import ttk\n'), ((3695, 3735), 'tkinter.ttk.Label', 'ttk.Label', (['frame'], {'text': '"""Max Leaf Nodes:"""'}), "(frame, text='Max Leaf Nodes:')\n", (3704, 3735), False, 'from tkinter import ttk\n'), ((3832, 3909), 'tkinter.ttk.Radiobutton', 'ttk.Radiobutton', (['frame'], {'text': '"""None"""', 'variable': 'self.MaxLeafNodes', 'value': '"""none"""'}), "(frame, text='None', variable=self.MaxLeafNodes, value='none')\n", (3847, 3909), False, 'from tkinter import ttk\n'), ((4029, 4117), 'tkinter.ttk.Radiobutton', 'ttk.Radiobutton', (['frame'], {'text': '"""Number:"""', 'variable': 'self.MaxLeafNodes', 'value': '"""integer"""'}), "(frame, text='Number:', variable=self.MaxLeafNodes, value=\n 'integer')\n", (4044, 4117), False, 'from tkinter import ttk\n'), ((4148, 4213), 'tkinter.ttk.Entry', 'ttk.Entry', (['frame'], {'textvariable': 'self.MaxLeafNodes_Integer', 'width': '(7)'}), '(frame, textvariable=self.MaxLeafNodes_Integer, width=7)\n', (4157, 4213), False, 'from tkinter import ttk\n'), ((4268, 4307), 'tkinter.ttk.Label', 'ttk.Label', (['frame'], {'text': '"""Verbose Level:"""'}), "(frame, text='Verbose Level:')\n", (4277, 4307), False, 'from tkinter import ttk\n'), ((4385, 4437), 'tkinter.ttk.Entry', 'ttk.Entry', (['frame'], {'textvariable': 'self.Verbose', 'width': '(7)'}), '(frame, textvariable=self.Verbose, width=7)\n', (4394, 4437), False, 'from tkinter import ttk\n')] |
import pdb
import json
import numpy as np
file = 'benchmark_data.json'
with open(file, 'r') as f:
json_data = json.load(f)
print(json_data.keys()) # ['domains', 'version']
domains = json_data['domains']
print('domain length', len(domains))
corr_data = []
for domain in domains:
temp = {}
temp['long_description'] = domain['description']
temp['short_description'] = domain['name']
intents = domain['intents']
print('intent length', len(intents))
for intent in intents:
temp['intent'] = intent['name']
queries = intent['queries']
print('query length', len(queries))
        for query in queries:
            # copy per query; appending the same dict object repeatedly would
            # leave every entry in corr_data equal to the last query processed
            entry = dict(temp)
            entry['query'] = query['text']
            corr_data.append(entry)
print(len(corr_data))
corr_data = np.array(corr_data)
np.save('benchmark_data.npy', corr_data)
"""
(Pdb) json_data['domains'][3]['intents'][0].keys()
dict_keys(['description', 'benchmark', 'queries', 'slots', '@type', 'name'])
len(json_data['domains'][3]['intents'][0]['description'])
json_data['domains'][3]['intents'][0]['queries']
# length
(Pdb) json_data['domains'][3]['intents'][0]['queries'][0].keys()
dict_keys(['text', 'results_per_service'])
json_data['domains'][3]['intents'][0]['queries'][0]['text']
print(domains.keys()) # ['description', '@type', 'intents', 'name']
"Queries that are related to places (restaurants, shops, concert halls, etc), as well as to the user's location."
'Queries that are related to reservation.'
'Queries that are related to transit and navigation.'
'Queries that relate to weather.'
(Pdb) json_data['domains'][3]['name']
'weather'
(Pdb) json_data['domains'][2]['name']
'transit'
(Pdb) json_data['domains'][1]['name']
'reservation'
(Pdb) json_data['domains'][0]['name']
'places'
print(len(domains)) # 4
(Pdb) len(json_data['domains'][0]['intents'])
4
(Pdb) len(json_data['domains'][1]['intents'])
2
(Pdb) len(json_data['domains'][2]['intents'])
3
(Pdb) len(json_data['domains'][3]['intents'])
1
"""
| [
"numpy.save",
"numpy.array",
"json.load"
] | [((719, 738), 'numpy.array', 'np.array', (['corr_data'], {}), '(corr_data)\n', (727, 738), True, 'import numpy as np\n'), ((739, 779), 'numpy.save', 'np.save', (['"""benchmark_data.npy"""', 'corr_data'], {}), "('benchmark_data.npy', corr_data)\n", (746, 779), True, 'import numpy as np\n'), ((113, 125), 'json.load', 'json.load', (['f'], {}), '(f)\n', (122, 125), False, 'import json\n')] |
# -*- coding: utf-8 -*-
# !/usr/bin/env python
#
# @file __init__.py
# @brief G_MMPBSA DASK PROJECT
# @author <NAME>
#
# <!--------------------------------------------------------------------------
#
# Copyright (c) 2016-2019,<NAME>.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the molmolpy Developers nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ------------------------------------------------------------------------ -->
import itertools
import time
color_iter = itertools.cycle(['navy', 'c', 'cornflowerblue', 'gold',
'darkorange'])
import multiprocessing
import mdtraj as md
from molmolpy.utils.cluster_quality import *
from molmolpy.utils import folder_utils
import json
# explicit imports for os, sys, np and sns used below
import os
import sys
import numpy as np
import seaborn as sns
from molmolpy.utils import helper as hlp
# matplotlib.style.use('ggplot')
sns.set(style="darkgrid")
low_seed = 1
high_seed = 999999999
mgltools_utilities = '/home/john1990/MGLTools-1.5.6/MGLToolsPckgs/AutoDockTools/Utilities24'
class GMMPBSAObject(object):
"""
Usage example
>>> EPI_folder = '/media/Work/MEGA/Programming/StressHormones/dock_EPI'
>>> EPI_samples = '/media/Work/MEGA/Programming/StressHormones/'
>>>
>>>
>>> receptor_file = EPI_folder + os.sep + 'centroid_model_clust2.pdbqt'
>>> ligand_file = EPI_folder + os.sep + 'EPI.pdbqt'
>>> molname = 'EPI'
>>> receptor_name = 'LasR'
>>> run_type = 'vina_sample'
>>>
>>>
>>>
>>> receptor_file = EPI_folder + os.sep + 'centroid.pdb'
>>> ligand_file = EPI_folder + os.sep + 'EPI.pdb'
>>> molname = 'EPI'
>>> receptor_name = 'LasR'
>>>
>>>
>>> EPI_uber_dock = uber_docker.UberDockerObject(receptor_file, ligand_file, '.', molname=molname, receptor_name=receptor_name)
>>>
>>>
>>> EPI_uber_dock.prepare_uber_dock_protocol()
>>> EPI_uber_dock.run_uber_dock_protocol()
Use together
>>> self.prepare_uber_dock_protocol() for preparation
>>> self.run_uber_dock_protocol()
    or separately
>>> EPI_uber_dock.calculate_max_radius_from_com()
>>> EPI_uber_dock.calculate_cube_edges()
>>> EPI_uber_dock.calculate_box_edges_from_com()
>>>
>>>
>>> EPI_uber_dock.prepare_uber_docker()
>>>
>>>
>>> #This is for rDock, and it works so comment this part for a while
>>> EPI_uber_dock.prepare_rdock_settings()
>>> EPI_uber_dock.generate_rdock_cavity()
>>> # Prepare and run Dock programs
>>> EPI_uber_dock.prep_rDock_dock_run_commands()
>>> EPI_uber_dock.run_rDock_simulation(parallel=True, waitTime=15)
>>>
>>> #This is for FlexAid
>>> EPI_uber_dock.prepare_flexaid_settings()
>>> EPI_uber_dock.process_flexaid_ligand()
>>> EPI_uber_dock.get_flexaid_clefts()
>>> EPI_uber_dock.flexaid_generate_ga_dat_parameters()
>>> EPI_uber_dock.flexaid_generate_config_input()
>>> EPI_uber_dock.prep_FlexAid_dock_run_commands()
>>> EPI_uber_dock.run_FlexAid_simulation(parallel=True, waitTime=15)
>>>
>>>
>>> # This is for Autodock vina
>>> EPI_uber_dock.set_up_Vina_Box()
>>> EPI_uber_dock.prepare_Vina_run()
>>> EPI_uber_dock.prepVinaSim_uberDock()
>>> EPI_uber_dock.runVinaSim_uber()
    Wraps a trajectory/topology pair and prepares per-part g_mmpbsa
    binding-energy calculations that can be distributed over a dask cluster.
    The trajectory is loaded with mdtraj and its topology is also kept as a
    pandas dataframe.
Parameters
----------
    traj : str
        Trajectory file (xtc) to analyse.
    topol : str
        Topology file used to load the trajectory.
    tpr_file, mdp_file, index_file : str
        GROMACS run-input, parameter and index files passed to g_mmpbsa.
    first_index, second_index : int
        Index groups of the two interacting components (e.g. receptor and ligand).
    load_state_file : str, optional
        Previously saved JSON state file to resume from.
>>> LasR_MOR_mmpbsa_calc = g_mmpbsa_dask.GMMPBSAObject(traj, topol_file, tpr_file, mdp_file, index_file, first_index, second_index, molname, receptor_name)
>>>
>>>
>>>
>>> LasR_MOR_mmpbsa_calc.prepare_g_mmpbsa_dask_protocol(client)
>>>
>>>
>>> LasR_MOR_mmpbsa_calc.prepare_for_dask_cluster(parallel=True)
>>> #
>>> # LasR_MOR_mmpbsa_calc.run_dask_docking(client)
"""
def __init__(self,
traj, topol, tpr_file, mdp_file, index_file, first_index, second_index,
molname='Unknown',
receptor_name='Unknown',
folder_path='.',
job_name = 'Unknown',
load_state_file=None):
self.load_state_file = load_state_file
if load_state_file is not None:
self.load_state_data_json(self.load_state_file)
else:
print('G_MMPBSA Object has been created')
self.trajectory_file = traj
self.topology_file = topol
self.tpr_file = tpr_file
self.mdp_file = mdp_file
self.index_file = index_file
self.first_index = first_index
self.second_index = second_index
self.prep_g_mmpbsa_run = False
self.folder_exists = False
            # base folder where run files and outputs will be written
self.folder_path = folder_path
self.command_run_list = []
self.command_samples_run_list = []
self.molecule_name = molname
self.ligand_name = molname
self.receptor_name = receptor_name
self.run_type = 'g_mmpbsa'
self.state_data = {}
self.state_data_samples = {}
self.g_mmpbsa_run_finished = False
self.g_mmpbsa_sim_states = {'simStates': {}}
self.objects_loaded = False
self.g_mmpbsa_prepared = False
# This part needs clarification
self.prep_mdtraj_object()
# original data before transformation
# Add receptor name
def set_mgltools_path(self, path):
print('MGLTools path is set to ', path)
self.mgltools_utilities = path
def set_flexaid_path(self, path):
print('FlexAid path is set to ', path)
self.flexaid_path = path
def set_ledock_path(self, path):
print('LeDock path is set to ', path)
self.ledock_path = path
def prep_mdtraj_object(self):
'''
        Load the trajectory with mdtraj and keep its topology both as an
        mdtraj object and as a pandas dataframe.
        :return: None; sets self.trajectory_mdtraj and related attributes
'''
self.trajectory_mdtraj = md.load_xtc(self.trajectory_file, top=self.topology_file)
self.trajectory_mdtraj_topology = self.trajectory_mdtraj.topology
self.trajectory_mdtraj_topology_dataframe = self.trajectory_mdtraj.topology.to_dataframe()
self.objects_loaded = True
def get_uber_g_mmpbsa_run_folder_name(self):
curr_folder = os.getcwd()
return curr_folder + os.sep + self.run_folder_name
def prepare_g_mmpbsa_dask_protocol(self, dask_client=None,
prep_g_mmpbsa=True):
'''
prepare dask tasks for g_mmpbsa
:return:
'''
self.prepare_g_mmpbsa()
test = 1
curr_client = dask_client
# Testing Phase
total_free_cores = 16
# Production
# worker_status = run_dask_tools.get_dask_worker_status(curr_client)
#
# get_worker_free = run_dask_tools.check_free_resources(worker_status)
#
#
# test = 1
#
# total_free_cores = 0
#
# for worker in get_worker_free:
# preped = get_worker_free[worker]['preped']
# total_free_cores += preped['freeCores']
if prep_g_mmpbsa is False:
print('prep gmmpbsa ', prep_g_mmpbsa)
return 'Do not prepare run files'
if self.g_mmpbsa_prepared is True:
print('Do not prep files')
return 'Do not prep files'
traj_len = len(self.trajectory_mdtraj)
import math
# Free core approach
div_traj = math.ceil(traj_len/total_free_cores)
# select_indexes = list(range(total_free_cores))
# Maximum parallel
#div_traj = math.trunc(traj_len/total_free_cores)
select_frames = list(range(0,traj_len,div_traj))
select_indexes = list(range(len(select_frames)))
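        # e.g. (illustrative) a 160-frame trajectory with 16 free cores gives
        # div_traj = 10, select_frames = [0, 10, ..., 150] and 16 slices below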
folder_to_save = self.g_mmpbsa_folder
temp_mdtraj = []
temp_mdtraj_indexes = []
file_save_list = []
abs_file_save_list = []
simStates = {'simStates':{}}
for i,traj in zip(select_indexes,select_frames):
temp_state = {str(i):{}}
temp_traj = self.trajectory_mdtraj[traj:traj+div_traj]
temp_mdtraj.append(temp_traj)
temp_mdtraj_indexes.append(i)
file_save = 'traj_part{0}.xtc'.format(i)
abs_file_save = folder_to_save + os.sep + file_save
file_save_list.append(file_save)
abs_file_save_list.append(abs_file_save)
temp_state[str(i)].update({'runFinished':False,
'index':i,
'absFolder':folder_to_save,
'fileSave':file_save,
'absFileSave':abs_file_save,
'firstIndex':self.first_index,
'secondIndex':self.second_index,
'indexFile':self.index_file,
'mdpFile':self.mdp_file,
'tprFile':self.tpr_file})
energy_mm = 'energy_MM_{0}.xvg'.format(i)
polar = 'polar_{0}.xvg'.format(i)
apolar = 'apolar_{0}.xvg'.format(i)
contrib_mm = 'contrib_MM_{0}.dat'.format(i)
contrib_pol = 'contrib_pol_{0}.dat'.format(i)
contrib_apol = 'contrib_apol_{0}.dat'.format(i)
temp_state[str(i)].update({'energyMM':energy_mm,
'polar':polar,
'apolar':apolar,
'contrib_MM':contrib_mm,
'contrib_pol':contrib_pol,
'contrib_apol':contrib_apol})
temp_traj.save(abs_file_save)
temp_state[str(i)].update({'fileSaved': True
})
simStates['simStates'].update(temp_state)
self.mdtraj_frames = select_frames
self.mdtraj_sliced = temp_mdtraj
self.mdtraj_parts = temp_mdtraj_indexes
self.file_save_list = file_save_list
self.abs_file_save_list = abs_file_save_list
self.simStates = simStates
test = 1
self.g_mmpbsa_prepared = True
self.state_data['energySoftware']['g_mmpbsa'].update({'frames': self.mdtraj_frames})
self.state_data['energySoftware']['g_mmpbsa'].update({'prepare': self.g_mmpbsa_prepared})
self.state_data['energySoftware']['g_mmpbsa'].update({'parts': self.mdtraj_parts})
self.state_data['energySoftware']['g_mmpbsa'].update({'fileList': self.file_save_list})
self.state_data['energySoftware']['g_mmpbsa'].update({'absFileList': self.abs_file_save_list})
self.state_data['energySoftware']['g_mmpbsa'].update(self.simStates)
self.state_data['energySoftware']['g_mmpbsa'].update({'firstIndex': self.first_index})
self.state_data['energySoftware']['g_mmpbsa'].update({'secondIndex': self.second_index})
self.state_data['energySoftware']['g_mmpbsa'].update({'indexFile': self.index_file})
self.state_data['energySoftware']['g_mmpbsa'].update({'mdpFile': self.mdp_file})
self.state_data['energySoftware']['g_mmpbsa'].update({'tprFile': self.tpr_file})
self.save_state_data_json()
test = 1
#self.g_mmpbsa_sim_states = self.state_data['energySoftware']['g_mmpbsa']['simStates']
#self.ledock_samples = self.state_data['energySoftware']['g_mmpbsa']['LeDockSample_list']
# Divide trajectory to number of free cores
# TODO article Pagadala Software for molecular docking: a review
# This will be for leDock
# if prep_g_mmpbsa is True:
# # self.prepare_uber_docker()
# self.prepare_ledock_settings()
# self.prep_LeDock_dock_run_commands()
@hlp.timeit
def prep_LeDock_dock_run_commands(self, num_samples=10):
'''
        Prepare LeDock run commands and save them to the JSON state file
        :param num_samples: number of docking samples to prepare (default 10)
:return:
'''
try:
self.g_mmpbsa_sim_states = self.state_data['dockSoftware']['LeDock']['simStates']
self.ledock_samples = self.state_data['dockSoftware']['LeDock']['LeDockSample_list']
print('No need to generate LeDock commands')
except:
self.state_data['dockSoftware']['LeDock'].update({'LeDockSample_list': self.ledock_samples})
self.state_data['dockSoftware']['LeDock'].update(self.LeDock_sim_states)
for sample_num in self.ledock_samples:
self.prep_LeDock_dock_command(sample_num)
print('Now continue for LeDock:D')
self.save_state_data_json()
test = 1
self.prep_LeDock_run = True
@hlp.timeit
def prep_LeDock_dock_command(self, sample_num, pose_gen=20):
'''
        Prepare a single LeDock run command
:param sample_num:
:param pose_gen: default generate 20 poses
:return:
'''
try:
if self.setup_ledock_pameters is not False:
# print("Running Vina")
# TODO need to think about seed
                # ./ledock_linux_x86 dock.in
command_receptor = self.ledock_path + os.sep + 'ledock_linux_x86'
sample_data = self.ledock_input_info[str(sample_num)]
parm_name = sample_data['ledock_parm_name']
test = 1
self.save_run_name = "ledock_{0}_sample_{1}".format(self.run_type, sample_num)
random_seed = np.random.randint(low_seed, high_seed)
command_to_run = "{0} {1}".format(command_receptor, parm_name)
ligand_clear_dok = sample_data['ligand_clear_name'] + '.dok'
# -spli MOR_flexaid.dok
command_to_clean = "{0} -spli {1}".format(command_receptor, ligand_clear_dok)
print(command_to_run)
self.LeDock_command_run_list.append(command_to_run)
print("Launching new Sim")
temp_dict = {str(sample_num): {'save_run_name': self.save_run_name,
'commandRun': command_to_run,
'commandToClean':command_to_clean,
'dokFileName':ligand_clear_dok,
'runFinished': False}}
self.LeDock_sim_states.update(temp_dict)
self.state_data['dockSoftware']['LeDock']['simStates'].update(temp_dict)
# try:
# os.system(command_to_run)
# except KeyboardInterrupt:
# # quit
# sys.exit()
print("LeDock command generation finished")
else:
print('Please setup LeDock settings')
except Exception as e:
print("error in runSim: ", e)
sys.exit(0)
@hlp.timeit
def check_dask_jobs(self, submitted_jobs_dask, finished_jobs, finished_jobs_dict):
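        '''
        Poll the submitted dask futures; for each newly finished job, write the
        returned g_mmpbsa output files (energy, polar, apolar and per-residue
        contributions) to disk and update the saved JSON state.
        '''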
import copy
# modified_submitted_jobs_dask = copy.deepcopy(submitted_jobs_dask)
for i, job in enumerate(submitted_jobs_dask):
status = job.status
if status == 'finished':
test = 1
# pop_item = modified_submitted_jobs_dask.pop(i)
try:
if finished_jobs_dict[i] is True:
continue
except Exception as error:
pass
finished_jobs.append(job)
finished_jobs_dict.update({i: True})
results = job.result()
test = 1
try:
key = list(results.keys())[0]
prog = results[key]['Program'] # need [0] key
sample_num = results[key]['part_num']
if prog == 'g_mmpbsa':
sample_num = results[key]['part_num']
results_dask = results[key]['dask']
original_data = self.state_data['energySoftware'][prog]
abs_folder = self.g_mmpbsa_folder # original_data['AbsFolder']
out_name = abs_folder + os.sep + results_dask['out_filename']
out_mem = results_dask['out_mem']
out_file = open(out_name, 'w')
out_file.write(out_mem)
out_file.close()
out_name = abs_folder + os.sep + results_dask['apolar_filename']
out_mem = results_dask['apolar_mem']
out_file = open(out_name, 'w')
out_file.write(out_mem)
out_file.close()
out_name = abs_folder + os.sep + results_dask['polar_filename']
out_mem = results_dask['polar_mem']
out_file = open(out_name, 'w')
out_file.write(out_mem)
out_file.close()
out_name = abs_folder + os.sep + results_dask['energyMM_filename']
out_mem = results_dask['energyMM_mem']
out_file = open(out_name, 'w')
out_file.write(out_mem)
out_file.close()
out_name = abs_folder + os.sep + results_dask['contribMM_filename']
out_mem = results_dask['contribMM_mem']
out_file = open(out_name, 'w')
out_file.write(out_mem)
out_file.close()
out_name = abs_folder + os.sep + results_dask['contrib_apol_filename']
out_mem = results_dask['contrib_apol_mem']
out_file = open(out_name, 'w')
out_file.write(out_mem)
out_file.close()
out_name = abs_folder + os.sep + results_dask['contrib_pol_filename']
out_mem = results_dask['contrib_pol_mem']
out_file = open(out_name, 'w')
out_file.write(out_mem)
out_file.close()
# out_pdbqt_filename = out_pdbqt_name
# self.state_data['dockSoftware'][prog]['simStates'][str(sample_num )] = \
# results[key]
update_results = copy.deepcopy(results)
update_results[key].pop('dask', None)
# self.state_data['dockSoftware'][prog]['simStates'][str(sample_num )] = results[key]
# self.state_data['energySoftware'][prog]['simStates'][str(sample_num)] = update_results[key]
self.before_dask['energySoftware'][prog]['simStates'][str(sample_num)] = update_results[key]
# results_dask = results[key]['dask']
# else:
# self.state_data['dockSoftware'][prog]['simStates'][str(sample_num)] = results[key]
# if filename is None and filedata is None:
# # filename = self.json_state_file
# filename = self.absolute_json_state_file
# filedata = self.state_data
self.save_state_data_json(filedata=self.before_dask, filename=self.absolute_json_state_file)
# allow CPU to cool down
# self.hold_nSec(5)
print('This success ---> ', i)
except Exception as error:
print('error is ', error)
# print('i is ', i)
print('Finished checking dask submissions ---\n')
print('---' * 10)
return finished_jobs, finished_jobs_dict
# @hlp.timeit
def run_dask_gmmpbsa(self, client=None, max_jobs_to_run=10):
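        '''
        Submit the prepared g_mmpbsa jobs to the dask cluster, keeping at most
        `max_jobs_to_run` jobs in flight, and poll until every trajectory part
        has finished and its results have been written to disk.
        '''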
# from molmolpy.moldock import run_dask_tools
from molmolpy.tools import run_dask_tools
test = 1
curr_client = client
worker_status = run_dask_tools.get_dask_worker_status(curr_client)
get_worker_free = run_dask_tools.check_free_resources(worker_status)
import copy
original_get_worker_free = copy.deepcopy(get_worker_free)
# TEST IT WORKS
# queue_jobs = self.run_mmpbsa_dask
# job_test = queue_jobs[0]
#
# result = run_dask_tools.run_gmmpbsa_using_dask(job_test)
test = 1
# Local upload test
# big_future = self.dask_prep
# run_dask_tools.upload_g_mmpbsa_files_dask(big_future)
#TODO
# Scatter a lot better using scatter for big files for upload G_MMPBSA files
# test = 1
# tasks_upload = []
# big_future = client.scatter(self.dask_prep, broadcast=True)
# for worker in get_worker_free:
# worker_info = get_worker_free[worker]
# worker_address = worker_info['preped']['workerAddress']
#
# retries_num = 2
#
# # Upload files to all clients client.upload_file
# task = client.submit(run_dask_tools.upload_g_mmpbsa_files_dask,
# big_future,
# workers=[worker_address],
# key='key_scatter_{0}'.format(worker_address),
# retries=retries_num)
# tasks_upload.append(task)
# print("Starting uploading to ", worker_address)
test = 1
# TODO
# This part runs the main program
submitted_jobs = []
submitted_jobs_dask = []
queue_jobs = self.run_mmpbsa_dask
job_quantity = len(queue_jobs)
finished_jobs = []
finished_jobs_dict = {}
worker_status_free = None
test = 1
# maybe 2 async threads, one checks finished simulations, other submits jobs
###############################################################################################
gmmbpsa_min_mem = 1000
retries_num = 2
curr_index = 0
curr_worker = 0
# prepare worker ids for easier switch
worker_ids = {}
for i, id in enumerate(get_worker_free):
worker_ids.update({i: id})
custom_index_curr = 3
while len(queue_jobs) > 0:
if curr_index == len(queue_jobs):
curr_index = 0
if curr_worker == len(worker_ids):
curr_worker = 0
print('-----------------------------------------------------------------')
worker_status_temp = run_dask_tools.get_dask_worker_status(curr_client, custom_index=custom_index_curr)
get_worker_free_temp = run_dask_tools.check_free_resources(worker_status_temp)
custom_index_curr += 2
print('----------------TEST------------------')
curr_item = queue_jobs[curr_index]
test = 1
curr_worker_id = worker_ids[curr_worker]
workstation_info_temp = get_worker_free_temp[curr_worker_id]
workstation_preped_temp = workstation_info_temp['preped']
workstation_address = workstation_preped_temp['workerAddress']
# This way folder is buggy
workstation_dir = original_get_worker_free[curr_worker_id]['preped']['workerDir']
workstation_freemem = workstation_preped_temp['freeMemory']
workstation_freecpu = workstation_preped_temp['freeCores']
curr_item_prog = curr_item['Program']
############################################################
# submitted_jobs_dask len less than 16
jobs_running = len(submitted_jobs_dask) - len(finished_jobs)
max_jobus = max_jobs_to_run
# g_mmpbsa part
if curr_item_prog == 'g_mmpbsa':
if workstation_freemem > gmmbpsa_min_mem and jobs_running <max_jobus:
print('Submit MMPBSA job to DASK')
pop_item = queue_jobs.pop(curr_index)
key_name = pop_item['save_run_name']
run_name = 'key_{0}_{1}'.format(key_name, curr_worker_id)
print('Cur run ', run_name)
if curr_index == 0:
curr_index = 0
else:
curr_index -= 1
pop_item.update({'workingDir':workstation_dir})
submitted_jobs.append(pop_item)
# MAYBE CHECK FOLDER HERE
#
#big_future = client.scatter(pop_item, workers=[workstation_address], hash=False)
big_future = pop_item
task_g_mmpbsa = client.submit(run_dask_tools.run_gmmpbsa_using_dask,
big_future,
workers=[workstation_address],
key=run_name,
retries=retries_num)
submitted_jobs_dask.append(task_g_mmpbsa)
else:
key_name = curr_item['save_run_name']
run_name = 'key_{0}_{1}'.format(key_name, curr_worker_id)
print('Passed running ', run_name)
# submitted_jobs_dask_temp, finished_jobs_temp = self.check_dask_jobs(submitted_jobs_dask,finished_jobs)
finished_jobs, finished_jobs_dict = self.check_dask_jobs(submitted_jobs_dask, finished_jobs,
finished_jobs_dict)
test = 1
###################################################3
# update index
# print(curr_item)
# How to save submitted jobs state
print('-------')
if curr_index == 0 and len(submitted_jobs_dask) == 1:
curr_index = 0
else:
curr_index += 1
curr_worker += 1
time.sleep(10)
test = 1
# ###############################################################################################
#
# # work_address = workstation1_preped['workerAddress']
# #
# # # This is to run on dask server
# #
# # # TODO this works need to create a quiiee
# # retries_num = 2
# # task = client.submit(run_dask_tools.run_vina_using_dask,
# # data,
# # workers=[work_address],
# # key='key_test',
# # retries=retries_num)
#
# # TODO This part needs further refinement
#
# # break
#
# test = 1
#
print('Last Check of submitted jobs')
while len(finished_jobs) != job_quantity:
finished_jobs, finished_jobs_dict = self.check_dask_jobs(submitted_jobs_dask, finished_jobs,
finished_jobs_dict)
time.sleep(60)
print('->' * 10)
print('Everything is finished :))))))')
print('---' * 10)
print('\n')
def prepare_for_dask_cluster(self, LeDock=2, rDock=2, FlexAid=2, Vina=2, parallel=False):
'''
        Prepare the g_mmpbsa job queue (one job per trajectory part) for the dask cluster
:return:
'''
current_pid = multiprocessing.current_process().pid
print("Main Process with PID:{}".format(current_pid))
# free_threads_for_Vina = num_threads - LeDock-rDock-FlexAid
run_g_mmpbsa = []
run_mmpbsa_queue = []
# Prepare outputs
import copy
self.before_dask = copy.deepcopy(self.state_data)
################################################################################
if self.g_mmpbsa_prepared is True:
full_g_mmpbsa_data = self.state_data['energySoftware']['g_mmpbsa']
test = 1
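            # Read the shared g_mmpbsa inputs (tpr/mdp/index) into memory so that each
            # trajectory part can ship them to a remote Dask worker together with its frames.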
tpr_abs= full_g_mmpbsa_data['tprFile']
tpr_file = open(tpr_abs, 'rb')
tpr_mem = tpr_file.read()
tpr_filename = tpr_abs.split(os.sep)[-1]
#
mdp_abs= full_g_mmpbsa_data['mdpFile']
mdp_file = open(mdp_abs, 'r')
mdp_mem = mdp_file.read()
mdp_filename = mdp_abs.split(os.sep)[-1]
index_abs= full_g_mmpbsa_data['indexFile']
index_file = open(index_abs, 'r')
index_mem = index_file.read()
index_filename = index_abs.split(os.sep)[-1]
# data_pre = self.state_data['energySoftware']['g_mmpbsa']
# data_pre.update({'dask': {}})
data_pre = {}
data_pre.update({'tprName':tpr_filename, 'tprMem':tpr_mem})
data_pre.update({'mdpName':mdp_filename, 'mdpMem':mdp_mem})
data_pre.update({'indexName':index_filename, 'indexMem':index_mem})
self.dask_prep = data_pre
for part_num in full_g_mmpbsa_data['parts']:
# self.run_FlexAid_sim(FlexAid_sample_num, waitTime=waitTime)
data = self.state_data['energySoftware']['g_mmpbsa']['simStates'][str(part_num)]
save_run_name = "g_mmpbsa_part_{0}".format(part_num)
data.update({'Program': 'g_mmpbsa'})
data.update({'part_num': part_num})
data.update({'save_run_name': save_run_name})
data.update({'dask': {}})
traj_abs = data['absFileSave']
traj_file = open(traj_abs, 'rb')
traj_mem = traj_file.read()
traj_filename = data['fileSave']
data['dask'].update({'tprName': tpr_filename})
data['dask'].update({'mdpName': mdp_filename})
data['dask'].update({'indexName': index_filename})
data['dask'].update({'trajMem':traj_mem, 'trajName':traj_filename})
data['dask'].update({'tprName': tpr_filename, 'tprMem': tpr_mem})
data['dask'].update({'mdpName': mdp_filename, 'mdpMem': mdp_mem})
data['dask'].update({'indexName': index_filename, 'indexMem': index_mem})
test = 1
# data['dask'].update({'cavFile':cav_file_mem })
# self.state_data['dockSoftware']['LeDock']['simStates'][str(LeDock_sample_num)] = data
test = 1
run_g_mmpbsa.append(data)
# # result = run_dock_tools.run_LeDock_sim_parallel(LeDock_sample_num, data)
# # test = 1
#
# test = 1
###################################################################################################
test = 1
####################################################################################################
self.run_mmpbsa_dask = run_g_mmpbsa
curr_LeDock = 0
# very slow
# while len(run_docking_queue) != 40:
# run_docking_queue += run_docking_LeDock[curr_LeDock:curr_LeDock + LeDock]
# curr_LeDock += LeDock
#
# test = 1
# run_docking_queue += run_docking_rDock[curr_rDock:curr_rDock + rDock]
# curr_rDock += rDock
#
# run_docking_queue += run_docking_FlexAid[curr_FlexAid:curr_FlexAid + FlexAid]
#
# curr_FlexAid += FlexAid
#
# run_docking_queue += run_docking_Vina[curr_Vina:curr_Vina + Vina]
# curr_Vina += Vina
#
# test = 1
#
test = 1
run_mmpbsa_queue = run_g_mmpbsa
# run_docking_queue = run_docking_LeDock + run_docking_FlexAid + run_docking_Vina
final_queue_job = []
# Need to select those that are not finished
for pre_job in run_mmpbsa_queue:
# print(pre_job)
if pre_job['runFinished'] is False:
final_queue_job.append(pre_job)
test = 1
self.run_mmpbsa_dask = final_queue_job
# random.shuffle(self.run_docking_queue)
print('Finished preparing g_mmpbsa jobs')
# TODO should I add json saving of information or not?
def load_state_data_json(self, filename):
'''
:param filename: load json state data
:return:
'''
# self.absolute_path = os.path.abspath(filename)
self.load_state_called = True
print(os.path.abspath(__file__))
self.state_data = json.load(open(filename, "r"))
# os.chdir('HSL_exhaustiveness')
self.trajectory_file = self.state_data['trajectoryFile']
self.mdp_file = self.state_data['mdpFile']
self.tpr_file = self.state_data['tprFile']
self.index_file = self.state_data['indexFile']
self.folder_path = self.state_data['folderPath']
self.run_type = self.state_data['runType']
self.molecule_name = self.state_data['molName']
self.receptor_name = self.state_data['receptorName']
# TODO test
self.sim_folder_run = self.state_data['simRunFolder'] # .split('/')[-1]
self.directories = self.state_data['directory']
self.folder_exists = self.state_data['folderCreated']
self.absolute_json_state_file = self.state_data['absoluteJsonStates']
self.g_mmpbsa_folder = self.state_data['RunFolder']
self.json_state_file = self.state_data['jsonStates']
test = 1
# self.rdock_folder_name = self.receptor_name + '_' + self.molecule_name + '_' + 'rDock'
# self.rdock_absolute_folder_name = self.uber_dock_folder + os.sep + self.rdock_folder_name
# self.directories = self.find_sample_folders(self.folder_path, dir_name=self.run_type)
# self.directories = folder_utils.find_folder_in_path(self.uber_dock_folder, self.rdock_folder_name)
# print('TADA ', self.directories)
test = 1
# This will hold information about run states
# self.uber_dock_folder = self.get_uber_dock_run_folder_name()
########################################################################################
# # LeDock settings part
#
# self.ledock_data = self.state_data['dockSoftware']['LeDock']
# test = 1
#
# # Try to load initial LeDock
try:
self.mdtraj_frames = self.state_data['energySoftware']['g_mmpbsa']['frames']
self.mdtraj_parts = self.state_data['energySoftware']['g_mmpbsa']['parts']
self.file_save_list = self.state_data['energySoftware']['g_mmpbsa']['fileList']
self.abs_file_save_list = self.state_data['energySoftware']['g_mmpbsa']['absFileList']
self.simStates = self.state_data['energySoftware']['g_mmpbsa']['simStates']
test = 1
self.g_mmpbsa_prepared = self.state_data['energySoftware']['g_mmpbsa']['prepare']
# self.state_data['energySoftware']['g_mmpbsa'].update({'frames': self.mdtraj_frames})
# self.state_data['energySoftware']['g_mmpbsa'].update({'prepare': self.g_mmpbsa_prepared})
# self.state_data['energySoftware']['g_mmpbsa'].update({'parts': self.mdtraj_parts})
# self.state_data['energySoftware']['g_mmpbsa'].update({'fileList': self.file_save_list})
# self.state_data['energySoftware']['g_mmpbsa'].update({'absFileList': self.abs_file_save_list})
# self.state_data['energySoftware']['g_mmpbsa'].update(self.simStates)
except:
            print('g_mmpbsa state data is empty, please verify')
#
# test = 1
#
# try:
# self.setup_ledock_pameters = self.ledock_data['setup_LeDock']
# self.ledock_num_samples = self.ledock_data['num_samples']
# self.ledock_input_info = self.ledock_data['LeDockInputInfo']
# self.param_ledock_template = self.ledock_data['paramFull']
# except:
# print('LeDock setting part is empty verify yolo')
#
# try:
# self.ledock_param_title = self.ledock_data['LeDock_params']['title']
# self.rdock_title = self.ledock_data['LeDock_params']['title']
#
# self.receptor_file_ledock = self.ledock_data['LeDock_params']['receptorFile']
# self.ledock_rmsd = self.ledock_data['LeDock_params']['LeDockRMSD']
#
# self.ledock_xmin = self.ledock_data['LeDock_params']['xmin']
# self.ledock_xmax = self.ledock_data['LeDock_params']['xmax']
# self.ledock_ymin = self.ledock_data['LeDock_params']['ymin']
# self.ledock_ymax = self.ledock_data['LeDock_params']['ymax']
# self.ledock_zmin = self.ledock_data['LeDock_params']['zmin']
# self.ledock_zmax = self.ledock_data['LeDock_params']['zmax']
#
# except:
# print('LeDock_params is empty verify yolo')
#
# try:
# self.LeDock_sim_states = self.state_data['dockSoftware']['LeDock']['simStates']
# self.ledock_samples = self.state_data['dockSoftware']['LeDock']['LeDockSample_list']
# print('No need to generate LeDock commands')
# self.prep_LeDock_run = True
# except:
# print('LeDock_params simStates is empty verify yolo')
#
# test = 1
def prepare_g_mmpbsa(self):
'''
Prepare g_mmpbsa run folder and initial json configuration
:return:
'''
self.run_folder_name = self.receptor_name + '_' + self.molecule_name + '_' + self.run_type
self.sim_folder_run = self.folder_path + os.sep + self.run_folder_name
# Create folder don't forget
# self.directories = self.find_sample_folders(self.folder_path, dir_name=self.run_type)
self.directories = folder_utils.find_folder_in_path(self.folder_path, self.run_folder_name)
print('TADA ', self.directories)
self.json_state_file = self.sim_folder_run + os.sep + self.receptor_name + '_' + self.molecule_name + '_' + self.run_type + '.json'
# This will hold information about run states
self.g_mmpbsa_folder = self.get_uber_g_mmpbsa_run_folder_name()
self.absolute_json_state_file = self.g_mmpbsa_folder + os.sep + self.receptor_name + '_' + self.molecule_name + '_' + self.run_type + '.json'
if len(self.directories) == 0:
print('Creating folder for g_mmpbsa run\n')
print(self.sim_folder_run)
folder_utils.create_folder(self.sim_folder_run)
self.folder_exists = True
programs_dict = {'energySoftware': {'g_mmpbsa': {}}}
self.state_data.update({'trajectoryFile': self.trajectory_file,
'mdpFile': self.mdp_file,
'tprFile': self.tpr_file,
'indexFile': self.index_file,
'runFolderName': self.run_folder_name,
'folderPath': self.folder_path,
'jsonStates': self.json_state_file,
'runType': self.run_type,
'molName': self.molecule_name,
'receptorName': self.receptor_name,
'simRunFolder': self.sim_folder_run,
'RunFolder': self.g_mmpbsa_folder,
'absoluteJsonStates': self.absolute_json_state_file,
'directory': self.directories,
'folderCreated': self.folder_exists,
'simStates': {}})
self.state_data.update(programs_dict)
# self.prepVinaSim_exhaust()
self.save_state_data_json()
self.load_state_called = False
else:
self.load_state_file = self.json_state_file
self.load_state_called = True
self.load_state_data_json(self.load_state_file)
def prepare_ledock_settings(self):
'''
        Prepare the LeDock folder and initial json configuration
        >>> EPI_uber_dock.prepare_ledock_settings()
        Writes the receptor as pdb and the ligand as mol2, then cleans the receptor with LePro
:return:
'''
# self.output_receptor_rdock = Outputfile("mol2", "{0}.mol2".format(self.receptor_name))
# self.output_receptor_rdock.write(self.receptor_pybel)
# self.output_receptor_rdock.close()
#
# self.output_ligand_rdock = Outputfile("sd", "{0}.sd".format(self.ligand_name))
# self.output_ligand_rdock.write(self.ligand_pybel )
# self.output_ligand_rdock.close()
self.ledock_folder_name = self.receptor_name + '_' + self.molecule_name + '_' + 'LeDock'
self.ledock_absolute_folder_name = self.uber_dock_folder + os.sep + self.ledock_folder_name
test = 1
# self.directories = self.find_sample_folders(self.folder_path, dir_name=self.run_type)
self.ledock_directories = folder_utils.find_folder_in_path(self.uber_dock_folder, self.ledock_folder_name)
print('TADA ', self.ledock_directories)
test = 1
# This will hold information about run states
# self.uber_dock_folder = self.get_uber_dock_run_folder_name()
if len(self.ledock_directories) == 0:
            print('Creating LeDock folder in uberDock folder \n')
print(self.ledock_directories)
folder_utils.create_folder(self.ledock_absolute_folder_name)
test = 1
self.receptor_ledock_pdb = "{0}.pdb".format(self.receptor_name)
self.ligand_ledock_mol2 = "{0}.mol2".format(self.ligand_name)
self.absolute_receptor_ledock_pdb = self.ledock_absolute_folder_name + os.sep + self.receptor_ledock_pdb
self.absolute_ligand_ledock_mol2 = self.ledock_absolute_folder_name + os.sep + self.ligand_ledock_mol2
self.receptor_pybel.write("pdb", self.absolute_receptor_ledock_pdb, overwrite=True)
self.ligand_pybel.write("mol2", self.absolute_ligand_ledock_mol2, overwrite=True)
self.ledock_folder_exists = True
test = 1
# TODO enter ledock folder and process structure for docking using lepro
# ./lepro_linux_x86 LasR_flexaid.pdb
os.chdir(self.ledock_absolute_folder_name)
command_receptor = self.ledock_path + os.sep + 'lepro_linux_x86' + ' {0} '.format(self.receptor_ledock_pdb)
os.system(command_receptor)
self.lepro_pdb_file = 'pro.pdb'
            # Need to check whether LePro ran fine
print('Updated receptor with LePro\n')
os.chdir(self.uber_dock_folder)
self.state_data['dockSoftware']['LeDock'].update(
{'receptor_pdb': self.receptor_ledock_pdb,
'ligand_mol2': self.ligand_ledock_mol2,
'lepro_pdb': self.lepro_pdb_file,
'lepro_abs_pdb': self.ledock_absolute_folder_name + os.sep + self.lepro_pdb_file,
'abs_receptor_pdb': self.absolute_receptor_ledock_pdb,
'abs_ligand_mol2': self.absolute_ligand_ledock_mol2,
'LeDockFolderStatus': self.ledock_folder_exists,
'LeDockAbsFolder': self.ledock_absolute_folder_name,
'LeDockFolderName': self.ledock_folder_name})
self.save_state_data_json()
self.load_state_called = False
self.ledock_title = self.receptor_name + '_' + self.ligand_name + '_LeDock Parameter file'
self.ledock_rmsd = 0.5
self.set_up_ledock_dock_blind_parameters(title=self.ledock_title,
receptor_file=self.lepro_pdb_file,
ledock_rmsd=self.ledock_rmsd,
x_center=self.x_center,
y_center=self.y_center,
z_center=self.z_center)
else:
print('state has beeen loaded \n')
##############################################################################
def flexaid_generate_ga_dat_parameters(self):
'''
Generate GA dat parameters for flexaid docking
:return:
'''
self.flexaid_ga_dat_param_template = '''# Number of chromosomes (number individuals in the population)
# Integer in interval [1-N]
NUMCHROM 500
# Number of generations
# Integer in interval [1-N]
NUMGENER 500
# Use Adaptive Genetic-Algorithm
# Value of 0 or 1
ADAPTVGA 1
# Adaptive crossover and mutation probabilities
# Floats in interval [0.0,1.0]
ADAPTKCO 0.95 0.10 0.95 0.10
# Constant crossover probability
# Float in interval [0.0,1.0]
# Only considered when ADAPTVGA is 0
CROSRATE 0.90
# Constant mutation probability
# Float in interval [0.0,1.0]
# Only considered when ADAPTVGA is 0
MUTARATE 0.10
# Crossover operator
# Intragenic crossovers are possible
INTRAGEN
# Specifies that the initial population is generated randomly
POPINIMT RANDOM
# Fitness function
# Value in [LINEAR,PSHARE]
FITMODEL PSHARE
# Parameters of the shared fitness function
# Floats in interval [0.0,1000.0]
SHAREALF 4.0
SHAREPEK 5.0
SHARESCL 10.0
# Reproduction model
# Values in [BOOM,STEADY]
REPMODEL BOOM
# Fraction of population to create
# Only considered when REPMODEL is BOOM
BOOMFRAC 1.0
# Number of new individuals to generate at each generation
# Only considered when REPMODEL is STEADY
# Integer in interval [1,N-1] where N is NUMCHROM
STEADNUM 950
# Number of TOP individuals to print in console
# Integer in interval [1,N] where N is NUMCHROM
PRINTCHR 10
'''
self.generate_ga_dat_pameters = True
self.generate_ga_dat = 'ga_inp_' + self.receptor_name + '-' + self.ligand_name + '.dat'
self.generate_ga_dat_name_abs = self.flexaid_absolute_input_folder + os.sep + self.generate_ga_dat
self.generate_ga_dat_object_file = open(self.generate_ga_dat_name_abs, 'w')
self.generate_ga_dat_object_file.write(self.flexaid_ga_dat_param_template)
self.generate_ga_dat_object_file.close()
self.state_data['dockSoftware']['FlexAid'].update({'GA_params': {}})
self.state_data['dockSoftware']['FlexAid']['GA_params'].update(
{'generateGA_param': self.generate_ga_dat_pameters,
'GA_DataName': self.generate_ga_dat,
'GA_DATA_Abs': self.generate_ga_dat_name_abs,
'GA_ParamFull': self.flexaid_ga_dat_param_template})
# self.state_data_samples = self.state_data.copy()
self.save_state_data_json()
# TODO this part needs to be thought out
####################################################################################################################
def flexaid_generate_ga_dat_parameters_dask(self):
'''
Generate GA dat parameters for flexaid docking
:return:
'''
self.flexaid_ga_dat_param_template = '''# Number of chromosomes (number individuals in the population)
# Integer in interval [1-N]
NUMCHROM 500
# Number of generations
# Integer in interval [1-N]
NUMGENER 500
# Use Adaptive Genetic-Algorithm
# Value of 0 or 1
ADAPTVGA 1
# Adaptive crossover and mutation probabilities
# Floats in interval [0.0,1.0]
ADAPTKCO 0.95 0.10 0.95 0.10
# Constant crossover probability
# Float in interval [0.0,1.0]
# Only considered when ADAPTVGA is 0
CROSRATE 0.90
# Constant mutation probability
# Float in interval [0.0,1.0]
# Only considered when ADAPTVGA is 0
MUTARATE 0.10
# Crossover operator
# Intragenic crossovers are possible
INTRAGEN
# Specifies that the initial population is generated randomly
POPINIMT RANDOM
# Fitness function
# Value in [LINEAR,PSHARE]
FITMODEL PSHARE
# Parameters of the shared fitness function
# Floats in interval [0.0,1000.0]
SHAREALF 4.0
SHAREPEK 5.0
SHARESCL 10.0
# Reproduction model
# Values in [BOOM,STEADY]
REPMODEL BOOM
# Fraction of population to create
# Only considered when REPMODEL is BOOM
BOOMFRAC 1.0
# Number of new individuals to generate at each generation
# Only considered when REPMODEL is STEADY
# Integer in interval [1,N-1] where N is NUMCHROM
STEADNUM 950
# Number of TOP individuals to print in console
# Integer in interval [1,N] where N is NUMCHROM
PRINTCHR 10
'''
generate_ga_dat = 'ga_inp_' + self.receptor_name + '-' + self.ligand_name + '.dat'
        generate_ga_dat_name_abs = self.flexaid_absolute_input_folder + os.sep + generate_ga_dat
return [generate_ga_dat, ]
# self.generate_ga_dat_object_file = open(self.generate_ga_dat_name_abs, 'w')
# self.generate_ga_dat_object_file.write(self.flexaid_ga_dat_param_template)
# self.generate_ga_dat_object_file.close()
#
# self.state_data['dockSoftware']['FlexAid'].update({'GA_params': {}})
#
# self.state_data['dockSoftware']['FlexAid']['GA_params'].update(
# {'generateGA_param': self.generate_ga_dat_pameters,
# 'GA_DataName': self.generate_ga_dat,
# 'GA_DATA_Abs': self.generate_ga_dat_name_abs,
# 'GA_ParamFull': self.flexaid_ga_dat_param_template})
# self.state_data_samples = self.state_data.copy()
##############################################################################################
def flexaid_generate_config_input_dask(self):
'''
Generate flexaid config input file
Flexaid is very strict about spaces
:return:
'''
flexaid_config_input_template = '''# Optimization method (genetic-algorithms)
METOPT GA
# The variation in degrees for the anchor angle of the ligand
# Float in interval [1.0-30.0]
VARANG 5.0
# The variation in degrees for the anchor dihedral of the ligand
# Float in interval [1.0-30.0]
VARDIH 5.0
# The variation in degrees for flexible dihedrals of the ligand
# Float in interval [1.0-30.0]
VARFLX 10.0
# Use Vcontacts in the calculations of surfaces in contact
COMPLF VCT
# Do not consider intramolecular interactions
NOINTR
# Side-chain rotamer acceptance threshold
# Float in interval [0.0-1.0]
DEECLA 0.8
# Use instances of side-chain conformers rather than using the Penultimate Rotamer Library
#ROTOBS
# Defines the grid spacing of the binding-site
# Float in interval [0.1,1.0]
SPACER 0.375
# Exclude hetero groups in the target (water,metal,modified amino acids,cofactors,ligands)
# To exclude these groups, uncomment the next line
#EXCHET
# Include water molecules in the target (always removed by default)
# Only considered if EXCHET is disabled
# To include water molecules, uncomment the next line
#INCHOH
# Permeability allowed between atoms
# Float in interval [0.0,1.0] from fully permeable to no permeability
PERMEA 0.9
# Permeability for side-chain rotamer acceptance
# Float in interval [0.0,1.0] from fully permeable to no permeability
ROTPER 0.8
# Solvent term penalty
# When the value is 0.0 the solvent interactions are derived from the interaction matrix
# Float in interval [-200.0,200.0]
SLVPEN 0.0
# Use Vcontacts indexing
VINDEX
# Vcontacts plane definition
# Value in [B,R,X] for Bissecting, Radical and Extended radical plane
# See McConkey et al. (2002) Bioinformatics. 18(10); 1365-1373
VCTPLA R
# Use normalized surfaces in contacts
NORMAR
# Define the RMSD cutoff between clusters
# Float in interval [0.5,3.0]
CLRMSD 2.0
# Number of results/docking poses to output
MAXRES 20
# Only output scored atoms in the final results
# Comment the next line if you wish to obtain the whole complex
SCOOUT
# Only calculate the CF for ligand atoms despite including flexible side-chains
#SCOLIG
# Ends reading of CONFIG file
ENDINP
'''
final_str = ''''''
# Specify the processed target file to use
pdbnam = 'PDBNAM ' + '{0}\n\n'.format(
self.receptor_flexaid_mol2)
# Specify the processed ligand file to use
# BTN.inp has the unique RESNUMC identifier LIG9999A
inplig = 'INPLIG ' + '{0}.inp\n\n'.format(
self.ligand_flexaid_initials)
# Specify to use one or multiple cleft(s) as binding-site
rgnopt_locclf = 'RNGOPT LOCCLF ' + 'global_binding_site.pdb\n\n'
# Specify the degrees of freedom (DOF) of the processed ligand with residue number 9999 and chain A
# Translational DOF of the ligand (-1)
optimz1 = 'OPTIMZ 9999 {0} -1\n\n'.format(self.flexaid_res_chain)
# Rotational DOF of the ligand (0)
optimz2 = 'OPTIMZ 9999 {0} 0\n\n'.format(self.flexaid_res_chain)
# Add one extra line for each flexible bond of the ligand
# The allowable flexible bonds are listed as FLEDIH lines in Processed_files/BTN.inp
# In our example, Biotin has 5 flexible bonds
flexible_bonds_data = open(
self.flexaid_absolute_processed_files_folder + os.sep + '{0}.inp'.format(self.ligand_flexaid_initials), 'r')
flexible_bonds_data_text = flexible_bonds_data.read()
flexible_bonds_data.close()
flexible_bonds_data_text_list = flexible_bonds_data_text.split('\n')
flexible_index_list_phrases = []
flexible_index_list = []
for i in flexible_bonds_data_text_list:
if 'FLEDIH' in i:
print(i)
temp = i.split(' ')
print(temp)
flex_index = temp[-2]
flexible_index_list.append(int(flex_index))
temp_line = 'OPTIMZ {0} {1} {2}\n'.format(self.flexaid_res_number, self.flexaid_res_chain, flex_index)
flexible_index_list_phrases.append(temp_line)
test = 1
final_str += pdbnam
final_str += inplig
final_str += rgnopt_locclf
final_str += optimz1
final_str += optimz2
for y in flexible_index_list_phrases:
final_str += y
final_str += '\n'
rmsdst = 'RMSDST ' + '{0}_ref.pdb\n\n'.format(
self.ligand_flexaid_initials)
final_str += rmsdst
final_str += flexaid_config_input_template
generate_config_input_file = 'CONFIG_' + self.receptor_name + '-' + self.ligand_name + '.inp'
return generate_config_input_file, final_str
# self.state_data['dockSoftware']['FlexAid'].update({'GA_params': {}})
#
# self.state_data['dockSoftware']['FlexAid']['GA_params'].update(
# {'generateGA_param': self.generate_ga_dat_pameters,
# 'GA_DataName': self.generate_ga_dat,
# 'GA_DATA_Abs': self.generate_ga_dat_name_abs,
# 'GA_ParamFull': self.flexaid_ga_dat_param_template})
#
# # self.state_data_samples = self.state_data.copy()
#
# self.save_state_data_json()
# TODO this part needs to be thought out
####################################################################################################################
def prepare_samples_collection_run(self, standard_exhaust=128,
num_samples_run=100,
run_type='samples_run'):
if self.setup_box is False:
            print('Please set up the simulation box')
sys.exit(0)
self.run_type_samples = run_type
self.prep_samples_run = True
self.samples_exhaust = standard_exhaust
self.samples_run = list(range(1, num_samples_run + 1))
self.run_folder_name_samples = self.receptor_name + '_' + self.molecule_name + '_' + self.run_type_samples
self.sim_folder_run_samples = self.folder_path + os.sep + self.run_folder_name_samples
# Create folder don't forget
# Exhaustiveness for all samples
# self.directories = self.find_sample_folders(self.folder_path, dir_name=self.run_type)
self.directories_samples = folder_utils.find_folder_in_path(self.folder_path, self.run_folder_name_samples)
print('TADA ', self.directories_samples)
self.json_samples_state_file = self.sim_folder_run_samples + os.sep + self.receptor_name + '_' + self.molecule_name + '_' + self.run_type_samples + '.json'
# This will hold information about run states
if len(self.directories_samples) == 0:
print('Creating folder for vina samples run\n')
print('Vina run type: {0}'.format(self.run_type_samples))
print(self.sim_folder_run_samples)
folder_utils.create_folder(self.sim_folder_run_samples)
self.folder_exists_samples = True
self.state_data_samples.update({'receptorFile': self.receptor_file,
'ligandFile': self.ligand_file,
'exhaustivenessList': self.exhaustiveness,
'samples_exhaust': self.samples_exhaust,
'samplesList': self.samples_run,
'folderPath': self.folder_path,
'runType': self.run_type_samples,
'molName': self.molecule_name,
'receptorName': self.receptor_name,
'simRunFolder': self.sim_folder_run_samples,
'directory': self.directories_samples,
'setup': self.setup_box,
'folderCreated': self.folder_exists_samples,
'simStates': {}})
self.prepVinaSim_samples()
self.save_state_data_json(filedata=self.state_data_samples, filename=self.json_samples_state_file)
self.load_state_called_samples = False
self.prep_sample_run = True
else:
self.load_state_file_samples = self.json_samples_state_file
self.load_state_called_samples = True
self.load_samples_state_data_json(self.load_state_file_samples)
self.prep_sample_run = True
def get_exhaust_run_folder_name(self):
curr_folder = os.getcwd()
return curr_folder + os.sep + self.run_folder_name
def get_samples_run_folder_name(self):
curr_folder = os.getcwd()
print("Yippie yi kay", curr_folder)
return curr_folder + os.sep + self.run_folder_name_samples
def save_state_data_json(self, filedata=None, filename=None):
'''
:param filename: Saves state file
:return:
'''
# import json
# with open(filename, 'w') as outfile:
# json.dump(self.cluster_models, outfile)
# pickle.dump(self.cluster_models, open(filename, "wb"))
# TODO create folder for run saving state run
# filename = self.sim_folder_run + os.sep + self.receptor_name + '_' + self.molecule_name + '.json'
if filename is None and filedata is None:
# filename = self.json_state_file
filename = self.absolute_json_state_file
filedata = self.state_data
# elif filedata is not None:
# filedata = filedata
# filename = self.absolute_json_state_file
else:
filedata = filedata
filename = filename
json.dump(filedata, open(filename, "w"), sort_keys=True, indent=4)
# TODO should I add json saving of information or not?
def load_samples_state_data_json(self, filename):
'''
:param filename: load json state data
:return:
'''
# self.absolute_path = os.path.abspath(filename)
self.load_state_called_samples = True
print(os.path.abspath(__file__))
self.state_data_samples = json.load(open(filename, "r"))
# os.chdir('HSL_exhaustiveness')
self.receptor_file = self.state_data_samples['receptorFile']
self.ligand_file = self.state_data_samples['ligandFile']
self.exhaustiveness = self.state_data_samples['exhaustivenessList']
self.samples_run = self.state_data_samples['samplesList']
self.folder_path = self.state_data_samples['folderPath']
self.run_type = self.state_data_samples['runType']
self.molecule_name = self.state_data_samples['molName']
self.receptor_name = self.state_data_samples['receptorName']
# TODO test
self.samples_exhaust = self.state_data_samples['samples_exhaust']
self.sim_folder_run_samples = self.state_data_samples['simRunFolder'] # .split('/')[-1]
self.directories_samples = self.state_data_samples['directory']
self.setup_box = self.state_data_samples['setup']
self.folder_exists = self.state_data_samples['folderCreated']
self.x_center = self.state_data_samples['boxSettings']['center_x']
self.y_center = self.state_data_samples['boxSettings']['center_y']
self.z_center = self.state_data_samples['boxSettings']['center_z']
self.x_size = self.state_data_samples['boxSettings']['size_x']
self.y_size = self.state_data_samples['boxSettings']['size_y']
self.z_size = self.state_data_samples['boxSettings']['size_z']
self.num_modes = self.state_data_samples['boxSettings']['numModes']
def hold_nSec(self, n):
for i in range(1, n + 1):
print(i)
time.sleep(1) # Delay for 1 sec
print('Ok %s secs have pass' % (n))
@hlp.timeit
def prepVinaSampleCommand(self, sample_num):
# try:
if self.setup_box is not False:
# print("Running Vina")
# TODO need to think about seed
self.save_run_name = 'vina_' + self.run_type_samples + '_' + str(sample_num)
command_to_run = "vina --receptor {0} " \
"--ligand {1} " \
"--center_x {2} " \
"--center_y {3} " \
"--center_z {4} " \
"--size_x {5} " \
"--size_y {6} " \
"--size_z {7} " \
"--exhaustiveness {8} " \
"--num_modes {9} " \
"--seed 10 " \
"--log {10}.txt " \
"--out {11}_out.pdbqt".format(self.receptor_file,
self.ligand_file,
self.x_center,
self.y_center,
self.z_center,
self.x_size,
self.y_size,
self.z_size,
self.samples_exhaust,
self.num_modes,
self.save_run_name,
self.save_run_name)
print(command_to_run)
self.command_samples_run_list.append(command_to_run)
print("Launching new Sim")
self.state_data_samples['simStates'].update({str(sample_num): {'save_run_name': self.save_run_name,
'commandRun': command_to_run,
'runFinished': False}})
# try:
# os.system(command_to_run)
# except KeyboardInterrupt:
# # quit
# sys.exit()
print("Vina sample run command prep finished")
else:
            print('Please set up the Vina box settings')
# except Exception as e:
# print("error in Sample runSim: ", e)
# sys.exit(0)
def get_molecule_name(self):
return self.molecule_name
def get_receptor_name(self):
return self.receptor_name
def set_molecule_name(self, mol_name):
self.molecule_name = mol_name
def set_receptor_name(self, receptor_name):
self.receptor_name = receptor_name
# This might need to get modified
def find_sample_files(self, folder):
try:
VIP = []
for dirname, dirnames, filenames in os.walk(folder):
for i in filenames:
# print i
if 'out' in i:
VIP.append(i)
# This is not necessary since info is inside pdbqt file
# elif 'vina_sample_' in i:
# VIP.append(i)
return VIP
except Exception as e:
print("error in find_files: ", e)
sys.exit(0)
def find_sample_folders(self, folder_path='.', dir_name='vina_sample'):
try:
dir_names = []
for dirname, dirnames, filenames in os.walk(folder_path):
# print(dirname, '-')
if dir_name in dirname: #
# print(dir_name)
dir_names.append(dirname)
# print sorted(dir_names)
return sorted(dir_names)
except Exception as e:
print("Problem with finding folders : ", e)
sys.exit(0)
| [
"copy.deepcopy",
"molmolpy.utils.folder_utils.find_folder_in_path",
"multiprocessing.current_process",
"math.ceil",
"molmolpy.utils.folder_utils.create_folder",
"molmolpy.tools.run_dask_tools.get_dask_worker_status",
"time.sleep",
"itertools.cycle",
"mdtraj.load_xtc",
"molmolpy.tools.run_dask_tools.check_free_resources"
] | [((1917, 1987), 'itertools.cycle', 'itertools.cycle', (["['navy', 'c', 'cornflowerblue', 'gold', 'darkorange']"], {}), "(['navy', 'c', 'cornflowerblue', 'gold', 'darkorange'])\n", (1932, 1987), False, 'import itertools\n'), ((8609, 8666), 'mdtraj.load_xtc', 'md.load_xtc', (['self.trajectory_file'], {'top': 'self.topology_file'}), '(self.trajectory_file, top=self.topology_file)\n', (8620, 8666), True, 'import mdtraj as md\n'), ((10176, 10214), 'math.ceil', 'math.ceil', (['(traj_len / total_free_cores)'], {}), '(traj_len / total_free_cores)\n', (10185, 10214), False, 'import math\n'), ((23093, 23143), 'molmolpy.tools.run_dask_tools.get_dask_worker_status', 'run_dask_tools.get_dask_worker_status', (['curr_client'], {}), '(curr_client)\n', (23130, 23143), False, 'from molmolpy.tools import run_dask_tools\n'), ((23171, 23221), 'molmolpy.tools.run_dask_tools.check_free_resources', 'run_dask_tools.check_free_resources', (['worker_status'], {}), '(worker_status)\n', (23206, 23221), False, 'from molmolpy.tools import run_dask_tools\n'), ((23278, 23308), 'copy.deepcopy', 'copy.deepcopy', (['get_worker_free'], {}), '(get_worker_free)\n', (23291, 23308), False, 'import copy\n'), ((30911, 30941), 'copy.deepcopy', 'copy.deepcopy', (['self.state_data'], {}), '(self.state_data)\n', (30924, 30941), False, 'import copy\n'), ((41066, 41138), 'molmolpy.utils.folder_utils.find_folder_in_path', 'folder_utils.find_folder_in_path', (['self.folder_path', 'self.run_folder_name'], {}), '(self.folder_path, self.run_folder_name)\n', (41098, 41138), False, 'from molmolpy.utils import folder_utils\n'), ((44391, 44476), 'molmolpy.utils.folder_utils.find_folder_in_path', 'folder_utils.find_folder_in_path', (['self.uber_dock_folder', 'self.ledock_folder_name'], {}), '(self.uber_dock_folder, self.ledock_folder_name\n )\n', (44423, 44476), False, 'from molmolpy.utils import folder_utils\n'), ((59539, 59624), 'molmolpy.utils.folder_utils.find_folder_in_path', 'folder_utils.find_folder_in_path', (['self.folder_path', 'self.run_folder_name_samples'], {}), '(self.folder_path, self.run_folder_name_samples\n )\n', (59571, 59624), False, 'from molmolpy.utils import folder_utils\n'), ((25692, 25779), 'molmolpy.tools.run_dask_tools.get_dask_worker_status', 'run_dask_tools.get_dask_worker_status', (['curr_client'], {'custom_index': 'custom_index_curr'}), '(curr_client, custom_index=\n custom_index_curr)\n', (25729, 25779), False, 'from molmolpy.tools import run_dask_tools\n'), ((25810, 25865), 'molmolpy.tools.run_dask_tools.check_free_resources', 'run_dask_tools.check_free_resources', (['worker_status_temp'], {}), '(worker_status_temp)\n', (25845, 25865), False, 'from molmolpy.tools import run_dask_tools\n'), ((29180, 29194), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (29190, 29194), False, 'import time\n'), ((30246, 30260), 'time.sleep', 'time.sleep', (['(60)'], {}), '(60)\n', (30256, 30260), False, 'import time\n'), ((30607, 30640), 'multiprocessing.current_process', 'multiprocessing.current_process', ([], {}), '()\n', (30638, 30640), False, 'import multiprocessing\n'), ((41746, 41793), 'molmolpy.utils.folder_utils.create_folder', 'folder_utils.create_folder', (['self.sim_folder_run'], {}), '(self.sim_folder_run)\n', (41772, 41793), False, 'from molmolpy.utils import folder_utils\n'), ((44834, 44894), 'molmolpy.utils.folder_utils.create_folder', 'folder_utils.create_folder', (['self.ledock_absolute_folder_name'], {}), '(self.ledock_absolute_folder_name)\n', (44860, 44894), False, 'from molmolpy.utils import 
folder_utils\n'), ((60126, 60181), 'molmolpy.utils.folder_utils.create_folder', 'folder_utils.create_folder', (['self.sim_folder_run_samples'], {}), '(self.sim_folder_run_samples)\n', (60152, 60181), False, 'from molmolpy.utils import folder_utils\n'), ((65117, 65130), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (65127, 65130), False, 'import time\n'), ((21444, 21466), 'copy.deepcopy', 'copy.deepcopy', (['results'], {}), '(results)\n', (21457, 21466), False, 'import copy\n')] |
import random
from randomized_tsp.utils import cost, random_neighbour, random_tour
def init_population(population_size, num_of_cities):
"""
Initializes the population
"""
population = set()
while len(population) != population_size:
population.add(tuple(random_tour(num_of_cities)))
return [list(tour) for tour in population]
def calculate_fitness(population, num_of_cities, distance_matrix):
"""
Return a fitness list for the population
Fitness is just 1 / cost(tour)
"""
fitness = [1 / cost(num_of_cities, distance_matrix, tour)
for tour in population]
sum_fitness = sum(fitness)
return [f / sum_fitness for f in fitness]
def order_crossover(num_of_cities, parent1, parent2):
"""
Implements order crossover operator
"""
start = random.randint(0, num_of_cities - 2)
end = random.randint(start, num_of_cities - 1)
child1 = parent1[start:end]
child2 = parent2[start:end]
for city in parent1:
if city not in child2:
child2.append(city)
for city in parent2:
if city not in child1:
child1.append(city)
return [child1, child2]
def cycle_crossover(num_of_cities, parent1, parent2):
"""
Implements cycle crossover operator
"""
child1 = [-1] * num_of_cities
child2 = child1.copy()
i = 0
while child1[i] == -1:
child1[i] = parent1[i]
i = parent1.index(parent2[i])
i = 0
while child2[i] == -1:
child2[i] = parent2[i]
i = parent2.index(parent1[i])
for i in range(num_of_cities):
if child1[i] == -1:
child1[i] = parent2[i]
if child2[i] == -1:
child2[i] = parent1[i]
return [child1, child2]
def mutate(num_of_cities, child):
"""
    Given a child, returns a mutated tour.
    Mutation is just a random exchange of two cities.
"""
return random_neighbour(num_of_cities, child)
def _genetic_algorithm(num_of_cities,
distance_matrix,
population_size,
mutation_prob,
crossover):
"""
Implements the genetic algorithm for TSP
Returns the best tour found and cost of that tour
"""
crossover_func = order_crossover
if crossover == 'cycle':
crossover_func = cycle_crossover
population = init_population(population_size, num_of_cities)
num_of_epochs = num_of_cities * 2
# In my experience a good value for `num_of_epochs` is directly
# proportional to `num_of_cities`.
# You can also experiment with different terminating condition
for _ in range(num_of_epochs):
# selection
fitness = calculate_fitness(population, num_of_cities, distance_matrix)
selected = random.choices(population, fitness, k=population_size)
random.shuffle(selected)
# offsprings
offsprings = []
for i in range(population_size // 2):
children = crossover_func(num_of_cities, selected[i], selected[i + population_size // 2])
offsprings.extend(children)
# mutation
for index in range(population_size):
if random.uniform(0, 1) < mutation_prob:
offsprings[index] = mutate(num_of_cities, offsprings[index])
# replacement
population.extend(offsprings)
fitness = calculate_fitness(population, num_of_cities, distance_matrix)
population = [tour for _, tour in sorted(zip(fitness, population), reverse=True)]
population = population[:population_size]
return population[0], cost(num_of_cities, distance_matrix, population[0])
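# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module).  It assumes the
# `randomized_tsp` package is importable; the distance matrix and GA settings
# below are illustrative values only.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    example_distances = [[0, 2, 9, 10],
                         [2, 0, 6, 4],
                         [9, 6, 0, 3],
                         [10, 4, 3, 0]]
    best_tour, best_cost = _genetic_algorithm(num_of_cities=4,
                                               distance_matrix=example_distances,
                                               population_size=10,
                                               mutation_prob=0.1,
                                               crossover='order')
    print('Best tour:', best_tour, 'cost:', best_cost)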
| [
"randomized_tsp.utils.random_tour",
"randomized_tsp.utils.cost",
"random.randint",
"random.uniform",
"random.shuffle",
"random.choices",
"randomized_tsp.utils.random_neighbour"
] | [((827, 863), 'random.randint', 'random.randint', (['(0)', '(num_of_cities - 2)'], {}), '(0, num_of_cities - 2)\n', (841, 863), False, 'import random\n'), ((874, 914), 'random.randint', 'random.randint', (['start', '(num_of_cities - 1)'], {}), '(start, num_of_cities - 1)\n', (888, 914), False, 'import random\n'), ((1928, 1966), 'randomized_tsp.utils.random_neighbour', 'random_neighbour', (['num_of_cities', 'child'], {}), '(num_of_cities, child)\n', (1944, 1966), False, 'from randomized_tsp.utils import cost, random_neighbour, random_tour\n'), ((2815, 2869), 'random.choices', 'random.choices', (['population', 'fitness'], {'k': 'population_size'}), '(population, fitness, k=population_size)\n', (2829, 2869), False, 'import random\n'), ((2878, 2902), 'random.shuffle', 'random.shuffle', (['selected'], {}), '(selected)\n', (2892, 2902), False, 'import random\n'), ((3639, 3690), 'randomized_tsp.utils.cost', 'cost', (['num_of_cities', 'distance_matrix', 'population[0]'], {}), '(num_of_cities, distance_matrix, population[0])\n', (3643, 3690), False, 'from randomized_tsp.utils import cost, random_neighbour, random_tour\n'), ((543, 585), 'randomized_tsp.utils.cost', 'cost', (['num_of_cities', 'distance_matrix', 'tour'], {}), '(num_of_cities, distance_matrix, tour)\n', (547, 585), False, 'from randomized_tsp.utils import cost, random_neighbour, random_tour\n'), ((283, 309), 'randomized_tsp.utils.random_tour', 'random_tour', (['num_of_cities'], {}), '(num_of_cities)\n', (294, 309), False, 'from randomized_tsp.utils import cost, random_neighbour, random_tour\n'), ((3217, 3237), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (3231, 3237), False, 'import random\n')] |
from django.db import models
from django_extensions.db.fields import CreationDateTimeField
from django_extensions.db.models import TimeStampedModel
class CreationSortedTimeStampedModel(TimeStampedModel):
class Meta(TimeStampedModel.Meta):
abstract = True
ordering = ['-created']
get_latest_by = 'created'
created = CreationDateTimeField(db_index=True)
class CopyrightLicense(models.TextChoices):
CC_0 = 'CC-0', 'CC-0'
# These 2 require attribution
CC_BY = 'CC-BY', 'CC-BY'
CC_BY_NC = 'CC-BY-NC', 'CC-BY-NC'
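# Illustrative sketch (not part of the original module): how the two helpers above
# might be combined.  The model name and field are assumptions for demonstration only.
class LicensedItem(CreationSortedTimeStampedModel):
    copyright_license = models.CharField(
        max_length=16, choices=CopyrightLicense.choices, default=CopyrightLicense.CC_0
    )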
| [
"django_extensions.db.fields.CreationDateTimeField"
] | [((350, 386), 'django_extensions.db.fields.CreationDateTimeField', 'CreationDateTimeField', ([], {'db_index': '(True)'}), '(db_index=True)\n', (371, 386), False, 'from django_extensions.db.fields import CreationDateTimeField\n')] |
'''
Created on Feb 24, 2015
@author: <NAME> <<EMAIL>>
This module provides functions and classes for probability distributions, which
build upon the scipy.stats package and extend it.
'''
from __future__ import division
import numpy as np
from scipy import stats, special, linalg, optimize
from ..data_structures.cache import cached_property
def lognorm_mean_var_to_mu_sigma(mean, variance, definition='scipy'):
""" determines the parameters of the log-normal distribution such that the
distribution yields a given mean and variance. The optional parameter
`definition` can be used to choose a definition of the resulting parameters
that is suitable for the given software package. """
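    # For X ~ LogNormal(mu, sigma):  mean = exp(mu + sigma**2/2)  and
    # variance = mean**2 * (exp(sigma**2) - 1).  Inverting these relations gives
    # sigma**2 = log(1 + variance/mean**2) and
    # exp(mu) = mean**2 / sqrt(mean**2 + variance), which is scipy's `scale` parameter.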
mean2 = mean**2
mu = mean2/np.sqrt(mean2 + variance)
sigma = np.sqrt(np.log(1 + variance/mean2))
if definition == 'scipy':
return mu, sigma
elif definition == 'numpy':
return np.log(mu), sigma
else:
raise ValueError('Unknown definition `%s`' % definition)
def lognorm_mean(mean, sigma):
""" returns a lognormal distribution parameterized by its mean and a spread
parameter `sigma` """
if sigma == 0:
return DeterministicDistribution(mean)
else:
mu = mean * np.exp(-0.5 * sigma**2)
return stats.lognorm(scale=mu, s=sigma)
def lognorm_mean_var(mean, variance):
""" returns a lognormal distribution parameterized by its mean and its
variance. """
if variance == 0:
return DeterministicDistribution(mean)
else:
scale, sigma = lognorm_mean_var_to_mu_sigma(mean, variance, 'scipy')
return stats.lognorm(scale=scale, s=sigma)
def lognorm_sum_leastsq(count, var_norm, sim_terms=1e5, bins=64):
""" returns the parameters of a log-normal distribution that estimates the
sum of `count` log-normally distributed random variables with mean 1 and
variance `var_norm`. These parameters are determined by fitting the
probability density function to a histogram obtained by drawing `sim_terms`
random numbers """
sum_mean = count
sum_var = count * var_norm
# get random numbers
dist = lognorm_mean_var(1, var_norm)
vals = dist.rvs((int(sim_terms), count)).sum(axis=1)
# get the histogram
val_max = sum_mean + 3 * np.sqrt(sum_var)
bins = np.linspace(0, val_max, bins + 1)
xs = 0.5*(bins[:-1] + bins[1:])
density, _ = np.histogram(vals, bins=bins, range=[0, val_max],
density=True)
def pdf_diff(params):
""" evaluate the estimated pdf """
scale, sigma = params
return stats.lognorm.pdf(xs, scale=scale, s=sigma) - density
# do the least square fitting
params_init = lognorm_mean_var_to_mu_sigma(sum_mean, sum_var, 'scipy')
params, _ = optimize.leastsq(pdf_diff, params_init)
return params
def lognorm_sum(count, mean, variance, method='fenton'):
""" returns an estimate of the distribution of the sum of `count`
log-normally distributed variables with `mean` and `variance`. The returned
distribution is again log-normal with mean and variance determined from the
given parameters. Here, several methods can be used:
`fenton` - match the first two moments of the distribution
`leastsq` - minimize the error in the interval
"""
if method == 'fenton':
# use the moments directly
return lognorm_mean_var(count * mean, count * variance)
elif method == 'leastsq':
# determine the moments from fitting
var_norm = variance / mean**2
scale, sigma = lognorm_sum_leastsq(count, var_norm)
return stats.lognorm(scale=scale * mean, s=sigma)
else:
raise ValueError('Unknown method `%s` for determining the sum of '
'lognormal distributions. Accepted methods are '
'[`fenton`, `leastsq`].')
def gamma_mean_var(mean, variance):
""" returns a gamma distribution with given mean and variance """
alpha = mean**2 / variance
beta = variance / mean
return stats.gamma(scale=beta, a=alpha)
def loguniform_mean(mean, width):
""" returns a loguniform distribution parameterized by its mean and a spread
parameter `width`. The ratio between the maximal value and the minimal value
is given by width**2 """
if width == 1:
# treat special case separately
return DeterministicDistribution(mean)
else:
scale = mean * (2*width*np.log(width)) / (width**2 - 1)
return LogUniformDistribution(scale=scale, s=width)
def loguniform_mean_var(mean, var):
""" returns a loguniform distribution parameterized by its mean and
variance. Here, we need to solve a non-linear equation numerically, which
might degrade accuracy and performance of the result """
if var < 0:
raise ValueError('Variance must be positive')
elif var == 0:
# treat special case separately
return DeterministicDistribution(mean)
else:
# determine width parameter numerically
cv2 = var / mean**2 # match square coefficient of variation
def _rhs(q):
""" match the coefficient of variation """
return 0.5 * (q + 1) * np.log(q) / (q - 1) - 1 - cv2
width = optimize.newton(_rhs, 1.1)
return loguniform_mean(mean, np.sqrt(width))
def random_log_uniform(v_min, v_max, size):
""" returns random variables that a distributed uniformly in log space """
log_min, log_max = np.log(v_min), np.log(v_max)
res = np.random.uniform(log_min, log_max, size)
return np.exp(res)
def dist_skewness(dist):
""" returns the skewness of the distribution `dist` """
mean = dist.mean()
var = dist.var()
return (dist.moment(3) - 3*mean*var - mean**3) / var**(3/2)
class DeterministicDistribution_gen(stats.rv_continuous):
""" deterministic distribution that always returns a given value
Code copied from
https://docs.scipy.org/doc/scipy/reference/tutorial/stats.html#making-a-continuous-distribution-i-e-subclassing-rv-continuous
"""
def _cdf(self, x):
return np.where(x < 0, 0., 1.)
def _stats(self):
return 0., 0., 0., 0.
def _rvs(self):
return np.zeros(self._size)
DeterministicDistribution = DeterministicDistribution_gen(
name='DeterministicDistribution'
)
class LogUniformDistribution_gen(stats.rv_continuous):
"""
Log-uniform distribution.
"""
def freeze(self, *args, **kwds):
frozen = super(LogUniformDistribution_gen, self).freeze(*args, **kwds)
frozen.support = self.support(*args, **kwds)
return frozen
def support(self, *args, **kwds):
""" return the interval in which the PDF of the distribution is
non-zero """
extra_args, _, _, _ = self._parse_args_stats(*args, **kwds)
mean = self.mean(*args, **kwds)
scale = extra_args[0]
width = mean * (2*scale*np.log(scale)) / (scale**2 - 1)
return (width / scale, width * scale)
def _rvs(self, s):
""" random variates """
# choose the receptor response characteristics
return random_log_uniform(1/s, s, self._size)
def _pdf(self, x, s):
""" probability density function """
s = s[0] # reset broadcasting
res = np.zeros_like(x)
idx = (1 < x*s) & (x < s)
res[idx] = 1/(x[idx] * np.log(s*s))
return res
def _cdf(self, x, s):
""" cumulative probability function """
s = s[0] # reset broadcasting
res = np.zeros_like(x)
idx = (1 < x*s) & (x < s)
log_s = np.log(s)
res[idx] = (log_s + np.log(x[idx]))/(2 * log_s)
res[x > s] = 1
return res
def _ppf(self, q, s):
""" percent point function (inverse of cdf) """
s = s[0] # reset broadcasting
res = np.zeros_like(q)
idx = (q > 0)
res[idx] = s**(2*q[idx] - 1)
return res
def _stats(self, s):
""" calculates statistics of the distribution """
mean = (s**2 - 1)/(2*s*np.log(s))
var = ((s**4 - 1) * np.log(s) - (s**2 - 1)**2) \
/ (4 * s**2 * np.log(s)**2)
return mean, var, None, None
LogUniformDistribution = LogUniformDistribution_gen(
a=0, name='LogUniformDistribution'
)
class HypoExponentialDistribution(object):
"""
Hypoexponential distribution.
Unfortunately, the framework supplied by scipy.stats.rv_continuous does not
support a variable number of parameters and we thus only mimic its
interface here.
"""
def __init__(self, rates, method='sum'):
""" initializes the hypoexponential distribution.
`rates` are the rates of the underlying exponential processes
`method` determines what method is used for calculating the cdf and can
be either `sum` or `eigen`
"""
        if method in {'sum', 'eigen'}:
            self.method = method
        else:
            raise ValueError("method must be either 'sum' or 'eigen'")
# prepare the rates of the system
self.rates = np.asarray(rates)
self.alpha = 1 / self.rates
if np.any(rates <= 0):
raise ValueError('All rates must be positive')
if len(np.unique(self.alpha)) != len(self.alpha):
raise ValueError('The current implementation only supports cases '
'where all rates are different from each other.')
# calculate terms that we need later
with np.errstate(divide='ignore'):
mat = self.alpha[:, None] \
/ (self.alpha[:, None] - self.alpha[None, :])
mat[(self.alpha[:, None] - self.alpha[None, :]) == 0] = 1
self._terms = np.prod(mat, 1)
def rvs(self, size):
""" random variates """
# choose the receptor response characteristics
return sum(np.random.exponential(scale=alpha, size=size)
for alpha in self.alpha)
def mean(self):
""" mean of the distribution """
return self.alpha.sum()
def variance(self):
""" variance of the distribution """
return (2 * np.sum(self.alpha**2 * self._terms) -
(self.alpha.sum())**2)
def pdf(self, x):
""" probability density function """
if not np.isscalar(x):
x = np.asarray(x)
res = np.zeros_like(x)
nz = (x > 0)
if np.any(nz):
if self.method == 'sum':
factor = np.exp(-x[nz, None] * self.rates[..., :]) \
/ self.rates[..., :]
res[nz] = np.sum(self._terms[..., :] * factor, axis=1)
else:
Theta = (np.diag(-self.rates, 0) +
np.diag(self.rates[:-1], 1))
for i in np.flatnonzero(nz):
res.flat[i] = \
1 - linalg.expm(x.flat[i]*Theta)[0, :].sum()
elif x == 0:
res = 0
else:
if self.method == 'sum':
                factor = np.exp(-x * self.rates) / self.rates
                res = np.sum(self._terms * factor)
else:
Theta = np.diag(-self.rates, 0) + np.diag(self.rates[:-1], 1)
res = 1 - linalg.expm(x*Theta)[0, :].sum()
return res
def cdf(self, x):
""" cumulative density function """
if not np.isscalar(x):
x = np.asarray(x)
res = np.zeros_like(x)
nz = (x > 0)
if np.any(nz):
factor = np.exp(-x[nz, None]*self.rates[..., :])
res = 1 - np.sum(self._terms[..., :] * factor, axis=1)
elif x == 0:
res = 0
else:
factor = np.exp(-x*self.rates)
res = 1 - np.sum(self._terms * factor)
return res
# ==============================================================================
# OLD DISTRIBUTIONS THAT MIGHT NOT BE NEEDED ANYMORE
# ==============================================================================
class PartialLogNormDistribution_gen(stats.rv_continuous):
"""
partial log-normal distribution.
a fraction `frac` of the distribution follows a log-normal distribution,
while the remaining fraction `1 - frac` is zero
Similar to the lognorm distribution, this does not support any location
parameter
"""
def _rvs(self, s, frac):
""" random variates """
# choose the items response characteristics
res = np.exp(s * np.random.standard_normal(self._size))
if frac != 1:
# switch off items randomly
res[np.random.random(self._size) > frac] = 0
return res
def _pdf(self, x, s, frac):
""" probability density function """
s, frac = s[0], frac[0] # reset broadcasting
return frac / (s*x*np.sqrt(2*np.pi)) * np.exp(-1/2*(np.log(x)/s)**2)
def _cdf(self, x, s, frac):
""" cumulative probability function """
s, frac = s[0], frac[0] # reset broadcasting
return 1 + frac*(-0.5 + 0.5*special.erf(np.log(x)/(s*np.sqrt(2))))
def _ppf(self, q, s, frac):
""" percent point function (inverse of cdf) """
s, frac = s[0], frac[0] # reset broadcasting
q_scale = (q - (1 - frac)) / frac
res = np.zeros_like(q)
idx = (q_scale > 0)
res[idx] = np.exp(s * special.ndtri(q_scale[idx]))
return res
PartialLogNormDistribution = PartialLogNormDistribution_gen(
a=0, name='PartialLogNormDistribution'
)
class PartialLogUniformDistribution_gen(stats.rv_continuous):
"""
partial log-uniform distribution.
a fraction `frac` of the distribution follows a log-uniform distribution,
while the remaining fraction `1 - frac` is zero
"""
def _rvs(self, s, frac):
""" random variates """
# choose the receptor response characteristics
res = random_log_uniform(1/s, s, self._size)
# switch off receptors randomly
if frac != 1:
res[np.random.random(self._size) > frac] = 0
return res
def _pdf(self, x, s, frac):
""" probability density function """
s, frac = s[0], frac[0] # reset broadcasting
res = np.zeros_like(x)
idx = (1 < x*s) & (x < s)
res[idx] = frac/(x[idx] * np.log(s*s))
return res
def _cdf(self, x, s, frac):
""" cumulative probability function """
s, frac = s[0], frac[0] # reset broadcasting
res = np.zeros_like(x)
idx = (1 < x*s) & (x < s)
log_s = np.log(s)
res[idx] = (log_s + np.log(x[idx]))/(2 * log_s)
res[x > s] = 1
return (1 - frac) + frac*res
def _ppf(self, q, s, frac):
""" percent point function (inverse of cdf) """
s, frac = s[0], frac[0] # reset broadcasting
q_scale = (q - (1 - frac)) / frac
res = np.zeros_like(q)
idx = (q_scale > 0)
res[idx] = s**(2*q_scale[idx] - 1)
return res
PartialLogUniformDistribution = PartialLogUniformDistribution_gen(
a=0, name='PartialLogUniformDistribution'
)
NORMAL_DISTRIBUTION_NORMALIZATION = 1/np.sqrt(2*np.pi)
class NormalDistribution(object):
""" class representing normal distributions """
def __init__(self, mean, var, count=None):
""" normal distributions are described by their mean and variance.
Additionally, count denotes how many observations were used to
estimate the parameters. All values can also be numpy arrays to
represent many distributions efficiently """
self.mean = mean
self.var = var
self.count = count
def copy(self):
return self.__class__(self.mean, self.var, self.count)
@cached_property()
def std(self):
""" return standard deviation """
return np.sqrt(self.var)
def pdf(self, value, mask=None):
""" return probability density function at value """
if mask is None:
mean = self.mean
var = self.var
std = self.std
else:
mean = self.mean[mask]
var = self.var[mask]
std = self.std[mask]
return NORMAL_DISTRIBUTION_NORMALIZATION/std \
* np.exp(-0.5*(value - mean)**2 / var)
def add_observation(self, value):
""" add an observed value and adjust mean and variance of the
distribution. This returns a new distribution and only works if
count was set """
if self.count is None:
return self.copy()
else:
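            # Welford's online update: shift the mean by delta/count and accumulate the
            # sum of squared deviations (M2), from which the unbiased variance follows.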
M2 = self.var*(self.count - 1)
count = self.count + 1
delta = value - self.mean
mean = self.mean + delta/count
M2 = M2 + delta*(value - mean)
return NormalDistribution(mean, M2/(count - 1), count)
def distance(self, other, kind='kullback-leibler'):
""" return the distance between two normal distributions """
if kind == 'kullback-leibler':
            dist = 0.5*(np.log(other.var/self.var) +
                        (self.var + (self.mean - other.mean)**2)/other.var - 1)
elif kind == 'bhattacharyya':
var_ratio = self.var/other.var
term1 = np.log(0.25*(var_ratio + 1/var_ratio + 2))
term2 = (self.mean - other.mean)**2/(self.var + other.var)
dist = 0.25*(term1 + term2)
elif kind == 'hellinger':
dist_b = self.distance(other, kind='bhattacharyya')
dist = np.sqrt(1 - np.exp(-dist_b))
else:
raise ValueError('Unknown distance `%s`' % kind)
return dist
def welch_test(self, other):
""" performs Welch's t-test of two normal distributions """
# calculate the degrees of freedom
s1, s2 = self.var/self.count, other.var/other.count
nu1, nu2 = self.count - 1, other.count - 1
dof = (s1 + s2)**2/(s1**2/nu1 + s2**2/nu2)
# calculate the Welch t-value
t = (self.mean - other.mean)/np.sqrt(s1 + s2)
# calculate the probability using the Student's T distribution
prob = stats.t.sf(np.abs(t), dof) * 2
return prob
def overlap(self, other, common_variance=True):
""" estimates the amount of overlap between two distributions """
if common_variance:
if self.count is None:
if other.count is None: # neither is sampled
S = np.sqrt(0.5*(self.var + other.var))
else: # other is sampled
S = self.std
else:
if other.count is None: # self is sampled
S = other.std
else: # both are sampled
expr = ((self.count - 1)*self.var +
(other.count - 1)*other.var)
S = np.sqrt(expr/(self.count + other.count - 2))
delta = np.abs(self.mean - other.mean)/S
return 2*stats.norm.cdf(-0.5*delta)
else:
# here, we would have to integrate numerically
raise NotImplementedError
| [
"numpy.sum",
"numpy.abs",
"scipy.special.ndtri",
"numpy.random.exponential",
"scipy.optimize.leastsq",
"numpy.histogram",
"scipy.optimize.newton",
"numpy.exp",
"numpy.diag",
"numpy.prod",
"numpy.unique",
"numpy.zeros_like",
"scipy.stats.lognorm.pdf",
"scipy.stats.norm.cdf",
"scipy.stats.gamma",
"numpy.linspace",
"numpy.asarray",
"numpy.random.standard_normal",
"numpy.random.uniform",
"scipy.linalg.expm",
"numpy.log",
"numpy.isscalar",
"numpy.flatnonzero",
"scipy.stats.lognorm",
"numpy.zeros",
"numpy.errstate",
"numpy.any",
"numpy.where",
"numpy.random.random",
"numpy.sqrt"
] | [((2333, 2366), 'numpy.linspace', 'np.linspace', (['(0)', 'val_max', '(bins + 1)'], {}), '(0, val_max, bins + 1)\n', (2344, 2366), True, 'import numpy as np\n'), ((2420, 2483), 'numpy.histogram', 'np.histogram', (['vals'], {'bins': 'bins', 'range': '[0, val_max]', 'density': '(True)'}), '(vals, bins=bins, range=[0, val_max], density=True)\n', (2432, 2483), True, 'import numpy as np\n'), ((2821, 2860), 'scipy.optimize.leastsq', 'optimize.leastsq', (['pdf_diff', 'params_init'], {}), '(pdf_diff, params_init)\n', (2837, 2860), False, 'from scipy import stats, special, linalg, optimize\n'), ((4129, 4161), 'scipy.stats.gamma', 'stats.gamma', ([], {'scale': 'beta', 'a': 'alpha'}), '(scale=beta, a=alpha)\n', (4140, 4161), False, 'from scipy import stats, special, linalg, optimize\n'), ((5626, 5667), 'numpy.random.uniform', 'np.random.uniform', (['log_min', 'log_max', 'size'], {}), '(log_min, log_max, size)\n', (5643, 5667), True, 'import numpy as np\n'), ((5679, 5690), 'numpy.exp', 'np.exp', (['res'], {}), '(res)\n', (5685, 5690), True, 'import numpy as np\n'), ((15535, 15553), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (15542, 15553), True, 'import numpy as np\n'), ((748, 773), 'numpy.sqrt', 'np.sqrt', (['(mean2 + variance)'], {}), '(mean2 + variance)\n', (755, 773), True, 'import numpy as np\n'), ((794, 822), 'numpy.log', 'np.log', (['(1 + variance / mean2)'], {}), '(1 + variance / mean2)\n', (800, 822), True, 'import numpy as np\n'), ((1292, 1324), 'scipy.stats.lognorm', 'stats.lognorm', ([], {'scale': 'mu', 's': 'sigma'}), '(scale=mu, s=sigma)\n', (1305, 1324), False, 'from scipy import stats, special, linalg, optimize\n'), ((1630, 1665), 'scipy.stats.lognorm', 'stats.lognorm', ([], {'scale': 'scale', 's': 'sigma'}), '(scale=scale, s=sigma)\n', (1643, 1665), False, 'from scipy import stats, special, linalg, optimize\n'), ((5587, 5600), 'numpy.log', 'np.log', (['v_min'], {}), '(v_min)\n', (5593, 5600), True, 'import numpy as np\n'), ((5602, 5615), 'numpy.log', 'np.log', (['v_max'], {}), '(v_max)\n', (5608, 5615), True, 'import numpy as np\n'), ((6214, 6239), 'numpy.where', 'np.where', (['(x < 0)', '(0.0)', '(1.0)'], {}), '(x < 0, 0.0, 1.0)\n', (6222, 6239), True, 'import numpy as np\n'), ((6335, 6355), 'numpy.zeros', 'np.zeros', (['self._size'], {}), '(self._size)\n', (6343, 6355), True, 'import numpy as np\n'), ((7451, 7467), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (7464, 7467), True, 'import numpy as np\n'), ((7711, 7727), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (7724, 7727), True, 'import numpy as np\n'), ((7778, 7787), 'numpy.log', 'np.log', (['s'], {}), '(s)\n', (7784, 7787), True, 'import numpy as np\n'), ((8032, 8048), 'numpy.zeros_like', 'np.zeros_like', (['q'], {}), '(q)\n', (8045, 8048), True, 'import numpy as np\n'), ((9229, 9246), 'numpy.asarray', 'np.asarray', (['rates'], {}), '(rates)\n', (9239, 9246), True, 'import numpy as np\n'), ((9294, 9312), 'numpy.any', 'np.any', (['(rates <= 0)'], {}), '(rates <= 0)\n', (9300, 9312), True, 'import numpy as np\n'), ((9880, 9895), 'numpy.prod', 'np.prod', (['mat', '(1)'], {}), '(mat, 1)\n', (9887, 9895), True, 'import numpy as np\n'), ((13622, 13638), 'numpy.zeros_like', 'np.zeros_like', (['q'], {}), '(q)\n', (13635, 13638), True, 'import numpy as np\n'), ((14571, 14587), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (14584, 14587), True, 'import numpy as np\n'), ((14864, 14880), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (14877, 14880), True, 
'import numpy as np\n'), ((14931, 14940), 'numpy.log', 'np.log', (['s'], {}), '(s)\n', (14937, 14940), True, 'import numpy as np\n'), ((15266, 15282), 'numpy.zeros_like', 'np.zeros_like', (['q'], {}), '(q)\n', (15279, 15282), True, 'import numpy as np\n'), ((16258, 16275), 'numpy.sqrt', 'np.sqrt', (['self.var'], {}), '(self.var)\n', (16265, 16275), True, 'import numpy as np\n'), ((1253, 1278), 'numpy.exp', 'np.exp', (['(-0.5 * sigma ** 2)'], {}), '(-0.5 * sigma ** 2)\n', (1259, 1278), True, 'import numpy as np\n'), ((2305, 2321), 'numpy.sqrt', 'np.sqrt', (['sum_var'], {}), '(sum_var)\n', (2312, 2321), True, 'import numpy as np\n'), ((2633, 2676), 'scipy.stats.lognorm.pdf', 'stats.lognorm.pdf', (['xs'], {'scale': 'scale', 's': 'sigma'}), '(xs, scale=scale, s=sigma)\n', (2650, 2676), False, 'from scipy import stats, special, linalg, optimize\n'), ((3685, 3727), 'scipy.stats.lognorm', 'stats.lognorm', ([], {'scale': '(scale * mean)', 's': 'sigma'}), '(scale=scale * mean, s=sigma)\n', (3698, 3727), False, 'from scipy import stats, special, linalg, optimize\n'), ((5358, 5384), 'scipy.optimize.newton', 'optimize.newton', (['_rhs', '(1.1)'], {}), '(_rhs, 1.1)\n', (5373, 5384), False, 'from scipy import stats, special, linalg, optimize\n'), ((9656, 9684), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""'}), "(divide='ignore')\n", (9667, 9684), True, 'import numpy as np\n'), ((10495, 10509), 'numpy.isscalar', 'np.isscalar', (['x'], {}), '(x)\n', (10506, 10509), True, 'import numpy as np\n'), ((10527, 10540), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (10537, 10540), True, 'import numpy as np\n'), ((10559, 10575), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (10572, 10575), True, 'import numpy as np\n'), ((10616, 10626), 'numpy.any', 'np.any', (['nz'], {}), '(nz)\n', (10622, 10626), True, 'import numpy as np\n'), ((11641, 11655), 'numpy.isscalar', 'np.isscalar', (['x'], {}), '(x)\n', (11652, 11655), True, 'import numpy as np\n'), ((11673, 11686), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (11683, 11686), True, 'import numpy as np\n'), ((11705, 11721), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (11718, 11721), True, 'import numpy as np\n'), ((11762, 11772), 'numpy.any', 'np.any', (['nz'], {}), '(nz)\n', (11768, 11772), True, 'import numpy as np\n'), ((16693, 16733), 'numpy.exp', 'np.exp', (['(-0.5 * (value - mean) ** 2 / var)'], {}), '(-0.5 * (value - mean) ** 2 / var)\n', (16699, 16733), True, 'import numpy as np\n'), ((18583, 18599), 'numpy.sqrt', 'np.sqrt', (['(s1 + s2)'], {}), '(s1 + s2)\n', (18590, 18599), True, 'import numpy as np\n'), ((924, 934), 'numpy.log', 'np.log', (['mu'], {}), '(mu)\n', (930, 934), True, 'import numpy as np\n'), ((5422, 5436), 'numpy.sqrt', 'np.sqrt', (['width'], {}), '(width)\n', (5429, 5436), True, 'import numpy as np\n'), ((7533, 7546), 'numpy.log', 'np.log', (['(s * s)'], {}), '(s * s)\n', (7539, 7546), True, 'import numpy as np\n'), ((7816, 7830), 'numpy.log', 'np.log', (['x[idx]'], {}), '(x[idx])\n', (7822, 7830), True, 'import numpy as np\n'), ((8251, 8260), 'numpy.log', 'np.log', (['s'], {}), '(s)\n', (8257, 8260), True, 'import numpy as np\n'), ((9388, 9409), 'numpy.unique', 'np.unique', (['self.alpha'], {}), '(self.alpha)\n', (9397, 9409), True, 'import numpy as np\n'), ((10041, 10086), 'numpy.random.exponential', 'np.random.exponential', ([], {'scale': 'alpha', 'size': 'size'}), '(scale=alpha, size=size)\n', (10062, 10086), True, 'import numpy as np\n'), ((10329, 10366), 'numpy.sum', 'np.sum', 
(['(self.alpha ** 2 * self._terms)'], {}), '(self.alpha ** 2 * self._terms)\n', (10335, 10366), True, 'import numpy as np\n'), ((11799, 11840), 'numpy.exp', 'np.exp', (['(-x[nz, None] * self.rates[..., :])'], {}), '(-x[nz, None] * self.rates[..., :])\n', (11805, 11840), True, 'import numpy as np\n'), ((11986, 12009), 'numpy.exp', 'np.exp', (['(-x * self.rates)'], {}), '(-x * self.rates)\n', (11992, 12009), True, 'import numpy as np\n'), ((12790, 12827), 'numpy.random.standard_normal', 'np.random.standard_normal', (['self._size'], {}), '(self._size)\n', (12815, 12827), True, 'import numpy as np\n'), ((13697, 13724), 'scipy.special.ndtri', 'special.ndtri', (['q_scale[idx]'], {}), '(q_scale[idx])\n', (13710, 13724), False, 'from scipy import stats, special, linalg, optimize\n'), ((14656, 14669), 'numpy.log', 'np.log', (['(s * s)'], {}), '(s * s)\n', (14662, 14669), True, 'import numpy as np\n'), ((14969, 14983), 'numpy.log', 'np.log', (['x[idx]'], {}), '(x[idx])\n', (14975, 14983), True, 'import numpy as np\n'), ((17761, 17807), 'numpy.log', 'np.log', (['(0.25 * (var_ratio + 1 / var_ratio + 2))'], {}), '(0.25 * (var_ratio + 1 / var_ratio + 2))\n', (17767, 17807), True, 'import numpy as np\n'), ((18707, 18716), 'numpy.abs', 'np.abs', (['t'], {}), '(t)\n', (18713, 18716), True, 'import numpy as np\n'), ((19500, 19530), 'numpy.abs', 'np.abs', (['(self.mean - other.mean)'], {}), '(self.mean - other.mean)\n', (19506, 19530), True, 'import numpy as np\n'), ((19554, 19582), 'scipy.stats.norm.cdf', 'stats.norm.cdf', (['(-0.5 * delta)'], {}), '(-0.5 * delta)\n', (19568, 19582), False, 'from scipy import stats, special, linalg, optimize\n'), ((4538, 4551), 'numpy.log', 'np.log', (['width'], {}), '(width)\n', (4544, 4551), True, 'import numpy as np\n'), ((7069, 7082), 'numpy.log', 'np.log', (['scale'], {}), '(scale)\n', (7075, 7082), True, 'import numpy as np\n'), ((8290, 8299), 'numpy.log', 'np.log', (['s'], {}), '(s)\n', (8296, 8299), True, 'import numpy as np\n'), ((8349, 8358), 'numpy.log', 'np.log', (['s'], {}), '(s)\n', (8355, 8358), True, 'import numpy as np\n'), ((10825, 10869), 'numpy.sum', 'np.sum', (['(self._terms[..., :] * factor)'], {'axis': '(1)'}), '(self._terms[..., :] * factor, axis=1)\n', (10831, 10869), True, 'import numpy as np\n'), ((11035, 11053), 'numpy.flatnonzero', 'np.flatnonzero', (['nz'], {}), '(nz)\n', (11049, 11053), True, 'import numpy as np\n'), ((11351, 11379), 'numpy.sum', 'np.sum', (['(self._terms * factor)'], {}), '(self._terms * factor)\n', (11357, 11379), True, 'import numpy as np\n'), ((11865, 11909), 'numpy.sum', 'np.sum', (['(self._terms[..., :] * factor)'], {'axis': '(1)'}), '(self._terms[..., :] * factor, axis=1)\n', (11871, 11909), True, 'import numpy as np\n'), ((12030, 12058), 'numpy.sum', 'np.sum', (['(self._terms * factor)'], {}), '(self._terms * factor)\n', (12036, 12058), True, 'import numpy as np\n'), ((12907, 12935), 'numpy.random.random', 'np.random.random', (['self._size'], {}), '(self._size)\n', (12923, 12935), True, 'import numpy as np\n'), ((13135, 13153), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (13142, 13153), True, 'import numpy as np\n'), ((14356, 14384), 'numpy.random.random', 'np.random.random', (['self._size'], {}), '(self._size)\n', (14372, 14384), True, 'import numpy as np\n'), ((19032, 19069), 'numpy.sqrt', 'np.sqrt', (['(0.5 * (self.var + other.var))'], {}), '(0.5 * (self.var + other.var))\n', (19039, 19069), True, 'import numpy as np\n'), ((19434, 19480), 'numpy.sqrt', 'np.sqrt', (['(expr / (self.count + 
other.count - 2))'], {}), '(expr / (self.count + other.count - 2))\n', (19441, 19480), True, 'import numpy as np\n'), ((10698, 10739), 'numpy.exp', 'np.exp', (['(-x[nz, None] * self.rates[..., :])'], {}), '(-x[nz, None] * self.rates[..., :])\n', (10704, 10739), True, 'import numpy as np\n'), ((10921, 10944), 'numpy.diag', 'np.diag', (['(-self.rates)', '(0)'], {}), '(-self.rates, 0)\n', (10928, 10944), True, 'import numpy as np\n'), ((10977, 11004), 'numpy.diag', 'np.diag', (['self.rates[:-1]', '(1)'], {}), '(self.rates[:-1], 1)\n', (10984, 11004), True, 'import numpy as np\n'), ((11291, 11314), 'numpy.exp', 'np.exp', (['(-x * self.rates)'], {}), '(-x * self.rates)\n', (11297, 11314), True, 'import numpy as np\n'), ((11422, 11445), 'numpy.diag', 'np.diag', (['(-self.rates)', '(0)'], {}), '(-self.rates, 0)\n', (11429, 11445), True, 'import numpy as np\n'), ((11448, 11475), 'numpy.diag', 'np.diag', (['self.rates[:-1]', '(1)'], {}), '(self.rates[:-1], 1)\n', (11455, 11475), True, 'import numpy as np\n'), ((17537, 17565), 'numpy.log', 'np.log', (['(other.var / self.var)'], {}), '(other.var / self.var)\n', (17543, 17565), True, 'import numpy as np\n'), ((13168, 13177), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (13174, 13177), True, 'import numpy as np\n'), ((18057, 18072), 'numpy.exp', 'np.exp', (['(-dist_b)'], {}), '(-dist_b)\n', (18063, 18072), True, 'import numpy as np\n'), ((5303, 5312), 'numpy.log', 'np.log', (['q'], {}), '(q)\n', (5309, 5312), True, 'import numpy as np\n'), ((13395, 13404), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (13401, 13404), True, 'import numpy as np\n'), ((11502, 11524), 'scipy.linalg.expm', 'linalg.expm', (['(x * Theta)'], {}), '(x * Theta)\n', (11513, 11524), False, 'from scipy import stats, special, linalg, optimize\n'), ((13408, 13418), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (13415, 13418), True, 'import numpy as np\n'), ((11131, 11161), 'scipy.linalg.expm', 'linalg.expm', (['(x.flat[i] * Theta)'], {}), '(x.flat[i] * Theta)\n', (11142, 11161), False, 'from scipy import stats, special, linalg, optimize\n')] |
#!/usr/bin/env python3
# coding=utf-8
import json
import os
import sys
import unittest
from src.utils.payloadHelper import PayloadHelper
class MappedPayloadTests(unittest.TestCase):
def setUp(self):
pass
def test_hartbeat(self):
script_dir = os.path.dirname(__file__)
rel_path = 'data/source/heartbeatPayload.json'
abs_file_path = os.path.join(script_dir, rel_path)
with open(abs_file_path) as hartbeatData:
self.hartbeatJson = json.load(hartbeatData)
self.helper = PayloadHelper()
for item in self.hartbeatJson:
payload = self.helper.map(item)
self.assertIsNotNone(payload.heartbeat)
def tearDown(self):
self.helper = None
self.assertIsNone(self.helper)
pass
if __name__ == '__main__':
unittest.main(exit=False)
| [
"unittest.main",
"json.load",
"os.path.dirname",
"src.utils.payloadHelper.PayloadHelper",
"os.path.join"
] | [((833, 858), 'unittest.main', 'unittest.main', ([], {'exit': '(False)'}), '(exit=False)\n', (846, 858), False, 'import unittest\n'), ((271, 296), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (286, 296), False, 'import os\n'), ((376, 410), 'os.path.join', 'os.path.join', (['script_dir', 'rel_path'], {}), '(script_dir, rel_path)\n', (388, 410), False, 'import os\n'), ((494, 517), 'json.load', 'json.load', (['hartbeatData'], {}), '(hartbeatData)\n', (503, 517), False, 'import json\n'), ((544, 559), 'src.utils.payloadHelper.PayloadHelper', 'PayloadHelper', ([], {}), '()\n', (557, 559), False, 'from src.utils.payloadHelper import PayloadHelper\n')] |
import random
import string
from .player import Player
# from flask import current_app
# from flask_socketio import SocketIO, emit
# socketio = SocketIO(current_app)
# logger = current_app.logger
class Word:
def __init__(self, word: str, player: Player, guesser: Player = None):
self.word = word.upper()
self.scrambled = Word.scramble(word) # Scrambled also contains bonus letters at endgame
self.creator = player
self.guesser = guesser
self.revealed_idx = 0
def __repr__(self):
return "Word" + ','.join([str(x) for x in [self.word, self.scrambled, self.guesser, self.revealed_idx]])
def advance(self, state):
if self.revealed_idx >= len(self.word) - 1:
self.scrambled += random.choice(string.ascii_lowercase)
self.revealed_idx += 1
state.update_history_log(f"{self.guesser} advanced")
# socketio.emit('word advanced', {}, namespace='/word')
def assign_guesser(self, players):
# TODO: Assign a random guesser instead of a fixed one
if self.guesser is None:
self_idx = players.index(self.creator)
            if self_idx < len(players) - 1:  # Array [a, b, c] has len 3 and idx 0, 1, 2. If it's 0 or 1, move right otherwise overflow
guesser_idx = self_idx + 1
else:
guesser_idx = 0
self.guesser = players[guesser_idx]
# def assign_guesser(self, player: Player):
# self.guesser = player
@staticmethod
def scramble(word):
l = list(word)
random.shuffle(l)
return ''.join(l)
@staticmethod
def word_for_guesser(guesser: Player, list_of_words):
for word_in_list in list_of_words:
if guesser == word_in_list.guesser:
return word_in_list
return None
| [
"random.shuffle",
"random.choice"
] | [((1589, 1606), 'random.shuffle', 'random.shuffle', (['l'], {}), '(l)\n', (1603, 1606), False, 'import random\n'), ((756, 793), 'random.choice', 'random.choice', (['string.ascii_lowercase'], {}), '(string.ascii_lowercase)\n', (769, 793), False, 'import random\n')] |
from nose.tools import *
from tests.base import OsfTestCase
from tests.factories import UserFactory
from scripts.migration.migrate_mailing_lists_to_mailchimp_field import main, get_users_with_no_mailchimp_mailing_lists
class TestMigrateMailingLists(OsfTestCase):
def setUp(self):
super(TestMigrateMailingLists, self).setUp()
self.user1 = UserFactory(mailing_lists={'mail': True})
self.user2 = UserFactory(mailing_lists={'mail': False})
self.user3 = UserFactory()
self.user1.save()
self.user2.save()
def test_get_users_with_mailing_lists(self):
users_with_mailing_list_ids = [user._id for user in get_users_with_no_mailchimp_mailing_lists()]
assert_equal(len(users_with_mailing_list_ids), 2)
assert_true(self.user1._id in users_with_mailing_list_ids)
assert_true(self.user2._id in users_with_mailing_list_ids)
assert_false(self.user3._id in users_with_mailing_list_ids)
def test_migration_of_mailing_lists(self):
assert_equal(self.user1.mailchimp_mailing_lists, {})
assert_equal(self.user2.mailchimp_mailing_lists, {})
main()
self.user1.reload()
self.user2.reload()
assert_true(self.user1.mailchimp_mailing_lists.get(u'mail'))
assert_false(self.user2.mailchimp_mailing_lists.get(u'mail'))
| [
"scripts.migration.migrate_mailing_lists_to_mailchimp_field.get_users_with_no_mailchimp_mailing_lists",
"scripts.migration.migrate_mailing_lists_to_mailchimp_field.main",
"tests.factories.UserFactory"
] | [((361, 402), 'tests.factories.UserFactory', 'UserFactory', ([], {'mailing_lists': "{'mail': True}"}), "(mailing_lists={'mail': True})\n", (372, 402), False, 'from tests.factories import UserFactory\n'), ((424, 466), 'tests.factories.UserFactory', 'UserFactory', ([], {'mailing_lists': "{'mail': False}"}), "(mailing_lists={'mail': False})\n", (435, 466), False, 'from tests.factories import UserFactory\n'), ((488, 501), 'tests.factories.UserFactory', 'UserFactory', ([], {}), '()\n', (499, 501), False, 'from tests.factories import UserFactory\n'), ((1151, 1157), 'scripts.migration.migrate_mailing_lists_to_mailchimp_field.main', 'main', ([], {}), '()\n', (1155, 1157), False, 'from scripts.migration.migrate_mailing_lists_to_mailchimp_field import main, get_users_with_no_mailchimp_mailing_lists\n'), ((664, 707), 'scripts.migration.migrate_mailing_lists_to_mailchimp_field.get_users_with_no_mailchimp_mailing_lists', 'get_users_with_no_mailchimp_mailing_lists', ([], {}), '()\n', (705, 707), False, 'from scripts.migration.migrate_mailing_lists_to_mailchimp_field import main, get_users_with_no_mailchimp_mailing_lists\n')] |
import pickle
from typing import List, Set
from os.path import join, exists, isfile, isdir
from os import makedirs, listdir
from hotpot.config import CORPUS_DIR
from hotpot.configurable import Configurable
from hotpot.data_handling.data import RelevanceQuestion
from hotpot.data_handling.word_vectors import load_word_vectors
from hotpot.utils import ResourceLoader
""" Squad data. For now, leaving out answer spans. When we want to predict answers, we will deal with it."""
class SquadParagraph(object):
def __init__(self, doc_title: str, par_id: int, par_text: List[str], pickle_text=True):
self.doc_title = doc_title
self.par_id = par_id
self.par_text = par_text
self.pickle_text = pickle_text
@property
def num_tokens(self):
return len(self.par_text)
def get_paragraph_without_text_pickling(self):
return SquadParagraph(self.doc_title, self.par_id, self.par_text, pickle_text=False)
def __repr__(self) -> str:
return f"Title: {self.doc_title}, Id: {self.par_id}\n" \
f"Paragraph:\n" + ' '.join(self.par_text)
def __getstate__(self):
if not self.pickle_text:
state = self.__dict__.copy()
state['par_text'] = None
return state
return self.__dict__
class SquadDocument(object):
def __init__(self, title: str, paragraphs: List[SquadParagraph]):
self.title = title
self.paragraphs = paragraphs
self.id_to_par = self._build_id_paragraph_dict()
def _build_id_paragraph_dict(self):
return {x.par_id: x for x in self.paragraphs}
def get_par(self, par_id) -> SquadParagraph:
return self.id_to_par[par_id]
def add_par(self, par: SquadParagraph):
if par.par_id in self.id_to_par:
raise ValueError("This paragraph id already exists in this document!")
if par.doc_title != self.title:
raise ValueError("Paragraph title not matching document title!")
self.paragraphs.append(SquadParagraph(par.doc_title, par.par_id, par.par_text, pickle_text=True))
self.id_to_par[par.par_id] = self.paragraphs[-1]
def __repr__(self) -> str:
return f"Title: {self.title}. Number of paragraphs: {len(self.paragraphs)}"
class SquadQuestion(object):
""" Squad Question and paragraphs."""
def __init__(self, question_id: str, question: List[str],
answers: Set[str], paragraph: SquadParagraph):
self.question_id = question_id
self.question = question
self.answers = answers
self.paragraph = paragraph # .get_paragraph_without_text_pickling()
def __repr__(self) -> str:
return f"{self.question_id}: {' '.join(self.question)}\nAnswer(s): {self.answers}\n" \
f"Paragraph:\n" + ' '.join(self.paragraph.par_text)
class SquadQuestionWithDistractors(SquadQuestion):
def __init__(self, question_id: str, question: List[str],
answers: Set[str], paragraph: SquadParagraph,
distractors: List[SquadParagraph]):
super().__init__(question_id, question, answers, paragraph)
# self.distractors = [x.get_paragraph_without_text_pickling() for x in distractors]
self.distractors = distractors
def add_distractors(self, paragraphs: List[SquadParagraph]):
""" Doesn't add duplicates """
for paragraph in paragraphs:
if any((x.par_id == paragraph.par_id and x.doc_title == paragraph.doc_title) for x in self.distractors):
continue
# self.distractors.append(paragraph.get_paragraph_without_text_pickling())
self.distractors.append(paragraph)
def squad_question_to_relevance_question(squad_question: SquadQuestionWithDistractors) -> RelevanceQuestion:
return RelevanceQuestion(dataset_name='squad',
question_id=squad_question.question_id,
question_tokens=squad_question.question,
supporting_facts=[squad_question.paragraph.par_text],
distractors=[x.par_text for x in squad_question.distractors])
class SquadRelevanceCorpus(Configurable):
TRAIN_DOC_FILE = "train_documents.pkl"
TRAIN_FILE = "train_questions.pkl"
DEV_DOC_FILE = "dev_documents.pkl"
DEV_FILE = "dev_questions.pkl"
NAME = "squad"
VOCAB_FILE = "squad_vocab.txt"
WORD_VEC_SUFFIX = "_pruned"
@staticmethod
def make_corpus(train_documents: List[SquadDocument],
train: List[SquadQuestionWithDistractors],
dev_documents: List[SquadDocument],
dev: List[SquadQuestionWithDistractors]):
dir = join(CORPUS_DIR, SquadRelevanceCorpus.NAME)
# if isfile(dir) or (exists(dir) and len(listdir(dir))) > 0:
# raise ValueError("Directory %s already exists and is non-empty" % dir)
if not exists(dir):
makedirs(dir)
train_document_dict = {doc.title: doc for doc in train_documents}
if len(train_document_dict) != len(train_documents):
raise ValueError("different train documents have the same title!")
dev_document_dict = {doc.title: doc for doc in dev_documents}
if len(dev_document_dict) != len(dev_documents):
raise ValueError("different dev documents have the same title!")
for name, data in [(SquadRelevanceCorpus.TRAIN_FILE, train), (SquadRelevanceCorpus.DEV_FILE, dev),
(SquadRelevanceCorpus.TRAIN_DOC_FILE, train_document_dict),
(SquadRelevanceCorpus.DEV_DOC_FILE, dev_document_dict)]:
if data is not None:
with open(join(dir, name), 'wb') as f:
pickle.dump(data, f)
def __init__(self):
dir = join(CORPUS_DIR, self.NAME)
if not exists(dir) or not isdir(dir):
raise ValueError("No directory %s, corpus not built yet?" % dir)
self.dir = dir
self.train_title_to_document = None
self.dev_title_to_document = None
@property
def evidence(self):
return None
def get_vocab_file(self):
self.get_vocab()
return join(self.dir, self.VOCAB_FILE)
def get_vocab(self):
""" get all-lower cased unique words for this corpus, includes train/dev/test files """
voc_file = join(self.dir, self.VOCAB_FILE)
if exists(voc_file):
with open(voc_file, "r") as f:
return [x.rstrip() for x in f]
else:
voc = set()
for fn in [self.get_train, self.get_dev, self.get_test]:
for question in fn():
voc.update(x.lower() for x in question.question)
for para in (question.distractors + [question.paragraph]):
voc.update(x.lower() for x in para.par_text)
voc_list = sorted(list(voc))
with open(voc_file, "w") as f:
for word in voc_list:
f.write(word)
f.write("\n")
return voc_list
def get_pruned_word_vecs(self, word_vec_name, voc=None):
"""
Loads word vectors that have been pruned to the case-insensitive vocab of this corpus.
WARNING: this includes dev words
This exists since loading word-vecs each time we startup can be a big pain, so
we cache the pruned vecs on-disk as a .npy file we can re-load quickly.
"""
vec_file = join(self.dir, word_vec_name + self.WORD_VEC_SUFFIX + ".npy")
if isfile(vec_file):
print("Loading word vec %s for %s from cache" % (word_vec_name, self.name))
with open(vec_file, "rb") as f:
return pickle.load(f)
else:
print("Building pruned word vec %s for %s" % (self.name, word_vec_name))
voc = self.get_vocab()
vecs = load_word_vectors(word_vec_name, voc)
with open(vec_file, "wb") as f:
pickle.dump(vecs, f)
return vecs
def get_resource_loader(self):
return ResourceLoader(self.get_pruned_word_vecs)
def _load_document_dict(self, train: bool):
if train:
if self.train_title_to_document is None:
self.train_title_to_document = self._load(join(self.dir, self.TRAIN_DOC_FILE))
else:
if self.dev_title_to_document is None:
self.dev_title_to_document = self._load(join(self.dir, self.DEV_DOC_FILE))
def _insert_text_to_paragraph(self, paragraph: SquadParagraph, train: bool):
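        # Paragraph text may have been dropped at pickling time (pickle_text=False), so restore it from the title -> document map.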
title_to_doc = self.train_title_to_document if train else self.dev_title_to_document
paragraph.par_text = title_to_doc[paragraph.doc_title].get_par(paragraph.par_id).par_text
paragraph.pickle_text = True # So that there will be no problems later
def _insert_text_to_question(self, question: SquadQuestionWithDistractors, train: bool):
for par in [question.paragraph] + question.distractors:
self._insert_text_to_paragraph(par, train)
def _populate_questions(self, questions: List[SquadQuestionWithDistractors], train: bool):
self._load_document_dict(train)
for q in questions:
self._insert_text_to_question(q, train)
def get_train(self) -> List[SquadQuestionWithDistractors]:
questions = self._load(join(self.dir, self.TRAIN_FILE))
self._populate_questions(questions, train=True)
return questions
def get_dev(self) -> List[SquadQuestionWithDistractors]:
questions = self._load(join(self.dir, self.DEV_FILE))
self._populate_questions(questions, train=False)
return questions
def get_test(self) -> List[SquadQuestionWithDistractors]:
return []
def _load(self, file):
if not exists(file):
return []
with open(file, "rb") as f:
return pickle.load(f)
def __getstate__(self):
state = self.__dict__.copy()
state['train_title_to_document'] = None
state['dev_title_to_document'] = None
return state
def __setstate__(self, state):
self.__dict__ = state
| [
"pickle.dump",
"os.makedirs",
"os.path.isdir",
"os.path.exists",
"hotpot.data_handling.word_vectors.load_word_vectors",
"os.path.isfile",
"hotpot.utils.ResourceLoader",
"pickle.load",
"hotpot.data_handling.data.RelevanceQuestion",
"os.path.join"
] | [((3821, 4071), 'hotpot.data_handling.data.RelevanceQuestion', 'RelevanceQuestion', ([], {'dataset_name': '"""squad"""', 'question_id': 'squad_question.question_id', 'question_tokens': 'squad_question.question', 'supporting_facts': '[squad_question.paragraph.par_text]', 'distractors': '[x.par_text for x in squad_question.distractors]'}), "(dataset_name='squad', question_id=squad_question.\n question_id, question_tokens=squad_question.question, supporting_facts=\n [squad_question.paragraph.par_text], distractors=[x.par_text for x in\n squad_question.distractors])\n", (3838, 4071), False, 'from hotpot.data_handling.data import RelevanceQuestion\n'), ((4733, 4776), 'os.path.join', 'join', (['CORPUS_DIR', 'SquadRelevanceCorpus.NAME'], {}), '(CORPUS_DIR, SquadRelevanceCorpus.NAME)\n', (4737, 4776), False, 'from os.path import join, exists, isfile, isdir\n'), ((5852, 5879), 'os.path.join', 'join', (['CORPUS_DIR', 'self.NAME'], {}), '(CORPUS_DIR, self.NAME)\n', (5856, 5879), False, 'from os.path import join, exists, isfile, isdir\n'), ((6242, 6273), 'os.path.join', 'join', (['self.dir', 'self.VOCAB_FILE'], {}), '(self.dir, self.VOCAB_FILE)\n', (6246, 6273), False, 'from os.path import join, exists, isfile, isdir\n'), ((6415, 6446), 'os.path.join', 'join', (['self.dir', 'self.VOCAB_FILE'], {}), '(self.dir, self.VOCAB_FILE)\n', (6419, 6446), False, 'from os.path import join, exists, isfile, isdir\n'), ((6458, 6474), 'os.path.exists', 'exists', (['voc_file'], {}), '(voc_file)\n', (6464, 6474), False, 'from os.path import join, exists, isfile, isdir\n'), ((7556, 7617), 'os.path.join', 'join', (['self.dir', "(word_vec_name + self.WORD_VEC_SUFFIX + '.npy')"], {}), "(self.dir, word_vec_name + self.WORD_VEC_SUFFIX + '.npy')\n", (7560, 7617), False, 'from os.path import join, exists, isfile, isdir\n'), ((7629, 7645), 'os.path.isfile', 'isfile', (['vec_file'], {}), '(vec_file)\n', (7635, 7645), False, 'from os.path import join, exists, isfile, isdir\n'), ((8164, 8205), 'hotpot.utils.ResourceLoader', 'ResourceLoader', (['self.get_pruned_word_vecs'], {}), '(self.get_pruned_word_vecs)\n', (8178, 8205), False, 'from hotpot.utils import ResourceLoader\n'), ((4946, 4957), 'os.path.exists', 'exists', (['dir'], {}), '(dir)\n', (4952, 4957), False, 'from os.path import join, exists, isfile, isdir\n'), ((4971, 4984), 'os.makedirs', 'makedirs', (['dir'], {}), '(dir)\n', (4979, 4984), False, 'from os import makedirs, listdir\n'), ((7970, 8007), 'hotpot.data_handling.word_vectors.load_word_vectors', 'load_word_vectors', (['word_vec_name', 'voc'], {}), '(word_vec_name, voc)\n', (7987, 8007), False, 'from hotpot.data_handling.word_vectors import load_word_vectors\n'), ((9454, 9485), 'os.path.join', 'join', (['self.dir', 'self.TRAIN_FILE'], {}), '(self.dir, self.TRAIN_FILE)\n', (9458, 9485), False, 'from os.path import join, exists, isfile, isdir\n'), ((9661, 9690), 'os.path.join', 'join', (['self.dir', 'self.DEV_FILE'], {}), '(self.dir, self.DEV_FILE)\n', (9665, 9690), False, 'from os.path import join, exists, isfile, isdir\n'), ((9898, 9910), 'os.path.exists', 'exists', (['file'], {}), '(file)\n', (9904, 9910), False, 'from os.path import join, exists, isfile, isdir\n'), ((9989, 10003), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (10000, 10003), False, 'import pickle\n'), ((5895, 5906), 'os.path.exists', 'exists', (['dir'], {}), '(dir)\n', (5901, 5906), False, 'from os.path import join, exists, isfile, isdir\n'), ((5914, 5924), 'os.path.isdir', 'isdir', (['dir'], {}), '(dir)\n', (5919, 5924), False, 'from 
os.path import join, exists, isfile, isdir\n'), ((7802, 7816), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (7813, 7816), False, 'import pickle\n'), ((8068, 8088), 'pickle.dump', 'pickle.dump', (['vecs', 'f'], {}), '(vecs, f)\n', (8079, 8088), False, 'import pickle\n'), ((5792, 5812), 'pickle.dump', 'pickle.dump', (['data', 'f'], {}), '(data, f)\n', (5803, 5812), False, 'import pickle\n'), ((8384, 8419), 'os.path.join', 'join', (['self.dir', 'self.TRAIN_DOC_FILE'], {}), '(self.dir, self.TRAIN_DOC_FILE)\n', (8388, 8419), False, 'from os.path import join, exists, isfile, isdir\n'), ((8542, 8575), 'os.path.join', 'join', (['self.dir', 'self.DEV_DOC_FILE'], {}), '(self.dir, self.DEV_DOC_FILE)\n', (8546, 8575), False, 'from os.path import join, exists, isfile, isdir\n'), ((5743, 5758), 'os.path.join', 'join', (['dir', 'name'], {}), '(dir, name)\n', (5747, 5758), False, 'from os.path import join, exists, isfile, isdir\n')] |
from flask import current_app, render_template, url_for
from markupsafe import Markup
from mass_flask_core.models import FileSample, IPSample, DomainSample, URISample, ExecutableBinarySample, UserLevel
from mass_flask_webui.config import webui_blueprint
@webui_blueprint.context_processor
def sample_processors():
def sample_icon(sample):
if isinstance(sample, FileSample):
return Markup('<i class="fa fa-file"></i>')
elif isinstance(sample, IPSample):
return Markup('<i class="fa fa-desktop"></i>')
elif isinstance(sample, DomainSample):
return Markup('<i class="fa fa-globe"></i>')
elif isinstance(sample, URISample):
return Markup('<i class="fa fa-at"></i>')
else:
return Markup('<i class="fa fa-question"></i>')
def is_file_sample(sample):
return isinstance(sample, FileSample)
def is_executable_binary_sample(sample):
return isinstance(sample, ExecutableBinarySample)
def tag_search_link(tag):
kwargs = {
'common-tags': tag,
'submit': 'Submit'
}
return url_for('.sample_search', **kwargs)
return dict(
sample_icon=sample_icon,
is_file_sample=is_file_sample,
is_executable_binary_sample=is_executable_binary_sample,
tag_search_link=tag_search_link
)
@webui_blueprint.context_processor
def user_processors():
def user_level(user):
if user.user_level == UserLevel.USER_LEVEL_ADMIN:
return 'Administrator'
elif user.user_level == UserLevel.USER_LEVEL_MANAGER:
return 'Manager'
elif user.user_level == UserLevel.USER_LEVEL_PRIVILEGED:
return 'Privileged user'
elif user.user_level == UserLevel.USER_LEVEL_USER:
return 'Normal user'
elif user.user_level == UserLevel.USER_LEVEL_ANONYMOUS:
return 'Guest user'
else:
return 'Unknown user level'
return dict(
user_level=user_level
)
@webui_blueprint.context_processor
def generic_processors():
def mass_version():
return current_app.version
def pagination(paginator):
return Markup(render_template('pagination.html', paginator=paginator))
return dict(
mass_version=mass_version,
pagination=pagination
)
| [
"markupsafe.Markup",
"flask.url_for",
"flask.render_template"
] | [((1144, 1179), 'flask.url_for', 'url_for', (['""".sample_search"""'], {}), "('.sample_search', **kwargs)\n", (1151, 1179), False, 'from flask import current_app, render_template, url_for\n'), ((408, 444), 'markupsafe.Markup', 'Markup', (['"""<i class="fa fa-file"></i>"""'], {}), '(\'<i class="fa fa-file"></i>\')\n', (414, 444), False, 'from markupsafe import Markup\n'), ((2225, 2280), 'flask.render_template', 'render_template', (['"""pagination.html"""'], {'paginator': 'paginator'}), "('pagination.html', paginator=paginator)\n", (2240, 2280), False, 'from flask import current_app, render_template, url_for\n'), ((507, 546), 'markupsafe.Markup', 'Markup', (['"""<i class="fa fa-desktop"></i>"""'], {}), '(\'<i class="fa fa-desktop"></i>\')\n', (513, 546), False, 'from markupsafe import Markup\n'), ((613, 650), 'markupsafe.Markup', 'Markup', (['"""<i class="fa fa-globe"></i>"""'], {}), '(\'<i class="fa fa-globe"></i>\')\n', (619, 650), False, 'from markupsafe import Markup\n'), ((714, 748), 'markupsafe.Markup', 'Markup', (['"""<i class="fa fa-at"></i>"""'], {}), '(\'<i class="fa fa-at"></i>\')\n', (720, 748), False, 'from markupsafe import Markup\n'), ((782, 822), 'markupsafe.Markup', 'Markup', (['"""<i class="fa fa-question"></i>"""'], {}), '(\'<i class="fa fa-question"></i>\')\n', (788, 822), False, 'from markupsafe import Markup\n')] |
from users.models import User
from django.db import models
PET_SIZES = [('P', 'Pequeno'), ('M', 'Médio'), ('G', 'Grande')]
PET_SEX = [('M', 'Macho'), ('F', 'Fêmea')]
# PET TYPE
GATO = 'Gato'
CACHORRO = 'Cachorro'
PASSARO = 'Pássaro'
ROEDOR = 'Roedor'
OUTRO = 'Outro'
# DEFAULT
DE00 = 'Sem raça definida'
DE01 = 'Outra'
# CAT BREED
CB00 = 'Abssínios'
CB01 = 'Alemão de pelo comprido'
CB02 = 'American Curl'
CB03 = 'American Shorthair'
CB04 = 'American Wirehair'
CB05 = 'Azul Russo'
CB06 = 'Balineses'
CB07 = 'Bengalês'
CB08 = 'Bobtail'
CB09 = 'Bobtail Japonês'
CB10 = 'Bombay'
CB11 = 'British Shorthair'
CB12 = 'Burmês'
CB13 = 'Burmilla'
CB14 = 'Califórinia Spangled'
CB15 = 'Chartreux'
CB16 = 'Cornish Rex'
CB17 = 'Cymric'
CB18 = 'Devon Rex'
CB19 = 'Exóticos'
CB20 = 'Foldex'
CB21 = 'German Rex'
CB22 = 'Habana'
CB23 = 'High Land Fold'
CB24 = 'Himalaios'
CB25 = 'Javaneses'
CB26 = 'Khao Manee'
CB27 = 'Korat'
CB28 = 'Maine Coon'
CB29 = 'Manx'
CB30 = '<NAME>'
CB31 = '<NAME>'
CB32 = 'Ragdoll'
CB33 = '<NAME>'
CB34 = 'Ragamuffin'
CB35 = 'Ragdoll'
# DOG BREED
DB00 = 'Akita'
DB01 = 'Basset hound'
DB02 = 'Beagle'
DB03 = 'Boiadeiro australiano'
DB04 = 'Border collie'
DB05 = 'Boston terrier'
DB06 = 'Boxer'
DB07 = 'Buldogue'
DB08 = 'Bull terrier'
DB09 = 'Chihuahua'
DB10 = 'Chow chow'
DB11 = 'Dálmata'
DB12 = 'Doberman'
DB13 = 'Dogo argentino'
DB14 = 'Dogue alemão'
DB15 = 'Fila brasileiro'
DB16 = 'Golden retriever'
DB17 = 'Husky siberiano'
DB18 = '<NAME>'
DB19 = 'Labrador'
DB20 = 'Lhasa apso'
DB21 = 'Lulu da pomerânia'
DB22 = 'Maltês'
DB23 = 'Pastor alemão'
DB24 = 'Pastor australianoPastor de Shetland'
DB25 = 'Pequinês'
DB26 = 'Pinscher'
DB27 = 'Pit bull'
DB28 = 'Poodle'
DB29 = 'Pug'
DB30 = 'Rottweiler'
DB31 = 'Shar-pei'
DB32 = 'Shiba'
DB33 = 'Shih tzu'
DB34 = 'Weimaraner'
DB35 = 'Yorkshire'
# BIRD BREED
BB00 = 'Agapornis'
BB01 = 'Araponga'
BB02 = 'Arara'
BB03 = 'Azulão'
BB04 = 'Bavete'
BB05 = 'Bicudo'
BB06 = 'Cabloquinho'
BB07 = 'Cacatua'
BB08 = 'Calafete'
BB09 = 'Calopsita'
BB10 = 'Canário'
BB11 = 'Cardeal'
BB12 = 'Coleiro'
BB13 = 'Cordonbleu'
BB14 = 'Coruja'
BB15 = 'Curió'
BB16 = 'Diamante Mandarin'
BB17 = 'Dominó'
BB18 = 'Explêndido'
BB19 = 'Granatina'
BB20 = 'Jandaia'
BB21 = 'Lóris'
BB22 = 'Mainá'
BB23 = 'Modesto'
BB24 = 'Papagaio'
BB25 = 'Pássaro Preto'
BB26 = 'Patativa'
BB27 = 'Perequito Autraliano'
BB28 = 'Pica-pau'
BB29 = 'Pintassilgo'
BB30 = 'Pombo'
BB31 = 'Rolinha'
BB32 = 'Rouxinol'
BB33 = 'S<NAME>'
BB34 = 'Tangará'
BB35 = 'Tico-tico'
BB36 = 'Tucano'
# RODENT BREED
RB00 = 'Camundongo'
RB01 = 'Chinchila'
RB02 = 'Gerbil - Esquilo da MOngólia'
RB03 = 'Hamster Anão Russo'
RB04 = 'Hamster Sírio'
RB05 = 'Mecol - Twister'
RB06 = 'Porquinho da índia'
RB07 = 'Topolino'
TYPE_CHOICES = [(GATO, GATO), (CACHORRO, CACHORRO), (PASSARO, PASSARO), (ROEDOR, ROEDOR), (OUTRO, OUTRO),]
BREED_CHOICES = [
(DE00, DE00), (DE01, DE01),
(CB00, CB00), (CB01, CB01), (CB02, CB02), (CB03, CB03), (CB04, CB04), (CB05, CB05),
(CB06, CB06), (CB07, CB07), (CB08, CB08), (CB09, CB09), (CB10, CB10), (CB11, CB11),
(CB12, CB12), (CB13, CB13), (CB14, CB14), (CB15, CB15), (CB16, CB16), (CB17, CB17),
(CB18, CB18), (CB19, CB19), (CB20, CB20), (CB21, CB21), (CB22, CB22), (CB23, CB23),
(CB24, CB24), (CB25, CB25), (CB26, CB26), (CB27, CB27), (CB28, CB28), (CB29, CB29),
(CB30, CB30), (CB31, CB31), (CB32, CB32), (CB33, CB33), (CB34, CB34), (CB35, CB35),
(DB00, DB00), (DB01, DB01), (DB02, DB02), (DB03, DB03), (DB04, DB04), (DB05, DB05),
(DB06, DB06), (DB07, DB07), (DB08, DB08), (DB09, DB09), (DB10, DB10), (DB11, DB11),
(DB12, DB12), (DB13, DB13), (DB14, DB14), (DB15, DB15), (DB16, DB16), (DB17, DB17),
(DB18, DB18), (DB19, DB19), (DB20, DB20), (DB21, DB21), (DB22, DB22), (DB23, DB23),
(DB24, DB24), (DB25, DB25), (DB26, DB26), (DB27, DB27), (DB28, DB28), (DB29, DB29),
(DB30, DB30), (DB31, DB31), (DB32, DB32), (DB33, DB33), (DB34, DB34), (DB35, DB35),
(BB00, BB00), (BB01, BB01), (BB02, BB02), (BB03, BB03), (BB04, BB04), (BB05, BB05),
(BB06, BB06), (BB07, BB07), (BB08, BB08), (BB09, BB09), (BB10, BB10), (BB11, BB11),
(BB12, BB12), (BB13, BB13), (BB14, BB14), (BB15, BB15), (BB16, BB16), (BB17, BB17),
(BB18, BB18), (BB19, BB19), (BB20, BB20), (BB21, BB21), (BB22, BB22), (BB23, BB23),
(BB24, BB24), (BB25, BB25), (BB26, BB26), (BB27, BB27), (BB28, BB28), (BB29, BB29),
(BB30, BB30), (BB31, BB31), (BB32, BB32), (BB33, BB33), (BB34, BB34), (BB35, BB35),
(RB00, RB00), (RB01, RB01), (RB02, RB02), (RB03, RB03), (RB04, RB04), (RB05, RB05),
(RB06, RB06), (RB07, RB07),
]
def get_cat_breeds():
catBreeds = [
DE00, DE01,
CB00, CB01, CB02, CB03, CB04, CB05, CB06, CB07, CB08, CB09, CB10, CB11,
CB12, CB13, CB14, CB15, CB16, CB17, CB18, CB19, CB20, CB21, CB22, CB23,
CB24, CB25, CB26, CB27, CB28, CB29, CB30, CB31, CB32, CB33, CB34, CB35,
]
return catBreeds
def get_dog_breeds():
dogBreeds = [
DE00, DE01,
DB00, DB01, DB02, DB03, DB04, DB05, DB06, DB07, DB08, DB09, DB10, DB11,
DB12, DB13, DB14, DB15, DB16, DB17, DB18, DB19, DB20, DB21, DB22, DB23,
DB24, DB25, DB26, DB27, DB28, DB29, DB30, DB31, DB32, DB33, DB34, DB35,
]
return dogBreeds
def get_bird_breeds():
birdBreeds = [
DE00, DE01,
BB00, BB01, BB02, BB03, BB04, BB05, BB06, BB07, BB08, BB09, BB10, BB11,
BB12, BB13, BB14, BB15, BB16, BB17, BB18, BB19, BB20, BB21, BB22, BB23,
BB24, BB25, BB26, BB27, BB28, BB29, BB30, BB31, BB32, BB33, BB34, BB35,
]
return birdBreeds
def get_rodent_breeds():
rodentBreeds = [
DE00, DE01,
RB00, RB01, RB02, RB03, RB04, RB05, RB06, RB07,
]
return rodentBreeds
def get_other_breeds():
otherBreed = [DE01,]
return otherBreed
class Pet(models.Model):
user = models.ForeignKey(User, default=None, on_delete=models.CASCADE)
image = models.ImageField(upload_to='pet_image', blank=False, null=False)
name = models.CharField(max_length=30, blank=False, null=False)
description = models.CharField(max_length=500, blank=False, null=False)
age = models.PositiveSmallIntegerField(null=True)
size = models.CharField(max_length=1, choices=PET_SIZES, blank=False, null=False)
sex = models.CharField(max_length=1, choices=PET_SEX, blank=False, null=False)
vaccinated = models.BooleanField(default=False)
castrated = models.BooleanField(default=False)
dewormed = models.BooleanField(default=False)
vulnerable = models.BooleanField(default=False)
isAdopted = models.BooleanField(default=False)
pet_type = models.CharField(max_length=50, choices=TYPE_CHOICES)
breed = models.CharField(max_length=50, choices=BREED_CHOICES)
| [
"django.db.models.ForeignKey",
"django.db.models.CharField",
"django.db.models.BooleanField",
"django.db.models.PositiveSmallIntegerField",
"django.db.models.ImageField"
] | [((6494, 6557), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'default': 'None', 'on_delete': 'models.CASCADE'}), '(User, default=None, on_delete=models.CASCADE)\n', (6511, 6557), False, 'from django.db import models\n'), ((6570, 6635), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""pet_image"""', 'blank': '(False)', 'null': '(False)'}), "(upload_to='pet_image', blank=False, null=False)\n", (6587, 6635), False, 'from django.db import models\n'), ((6647, 6703), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'blank': '(False)', 'null': '(False)'}), '(max_length=30, blank=False, null=False)\n', (6663, 6703), False, 'from django.db import models\n'), ((6722, 6779), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(500)', 'blank': '(False)', 'null': '(False)'}), '(max_length=500, blank=False, null=False)\n', (6738, 6779), False, 'from django.db import models\n'), ((6790, 6833), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'null': '(True)'}), '(null=True)\n', (6822, 6833), False, 'from django.db import models\n'), ((6845, 6919), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1)', 'choices': 'PET_SIZES', 'blank': '(False)', 'null': '(False)'}), '(max_length=1, choices=PET_SIZES, blank=False, null=False)\n', (6861, 6919), False, 'from django.db import models\n'), ((6930, 7002), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(1)', 'choices': 'PET_SEX', 'blank': '(False)', 'null': '(False)'}), '(max_length=1, choices=PET_SEX, blank=False, null=False)\n', (6946, 7002), False, 'from django.db import models\n'), ((7020, 7054), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (7039, 7054), False, 'from django.db import models\n'), ((7071, 7105), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (7090, 7105), False, 'from django.db import models\n'), ((7121, 7155), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (7140, 7155), False, 'from django.db import models\n'), ((7173, 7207), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (7192, 7207), False, 'from django.db import models\n'), ((7224, 7258), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (7243, 7258), False, 'from django.db import models\n'), ((7274, 7327), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'choices': 'TYPE_CHOICES'}), '(max_length=50, choices=TYPE_CHOICES)\n', (7290, 7327), False, 'from django.db import models\n'), ((7340, 7394), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'choices': 'BREED_CHOICES'}), '(max_length=50, choices=BREED_CHOICES)\n', (7356, 7394), False, 'from django.db import models\n')] |
# coding: utf-8
import pygame
import os
from functions import *
from color import *
from pygame.locals import *
from listOfCards import allCards, values
from About import About
from Text import *
from color import *
class Card(pygame.sprite.Sprite):
"""Manages the cards in the game"""
def __init__(self, number, owner):
        super().__init__()  # properly initialise the pygame Sprite base class
self.owner = owner
self.number = number
self.name = allCards[self.number]
self.image = None
#self.verso = carteVerso
self.About = About(self.name, self)
# We put the numbers of the card according to listeCartes.py
self.top = values[number][0]
self.right = values[number][1]
self.bottom = values[number][2]
self.left = values[number][3]
self.values = []
self.values.append(self.top)
self.values.append(self.right)
self.values.append(self.bottom)
self.values.append(self.left)
self.parseToInt()
# Which element
self.elementName = values[number][4]
# Offensive or defensive. Unused for now
self.type = values[number][5]
self.modifierValue = 0
self.inHand = 1
getCard(self)
self.rect = self.image.get_rect()
if self.elementName != None:
self.element, self.elementRect = loadElement(self.elementName)
self.elementRect.topright = self.rect.topright
self.elementRect.move_ip(-2, 2)
self.image.blit(self.element, self.elementRect)
def changeOwner(self):
getCard(self)
self.image.set_alpha()
def addModifier(self, value):
"""Add bonus or malus to the card and draw the bonus on the card"""
self.modifierValue = value
if value > 0:
value = "+" + str(value)
else:
value = str(value)
self.modifier = Text(value, "rimouski sb.ttf", white, 60)
self.modifierBack = Text(value, "rimouski sb.ttf", black, 60)
#self.modifier.rect.topleft = self.rect.topleft
self.modifier.rect.move_ip(35, 15)
self.modifierBack.rect.move_ip(38, 18)
self.image.blit(self.modifierBack.surface, self.modifierBack.rect)
self.image.blit(self.modifier.surface, self.modifier.rect)
for i in range(0, 4):
self.values[i] += self.modifierValue
def addCursor(self):
"""Add a colored border to the focused card"""
self.border, self.borderRect = loadImage("images/border.png")
def parseToInt(self):
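        # Convert the four edge values to ints; 'A' stands for the top value, 10.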
for i in range(0, 4):
if (self.values[i] == 'A'):
self.values[i] = 10
else:
self.values[i] = int(self.values[i])
def __repr__(self):
return "<Card at %s >" % (self.rect)
| [
"About.About"
] | [((561, 583), 'About.About', 'About', (['self.name', 'self'], {}), '(self.name, self)\n', (566, 583), False, 'from About import About\n')] |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import torch
from torch.utils.data import Dataset
from sklearn.model_selection import train_test_split
# Define the class for the Meta-material dataset
class MetaMaterialDataSet(Dataset):
""" The Meta Material Dataset Class """
def __init__(self, ftr, lbl, bool_train):
"""
Instantiate the Dataset Object
        :param ftr: the features, which are always the Geometry !!
        :param lbl: the labels, which are always the Spectra !!
:param bool_train:
"""
self.ftr = ftr
self.lbl = lbl
self.bool_train = bool_train
self.len = len(ftr)
def __len__(self):
return self.len
def __getitem__(self, ind):
return self.ftr[ind, :], self.lbl[ind, :]
## Copied from Omar's code
# Make geometry samples
def MM_Geom(n):
# Parameter bounds for metamaterial radius and height
r_min = 20
r_max = 200
h_min = 20
h_max = 100
# Defines hypergeometric space of parameters to choose from
space = 10
r_space = np.linspace(r_min, r_max, space + 1)
h_space = np.linspace(h_min, h_max, space + 1)
# Shuffles r,h arrays each iteration and then selects 0th element to generate random n x n parameter set
r, h = np.zeros(n, dtype=float), np.zeros(n, dtype=float)
for i in range(n):
np.random.shuffle(r_space)
np.random.shuffle(h_space)
r[i] = r_space[0]
h[i] = h_space[0]
return r, h
# Make geometry and spectra
def Make_MM_Model(n):
r, h = MM_Geom(n)
spectra = np.zeros(300)
geom = np.concatenate((r, h), axis=0)
for i in range(n):
w0 = 100 / h[i]
wp = (1 / 100) * np.sqrt(np.pi) * r[i]
g = (1 / 1000) * np.sqrt(np.pi) * r[i]
w, e2 = Lorentzian(w0, wp, g)
spectra += e2
return geom, spectra
# Calculate Lorentzian function to get spectra
def Lorentzian(w0, wp, g):
freq_low = 0
freq_high = 5
num_freq = 300
w = np.arange(freq_low, freq_high, (freq_high - freq_low) / num_freq)
# e1 = np.divide(np.multiply(np.power(wp, 2), np.add(np.power(w0, 2), -np.power(w, 2))),
# np.add(np.power(np.add(np.power(w0, 2), -np.power(w, 2)), 2),
# np.multiply(np.power(w, 2), np.power(g, 2))))
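    # e2 is the imaginary (absorptive) part of a Lorentz oscillator: wp^2 * w * g / ((w0^2 - w^2)^2 + w^2 * g^2)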
e2 = np.divide(np.multiply(np.power(wp, 2), np.multiply(w, g)),
np.add(np.power(np.add(np.power(w0, 2), -np.power(w, 2)), 2),
np.multiply(np.power(w, 2), np.power(g, 2))))
return w, e2
# Generates randomized dataset of simulated spectra for training and testing
def Prepare_Data(osc, sets, batch_size):
features = []
labels = []
for i in range(sets):
geom, spectra = Make_MM_Model(osc)
features.append(geom)
labels.append(spectra)
features = np.array(features, dtype='float32')
labels = np.array(labels, dtype='float32')
ftrsize = features.size / sets
lblsize = labels.size / sets
print('Size of Features is %i, Size of Labels is %i' % (ftrsize, lblsize))
print('There are %i datasets:' % sets)
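    # Hold out 20% of the simulated samples for testing; the fixed seed keeps the split reproducible.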
ftrTrain, ftrTest, lblTrain, lblTest = train_test_split(features, labels, test_size=0.2, random_state=1234)
train_data = MetaMaterialDataSet(ftrTrain, lblTrain, bool_train=True)
test_data = MetaMaterialDataSet(ftrTest, lblTest, bool_train=False)
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size)
print('Number of Training samples is {}'.format(len(ftrTrain)))
print('Number of Test samples is {}'.format(len(ftrTest)))
return train_loader, test_loader
def gen_data(name):
train_loader, test_loader = Prepare_Data(1, 10000, 1000)
with open(name, 'a') as datafile:
for j, (geometry, spectra) in enumerate(train_loader):
concate = np.concatenate([geometry, spectra], axis=1)
# print(np.shape(concate))
np.savetxt(datafile, concate, delimiter=',')
if __name__ == "__main__":
train_loader, test_loader = Prepare_Data(1, 10000, 1000)
with open('toy_data/mm1d_6.csv', 'a') as datafile:
for j, (geometry, spectra) in enumerate(train_loader):
concate = np.concatenate([geometry, spectra], axis=1)
#print(np.shape(concate))
np.savetxt(datafile, concate, delimiter=',') | [
"numpy.random.shuffle",
"numpy.multiply",
"torch.utils.data.DataLoader",
"sklearn.model_selection.train_test_split",
"numpy.power",
"numpy.savetxt",
"numpy.zeros",
"numpy.arange",
"numpy.array",
"numpy.linspace",
"numpy.concatenate",
"numpy.sqrt"
] | [((1093, 1129), 'numpy.linspace', 'np.linspace', (['r_min', 'r_max', '(space + 1)'], {}), '(r_min, r_max, space + 1)\n', (1104, 1129), True, 'import numpy as np\n'), ((1144, 1180), 'numpy.linspace', 'np.linspace', (['h_min', 'h_max', '(space + 1)'], {}), '(h_min, h_max, space + 1)\n', (1155, 1180), True, 'import numpy as np\n'), ((1602, 1615), 'numpy.zeros', 'np.zeros', (['(300)'], {}), '(300)\n', (1610, 1615), True, 'import numpy as np\n'), ((1627, 1657), 'numpy.concatenate', 'np.concatenate', (['(r, h)'], {'axis': '(0)'}), '((r, h), axis=0)\n', (1641, 1657), True, 'import numpy as np\n'), ((2022, 2087), 'numpy.arange', 'np.arange', (['freq_low', 'freq_high', '((freq_high - freq_low) / num_freq)'], {}), '(freq_low, freq_high, (freq_high - freq_low) / num_freq)\n', (2031, 2087), True, 'import numpy as np\n'), ((2885, 2920), 'numpy.array', 'np.array', (['features'], {'dtype': '"""float32"""'}), "(features, dtype='float32')\n", (2893, 2920), True, 'import numpy as np\n'), ((2934, 2967), 'numpy.array', 'np.array', (['labels'], {'dtype': '"""float32"""'}), "(labels, dtype='float32')\n", (2942, 2967), True, 'import numpy as np\n'), ((3203, 3271), 'sklearn.model_selection.train_test_split', 'train_test_split', (['features', 'labels'], {'test_size': '(0.2)', 'random_state': '(1234)'}), '(features, labels, test_size=0.2, random_state=1234)\n', (3219, 3271), False, 'from sklearn.model_selection import train_test_split\n'), ((3437, 3499), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_data'], {'batch_size': 'batch_size'}), '(train_data, batch_size=batch_size)\n', (3464, 3499), False, 'import torch\n'), ((3518, 3579), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_data'], {'batch_size': 'batch_size'}), '(test_data, batch_size=batch_size)\n', (3545, 3579), False, 'import torch\n'), ((1302, 1326), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': 'float'}), '(n, dtype=float)\n', (1310, 1326), True, 'import numpy as np\n'), ((1328, 1352), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': 'float'}), '(n, dtype=float)\n', (1336, 1352), True, 'import numpy as np\n'), ((1384, 1410), 'numpy.random.shuffle', 'np.random.shuffle', (['r_space'], {}), '(r_space)\n', (1401, 1410), True, 'import numpy as np\n'), ((1419, 1445), 'numpy.random.shuffle', 'np.random.shuffle', (['h_space'], {}), '(h_space)\n', (1436, 1445), True, 'import numpy as np\n'), ((2377, 2392), 'numpy.power', 'np.power', (['wp', '(2)'], {}), '(wp, 2)\n', (2385, 2392), True, 'import numpy as np\n'), ((2394, 2411), 'numpy.multiply', 'np.multiply', (['w', 'g'], {}), '(w, g)\n', (2405, 2411), True, 'import numpy as np\n'), ((3954, 3997), 'numpy.concatenate', 'np.concatenate', (['[geometry, spectra]'], {'axis': '(1)'}), '([geometry, spectra], axis=1)\n', (3968, 3997), True, 'import numpy as np\n'), ((4049, 4093), 'numpy.savetxt', 'np.savetxt', (['datafile', 'concate'], {'delimiter': '""","""'}), "(datafile, concate, delimiter=',')\n", (4059, 4093), True, 'import numpy as np\n'), ((4325, 4368), 'numpy.concatenate', 'np.concatenate', (['[geometry, spectra]'], {'axis': '(1)'}), '([geometry, spectra], axis=1)\n', (4339, 4368), True, 'import numpy as np\n'), ((4419, 4463), 'numpy.savetxt', 'np.savetxt', (['datafile', 'concate'], {'delimiter': '""","""'}), "(datafile, concate, delimiter=',')\n", (4429, 4463), True, 'import numpy as np\n'), ((1730, 1744), 'numpy.sqrt', 'np.sqrt', (['np.pi'], {}), '(np.pi)\n', (1737, 1744), True, 'import numpy as np\n'), ((1777, 1791), 'numpy.sqrt', 'np.sqrt', (['np.pi'], {}), 
'(np.pi)\n', (1784, 1791), True, 'import numpy as np\n'), ((2533, 2547), 'numpy.power', 'np.power', (['w', '(2)'], {}), '(w, 2)\n', (2541, 2547), True, 'import numpy as np\n'), ((2549, 2563), 'numpy.power', 'np.power', (['g', '(2)'], {}), '(g, 2)\n', (2557, 2563), True, 'import numpy as np\n'), ((2456, 2471), 'numpy.power', 'np.power', (['w0', '(2)'], {}), '(w0, 2)\n', (2464, 2471), True, 'import numpy as np\n'), ((2474, 2488), 'numpy.power', 'np.power', (['w', '(2)'], {}), '(w, 2)\n', (2482, 2488), True, 'import numpy as np\n')] |
# Read the problem below and then implement it in code. You do not need to submit your
# written decomposition of how you’ve worked it out but make sure to comment your code
# to explain what you’ve done.
#
# A computer generates a random number from 0 – 10. It then asks the user to make a
# guess. They have 5 attempts to get it right. If they get it correct, the program says
# they’ve won and ends. If they’re wrong, they’re asked to guess again and told how many
# attempts they have remaining.
from random import randint
# Inclusive
random_num = randint(0, 10)
turns = 5
# 5 turns: turn counts down 4, 3, 2, 1, 0, so the player gets five guesses in total
for turn in range(turns - 1, -1, -1):
guess = int(input("Make a guess: "))
if (guess == random_num):
print("You're correct!")
break
else:
print(f"Incorrect guess. You have {turn} guesses remaining.")
| [
"random.randint"
] | [((555, 569), 'random.randint', 'randint', (['(0)', '(10)'], {}), '(0, 10)\n', (562, 569), False, 'from random import randint\n')] |
from django.urls import reverse_lazy
from oauth2_provider import views
from .. import mixins
class ApplicationList(mixins.TwoFactorMixin, mixins.ApplicationViewMixin, views.ApplicationList):
template_name = 'oauth2/applications/list.html'
class ApplicationRegistration(mixins.ApplicationCreationMixin, mixins.TwoFactorMixin, views.ApplicationRegistration):
template_name = 'oauth2/applications/register.html'
def get_success_url(self):
return reverse_lazy('application_detail', kwargs={'pk': self.object.pk})
class ApplicationDetail(mixins.ApplicationViewMixin, mixins.TwoFactorMixin, views.ApplicationDetail):
template_name = 'oauth2/applications/detail.html'
class ApplicationDelete(mixins.ApplicationDeleteMixin, mixins.TwoFactorMixin, views.ApplicationDelete):
template_name = 'oauth2/applications/delete.html'
success_url = reverse_lazy('application_list')
class ApplicationUpdate(mixins.ApplicationChangeMixin, mixins.TwoFactorMixin, views.ApplicationUpdate):
template_name = 'oauth2/applications/update.html'
def get_success_url(self):
return reverse_lazy('application_detail', kwargs={'pk': self.object.pk})
| [
"django.urls.reverse_lazy"
] | [((871, 903), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""application_list"""'], {}), "('application_list')\n", (883, 903), False, 'from django.urls import reverse_lazy\n'), ((469, 534), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""application_detail"""'], {'kwargs': "{'pk': self.object.pk}"}), "('application_detail', kwargs={'pk': self.object.pk})\n", (481, 534), False, 'from django.urls import reverse_lazy\n'), ((1111, 1176), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""application_detail"""'], {'kwargs': "{'pk': self.object.pk}"}), "('application_detail', kwargs={'pk': self.object.pk})\n", (1123, 1176), False, 'from django.urls import reverse_lazy\n')] |
import frappe
def execute():
a=frappe.new_doc("Task")
a.subject='axy'
a.save()
print(a.name)
# #bench execute demo.doctype.task.execute
# print('***************') | [
"frappe.new_doc"
] | [((35, 57), 'frappe.new_doc', 'frappe.new_doc', (['"""Task"""'], {}), "('Task')\n", (49, 57), False, 'import frappe\n')] |
from gettext import gettext
def foo():
print(gettext('ssshhh....'))
| [
"gettext.gettext"
] | [((51, 72), 'gettext.gettext', 'gettext', (['"""ssshhh...."""'], {}), "('ssshhh....')\n", (58, 72), False, 'from gettext import gettext\n')] |
import numpy as np
from ..colors import Color
from .widget import Widget, overlapping_region
from .widget_data_structures import Point, Size, Rect
class _Root(Widget):
"""
Root widget. Meant to be instantiated by the `App` class. Renders to terminal.
"""
def __init__(self, app, env_out, default_char, default_color: Color):
self._app = app
self.env_out = env_out
self.default_char = default_char
self.default_color = default_color
self.children = [ ]
self.resize(env_out.get_size())
def resize(self, dim: Size):
"""
Resize canvas. Last render is erased.
"""
self.env_out.erase_screen()
self.env_out.flush()
self._dim = dim
self._last_canvas = np.full(dim, self.default_char, dtype=object)
self._last_colors = np.full((*dim, 6), self.default_color, dtype=np.uint8)
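        # The previous frame is kept so `render` can diff against it and only redraw cells that changed.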
self.canvas = np.full_like(self._last_canvas, "><") # "><" will guarantee an entire screen redraw.
self.colors = self._last_colors.copy()
# Buffer arrays to re-use in the `render` method:
self._char_diffs = np.zeros_like(self.canvas, dtype=np.bool8)
self._color_diffs = np.zeros_like(self.colors, dtype=np.bool8)
self._reduced_color_diffs = np.zeros_like(self.canvas, dtype=np.bool8)
for child in self.children:
child.update_geometry()
@property
def top(self):
return 0
@property
def left(self):
return 0
@property
def pos(self):
return Point(0, 0)
@property
def absolute_pos(self):
return Point(0, 0)
@property
def is_transparent(self):
return False
@property
def is_visible(self):
return True
@property
def parent(self):
return None
@property
def root(self):
return self
@property
def app(self):
return self._app
def absolute_to_relative_coords(self, coord):
return coord
def render(self):
"""
Paint canvas. Render to terminal.
"""
# Swap canvas with last render:
self.canvas, self._last_canvas = self._last_canvas, self.canvas
self.colors, self._last_colors = self._last_colors, self.colors
# Bring arrays into locals:
canvas = self.canvas
colors = self.colors
char_diffs = self._char_diffs
color_diffs = self._color_diffs
reduced_color_diffs = self._reduced_color_diffs
env_out = self.env_out
write = env_out._buffer.append
# Erase canvas:
canvas[:] = self.default_char
colors[:, :] = self.default_color
overlap = overlapping_region
height, width = canvas.shape
rect = Rect(
0,
0,
height,
width,
height,
width,
)
for child in self.children:
if region := overlap(rect, child):
dest_slice, child_rect = region
child.render(canvas[dest_slice], colors[dest_slice], child_rect)
# Find differences between current render and last render:
        # (This is an optimized version of `(last_canvas != canvas) | np.any(last_colors != colors, axis=-1)`
# that re-uses buffers instead of creating new arrays.)
np.not_equal(self._last_canvas, canvas, out=char_diffs)
np.not_equal(self._last_colors, colors, out=color_diffs)
np.any(color_diffs, axis=-1, out=reduced_color_diffs)
np.logical_or(char_diffs, reduced_color_diffs, out=char_diffs)
write("\x1b[?25l") # Hide cursor
ys, xs = np.nonzero(char_diffs)
for y, x, color, char in zip(ys, xs, colors[ys, xs], canvas[ys, xs]):
# The escape codes for moving the cursor and setting the color concatenated:
write("\x1b[{};{}H\x1b[0;38;2;{};{};{};48;2;{};{};{}m{}".format(y + 1, x + 1, *color, char))
write("\x1b[0m") # Reset attributes
env_out.flush()
def dispatch_press(self, key_press):
"""
Dispatch key press to descendants until handled.
"""
any(widget.dispatch_press(key_press) for widget in reversed(self.children))
def dispatch_click(self, mouse_event):
"""
        Dispatch mouse event to descendants until handled.
"""
any(widget.dispatch_click(mouse_event) for widget in reversed(self.children))
| [
"numpy.full",
"numpy.full_like",
"numpy.zeros_like",
"numpy.not_equal",
"numpy.any",
"numpy.nonzero",
"numpy.logical_or"
] | [((775, 820), 'numpy.full', 'np.full', (['dim', 'self.default_char'], {'dtype': 'object'}), '(dim, self.default_char, dtype=object)\n', (782, 820), True, 'import numpy as np\n'), ((849, 903), 'numpy.full', 'np.full', (['(*dim, 6)', 'self.default_color'], {'dtype': 'np.uint8'}), '((*dim, 6), self.default_color, dtype=np.uint8)\n', (856, 903), True, 'import numpy as np\n'), ((927, 964), 'numpy.full_like', 'np.full_like', (['self._last_canvas', '"""><"""'], {}), "(self._last_canvas, '><')\n", (939, 964), True, 'import numpy as np\n'), ((1146, 1188), 'numpy.zeros_like', 'np.zeros_like', (['self.canvas'], {'dtype': 'np.bool8'}), '(self.canvas, dtype=np.bool8)\n', (1159, 1188), True, 'import numpy as np\n'), ((1217, 1259), 'numpy.zeros_like', 'np.zeros_like', (['self.colors'], {'dtype': 'np.bool8'}), '(self.colors, dtype=np.bool8)\n', (1230, 1259), True, 'import numpy as np\n'), ((1296, 1338), 'numpy.zeros_like', 'np.zeros_like', (['self.canvas'], {'dtype': 'np.bool8'}), '(self.canvas, dtype=np.bool8)\n', (1309, 1338), True, 'import numpy as np\n'), ((3369, 3424), 'numpy.not_equal', 'np.not_equal', (['self._last_canvas', 'canvas'], {'out': 'char_diffs'}), '(self._last_canvas, canvas, out=char_diffs)\n', (3381, 3424), True, 'import numpy as np\n'), ((3433, 3489), 'numpy.not_equal', 'np.not_equal', (['self._last_colors', 'colors'], {'out': 'color_diffs'}), '(self._last_colors, colors, out=color_diffs)\n', (3445, 3489), True, 'import numpy as np\n'), ((3498, 3551), 'numpy.any', 'np.any', (['color_diffs'], {'axis': '(-1)', 'out': 'reduced_color_diffs'}), '(color_diffs, axis=-1, out=reduced_color_diffs)\n', (3504, 3551), True, 'import numpy as np\n'), ((3560, 3622), 'numpy.logical_or', 'np.logical_or', (['char_diffs', 'reduced_color_diffs'], {'out': 'char_diffs'}), '(char_diffs, reduced_color_diffs, out=char_diffs)\n', (3573, 3622), True, 'import numpy as np\n'), ((3684, 3706), 'numpy.nonzero', 'np.nonzero', (['char_diffs'], {}), '(char_diffs)\n', (3694, 3706), True, 'import numpy as np\n')] |
import re
import time
from datetime import datetime
import urllib
import mechanicalsoup
import getpass
from bs4 import BeautifulSoup
# Convert Time Function
def convertTime(time):
try:
time = time.replace(",","").replace("@","").replace("."," ").replace(":"," ")
t2 = datetime.strptime(time[3:], "%A %B %d %Y %I %M%p")
return (t2-datetime(1970,1,1)).total_seconds()
except Exception:
print ("Error while converting time to seconds")
return 1
url = 'https://slashdot.org/'
# List Variables
outputList = []
article_headline_list = []
writer_list = []
time_posted_list = []
response = []
# Count Variables
totalRecords=0
totalRecordsOut=0
page=-1
timestamp=0
browser = mechanicalsoup.StatefulBrowser()
On_This_Page = False
logged_in = False
# Loop until logged in
browser.open(url)
while not logged_in:
nick = input("Enter your nickname for slashdot.org: ") #Chazzio1
passw = getpass.getpass("Enter your password: ") #<PASSWORD>
while(timestamp<1):
try:
timestamp = int(input("Enter timestamp in seconds since 1970: ")) # 1535241600
except Exception:
"Not a valid number"
browser.select_form(nr=1)
browser['unickname'] = nick
browser['upasswd'] = passw
result = browser.submit_selected()
response = result.content
soup_0 = BeautifulSoup(response, "lxml")
user = str(soup_0.find_all(class_="user-access"))
if user.find(nick)>0:
logged_in=True
print ("Logged in")
else:
print ("Try Again\n")
time.sleep(5)
# Loop until date found
while not(On_This_Page):
page+=1
try:
browser.open(url)
except Exception:
print ("Error cannot open next page ")
print ("Page " + url + " may not exist")
browser.close()
break
#release resources
# HTML to BeautifulSoup
response = ""
response=result.content
soup = ""
soup = BeautifulSoup(response, "lxml")
# Find all Headlines
article_headline = soup.find_all('span',class_="story-title")
poster = soup.find_all('span', class_="story-byline")
time_posted = soup.find_all('time')
# Store all required info
for headline in article_headline:
title = '\"'+headline.a.get_text()+'\"'
article_headline_list.append(title) #Get Text headline
totalRecords+=1
for t in time_posted:
time_posted_list.append(convertTime(t.get("datetime")))
for val in poster:
writer = val.find(text=True)
writer = " ".join(re.split("\s+", writer, flags=re.UNICODE))
writer = writer.replace(' ', '')
writer = writer.replace('Posted','')
writer = writer.replace('by','')
writer_list.append(writer)
# Make output List as per format required
for j in range(totalRecords):
if (int(time_posted_list[j]) < timestamp):
On_This_Page = True
break
else:
outputList.append(str(
"{" "\n" "\"headline\": ") + str(article_headline_list[j]) +
"\n\"author\": \"" + str(writer_list[j]) +
"\"\n\"date\": " + str(int(time_posted_list[j])) + "\n},\n"
)
totalRecordsOut+=1;
# All records on page within timeframe, open next page
if totalRecordsOut%totalRecords == 0:
totalRecordsOut=0
url = str('https://slashdot.org/?page=') + str(page+1)
# Display this message while loading other pages
print ("Opening next page " + url)
for headline in outputList:
print (headline)
print ("Total headlines returned: " + str(totalRecordsOut))
browser.close()
| [
"re.split",
"getpass.getpass",
"time.replace",
"time.sleep",
"datetime.datetime",
"mechanicalsoup.StatefulBrowser",
"datetime.datetime.strptime",
"bs4.BeautifulSoup"
] | [((718, 750), 'mechanicalsoup.StatefulBrowser', 'mechanicalsoup.StatefulBrowser', ([], {}), '()\n', (748, 750), False, 'import mechanicalsoup\n'), ((936, 976), 'getpass.getpass', 'getpass.getpass', (['"""Enter your password: """'], {}), "('Enter your password: ')\n", (951, 976), False, 'import getpass\n'), ((1376, 1407), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response', '"""lxml"""'], {}), "(response, 'lxml')\n", (1389, 1407), False, 'from bs4 import BeautifulSoup\n'), ((1584, 1597), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (1594, 1597), False, 'import time\n'), ((1978, 2009), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response', '"""lxml"""'], {}), "(response, 'lxml')\n", (1991, 2009), False, 'from bs4 import BeautifulSoup\n'), ((289, 339), 'datetime.datetime.strptime', 'datetime.strptime', (['time[3:]', '"""%A %B %d %Y %I %M%p"""'], {}), "(time[3:], '%A %B %d %Y %I %M%p')\n", (306, 339), False, 'from datetime import datetime\n'), ((2577, 2619), 're.split', 're.split', (['"""\\\\s+"""', 'writer'], {'flags': 're.UNICODE'}), "('\\\\s+', writer, flags=re.UNICODE)\n", (2585, 2619), False, 'import re\n'), ((359, 379), 'datetime.datetime', 'datetime', (['(1970)', '(1)', '(1)'], {}), '(1970, 1, 1)\n', (367, 379), False, 'from datetime import datetime\n'), ((204, 225), 'time.replace', 'time.replace', (['""","""', '""""""'], {}), "(',', '')\n", (216, 225), False, 'import time\n')] |
import os
from bddcli import Given, stdout, Application, when, given
def foos(): # pragma: no cover
e = os.environ.copy()
# For Linux and Windows
discarded_variables = ['LC_CTYPE', 'PWD',
'COMSPEC', 'PATHEXT', 'PROMPT', 'SYSTEMROOT']
# Windows environment variables are case-insensitive, lowercase them
print(' '.join(
f'{k}: {v}' for k, v in e.items() if k not in discarded_variables
).lower())
app = Application('foo', 'tests.test_environ:foos')
def test_environ():
with Given(app, environ={'bar': 'baz'}):
assert stdout == 'bar: baz\n'
when(environ=given - 'bar')
assert stdout == '\n'
when(environ=given + {'qux': 'quux'})
assert stdout == 'bar: baz qux: quux\n'
when(environ=given | {'bar': 'quux'})
assert stdout == 'bar: quux\n'
| [
"bddcli.Application",
"os.environ.copy",
"bddcli.when",
"bddcli.Given"
] | [((467, 512), 'bddcli.Application', 'Application', (['"""foo"""', '"""tests.test_environ:foos"""'], {}), "('foo', 'tests.test_environ:foos')\n", (478, 512), False, 'from bddcli import Given, stdout, Application, when, given\n'), ((112, 129), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (127, 129), False, 'import os\n'), ((544, 578), 'bddcli.Given', 'Given', (['app'], {'environ': "{'bar': 'baz'}"}), "(app, environ={'bar': 'baz'})\n", (549, 578), False, 'from bddcli import Given, stdout, Application, when, given\n'), ((627, 654), 'bddcli.when', 'when', ([], {'environ': "(given - 'bar')"}), "(environ=given - 'bar')\n", (631, 654), False, 'from bddcli import Given, stdout, Application, when, given\n'), ((694, 731), 'bddcli.when', 'when', ([], {'environ': "(given + {'qux': 'quux'})"}), "(environ=given + {'qux': 'quux'})\n", (698, 731), False, 'from bddcli import Given, stdout, Application, when, given\n'), ((789, 826), 'bddcli.when', 'when', ([], {'environ': "(given | {'bar': 'quux'})"}), "(environ=given | {'bar': 'quux'})\n", (793, 826), False, 'from bddcli import Given, stdout, Application, when, given\n')] |
from .base import *
import dj_database_url
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# If DEBUG is False, send the errors to the email:
ADMINS = [
('Andre', '<EMAIL>'),
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'VEnCode_Django.urls'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': config("DB_NAME"),
'USER': config("DB_USER"),
'PASSWORD': '',
'HOST': config("DB_HOST"),
'PORT': '',
}
}
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# Configure Django App for Heroku.
django_heroku.settings(locals())
# Production set up for heroku:
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Allauth configurations, backend to send sign-in e-mail verification e-mail:
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# REDIS related settings
CELERY_BROKER_URL = config('REDIS_URL', default="redis://")
CELERY_RESULT_BACKEND = config('REDIS_URL', default="redis://")
BROKER_URL = config('REDIS_URL', default="redis://")
# Allauth related settings
EMAIL_HOST = config("MAILGUN_SMTP_SERVER")
EMAIL_PORT = config("MAILGUN_SMTP_PORT")
EMAIL_HOST_USER = DEFAULT_FROM_EMAIL = config("MAILGUN_SMTP_LOGIN")
EMAIL_HOST_PASSWORD = config("MAILGUN_SMTP_PASSWORD")
| [
"dj_database_url.config"
] | [((1363, 1403), 'dj_database_url.config', 'dj_database_url.config', ([], {'conn_max_age': '(500)'}), '(conn_max_age=500)\n', (1385, 1403), False, 'import dj_database_url\n')] |
import logging
import numpy as np
from openpnm.utils import SettingsAttr, Docorator
from openpnm.integrators import ScipyRK45
from openpnm.algorithms import GenericAlgorithm
from openpnm.algorithms._solution import SolutionContainer, TransientSolution
logger = logging.getLogger(__name__)
docstr = Docorator()
@docstr.dedent
class TransientMultiPhysicsSettings:
r"""
Parameters
----------
%(GenericAlgorithmSettings.parameters)s
algorithms: list
List of transient algorithm objects to be solved in a coupled manner
"""
algorithms = []
@docstr.dedent
class TransientMultiPhysics(GenericAlgorithm):
r"""
A subclass for transient multiphysics simulations.
"""
def __init__(self, algorithms, settings=None, **kwargs):
self.settings = SettingsAttr(TransientMultiPhysicsSettings, settings)
self.settings.algorithms = [alg.name for alg in algorithms]
self._algs = algorithms
super().__init__(settings=self.settings, **kwargs)
def run(self, x0, tspan, saveat=None, integrator=None):
"""
        Runs all of the transient algorithms simultaneously and returns the
        solution. Parameters are adapted from transient reactive transport.

        Parameters
----------
x0 : ndarray or float
Array (or scalar) containing initial condition values.
tspan : array_like
Tuple (or array) containing the integration time span.
saveat : array_like or float, optional
If an array is passed, it signifies the time points at which
the solution is to be stored, and if a scalar is passed, it
refers to the interval at which the solution is to be stored.
integrator : Integrator, optional
            Integrator object which will be used to do the time stepping.
Can be instantiated using openpnm.integrators module.
Returns
-------
TransientSolution
The solution object, which is basically a numpy array with
the added functionality that it can be called to return the
solution at intermediate times (i.e., those not stored in the
solution object). In the case of multiphysics, the solution object
is a combined array of solutions for each physics. The solution
for each physics is available on each algorithm object
independently.
"""
logger.info('Running TransientMultiphysics')
if np.isscalar(saveat):
saveat = np.arange(*tspan, saveat)
if (saveat is not None) and (tspan[1] not in saveat):
saveat = np.hstack((saveat, [tspan[1]]))
integrator = ScipyRK45() if integrator is None else integrator
for i, alg in enumerate(self._algs):
# Perform pre-solve validations
alg._validate_settings()
alg._validate_data_health()
            # Write x0 to the algorithm obj (needed by _update_iterative_props)
x0_i = self._get_x0(x0, i)
alg['pore.ic'] = x0_i = np.ones(alg.Np, dtype=float) * x0_i
alg._merge_inital_and_boundary_values()
# Build RHS (dx/dt = RHS), then integrate the system of ODEs
rhs = self._build_rhs()
# Integrate RHS using the given solver
soln = integrator.solve(rhs, x0, tspan, saveat)
# Return dictionary containing solution
self.soln = SolutionContainer()
for i, alg in enumerate(self._algs):
# Slice soln and attach as TransientSolution object to each alg
t = soln.t
x = soln[i*alg.Np:(i+1)*alg.Np, :]
alg.soln = TransientSolution(t, x)
# Add solution of each alg to solution dictionary
self.soln[alg.settings['quantity']] = alg.soln
return self.soln
def _run_special(self, x0): ...
def _build_rhs(self):
"""
Returns a function handle, which calculates dy/dt = rhs(y, t).
Notes
-----
``y`` is a composite array that contains ALL the variables that
the multiphysics algorithm solves for, e.g., if the constituent
algorithms are ``TransientFickianDiffusion``, and
``TransientFourierConduction``, ``y[0:Np-1]`` refers to the
concentration, and ``[Np:2*Np-1]`` refers to the temperature
values.
"""
def ode_func(t, y):
# Initialize RHS
rhs = []
for i, alg in enumerate(self._algs):
# Get x from y, assume alg.Np is same for all algs
x = self._get_x0(y, i) # again use helper function
# Store x onto algorithm,
alg.x = x
# Build A and b
alg._update_A_and_b()
A = alg.A.tocsc()
b = alg.b
# Retrieve volume
V = alg.network[alg.settings["pore_volume"]]
                # Calculate RHS
rhs_alg = np.hstack(-A.dot(x) + b)/V
rhs = np.hstack((rhs, rhs_alg))
return rhs
return ode_func
def _get_x0(self, x0, i):
tmp = [alg.Np for alg in self._algs]
idx_end = np.cumsum(tmp)
idx_start = np.hstack((0, idx_end[:-1]))
x0 = x0[idx_start[i]:idx_end[i]]
return x0
| [
"openpnm.algorithms._solution.TransientSolution",
"numpy.isscalar",
"openpnm.utils.Docorator",
"numpy.ones",
"openpnm.algorithms._solution.SolutionContainer",
"numpy.hstack",
"numpy.cumsum",
"openpnm.integrators.ScipyRK45",
"numpy.arange",
"openpnm.utils.SettingsAttr",
"logging.getLogger"
] | [((261, 288), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (278, 288), False, 'import logging\n'), ((298, 309), 'openpnm.utils.Docorator', 'Docorator', ([], {}), '()\n', (307, 309), False, 'from openpnm.utils import SettingsAttr, Docorator\n'), ((797, 850), 'openpnm.utils.SettingsAttr', 'SettingsAttr', (['TransientMultiPhysicsSettings', 'settings'], {}), '(TransientMultiPhysicsSettings, settings)\n', (809, 850), False, 'from openpnm.utils import SettingsAttr, Docorator\n'), ((2497, 2516), 'numpy.isscalar', 'np.isscalar', (['saveat'], {}), '(saveat)\n', (2508, 2516), True, 'import numpy as np\n'), ((3432, 3451), 'openpnm.algorithms._solution.SolutionContainer', 'SolutionContainer', ([], {}), '()\n', (3449, 3451), False, 'from openpnm.algorithms._solution import SolutionContainer, TransientSolution\n'), ((5210, 5224), 'numpy.cumsum', 'np.cumsum', (['tmp'], {}), '(tmp)\n', (5219, 5224), True, 'import numpy as np\n'), ((5245, 5273), 'numpy.hstack', 'np.hstack', (['(0, idx_end[:-1])'], {}), '((0, idx_end[:-1]))\n', (5254, 5273), True, 'import numpy as np\n'), ((2539, 2564), 'numpy.arange', 'np.arange', (['*tspan', 'saveat'], {}), '(*tspan, saveat)\n', (2548, 2564), True, 'import numpy as np\n'), ((2648, 2679), 'numpy.hstack', 'np.hstack', (['(saveat, [tspan[1]])'], {}), '((saveat, [tspan[1]]))\n', (2657, 2679), True, 'import numpy as np\n'), ((2701, 2712), 'openpnm.integrators.ScipyRK45', 'ScipyRK45', ([], {}), '()\n', (2710, 2712), False, 'from openpnm.integrators import ScipyRK45\n'), ((3666, 3689), 'openpnm.algorithms._solution.TransientSolution', 'TransientSolution', (['t', 'x'], {}), '(t, x)\n', (3683, 3689), False, 'from openpnm.algorithms._solution import SolutionContainer, TransientSolution\n'), ((3072, 3100), 'numpy.ones', 'np.ones', (['alg.Np'], {'dtype': 'float'}), '(alg.Np, dtype=float)\n', (3079, 3100), True, 'import numpy as np\n'), ((5042, 5067), 'numpy.hstack', 'np.hstack', (['(rhs, rhs_alg)'], {}), '((rhs, rhs_alg))\n', (5051, 5067), True, 'import numpy as np\n')] |
# Copyright (c) OpenMMLab. All rights reserved.
import mmocr.utils as utils
import torch
import torch.nn.functional as F
from mmdeploy.core import FUNCTION_REWRITER
@FUNCTION_REWRITER.register_rewriter(
func_name='mmocr.models.textrecog.encoders.SAREncoder.forward',
backend='default')
def sar_encoder__forward(ctx, self, feat, img_metas=None):
"""Rewrite `forward` of SAREncoder for default backend.
Rewrite this function to:
1. convert tuple value of feat.size to int, making model exportable.
2. use torch.ceil to replace original math.ceil and if else in mmocr.
Args:
ctx (ContextCaller): The context with additional information.
self: The instance of the class SAREncoder.
feat (Tensor): Encoded feature map of shape (N, C, H, W).
img_metas (Optional[list[dict]]): A list of image info dict where each
dict has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys, see
:class:`mmdet.datasets.pipelines.Collect`.
Returns:
holistic_feat (Tensor): A feature map output from SAREncoder. The shape
[N, M].
"""
if img_metas is not None:
assert utils.is_type_list(img_metas, dict)
assert len(img_metas) == feat.size(0)
valid_ratios = None
if img_metas is not None:
valid_ratios = [
img_meta.get('valid_ratio', 1.0) for img_meta in img_metas
] if self.mask else None
h_feat = int(feat.size(2))
feat_v = F.max_pool2d(feat, kernel_size=(h_feat, 1), stride=1, padding=0)
feat_v = feat_v.squeeze(2) # bsz * C * W
feat_v = feat_v.permute(0, 2, 1).contiguous() # bsz * W * C
holistic_feat = self.rnn_encoder(feat_v)[0] # bsz * T * C
if valid_ratios is not None:
valid_hf = []
T = holistic_feat.size(1)
for i, valid_ratio in enumerate(valid_ratios):
# use torch.ceil to replace original math.ceil and if else in mmocr
valid_step = torch.ceil(T * valid_ratio).long() - 1
valid_hf.append(holistic_feat[i, valid_step, :])
valid_hf = torch.stack(valid_hf, dim=0)
else:
valid_hf = holistic_feat[:, -1, :] # bsz * C
holistic_feat = self.linear(valid_hf) # bsz * C
return holistic_feat
| [
"mmocr.utils.is_type_list",
"torch.stack",
"torch.ceil",
"torch.nn.functional.max_pool2d",
"mmdeploy.core.FUNCTION_REWRITER.register_rewriter"
] | [((169, 292), 'mmdeploy.core.FUNCTION_REWRITER.register_rewriter', 'FUNCTION_REWRITER.register_rewriter', ([], {'func_name': '"""mmocr.models.textrecog.encoders.SAREncoder.forward"""', 'backend': '"""default"""'}), "(func_name=\n 'mmocr.models.textrecog.encoders.SAREncoder.forward', backend='default')\n", (204, 292), False, 'from mmdeploy.core import FUNCTION_REWRITER\n'), ((1612, 1676), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['feat'], {'kernel_size': '(h_feat, 1)', 'stride': '(1)', 'padding': '(0)'}), '(feat, kernel_size=(h_feat, 1), stride=1, padding=0)\n', (1624, 1676), True, 'import torch.nn.functional as F\n'), ((1301, 1336), 'mmocr.utils.is_type_list', 'utils.is_type_list', (['img_metas', 'dict'], {}), '(img_metas, dict)\n', (1319, 1336), True, 'import mmocr.utils as utils\n'), ((2221, 2249), 'torch.stack', 'torch.stack', (['valid_hf'], {'dim': '(0)'}), '(valid_hf, dim=0)\n', (2232, 2249), False, 'import torch\n'), ((2102, 2129), 'torch.ceil', 'torch.ceil', (['(T * valid_ratio)'], {}), '(T * valid_ratio)\n', (2112, 2129), False, 'import torch\n')] |
# Tests for WikiText parsing
#
# Copyright (c) 2020-2021 <NAME>. See file LICENSE and https://ylonen.org
import unittest
from wikitextprocessor import Wtp
from wikitextprocessor.parser import (print_tree, NodeKind, WikiNode)
def parse_with_ctx(title, text, **kwargs):
assert isinstance(title, str)
assert isinstance(text, str)
ctx = Wtp()
ctx.analyze_templates()
ctx.start_page(title)
root = ctx.parse(text, **kwargs)
print("parse_with_ctx: root", type(root), root)
return root, ctx
def parse(title, text, **kwargs):
root, ctx = parse_with_ctx(title, text, **kwargs)
assert isinstance(root, WikiNode)
assert isinstance(ctx, Wtp)
return root
class NodeExpTests(unittest.TestCase):
def backcvt(self, text, expected):
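        """Parse text, convert the parse tree back to wikitext, and assert it matches expected."""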
root, ctx = parse_with_ctx("test", text)
self.assertEqual(ctx.errors, [])
self.assertEqual(ctx.warnings, [])
t = ctx.node_to_wikitext(root)
self.assertEqual(t, expected)
def tohtml(self, text, expected):
root, ctx = parse_with_ctx("test", text)
self.assertEqual(ctx.errors, [])
self.assertEqual(ctx.warnings, [])
t = ctx.node_to_html(root)
self.assertEqual(t, expected)
def totext(self, text, expected):
root, ctx = parse_with_ctx("test", text)
self.assertEqual(ctx.errors, [])
self.assertEqual(ctx.warnings, [])
t = ctx.node_to_text(root)
self.assertEqual(t, expected)
def test_basic1(self):
self.backcvt("", "")
def test_basic2(self):
self.backcvt("foo bar\nxyz\n", "foo bar\nxyz\n")
def test_basic3(self):
self.backcvt("&amp;", "&amp;")
def test_basic4(self):
self.backcvt("{{", "{{")
def test_title1(self):
self.backcvt("== T1 ==\nxyz\n", "\n== T1 ==\n\nxyz\n")
def test_title2(self):
self.backcvt("=== T1 ===\nxyz\n", "\n=== T1 ===\n\nxyz\n")
def test_title3(self):
self.backcvt("==== T1 ====\nxyz\n", "\n==== T1 ====\n\nxyz\n")
def test_title4(self):
self.backcvt("===== T1 =====\nxyz\n", "\n===== T1 =====\n\nxyz\n")
def test_title5(self):
self.backcvt("====== T1 ======\nxyz\n", "\n====== T1 ======\n\nxyz\n")
def test_hline1(self):
self.backcvt("aaa\n----\nbbbb", "aaa\n\n----\n\nbbbb")
def test_list1(self):
self.backcvt("*a\n* b\n", "*a\n* b\n")
def test_list2(self):
self.backcvt("abc\n*a\n* b\ndef", "abc\n*a\n* b\ndef")
def test_list3(self):
self.backcvt("abc\n*a\n*# c\n*# d\n* b\ndef",
"abc\n*a\n*# c\n*# d\n* b\ndef")
def test_list4(self):
self.backcvt("abc\n*a\n**b\n*:c\n",
"abc\n*a\n**b\n*:c\n")
def test_pre1(self):
self.backcvt("a<pre>foo\n bar</pre>b",
"a<pre>foo\n bar</pre>b")
def test_preformatted1(self):
self.backcvt(" a\n b", " a\n b")
def test_link1(self):
self.backcvt("[[foo bar]]", "[[foo bar]]")
def test_link2(self):
self.backcvt("[[foo|bar]]", "[[foo|bar]]")
def test_link3(self):
self.backcvt("a [[foo]]s bar", "a [[foo]]s bar")
def test_template1(self):
self.backcvt("{{foo|a|b|c=4|{{{arg}}}}}", "{{foo|a|b|c=4|{{{arg}}}}}")
def test_template2(self):
self.backcvt("{{foo}}", "{{foo}}")
def test_template3(self):
self.backcvt("{{!}}", "{{!}}")
def test_templatearg1(self):
self.backcvt("{{{1}}}", "{{{1}}}")
    def test_templatearg2(self):
        self.backcvt("{{{{{templ}}}}}", "{{{{{templ}}}}}")
    def test_templatearg3(self):
        self.backcvt("{{{a|def}}}", "{{{a|def}}}")
    def test_templatearg4(self):
        self.backcvt("{{{a|}}}", "{{{a|}}}")
def test_parserfn1(self):
self.backcvt("{{#expr: 1 + 2}}", "{{#expr: 1 + 2}}")
def test_parserfn2(self):
self.backcvt("{{#expr:1+{{v}}}}", "{{#expr:1+{{v}}}}")
def test_parserfn3(self):
self.backcvt("{{ROOTPAGENAME}}", "{{ROOTPAGENAME:}}")
def test_url1(self):
self.backcvt("[https://wikipedia.org]", "[https://wikipedia.org]")
def test_url2(self):
self.backcvt("https://wikipedia.org/", "[https://wikipedia.org/]")
def test_url3(self):
self.backcvt("https://wikipedia.org/x/y?a=7%255",
"[https://wikipedia.org/x/y?a=7%255]")
def test_table1(self):
self.backcvt("{| |}", "\n{| \n\n|}\n")
def test_table2(self):
self.backcvt('{| class="x"\n|}', '\n{| class="x"\n\n|}\n')
def test_tablecaption1(self):
self.backcvt("{|\n|+\ncapt\n|}", "\n{| \n\n|+ \n\ncapt\n\n|}\n")
def test_tablerowcell1(self):
self.backcvt("{|\n|- a=1\n| cell\n|}",
'\n{| \n\n|- a="1"\n\n| cell\n\n\n|}\n')
def test_tablerowhdr1(self):
self.backcvt("{|\n|- a=1\n! cell\n|}",
'\n{| \n\n|- a="1"\n\n! cell\n\n\n|}\n')
def test_magicword1(self):
self.backcvt("a\n__TOC__\nb", "a\n\n__TOC__\n\nb")
def test_html1(self):
self.backcvt("a<b>foo</b>b", "a<b>foo</b>b")
    def test_html2(self):
self.backcvt('a<span class="bar">foo</span>b',
'a<span class="bar">foo</span>b')
def test_italic1(self):
self.backcvt("''i''", "''i''")
def test_bold1(self):
        self.backcvt("'''b'''", "'''b'''")
def test_text1(self):
self.totext("", "")
def test_text2(self):
self.totext("\nfoo bar ", "foo bar")
def test_text3(self):
self.totext("<b>foo</b>", "foo")
def test_text4(self):
self.totext("<h1>foo</h1><p>bar</p>", "foo\n\nbar")
def test_text5(self):
self.totext("foo<ref x=1>bar</ref> z", "foo z")
| [
"wikitextprocessor.Wtp"
] | [((349, 354), 'wikitextprocessor.Wtp', 'Wtp', ([], {}), '()\n', (352, 354), False, 'from wikitextprocessor import Wtp\n')] |
import collections
import io
import json
import math
import zipfile
import logging
from urllib.error import URLError
from urllib.request import urlopen
import pandas as pd
from matplotlib import pyplot as plt
# Getting data
def set_source(filename):
"""
Sets source global variable to the path of .zip file.
:param filename: path to the downloaded .zip file
:return: None
You can provide relative path to file
>>> set_source('facebook-YourName.zip')
Absolute path (works only on Windows)
>>> set_source('C:/Users/Admin/Downloads/facebook-YourName.zip')
"""
filename = f'file:///{filename}' if filename[1] == ':' \
else (f'file:./{filename}' if filename.endswith('.zip') else f'file:./{filename}.zip')
try:
global source
source = zipfile.ZipFile(io.BytesIO(urlopen(filename).read()))
except URLError:
logging.error('File not found, try again.')
def get_data(conversation=None, chars=False, user=False):
"""
Reads data from messages.json or messages_chars.json
and finds key based on the beginning of the string.
:param conversation: beginning of the conversation id
or None for overall statistics (default None)
:param chars: True for counting chars in messages_chars.json,
False for counting messages in messages.json (default False)
:param user: True for user name instead of conversation id,
False otherwise (default False)
:return: dictionary containing the data and if applicable
a key pointing to a specific conversation, otherwise None
"""
try:
data = json.loads(open('messages_chars.json' if chars else 'messages.json', 'r', encoding='utf-8').read())
if user:
data = pd.DataFrame(data).fillna(0).astype('int')
for key in data.index:
if key.lower().startswith(conversation.lower()):
return data, key
else:
logging.error('Conversation not found.')
return None, None
if conversation is not None:
for key in data.keys():
if key.lower().startswith(conversation.lower()):
return data, key
else:
logging.error('Conversation not found.')
return None, None
else:
return data, None
except FileNotFoundError:
logging.error('Characters not counted.' if chars else 'Messages not counted.')
# Counting messages and characters
def count_messages():
"""
Counts messages and saves output to messages.json.
:return: None
"""
namelist = source.namelist()
total, senders = {}, {x.split('/')[2] for x in namelist
if (x.endswith('/') and x.startswith('messages/inbox/') and x != 'messages/inbox/')}
for sender in senders:
messages, i = collections.Counter(), 0
while True:
try:
i += 1
messages += collections.Counter(pd.DataFrame(json.loads(
source.open('messages/inbox/' + sender + '/message_' + str(i) + '.json').read())[
'messages']).iloc[:, 0])
except KeyError:
break
total[sender] = {k.encode('iso-8859-1').decode('utf-8'): v for k, v in messages.items()}
total[sender]['total'] = sum(messages.values())
with open('messages.json', 'w', encoding='utf-8') as output:
json.dump(total, output, ensure_ascii=False)
def count_characters():
"""
Counts characters from messages and saves output to messages_chars.json.
:return: None
"""
namelist = source.namelist()
total, senders = {}, {x.split('/')[2] for x in namelist
if (x.endswith('/') and x.startswith('messages/inbox/') and x != 'messages/inbox/')}
for sender in senders:
counted_all, i = collections.Counter(), 0
while True:
try:
i += 1
frame = pd.DataFrame(json.loads(
source.open('messages/inbox/' + sender + '/message_' + str(i) + '.json').read())['messages'])
frame['counted'] = frame.apply(
lambda row: collections.Counter(str(row['content']).encode('iso-8859-1').decode('utf-8')), axis=1)
counted_all += sum(frame['counted'], collections.Counter())
except KeyError:
break
total[sender] = dict(counted_all)
with open('messages_chars.json', 'w', encoding='utf-8') as output:
json.dump(total, output, ensure_ascii=False)
def count(chars=False):
"""
Counts messages or characters from messages
and saves output to the file.
:param chars: True for counting characters,
False for counting messages (default False)
:return: None
"""
if chars:
count_characters()
else:
count_messages()
# Statistics
def statistics(data_source, conversation=None, chars=False):
"""
Prints statistics of given data source.
:param data_source: dictionary containing prepared data generated
by the get_data() function
:param conversation: conversation id or None for overall statistics
(default None)
:param chars: True for character statistics instead of messages,
False otherwise (default False)
:return: None
"""
if conversation is None:
if chars:
characters_statistics(data_source)
else:
messages_statistics(data_source)
else:
if chars:
raise NotImplementedError()
else:
print(conversation)
conversation_statistics(data_source, conversation)
def messages_statistics(data_source):
"""
Prints messages overall statistics of given data source.
:param data_source: dictionary containing prepared data generated
by the get_data() function
:return: None
"""
data_source = pd.DataFrame(data_source).fillna(0).astype('int')
pd.set_option('display.max_rows', None)
total_values = data_source.loc['total'].sort_values(ascending=False)
print(total_values)
print(total_values.describe())
total_values = total_values.sort_values()
plt.rcdefaults()
plt.barh(total_values.index.astype(str).str[:10][-20:], total_values.iloc[-20:])
plt.show()
def conversation_statistics(data_source, conversation):
"""
Prints messages statistics for specific conversation of given data source.
:param data_source: dictionary containing prepared data generated
by the get_data() function
:param conversation: conversation id, or key from get_data() function
:return: None
"""
data_source = pd.DataFrame(data_source)
data_source = data_source.loc[:, conversation]
data_source = data_source[data_source > 0].sort_values(ascending=False).astype('int')
pd.set_option('display.max_rows', None)
print(data_source)
def characters_statistics(data_source):
"""
Prints characters statistics of given data source.
:param data_source: dictionary containing prepared data generated
by the get_data() function
:return: None
"""
data_source = pd.DataFrame(data_source)
data_source['total'] = data_source.sum(axis=1)
data_source = data_source.iloc[:, -1]
data_source = data_source.sort_values(ascending=False).astype('int')
pd.set_option('display.max_rows', None)
print(data_source)
print(f'Total characters: {data_source.sum()}')
# TODO characters conversation statistics
def characters_conversation_statistics(data_source, conversation):
"""
Prints characters statistics for specific conversation of given data source.
:param data_source: dictionary containing prepared data generated
by the get_data() function
:param conversation: conversation id, or key from get_data() function
:return: None
"""
pass
# User statistics
def user_statistics(data_source, user_name):
"""
Prints detailed statistics for specific person of given data source.
:param data_source: dictionary containing prepared data generated
by the get_data() function
:param user_name: person name, or key from get_data() function
:return: None
"""
data_source = data_source.loc[user_name]
data_source = data_source[data_source > 0].sort_values(ascending=False)
data_source.index = data_source.index.map(lambda x: x.split('_')[0][:30])
pd.set_option('display.max_rows', None)
print(user_name, 'statistics:')
print(data_source)
# Intervals
def interval_count(inbox_name, function, delta=0.0):
"""
Counts number of messages based on given timeframe function
:param inbox_name: directory name that contains requested messages
(usually conversation id)
:param function: pandas function that returns requested time part
:param delta: number of hours to time shift by
and count messages differently (default 0.0)
:return: dictionary of number of messages grouped by timeframe
"""
messages, i = collections.Counter(), 0
while True:
try:
i += 1
# iterates over all .json files in requested directory
messages += collections.Counter(function(pd.to_datetime(pd.DataFrame(json.loads(
source.open('messages/inbox/' + inbox_name + '/message_' + str(i) + '.json').read())[
'messages']).iloc[:, 1], unit='ms').dt.tz_localize('UTC').dt.tz_convert(
'Europe/Warsaw').add(pd.Timedelta(hours=-delta))))
except KeyError:
break
return messages
def interval_plot(messages):
"""
Shows chart based on previously defined timeframe
:param messages: dictionary of number of messages
grouped by timeframe
:return: None
"""
messages = pd.Series(messages).sort_index()
print(messages.describe())
plt.bar(messages.index, messages)
plt.savefig('messages.pdf')
plt.show()
# Hours
def hours(difference, conversation=None):
"""
Shows chart of average number of messages
send by hour throughout the day.
:param difference: number of hours to time shift by
and show statistics differently
:param conversation: conversation id or None for statistics
from all conversations (default None)
:return: None
"""
if conversation is None:
hours_chats(difference)
else:
data = json.loads(open('messages.json', 'r', encoding='utf-8').read())
for key in data.keys():
if key.lower().startswith(conversation.lower()):
hours_conversation(key, difference)
break
else:
print('Conversation not found.')
def hours_conversation(conversation, delta=0.0):
"""
Shows chart of average number of messages send
in specific conversation by hour throughout the day.
:param conversation: conversation id, or key from get_data() function
:param delta: number of hours to time shift by
and show statistics differently (default 0.0)
:return: None
"""
hours_plot(interval_count(conversation, lambda x: x.dt.hour, delta), delta)
def hours_chats(delta=0.0):
"""
Shows chart of average number of messages send
across all conversations by hour throughout the day.
:param delta: number of hours to time shift by
and show statistics differently (default 0.0)
:return: None
"""
messages = collections.Counter()
for sender in {x.split('/')[2] for x in source.namelist()
if (x.endswith('/') and x.startswith('messages/inbox/') and x != 'messages/inbox/')}:
messages += interval_count(sender, lambda x: x.dt.hour, delta)
hours_plot(messages, delta)
def hours_plot(messages, delta):
"""
Shows chart of average number of messages
grouped by hour throughout the day.
:param messages: dictionary of number of messages
grouped by timeframe
:param delta: number of hours to time shift by
and show statistics differently
:return: None
"""
messages = pd.DataFrame(messages, index=[0])
print(messages.iloc[0].describe())
plt.bar(messages.columns, messages.iloc[0])
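    # Label the 24 hour ticks as clock times, folding the fractional part of the shift (delta) into the minutes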
plt.xticks(list(range(24)), [f'{x % 24}:{int(abs((delta - int(delta)) * 60)):02}'
for x in range(-(-math.floor(delta) % 24),
math.floor(delta) % 24 if math.floor(delta) % 24 != 0 else 24)], rotation=90)
plt.xlim(-1, 24)
plt.savefig('messages.pdf')
plt.show()
# Daily
def daily(difference, conversation=None):
"""
Shows chart of number of messages per day.
:param difference: number of hours to time shift by
and show statistics differently
:param conversation: conversation id or None for statistics
from all conversations (default None)
:return: None
"""
if conversation is None:
daily_chats(difference)
else:
data = json.loads(open('messages.json', 'r', encoding='utf-8').read())
for key in data.keys():
if key.lower().startswith(conversation.lower()):
daily_conversation(key, difference)
break
else:
print('Conversation not found.')
def daily_conversation(conversation, delta=0.0):
"""
Shows chart of number of messages per day
from the beginning of the conversation.
:param conversation: conversation id, or key from get_data() function
:param delta: number of hours to time shift by
and show statistics differently (default 0.0)
:return: None
"""
interval_plot(interval_count(conversation, lambda x: x.dt.date, delta))
def daily_chats(delta=0.0):
"""
Shows chart of number of messages per day
across all conversation.
:param delta: number of hours to time shift by
and show statistics differently (default 0.0)
:return: None
"""
messages = collections.Counter()
for sender in {x.split('/')[2] for x in source.namelist() if
(x.endswith('/') and x.startswith('messages/inbox/') and x != 'messages/inbox/')}:
messages += interval_count(sender, lambda x: x.dt.date, delta)
interval_plot(messages)
# Monthly (not working)
def monthly_conversation(conversation): # TODO not working charts for monthly
"""
Shows chart of number of messages per month.
:param conversation: conversation id or None for statistics
from all conversations (default None)
:return: None
"""
interval_plot(interval_count(conversation, lambda x: x.dt.to_period("M").astype('datetime64[ns]')))
def monthly_chats():
"""
Shows chart of number of messages per month
across all conversation.
:return: None
"""
messages = collections.Counter()
for sender in {x.split('/')[2] for x in source.namelist() if
(x.endswith('/') and x.startswith('messages/inbox/') and x != 'messages/inbox/')}:
messages += interval_count(sender, lambda x: x.dt.to_period("M").astype('datetime64[ns]'))
interval_plot(messages)
# Yearly
def yearly(conversation=None):
"""
Shows chart of number of messages per year.
:param conversation: conversation id or None for statistics
from all conversations (default None)
:return: None
"""
if conversation is None:
yearly_chats()
else:
data = json.loads(open('messages.json', 'r', encoding='utf-8').read())
for key in data.keys():
if key.lower().startswith(conversation.lower()):
yearly_conversation(key)
break
else:
print('Conversation not found.')
def yearly_conversation(conversation):
"""
Shows chart of number of messages per year
from the beginning of the conversation.
:param conversation: conversation id, or key from get_data() function
:return: None
"""
interval_plot(interval_count(conversation, lambda x: x.dt.year))
def yearly_chats():
"""
Shows chart of number of messages per year
across all conversation.
:return: None
"""
messages = collections.Counter()
for sender in {x.split('/')[2] for x in source.namelist()
if (x.endswith('/') and x.startswith('messages/inbox/') and x != 'messages/inbox/')}:
messages += interval_count(sender, lambda x: x.dt.year)
messages = pd.DataFrame(messages, index=[0])
print(messages.iloc[0].describe())
plt.bar(messages.columns, messages.iloc[0])
plt.savefig('messages.pdf')
plt.show()
if __name__=='__main__':
while True:
filename = input('Enter filename: ')
filename = f'file:///{filename}' if filename[1] == ':'\
else (f'file:./{filename}' if filename.endswith('.zip') else f'file:./{filename}.zip')
try:
source = zipfile.ZipFile(io.BytesIO(urlopen(filename).read()))
break
except URLError:
print('File not found, try again.')
while True:
user_input = input('>').split(' ')
if user_input[0] == 'exit':
break
if user_input[0] == '' or user_input[0] == 'count':
count_messages()
if user_input[0] == 'chars':
count_characters()
if user_input[0] == 'help' or user_input[0] == '?':
print('Messenger Counter available commands:')
print(' count - counts all messages and saves to messages.json')
print(' chars - counts all characters and saves to messages_chars.json')
print(' stats [conversation, -c] - displays statistics for counted messages')
print(' [detailed statistics for specific conversation, character statistics]')
print(' user [name] - detailed statistics for specific user')
print(' yearly [name] - yearly messages')
print(' [specific user]')
# print(' monthly [name, -d] - monthly messages (available soon)')
# print(' [specific user, day difference]')
print(' daily [name, -h] - daily messages')
print(' [specific user, hours difference]')
print(' hours [name, -h] - hour distribution of messages')
print(' [specific user, hours difference]')
print(' help - displays this help prompt')
print(' exit - exits the program')
if user_input[0] == 'stats':
if len(user_input) > 2 and user_input[2] == '-c':
try:
data = json.loads(open('messages_chars.json', 'r', encoding='utf-8').read())
for key in data.keys():
if key.startswith(user_input[1]):
characters_conversation_statistics(data, key)
break
else:
print('Conversation not found.')
except FileNotFoundError:
if input('Characters not counted. Count characters?[y/n] ').lower() == 'y':
count_characters()
elif len(user_input) > 1 and not user_input[1] == '-c':
try:
data = json.loads(open('messages.json', 'r', encoding='utf-8').read())
for key in data.keys():
if key.startswith(user_input[1]):
conversation_statistics(data, key)
break
else:
print('Conversation not found.')
except FileNotFoundError:
if input('Messages not counted. Count messages?[y/n] ').lower() == 'y':
count_messages()
elif len(user_input) > 1 and user_input[1] == '-c':
try:
data = json.loads(open('messages_chars.json', 'r', encoding='utf-8').read())
characters_statistics(data)
except FileNotFoundError:
if input('Characters not counted. Count characters?[y/n] ').lower() == 'y':
count_characters()
else:
try:
data = json.loads(open('messages.json', 'r', encoding='utf-8').read())
messages_statistics(data)
except FileNotFoundError:
if input('Messages not counted. Count messages?[y/n] ').lower() == 'y':
count_messages()
if user_input[0] == 'user':
if len(user_input) > 1:
try:
data = json.loads(open('messages.json', 'r', encoding='utf-8').read())
data = pd.DataFrame(data).fillna(0).astype('int')
for key in data.index:
if key.startswith(' '.join(user_input[1:])):
user_statistics(data, key)
break
else:
print('Conversation not found.')
except FileNotFoundError:
if input('Messages not counted. Count messages?[y/n] ').lower() == 'y':
count_messages()
else:
print('Please specify user name.')
if user_input[0] == 'daily':
if len(user_input) > 1 and not user_input[1] == '-h':
try:
data = json.loads(open('messages.json', 'r', encoding='utf-8').read())
if len(user_input) > 1:
for key in data.keys():
if key.startswith(user_input[1]):
if len(user_input) < 3:
daily_conversation(key)
else:
daily_conversation(key, float(user_input[2]))
break
else:
print('Conversation not found.')
else:
print('Please specify conversation.')
except FileNotFoundError:
if input('Messages not counted. Count messages?[y/n] ').lower() == 'y':
count_messages()
elif len(user_input) > 1 and user_input[1] == '-h':
daily_chats(float(user_input[2]))
else:
daily_chats()
if user_input[0] == 'monthly':
if len(user_input) > 1:
try:
data = json.loads(open('messages.json', 'r', encoding='utf-8').read())
if len(user_input) > 1:
for key in data.keys():
if key.startswith(user_input[1]):
monthly_conversation(key)
else:
print('Conversation not found.')
else:
print('Please specify conversation.')
except FileNotFoundError:
if input('Messages not counted. Count messages?[y/n] ').lower() == 'y':
count_messages()
else:
monthly_chats()
if user_input[0] == 'yearly':
if len(user_input) > 1:
try:
data = json.loads(open('messages.json', 'r', encoding='utf-8').read())
if len(user_input) > 1:
for key in data.keys():
if key.startswith(user_input[1]):
yearly_conversation(key)
break
else:
print('Conversation not found.')
else:
print('Please specify conversation.')
except FileNotFoundError:
if input('Messages not counted. Count messages?[y/n] ').lower() == 'y':
count_messages()
else:
yearly_chats()
if user_input[0] == 'hours':
if len(user_input) > 1 and not user_input[1] == '-h':
try:
data = json.loads(open('messages.json', 'r', encoding='utf-8').read())
if len(user_input) > 1:
for key in data.keys():
if key.startswith(user_input[1]):
if len(user_input) < 3:
hours_conversation(key)
else:
hours_conversation(key, float(user_input[2]))
break
else:
print('Conversation not found.')
else:
print('Please specify conversation.')
except FileNotFoundError:
if input('Messages not counted. Count messages?[y/n] ').lower() == 'y':
count_messages()
elif len(user_input) > 1 and user_input[1] == '-h':
hours_chats(float(user_input[2]))
else:
hours_chats()
| [
"pandas.DataFrame",
"matplotlib.pyplot.xlim",
"json.dump",
"logging.error",
"matplotlib.pyplot.show",
"matplotlib.pyplot.bar",
"math.floor",
"urllib.request.urlopen",
"matplotlib.pyplot.rcdefaults",
"pandas.Series",
"pandas.Timedelta",
"collections.Counter",
"pandas.set_option",
"matplotlib.pyplot.savefig"
] | [((6203, 6242), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', 'None'], {}), "('display.max_rows', None)\n", (6216, 6242), True, 'import pandas as pd\n'), ((6425, 6441), 'matplotlib.pyplot.rcdefaults', 'plt.rcdefaults', ([], {}), '()\n', (6439, 6441), True, 'from matplotlib import pyplot as plt\n'), ((6531, 6541), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6539, 6541), True, 'from matplotlib import pyplot as plt\n'), ((6926, 6951), 'pandas.DataFrame', 'pd.DataFrame', (['data_source'], {}), '(data_source)\n', (6938, 6951), True, 'import pandas as pd\n'), ((7097, 7136), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', 'None'], {}), "('display.max_rows', None)\n", (7110, 7136), True, 'import pandas as pd\n'), ((7430, 7455), 'pandas.DataFrame', 'pd.DataFrame', (['data_source'], {}), '(data_source)\n', (7442, 7455), True, 'import pandas as pd\n'), ((7626, 7665), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', 'None'], {}), "('display.max_rows', None)\n", (7639, 7665), True, 'import pandas as pd\n'), ((8736, 8775), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', 'None'], {}), "('display.max_rows', None)\n", (8749, 8775), True, 'import pandas as pd\n'), ((10249, 10282), 'matplotlib.pyplot.bar', 'plt.bar', (['messages.index', 'messages'], {}), '(messages.index, messages)\n', (10256, 10282), True, 'from matplotlib import pyplot as plt\n'), ((10287, 10314), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""messages.pdf"""'], {}), "('messages.pdf')\n", (10298, 10314), True, 'from matplotlib import pyplot as plt\n'), ((10319, 10329), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10327, 10329), True, 'from matplotlib import pyplot as plt\n'), ((11879, 11900), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (11898, 11900), False, 'import collections\n'), ((12538, 12571), 'pandas.DataFrame', 'pd.DataFrame', (['messages'], {'index': '[0]'}), '(messages, index=[0])\n', (12550, 12571), True, 'import pandas as pd\n'), ((12615, 12658), 'matplotlib.pyplot.bar', 'plt.bar', (['messages.columns', 'messages.iloc[0]'], {}), '(messages.columns, messages.iloc[0])\n', (12622, 12658), True, 'from matplotlib import pyplot as plt\n'), ((12936, 12952), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-1)', '(24)'], {}), '(-1, 24)\n', (12944, 12952), True, 'from matplotlib import pyplot as plt\n'), ((12957, 12984), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""messages.pdf"""'], {}), "('messages.pdf')\n", (12968, 12984), True, 'from matplotlib import pyplot as plt\n'), ((12989, 12999), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12997, 12999), True, 'from matplotlib import pyplot as plt\n'), ((14458, 14479), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (14477, 14479), False, 'import collections\n'), ((15316, 15337), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (15335, 15337), False, 'import collections\n'), ((16697, 16718), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (16716, 16718), False, 'import collections\n'), ((16965, 16998), 'pandas.DataFrame', 'pd.DataFrame', (['messages'], {'index': '[0]'}), '(messages, index=[0])\n', (16977, 16998), True, 'import pandas as pd\n'), ((17042, 17085), 'matplotlib.pyplot.bar', 'plt.bar', (['messages.columns', 'messages.iloc[0]'], {}), '(messages.columns, messages.iloc[0])\n', (17049, 17085), True, 'from matplotlib import pyplot as plt\n'), ((17090, 17117), 'matplotlib.pyplot.savefig', 'plt.savefig', 
(['"""messages.pdf"""'], {}), "('messages.pdf')\n", (17101, 17117), True, 'from matplotlib import pyplot as plt\n'), ((17122, 17132), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17130, 17132), True, 'from matplotlib import pyplot as plt\n'), ((3563, 3607), 'json.dump', 'json.dump', (['total', 'output'], {'ensure_ascii': '(False)'}), '(total, output, ensure_ascii=False)\n', (3572, 3607), False, 'import json\n'), ((4664, 4708), 'json.dump', 'json.dump', (['total', 'output'], {'ensure_ascii': '(False)'}), '(total, output, ensure_ascii=False)\n', (4673, 4708), False, 'import json\n'), ((9373, 9394), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (9392, 9394), False, 'import collections\n'), ((886, 929), 'logging.error', 'logging.error', (['"""File not found, try again."""'], {}), "('File not found, try again.')\n", (899, 929), False, 'import logging\n'), ((2454, 2532), 'logging.error', 'logging.error', (["('Characters not counted.' if chars else 'Messages not counted.')"], {}), "('Characters not counted.' if chars else 'Messages not counted.')\n", (2467, 2532), False, 'import logging\n'), ((2936, 2957), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (2955, 2957), False, 'import collections\n'), ((4001, 4022), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (4020, 4022), False, 'import collections\n'), ((10181, 10200), 'pandas.Series', 'pd.Series', (['messages'], {}), '(messages)\n', (10190, 10200), True, 'import pandas as pd\n'), ((2013, 2053), 'logging.error', 'logging.error', (['"""Conversation not found."""'], {}), "('Conversation not found.')\n", (2026, 2053), False, 'import logging\n'), ((2297, 2337), 'logging.error', 'logging.error', (['"""Conversation not found."""'], {}), "('Conversation not found.')\n", (2310, 2337), False, 'import logging\n'), ((4469, 4490), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (4488, 4490), False, 'import collections\n'), ((6149, 6174), 'pandas.DataFrame', 'pd.DataFrame', (['data_source'], {}), '(data_source)\n', (6161, 6174), True, 'import pandas as pd\n'), ((830, 847), 'urllib.request.urlopen', 'urlopen', (['filename'], {}), '(filename)\n', (837, 847), False, 'from urllib.request import urlopen\n'), ((9858, 9884), 'pandas.Timedelta', 'pd.Timedelta', ([], {'hours': '(-delta)'}), '(hours=-delta)\n', (9870, 9884), True, 'import pandas as pd\n'), ((1799, 1817), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (1811, 1817), True, 'import pandas as pd\n'), ((12854, 12871), 'math.floor', 'math.floor', (['delta'], {}), '(delta)\n', (12864, 12871), False, 'import math\n'), ((17446, 17463), 'urllib.request.urlopen', 'urlopen', (['filename'], {}), '(filename)\n', (17453, 17463), False, 'from urllib.request import urlopen\n'), ((12796, 12813), 'math.floor', 'math.floor', (['delta'], {}), '(delta)\n', (12806, 12813), False, 'import math\n'), ((12880, 12897), 'math.floor', 'math.floor', (['delta'], {}), '(delta)\n', (12890, 12897), False, 'import math\n'), ((21288, 21306), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (21300, 21306), True, 'import pandas as pd\n')] |
import xml.etree.ElementTree as et
from objects.node import Node
from objects.way import Way
def extract_road(item, roads):
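    """Build a Way from an OSM <way> element; append it to roads if it is a named highway."""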
way_id = int(item.attrib['id'])
way = Way(way_id)
is_highway = False
for child in item:
if child.tag == "nd":
way.add_node(int(child.attrib['ref']))
elif child.tag == "tag":
key = child.attrib['k']
val = child.attrib['v']
if key == "name" or (key == "ref" and way.name is None):
way.name = val
elif key == "oneway":
way.is_one_way = val == "yes"
elif key == "highway":
is_highway = True
if way.name is not None and is_highway:
roads.append(way)
def extract_node(item, nodes):
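    """Build a Node (with its tags) from an OSM <node> element and store it in nodes keyed by id."""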
node_id = int(item.attrib['id'])
node_lat = float(item.attrib['lat'])
node_lon = float(item.attrib['lon'])
node = Node(node_id, node_lat, node_lon)
for child in item:
        if child.tag == "tag":
            key = child.attrib['k']
            val = child.attrib['v']
            node.add_tag(key, val)
nodes[node_id] = node
def parse_osm_file(filename):
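    """Parse an OSM XML file and return (roads, nodes)."""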
tree = et.parse(filename)
roads = []
nodes = dict()
for item in tree.iter():
if item.tag == "node":
extract_node(item, nodes)
elif item.tag == "way":
extract_road(item, roads)
return roads, nodes
if __name__ == "__main__":
roads, nodes = parse_osm_file("../osm_birmingham.xml")
print(str(len(nodes)) + " nodes in dataset")
print(str(len(roads)) + " roads in dataset")
pass
| [
"objects.way.Way",
"xml.etree.ElementTree.parse",
"objects.node.Node"
] | [((173, 184), 'objects.way.Way', 'Way', (['way_id'], {}), '(way_id)\n', (176, 184), False, 'from objects.way import Way\n'), ((905, 938), 'objects.node.Node', 'Node', (['node_id', 'node_lat', 'node_lon'], {}), '(node_id, node_lat, node_lon)\n', (909, 938), False, 'from objects.node import Node\n'), ((1162, 1180), 'xml.etree.ElementTree.parse', 'et.parse', (['filename'], {}), '(filename)\n', (1170, 1180), True, 'import xml.etree.ElementTree as et\n')] |
import json
import os
from jinja2 import Template
from chronologer.config import config
def write_html():
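    """Render templates/index.html with the box-plot spec and write it to config.html_output_file (skipped on dry runs)."""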
html_file = os.path.join(os.path.dirname(__file__), "templates", "index.html")
with open(html_file) as fp:
html_template = Template(fp.read())
if not config.dry_run:
boxplot_spec = json.dumps(_get_boxplot_spec(), indent=2)
with open(config.html_output_file, "w") as fp:
fp.write(html_template.render(boxplot_spec=boxplot_spec))
def _get_boxplot_spec():
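    """Build a Vega-Lite box-plot spec (time vs. commit) from the combined benchmark file."""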
with open(config.combined_benchmark_file) as fp:
values = json.load(fp)
return {
"$schema": "https://vega.github.io/schema/vega-lite/v3.json",
"data": {"values": values},
"mark": {"type": "boxplot", "extent": "min-max", "size": 5},
"width": 1400,
"height": 500,
"encoding": {
"y": {"field": "time", "type": "quantitative", "axis": {"title": "Time"}},
"x": {
"field": "commit",
"type": "ordinal",
"axis": {"title": "Commit", "labels": False, "ticks": False},
},
"tooltip": {"field": "message", "type": "ordinal", "aggregate": "min"},
},
}
| [
"os.path.dirname",
"json.load"
] | [((139, 164), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (154, 164), False, 'import os\n'), ((583, 596), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (592, 596), False, 'import json\n')] |
from core import *
import core
def Initialize():
"""
find libcom-api and initialize
"""
import os
import sys
dirname = os.path.dirname(__file__)
ext = '.so'
if sys.platform == 'darwin':
ext = '.dylib'
elif sys.platform.startswith("win"):
ext = '.dll'
AX_AAF_COMAPI = os.path.join(dirname,'libcom-api' + ext)
os.environ['AX_AAF_COMAPI'] = os.environ.get('AX_AAF_COMAPI', AX_AAF_COMAPI)
return core.AxInit()
_AxInit = Initialize()
from util import __AxWrap
__AxWrap(globals())
from open import open
from util import Ax | [
"sys.platform.startswith",
"os.path.dirname",
"os.environ.get",
"core.AxInit",
"os.path.join"
] | [((145, 170), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (160, 170), False, 'import os\n'), ((339, 380), 'os.path.join', 'os.path.join', (['dirname', "('libcom-api' + ext)"], {}), "(dirname, 'libcom-api' + ext)\n", (351, 380), False, 'import os\n'), ((414, 460), 'os.environ.get', 'os.environ.get', (['"""AX_AAF_COMAPI"""', 'AX_AAF_COMAPI'], {}), "('AX_AAF_COMAPI', AX_AAF_COMAPI)\n", (428, 460), False, 'import os\n'), ((477, 490), 'core.AxInit', 'core.AxInit', ([], {}), '()\n', (488, 490), False, 'import core\n'), ((257, 287), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win"""'], {}), "('win')\n", (280, 287), False, 'import sys\n')] |
import yaml
import os
fileNamePath = os.path.split(os.path.realpath(__file__))[0]
dir = os.path.join(fileNamePath,'../conf')
def get(file_name,*keys,file_path=dir):
yamlPath = os.path.join(file_path, file_name)
file = open(yamlPath, 'r', encoding='utf-8')
config = yaml.load(file)
for key in keys:
config = config[key]
return config
if __name__ == '__main__':
# wait_time = yaml_utils.get("constant.yaml", "wait_elements_time")
# driver = get("host","url_regerister")
# driver2 = get_url("constant.yaml","host")
driver2 = get("constant.yaml","test1","test2","test33")
print(driver2)
# a = (1,2)
# print(type(a)) | [
"yaml.load",
"os.path.join",
"os.path.realpath"
] | [((89, 126), 'os.path.join', 'os.path.join', (['fileNamePath', '"""../conf"""'], {}), "(fileNamePath, '../conf')\n", (101, 126), False, 'import os\n'), ((182, 216), 'os.path.join', 'os.path.join', (['file_path', 'file_name'], {}), '(file_path, file_name)\n', (194, 216), False, 'import os\n'), ((279, 294), 'yaml.load', 'yaml.load', (['file'], {}), '(file)\n', (288, 294), False, 'import yaml\n'), ((52, 78), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (68, 78), False, 'import os\n')] |
__copyright__ = "Copyright (C) 2019 <NAME>"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy as np
import pystella as ps
import pytest
from pyopencl.tools import ( # noqa
pytest_generate_tests_for_pyopencl as pytest_generate_tests)
@pytest.mark.parametrize("dtype", [np.float64])
@pytest.mark.parametrize("Stepper", [ps.RungeKutta4, ps.LowStorageRK54])
def test_expansion(ctx_factory, proc_shape, dtype, Stepper, timing=False):
if proc_shape != (1, 1, 1):
pytest.skip("test expansion only on one rank")
def sol(w, t):
x = (1 + 3*w)
return (x*(t/np.sqrt(3) + 2/x))**(2/x)/2**(2/x)
from pystella.step import LowStorageRKStepper
is_low_storage = LowStorageRKStepper in Stepper.__bases__
for w in [0, 1/3, 1/2, 1, -1/4]:
def energy(a):
return a**(-3-3*w)
def pressure(a):
return w * energy(a)
t = 0
dt = .005
expand = ps.Expansion(energy(1.), Stepper, mpl=np.sqrt(8.*np.pi))
while t <= 10. - dt:
for s in range(expand.stepper.num_stages):
slc = (0) if is_low_storage else (0 if s == 0 else 1)
expand.step(s, energy(expand.a[slc]), pressure(expand.a[slc]), dt)
t += dt
slc = () if is_low_storage else (0)
order = expand.stepper.expected_order
rtol = dt**order
print(order,
w,
expand.a[slc]/sol(w, t) - 1,
expand.constraint(energy(expand.a[slc])))
assert np.allclose(expand.a[slc], sol(w, t), rtol=rtol, atol=0), \
f"FLRW solution inaccurate for {w=}"
assert expand.constraint(energy(expand.a[slc])) < rtol, \
f"FLRW solution disobeying constraint for {w=}"
if __name__ == "__main__":
from common import parser
args = parser.parse_args()
from pystella.step import all_steppers
for stepper in all_steppers[-5:]:
test_expansion(
None, proc_shape=args.proc_shape, dtype=args.dtype, timing=args.timing,
Stepper=stepper,
)
| [
"pytest.mark.parametrize",
"numpy.sqrt",
"pytest.skip",
"common.parser.parse_args"
] | [((1284, 1330), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[np.float64]'], {}), "('dtype', [np.float64])\n", (1307, 1330), False, 'import pytest\n'), ((1333, 1404), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""Stepper"""', '[ps.RungeKutta4, ps.LowStorageRK54]'], {}), "('Stepper', [ps.RungeKutta4, ps.LowStorageRK54])\n", (1356, 1404), False, 'import pytest\n'), ((2919, 2938), 'common.parser.parse_args', 'parser.parse_args', ([], {}), '()\n', (2936, 2938), False, 'from common import parser\n'), ((1523, 1569), 'pytest.skip', 'pytest.skip', (['"""test expansion only on one rank"""'], {}), "('test expansion only on one rank')\n", (1534, 1569), False, 'import pytest\n'), ((2038, 2058), 'numpy.sqrt', 'np.sqrt', (['(8.0 * np.pi)'], {}), '(8.0 * np.pi)\n', (2045, 2058), True, 'import numpy as np\n'), ((1637, 1647), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (1644, 1647), True, 'import numpy as np\n')] |
import pytest
from mergify_engine import subscription
def test_init():
subscription.Subscription(
123, True, "friend", {}, frozenset({subscription.Features.PRIVATE_REPOSITORY})
)
def test_dict():
owner_id = 1234
sub = subscription.Subscription(
owner_id,
True,
"friend",
{},
frozenset({subscription.Features.PRIVATE_REPOSITORY}),
)
assert sub.from_dict(owner_id, sub.to_dict()) == sub
@pytest.mark.parametrize(
"features",
(
{},
{subscription.Features.PRIVATE_REPOSITORY},
{
subscription.Features.PRIVATE_REPOSITORY,
subscription.Features.PRIORITY_QUEUES,
},
),
)
@pytest.mark.asyncio
async def test_save_sub(features):
owner_id = 1234
sub = subscription.Subscription(owner_id, True, "friend", {}, frozenset(features))
await sub.save_subscription_to_cache()
rsub = await subscription.Subscription._retrieve_subscription_from_cache(owner_id)
assert rsub == sub
@pytest.mark.asyncio
async def test_unknown_sub():
sub = await subscription.Subscription._retrieve_subscription_from_cache(98732189)
assert sub is None
def test_from_dict_unknown_features():
assert subscription.Subscription.from_dict(
123,
{
"subscription_active": True,
"subscription_reason": "friend",
"tokens": {},
"features": ["unknown feature"],
},
) == subscription.Subscription(
123,
True,
"friend",
{},
frozenset(),
)
def test_active_feature():
sub = subscription.Subscription(
123,
True,
"friend",
{},
frozenset(),
)
assert sub.has_feature(subscription.Features.PRIORITY_QUEUES) is False
sub = subscription.Subscription(
123,
False,
"friend",
{},
frozenset([subscription.Features.PRIORITY_QUEUES]),
)
assert sub.has_feature(subscription.Features.PRIORITY_QUEUES) is False
sub = subscription.Subscription(
123,
True,
"friend",
{},
frozenset([subscription.Features.PRIORITY_QUEUES]),
)
assert sub.has_feature(subscription.Features.PRIORITY_QUEUES) is True
| [
"pytest.mark.parametrize",
"mergify_engine.subscription.Subscription._retrieve_subscription_from_cache",
"mergify_engine.subscription.Subscription.from_dict"
] | [((466, 643), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""features"""', '({}, {subscription.Features.PRIVATE_REPOSITORY}, {subscription.Features.\n PRIVATE_REPOSITORY, subscription.Features.PRIORITY_QUEUES})'], {}), "('features', ({}, {subscription.Features.\n PRIVATE_REPOSITORY}, {subscription.Features.PRIVATE_REPOSITORY,\n subscription.Features.PRIORITY_QUEUES}))\n", (489, 643), False, 'import pytest\n'), ((936, 1005), 'mergify_engine.subscription.Subscription._retrieve_subscription_from_cache', 'subscription.Subscription._retrieve_subscription_from_cache', (['owner_id'], {}), '(owner_id)\n', (995, 1005), False, 'from mergify_engine import subscription\n'), ((1098, 1167), 'mergify_engine.subscription.Subscription._retrieve_subscription_from_cache', 'subscription.Subscription._retrieve_subscription_from_cache', (['(98732189)'], {}), '(98732189)\n', (1157, 1167), False, 'from mergify_engine import subscription\n'), ((1243, 1403), 'mergify_engine.subscription.Subscription.from_dict', 'subscription.Subscription.from_dict', (['(123)', "{'subscription_active': True, 'subscription_reason': 'friend', 'tokens': {},\n 'features': ['unknown feature']}"], {}), "(123, {'subscription_active': True,\n 'subscription_reason': 'friend', 'tokens': {}, 'features': [\n 'unknown feature']})\n", (1278, 1403), False, 'from mergify_engine import subscription\n')] |
'''
Setup file for Operator and Hamiltonian Generators.
'''
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config=Configuration('hgen',parent_package,top_path)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(configuration=configuration)
| [
"numpy.distutils.core.setup",
"numpy.distutils.misc_util.Configuration"
] | [((179, 226), 'numpy.distutils.misc_util.Configuration', 'Configuration', (['"""hgen"""', 'parent_package', 'top_path'], {}), "('hgen', parent_package, top_path)\n", (192, 226), False, 'from numpy.distutils.misc_util import Configuration\n'), ((318, 352), 'numpy.distutils.core.setup', 'setup', ([], {'configuration': 'configuration'}), '(configuration=configuration)\n', (323, 352), False, 'from numpy.distutils.core import setup\n')] |
# Generated by Django 3.2.9 on 2022-01-10 08:19
import cloudinary.models
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('hoodapp', '0013_auto_20220110_1102'),
]
operations = [
migrations.AlterModelOptions(
name='post',
options={'ordering': ['-pk']},
),
migrations.RemoveField(
model_name='post',
name='image',
),
migrations.AddField(
model_name='business',
name='image',
field=cloudinary.models.CloudinaryField(blank=True, max_length=255, null=True, verbose_name='image'),
),
]
| [
"django.db.migrations.RemoveField",
"django.db.migrations.AlterModelOptions"
] | [((252, 324), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""post"""', 'options': "{'ordering': ['-pk']}"}), "(name='post', options={'ordering': ['-pk']})\n", (280, 324), False, 'from django.db import migrations\n'), ((369, 424), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""post"""', 'name': '"""image"""'}), "(model_name='post', name='image')\n", (391, 424), False, 'from django.db import migrations\n')] |
from abc import ABCMeta, abstractmethod
from typing import Optional
import json
def error_catcher(method):
def wrapper(*args, **kwargs):
try:
return method(*args, **kwargs)
except (AttributeError, ValueError):
return "File error: указан неверный тип файла."
return wrapper
class AbstractHandler(metaclass=ABCMeta):
"""The Interface for handling requests."""
@abstractmethod
def set_successor(self, successor):
"""Set the next handler in the chain"""
pass
@abstractmethod
def handle(self, file) -> Optional[str]:
"""Handle the event"""
pass
class JSON(AbstractHandler):
def __init__(self):
self._successor = None
self._temp: list = list()
def set_successor(self, successor):
self._successor = successor
return successor
@error_catcher
def handle(self, FILE):
"""Handle the *.json file event"""
file_name, file_ext = str(FILE).split(".")
if file_ext == self.__class__.__name__.lower():
with open(FILE, "r") as f:
self.deserialization(json.load(f))
return self.getter()
else:
return self._successor.handle(FILE)
def deserialization(self, data):
length = len(data['x'])
for i in range(length):
x_temp = list(map(float, data['x'][i]))
y_temp = list(map(float, data['y'][i]))
temp = [x_temp, y_temp]
self._temp.append(temp)
def __repr__(self):
return f"{self.__class__.__name__}"
def getter(self):
return self._temp
class TXT(AbstractHandler):
def __init__(self):
self._successor = None
self._temp: list = list()
def set_successor(self, successor):
self._successor = successor
return successor
@error_catcher
def handle(self, FILE):
"""Handle the *.txt file event"""
file_name, file_ext = str(FILE).split(".")
if file_ext == self.__class__.__name__.lower():
with open(FILE, "r") as f:
for line in f.read().split('\n'):
reformat_line = line[1:-1].split('];[')
a = [list(map(float, elem.split(','))) for elem in reformat_line]
self._temp.append(a)
return self.getter()
else:
return self._successor.handle(FILE)
def __repr__(self):
return f"{self.__class__.__name__}"
def getter(self):
return self._temp
class CSV(AbstractHandler):
def __init__(self):
self._successor = None
self._temp: list = list()
def set_successor(self, successor):
self._successor = successor
return successor
@error_catcher
def handle(self, FILE):
"""Handle the *.csv file event"""
file_name, file_ext = str(FILE).split(".")
if file_ext == self.__class__.__name__.lower():
with open(FILE, "r") as f:
for line in f.read().split(',\n'):
reformat_line = line[1:-1].split('","')
a = [list(map(float, elem.split(','))) for elem in reformat_line]
self._temp.append(a)
return self.getter()
else:
return self._successor.handle(FILE)
def __repr__(self):
return f"{self.__class__.__name__}"
def getter(self):
return self._temp
class FilesChain:
def __init__(self):
self.chain1 = JSON()
self.chain2 = TXT()
self.chain3 = CSV()
# set the chain of responsibility
# The Client may compose chains once or
# the hadler can set them dynamically at
# handle time
self.chain1.set_successor(self.chain2).set_successor(self.chain3)
def client_code(self):
FILE = str(input("Input file name: "))
return self.chain1.handle(FILE)
| [
"json.load"
] | [((1144, 1156), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1153, 1156), False, 'import json\n')] |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: update_comment.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from topboard_sdk.model.topboard import comment_pb2 as topboard__sdk_dot_model_dot_topboard_dot_comment__pb2
from topboard_sdk.model.topboard import issue_basic_pb2 as topboard__sdk_dot_model_dot_topboard_dot_issue__basic__pb2
from topboard_sdk.model.cmdb import user_pb2 as topboard__sdk_dot_model_dot_cmdb_dot_user__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='update_comment.proto',
package='topboard',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x14update_comment.proto\x12\x08topboard\x1a)topboard_sdk/model/topboard/comment.proto\x1a-topboard_sdk/model/topboard/issue_basic.proto\x1a\"topboard_sdk/model/cmdb/user.proto\"M\n\x14UpdateCommentRequest\x12\x11\n\tcommentID\x18\x01 \x01(\t\x12\"\n\x07\x63omment\x18\x02 \x01(\x0b\x32\x11.topboard.Comment\"q\n\x1cUpdateCommentResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12\x1f\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32\x11.topboard.Commentb\x06proto3')
,
dependencies=[topboard__sdk_dot_model_dot_topboard_dot_comment__pb2.DESCRIPTOR,topboard__sdk_dot_model_dot_topboard_dot_issue__basic__pb2.DESCRIPTOR,topboard__sdk_dot_model_dot_cmdb_dot_user__pb2.DESCRIPTOR,])
_UPDATECOMMENTREQUEST = _descriptor.Descriptor(
name='UpdateCommentRequest',
full_name='topboard.UpdateCommentRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='commentID', full_name='topboard.UpdateCommentRequest.commentID', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='comment', full_name='topboard.UpdateCommentRequest.comment', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=160,
serialized_end=237,
)
_UPDATECOMMENTRESPONSEWRAPPER = _descriptor.Descriptor(
name='UpdateCommentResponseWrapper',
full_name='topboard.UpdateCommentResponseWrapper',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='topboard.UpdateCommentResponseWrapper.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='codeExplain', full_name='topboard.UpdateCommentResponseWrapper.codeExplain', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='topboard.UpdateCommentResponseWrapper.error', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='topboard.UpdateCommentResponseWrapper.data', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=239,
serialized_end=352,
)
_UPDATECOMMENTREQUEST.fields_by_name['comment'].message_type = topboard__sdk_dot_model_dot_topboard_dot_comment__pb2._COMMENT
_UPDATECOMMENTRESPONSEWRAPPER.fields_by_name['data'].message_type = topboard__sdk_dot_model_dot_topboard_dot_comment__pb2._COMMENT
DESCRIPTOR.message_types_by_name['UpdateCommentRequest'] = _UPDATECOMMENTREQUEST
DESCRIPTOR.message_types_by_name['UpdateCommentResponseWrapper'] = _UPDATECOMMENTRESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
UpdateCommentRequest = _reflection.GeneratedProtocolMessageType('UpdateCommentRequest', (_message.Message,), {
'DESCRIPTOR' : _UPDATECOMMENTREQUEST,
'__module__' : 'update_comment_pb2'
# @@protoc_insertion_point(class_scope:topboard.UpdateCommentRequest)
})
_sym_db.RegisterMessage(UpdateCommentRequest)
UpdateCommentResponseWrapper = _reflection.GeneratedProtocolMessageType('UpdateCommentResponseWrapper', (_message.Message,), {
'DESCRIPTOR' : _UPDATECOMMENTRESPONSEWRAPPER,
'__module__' : 'update_comment_pb2'
# @@protoc_insertion_point(class_scope:topboard.UpdateCommentResponseWrapper)
})
_sym_db.RegisterMessage(UpdateCommentResponseWrapper)
# @@protoc_insertion_point(module_scope)
| [
"google.protobuf.symbol_database.Default",
"google.protobuf.descriptor.FieldDescriptor",
"google.protobuf.reflection.GeneratedProtocolMessageType"
] | [((468, 494), 'google.protobuf.symbol_database.Default', '_symbol_database.Default', ([], {}), '()\n', (492, 494), True, 'from google.protobuf import symbol_database as _symbol_database\n'), ((5415, 5584), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', (['"""UpdateCommentRequest"""', '(_message.Message,)', "{'DESCRIPTOR': _UPDATECOMMENTREQUEST, '__module__': 'update_comment_pb2'}"], {}), "('UpdateCommentRequest', (_message.\n Message,), {'DESCRIPTOR': _UPDATECOMMENTREQUEST, '__module__':\n 'update_comment_pb2'})\n", (5455, 5584), True, 'from google.protobuf import reflection as _reflection\n'), ((5736, 5921), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', (['"""UpdateCommentResponseWrapper"""', '(_message.Message,)', "{'DESCRIPTOR': _UPDATECOMMENTRESPONSEWRAPPER, '__module__':\n 'update_comment_pb2'}"], {}), "('UpdateCommentResponseWrapper', (\n _message.Message,), {'DESCRIPTOR': _UPDATECOMMENTRESPONSEWRAPPER,\n '__module__': 'update_comment_pb2'})\n", (5776, 5921), True, 'from google.protobuf import reflection as _reflection\n'), ((2339, 2687), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""comment"""', 'full_name': '"""topboard.UpdateCommentRequest.comment"""', 'index': '(1)', 'number': '(2)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': 'None', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR'}), "(name='comment', full_name=\n 'topboard.UpdateCommentRequest.comment', index=1, number=2, type=11,\n cpp_type=10, label=1, has_default_value=False, default_value=None,\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, serialized_options=None, file=DESCRIPTOR)\n", (2366, 2687), True, 'from google.protobuf import descriptor as _descriptor\n'), ((3153, 3498), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""code"""', 'full_name': '"""topboard.UpdateCommentResponseWrapper.code"""', 'index': '(0)', 'number': '(1)', 'type': '(5)', 'cpp_type': '(1)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': '(0)', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR'}), "(name='code', full_name=\n 'topboard.UpdateCommentResponseWrapper.code', index=0, number=1, type=5,\n cpp_type=1, label=1, has_default_value=False, default_value=0,\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, serialized_options=None, file=DESCRIPTOR)\n", (3180, 3498), True, 'from google.protobuf import descriptor as _descriptor\n'), ((4321, 4672), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""data"""', 'full_name': '"""topboard.UpdateCommentResponseWrapper.data"""', 'index': '(3)', 'number': '(4)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': 'None', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR'}), "(name='data', full_name=\n 'topboard.UpdateCommentResponseWrapper.data', 
index=3, number=4, type=\n 11, cpp_type=10, label=1, has_default_value=False, default_value=None,\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, serialized_options=None, file=DESCRIPTOR)\n", (4348, 4672), True, 'from google.protobuf import descriptor as _descriptor\n')] |
from setuptools import setup
import numpy
setup(
name='CIGAN',
version='0.2dev',
packages=['vpa'],
license='MIT License',
include_dirs=[numpy.get_include(),],
) | [
"numpy.get_include"
] | [((157, 176), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (174, 176), False, 'import numpy\n')] |
#!/usr/bin/env python3
import re, argparse, numpy as np, glob, os
#from sklearn.neighbors.kde import KernelDensity
import matplotlib.pyplot as plt
from extractTargetFilesNonDim import epsNuFromRe
from extractTargetFilesNonDim import getAllData
from computeSpectraNonDim import readAllSpectra
colors = ['#1f78b4', '#33a02c', '#e31a1c', '#ff7f00', '#6a3d9a', '#b15928', '#a6cee3', '#b2df8a', '#fb9a99', '#fdbf6f', '#cab2d6', '#ffff99']
colors = ['#e41a1c', '#377eb8', '#4daf4a', '#984ea3', '#ff7f00', '#ffff33', '#a65628', '#f781bf', '#999999']
#colors = ['#a6cee3', '#1f78b4', '#b2df8a', '#33a02c', '#fb9a99', '#e31a1c', '#fdbf6f', '#ff7f00', '#cab2d6', '#6a3d9a', '#ffff99', '#b15928']
#colors = ['#abd9e9', '#74add1', '#4575b4', '#313695', '#006837', '#1a9850', '#66bd63', '#a6d96a', '#d9ef8b', '#fee08b', '#fdae61', '#f46d43', '#d73027', '#a50026', '#8e0152', '#c51b7d', '#de77ae', '#f1b6da']
def findDirectory(runspath, re, token):
retoken = 'RE%03d' % re
alldirs = glob.glob(runspath + '/*')
for dirn in alldirs:
if retoken not in dirn: continue
if token not in dirn: continue
return dirn
    assert False, 're-token combo not found'
def main_integral(runspath, target, REs, tokens, labels):
nBins = 2 * 16//2 - 1
modes = np.arange(1, nBins+1, dtype=np.float64) # assumes box is 2 pi
plt.figure()
#REs = findAllParams(path)
nRes = len(REs)
axes, lines = [], []
for j in range(nRes):
axes += [ plt.subplot(1, nRes, j+1) ]
for j in range(nRes):
RE = REs[j]
# read target file
logSpectra, logEnStdev, _, _ = readAllSpectra(target, [RE])
for i in range(len(tokens)):
eps, nu = epsNuFromRe(RE)
dirn = findDirectory(runspath, RE, tokens[i])
runData = getAllData(dirn, eps, nu, nBins, fSkip=1)
logE = np.log(runData['spectra'])
avgLogSpec = np.mean(logE, axis=0)
assert(avgLogSpec.size == nBins)
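            # Deviation of the run's mean log-spectrum from the target, in units of the target's log-std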
LL = (avgLogSpec.ravel() - logSpectra.ravel()) / logEnStdev.ravel()
print(LL.shape)
p = axes[j].plot(LL, modes, label=labels[i], color=colors[i])
#p = axes[j].plot(LL, modes, color=colors[i])
if j == 0: lines += [p]
#stdLogSpec = np.std(logE, axis=0)
#covLogSpec = np.cov(logE, rowvar=False)
#print(covLogSpec.shape)
axes[0].set_ylabel(r'$k$')
for j in range(nRes):
axes[j].set_title(r'$Re_\lambda$ = %d' % REs[j])
#axes[j].set_xscale("log")
axes[j].set_ylim([1, 15])
axes[j].grid()
axes[j].set_xlabel(r'$\frac{\log E(k) - \mu_{\log E(k)}}{\sigma_{\log E(k)}}$')
for j in range(1,nRes): axes[j].set_yticklabels([])
#axes[0].legend(lines, labels, bbox_to_anchor=(-0.1, 2.5), borderaxespad=0)
assert(len(lines) == len(labels))
#axes[0].legend(lines, labels, bbox_to_anchor=(0.5, 0.5))
axes[0].legend(bbox_to_anchor=(0.5, 0.5))
plt.tight_layout()
plt.show()
#axes[0].legend(loc='lower left')
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description = "Compute a target file for RL agent from DNS data.")
parser.add_argument('--target', help="Path to target files directory")
parser.add_argument('--tokens', nargs='+', help="Text token distinguishing each series of runs")
parser.add_argument('--res', nargs='+', type=int, help="Reynolds numbers")
    parser.add_argument('--labels', nargs='+', help="Plot labels to associate to tokens")
    parser.add_argument('--runspath', help="Path to the directory containing the runs")
args = parser.parse_args()
assert(len(args.tokens) == len(args.labels))
main_integral(args.runspath, args.target, args.res, args.tokens, args.labels)
| [
"matplotlib.pyplot.subplot",
"extractTargetFilesNonDim.epsNuFromRe",
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"numpy.log",
"extractTargetFilesNonDim.getAllData",
"computeSpectraNonDim.readAllSpectra",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.arange",
"glob.glob",
"matplotlib.pyplot.tight_layout"
] | [((982, 1008), 'glob.glob', 'glob.glob', (["(runspath + '/*')"], {}), "(runspath + '/*')\n", (991, 1008), False, 'import re, argparse, numpy as np, glob, os\n'), ((1277, 1318), 'numpy.arange', 'np.arange', (['(1)', '(nBins + 1)'], {'dtype': 'np.float64'}), '(1, nBins + 1, dtype=np.float64)\n', (1286, 1318), True, 'import re, argparse, numpy as np, glob, os\n'), ((1343, 1355), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1353, 1355), True, 'import matplotlib.pyplot as plt\n'), ((2974, 2992), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2990, 2992), True, 'import matplotlib.pyplot as plt\n'), ((2997, 3007), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3005, 3007), True, 'import matplotlib.pyplot as plt\n'), ((3086, 3179), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Compute a target file for RL agent from DNS data."""'}), "(description=\n 'Compute a target file for RL agent from DNS data.')\n", (3109, 3179), False, 'import re, argparse, numpy as np, glob, os\n'), ((1617, 1645), 'computeSpectraNonDim.readAllSpectra', 'readAllSpectra', (['target', '[RE]'], {}), '(target, [RE])\n', (1631, 1645), False, 'from computeSpectraNonDim import readAllSpectra\n'), ((1476, 1503), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', 'nRes', '(j + 1)'], {}), '(1, nRes, j + 1)\n', (1487, 1503), True, 'import matplotlib.pyplot as plt\n'), ((1705, 1720), 'extractTargetFilesNonDim.epsNuFromRe', 'epsNuFromRe', (['RE'], {}), '(RE)\n', (1716, 1720), False, 'from extractTargetFilesNonDim import epsNuFromRe\n'), ((1801, 1842), 'extractTargetFilesNonDim.getAllData', 'getAllData', (['dirn', 'eps', 'nu', 'nBins'], {'fSkip': '(1)'}), '(dirn, eps, nu, nBins, fSkip=1)\n', (1811, 1842), False, 'from extractTargetFilesNonDim import getAllData\n'), ((1862, 1888), 'numpy.log', 'np.log', (["runData['spectra']"], {}), "(runData['spectra'])\n", (1868, 1888), True, 'import re, argparse, numpy as np, glob, os\n'), ((1914, 1935), 'numpy.mean', 'np.mean', (['logE'], {'axis': '(0)'}), '(logE, axis=0)\n', (1921, 1935), True, 'import re, argparse, numpy as np, glob, os\n')] |
#!/usr/bin/env python3
""" An attempt to solve the Last Factorial Digit """
import sys
# This is totally wrong, but given N maxes out at 10, and anything after 5 the last digit is 0,
# this is likely cheaper and faster
result_dict = {1: 1,
2: 2,
3: 6,
4: 4}
dont_care = sys.stdin.readline()
for line in sys.stdin.readlines():
number = int(line.rstrip())
if number >= 5:
print(0)
else:
print(result_dict[number])
| [
"sys.stdin.readline",
"sys.stdin.readlines"
] | [((318, 338), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (336, 338), False, 'import sys\n'), ((352, 373), 'sys.stdin.readlines', 'sys.stdin.readlines', ([], {}), '()\n', (371, 373), False, 'import sys\n')] |
import os, sys
from PyQt5 import QtCore, QtGui
from qtpy.QtWidgets import QApplication
import ctypes
from sys import platform
sys.path.insert(0, os.path.join( os.path.dirname(__file__), "..", ".." ))
from window import ExecutionNodeEditorWindow
if __name__ == '__main__':
app = QApplication(sys.argv)
exe_path = os.path.dirname(os.path.realpath(sys.argv[0]))
assets_dir = os.path.join(exe_path, 'assets')
for (dirpath, dirnames, filenames) in os.walk(os.path.join(assets_dir, 'fonts')):
for f in filenames:
            font_id = QtGui.QFontDatabase.addApplicationFont(os.path.join(dirpath, f))
            if font_id == -1 or not QtGui.QFontDatabase.applicationFontFamilies(font_id):
print("Could not load font")
sys.exit(-1)
# print(QStyleFactory.keys())
app.setStyle('Fusion')
app_icon = QtGui.QIcon()
app_icon.addFile(os.path.join(assets_dir, 'icons/16x16.png'), QtCore.QSize(16,16))
app_icon.addFile(os.path.join(assets_dir, 'icons/24x24.png'), QtCore.QSize(24,24))
app_icon.addFile(os.path.join(assets_dir, 'icons/32x32.png'), QtCore.QSize(32,32))
app_icon.addFile(os.path.join(assets_dir, 'icons/48x48.png'), QtCore.QSize(48,48))
app_icon.addFile(os.path.join(assets_dir, 'icons/64x64.png'), QtCore.QSize(64,64))
app_icon.addFile(os.path.join(assets_dir, 'icons/128x128.png'), QtCore.QSize(128,128))
app_icon.addFile(os.path.join(assets_dir, 'icons/256x256.png'), QtCore.QSize(256,256))
app.setWindowIcon(app_icon)
if platform == "win32":
# Windows...
#This will make sure that the app icon is set in the taskbar on windows
# See https://stackoverflow.com/questions/1551605/how-to-set-applications-taskbar-icon-in-windows-7/1552105#1552105
myappid = u'no-company.node-editor.execution-graph-editor.1.0' # arbitrary string
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)
wnd = ExecutionNodeEditorWindow()
wnd.setWindowIcon(app_icon)
wnd.show()
wnd.actNew.trigger()
if len(sys.argv) == 2:
wnd.openFile(sys.argv[1])
sys.exit(app.exec_())
| [
"PyQt5.QtGui.QIcon",
"os.path.join",
"PyQt5.QtGui.QFontDatabase.addApplicationFont",
"PyQt5.QtGui.QFontDatabase.applicationFontFamilies",
"os.path.dirname",
"window.ExecutionNodeEditorWindow",
"os.path.realpath",
"PyQt5.QtCore.QSize",
"ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID",
"qtpy.QtWidgets.QApplication",
"sys.exit"
] | [((287, 309), 'qtpy.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (299, 309), False, 'from qtpy.QtWidgets import QApplication\n'), ((390, 422), 'os.path.join', 'os.path.join', (['exe_path', '"""assets"""'], {}), "(exe_path, 'assets')\n", (402, 422), False, 'import os, sys\n'), ((830, 843), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ([], {}), '()\n', (841, 843), False, 'from PyQt5 import QtCore, QtGui\n'), ((1929, 1956), 'window.ExecutionNodeEditorWindow', 'ExecutionNodeEditorWindow', ([], {}), '()\n', (1954, 1956), False, 'from window import ExecutionNodeEditorWindow\n'), ((161, 186), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (176, 186), False, 'import os, sys\n'), ((342, 371), 'os.path.realpath', 'os.path.realpath', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (358, 371), False, 'import os, sys\n'), ((475, 508), 'os.path.join', 'os.path.join', (['assets_dir', '"""fonts"""'], {}), "(assets_dir, 'fonts')\n", (487, 508), False, 'import os, sys\n'), ((865, 908), 'os.path.join', 'os.path.join', (['assets_dir', '"""icons/16x16.png"""'], {}), "(assets_dir, 'icons/16x16.png')\n", (877, 908), False, 'import os, sys\n'), ((910, 930), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(16)', '(16)'], {}), '(16, 16)\n', (922, 930), False, 'from PyQt5 import QtCore, QtGui\n'), ((952, 995), 'os.path.join', 'os.path.join', (['assets_dir', '"""icons/24x24.png"""'], {}), "(assets_dir, 'icons/24x24.png')\n", (964, 995), False, 'import os, sys\n'), ((997, 1017), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(24)', '(24)'], {}), '(24, 24)\n', (1009, 1017), False, 'from PyQt5 import QtCore, QtGui\n'), ((1039, 1082), 'os.path.join', 'os.path.join', (['assets_dir', '"""icons/32x32.png"""'], {}), "(assets_dir, 'icons/32x32.png')\n", (1051, 1082), False, 'import os, sys\n'), ((1084, 1104), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(32)', '(32)'], {}), '(32, 32)\n', (1096, 1104), False, 'from PyQt5 import QtCore, QtGui\n'), ((1126, 1169), 'os.path.join', 'os.path.join', (['assets_dir', '"""icons/48x48.png"""'], {}), "(assets_dir, 'icons/48x48.png')\n", (1138, 1169), False, 'import os, sys\n'), ((1171, 1191), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(48)', '(48)'], {}), '(48, 48)\n', (1183, 1191), False, 'from PyQt5 import QtCore, QtGui\n'), ((1213, 1256), 'os.path.join', 'os.path.join', (['assets_dir', '"""icons/64x64.png"""'], {}), "(assets_dir, 'icons/64x64.png')\n", (1225, 1256), False, 'import os, sys\n'), ((1258, 1278), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(64)', '(64)'], {}), '(64, 64)\n', (1270, 1278), False, 'from PyQt5 import QtCore, QtGui\n'), ((1300, 1345), 'os.path.join', 'os.path.join', (['assets_dir', '"""icons/128x128.png"""'], {}), "(assets_dir, 'icons/128x128.png')\n", (1312, 1345), False, 'import os, sys\n'), ((1347, 1369), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(128)', '(128)'], {}), '(128, 128)\n', (1359, 1369), False, 'from PyQt5 import QtCore, QtGui\n'), ((1391, 1436), 'os.path.join', 'os.path.join', (['assets_dir', '"""icons/256x256.png"""'], {}), "(assets_dir, 'icons/256x256.png')\n", (1403, 1436), False, 'import os, sys\n'), ((1438, 1460), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(256)', '(256)'], {}), '(256, 256)\n', (1450, 1460), False, 'from PyQt5 import QtCore, QtGui\n'), ((1847, 1917), 'ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID', 'ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID', (['myappid'], {}), '(myappid)\n', (1908, 1917), False, 'import ctypes\n'), ((561, 602), 
'PyQt5.QtGui.QFontDatabase.addApplicationFont', 'QtGui.QFontDatabase.addApplicationFont', (['f'], {}), '(f)\n', (599, 602), False, 'from PyQt5 import QtCore, QtGui\n'), ((618, 670), 'PyQt5.QtGui.QFontDatabase.applicationFontFamilies', 'QtGui.QFontDatabase.applicationFontFamilies', (['font_id'], {}), '(font_id)\n', (661, 670), False, 'from PyQt5 import QtCore, QtGui\n'), ((739, 751), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (747, 751), False, 'import os, sys\n')] |
from environs import Env
env = Env()
env.read_env()
db_host = env.str('DB_HOST', 'localhost')
db_port = env.int('DB_PORT', 27017) | [
"environs.Env"
] | [((32, 37), 'environs.Env', 'Env', ([], {}), '()\n', (35, 37), False, 'from environs import Env\n')] |
import cv2
def image_equalize(imgA, imgB):
    # Use the larger height and width of the two images; cv2.resize expects (width, height)
    new_size = (max(imgA.shape[1], imgB.shape[1]), max(imgA.shape[0], imgB.shape[0]))
new_imgA = cv2.resize(imgA, new_size)
new_imgB = cv2.resize(imgB, new_size)
return new_imgA, new_imgB
| [
"cv2.resize"
] | [((105, 131), 'cv2.resize', 'cv2.resize', (['imgA', 'new_size'], {}), '(imgA, new_size)\n', (115, 131), False, 'import cv2\n'), ((147, 173), 'cv2.resize', 'cv2.resize', (['imgB', 'new_size'], {}), '(imgB, new_size)\n', (157, 173), False, 'import cv2\n')] |
import click
import pandas as pd
@click.command()
@click.option("--input-path", "-i", default = "data/0_raw/", required=True,
help="Path to csv file to be processed.",
)
@click.option("--output-path", "-o", default="data/3_processed/",
help="Path to csv file to store the result.")
def main(input_path, output_path):
""" Runs data processing scripts to read raw data (../0_raw) and convert it into
processed csv file (../3_processed) to be used for further analysis.
"""
print("Preprocessing indian_license_plate.csv")
df = pd.read_csv(input_path+"indian_license_plates.csv", dtype={'image_name':str})
df["image_name"] = df["image_name"] + ".jpg"
df.to_csv(output_path+"processed.csv", index=False)
print("Preprocessed and saved as processed.csv")
if __name__ == '__main__':
main()
| [
"pandas.read_csv",
"click.option",
"click.command"
] | [((35, 50), 'click.command', 'click.command', ([], {}), '()\n', (48, 50), False, 'import click\n'), ((52, 170), 'click.option', 'click.option', (['"""--input-path"""', '"""-i"""'], {'default': '"""data/0_raw/"""', 'required': '(True)', 'help': '"""Path to csv file to be processed."""'}), "('--input-path', '-i', default='data/0_raw/', required=True,\n help='Path to csv file to be processed.')\n", (64, 170), False, 'import click\n'), ((176, 291), 'click.option', 'click.option', (['"""--output-path"""', '"""-o"""'], {'default': '"""data/3_processed/"""', 'help': '"""Path to csv file to store the result."""'}), "('--output-path', '-o', default='data/3_processed/', help=\n 'Path to csv file to store the result.')\n", (188, 291), False, 'import click\n'), ((558, 643), 'pandas.read_csv', 'pd.read_csv', (["(input_path + 'indian_license_plates.csv')"], {'dtype': "{'image_name': str}"}), "(input_path + 'indian_license_plates.csv', dtype={'image_name': str}\n )\n", (569, 643), True, 'import pandas as pd\n')] |
"""Tests for remote.py."""
import logging
from datetime import timedelta
import pytest
from custom_components.hueremote import DOMAIN
from custom_components.hueremote.data_manager import HueSensorData
from custom_components.hueremote.hue_api_response import (
parse_hue_api_response,
parse_rwl,
parse_zgp,
parse_z3_rotary,
)
from custom_components.hueremote.remote import async_setup_platform, HueRemote
from .conftest import (
DEV_ID_REMOTE_1,
entity_test_added_to_hass,
patch_async_track_time_interval,
)
from .api_samples import (
MOCK_RWL,
MOCK_ZGP,
MOCK_Z3_ROTARY,
PARSED_RWL,
PARSED_ZGP,
PARSED_Z3_ROTARY,
)
@pytest.mark.parametrize(
"raw_response, sensor_key, parsed_response, parser_func",
(
(MOCK_ZGP, "ZGP_00:44:23:08", PARSED_ZGP, parse_zgp),
(MOCK_RWL, "RWL_00:17:88:01:10:3e:3a:dc-02", PARSED_RWL, parse_rwl),
(
MOCK_Z3_ROTARY,
"Z3-_ff:ff:00:0f:e7:fd:ba:b7-01-fc00",
PARSED_Z3_ROTARY,
parse_z3_rotary,
),
),
)
def test_parse_remote_raw_data(
raw_response, sensor_key, parsed_response, parser_func, caplog
):
"""Test data parsers for known remotes and check behavior for unknown."""
assert parser_func(raw_response) == parsed_response
unknown_sensor_data = {"modelid": "new_one", "uniqueid": "ff:00:11:22"}
assert parse_hue_api_response(
[raw_response, unknown_sensor_data, raw_response]
) == {sensor_key: parsed_response}
assert len(caplog.messages) == 0
async def test_platform_remote_setup(mock_hass, caplog):
"""Test platform setup for remotes."""
with caplog.at_level(logging.DEBUG):
with patch_async_track_time_interval():
await async_setup_platform(
mock_hass,
{"platform": "hueremote", "scan_interval": timedelta(seconds=3)},
lambda *x: logging.warning("Added remote entity: %s", x[0]),
)
assert DOMAIN in mock_hass.data
data_manager = mock_hass.data[DOMAIN]
assert isinstance(data_manager, HueSensorData)
assert len(data_manager.registered_entities) == 1
assert data_manager._scan_interval == timedelta(seconds=3)
assert len(data_manager.data) == 1
assert DEV_ID_REMOTE_1 in data_manager.data
assert len(data_manager.sensors) == 0
assert len(data_manager.registered_entities) == 1
remote = data_manager.registered_entities[DEV_ID_REMOTE_1]
assert not remote.hass
await entity_test_added_to_hass(data_manager, remote)
# await remote.async_added_to_hass()
assert len(data_manager.sensors) == 1
assert DEV_ID_REMOTE_1 in data_manager.sensors
assert isinstance(remote, HueRemote)
assert remote.hass
assert remote.force_update
assert remote.state == "3_click"
assert remote.icon == "mdi:remote"
assert not remote.should_poll
assert "last_updated" in remote.device_state_attributes
assert remote.unique_id == DEV_ID_REMOTE_1
await remote.async_will_remove_from_hass()
assert len(data_manager.sensors) == 0
assert len(data_manager.registered_entities) == 0
assert not data_manager.available
| [
"logging.warning",
"pytest.mark.parametrize",
"custom_components.hueremote.hue_api_response.parse_hue_api_response",
"datetime.timedelta"
] | [((672, 992), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""raw_response, sensor_key, parsed_response, parser_func"""', "((MOCK_ZGP, 'ZGP_00:44:23:08', PARSED_ZGP, parse_zgp), (MOCK_RWL,\n 'RWL_00:17:88:01:10:3e:3a:dc-02', PARSED_RWL, parse_rwl), (\n MOCK_Z3_ROTARY, 'Z3-_ff:ff:00:0f:e7:fd:ba:b7-01-fc00', PARSED_Z3_ROTARY,\n parse_z3_rotary))"], {}), "(\n 'raw_response, sensor_key, parsed_response, parser_func', ((MOCK_ZGP,\n 'ZGP_00:44:23:08', PARSED_ZGP, parse_zgp), (MOCK_RWL,\n 'RWL_00:17:88:01:10:3e:3a:dc-02', PARSED_RWL, parse_rwl), (\n MOCK_Z3_ROTARY, 'Z3-_ff:ff:00:0f:e7:fd:ba:b7-01-fc00', PARSED_Z3_ROTARY,\n parse_z3_rotary)))\n", (695, 992), False, 'import pytest\n'), ((1395, 1468), 'custom_components.hueremote.hue_api_response.parse_hue_api_response', 'parse_hue_api_response', (['[raw_response, unknown_sensor_data, raw_response]'], {}), '([raw_response, unknown_sensor_data, raw_response])\n', (1417, 1468), False, 'from custom_components.hueremote.hue_api_response import parse_hue_api_response, parse_rwl, parse_zgp, parse_z3_rotary\n'), ((2250, 2270), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(3)'}), '(seconds=3)\n', (2259, 2270), False, 'from datetime import timedelta\n'), ((1870, 1890), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(3)'}), '(seconds=3)\n', (1879, 1890), False, 'from datetime import timedelta\n'), ((1920, 1968), 'logging.warning', 'logging.warning', (['"""Added remote entity: %s"""', 'x[0]'], {}), "('Added remote entity: %s', x[0])\n", (1935, 1968), False, 'import logging\n')] |
# -*- coding: utf-8 -*-
# (c) University of Strathclyde 2021
# Author: <NAME>
#
# Contact: <EMAIL>
#
# <NAME>,
# Strathclyde Institute for Pharmacy and Biomedical Sciences,
# Cathedral Street,
# Glasgow,
# G4 0RE
# Scotland,
# UK
#
# The MIT License
#
# Copyright (c) 2021 University of Strathclyde
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Code to implement the ANIblastall average nucleotide identity method."""
import logging
import os
import platform
import re
import shutil
import subprocess
from pathlib import Path
from . import pyani_config
from . import PyaniException
class PyaniblastallException(PyaniException):
"""ANIblastall-specific exception for pyani."""
def get_version(blast_exe: Path = pyani_config.BLASTALL_DEFAULT) -> str:
r"""Return BLAST blastall version as a string.
:param blast_exe: path to blastall executable
We expect blastall to return a string as, for example
.. code-block:: bash
$ blastall -version
[blastall 2.2.26] ERROR: Number of database sequences to show \
one-line descriptions for (V) [ersion] is bad or out of range [? to ?]
This is concatenated with the OS name.
The following circumstances are explicitly reported as strings
- no executable at passed path
- non-executable file at passed path (this includes cases where the user doesn't have execute permissions on the file)
- no version info returned
- executable cannot be run on this OS
"""
logger = logging.getLogger(__name__)
try:
blastall_path = Path(shutil.which(blast_exe)) # type:ignore
except TypeError:
return f"{blast_exe} is not found in $PATH"
if not blastall_path.is_file(): # no executable
return f"No blastall at {blastall_path}"
# This should catch cases when the file can't be executed by the user
if not os.access(blastall_path, os.X_OK): # file exists but not executable
return f"blastall exists at {blastall_path} but not executable"
if platform.system() == "Darwin":
cmdline = [blast_exe, "-version"]
else:
cmdline = [blast_exe]
try:
result = subprocess.run(
cmdline, # type: ignore
shell=False,
stdout=subprocess.PIPE, # type: ignore
stderr=subprocess.PIPE,
check=False, # blastall doesn't return 0
)
except OSError:
logger.warning("blastall executable will not run", exc_info=True)
return f"blastall exists at {blastall_path} but could not be executed"
version = re.search( # type: ignore
r"(?<=blastall\s)[0-9\.]*", str(result.stderr, "utf-8")
).group()
if 0 == len(version.strip()):
return f"blastall exists at {blastall_path} but could not retrieve version"
return f"{platform.system()}_{version} ({blastall_path})"
| [
"subprocess.run",
"shutil.which",
"platform.system",
"os.access",
"logging.getLogger"
] | [((2508, 2535), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2525, 2535), False, 'import logging\n'), ((2878, 2911), 'os.access', 'os.access', (['blastall_path', 'os.X_OK'], {}), '(blastall_path, os.X_OK)\n', (2887, 2911), False, 'import os\n'), ((3027, 3044), 'platform.system', 'platform.system', ([], {}), '()\n', (3042, 3044), False, 'import platform\n'), ((3167, 3269), 'subprocess.run', 'subprocess.run', (['cmdline'], {'shell': '(False)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'check': '(False)'}), '(cmdline, shell=False, stdout=subprocess.PIPE, stderr=\n subprocess.PIPE, check=False)\n', (3181, 3269), False, 'import subprocess\n'), ((2575, 2598), 'shutil.which', 'shutil.which', (['blast_exe'], {}), '(blast_exe)\n', (2587, 2598), False, 'import shutil\n'), ((3825, 3842), 'platform.system', 'platform.system', ([], {}), '()\n', (3840, 3842), False, 'import platform\n')] |
from functools import partial
from collections.abc import MutableSequence
from . import base_types
from . import tree
from . import transform
from .state import CncState
from . import point as pt
from . import action
class Assembly(tree.Tree, transform.TransformableMixin):
'''tree of assembly items'''
def __init__(self, name=None, parent=None, state=None):
super().__init__(name=name, parent=parent)
if state is not None:
if not isinstance(state, CncState):
raise TypeError('state must be of type CncState, not {}'.format(type(state)))
self._state = state
@property
def state(self):
return self._state
@state.setter
def state(self, new_state):
self._state = new_state
for child in self.children:
child.state = self.state
def check_type(self, other):
assert isinstance(other, Assembly)
def append(self, arg):
super().append(arg)
arg.state = self.state
def last(self):
return self.children[-1]
def get_gcode(self):
return self.get_actions().get_gcode()
def get_points(self):
return self.get_actions().get_points()
def update_children_preorder(self):
pass
def get_preorder_actions(self):
return ()
def get_postorder_actions(self):
return ()
def update_children_postorder(self):
pass
def get_actions(self):
with self.state.excursion():
al = action.ActionList()
for step in self.depth_first_walk():
if step.is_visit:
if step.is_preorder:
step.visited.update_children_preorder()
al.extend(step.visited.get_preorder_actions())
elif step.is_postorder:
al.extend(step.visited.get_postorder_actions())
step.visited.update_children_postorder()
return al
@property
def pos(self):
return self.state['position']
@pos.setter
def pos(self, arg):
self.state['position'] = arg
def pos_offset(self, x=None, y=None, z=None):
self.pos = self.pos.offset(x, y, z)
@property
def root_transforms(self):
'''get transforms stacked all the way to the root'''
result = transform.TransformList()
for walk_step in self.root_walk():
if walk_step.is_visit and walk_step.is_preorder:
if isinstance(walk_step.visited, Assembly):
# extend left
result[0:0] = walk_step.visited.transforms
return result
class SafeJog(Assembly):
def __init__(self, x=0, y=0, z=0, name=None, parent=None, state=None):
super().__init__(name=name, parent=parent, state=state)
self.dest = pt.PointList(((x, y, z), ))
@property
def point(self):
return pt.PointList(self.root_transforms(self.dest.arr))[0]
@property
def changes(self):
return pt.changes(self.pos, self.point)
def get_preorder_actions(self):
al = action.ActionList()
# print(self.changes)
if self.changes:
jog = partial(action.Jog, state=self.state)
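            # Move in three legs: lift to the safe Z, traverse to the destination XY, then lower onto the destination point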
al += jog(x=self.pos.x, y=self.pos.y, z=self.state['z_safe'])
al += jog(x=self.point.x, y=self.point.y, z=self.pos.z)
al += jog(x=self.point.x, y=self.point.y, z=self.point.z)
# print("safejog", self.state['position'])
return al
class SafeZ(Assembly):
def __init__(self, name=None, parent=None, state=None):
super().__init__(name=name, parent=parent, state=state)
def get_preorder_actions(self):
al = action.ActionList()
points = pt.PointList(((0, 0, self.state['z_margin']), ))
point = pt.PointList(self.root_transforms(points.arr))[0]
jog = partial(action.Jog, state=self.state)
al += jog(x=self.pos.x, y=self.pos.y, z=self.state['z_safe'])
return al
| [
"functools.partial"
] | [((3896, 3933), 'functools.partial', 'partial', (['action.Jog'], {'state': 'self.state'}), '(action.Jog, state=self.state)\n', (3903, 3933), False, 'from functools import partial\n'), ((3208, 3245), 'functools.partial', 'partial', (['action.Jog'], {'state': 'self.state'}), '(action.Jog, state=self.state)\n', (3215, 3245), False, 'from functools import partial\n')] |
from django.shortcuts import render
# # from django.shortcuts import get_object_or_404
# from django.http import HttpResponseRedirect
# from django.urls import reverse
import datetime
from django.contrib.auth.decorators import permission_required
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy
from .models import Book, Author, BookInstance
from django.views import generic
from django.contrib.auth.mixins import PermissionRequiredMixin
def index(request):
"""View function for home page of site."""
# Generate counts of some of the main objects
num_books = Book.objects.all().count()
num_instances = BookInstance.objects.all().count()
# # Available copies of books
num_instances_available = \
BookInstance.objects.filter(status__exact='a').count()
num_authors = Author.objects.count() # The 'all()' is implied by default.
# Number of visits to this view, as counted in the session variable.
# num_visits = request.session.get('num_visits', 0)
# request.session['num_visits'] = num_visits + 1
# Render the HTML template index.html
# with the data in the context variable.
return render(request,'index.html',context={
'num_books': num_books,
'num_instances': num_instances,
'num_instances_available': num_instances_available,
'num_authors': num_authors, }
)
class BookListView(generic.ListView):
"""Generic class-based view for a list of books."""
model = Book
paginate_by = 2
class BookDetailView(generic.DetailView):
"""Generic class-based detail view for a book."""
model = Book
class AuthorListView(generic.ListView):
"""Generic class-based list view for a list of authors."""
model = Author
paginate_by = 2
class AuthorDetailView(generic.DetailView):
"""Generic class-based detail view for an author."""
model = Author
@permission_required('catalog.can_mark_returned')
class LoanedBooksAllListView(PermissionRequiredMixin, generic.ListView):
# Generic class-based view listing all books
# on loan. Only visible to users with can_mark_returned permission."""
model = BookInstance
permission_required = 'catalog.can_mark_returned'
template_name = 'catalog/bookinstance_list_borrowed_all.html'
paginate_by = 2
class AuthorCreate(PermissionRequiredMixin, CreateView):
model = Author
fields = '__all__'
initial = {'date_of_death': '05/01/2018'}
permission_required = 'catalog.can_mark_returned'
class AuthorUpdate(PermissionRequiredMixin, UpdateView):
model = Author
fields = ['first_name', 'last_name', 'date_of_birth', 'date_of_death']
permission_required = 'catalog.can_mark_returned'
class AuthorDelete(PermissionRequiredMixin, DeleteView):
model = Author
success_url = reverse_lazy('authors')
permission_required = 'catalog.can_mark_returned'
# Classes created for the forms challenge
class BookCreate(PermissionRequiredMixin, CreateView):
model = Book
fields = '__all__'
permission_required = 'catalog.can_mark_returned'
class BookUpdate(PermissionRequiredMixin, UpdateView):
model = Book
fields = '__all__'
permission_required = 'catalog.can_mark_returned'
class BookDelete(PermissionRequiredMixin, DeleteView):
model = Book
success_url = reverse_lazy('books')
permission_required = 'catalog.can_mark_returned'
| [
"django.shortcuts.render",
"django.urls.reverse_lazy",
"django.contrib.auth.decorators.permission_required"
] | [((2099, 2147), 'django.contrib.auth.decorators.permission_required', 'permission_required', (['"""catalog.can_mark_returned"""'], {}), "('catalog.can_mark_returned')\n", (2118, 2147), False, 'from django.contrib.auth.decorators import permission_required\n'), ((1209, 1392), 'django.shortcuts.render', 'render', (['request', '"""index.html"""'], {'context': "{'num_books': num_books, 'num_instances': num_instances,\n 'num_instances_available': num_instances_available, 'num_authors':\n num_authors}"}), "(request, 'index.html', context={'num_books': num_books,\n 'num_instances': num_instances, 'num_instances_available':\n num_instances_available, 'num_authors': num_authors})\n", (1215, 1392), False, 'from django.shortcuts import render\n'), ((3014, 3037), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""authors"""'], {}), "('authors')\n", (3026, 3037), False, 'from django.urls import reverse_lazy\n'), ((3528, 3549), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""books"""'], {}), "('books')\n", (3540, 3549), False, 'from django.urls import reverse_lazy\n')] |
"""
# Read report and generate messages to fill missing scenes
#### Utility utilization
The DAG can be parameterized with the run time configuration `scenes_limit`, which receives an INT as value.
* The option scenes_limit limits the number of scenes to be read from the report,
and therefore limits the number of messages to be sent
#### example conf in json format
{
"scenes_limit":10
}
"""
import gzip
import json
import logging
import traceback
from datetime import datetime
from typing import Optional
from airflow import DAG
from airflow.contrib.hooks.aws_sqs_hook import SQSHook
from airflow.operators.python_operator import PythonOperator
from odc.aws.queue import publish_messages
from infra.connections import CONN_LANDSAT_SYNC
from infra.s3_buckets import LANDSAT_SYNC_BUCKET_NAME
from infra.sqs_queues import LANDSAT_SYNC_USGS_SNS_FILTER_SQS_NAME
from infra.variables import REGION
from landsat_scenes_sync.variables import STATUS_REPORT_FOLDER_NAME
from utility.utility_slackoperator import task_fail_slack_alert, task_success_slack_alert
from utils.aws_utils import S3
REPORTING_PREFIX = "status-report/"
# This process is manually run
SCHEDULE_INTERVAL = None
default_args = {
"owner": "RODRIGO",
"start_date": datetime(2021, 6, 7),
"email": ["<EMAIL>"],
"email_on_failure": True,
"email_on_success": True,
"email_on_retry": False,
"retries": 0,
"version": "0.0.1",
"on_failure_callback": task_fail_slack_alert,
}
def post_messages(message_list) -> None:
"""
Publish messages
:param message_list:(list) list of messages
:return:(None)
"""
count = 0
messages = []
sqs_conn = SQSHook(aws_conn_id=CONN_LANDSAT_SYNC)
sqs_hook = sqs_conn.get_resource_type(
resource_type="sqs", region_name=REGION
)
queue = sqs_hook.get_queue_by_name(QueueName=LANDSAT_SYNC_USGS_SNS_FILTER_SQS_NAME)
logging.info("Sending messages")
for message_dict in message_list:
message = {
"Id": str(count),
"MessageBody": str(json.dumps(message_dict)),
}
messages.append(message)
count += 1
# Send 10 messages per time
if count % 10 == 0:
publish_messages(queue, messages)
messages = []
# Post the last messages if there are any
if len(messages) > 0:
publish_messages(queue, messages)
logging.info(f"{count} messages sent successfully")
def find_latest_report(landsat: str) -> str:
"""
Function to find the latest gap report
:param landsat:(str)satellite name
:return:(str) return the latest report file name
"""
continuation_token = None
list_reports = []
while True:
s3 = S3(conn_id=CONN_LANDSAT_SYNC)
resp = s3.list_objects(
bucket_name=LANDSAT_SYNC_BUCKET_NAME,
region=REGION,
prefix=f"{STATUS_REPORT_FOLDER_NAME}/",
continuation_token=continuation_token,
)
if not resp.get("Contents"):
raise Exception(
f"Report not found at "
f"{LANDSAT_SYNC_BUCKET_NAME}/{STATUS_REPORT_FOLDER_NAME}/"
f" - returned {resp}"
)
list_reports.extend(
[
obj["Key"]
for obj in resp["Contents"]
if landsat in obj["Key"] and "orphaned" not in obj["Key"]
]
)
# The S3 API is paginated, returning up to 1000 keys at a time.
if resp.get("NextContinuationToken"):
continuation_token = resp["NextContinuationToken"]
else:
break
list_reports.sort()
return list_reports[-1] if list_reports else ""
def build_message(missing_scene_paths, update_stac):
"""
"""
message_list = []
for path in missing_scene_paths:
landsat_product_id = str(path.strip("/").split("/")[-1])
if not landsat_product_id:
raise Exception(f'It was not possible to build product ID from path {path}')
message_list.append(
{
"Message": {
"landsat_product_id": landsat_product_id,
"s3_location": str(path),
"update_stac": update_stac
}
}
)
return message_list
def fill_the_gap(landsat: str, scenes_limit: Optional[int] = None) -> None:
"""
Function to retrieve the latest gap report and create messages to the filter queue process.
:param landsat:(str) satellite name
    :param scenes_limit:(int) limit of how many scenes will be filled
:return:(None)
"""
try:
logging.info("Looking for latest report")
latest_report = find_latest_report(landsat=landsat)
logging.info(f"Latest report found {latest_report}")
if not latest_report:
logging.error("Report not found")
raise RuntimeError("Report not found!")
else:
logging.info("Reading missing scenes from the report")
s3 = S3(conn_id=CONN_LANDSAT_SYNC)
missing_scene_file_gzip = s3.get_s3_contents_and_attributes(
bucket_name=LANDSAT_SYNC_BUCKET_NAME,
region=REGION,
key=latest_report,
)
# This should just use Pandas. It's already a dependency.
missing_scene_paths = [
scene_path
for scene_path in gzip.decompress(missing_scene_file_gzip).decode("utf-8").split("\n")
if scene_path
]
logging.info(f"Number of scenes found {len(missing_scene_paths)}")
logging.info(f"Example scenes: {missing_scene_paths[0:10]}")
logging.info(f"Limited: {'No limit' if scenes_limit else scenes_limit}")
if scenes_limit:
missing_scene_paths = missing_scene_paths[:int(scenes_limit)]
update_stac = False
if 'update' in latest_report:
logging.info('FORCED UPDATE FLAGGED!')
update_stac = True
messages_to_send = build_message(
missing_scene_paths=missing_scene_paths,
update_stac=update_stac
)
logging.info("Publishing messages")
post_messages(message_list=messages_to_send)
except Exception as error:
logging.error(error)
# print traceback but does not stop execution
traceback.print_exc()
raise error
with DAG(
"landsat_scenes_fill_the_gap",
default_args=default_args,
schedule_interval=SCHEDULE_INTERVAL,
tags=["Landsat_scenes", "fill the gap"],
catchup=False,
) as dag:
PROCESSES = []
satellites = [
"landsat_8",
"landsat_7",
"landsat_5"
]
for sat in satellites:
PROCESSES.append(
PythonOperator(
task_id=f"{sat}_fill_the_gap",
python_callable=fill_the_gap,
op_kwargs=dict(landsat=sat, scenes_limit="{{ dag_run.conf.scenes_limit }}"),
on_success_callback=task_success_slack_alert,
)
)
PROCESSES
| [
"logging.error",
"airflow.contrib.hooks.aws_sqs_hook.SQSHook",
"traceback.print_exc",
"airflow.DAG",
"odc.aws.queue.publish_messages",
"gzip.decompress",
"json.dumps",
"datetime.datetime",
"logging.info",
"utils.aws_utils.S3"
] | [((1248, 1268), 'datetime.datetime', 'datetime', (['(2021)', '(6)', '(7)'], {}), '(2021, 6, 7)\n', (1256, 1268), False, 'from datetime import datetime\n'), ((1674, 1712), 'airflow.contrib.hooks.aws_sqs_hook.SQSHook', 'SQSHook', ([], {'aws_conn_id': 'CONN_LANDSAT_SYNC'}), '(aws_conn_id=CONN_LANDSAT_SYNC)\n', (1681, 1712), False, 'from airflow.contrib.hooks.aws_sqs_hook import SQSHook\n'), ((1903, 1935), 'logging.info', 'logging.info', (['"""Sending messages"""'], {}), "('Sending messages')\n", (1915, 1935), False, 'import logging\n'), ((2402, 2453), 'logging.info', 'logging.info', (['f"""{count} messages sent successfully"""'], {}), "(f'{count} messages sent successfully')\n", (2414, 2453), False, 'import logging\n'), ((6531, 6693), 'airflow.DAG', 'DAG', (['"""landsat_scenes_fill_the_gap"""'], {'default_args': 'default_args', 'schedule_interval': 'SCHEDULE_INTERVAL', 'tags': "['Landsat_scenes', 'fill the gap']", 'catchup': '(False)'}), "('landsat_scenes_fill_the_gap', default_args=default_args,\n schedule_interval=SCHEDULE_INTERVAL, tags=['Landsat_scenes',\n 'fill the gap'], catchup=False)\n", (6534, 6693), False, 'from airflow import DAG\n'), ((2363, 2396), 'odc.aws.queue.publish_messages', 'publish_messages', (['queue', 'messages'], {}), '(queue, messages)\n', (2379, 2396), False, 'from odc.aws.queue import publish_messages\n'), ((2733, 2762), 'utils.aws_utils.S3', 'S3', ([], {'conn_id': 'CONN_LANDSAT_SYNC'}), '(conn_id=CONN_LANDSAT_SYNC)\n', (2735, 2762), False, 'from utils.aws_utils import S3\n'), ((4675, 4716), 'logging.info', 'logging.info', (['"""Looking for latest report"""'], {}), "('Looking for latest report')\n", (4687, 4716), False, 'import logging\n'), ((4785, 4837), 'logging.info', 'logging.info', (['f"""Latest report found {latest_report}"""'], {}), "(f'Latest report found {latest_report}')\n", (4797, 4837), False, 'import logging\n'), ((2222, 2255), 'odc.aws.queue.publish_messages', 'publish_messages', (['queue', 'messages'], {}), '(queue, messages)\n', (2238, 2255), False, 'from odc.aws.queue import publish_messages\n'), ((4881, 4914), 'logging.error', 'logging.error', (['"""Report not found"""'], {}), "('Report not found')\n", (4894, 4914), False, 'import logging\n'), ((4993, 5047), 'logging.info', 'logging.info', (['"""Reading missing scenes from the report"""'], {}), "('Reading missing scenes from the report')\n", (5005, 5047), False, 'import logging\n'), ((5066, 5095), 'utils.aws_utils.S3', 'S3', ([], {'conn_id': 'CONN_LANDSAT_SYNC'}), '(conn_id=CONN_LANDSAT_SYNC)\n', (5068, 5095), False, 'from utils.aws_utils import S3\n'), ((5677, 5737), 'logging.info', 'logging.info', (['f"""Example scenes: {missing_scene_paths[0:10]}"""'], {}), "(f'Example scenes: {missing_scene_paths[0:10]}')\n", (5689, 5737), False, 'import logging\n'), ((5751, 5823), 'logging.info', 'logging.info', (['f"""Limited: {\'No limit\' if scenes_limit else scenes_limit}"""'], {}), '(f"Limited: {\'No limit\' if scenes_limit else scenes_limit}")\n', (5763, 5823), False, 'import logging\n'), ((6267, 6302), 'logging.info', 'logging.info', (['"""Publishing messages"""'], {}), "('Publishing messages')\n", (6279, 6302), False, 'import logging\n'), ((6399, 6419), 'logging.error', 'logging.error', (['error'], {}), '(error)\n', (6412, 6419), False, 'import logging\n'), ((6482, 6503), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (6501, 6503), False, 'import traceback\n'), ((2055, 2079), 'json.dumps', 'json.dumps', (['message_dict'], {}), '(message_dict)\n', (2065, 2079), False, 'import 
json\n'), ((6022, 6060), 'logging.info', 'logging.info', (['"""FORCED UPDATE FLAGGED!"""'], {}), "('FORCED UPDATE FLAGGED!')\n", (6034, 6060), False, 'import logging\n'), ((5472, 5512), 'gzip.decompress', 'gzip.decompress', (['missing_scene_file_gzip'], {}), '(missing_scene_file_gzip)\n', (5487, 5512), False, 'import gzip\n')] |
"""Increase sql path column length to 128
Revision ID: 799310dca712
Revises: ca514840f404
Create Date: 2020-04-09 11:34:05.456439
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '799310dca712'
down_revision = 'ca514840f404'
branch_labels = None
depends_on = None
def upgrade():
op.alter_column('flask_usage', 'path', type_=sa.String(128), existing_type=sa.String(length=32))
def downgrade():
op.alter_column('flask_usage', 'path', type_=sa.String(32), existing_type=sa.String(length=128))
| [
"sqlalchemy.String"
] | [((389, 403), 'sqlalchemy.String', 'sa.String', (['(128)'], {}), '(128)\n', (398, 403), True, 'import sqlalchemy as sa\n'), ((419, 439), 'sqlalchemy.String', 'sa.String', ([], {'length': '(32)'}), '(length=32)\n', (428, 439), True, 'import sqlalchemy as sa\n'), ((509, 522), 'sqlalchemy.String', 'sa.String', (['(32)'], {}), '(32)\n', (518, 522), True, 'import sqlalchemy as sa\n'), ((538, 559), 'sqlalchemy.String', 'sa.String', ([], {'length': '(128)'}), '(length=128)\n', (547, 559), True, 'import sqlalchemy as sa\n')] |
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError
from asylum.tests.fixtures.full import generate_all
class Command(BaseCommand):
help = 'Generates full set of test data'
def add_arguments(self, parser):
pass
def handle(self, *args, **options):
generate_all()
| [
"asylum.tests.fixtures.full.generate_all"
] | [((318, 332), 'asylum.tests.fixtures.full.generate_all', 'generate_all', ([], {}), '()\n', (330, 332), False, 'from asylum.tests.fixtures.full import generate_all\n')] |
import numpy as np
import pyvista as pv
from pylie import SE3
class Viewer3D:
"""Visualises the lab in 3D"""
def __init__(self):
"""Sets up the 3D viewer"""
self._plotter = pv.Plotter()
# Add scene origin and plane
scene_plane = pv.Plane(i_size=1000, j_size=1000)
self._plotter.add_mesh(scene_plane, show_edges=True, style='wireframe')
self._add_axis(SE3(), 100)
# Set camera.
self._plotter.camera.position = (100, 1500, -500)
self._plotter.camera.up = (-0.042739, -0.226979, -0.972961)
self._plotter.camera.focal_point = (100, 300, -200)
self._plotter.show(title="3D visualization", interactive_update=True)
def add_body_axes(self, pose_local_body: SE3):
"""Add axes representing the body pose to the 3D world
:param pose_local_body: The pose of the body in the local coordinate system.
"""
self._add_axis(pose_local_body)
def add_camera_axes(self, pose_local_camera: SE3):
"""Add axes representing the camera pose to the 3D world
:param pose_local_camera: The pose of the camera in the local coordinate system.
"""
self._add_axis(pose_local_camera)
def add_camera_frustum(self, camera_model, image):
"""Add a frustum representing the camera model and image to the 3D world"""
self._add_frustum(camera_model, image)
def _add_axis(self, pose: SE3, scale=10.0):
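        # Draw a small sphere at the pose origin plus red/green/blue arrows for the x/y/z axes, all transformed by the pose.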
T = pose.to_matrix()
point = pv.Sphere(radius=0.1*scale)
point.transform(T)
self._plotter.add_mesh(point)
x_arrow = pv.Arrow(direction=(1.0, 0.0, 0.0), scale=scale)
x_arrow.transform(T)
self._plotter.add_mesh(x_arrow, color='red')
y_arrow = pv.Arrow(direction=(0.0, 1.0, 0.0), scale=scale)
y_arrow.transform(T)
self._plotter.add_mesh(y_arrow, color='green')
z_arrow = pv.Arrow(direction=(0.0, 0.0, 1.0), scale=scale)
z_arrow.transform(T)
self._plotter.add_mesh(z_arrow, color='blue')
def _add_frustum(self, camera_model, image, scale=20.0):
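        # Build a wireframe pyramid from the normalised image corners to the camera centre, and texture the image onto the (scaled) image plane.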
S = camera_model.pose_world_camera.to_matrix() @ np.diag([scale, scale, scale, 1.0])
img_height, img_width = image.shape[:2]
point_bottom_left = np.squeeze(camera_model.pixel_to_normalised(np.array([img_width-1., img_height-1.])))
point_bottom_right = np.squeeze(camera_model.pixel_to_normalised(np.array([0., img_height-1.])))
point_top_left = np.squeeze(camera_model.pixel_to_normalised(np.array([0., 0.])))
point_top_right = np.squeeze(camera_model.pixel_to_normalised(np.array([img_width-1., 0.])))
point_focal = np.zeros([3])
pyramid = pv.Pyramid([point_bottom_left, point_bottom_right, point_top_left, point_top_right, point_focal])
pyramid.transform(S)
rectangle = pv.Rectangle([point_bottom_left, point_bottom_right, point_top_left, point_top_right])
rectangle.texture_map_to_plane(inplace=True)
rectangle.transform(S)
image_flipped_rgb = image[::-1, :, ::-1].copy()
tex = pv.numpy_to_texture(image_flipped_rgb)
self._plotter.add_mesh(pyramid, show_edges=True, style='wireframe')
self._plotter.add_mesh(rectangle, texture=tex, opacity=0.9)
def update(self, time=500):
self._plotter.update(time)
def show(self):
self._plotter.show()
| [
"numpy.zeros",
"pyvista.Plotter",
"pyvista.Pyramid",
"pyvista.numpy_to_texture",
"pyvista.Plane",
"numpy.array",
"pyvista.Rectangle",
"pyvista.Arrow",
"numpy.diag",
"pylie.SE3",
"pyvista.Sphere"
] | [((200, 212), 'pyvista.Plotter', 'pv.Plotter', ([], {}), '()\n', (210, 212), True, 'import pyvista as pv\n'), ((273, 307), 'pyvista.Plane', 'pv.Plane', ([], {'i_size': '(1000)', 'j_size': '(1000)'}), '(i_size=1000, j_size=1000)\n', (281, 307), True, 'import pyvista as pv\n'), ((1511, 1540), 'pyvista.Sphere', 'pv.Sphere', ([], {'radius': '(0.1 * scale)'}), '(radius=0.1 * scale)\n', (1520, 1540), True, 'import pyvista as pv\n'), ((1623, 1671), 'pyvista.Arrow', 'pv.Arrow', ([], {'direction': '(1.0, 0.0, 0.0)', 'scale': 'scale'}), '(direction=(1.0, 0.0, 0.0), scale=scale)\n', (1631, 1671), True, 'import pyvista as pv\n'), ((1773, 1821), 'pyvista.Arrow', 'pv.Arrow', ([], {'direction': '(0.0, 1.0, 0.0)', 'scale': 'scale'}), '(direction=(0.0, 1.0, 0.0), scale=scale)\n', (1781, 1821), True, 'import pyvista as pv\n'), ((1925, 1973), 'pyvista.Arrow', 'pv.Arrow', ([], {'direction': '(0.0, 0.0, 1.0)', 'scale': 'scale'}), '(direction=(0.0, 0.0, 1.0), scale=scale)\n', (1933, 1973), True, 'import pyvista as pv\n'), ((2695, 2708), 'numpy.zeros', 'np.zeros', (['[3]'], {}), '([3])\n', (2703, 2708), True, 'import numpy as np\n'), ((2728, 2829), 'pyvista.Pyramid', 'pv.Pyramid', (['[point_bottom_left, point_bottom_right, point_top_left, point_top_right,\n point_focal]'], {}), '([point_bottom_left, point_bottom_right, point_top_left,\n point_top_right, point_focal])\n', (2738, 2829), True, 'import pyvista as pv\n'), ((2876, 2966), 'pyvista.Rectangle', 'pv.Rectangle', (['[point_bottom_left, point_bottom_right, point_top_left, point_top_right]'], {}), '([point_bottom_left, point_bottom_right, point_top_left,\n point_top_right])\n', (2888, 2966), True, 'import pyvista as pv\n'), ((3118, 3156), 'pyvista.numpy_to_texture', 'pv.numpy_to_texture', (['image_flipped_rgb'], {}), '(image_flipped_rgb)\n', (3137, 3156), True, 'import pyvista as pv\n'), ((411, 416), 'pylie.SE3', 'SE3', ([], {}), '()\n', (414, 416), False, 'from pylie import SE3\n'), ((2176, 2211), 'numpy.diag', 'np.diag', (['[scale, scale, scale, 1.0]'], {}), '([scale, scale, scale, 1.0])\n', (2183, 2211), True, 'import numpy as np\n'), ((2334, 2379), 'numpy.array', 'np.array', (['[img_width - 1.0, img_height - 1.0]'], {}), '([img_width - 1.0, img_height - 1.0])\n', (2342, 2379), True, 'import numpy as np\n'), ((2449, 2482), 'numpy.array', 'np.array', (['[0.0, img_height - 1.0]'], {}), '([0.0, img_height - 1.0])\n', (2457, 2482), True, 'import numpy as np\n'), ((2550, 2570), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (2558, 2570), True, 'import numpy as np\n'), ((2641, 2673), 'numpy.array', 'np.array', (['[img_width - 1.0, 0.0]'], {}), '([img_width - 1.0, 0.0])\n', (2649, 2673), True, 'import numpy as np\n')] |
import asyncio
from unittest.mock import MagicMock
class MockException(Exception):
pass
# AsyncMock is new in Python 3.8
class AsyncMock(MagicMock):
async def __call__(self, *args, **kwargs): # pylint: disable = invalid-overridden-method, useless-super-delegation
return super().__call__(*args, **kwargs)
class CancellableAsyncMock(AsyncMock): # pylint: disable = too-many-ancestors
async def __call__(self, *args, **kwargs):
await asyncio.sleep(1)
return await super().__call__(*args, **kwargs)
| [
"asyncio.sleep"
] | [((445, 461), 'asyncio.sleep', 'asyncio.sleep', (['(1)'], {}), '(1)\n', (458, 461), False, 'import asyncio\n')] |
import sys
import os
import logging
sys.path.append(os.environ["PWD"])
from pyga import *
population_size = 10
elite_count = 2
crossover_points = 2
crossover_mutate_probability = 0.2
max_weight = 15
city_names = ['a', 'b', 'c', 'd']
distances = [
# a b c d
[ 0, 130, 180, 300], # a
[130, 0, 320, 350], # b
[180, 320, 0, 360], # c
[300, 350, 360, 0] # d
]
class SalesmanFitnessEvaluator(FitnessEvaluator):
def __init__(self, distances):
super().__init__()
self.distances = distances
def get_fitness(self, candidate, population):
total_distance = 0
cities_order = candidate.data
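        # Sum the tour length, wrapping from the last city back to the first.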
for i, city in enumerate(cities_order):
next_city = cities_order[i+1] if i+1 < len(cities_order) else cities_order[0]
total_distance += self.distances[city][next_city]
return Fitness(-total_distance, is_natural=False)
def print_results(result):
print('Visit cities in this order:')
cities_order = result.data
for i, city in enumerate(cities_order):
next_city = cities_order[i + 1] if i + 1 < len(cities_order) else cities_order[0]
print('- ', city_names[city], distances[city][next_city])
print('Total distance: ', abs(result.fitness))
logging.basicConfig(level=logging.DEBUG)
random = Random()
probability = Probability(crossover_mutate_probability, random)
candidate_factory = ListFactory(random, len(distances)-1)
crossover = ListOrderCrossover(probability, random)
mutation = ListOrderMutation(probability, random, 2)
operator = PipelineOperator()
operator.append_operator(crossover)
operator.append_operator(mutation)
fitness_evaluator = SalesmanFitnessEvaluator(distances)
selection_strategy = RouletteWheelSelection(random)
termination_condition = Stagnation(100)
engine = GenerationalEvolutionEngine()
engine.create(candidate_factory, operator, fitness_evaluator, selection_strategy)
population = engine.evolve(population_size, elite_count, termination_condition)
print_results(population.get_best())
| [
"sys.path.append",
"logging.basicConfig"
] | [((37, 71), 'sys.path.append', 'sys.path.append', (["os.environ['PWD']"], {}), "(os.environ['PWD'])\n", (52, 71), False, 'import sys\n'), ((1272, 1312), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (1291, 1312), False, 'import logging\n')] |
""" Add a species to your database
usiing a log file
"""
import sys
import os
import autofile
import automol
from mechanalyzer.inf import thy as tinfo
from mechanalyzer.inf import rxn as rinfo
from mechanalyzer.inf import spc as sinfo
import elstruct
import autorun
from mechroutines.es._routines.conformer import _saved_cnf_info
from mechroutines.es._routines.conformer import _sym_unique
from mechroutines.es._routines.conformer import _save_unique_parsed_conformer
from mechroutines.es._routines.conformer import _geo_unique
from mechroutines.es._routines.conformer import _fragment_ring_geo
from mechroutines.es._routines._sadpt import save_saddle_point
from mechlib.reaction.rxnid import _id_reaction
THEORY_DCT = {
'lvl_wbs': {
'orb_res': 'RU',
'program': 'gaussian09',
'method': 'wb97xd',
'basis': '6-31g*'
},
'lvl_wbm': {
'orb_res': 'RU',
'program': 'gaussian09',
'method': 'wb97xd',
'basis': '6-31+g*'
},
'lvl_wbt': {
'orb_res': 'RU',
'program': 'gaussian09',
'method': 'wb97xd',
'basis': 'cc-pvtz'},
'lvl_m06s': {
'orb_res': 'RU',
'program': 'gaussian09',
'method': 'm062x',
'basis': '6-31g*'
},
'lvl_m06m': {
'orb_res': 'RU',
'program': 'gaussian09',
'method': 'm062x',
'basis': '6-31+g*'
},
'lvl_m06t': {
'orb_res': 'RU',
'program': 'gaussian09',
'method': 'm062x',
'basis': 'cc-pvtz'},
'lvl_b2d': {
'orb_res': 'RU',
'program': 'gaussian09',
'method': 'b2plypd3',
'basis': 'cc-pvdz'},
'lvl_b2t': {
'orb_res': 'RU',
'program': 'gaussian09',
'method': 'b2plypd3',
'basis': 'cc-pvtz'},
'lvl_b2q': {
'orb_res': 'RU',
'program': 'gaussian09',
'method': 'b2plypd3',
'basis': 'cc-pvqz'
},
'lvl_b3s': {
'orb_res': 'RU',
'program': 'gaussian09',
'method': 'b3lyp',
'basis': '6-31g*'
},
'lvl_b3mg': {
'orb_res': 'RU',
'program': 'gaussian09',
'method': 'b3lyp',
'basis': '6-311g**'
},
'lvl_b3t': {
'orb_res': 'RU',
'program': 'gaussian09',
'method': 'b3lyp',
'basis': 'cc-pvtz'},
'cc_lvl_d': {
'orb_res': 'RR',
'program': 'molpro2015',
'method': 'ccsd(t)', 'basis': 'cc-pvdz'},
'cc_lvl_t': {
'orb_res': 'RR',
'program': 'molpro2015',
'method': 'ccsd(t)', 'basis': 'cc-pvtz'},
'cc_lvl_q': {
'orb_res': 'RR',
'program': 'molpro2015',
'method': 'ccsd(t)', 'basis': 'cc-pvqz'
},
'cc_lvl_df': {
'orb_res': 'RR',
'program': 'molpro2015',
'method': 'ccsd(t)-f12',
'basis': 'cc-pvdz-f12'
},
'cc_lvl_tf': {
'orb_res': 'RR',
'program': 'molpro2015',
'method': 'ccsd(t)-f12',
'basis': 'cc-pvtz-f12'
},
'cc_lvl_qf': {
'orb_res': 'RR',
'program': 'molpro2015',
'method': 'ccsd(t)-f12',
'basis': 'cc-pvqz-f12'
},
'mlvl_cas_dz': {
'orb_res': 'RR',
'program': 'molpro2015',
'method': 'caspt2',
'basis': 'cc-pvdz'},
'mlvl_cas_tz': {
'orb_res': 'RR',
'program': 'molpro2015',
'method': 'caspt2',
'basis': 'cc-pvtz'}}
def parse_user_locs(insert_dct):
rid = insert_dct['rid']
cid = insert_dct['cid']
if rid is None:
rid = autofile.schema.generate_new_ring_id()
if cid is None:
cid = autofile.schema.generate_new_conformer_id()
return (rid, cid)
def parse_user_species(insert_dct):
smi = insert_dct['smiles']
ich = insert_dct['inchi']
mult = insert_dct['mult']
chg = insert_dct['charge']
if ich is None and smi is None:
print(
            'Error: user did not specify species ' +
'with an inchi or smiles in input')
sys.exit()
if ich is None:
ich = automol.smiles.inchi(smi)
if not automol.inchi.is_complete(ich):
ich = automol.inchi.add_stereo(ich)
if mult is None:
print('Error: user did not specify mult in input')
sys.exit()
if chg is None:
print('Error: user did not specify charge in input')
sys.exit()
return sinfo.from_data(ich, chg, mult)
def parse_user_reaction(insert_dct):
smis = insert_dct['smiles']
ichs = insert_dct['inchi']
mults = insert_dct['mult']
chgs = insert_dct['charge']
rxn_class = insert_dct['rxn_class']
# zrxn_file = insert_dct['zrxn_file']
if ichs is None:
ichs = [[], []]
for smi in smis[0]:
ichs[0].append(automol.smiles.inchi(smi))
for smi in smis[1]:
ichs[1].append(automol.smiles.inchi(smi))
for idx, ich in enumerate(ichs[0]):
if not automol.inchi.is_complete(ich):
ich = automol.inchi.add_stereo(ich)
ichs[0][idx] = ich
for idx, ich in enumerate(ichs[1]):
if not automol.inchi.is_complete(ich):
ich = automol.inchi.add_stereo(ich)
ichs[1][idx] = ich
if mults is None:
print('Error: user did not specify mults in input')
sys.exit()
if chgs is None:
print('Error: user did not specify charges in input')
sys.exit()
flat_ichs = sum(ichs, [])
if len(flat_ichs) != len(mults):
print(
'Error: number of species does not match number of mults')
sys.exit()
if len(flat_ichs) != len(chgs):
print(
'Error: number of species does not match number of charges')
sys.exit()
idx = 0
rxn_muls = [[], []]
rxn_chgs = [[], []]
for ich in ichs[0]:
mults_allowed = automol.graph.possible_spin_multiplicities(
automol.inchi.graph(ich, stereo=False))
if mults[idx] not in mults_allowed:
print(
'user specified mult of {}'.format(mults[idx]) +
                ' is not an allowed multiplicity for inchi {}'.format(ich))
sys.exit()
rxn_muls[0].append(mults[idx])
rxn_chgs[0].append(chgs[idx])
idx += 1
for ich in ichs[1]:
mults_allowed = automol.graph.possible_spin_multiplicities(
automol.inchi.graph(ich, stereo=False))
if mults[idx] not in mults_allowed:
print(
'user specified mult of {}'.format(mults[idx]) +
                ' is not an allowed multiplicity for inchi {}'.format(ich))
sys.exit()
rxn_muls[1].append(mults[idx])
rxn_chgs[1].append(chgs[idx])
idx += 1
ts_mult = insert_dct['ts_mult']
if ts_mult is None:
print(
            'Error: user did not specify ts_mult')
sys.exit()
rxn_info = rinfo.sort((ichs, rxn_chgs, rxn_muls, ts_mult))
ts_info = rinfo.ts_info(rxn_info)
# if zrxn_file is not None:
# zrxn_str = autofile.io_.read_file(zrxn_file)
# zrxns = [automol.reac.from_string(zrxn_str)]
# else:
# zrxns, _ = _id_reaction(rxn_info)
if rxn_class is None:
print(
'Error: user did not specify rxn_class')
sys.exit()
return rxn_info, ts_info, rxn_class
def parse_user_theory(insert_dct):
# Get input method explicitly inputted
program = insert_dct['program']
method = insert_dct['method']
basis = insert_dct['basis']
orb_res = insert_dct['orb_res']
# Get input method from theory dictionary
theory = insert_dct['theory']
if theory is None:
if program is None:
print('Error: user did not specify program in input')
sys.exit()
elif method is None:
print('Error: user did not specify method in input')
sys.exit()
elif basis is None:
print('Error: user did not specify basis in input')
sys.exit()
elif orb_res is None:
print('Error: user did not specify orb_res in input')
sys.exit()
else:
thy_info = (program, method, basis, orb_res)
else:
if theory in THEORY_DCT:
thy_info = tinfo.from_dct(THEORY_DCT[theory])
else:
print(
'Error: user did not specify a theory {}'.format(theory) +
                ' that is in the THEORY_DCT. ' +
                'Please add it to the dct in the script or use program/method/basis/orb_res ' +
                'keywords instead of theory')
sys.exit()
return thy_info
def create_species_filesystems(prefix, spc_info, mod_thy_info, locs=None):
# species filesystem
spc_fs = autofile.fs.species(prefix)
spc_fs[-1].create(spc_info)
spc_prefix = spc_fs[-1].path(spc_info)
# theory filesystem
thy_fs = autofile.fs.theory(spc_prefix)
thy_fs[-1].create(mod_thy_info[1:])
thy_prefix = thy_fs[-1].path(mod_thy_info[1:])
# conformer
cnf_fs = autofile.fs.conformer(thy_prefix)
if locs is not None:
cnf_fs[-1].create(locs)
cnf_prefix = cnf_fs[-1].path(locs)
else:
cnf_prefix = None
return (
(spc_fs, thy_fs, cnf_fs), (spc_prefix, thy_prefix, cnf_prefix))
def create_reaction_filesystems(
prefix, rxn_info, mod_thy_info, ts_locs=None, locs=None):
# species filesystem
print('rxn_info', rxn_info)
rxn_fs = autofile.fs.reaction(prefix)
sort_rxn_info = rinfo.sort(rxn_info, scheme='autofile')
rxn_fs[-1].create(sort_rxn_info)
rxn_prefix = rxn_fs[-1].path(sort_rxn_info)
# theory filesystem
thy_fs = autofile.fs.theory(rxn_prefix)
thy_fs[-1].create(mod_thy_info[1:])
thy_prefix = thy_fs[-1].path(mod_thy_info[1:])
if ts_locs is None:
ts_locs = (0,)
ts_fs = autofile.fs.transition_state(thy_prefix)
ts_fs[-1].create(ts_locs)
ts_prefix = ts_fs[-1].path(ts_locs)
# conformer
cnf_fs = autofile.fs.conformer(ts_prefix)
if locs is not None:
cnf_fs[-1].create(locs)
cnf_prefix = cnf_fs[-1].path(locs)
else:
cnf_prefix = None
return (
(rxn_fs, thy_fs, ts_fs, cnf_fs),
(rxn_prefix, thy_prefix, ts_prefix, cnf_prefix))
def read_user_file(dct, keyword):
if dct[keyword] is None:
print(
            'ERROR: No filename is specified for {}. '.format(keyword) +
            'Script will exit')
sys.exit()
file_name = dct[keyword]
return autofile.io_.read_file(file_name)
def read_user_filesystem(dct):
if dct['save_filesystem'] is None:
print(
            'ERROR: No save_filesystem specified. ' +
            'Script will exit')
sys.exit()
return dct['save_filesystem']
def choose_cutoff_distance(geo):
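    # Scan R-H bond-distance cutoffs (2.6 to 3.6 in steps of 0.2) until the geometry yields a single connected graph with exactly one oversaturated atom (the transferred H).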
rqhs = [x * 0.1 for x in range(26, 38, 2)]
chosen_ts_gra = []
chosen_oversaturated_atom = None
for rqh in rqhs:
ts_gras = automol.geom.connectivity_graph(geo, rqq_bond_max=3.5, rqh_bond_max=rqh, rhh_bond_max=2.3)
ts_gras = automol.graph.set_stereo_from_geometry(ts_gras, geo)
ts_gras = automol.graph.connected_components(ts_gras)
if len(ts_gras) != 1:
continue
for ts_gra_i in ts_gras:
vals = automol.graph.atom_unsaturated_valences(ts_gra_i, bond_order=True)
oversaturated_atoms = [atm for atm, val in vals.items() if val < 0]
if len(oversaturated_atoms) == 1:
chosen_ts_gra = ts_gras[0]
chosen_oversaturated_atom = oversaturated_atoms[0]
break
if chosen_oversaturated_atom is None:
        print('could not figure out which H is being transferred')
sys.exit()
return chosen_ts_gra, chosen_oversaturated_atom
def get_zrxn(geo, rxn_info, rxn_class):
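    # Identify the breaking/forming bonds at the transferred atom, build forward/backward TS graphs, and orient them so the reactants/products match the user-specified reaction.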
ts_gra, oversaturated_atom = choose_cutoff_distance(geo)
atoms_bnd = automol.graph.atoms_bond_keys(ts_gra)
bonds = atoms_bnd[oversaturated_atom]
if len(bonds) != 2:
        print('too many bonds to the transferred atom for me to figure out')
        print('I promise I will be smarter in the future')
sys.exit()
breaking_bond, forming_bond = bonds
# when we move on to other reaction types we have to check for double
# bonds when doing bond orders
forw_bnd_ord_dct = {breaking_bond: 0.9, forming_bond: 0.1}
back_bnd_ord_dct = {breaking_bond: 0.1, forming_bond: 0.9}
forward_gra = automol.graph.set_bond_orders(ts_gra, forw_bnd_ord_dct)
backward_gra = automol.graph.set_bond_orders(ts_gra, back_bnd_ord_dct)
reactant_gras = automol.graph.without_dummy_bonds(
automol.graph.without_fractional_bonds(forward_gra))
reactant_gras = automol.graph.connected_components(reactant_gras)
product_gras = automol.graph.without_dummy_bonds(
automol.graph.without_fractional_bonds(backward_gra))
product_gras = automol.graph.connected_components(product_gras)
ts_gras = [forward_gra, backward_gra]
rxn_gras = [reactant_gras, product_gras]
rxn_smis = [[], []]
for i, side in enumerate(rxn_info[0]):
for ich in side:
rxn_smis[i].append(automol.inchi.smiles(ich))
ts_smis = [[], []]
ts_ichs = [[], []]
for rgra in reactant_gras:
try:
rich = automol.graph.inchi(rgra, stereo=True)
except IndexError:
rich = automol.graph.inchi(rgra)
rsmi = automol.inchi.smiles(rich)
ts_ichs[0].append(rich)
ts_smis[0].append(rsmi)
for pgra in product_gras:
try:
pich = automol.graph.inchi(pgra, stereo=True)
except IndexError:
pich = automol.graph.inchi(pgra)
psmi = automol.inchi.smiles(pich)
ts_ichs[1].append(pich)
ts_smis[1].append(psmi)
reactant_match = False
product_match = False
if ts_smis[0] == rxn_smis[0]:
reactant_match = True
elif ts_smis[0][::-1] == rxn_smis[0]:
ts_ichs[0] = ts_ichs[0][::-1]
ts_smis[0] = ts_smis[0][::-1]
reactant_match = True
else:
ts_ichs = ts_ichs[::-1]
ts_smis = ts_smis[::-1]
ts_gras = ts_gras[::-1]
rxn_gras = rxn_gras[::-1]
if ts_smis[0] == rxn_smis[0]:
reactant_match = True
elif ts_smis[0][::-1] == rxn_smis[0]:
ts_ichs[0] = ts_ichs[0][::-1]
ts_smis[0] = ts_smis[0][::-1]
reactant_match = True
if reactant_match:
if ts_smis[1] == rxn_smis[1]:
product_match = True
elif ts_smis[1][::-1] == rxn_smis[-1]:
ts_ichs[1] = ts_ichs[1][::-1]
ts_smis[1] = ts_smis[1][::-1]
product_match = True
if reactant_match and product_match:
reactant_keys = []
for gra in rxn_gras[0]:
reactant_keys.append(automol.graph.atom_keys(gra))
product_keys = []
for gra in rxn_gras[1]:
product_keys.append(automol.graph.atom_keys(gra))
std_rxn = automol.reac.Reaction(
rxn_class, *ts_gras, reactant_keys, product_keys)
ts_zma, zma_keys, dummy_key_dct = automol.reac.ts_zmatrix(
std_rxn, geo)
std_zrxn = automol.reac.relabel_for_zmatrix(
std_rxn, zma_keys, dummy_key_dct)
rxn_info = (ts_ichs, *rxn_info[1:])
ts_geo = automol.zmat.geometry(ts_zma)
# geo_reorder_dct = {}
# dummies = []
# for dummy in dummy_key_dct.keys():
# add_idx = 1
# for dumm_j in dummies:
# if dummy > dumm_j:
# add_idx += 1
# dummies.append(dummy + add_idx)
# remove_idx = 0
# for idx_i, idx_j in enumerate(zma_keys):
# if idx_i in dummies:
# remove_idx -= 1
# else:
# geo_reorder_dct[idx_i + remove_idx] = idx_j
# ts_geo = automol.geom.reorder_coordinates(geo, geo_reorder_dct)
else:
print(
            'The reactants and products found for the transition state ' +
'did not match those specified in user input')
sys.exit()
return std_zrxn, ts_zma, ts_geo, rxn_info
def main(insert_dct):
prefix = read_user_filesystem(insert_dct)
# Read in the input and output files that we
# Are inserting into the filesystem
inp_str = read_user_file(insert_dct, 'input_file')
out_str = read_user_file(insert_dct, 'output_file')
# parse method from insert input file
thy_info = parse_user_theory(insert_dct)
# parse out geo information first, to make sure
# user save specifications match output
prog, method, basis, _ = thy_info
ene = elstruct.reader.energy(prog, method, out_str)
geo = elstruct.reader.opt_geometry(prog, out_str)
if geo is None:
print(
            'No geometry could be parsed from output. ' +
            'Check that the program matches user specified' +
' {}'.format(prog) + ' and method matches' +
' {}'.format(method))
sys.exit()
# Parse out user specified save location
zrxn = None
if insert_dct['saddle']:
rxn_info, spc_info, rxn_class = parse_user_reaction(insert_dct)
zrxn, zma, geo, rxn_info = get_zrxn(geo, rxn_info, rxn_class)
# for zrxn_i in zrxns:
# forw_form_key = automol.reac.forming_bond_keys(zrxn_i)
# back_form_key = automol.reac.forming_bond_keys(zrxn_i, rev=True)
# forw_brk_key = automol.reac.breaking_bond_keys(zrxn_i)
# back_brk_key = automol.reac.breaking_bond_keys(zrxn_i, rev=True)
# forward_gra = automol.graph.without_stereo_parities(
# automol.graph.without_dummy_bonds(
# automol.graph.without_fractional_bonds(
# zrxn_i.forward_ts_graph)))
# forward_gra = automol.graph.add_bonds(forward_gra, forw_form_key)
# backward_gra = automol.graph.without_stereo_parities(
# automol.graph.without_dummy_bonds(
# automol.graph.without_fractional_bonds(
# zrxn_i.backward_ts_graph)))
# backward_gra = automol.graph.add_bonds(backward_gra, back_form_key)
# if zrxn_i.class_ == 'hydrogen abstraction':
# forward_gra = automol.graph.remove_bonds(forward_gra, forw_brk_key)
# backward_gra = automol.graph.remove_bonds(backward_gra, back_brk_key)
# print('forRXN', automol.graph.string(zrxn_i.forward_ts_graph))
# print('forRXN', automol.graph.string(forward_gra))
# print('bacRXN', automol.graph.string(zrxn_i.backward_ts_graph))
# print('bacRXN', automol.graph.string(backward_gra))
# if forward_gra == automol.geom.graph(geo, stereo=False):
# zrxn = zrxn_i
# zma, _, _ = automol.reac.ts_zmatrix(zrxn, geo)
# elif backward_gra == automol.geom.graph(geo, stereo=False):
# zrxn = automol.reac.reverse(zrxn_i)
# zma, _, _ = automol.reac.ts_zmatrix(zrxn, geo)
# if zrxn is None:
# print(
# 'Your geometry did not match any of the attempted ' +
# 'zrxns, which are the following')
# for zrxn_i in zrxns:
# print(zrxns)
# sys.exit()
# # hess = elstruct.reader.hessian(prog, out_str)
# Hess = None
# If hess is None:
# print(
# 'No hessian found in output, cannot save ' +
# 'a transition state without a hessian')
# sys.exit()
# run_path = insert_dct['run_path']
# if run_path is None:
# run_path = os.getcwd()
# run_fs = autofile.fs.run(run_path)
# freq_run_path = run_fs[-1].path(['hessian'])
# run_fs[-1].create(['hessian'])
# script_str = autorun.SCRIPT_DCT['projrot']
# freqs, _, imags, _ = autorun.projrot.frequencies(
# script_str, freq_run_path, [geo], [[]], [hess])
# if len(imags) != 1:
# print(
# 'Can only save a transition state that has a single' +
# 'imaginary frequency, projrot found the following' +
# 'frequencies: ' + ','.join(imags))
# sys.exit()
else:
spc_info = parse_user_species(insert_dct)
mod_thy_info = tinfo.modify_orb_label(thy_info, spc_info)
locs = parse_user_locs(insert_dct)
# Check that the save location matches geo information
if not insert_dct['saddle']:
if not species_match(geo, spc_info):
print(
'I refuse to save this geometry until user specified' +
' info matches the info in user given output')
sys.exit()
# Check that the rid/cid info matches the filesystem
fs_array, prefix_array = create_species_filesystems(
prefix, spc_info, mod_thy_info, locs=None)
else:
fs_array, prefix_array = create_reaction_filesystems(
prefix, rxn_info, mod_thy_info,
ts_locs=insert_dct['ts_locs'], locs=None)
cnf_fs = fs_array[-1]
if not locs_match(geo, cnf_fs, locs):
print(
'I refuse to save this geometry until user specified' +
' info matches the info in user given output')
sys.exit()
inf_obj = autofile.schema.info_objects.run(
job=elstruct.Job.OPTIMIZATION, prog=prog, version='',
method=method, basis=basis, status=autofile.schema.RunStatus.SUCCESS)
ret = (inf_obj, inp_str, out_str)
_, saved_geos, saved_enes = _saved_cnf_info(
cnf_fs, mod_thy_info)
if _geo_unique(geo, ene, saved_geos, saved_enes, zrxn=zrxn):
sym_id = _sym_unique(
geo, ene, saved_geos, saved_enes)
if sym_id is None:
if cnf_fs[0].file.info.exists():
rinf_obj = cnf_fs[0].file.info.read()
else:
rinf_obj = autofile.schema.info_objects.conformer_trunk(0)
rinf_obj.nsamp = 1
if cnf_fs[1].file.info.exists([locs[0]]):
cinf_obj = cnf_fs[1].file.info.read(locs[0])
cnsampd = cinf_obj.nsamp
cnsampd += 1
cinf_obj.nsamp = cnsampd
else:
cinf_obj = autofile.schema.info_objects.conformer_branch(0)
cinf_obj.nsamp = 1
cnf_fs[1].create([locs[0]])
cnf_fs[0].file.info.write(rinf_obj)
cnf_fs[1].file.info.write(cinf_obj, [locs[0]])
hess, freqs, imags = None, None, None
if hess is not None and zrxn is not None:
hess_inf_obj = autofile.schema.info_objects.run(
job=elstruct.Job.HESSIAN, prog=prog, version='',
method=method, basis=basis,
status=autofile.schema.RunStatus.SUCCESS)
hess_ret = (hess_inf_obj, inp_str, out_str)
save_saddle_point(
zrxn, ret, hess_ret, freqs, imags,
mod_thy_info, {'runlvl_cnf_fs': (cnf_fs, None)}, locs,
zma_locs=(0,), zma=zma)
else:
_save_unique_parsed_conformer(
mod_thy_info, cnf_fs, locs, (geo, zma, ene),
inf_obj, inp_str, zrxn=zrxn, zma_locs=(0,))
print(
'geometry is now saved at {}'.format(cnf_fs[-1].path(locs)))
else:
print(
'the geometry in the output is not unique to filesystem' +
'... not saving')
def species_match(geo, spc_info):
match = True
ich, _, mul = spc_info
mults_allowed = automol.graph.possible_spin_multiplicities(
automol.inchi.graph(ich, stereo=False))
geo_ich = automol.geom.inchi(geo, stereo=True)
if ich != geo_ich:
print(
'user specified inchi {}'.format(ich) +
            ' does not match inchi from output {}'.format(geo_ich) +
            ', which is based on the geometry from output:\n' +
'{}'.format(automol.geom.string(geo)))
match = False
if mul not in mults_allowed:
print(
'user specified mult of {}'.format(mul) +
            ' is not an allowed multiplicity for inchi {}'.format(ich))
match = False
return match
def locs_match(geo, cnf_fs, locs):
match = True
rid = locs[0]
geo_rid = rng_loc_for_geo(geo, cnf_fs)
if geo_rid is not None:
if geo_rid != rid:
print(
'Error: rid mismatch for the filesystem at' +
' {}'.format(cnf_fs[0].path()) +
'\nthe expected rid for this geo is {}'.format(geo_rid) +
'\nthe user rid in input file is {}'.format(rid))
match = False
return match
def rng_loc_for_geo(geo, cnf_fs):
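    # Reuse the ring id (rid) of a saved conformer whose ring-fragment z-matrix almost matches that of geo (or the first saved rid when there is no ring fragment); return None if nothing matches.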
rid = None
frag_geo = _fragment_ring_geo(geo)
if frag_geo is not None:
frag_zma = automol.geom.zmatrix(frag_geo)
checked_rids = []
for locs in cnf_fs[-1].existing():
current_rid, _ = locs
if current_rid in checked_rids:
continue
if cnf_fs[-1].file.geometry.exists(locs):
checked_rids.append(current_rid)
locs_geo = cnf_fs[-1].file.geometry.read(locs)
frag_locs_geo = _fragment_ring_geo(locs_geo)
if frag_locs_geo is None:
rid = locs[0]
break
frag_locs_zma = automol.geom.zmatrix(frag_locs_geo)
if automol.zmat.almost_equal(
frag_locs_zma, frag_zma, dist_rtol=0.1, ang_atol=.4):
rid = locs[0]
break
return rid
def parse_script_input(script_input_file):
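    # Parse "<keyword>: <value>" lines from the options file ("!" starts a comment) into insert_dct; an "A + B = C + D" species value flags a saddle-point (reaction) insertion.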
script_input = autofile.io_.read_file(script_input_file).splitlines()
insert_dct = {
'save_filesystem': None,
'smiles': None,
'inchi': None,
'mult': None,
'charge': None,
'rid': None,
'cid': None,
'theory': None,
'program': None,
'method': None,
'basis': None,
'orb_res': None,
'input_file': None,
'output_file': None,
'ts_locs': None,
'ts_mult': None,
'rxn_class': None,
'zrxn_file': None,
'run_path': None,
'saddle': False,
}
for i, line in enumerate(script_input):
if len(line) < 2:
continue
elif '!' in line[0]:
continue
line = line.split('!')[0]
if ':' not in line:
print(
'ERROR: line\n({}) {}\n is not parsable, '.format(i, line) +
'script will exit until input is resolved to avoid' +
                ' filesystem contamination. ' +
                'Comment lines should contain "!". ' +
'Key format should be:\n' +
'<Keyword>: <Value>\n' +
'Allowed keywords are:\n' +
'{}'.format('\n'.join(list(insert_dct.keys())))
)
sys.exit()
keyword, value = line.split(':')
if keyword in insert_dct:
if 'None' in value:
value = None
elif keyword in ['mult', 'charge', 'ts_mult']:
values = []
for val in value.split(','):
values.append(int(val))
if len(values) == 1:
value = values[0]
else:
value = values
elif keyword in ['ts_locs']:
value = (int(value),)
elif keyword in ['rxn_class']:
# strip whitespaces form either side of reaction
# class but not in between words
value = value.split()
for i, val in enumerate(value):
value[i] = val.replace(' ', '')
value = ' '.join(value)
elif keyword not in ['smiles', 'inchi']:
value = value.replace(' ', '')
else:
value = value.split(' = ')
if len(value) > 1:
insert_dct['saddle'] = True
reactants, products = value
reactants = reactants.split(' + ')
products = products.split(' + ')
values = [[], []]
for reactant in reactants:
values[0].append(reactant.replace(' ', ''))
for product in products:
values[1].append(product.replace(' ', ''))
value = values
else:
value = value[0].replace(' ', '')
print(keyword, value)
insert_dct[keyword] = value
else:
print(
                'ERROR: Keyword {} is not recognized. '.format(keyword) +
                'script will exit until input is resolved to avoid' +
                ' filesystem contamination. ' +
'Allowed keywords are:\n' +
'{}'.format('\n'.join(list(insert_dct.keys())))
)
sys.exit()
return insert_dct
if __name__ == '__main__':
SCRIPT_INPUT_FILE = 'insert_options.txt'
insert_dct = parse_script_input(SCRIPT_INPUT_FILE)
main(insert_dct)
| [
"automol.graph.set_stereo_from_geometry",
"automol.reac.Reaction",
"autofile.schema.generate_new_ring_id",
"elstruct.reader.energy",
"mechroutines.es._routines.conformer._fragment_ring_geo",
"automol.reac.relabel_for_zmatrix",
"automol.smiles.inchi",
"automol.graph.atom_keys",
"autofile.fs.species",
"mechanalyzer.inf.rxn.ts_info",
"automol.graph.inchi",
"mechroutines.es._routines.conformer._save_unique_parsed_conformer",
"elstruct.reader.opt_geometry",
"autofile.fs.conformer",
"mechroutines.es._routines.conformer._geo_unique",
"automol.inchi.is_complete",
"automol.graph.connected_components",
"automol.graph.without_fractional_bonds",
"automol.reac.ts_zmatrix",
"mechroutines.es._routines.conformer._saved_cnf_info",
"automol.zmat.geometry",
"automol.inchi.graph",
"automol.graph.atom_unsaturated_valences",
"automol.geom.string",
"automol.inchi.smiles",
"mechanalyzer.inf.thy.modify_orb_label",
"autofile.schema.generate_new_conformer_id",
"automol.geom.inchi",
"autofile.schema.info_objects.conformer_branch",
"mechroutines.es._routines.conformer._sym_unique",
"mechanalyzer.inf.thy.from_dct",
"autofile.fs.transition_state",
"mechroutines.es._routines._sadpt.save_saddle_point",
"automol.zmat.almost_equal",
"mechanalyzer.inf.spc.from_data",
"autofile.io_.read_file",
"automol.graph.atoms_bond_keys",
"autofile.fs.reaction",
"sys.exit",
"automol.inchi.add_stereo",
"autofile.fs.theory",
"automol.geom.zmatrix",
"automol.graph.set_bond_orders",
"autofile.schema.info_objects.run",
"automol.geom.connectivity_graph",
"mechanalyzer.inf.rxn.sort",
"autofile.schema.info_objects.conformer_trunk"
] | [((4448, 4479), 'mechanalyzer.inf.spc.from_data', 'sinfo.from_data', (['ich', 'chg', 'mult'], {}), '(ich, chg, mult)\n', (4463, 4479), True, 'from mechanalyzer.inf import spc as sinfo\n'), ((6931, 6978), 'mechanalyzer.inf.rxn.sort', 'rinfo.sort', (['(ichs, rxn_chgs, rxn_muls, ts_mult)'], {}), '((ichs, rxn_chgs, rxn_muls, ts_mult))\n', (6941, 6978), True, 'from mechanalyzer.inf import rxn as rinfo\n'), ((6993, 7016), 'mechanalyzer.inf.rxn.ts_info', 'rinfo.ts_info', (['rxn_info'], {}), '(rxn_info)\n', (7006, 7016), True, 'from mechanalyzer.inf import rxn as rinfo\n'), ((8784, 8811), 'autofile.fs.species', 'autofile.fs.species', (['prefix'], {}), '(prefix)\n', (8803, 8811), False, 'import autofile\n'), ((8925, 8955), 'autofile.fs.theory', 'autofile.fs.theory', (['spc_prefix'], {}), '(spc_prefix)\n', (8943, 8955), False, 'import autofile\n'), ((9077, 9110), 'autofile.fs.conformer', 'autofile.fs.conformer', (['thy_prefix'], {}), '(thy_prefix)\n', (9098, 9110), False, 'import autofile\n'), ((9505, 9533), 'autofile.fs.reaction', 'autofile.fs.reaction', (['prefix'], {}), '(prefix)\n', (9525, 9533), False, 'import autofile\n'), ((9554, 9593), 'mechanalyzer.inf.rxn.sort', 'rinfo.sort', (['rxn_info'], {'scheme': '"""autofile"""'}), "(rxn_info, scheme='autofile')\n", (9564, 9593), True, 'from mechanalyzer.inf import rxn as rinfo\n'), ((9717, 9747), 'autofile.fs.theory', 'autofile.fs.theory', (['rxn_prefix'], {}), '(rxn_prefix)\n', (9735, 9747), False, 'import autofile\n'), ((9900, 9940), 'autofile.fs.transition_state', 'autofile.fs.transition_state', (['thy_prefix'], {}), '(thy_prefix)\n', (9928, 9940), False, 'import autofile\n'), ((10041, 10073), 'autofile.fs.conformer', 'autofile.fs.conformer', (['ts_prefix'], {}), '(ts_prefix)\n', (10062, 10073), False, 'import autofile\n'), ((10564, 10597), 'autofile.io_.read_file', 'autofile.io_.read_file', (['file_name'], {}), '(file_name)\n', (10586, 10597), False, 'import autofile\n'), ((11944, 11981), 'automol.graph.atoms_bond_keys', 'automol.graph.atoms_bond_keys', (['ts_gra'], {}), '(ts_gra)\n', (11973, 11981), False, 'import automol\n'), ((12491, 12546), 'automol.graph.set_bond_orders', 'automol.graph.set_bond_orders', (['ts_gra', 'forw_bnd_ord_dct'], {}), '(ts_gra, forw_bnd_ord_dct)\n', (12520, 12546), False, 'import automol\n'), ((12566, 12621), 'automol.graph.set_bond_orders', 'automol.graph.set_bond_orders', (['ts_gra', 'back_bnd_ord_dct'], {}), '(ts_gra, back_bnd_ord_dct)\n', (12595, 12621), False, 'import automol\n'), ((12758, 12807), 'automol.graph.connected_components', 'automol.graph.connected_components', (['reactant_gras'], {}), '(reactant_gras)\n', (12792, 12807), False, 'import automol\n'), ((12943, 12991), 'automol.graph.connected_components', 'automol.graph.connected_components', (['product_gras'], {}), '(product_gras)\n', (12977, 12991), False, 'import automol\n'), ((16711, 16756), 'elstruct.reader.energy', 'elstruct.reader.energy', (['prog', 'method', 'out_str'], {}), '(prog, method, out_str)\n', (16733, 16756), False, 'import elstruct\n'), ((16767, 16810), 'elstruct.reader.opt_geometry', 'elstruct.reader.opt_geometry', (['prog', 'out_str'], {}), '(prog, out_str)\n', (16795, 16810), False, 'import elstruct\n'), ((20458, 20500), 'mechanalyzer.inf.thy.modify_orb_label', 'tinfo.modify_orb_label', (['thy_info', 'spc_info'], {}), '(thy_info, spc_info)\n', (20480, 20500), True, 'from mechanalyzer.inf import thy as tinfo\n'), ((21446, 21611), 'autofile.schema.info_objects.run', 'autofile.schema.info_objects.run', ([], {'job': 
'elstruct.Job.OPTIMIZATION', 'prog': 'prog', 'version': '""""""', 'method': 'method', 'basis': 'basis', 'status': 'autofile.schema.RunStatus.SUCCESS'}), "(job=elstruct.Job.OPTIMIZATION, prog=prog,\n version='', method=method, basis=basis, status=autofile.schema.\n RunStatus.SUCCESS)\n", (21478, 21611), False, 'import autofile\n'), ((21690, 21727), 'mechroutines.es._routines.conformer._saved_cnf_info', '_saved_cnf_info', (['cnf_fs', 'mod_thy_info'], {}), '(cnf_fs, mod_thy_info)\n', (21705, 21727), False, 'from mechroutines.es._routines.conformer import _saved_cnf_info\n'), ((21744, 21800), 'mechroutines.es._routines.conformer._geo_unique', '_geo_unique', (['geo', 'ene', 'saved_geos', 'saved_enes'], {'zrxn': 'zrxn'}), '(geo, ene, saved_geos, saved_enes, zrxn=zrxn)\n', (21755, 21800), False, 'from mechroutines.es._routines.conformer import _geo_unique\n'), ((23874, 23910), 'automol.geom.inchi', 'automol.geom.inchi', (['geo'], {'stereo': '(True)'}), '(geo, stereo=True)\n', (23892, 23910), False, 'import automol\n'), ((24960, 24983), 'mechroutines.es._routines.conformer._fragment_ring_geo', '_fragment_ring_geo', (['geo'], {}), '(geo)\n', (24978, 24983), False, 'from mechroutines.es._routines.conformer import _fragment_ring_geo\n'), ((3622, 3660), 'autofile.schema.generate_new_ring_id', 'autofile.schema.generate_new_ring_id', ([], {}), '()\n', (3658, 3660), False, 'import autofile\n'), ((3695, 3738), 'autofile.schema.generate_new_conformer_id', 'autofile.schema.generate_new_conformer_id', ([], {}), '()\n', (3736, 3738), False, 'import autofile\n'), ((4080, 4090), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4088, 4090), False, 'import sys\n'), ((4125, 4150), 'automol.smiles.inchi', 'automol.smiles.inchi', (['smi'], {}), '(smi)\n', (4145, 4150), False, 'import automol\n'), ((4162, 4192), 'automol.inchi.is_complete', 'automol.inchi.is_complete', (['ich'], {}), '(ich)\n', (4187, 4192), False, 'import automol\n'), ((4208, 4237), 'automol.inchi.add_stereo', 'automol.inchi.add_stereo', (['ich'], {}), '(ich)\n', (4232, 4237), False, 'import automol\n'), ((4326, 4336), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4334, 4336), False, 'import sys\n'), ((4426, 4436), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4434, 4436), False, 'import sys\n'), ((5358, 5368), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5366, 5368), False, 'import sys\n'), ((5460, 5470), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5468, 5470), False, 'import sys\n'), ((5632, 5642), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5640, 5642), False, 'import sys\n'), ((5775, 5785), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5783, 5785), False, 'import sys\n'), ((6905, 6915), 'sys.exit', 'sys.exit', ([], {}), '()\n', (6913, 6915), False, 'import sys\n'), ((7317, 7327), 'sys.exit', 'sys.exit', ([], {}), '()\n', (7325, 7327), False, 'import sys\n'), ((10513, 10523), 'sys.exit', 'sys.exit', ([], {}), '()\n', (10521, 10523), False, 'import sys\n'), ((10768, 10778), 'sys.exit', 'sys.exit', ([], {}), '()\n', (10776, 10778), False, 'import sys\n'), ((10994, 11088), 'automol.geom.connectivity_graph', 'automol.geom.connectivity_graph', (['geo'], {'rqq_bond_max': '(3.5)', 'rqh_bond_max': 'rqh', 'rhh_bond_max': '(2.3)'}), '(geo, rqq_bond_max=3.5, rqh_bond_max=rqh,\n rhh_bond_max=2.3)\n', (11025, 11088), False, 'import automol\n'), ((11103, 11155), 'automol.graph.set_stereo_from_geometry', 'automol.graph.set_stereo_from_geometry', (['ts_gras', 'geo'], {}), '(ts_gras, geo)\n', (11141, 11155), False, 'import automol\n'), ((11174, 11217), 
'automol.graph.connected_components', 'automol.graph.connected_components', (['ts_gras'], {}), '(ts_gras)\n', (11208, 11217), False, 'import automol\n'), ((11762, 11772), 'sys.exit', 'sys.exit', ([], {}), '()\n', (11770, 11772), False, 'import sys\n'), ((12187, 12197), 'sys.exit', 'sys.exit', ([], {}), '()\n', (12195, 12197), False, 'import sys\n'), ((12685, 12736), 'automol.graph.without_fractional_bonds', 'automol.graph.without_fractional_bonds', (['forward_gra'], {}), '(forward_gra)\n', (12723, 12736), False, 'import automol\n'), ((12870, 12922), 'automol.graph.without_fractional_bonds', 'automol.graph.without_fractional_bonds', (['backward_gra'], {}), '(backward_gra)\n', (12908, 12922), False, 'import automol\n'), ((13464, 13490), 'automol.inchi.smiles', 'automol.inchi.smiles', (['rich'], {}), '(rich)\n', (13484, 13490), False, 'import automol\n'), ((13743, 13769), 'automol.inchi.smiles', 'automol.inchi.smiles', (['pich'], {}), '(pich)\n', (13763, 13769), False, 'import automol\n'), ((15034, 15105), 'automol.reac.Reaction', 'automol.reac.Reaction', (['rxn_class', '*ts_gras', 'reactant_keys', 'product_keys'], {}), '(rxn_class, *ts_gras, reactant_keys, product_keys)\n', (15055, 15105), False, 'import automol\n'), ((15161, 15198), 'automol.reac.ts_zmatrix', 'automol.reac.ts_zmatrix', (['std_rxn', 'geo'], {}), '(std_rxn, geo)\n', (15184, 15198), False, 'import automol\n'), ((15231, 15297), 'automol.reac.relabel_for_zmatrix', 'automol.reac.relabel_for_zmatrix', (['std_rxn', 'zma_keys', 'dummy_key_dct'], {}), '(std_rxn, zma_keys, dummy_key_dct)\n', (15263, 15297), False, 'import automol\n'), ((15372, 15401), 'automol.zmat.geometry', 'automol.zmat.geometry', (['ts_zma'], {}), '(ts_zma)\n', (15393, 15401), False, 'import automol\n'), ((16150, 16160), 'sys.exit', 'sys.exit', ([], {}), '()\n', (16158, 16160), False, 'import sys\n'), ((17061, 17071), 'sys.exit', 'sys.exit', ([], {}), '()\n', (17069, 17071), False, 'import sys\n'), ((21420, 21430), 'sys.exit', 'sys.exit', ([], {}), '()\n', (21428, 21430), False, 'import sys\n'), ((21819, 21864), 'mechroutines.es._routines.conformer._sym_unique', '_sym_unique', (['geo', 'ene', 'saved_geos', 'saved_enes'], {}), '(geo, ene, saved_geos, saved_enes)\n', (21830, 21864), False, 'from mechroutines.es._routines.conformer import _sym_unique\n'), ((23820, 23858), 'automol.inchi.graph', 'automol.inchi.graph', (['ich'], {'stereo': '(False)'}), '(ich, stereo=False)\n', (23839, 23858), False, 'import automol\n'), ((25032, 25062), 'automol.geom.zmatrix', 'automol.geom.zmatrix', (['frag_geo'], {}), '(frag_geo)\n', (25052, 25062), False, 'import automol\n'), ((4991, 5021), 'automol.inchi.is_complete', 'automol.inchi.is_complete', (['ich'], {}), '(ich)\n', (5016, 5021), False, 'import automol\n'), ((5041, 5070), 'automol.inchi.add_stereo', 'automol.inchi.add_stereo', (['ich'], {}), '(ich)\n', (5065, 5070), False, 'import automol\n'), ((5157, 5187), 'automol.inchi.is_complete', 'automol.inchi.is_complete', (['ich'], {}), '(ich)\n', (5182, 5187), False, 'import automol\n'), ((5207, 5236), 'automol.inchi.add_stereo', 'automol.inchi.add_stereo', (['ich'], {}), '(ich)\n', (5231, 5236), False, 'import automol\n'), ((5950, 5988), 'automol.inchi.graph', 'automol.inchi.graph', (['ich'], {'stereo': '(False)'}), '(ich, stereo=False)\n', (5969, 5988), False, 'import automol\n'), ((6204, 6214), 'sys.exit', 'sys.exit', ([], {}), '()\n', (6212, 6214), False, 'import sys\n'), ((6413, 6451), 'automol.inchi.graph', 'automol.inchi.graph', (['ich'], {'stereo': '(False)'}), '(ich, 
stereo=False)\n', (6432, 6451), False, 'import automol\n'), ((6667, 6677), 'sys.exit', 'sys.exit', ([], {}), '()\n', (6675, 6677), False, 'import sys\n'), ((7795, 7805), 'sys.exit', 'sys.exit', ([], {}), '()\n', (7803, 7805), False, 'import sys\n'), ((8294, 8328), 'mechanalyzer.inf.thy.from_dct', 'tinfo.from_dct', (['THEORY_DCT[theory]'], {}), '(THEORY_DCT[theory])\n', (8308, 8328), True, 'from mechanalyzer.inf import thy as tinfo\n'), ((8637, 8647), 'sys.exit', 'sys.exit', ([], {}), '()\n', (8645, 8647), False, 'import sys\n'), ((11321, 11387), 'automol.graph.atom_unsaturated_valences', 'automol.graph.atom_unsaturated_valences', (['ts_gra_i'], {'bond_order': '(True)'}), '(ts_gra_i, bond_order=True)\n', (11360, 11387), False, 'import automol\n'), ((13338, 13376), 'automol.graph.inchi', 'automol.graph.inchi', (['rgra'], {'stereo': '(True)'}), '(rgra, stereo=True)\n', (13357, 13376), False, 'import automol\n'), ((13617, 13655), 'automol.graph.inchi', 'automol.graph.inchi', (['pgra'], {'stereo': '(True)'}), '(pgra, stereo=True)\n', (13636, 13655), False, 'import automol\n'), ((20844, 20854), 'sys.exit', 'sys.exit', ([], {}), '()\n', (20852, 20854), False, 'import sys\n'), ((25397, 25425), 'mechroutines.es._routines.conformer._fragment_ring_geo', '_fragment_ring_geo', (['locs_geo'], {}), '(locs_geo)\n', (25415, 25425), False, 'from mechroutines.es._routines.conformer import _fragment_ring_geo\n'), ((25544, 25579), 'automol.geom.zmatrix', 'automol.geom.zmatrix', (['frag_locs_geo'], {}), '(frag_locs_geo)\n', (25564, 25579), False, 'import automol\n'), ((25595, 25674), 'automol.zmat.almost_equal', 'automol.zmat.almost_equal', (['frag_locs_zma', 'frag_zma'], {'dist_rtol': '(0.1)', 'ang_atol': '(0.4)'}), '(frag_locs_zma, frag_zma, dist_rtol=0.1, ang_atol=0.4)\n', (25620, 25674), False, 'import automol\n'), ((25827, 25868), 'autofile.io_.read_file', 'autofile.io_.read_file', (['script_input_file'], {}), '(script_input_file)\n', (25849, 25868), False, 'import autofile\n'), ((27096, 27106), 'sys.exit', 'sys.exit', ([], {}), '()\n', (27104, 27106), False, 'import sys\n'), ((29172, 29182), 'sys.exit', 'sys.exit', ([], {}), '()\n', (29180, 29182), False, 'import sys\n'), ((4827, 4852), 'automol.smiles.inchi', 'automol.smiles.inchi', (['smi'], {}), '(smi)\n', (4847, 4852), False, 'import automol\n'), ((4909, 4934), 'automol.smiles.inchi', 'automol.smiles.inchi', (['smi'], {}), '(smi)\n', (4929, 4934), False, 'import automol\n'), ((7912, 7922), 'sys.exit', 'sys.exit', ([], {}), '()\n', (7920, 7922), False, 'import sys\n'), ((13202, 13227), 'automol.inchi.smiles', 'automol.inchi.smiles', (['ich'], {}), '(ich)\n', (13222, 13227), False, 'import automol\n'), ((13423, 13448), 'automol.graph.inchi', 'automol.graph.inchi', (['rgra'], {}), '(rgra)\n', (13442, 13448), False, 'import automol\n'), ((13702, 13727), 'automol.graph.inchi', 'automol.graph.inchi', (['pgra'], {}), '(pgra)\n', (13721, 13727), False, 'import automol\n'), ((14866, 14894), 'automol.graph.atom_keys', 'automol.graph.atom_keys', (['gra'], {}), '(gra)\n', (14889, 14894), False, 'import automol\n'), ((14986, 15014), 'automol.graph.atom_keys', 'automol.graph.atom_keys', (['gra'], {}), '(gra)\n', (15009, 15014), False, 'import automol\n'), ((22049, 22096), 'autofile.schema.info_objects.conformer_trunk', 'autofile.schema.info_objects.conformer_trunk', (['(0)'], {}), '(0)\n', (22093, 22096), False, 'import autofile\n'), ((22403, 22451), 'autofile.schema.info_objects.conformer_branch', 'autofile.schema.info_objects.conformer_branch', (['(0)'], {}), 
'(0)\n', (22448, 22451), False, 'import autofile\n'), ((22769, 22929), 'autofile.schema.info_objects.run', 'autofile.schema.info_objects.run', ([], {'job': 'elstruct.Job.HESSIAN', 'prog': 'prog', 'version': '""""""', 'method': 'method', 'basis': 'basis', 'status': 'autofile.schema.RunStatus.SUCCESS'}), "(job=elstruct.Job.HESSIAN, prog=prog,\n version='', method=method, basis=basis, status=autofile.schema.\n RunStatus.SUCCESS)\n", (22801, 22929), False, 'import autofile\n'), ((23058, 23194), 'mechroutines.es._routines._sadpt.save_saddle_point', 'save_saddle_point', (['zrxn', 'ret', 'hess_ret', 'freqs', 'imags', 'mod_thy_info', "{'runlvl_cnf_fs': (cnf_fs, None)}", 'locs'], {'zma_locs': '(0,)', 'zma': 'zma'}), "(zrxn, ret, hess_ret, freqs, imags, mod_thy_info, {\n 'runlvl_cnf_fs': (cnf_fs, None)}, locs, zma_locs=(0,), zma=zma)\n", (23075, 23194), False, 'from mechroutines.es._routines._sadpt import save_saddle_point\n'), ((23285, 23407), 'mechroutines.es._routines.conformer._save_unique_parsed_conformer', '_save_unique_parsed_conformer', (['mod_thy_info', 'cnf_fs', 'locs', '(geo, zma, ene)', 'inf_obj', 'inp_str'], {'zrxn': 'zrxn', 'zma_locs': '(0,)'}), '(mod_thy_info, cnf_fs, locs, (geo, zma, ene),\n inf_obj, inp_str, zrxn=zrxn, zma_locs=(0,))\n', (23314, 23407), False, 'from mechroutines.es._routines.conformer import _save_unique_parsed_conformer\n'), ((8027, 8037), 'sys.exit', 'sys.exit', ([], {}), '()\n', (8035, 8037), False, 'import sys\n'), ((24151, 24175), 'automol.geom.string', 'automol.geom.string', (['geo'], {}), '(geo)\n', (24170, 24175), False, 'import automol\n'), ((8146, 8156), 'sys.exit', 'sys.exit', ([], {}), '()\n', (8154, 8156), False, 'import sys\n')] |
#!/bin/python
from deap import tools
import numpy as np
import os
statisticsNames = {'avg': 'Average profit',
'std': 'Profit variation',
'min': 'Minimum profit',
'max': 'Maximum profit',
'size': 'Population size',
'maxsize': 'Max population size'}
def getStatisticsMeter():
stats = tools.Statistics(lambda ind: ind.fitness.values[0])
stats.register("avg", np.mean)
stats.register("std", np.std)
stats.register("min", np.min)
stats.register("max", np.max)
return stats
def write_evolution_logs(i, stats, filename="output/evolution_gen.csv"):
#print(i, stats)
if type(stats) == dict:
message = ','.join([str(x) for x in [i,stats['avg'],
stats['std'],
stats['min'],
stats['max'],
stats['dateRange']]])
elif type(stats) == list:
message = ','.join([str(x) for x in [i] + stats])
    else:
        raise TypeError("stats must be a dict or a list")
#print(message)
if i == 0 and os.path.isfile(filename):
os.remove(filename)
f=open(filename, 'a+')
f.write(message+"\n")
#print(message)
f.close()
| [
"deap.tools.Statistics",
"os.path.isfile",
"os.remove"
] | [((382, 433), 'deap.tools.Statistics', 'tools.Statistics', (['(lambda ind: ind.fitness.values[0])'], {}), '(lambda ind: ind.fitness.values[0])\n', (398, 433), False, 'from deap import tools\n'), ((1169, 1193), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (1183, 1193), False, 'import os\n'), ((1203, 1222), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (1212, 1222), False, 'import os\n')] |
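A minimal usage sketch for the snippet above (not part of the original file): the record dict and the output filename are illustrative, and a real run would build the record with stats.compile() on a DEAP population.
stats = getStatisticsMeter()
# record = stats.compile(population)  # with a real DEAP population of evaluated individuals
record = {'avg': 1.25, 'std': 0.1, 'min': 1.0, 'max': 1.5, 'dateRange': '2020-01..2020-03'}
write_evolution_logs(0, record, filename="evolution_gen.csv")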
#Callbacks
"""Create training callbacks"""
import os
import numpy as np
import pandas as pd
from datetime import datetime
from DeepTreeAttention.utils import metrics
from DeepTreeAttention.visualization import visualize
from tensorflow.keras.callbacks import ReduceLROnPlateau
from tensorflow.keras.callbacks import Callback, TensorBoard
from tensorflow import expand_dims
class F1Callback(Callback):
def __init__(self, experiment, eval_dataset, y_true, label_names, submodel, train_shp, n=10):
"""F1 callback
Args:
n: number of epochs to run. If n=4, function will run every 4 epochs
y_true: instead of iterating through the dataset every time, just do it once and pass the true labels to the function
"""
self.experiment = experiment
self.eval_dataset = eval_dataset
self.label_names = label_names
self.submodel = submodel
self.n = n
self.train_shp = train_shp
self.y_true = y_true
def on_train_end(self, logs={}):
y_pred = []
sites = []
#gather site and species matrix
y_pred = self.model.predict(self.eval_dataset)
if self.submodel in ["spectral","spatial"]:
y_pred = y_pred[0]
#F1
macro, micro = metrics.f1_scores(self.y_true, y_pred)
self.experiment.log_metric("Final MicroF1", micro)
self.experiment.log_metric("Final MacroF1", macro)
#Log number of predictions to make sure its constant
self.experiment.log_metric("Prediction samples",y_pred.shape[0])
results = pd.DataFrame({"true":np.argmax(self.y_true, 1),"predicted":np.argmax(y_pred, 1)})
#assign labels
if self.label_names:
results["true_taxonID"] = results.true.apply(lambda x: self.label_names[x])
results["predicted_taxonID"] = results.predicted.apply(lambda x: self.label_names[x])
#Within site confusion
site_lists = self.train_shp.groupby("taxonID").siteID.unique()
site_confusion = metrics.site_confusion(y_true = results.true_taxonID, y_pred = results.predicted_taxonID, site_lists=site_lists)
self.experiment.log_metric(name = "Within_site confusion[training]", value = site_confusion)
plot_lists = self.train_shp.groupby("taxonID").plotID.unique()
plot_confusion = metrics.site_confusion(y_true = results.true_taxonID, y_pred = results.predicted_taxonID, site_lists=plot_lists)
self.experiment.log_metric(name = "Within_plot confusion[training]", value = plot_confusion)
domain_lists = self.train_shp.groupby("taxonID").domainID.unique()
domain_confusion = metrics.site_confusion(y_true = results.true_taxonID, y_pred = results.predicted_taxonID, site_lists=domain_lists)
self.experiment.log_metric(name = "Within_domain confusion[training]", value = domain_confusion)
#Genus of all the different taxonID variants should be the same, take the first
scientific_dict = self.train_shp.groupby('taxonID')['scientific'].apply(lambda x: x.head(1).values.tolist()).to_dict()
genus_confusion = metrics.genus_confusion(y_true = results.true_taxonID, y_pred = results.predicted_taxonID, scientific_dict=scientific_dict)
self.experiment.log_metric(name = "Within Genus confusion", value = genus_confusion)
#Most confused
most_confused = results.groupby(["true_taxonID","predicted_taxonID"]).size().reset_index(name="count")
most_confused = most_confused[~(most_confused.true_taxonID == most_confused.predicted_taxonID)].sort_values("count", ascending=False)
self.experiment.log_table("most_confused.csv",most_confused.values)
def on_epoch_end(self, epoch, logs={}):
if not epoch % self.n == 0:
return None
y_pred = []
sites = []
#gather site and species matrix
y_pred = self.model.predict(self.eval_dataset)
if self.submodel in ["spectral","spatial"]:
y_pred = y_pred[0]
#F1
macro, micro = metrics.f1_scores(self.y_true, y_pred)
self.experiment.log_metric("MicroF1", micro)
self.experiment.log_metric("MacroF1", macro)
#Log number of predictions to make sure its constant
self.experiment.log_metric("Prediction samples",y_pred.shape[0])
class ConfusionMatrixCallback(Callback):
def __init__(self, experiment, dataset, label_names, y_true, submodel):
self.experiment = experiment
self.dataset = dataset
self.label_names = label_names
self.submodel = submodel
self.y_true = y_true
def on_train_end(self, epoch, logs={}):
y_pred = self.model.predict(self.dataset)
        if self.submodel == "metadata":
name = "Metadata Confusion Matrix"
elif self.submodel in ["ensemble"]:
name = "Ensemble Matrix"
else:
name = "Confusion Matrix"
cm = self.experiment.log_confusion_matrix(
self.y_true,
y_pred,
title=name,
file_name= name,
labels=self.label_names,
max_categories=90,
max_example_per_cell=1)
class ImageCallback(Callback):
def __init__(self, experiment, dataset, label_names, submodel=False):
self.experiment = experiment
self.dataset = dataset
self.label_names = label_names
self.submodel = submodel
def on_train_end(self, epoch, logs={}):
"""Plot sample images with labels annotated"""
        #fill until there are at least 20 images
images = []
y_pred = []
y_true = []
limit = 20
num_images = 0
for data, label in self.dataset:
if num_images < limit:
pred = self.model.predict(data)
images.append(data)
if self.submodel:
y_pred.append(pred[0])
y_true.append(label[0])
else:
y_pred.append(pred)
y_true.append(label)
num_images += label.shape[0]
else:
break
images = np.vstack(images)
y_true = np.concatenate(y_true)
y_pred = np.concatenate(y_pred)
y_true = np.argmax(y_true, axis=1)
y_pred = np.argmax(y_pred, axis=1)
true_taxonID = [self.label_names[x] for x in y_true]
pred_taxonID = [self.label_names[x] for x in y_pred]
counter = 0
for label, prediction, image in zip(true_taxonID, pred_taxonID, images):
figure = visualize.plot_prediction(image=image,
prediction=prediction,
label=label)
self.experiment.log_figure(figure_name="{}_{}".format(label, counter))
counter += 1
def create(experiment, train_data, validation_data, train_shp, log_dir=None, label_names=None, submodel=False):
"""Create a set of callbacks
Args:
experiment: a comet experiment object
train_data: a tf data object to generate data
validation_data: a tf data object to generate data
train_shp: the original shapefile for the train data to check site error
"""
#turn off callbacks for metadata
callback_list = []
reduce_lr = ReduceLROnPlateau(monitor='val_loss',
factor=0.5,
patience=10,
min_delta=0.1,
min_lr=0.00001,
verbose=1)
callback_list.append(reduce_lr)
#Get the true labels since they are not shuffled
y_true = [ ]
for data, label in validation_data:
if submodel in ["spatial","spectral"]:
label = label[0]
y_true.append(label)
y_true = np.concatenate(y_true)
if not submodel in ["spatial","spectral"]:
confusion_matrix = ConfusionMatrixCallback(experiment=experiment, y_true=y_true, dataset=validation_data, label_names=label_names, submodel=submodel)
callback_list.append(confusion_matrix)
f1 = F1Callback(experiment=experiment, y_true=y_true, eval_dataset=validation_data, label_names=label_names, submodel=submodel, train_shp=train_shp)
callback_list.append(f1)
#if submodel is None:
#plot_images = ImageCallback(experiment, validation_data, label_names, submodel=submodel)
#callback_list.append(plot_images)
if log_dir is not None:
print("saving tensorboard logs at {}".format(log_dir))
tensorboard = TensorBoard(log_dir=log_dir, histogram_freq=0, profile_batch=30)
callback_list.append(tensorboard)
return callback_list
| [
"numpy.concatenate",
"numpy.argmax",
"tensorflow.keras.callbacks.ReduceLROnPlateau",
"DeepTreeAttention.utils.metrics.f1_scores",
"DeepTreeAttention.utils.metrics.site_confusion",
"DeepTreeAttention.utils.metrics.genus_confusion",
"DeepTreeAttention.visualization.visualize.plot_prediction",
"tensorflow.keras.callbacks.TensorBoard",
"numpy.vstack"
] | [((7816, 7923), 'tensorflow.keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'monitor': '"""val_loss"""', 'factor': '(0.5)', 'patience': '(10)', 'min_delta': '(0.1)', 'min_lr': '(1e-05)', 'verbose': '(1)'}), "(monitor='val_loss', factor=0.5, patience=10, min_delta=\n 0.1, min_lr=1e-05, verbose=1)\n", (7833, 7923), False, 'from tensorflow.keras.callbacks import ReduceLROnPlateau\n'), ((8369, 8391), 'numpy.concatenate', 'np.concatenate', (['y_true'], {}), '(y_true)\n', (8383, 8391), True, 'import numpy as np\n'), ((1328, 1366), 'DeepTreeAttention.utils.metrics.f1_scores', 'metrics.f1_scores', (['self.y_true', 'y_pred'], {}), '(self.y_true, y_pred)\n', (1345, 1366), False, 'from DeepTreeAttention.utils import metrics\n'), ((4320, 4358), 'DeepTreeAttention.utils.metrics.f1_scores', 'metrics.f1_scores', (['self.y_true', 'y_pred'], {}), '(self.y_true, y_pred)\n', (4337, 4358), False, 'from DeepTreeAttention.utils import metrics\n'), ((6618, 6635), 'numpy.vstack', 'np.vstack', (['images'], {}), '(images)\n', (6627, 6635), True, 'import numpy as np\n'), ((6653, 6675), 'numpy.concatenate', 'np.concatenate', (['y_true'], {}), '(y_true)\n', (6667, 6675), True, 'import numpy as np\n'), ((6693, 6715), 'numpy.concatenate', 'np.concatenate', (['y_pred'], {}), '(y_pred)\n', (6707, 6715), True, 'import numpy as np\n'), ((6734, 6759), 'numpy.argmax', 'np.argmax', (['y_true'], {'axis': '(1)'}), '(y_true, axis=1)\n', (6743, 6759), True, 'import numpy as np\n'), ((6777, 6802), 'numpy.argmax', 'np.argmax', (['y_pred'], {'axis': '(1)'}), '(y_pred, axis=1)\n', (6786, 6802), True, 'import numpy as np\n'), ((9126, 9190), 'tensorflow.keras.callbacks.TensorBoard', 'TensorBoard', ([], {'log_dir': 'log_dir', 'histogram_freq': '(0)', 'profile_batch': '(30)'}), '(log_dir=log_dir, histogram_freq=0, profile_batch=30)\n', (9137, 9190), False, 'from tensorflow.keras.callbacks import Callback, TensorBoard\n'), ((2118, 2231), 'DeepTreeAttention.utils.metrics.site_confusion', 'metrics.site_confusion', ([], {'y_true': 'results.true_taxonID', 'y_pred': 'results.predicted_taxonID', 'site_lists': 'site_lists'}), '(y_true=results.true_taxonID, y_pred=results.\n predicted_taxonID, site_lists=site_lists)\n', (2140, 2231), False, 'from DeepTreeAttention.utils import metrics\n'), ((2457, 2570), 'DeepTreeAttention.utils.metrics.site_confusion', 'metrics.site_confusion', ([], {'y_true': 'results.true_taxonID', 'y_pred': 'results.predicted_taxonID', 'site_lists': 'plot_lists'}), '(y_true=results.true_taxonID, y_pred=results.\n predicted_taxonID, site_lists=plot_lists)\n', (2479, 2570), False, 'from DeepTreeAttention.utils import metrics\n'), ((2810, 2925), 'DeepTreeAttention.utils.metrics.site_confusion', 'metrics.site_confusion', ([], {'y_true': 'results.true_taxonID', 'y_pred': 'results.predicted_taxonID', 'site_lists': 'domain_lists'}), '(y_true=results.true_taxonID, y_pred=results.\n predicted_taxonID, site_lists=domain_lists)\n', (2832, 2925), False, 'from DeepTreeAttention.utils import metrics\n'), ((3300, 3424), 'DeepTreeAttention.utils.metrics.genus_confusion', 'metrics.genus_confusion', ([], {'y_true': 'results.true_taxonID', 'y_pred': 'results.predicted_taxonID', 'scientific_dict': 'scientific_dict'}), '(y_true=results.true_taxonID, y_pred=results.\n predicted_taxonID, scientific_dict=scientific_dict)\n', (3323, 3424), False, 'from DeepTreeAttention.utils import metrics\n'), ((7049, 7123), 'DeepTreeAttention.visualization.visualize.plot_prediction', 'visualize.plot_prediction', ([], {'image': 'image', 
'prediction': 'prediction', 'label': 'label'}), '(image=image, prediction=prediction, label=label)\n', (7074, 7123), False, 'from DeepTreeAttention.visualization import visualize\n'), ((1667, 1692), 'numpy.argmax', 'np.argmax', (['self.y_true', '(1)'], {}), '(self.y_true, 1)\n', (1676, 1692), True, 'import numpy as np\n'), ((1705, 1725), 'numpy.argmax', 'np.argmax', (['y_pred', '(1)'], {}), '(y_pred, 1)\n', (1714, 1725), True, 'import numpy as np\n')] |
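A hedged wiring sketch (not from the original module): how the callback factory above could be handed to Keras fit(); the comet experiment, tf.data datasets, shapefile and model objects are assumed to already exist.
callbacks = create(experiment, train_ds, val_ds, train_shp,
                    log_dir="logs", label_names=label_names, submodel=False)
model.fit(train_ds, validation_data=val_ds, epochs=100, callbacks=callbacks)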
import rtpipe.RT as rt
import rtpipe.parsecands as pc
import rtpipe.parsesdm as ps
import rtpipe.reproduce as reproduce
import click, os, glob
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logging.captureWarnings(True)
logger = logging.getLogger(__name__)
@click.group('rtpipe')
def cli():
pass
@cli.command()
@click.argument('filename')
@click.option('--paramfile', default='')
@click.option('--bdfdir', default='')
@click.option('--scan', default=1)
def read(filename, paramfile, bdfdir, scan):
""" Simple parse and return metadata for pipeline for first scan """
filename = os.path.abspath(filename)
scans = ps.read_scans(filename, bdfdir=bdfdir)
logger.info('Scans, Target names:')
logger.info('%s' % str([(ss, scans[ss]['source']) for ss in scans]))
logger.info('Example pipeline:')
state = rt.set_pipeline(filename, scan, paramfile=paramfile, logfile=False)
@cli.command()
@click.argument('filename', type=str)
@click.option('--scan', type=int, default=0)
@click.option('--paramfile', type=str, default='rtpipe_cbe.conf')
@click.option('--logfile', type=bool, default=False)
@click.option('--bdfdir', default='')
def searchone(filename, scan, paramfile, logfile, bdfdir):
""" Searches one scan of filename
filename is name of local sdm ('filename.GN' expected locally).
scan is scan number to search. if none provided, script prints all.
assumes filename is an sdm.
"""
filename = os.path.abspath(filename)
scans = ps.read_scans(filename, bdfdir=bdfdir)
if scan != 0:
d = rt.set_pipeline(filename, scan, paramfile=paramfile,
fileroot=os.path.basename(filename), logfile=logfile)
rt.pipeline(d, range(d['nsegments']))
# clean up and merge files
pc.merge_segments(filename, scan)
pc.merge_scans(os.path.dirname(filename), os.path.basename(filename), scans.keys())
else:
logger.info('Scans, Target names:')
logger.info('%s' % str([(ss, scans[ss]['source']) for ss in scans]))
logger.info('Example pipeline:')
state = rt.set_pipeline(filename, scans.popitem()[0], paramfile=paramfile,
fileroot=os.path.basename(filename), logfile=logfile)
@cli.command()
@click.argument('filename')
@click.option('--snrmin', default=0.)
@click.option('--snrmax', default=999.)
@click.option('--bdfdir', default='')
def mergeall(filename, snrmin, snrmax, bdfdir):
""" Merge cands/noise files over all scans
Tries to find scans from filename, but will fall back to finding relevant files if it does not exist.
"""
filename = os.path.abspath(filename)
bignumber = 500
if os.path.exists(filename):
scans = ps.read_scans(filename, bdfdir=bdfdir)
scanlist = sorted(scans.keys())
else:
        logger.warning('Could not find file {0}. Estimating scans from available files.'.format(filename))
filelist = glob.glob(os.path.join(os.path.dirname(filename), '*{0}_sc*pkl'.format(os.path.basename(filename))))
try:
scanlist = sorted(set([int(fn.rstrip('.pkl').split('_sc')[1].split('seg')[0]) for fn in filelist]))
except IndexError:
            logger.warning('Could not parse filenames for scans. Looking over big range.')
scanlist = range(bignumber)
logger.info('Merging over scans {0}'.format(scanlist))
for scan in scanlist:
pc.merge_segments(filename, scan)
pc.merge_scans(os.path.dirname(filename), os.path.basename(filename), scanlist, snrmin=snrmin, snrmax=snrmax)
@cli.command()
@click.argument('filename', type=str)
@click.option('--html', type=bool, default=True, help='Create html version')
@click.option('--basenb', type=str, default='', help='Full path to base notebook. Default to distribution version')
@click.option('--agdir', type=str, default='', help='Activegit repo for applying classifications')
def nbcompile(filename, html, basenb, agdir):
""" Compile the baseinteract.ipynb notebook into an analysis notebook for filename """
filename = os.path.abspath(filename)
pc.nbcompile(os.path.dirname(filename), os.path.basename(filename), html=html, basenb=basenb, agdir=agdir)
@cli.command()
@click.argument('candsfile', type=str)
@click.option('--threshold', type=float, default=0., help='Filter candidates to abs(snr) > threshold')
def list_cands(candsfile, threshold):
""" Print candidates above abs(snr) in candsfile """
reproduce.list_cands(candsfile, threshold)
@cli.command()
@click.argument('candsfile', type=str)
@click.argument('candnum', type=int)
@click.option('--threshold', type=float, default=0., help='Filter candidates to abs(snr) > threshold')
def refine_cand(candsfile, candnum, threshold):
""" Run refinement search for candnum in list_cands with abs(snr) > threshold """
reproduce.refine_cand(candsfile, candnum=candnum, threshold=threshold)
@cli.command()
@click.argument('candsfile', type=str)
@click.option('--threshold', type=float, default=0., help='Filter candidates to abs(snr) > threshold')
def refine_cands(candsfile, threshold):
""" Run refinement search and save candidates for all in candsfile with snr > threshold """
reproduce.refine_cands(candsfile, threshold=threshold)
if __name__ == '__main__':
cli()
| [
"os.path.abspath",
"click.argument",
"logging.basicConfig",
"os.path.basename",
"rtpipe.RT.set_pipeline",
"rtpipe.reproduce.list_cands",
"rtpipe.reproduce.refine_cand",
"os.path.dirname",
"click.option",
"os.path.exists",
"logging.captureWarnings",
"rtpipe.parsesdm.read_scans",
"click.group",
"rtpipe.reproduce.refine_cands",
"logging.getLogger",
"rtpipe.parsecands.merge_segments"
] | [((158, 265), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'}), "(level=logging.INFO, format=\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (177, 265), False, 'import logging\n'), ((261, 290), 'logging.captureWarnings', 'logging.captureWarnings', (['(True)'], {}), '(True)\n', (284, 290), False, 'import logging\n'), ((300, 327), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (317, 327), False, 'import logging\n'), ((331, 352), 'click.group', 'click.group', (['"""rtpipe"""'], {}), "('rtpipe')\n", (342, 352), False, 'import click, os, glob\n'), ((391, 417), 'click.argument', 'click.argument', (['"""filename"""'], {}), "('filename')\n", (405, 417), False, 'import click, os, glob\n'), ((419, 458), 'click.option', 'click.option', (['"""--paramfile"""'], {'default': '""""""'}), "('--paramfile', default='')\n", (431, 458), False, 'import click, os, glob\n'), ((460, 496), 'click.option', 'click.option', (['"""--bdfdir"""'], {'default': '""""""'}), "('--bdfdir', default='')\n", (472, 496), False, 'import click, os, glob\n'), ((498, 531), 'click.option', 'click.option', (['"""--scan"""'], {'default': '(1)'}), "('--scan', default=1)\n", (510, 531), False, 'import click, os, glob\n'), ((992, 1028), 'click.argument', 'click.argument', (['"""filename"""'], {'type': 'str'}), "('filename', type=str)\n", (1006, 1028), False, 'import click, os, glob\n'), ((1030, 1073), 'click.option', 'click.option', (['"""--scan"""'], {'type': 'int', 'default': '(0)'}), "('--scan', type=int, default=0)\n", (1042, 1073), False, 'import click, os, glob\n'), ((1075, 1139), 'click.option', 'click.option', (['"""--paramfile"""'], {'type': 'str', 'default': '"""rtpipe_cbe.conf"""'}), "('--paramfile', type=str, default='rtpipe_cbe.conf')\n", (1087, 1139), False, 'import click, os, glob\n'), ((1141, 1192), 'click.option', 'click.option', (['"""--logfile"""'], {'type': 'bool', 'default': '(False)'}), "('--logfile', type=bool, default=False)\n", (1153, 1192), False, 'import click, os, glob\n'), ((1194, 1230), 'click.option', 'click.option', (['"""--bdfdir"""'], {'default': '""""""'}), "('--bdfdir', default='')\n", (1206, 1230), False, 'import click, os, glob\n'), ((2343, 2369), 'click.argument', 'click.argument', (['"""filename"""'], {}), "('filename')\n", (2357, 2369), False, 'import click, os, glob\n'), ((2371, 2408), 'click.option', 'click.option', (['"""--snrmin"""'], {'default': '(0.0)'}), "('--snrmin', default=0.0)\n", (2383, 2408), False, 'import click, os, glob\n'), ((2409, 2448), 'click.option', 'click.option', (['"""--snrmax"""'], {'default': '(999.0)'}), "('--snrmax', default=999.0)\n", (2421, 2448), False, 'import click, os, glob\n'), ((2449, 2485), 'click.option', 'click.option', (['"""--bdfdir"""'], {'default': '""""""'}), "('--bdfdir', default='')\n", (2461, 2485), False, 'import click, os, glob\n'), ((3662, 3698), 'click.argument', 'click.argument', (['"""filename"""'], {'type': 'str'}), "('filename', type=str)\n", (3676, 3698), False, 'import click, os, glob\n'), ((3700, 3775), 'click.option', 'click.option', (['"""--html"""'], {'type': 'bool', 'default': '(True)', 'help': '"""Create html version"""'}), "('--html', type=bool, default=True, help='Create html version')\n", (3712, 3775), False, 'import click, os, glob\n'), ((3777, 3896), 'click.option', 'click.option', (['"""--basenb"""'], {'type': 'str', 'default': '""""""', 'help': '"""Full path to base notebook. 
Default to distribution version"""'}), "('--basenb', type=str, default='', help=\n 'Full path to base notebook. Default to distribution version')\n", (3789, 3896), False, 'import click, os, glob\n'), ((3893, 3995), 'click.option', 'click.option', (['"""--agdir"""'], {'type': 'str', 'default': '""""""', 'help': '"""Activegit repo for applying classifications"""'}), "('--agdir', type=str, default='', help=\n 'Activegit repo for applying classifications')\n", (3905, 3995), False, 'import click, os, glob\n'), ((4300, 4337), 'click.argument', 'click.argument', (['"""candsfile"""'], {'type': 'str'}), "('candsfile', type=str)\n", (4314, 4337), False, 'import click, os, glob\n'), ((4339, 4446), 'click.option', 'click.option', (['"""--threshold"""'], {'type': 'float', 'default': '(0.0)', 'help': '"""Filter candidates to abs(snr) > threshold"""'}), "('--threshold', type=float, default=0.0, help=\n 'Filter candidates to abs(snr) > threshold')\n", (4351, 4446), False, 'import click, os, glob\n'), ((4602, 4639), 'click.argument', 'click.argument', (['"""candsfile"""'], {'type': 'str'}), "('candsfile', type=str)\n", (4616, 4639), False, 'import click, os, glob\n'), ((4641, 4676), 'click.argument', 'click.argument', (['"""candnum"""'], {'type': 'int'}), "('candnum', type=int)\n", (4655, 4676), False, 'import click, os, glob\n'), ((4678, 4785), 'click.option', 'click.option', (['"""--threshold"""'], {'type': 'float', 'default': '(0.0)', 'help': '"""Filter candidates to abs(snr) > threshold"""'}), "('--threshold', type=float, default=0.0, help=\n 'Filter candidates to abs(snr) > threshold')\n", (4690, 4785), False, 'import click, os, glob\n'), ((5008, 5045), 'click.argument', 'click.argument', (['"""candsfile"""'], {'type': 'str'}), "('candsfile', type=str)\n", (5022, 5045), False, 'import click, os, glob\n'), ((5047, 5154), 'click.option', 'click.option', (['"""--threshold"""'], {'type': 'float', 'default': '(0.0)', 'help': '"""Filter candidates to abs(snr) > threshold"""'}), "('--threshold', type=float, default=0.0, help=\n 'Filter candidates to abs(snr) > threshold')\n", (5059, 5154), False, 'import click, os, glob\n'), ((666, 691), 'os.path.abspath', 'os.path.abspath', (['filename'], {}), '(filename)\n', (681, 691), False, 'import click, os, glob\n'), ((705, 743), 'rtpipe.parsesdm.read_scans', 'ps.read_scans', (['filename'], {'bdfdir': 'bdfdir'}), '(filename, bdfdir=bdfdir)\n', (718, 743), True, 'import rtpipe.parsesdm as ps\n'), ((906, 973), 'rtpipe.RT.set_pipeline', 'rt.set_pipeline', (['filename', 'scan'], {'paramfile': 'paramfile', 'logfile': '(False)'}), '(filename, scan, paramfile=paramfile, logfile=False)\n', (921, 973), True, 'import rtpipe.RT as rt\n'), ((1525, 1550), 'os.path.abspath', 'os.path.abspath', (['filename'], {}), '(filename)\n', (1540, 1550), False, 'import click, os, glob\n'), ((1563, 1601), 'rtpipe.parsesdm.read_scans', 'ps.read_scans', (['filename'], {'bdfdir': 'bdfdir'}), '(filename, bdfdir=bdfdir)\n', (1576, 1601), True, 'import rtpipe.parsesdm as ps\n'), ((2712, 2737), 'os.path.abspath', 'os.path.abspath', (['filename'], {}), '(filename)\n', (2727, 2737), False, 'import click, os, glob\n'), ((2766, 2790), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (2780, 2790), False, 'import click, os, glob\n'), ((4144, 4169), 'os.path.abspath', 'os.path.abspath', (['filename'], {}), '(filename)\n', (4159, 4169), False, 'import click, os, glob\n'), ((4541, 4583), 'rtpipe.reproduce.list_cands', 'reproduce.list_cands', (['candsfile', 'threshold'], {}), '(candsfile, 
threshold)\n', (4561, 4583), True, 'import rtpipe.reproduce as reproduce\n'), ((4919, 4989), 'rtpipe.reproduce.refine_cand', 'reproduce.refine_cand', (['candsfile'], {'candnum': 'candnum', 'threshold': 'threshold'}), '(candsfile, candnum=candnum, threshold=threshold)\n', (4940, 4989), True, 'import rtpipe.reproduce as reproduce\n'), ((5290, 5344), 'rtpipe.reproduce.refine_cands', 'reproduce.refine_cands', (['candsfile'], {'threshold': 'threshold'}), '(candsfile, threshold=threshold)\n', (5312, 5344), True, 'import rtpipe.reproduce as reproduce\n'), ((1858, 1891), 'rtpipe.parsecands.merge_segments', 'pc.merge_segments', (['filename', 'scan'], {}), '(filename, scan)\n', (1875, 1891), True, 'import rtpipe.parsecands as pc\n'), ((2808, 2846), 'rtpipe.parsesdm.read_scans', 'ps.read_scans', (['filename'], {'bdfdir': 'bdfdir'}), '(filename, bdfdir=bdfdir)\n', (2821, 2846), True, 'import rtpipe.parsesdm as ps\n'), ((3496, 3529), 'rtpipe.parsecands.merge_segments', 'pc.merge_segments', (['filename', 'scan'], {}), '(filename, scan)\n', (3513, 3529), True, 'import rtpipe.parsecands as pc\n'), ((3549, 3574), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (3564, 3574), False, 'import click, os, glob\n'), ((3576, 3602), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (3592, 3602), False, 'import click, os, glob\n'), ((4188, 4213), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (4203, 4213), False, 'import click, os, glob\n'), ((4215, 4241), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (4231, 4241), False, 'import click, os, glob\n'), ((1915, 1940), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (1930, 1940), False, 'import click, os, glob\n'), ((1942, 1968), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (1958, 1968), False, 'import click, os, glob\n'), ((1723, 1749), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (1739, 1749), False, 'import click, os, glob\n'), ((2280, 2306), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (2296, 2306), False, 'import click, os, glob\n'), ((3043, 3068), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (3058, 3068), False, 'import click, os, glob\n'), ((3091, 3117), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (3107, 3117), False, 'import click, os, glob\n')] |
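A hedged sketch of exercising the click group above in-process with click's test runner; the SDM filename is illustrative and rtpipe's data dependencies must be installed.
from click.testing import CliRunner

runner = CliRunner()
result = runner.invoke(cli, ["read", "mydata.sdm", "--scan", "1"])
print(result.exit_code, result.output)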
# Generated by Django 2.1.2 on 2018-12-16 13:01
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('vespene', '0008_auto_20181106_2233'),
]
operations = [
migrations.RemoveField(
model_name='workerpool',
name='sudo_password',
),
]
| [
"django.db.migrations.RemoveField"
] | [((227, 296), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""workerpool"""', 'name': '"""sudo_password"""'}), "(model_name='workerpool', name='sudo_password')\n", (249, 296), False, 'from django.db import migrations\n')] |
from unittest import TestCase
from unittest.mock import Mock, patch
from profile_generator import generator
from profile_generator.generator import (
ConfigFileReadError,
InvalidConfigFileError,
NoConfigFileError,
OutputDirCreationFailure,
ProfileWriteError,
TemplateFileReadError,
)
from profile_generator.schema import object_of, type_of
class ProfileGeneratorTest(TestCase):
@patch("sys.argv", ["app.py", "one.json", "two.json"])
def test_get_config_files_returns_config_files(self) -> None:
self.assertEqual(["one.json", "two.json"], generator.get_config_files())
@patch("sys.argv", ["app.py"])
def test_get_config_files_raises_error_when_arguments_are_missing(self) -> None:
self.assertRaises(NoConfigFileError, generator.get_config_files)
@patch(
"profile_generator.util.file.create_dir", lambda *xs: "/root/" + "/".join(xs)
)
def test_create_output_dir_raises_returns_created_dir_path(self) -> None:
self.assertEqual("/root/profiles", generator.create_output_dir())
@patch("profile_generator.util.file.create_dir")
def test_create_output_dir_raises_error_when_cannot_create_dir(
self, create_dir: Mock
) -> None:
create_dir.side_effect = OSError
self.assertRaises(OutputDirCreationFailure, generator.create_output_dir)
@patch("profile_generator.util.file.read_file")
@patch(
"profile_generator.util.file.get_full_path", lambda *xs: "/root/" + "/".join(xs)
)
def test_get_profile_template_returns_template_file_content(
self, read_file: Mock
) -> None:
read_file.return_value = "file content"
self.assertEqual("file content", generator.get_profile_template())
read_file.assert_called_once_with("/root/templates/raw_therapee.pp3")
@patch("profile_generator.util.file.read_file")
def test_get_profile_template_raises_error_when_cannot_read_template_file(
self, read_file: Mock
) -> None:
read_file.side_effect = OSError
self.assertRaises(TemplateFileReadError, generator.get_profile_template)
@patch("profile_generator.util.file.read_file")
def test_load_configuration_file_loads_configuration_files(
self, read_file: Mock
) -> None:
read_file.return_value = '{"a": 2}'
schema = object_of({"a": type_of(int)})
config = generator.load_configuration_file("config.json", schema)
self.assertEqual({"a": 2}, config)
read_file.assert_called_once_with("config.json")
@patch("profile_generator.util.file.read_file")
def test_load_configuration_file_raises_error_when_config_file_cannot_be_read(
self, read_file: Mock
) -> None:
schema = object_of({"a": type_of(int)})
read_file.side_effect = OSError
self.assertRaises(
ConfigFileReadError,
generator.load_configuration_file,
"config.json",
schema,
)
@patch("profile_generator.util.file.read_file")
def test_load_configuration_file_raises_error_when_contains_variable_error(
self, read_file: Mock
) -> None:
read_file.return_value = '{"a": "$a"}'
schema = object_of({"a": type_of(str)})
self.assertRaises(
InvalidConfigFileError,
generator.load_configuration_file,
"config.json",
schema,
)
@patch("profile_generator.util.file.read_file")
def test_load_configuration_file_raises_error_when_config_file_is_invalid(
self, read_file: Mock
) -> None:
read_file.return_value = '{"a": false}'
schema = object_of({"a": type_of(int)})
self.assertRaises(
InvalidConfigFileError,
generator.load_configuration_file,
"config.json",
schema,
)
@patch("profile_generator.util.file.read_file")
def test_load_configuration_file_raises_error_when_config_file_is_invalid_json(
self, read_file: Mock
) -> None:
read_file.return_value = '{"a": false'
schema = object_of({"a": type_of(int)})
self.assertRaises(
InvalidConfigFileError,
generator.load_configuration_file,
"config.json",
schema,
)
def test_create_profile_content_should_create_profile_content(self) -> None:
template = "{a}"
cfg = {"a": "1"}
marshall = lambda x: x
content = generator.create_profile_content(template, cfg, marshall)
self.assertEqual(content, "1")
@classmethod
@patch("profile_generator.util.file.write_file")
    def test_persist_profile_should_persist_profile(cls, write_file: Mock) -> None:
name = "profile_name"
content = "1"
output_dir = "dir"
generator.persist_profile(name, content, output_dir)
write_file.assert_called_once_with(content, output_dir, f"{name}.pp3")
@patch("profile_generator.util.file.write_file")
def test_persist_profile_should_raise_error_when_writing_file_failed(
self, write_file: Mock
) -> None:
name = "profile_name"
content = "1"
output_dir = "dir"
write_file.side_effect = OSError
self.assertRaises(
ProfileWriteError,
generator.persist_profile,
name,
content,
output_dir,
)
| [
"profile_generator.generator.get_config_files",
"profile_generator.schema.type_of",
"profile_generator.generator.create_output_dir",
"profile_generator.generator.create_profile_content",
"profile_generator.generator.persist_profile",
"unittest.mock.patch",
"profile_generator.generator.get_profile_template",
"profile_generator.generator.load_configuration_file"
] | [((410, 463), 'unittest.mock.patch', 'patch', (['"""sys.argv"""', "['app.py', 'one.json', 'two.json']"], {}), "('sys.argv', ['app.py', 'one.json', 'two.json'])\n", (415, 463), False, 'from unittest.mock import Mock, patch\n'), ((617, 646), 'unittest.mock.patch', 'patch', (['"""sys.argv"""', "['app.py']"], {}), "('sys.argv', ['app.py'])\n", (622, 646), False, 'from unittest.mock import Mock, patch\n'), ((1068, 1115), 'unittest.mock.patch', 'patch', (['"""profile_generator.util.file.create_dir"""'], {}), "('profile_generator.util.file.create_dir')\n", (1073, 1115), False, 'from unittest.mock import Mock, patch\n'), ((1359, 1405), 'unittest.mock.patch', 'patch', (['"""profile_generator.util.file.read_file"""'], {}), "('profile_generator.util.file.read_file')\n", (1364, 1405), False, 'from unittest.mock import Mock, patch\n'), ((1831, 1877), 'unittest.mock.patch', 'patch', (['"""profile_generator.util.file.read_file"""'], {}), "('profile_generator.util.file.read_file')\n", (1836, 1877), False, 'from unittest.mock import Mock, patch\n'), ((2130, 2176), 'unittest.mock.patch', 'patch', (['"""profile_generator.util.file.read_file"""'], {}), "('profile_generator.util.file.read_file')\n", (2135, 2176), False, 'from unittest.mock import Mock, patch\n'), ((2560, 2606), 'unittest.mock.patch', 'patch', (['"""profile_generator.util.file.read_file"""'], {}), "('profile_generator.util.file.read_file')\n", (2565, 2606), False, 'from unittest.mock import Mock, patch\n'), ((2994, 3040), 'unittest.mock.patch', 'patch', (['"""profile_generator.util.file.read_file"""'], {}), "('profile_generator.util.file.read_file')\n", (2999, 3040), False, 'from unittest.mock import Mock, patch\n'), ((3435, 3481), 'unittest.mock.patch', 'patch', (['"""profile_generator.util.file.read_file"""'], {}), "('profile_generator.util.file.read_file')\n", (3440, 3481), False, 'from unittest.mock import Mock, patch\n'), ((3876, 3922), 'unittest.mock.patch', 'patch', (['"""profile_generator.util.file.read_file"""'], {}), "('profile_generator.util.file.read_file')\n", (3881, 3922), False, 'from unittest.mock import Mock, patch\n'), ((4618, 4665), 'unittest.mock.patch', 'patch', (['"""profile_generator.util.file.write_file"""'], {}), "('profile_generator.util.file.write_file')\n", (4623, 4665), False, 'from unittest.mock import Mock, patch\n'), ((4977, 5024), 'unittest.mock.patch', 'patch', (['"""profile_generator.util.file.write_file"""'], {}), "('profile_generator.util.file.write_file')\n", (4982, 5024), False, 'from unittest.mock import Mock, patch\n'), ((2396, 2452), 'profile_generator.generator.load_configuration_file', 'generator.load_configuration_file', (['"""config.json"""', 'schema'], {}), "('config.json', schema)\n", (2429, 2452), False, 'from profile_generator import generator\n'), ((4497, 4554), 'profile_generator.generator.create_profile_content', 'generator.create_profile_content', (['template', 'cfg', 'marshall'], {}), '(template, cfg, marshall)\n', (4529, 4554), False, 'from profile_generator import generator\n'), ((4838, 4890), 'profile_generator.generator.persist_profile', 'generator.persist_profile', (['name', 'content', 'output_dir'], {}), '(name, content, output_dir)\n', (4863, 4890), False, 'from profile_generator import generator\n'), ((581, 609), 'profile_generator.generator.get_config_files', 'generator.get_config_files', ([], {}), '()\n', (607, 609), False, 'from profile_generator import generator\n'), ((1031, 1060), 'profile_generator.generator.create_output_dir', 'generator.create_output_dir', ([], {}), 
'()\n', (1058, 1060), False, 'from profile_generator import generator\n'), ((1713, 1745), 'profile_generator.generator.get_profile_template', 'generator.get_profile_template', ([], {}), '()\n', (1743, 1745), False, 'from profile_generator import generator\n'), ((2363, 2375), 'profile_generator.schema.type_of', 'type_of', (['int'], {}), '(int)\n', (2370, 2375), False, 'from profile_generator.schema import object_of, type_of\n'), ((2768, 2780), 'profile_generator.schema.type_of', 'type_of', (['int'], {}), '(int)\n', (2775, 2780), False, 'from profile_generator.schema import object_of, type_of\n'), ((3246, 3258), 'profile_generator.schema.type_of', 'type_of', (['str'], {}), '(str)\n', (3253, 3258), False, 'from profile_generator.schema import object_of, type_of\n'), ((3687, 3699), 'profile_generator.schema.type_of', 'type_of', (['int'], {}), '(int)\n', (3694, 3699), False, 'from profile_generator.schema import object_of, type_of\n'), ((4132, 4144), 'profile_generator.schema.type_of', 'type_of', (['int'], {}), '(int)\n', (4139, 4144), False, 'from profile_generator.schema import object_of, type_of\n')] |
import logging
# Store the log file in filename.log with encoding utf-8 and capture everything at
# log level DEBUG and above (i.e. every message).
logging.basicConfig(filename="filename.log", encoding="utf-8", level=logging.DEBUG)

logging.debug("debug message")
logging.info("info message")
logging.warning("warning message")
logging.error("error message")
logging.critical("critical message")

# One logger can feed one file handler, or several handlers with different log levels.
file_handler = logging.FileHandler(filename="filename.log")
file_handler.setLevel(logging.DEBUG)
file_handler.set_name("file")

format_string = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
formatter = logging.Formatter(format_string)
file_handler.setFormatter(formatter)

logger = logging.getLogger()
logger.addHandler(file_handler)
logger.addHandler(logging.StreamHandler())  # also echo records to the console

try:
    run()  # run() stands in for the application's entry point
except Exception:
    logging.exception('Got exception on main handler')
    raise

# Logging levels (level : numeric value):
# CRITICAL : 50
# ERROR    : 40
# WARNING  : 30
# INFO     : 20
# DEBUG    : 10
# NOTSET   : 0
| [
"logging.error",
"logging.exception",
"logging.debug",
"logging.FileHandler",
"logging.basicConfig",
"logging.StreamHandler",
"logging.formatter",
"logging.info",
"logging.critical",
"logging.getLogger"
] | [((139, 227), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '"""filename.log"""', 'encoding': '"""utf-8"""', 'level': 'logging.DEBUG'}), "(filename='filename.log', encoding='utf-8', level=\n logging.DEBUG)\n", (158, 227), False, 'import logging\n'), ((223, 238), 'logging.debug', 'logging.debug', ([], {}), '()\n', (236, 238), False, 'import logging\n'), ((239, 253), 'logging.info', 'logging.info', ([], {}), '()\n', (251, 253), False, 'import logging\n'), ((272, 287), 'logging.error', 'logging.error', ([], {}), '()\n', (285, 287), False, 'import logging\n'), ((288, 306), 'logging.critical', 'logging.critical', ([], {}), '()\n', (304, 306), False, 'import logging\n'), ((425, 469), 'logging.FileHandler', 'logging.FileHandler', ([], {'filename': '"""filename.log"""'}), "(filename='filename.log')\n", (444, 469), False, 'import logging\n'), ((618, 650), 'logging.formatter', 'logging.formatter', (['format_string'], {}), '(format_string)\n', (635, 650), False, 'import logging\n'), ((701, 720), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (718, 720), False, 'import logging\n'), ((756, 779), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (777, 779), False, 'import logging\n'), ((808, 858), 'logging.exception', 'logging.exception', (['"""Got exception on main handler"""'], {}), "('Got exception on main handler')\n", (825, 858), False, 'import logging\n')] |
from django.contrib.auth import authenticate
from django.contrib.auth import get_user_model
from django.utils.translation import gettext_lazy as _
from rest_framework import serializers
from rest_framework.exceptions import AuthenticationFailed
from rest_framework_simplejwt.tokens import RefreshToken
User = get_user_model()
class SignupSerializer(serializers.ModelSerializer):
"""Serializer for signup user."""
password = serializers.CharField(max_length=150,
min_length=6,
write_only=True)
def create(self, validated_data):
"""Create a new user."""
return User.objects.create_user(**validated_data)
class Meta:
"""Meta information for signup serializer."""
model = User
fields = ['username', 'name', 'role', 'password']
extra_kwargs = {
'username': {
'required': True
},
'role': {
'required': True
},
'password': {
'required': True
}
}
ref_name = 'Sign up credentials'
class LoginSerializer(serializers.Serializer):
"""Serializer for login user."""
password = serializers.CharField(max_length=150,
min_length=5,
write_only=True)
username = serializers.CharField(max_length=150,
min_length=5,
write_only=True)
def validate(self, attrs):
"""Validate credentials and get user tokens."""
username = attrs.get('username', '')
password = attrs.get('password', '')
user = authenticate(username=username, password=password)
if not user:
raise AuthenticationFailed(_('Invalid credentials'))
refresh = RefreshToken.for_user(user)
return {'access': str(refresh.access_token), 'refresh': str(refresh)}
class Meta:
"""Meta information for login serializer."""
ref_name = 'Login credentials'
| [
"django.utils.translation.gettext_lazy",
"django.contrib.auth.get_user_model",
"rest_framework_simplejwt.tokens.RefreshToken.for_user",
"rest_framework.serializers.CharField",
"django.contrib.auth.authenticate"
] | [((312, 328), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (326, 328), False, 'from django.contrib.auth import get_user_model\n'), ((438, 506), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(150)', 'min_length': '(6)', 'write_only': '(True)'}), '(max_length=150, min_length=6, write_only=True)\n', (459, 506), False, 'from rest_framework import serializers\n'), ((1257, 1325), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(150)', 'min_length': '(5)', 'write_only': '(True)'}), '(max_length=150, min_length=5, write_only=True)\n', (1278, 1325), False, 'from rest_framework import serializers\n'), ((1415, 1483), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(150)', 'min_length': '(5)', 'write_only': '(True)'}), '(max_length=150, min_length=5, write_only=True)\n', (1436, 1483), False, 'from rest_framework import serializers\n'), ((1751, 1801), 'django.contrib.auth.authenticate', 'authenticate', ([], {'username': 'username', 'password': 'password'}), '(username=username, password=password)\n', (1763, 1801), False, 'from django.contrib.auth import authenticate\n'), ((1906, 1933), 'rest_framework_simplejwt.tokens.RefreshToken.for_user', 'RefreshToken.for_user', (['user'], {}), '(user)\n', (1927, 1933), False, 'from rest_framework_simplejwt.tokens import RefreshToken\n'), ((1862, 1886), 'django.utils.translation.gettext_lazy', '_', (['"""Invalid credentials"""'], {}), "('Invalid credentials')\n", (1863, 1886), True, 'from django.utils.translation import gettext_lazy as _\n')] |
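A hedged usage sketch, assuming Django settings are configured and a user with these (illustrative) credentials exists: the login serializer returns the token pair through validated_data.
serializer = LoginSerializer(data={"username": "alice", "password": "s3cretpass"})
serializer.is_valid(raise_exception=True)
tokens = serializer.validated_data  # {'access': '<jwt>', 'refresh': '<jwt>'}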
#!/usr/bin/env python
"""Remove embedded signalalign analyses from files"""
########################################################################
# File: remove_sa_analyses.py
# executable: remove_sa_analyses.py
#
# Author: <NAME>
# History: 02/06/19 Created
########################################################################
import os
from py3helpers.utils import list_dir
from py3helpers.multiprocess import *
from argparse import ArgumentParser
from signalalign.fast5 import Fast5
import numpy as np
def parse_args():
parser = ArgumentParser(description=__doc__)
# required arguments
parser.add_argument('--directory', '-d', required=True, action='store',
dest='dir', type=str, default=None,
help="Path to directory of fast5 files")
parser.add_argument('--analysis', required=False, action='store_true',
dest='analysis', default=False,
help="Remove all analysis files")
parser.add_argument('--basecall', required=False, action='store_true',
dest='basecall', default=False,
help="Remove all basecall files")
parser.add_argument('--signalalign', required=False, action='store_true',
dest='signalalign', default=False,
help="Remove all signalalign files")
parser.add_argument('--threads', required=False, action='store',
dest='threads', default=1, type=int,
help="number of threads to run")
args = parser.parse_args()
return args
def remove_sa_analyses(fast5):
"""Remove signalalign analyses from a fast5 file"""
    assert os.path.exists(fast5), "Fast5 path {} does not exist".format(fast5)
fh = Fast5(fast5, read='r+')
counter = 0
for analyses in [x for x in list(fh["Analyses"].keys()) if "SignalAlign" in x]:
fh.delete(os.path.join("Analyses", analyses))
counter += 1
fh = fh.repack()
fh.close()
return counter
def remove_basecall_analyses(fast5):
"""Remove basecall analyses from a fast5 file"""
    assert os.path.exists(fast5), "Fast5 path {} does not exist".format(fast5)
fh = Fast5(fast5, read='r+')
counter = 0
for analyses in [x for x in list(fh["Analyses"].keys()) if "Basecall" in x]:
fh.delete(os.path.join("Analyses", analyses))
counter += 1
fh = fh.repack()
fh.close()
return counter
def remove_analyses(fast5):
"""Remove analyses from a fast5 file"""
    assert os.path.exists(fast5), "Fast5 path {} does not exist".format(fast5)
fh = Fast5(fast5, read='r+')
counter = 0
for analyses in [x for x in list(fh["Analyses"].keys())]:
fh.delete(os.path.join("Analyses", analyses))
counter += 1
fh.delete("Analyses")
fh = fh.repack()
fh.close()
return counter
def main():
args = parse_args()
function_to_run = None
if args.analysis:
function_to_run = remove_analyses
else:
if args.signalalign or not args.basecall:
function_to_run = remove_sa_analyses
elif args.basecall:
function_to_run = remove_basecall_analyses
assert function_to_run is not None, "Must select --analysis, --signalalign or --basecall."
service = BasicService(function_to_run, service_name="forward_multiprocess_aggregate_all_variantcalls")
files = list_dir(args.dir, ext="fast5")
total, failure, messages, output = run_service(service.run, files,
{}, ["fast5"], worker_count=args.threads)
print("Deleted {} analysis datasets deleted from {} files".format(np.asarray(output).sum(), len(files)))
if __name__ == '__main__':
main()
| [
"argparse.ArgumentParser",
"numpy.asarray",
"os.path.exists",
"signalalign.fast5.Fast5",
"os.path.join",
"py3helpers.utils.list_dir"
] | [((547, 582), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (561, 582), False, 'from argparse import ArgumentParser\n'), ((1720, 1741), 'os.path.exists', 'os.path.exists', (['fast5'], {}), '(fast5)\n', (1734, 1741), False, 'import os\n'), ((1794, 1817), 'signalalign.fast5.Fast5', 'Fast5', (['fast5'], {'read': '"""r+"""'}), "(fast5, read='r+')\n", (1799, 1817), False, 'from signalalign.fast5 import Fast5\n'), ((2151, 2172), 'os.path.exists', 'os.path.exists', (['fast5'], {}), '(fast5)\n', (2165, 2172), False, 'import os\n'), ((2225, 2248), 'signalalign.fast5.Fast5', 'Fast5', (['fast5'], {'read': '"""r+"""'}), "(fast5, read='r+')\n", (2230, 2248), False, 'from signalalign.fast5 import Fast5\n'), ((2561, 2582), 'os.path.exists', 'os.path.exists', (['fast5'], {}), '(fast5)\n', (2575, 2582), False, 'import os\n'), ((2635, 2658), 'signalalign.fast5.Fast5', 'Fast5', (['fast5'], {'read': '"""r+"""'}), "(fast5, read='r+')\n", (2640, 2658), False, 'from signalalign.fast5 import Fast5\n'), ((3431, 3462), 'py3helpers.utils.list_dir', 'list_dir', (['args.dir'], {'ext': '"""fast5"""'}), "(args.dir, ext='fast5')\n", (3439, 3462), False, 'from py3helpers.utils import list_dir\n'), ((1936, 1970), 'os.path.join', 'os.path.join', (['"""Analyses"""', 'analyses'], {}), "('Analyses', analyses)\n", (1948, 1970), False, 'import os\n'), ((2364, 2398), 'os.path.join', 'os.path.join', (['"""Analyses"""', 'analyses'], {}), "('Analyses', analyses)\n", (2376, 2398), False, 'import os\n'), ((2755, 2789), 'os.path.join', 'os.path.join', (['"""Analyses"""', 'analyses'], {}), "('Analyses', analyses)\n", (2767, 2789), False, 'import os\n'), ((3697, 3715), 'numpy.asarray', 'np.asarray', (['output'], {}), '(output)\n', (3707, 3715), True, 'import numpy as np\n')] |
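A hedged single-file sketch (the path is illustrative): the helpers above can also be called directly, without the multiprocess service.
deleted = remove_sa_analyses("/data/reads/read_0001.fast5")
print("deleted {} SignalAlign analysis groups".format(deleted))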
# 1
def max_elem(a):
max0 = a[0]
for elem in a:
if elem > max0:
max0 = elem
return max0
list0 = [2,3,4,5,6,7,1,2,3]
result = max_elem(list0)
print("#1 :",result) # return 7
# 2
list1 = [10,12,3,14,20,7,6,5]
list1.sort()
print("#2 :",list1[-1])
# 3
list2 = [3,5,9,7,1,5,8,8,7,5,6]
max_num = max(list2)
print("#3 :", max_num)
#4
from functools import reduce
list3 = [-5,-6,-7,-99,-67,-3,-4,-9]
print("#4 :",reduce(max, list3)) | [
"functools.reduce"
] | [((471, 489), 'functools.reduce', 'reduce', (['max', 'list3'], {}), '(max, list3)\n', (477, 489), False, 'from functools import reduce\n')] |
#!/usr/bin/env python
import orjson
from falcon import media
from app import wsgi
# custom JSON handler
JSONHandler = media.JSONHandler(dumps=orjson.dumps, loads=orjson.loads)
extra_handlers = {
"application/json": JSONHandler,
"application/json; charset=UTF-8": JSONHandler
}
wsgi.req_options.media_handlers.update(extra_handlers)
wsgi.resp_options.media_handlers.update(extra_handlers)
| [
"app.wsgi.resp_options.media_handlers.update",
"falcon.media.JSONHandler",
"app.wsgi.req_options.media_handlers.update"
] | [((120, 177), 'falcon.media.JSONHandler', 'media.JSONHandler', ([], {'dumps': 'orjson.dumps', 'loads': 'orjson.loads'}), '(dumps=orjson.dumps, loads=orjson.loads)\n', (137, 177), False, 'from falcon import media\n'), ((287, 341), 'app.wsgi.req_options.media_handlers.update', 'wsgi.req_options.media_handlers.update', (['extra_handlers'], {}), '(extra_handlers)\n', (325, 341), False, 'from app import wsgi\n'), ((342, 397), 'app.wsgi.resp_options.media_handlers.update', 'wsgi.resp_options.media_handlers.update', (['extra_handlers'], {}), '(extra_handlers)\n', (381, 397), False, 'from app import wsgi\n')] |
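A hedged follow-up sketch (not part of the original module): with the orjson handlers installed, any resource that assigns resp.media is serialized by orjson; the route and resource are illustrative and wsgi is assumed to be a falcon App/API instance.
class Ping:
    def on_get(self, req, resp):
        resp.media = {"ok": True}

wsgi.add_route("/ping", Ping())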
# -*- coding: utf-8 -*-
"""
This module
"""
import attr
import typing
from ..core.model import (
Property, Resource, Tag, GetAtt, TypeHint, TypeCheck,
)
from ..core.constant import AttrMeta
#--- Property declaration ---
@attr.s
class EnvironmentOptionSetting(Property):
"""
AWS Object Type = "AWS::ElasticBeanstalk::Environment.OptionSetting"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html
Property Document:
- ``rp_Namespace``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-namespace
- ``rp_OptionName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-optionname
- ``p_ResourceName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-elasticbeanstalk-environment-optionsetting-resourcename
- ``p_Value``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-value
"""
AWS_OBJECT_TYPE = "AWS::ElasticBeanstalk::Environment.OptionSetting"
rp_Namespace: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Namespace"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-namespace"""
rp_OptionName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "OptionName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-optionname"""
p_ResourceName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ResourceName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-elasticbeanstalk-environment-optionsetting-resourcename"""
p_Value: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Value"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-value"""
@attr.s
class ApplicationVersionSourceBundle(Property):
"""
AWS Object Type = "AWS::ElasticBeanstalk::ApplicationVersion.SourceBundle"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html
Property Document:
- ``rp_S3Bucket``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3bucket
- ``rp_S3Key``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3key
"""
AWS_OBJECT_TYPE = "AWS::ElasticBeanstalk::ApplicationVersion.SourceBundle"
rp_S3Bucket: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "S3Bucket"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3bucket"""
rp_S3Key: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "S3Key"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3key"""
@attr.s
class ApplicationMaxAgeRule(Property):
"""
AWS Object Type = "AWS::ElasticBeanstalk::Application.MaxAgeRule"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html
Property Document:
- ``p_DeleteSourceFromS3``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-deletesourcefroms3
- ``p_Enabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-enabled
- ``p_MaxAgeInDays``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-maxageindays
"""
AWS_OBJECT_TYPE = "AWS::ElasticBeanstalk::Application.MaxAgeRule"
p_DeleteSourceFromS3: bool = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(bool)),
metadata={AttrMeta.PROPERTY_NAME: "DeleteSourceFromS3"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-deletesourcefroms3"""
p_Enabled: bool = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(bool)),
metadata={AttrMeta.PROPERTY_NAME: "Enabled"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-enabled"""
p_MaxAgeInDays: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "MaxAgeInDays"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-maxageindays"""
@attr.s
class ConfigurationTemplateSourceConfiguration(Property):
"""
AWS Object Type = "AWS::ElasticBeanstalk::ConfigurationTemplate.SourceConfiguration"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html
Property Document:
- ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-applicationname
- ``rp_TemplateName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-templatename
"""
AWS_OBJECT_TYPE = "AWS::ElasticBeanstalk::ConfigurationTemplate.SourceConfiguration"
rp_ApplicationName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "ApplicationName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-applicationname"""
rp_TemplateName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "TemplateName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-templatename"""
@attr.s
class EnvironmentTier(Property):
"""
AWS Object Type = "AWS::ElasticBeanstalk::Environment.Tier"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html
Property Document:
- ``p_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-name
- ``p_Type``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-type
- ``p_Version``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-version
"""
AWS_OBJECT_TYPE = "AWS::ElasticBeanstalk::Environment.Tier"
p_Name: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Name"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-name"""
p_Type: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Type"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-type"""
p_Version: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Version"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-version"""
@attr.s
class ConfigurationTemplateConfigurationOptionSetting(Property):
"""
AWS Object Type = "AWS::ElasticBeanstalk::ConfigurationTemplate.ConfigurationOptionSetting"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html
Property Document:
- ``rp_Namespace``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-namespace
- ``rp_OptionName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-optionname
- ``p_ResourceName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-resourcename
- ``p_Value``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-value
"""
AWS_OBJECT_TYPE = "AWS::ElasticBeanstalk::ConfigurationTemplate.ConfigurationOptionSetting"
rp_Namespace: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Namespace"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-namespace"""
rp_OptionName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "OptionName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-optionname"""
p_ResourceName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ResourceName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-resourcename"""
p_Value: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Value"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-value"""
@attr.s
class ApplicationMaxCountRule(Property):
"""
AWS Object Type = "AWS::ElasticBeanstalk::Application.MaxCountRule"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html
Property Document:
- ``p_DeleteSourceFromS3``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-deletesourcefroms3
- ``p_Enabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-enabled
- ``p_MaxCount``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-maxcount
"""
AWS_OBJECT_TYPE = "AWS::ElasticBeanstalk::Application.MaxCountRule"
p_DeleteSourceFromS3: bool = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(bool)),
metadata={AttrMeta.PROPERTY_NAME: "DeleteSourceFromS3"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-deletesourcefroms3"""
p_Enabled: bool = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(bool)),
metadata={AttrMeta.PROPERTY_NAME: "Enabled"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-enabled"""
p_MaxCount: int = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(int)),
metadata={AttrMeta.PROPERTY_NAME: "MaxCount"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-maxcount"""
@attr.s
class ApplicationApplicationVersionLifecycleConfig(Property):
"""
AWS Object Type = "AWS::ElasticBeanstalk::Application.ApplicationVersionLifecycleConfig"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html
Property Document:
- ``p_MaxAgeRule``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxagerule
- ``p_MaxCountRule``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxcountrule
"""
AWS_OBJECT_TYPE = "AWS::ElasticBeanstalk::Application.ApplicationVersionLifecycleConfig"
p_MaxAgeRule: typing.Union['ApplicationMaxAgeRule', dict] = attr.ib(
default=None,
converter=ApplicationMaxAgeRule.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(ApplicationMaxAgeRule)),
metadata={AttrMeta.PROPERTY_NAME: "MaxAgeRule"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxagerule"""
p_MaxCountRule: typing.Union['ApplicationMaxCountRule', dict] = attr.ib(
default=None,
converter=ApplicationMaxCountRule.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(ApplicationMaxCountRule)),
metadata={AttrMeta.PROPERTY_NAME: "MaxCountRule"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxcountrule"""
@attr.s
class ApplicationApplicationResourceLifecycleConfig(Property):
"""
AWS Object Type = "AWS::ElasticBeanstalk::Application.ApplicationResourceLifecycleConfig"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html
Property Document:
- ``p_ServiceRole``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-servicerole
- ``p_VersionLifecycleConfig``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-versionlifecycleconfig
"""
AWS_OBJECT_TYPE = "AWS::ElasticBeanstalk::Application.ApplicationResourceLifecycleConfig"
p_ServiceRole: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ServiceRole"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-servicerole"""
p_VersionLifecycleConfig: typing.Union['ApplicationApplicationVersionLifecycleConfig', dict] = attr.ib(
default=None,
converter=ApplicationApplicationVersionLifecycleConfig.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(ApplicationApplicationVersionLifecycleConfig)),
metadata={AttrMeta.PROPERTY_NAME: "VersionLifecycleConfig"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-versionlifecycleconfig"""
#--- Resource declaration ---
@attr.s
class ConfigurationTemplate(Resource):
"""
AWS Object Type = "AWS::ElasticBeanstalk::ConfigurationTemplate"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html
Property Document:
- ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-applicationname
- ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-description
- ``p_EnvironmentId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-environmentid
- ``p_OptionSettings``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-optionsettings
- ``p_PlatformArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-platformarn
- ``p_SolutionStackName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-solutionstackname
- ``p_SourceConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration
"""
AWS_OBJECT_TYPE = "AWS::ElasticBeanstalk::ConfigurationTemplate"
rp_ApplicationName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "ApplicationName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-applicationname"""
p_Description: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Description"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-description"""
p_EnvironmentId: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "EnvironmentId"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-environmentid"""
p_OptionSettings: typing.List[typing.Union['ConfigurationTemplateConfigurationOptionSetting', dict]] = attr.ib(
default=None,
converter=ConfigurationTemplateConfigurationOptionSetting.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(ConfigurationTemplateConfigurationOptionSetting), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "OptionSettings"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-optionsettings"""
p_PlatformArn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "PlatformArn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-platformarn"""
p_SolutionStackName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "SolutionStackName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-solutionstackname"""
p_SourceConfiguration: typing.Union['ConfigurationTemplateSourceConfiguration', dict] = attr.ib(
default=None,
converter=ConfigurationTemplateSourceConfiguration.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(ConfigurationTemplateSourceConfiguration)),
metadata={AttrMeta.PROPERTY_NAME: "SourceConfiguration"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration"""
@attr.s
class Application(Resource):
"""
AWS Object Type = "AWS::ElasticBeanstalk::Application"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html
Property Document:
- ``p_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-name
- ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-description
- ``p_ResourceLifecycleConfig``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-resourcelifecycleconfig
"""
AWS_OBJECT_TYPE = "AWS::ElasticBeanstalk::Application"
p_ApplicationName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "ApplicationName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-name"""
p_Description: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Description"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-description"""
p_ResourceLifecycleConfig: typing.Union['ApplicationApplicationResourceLifecycleConfig', dict] = attr.ib(
default=None,
converter=ApplicationApplicationResourceLifecycleConfig.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(ApplicationApplicationResourceLifecycleConfig)),
metadata={AttrMeta.PROPERTY_NAME: "ResourceLifecycleConfig"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-resourcelifecycleconfig"""
@attr.s
class Environment(Resource):
"""
AWS Object Type = "AWS::ElasticBeanstalk::Environment"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html
Property Document:
- ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-applicationname
- ``p_CNAMEPrefix``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-cnameprefix
- ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-description
- ``p_EnvironmentName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-name
- ``p_OperationsRole``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-operations-role
- ``p_OptionSettings``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-optionsettings
- ``p_PlatformArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-platformarn
- ``p_SolutionStackName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-solutionstackname
- ``p_TemplateName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-templatename
- ``p_Tier``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-tier
- ``p_VersionLabel``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-versionlabel
- ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-elasticbeanstalk-environment-tags
"""
AWS_OBJECT_TYPE = "AWS::ElasticBeanstalk::Environment"
rp_ApplicationName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "ApplicationName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-applicationname"""
p_CNAMEPrefix: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "CNAMEPrefix"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-cnameprefix"""
p_Description: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Description"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-description"""
p_EnvironmentName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "EnvironmentName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-name"""
p_OperationsRole: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "OperationsRole"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-operations-role"""
p_OptionSettings: typing.List[typing.Union['EnvironmentOptionSetting', dict]] = attr.ib(
default=None,
converter=EnvironmentOptionSetting.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(EnvironmentOptionSetting), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "OptionSettings"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-optionsettings"""
p_PlatformArn: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "PlatformArn"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-platformarn"""
p_SolutionStackName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "SolutionStackName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-solutionstackname"""
p_TemplateName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "TemplateName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-templatename"""
p_Tier: typing.Union['EnvironmentTier', dict] = attr.ib(
default=None,
converter=EnvironmentTier.from_dict,
validator=attr.validators.optional(attr.validators.instance_of(EnvironmentTier)),
metadata={AttrMeta.PROPERTY_NAME: "Tier"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-tier"""
p_VersionLabel: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "VersionLabel"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-versionlabel"""
p_Tags: typing.List[typing.Union[Tag, dict]] = attr.ib(
default=None,
converter=Tag.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(Tag), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "Tags"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-elasticbeanstalk-environment-tags"""
@property
def rv_EndpointURL(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#aws-properties-beanstalk-environment-return-values"""
return GetAtt(resource=self, attr_name="EndpointURL")
@attr.s
class ApplicationVersion(Resource):
"""
AWS Object Type = "AWS::ElasticBeanstalk::ApplicationVersion"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html
Property Document:
- ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-applicationname
- ``rp_SourceBundle``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-sourcebundle
- ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-description
"""
AWS_OBJECT_TYPE = "AWS::ElasticBeanstalk::ApplicationVersion"
rp_ApplicationName: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "ApplicationName"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-applicationname"""
rp_SourceBundle: typing.Union['ApplicationVersionSourceBundle', dict] = attr.ib(
default=None,
converter=ApplicationVersionSourceBundle.from_dict,
validator=attr.validators.instance_of(ApplicationVersionSourceBundle),
metadata={AttrMeta.PROPERTY_NAME: "SourceBundle"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-sourcebundle"""
p_Description: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),
metadata={AttrMeta.PROPERTY_NAME: "Description"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-description"""
| [
"attr.validators.instance_of"
] | [((1399, 1456), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (1426, 1456), False, 'import attr\n'), ((1775, 1832), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (1802, 1832), False, 'import attr\n'), ((3682, 3739), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (3709, 3739), False, 'import attr\n'), ((4046, 4103), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (4073, 4103), False, 'import attr\n'), ((7558, 7615), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (7585, 7615), False, 'import attr\n'), ((8015, 8072), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (8042, 8072), False, 'import attr\n'), ((11930, 11987), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (11957, 11987), False, 'import attr\n'), ((12387, 12444), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (12414, 12444), False, 'import attr\n'), ((22073, 22130), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (22100, 22130), False, 'import attr\n'), ((30065, 30122), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (30092, 30122), False, 'import attr\n'), ((36405, 36462), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (36432, 36462), False, 'import attr\n'), ((36888, 36947), 'attr.validators.instance_of', 'attr.validators.instance_of', (['ApplicationVersionSourceBundle'], {}), '(ApplicationVersionSourceBundle)\n', (36915, 36947), False, 'import attr\n'), ((2179, 2236), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (2206, 2236), False, 'import attr\n'), ((2599, 2656), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (2626, 2656), False, 'import attr\n'), ((5424, 5457), 'attr.validators.instance_of', 'attr.validators.instance_of', (['bool'], {}), '(bool)\n', (5451, 5457), False, 'import attr\n'), ((5827, 5860), 'attr.validators.instance_of', 'attr.validators.instance_of', (['bool'], {}), '(bool)\n', (5854, 5860), False, 'import attr\n'), ((6212, 6244), 'attr.validators.instance_of', 'attr.validators.instance_of', (['int'], {}), '(int)\n', (6239, 6244), False, 'import attr\n'), ((9306, 9363), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (9333, 9363), False, 'import attr\n'), ((9686, 9743), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (9713, 9743), False, 'import attr\n'), ((10069, 10126), 
'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (10096, 10126), False, 'import attr\n'), ((12872, 12929), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (12899, 12929), False, 'import attr\n'), ((13355, 13412), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (13382, 13412), False, 'import attr\n'), ((14832, 14865), 'attr.validators.instance_of', 'attr.validators.instance_of', (['bool'], {}), '(bool)\n', (14859, 14865), False, 'import attr\n'), ((15239, 15272), 'attr.validators.instance_of', 'attr.validators.instance_of', (['bool'], {}), '(bool)\n', (15266, 15272), False, 'import attr\n'), ((15624, 15656), 'attr.validators.instance_of', 'attr.validators.instance_of', (['int'], {}), '(int)\n', (15651, 15656), False, 'import attr\n'), ((17077, 17127), 'attr.validators.instance_of', 'attr.validators.instance_of', (['ApplicationMaxAgeRule'], {}), '(ApplicationMaxAgeRule)\n', (17104, 17127), False, 'import attr\n'), ((17626, 17678), 'attr.validators.instance_of', 'attr.validators.instance_of', (['ApplicationMaxCountRule'], {}), '(ApplicationMaxCountRule)\n', (17653, 17678), False, 'import attr\n'), ((19108, 19165), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (19135, 19165), False, 'import attr\n'), ((19720, 19793), 'attr.validators.instance_of', 'attr.validators.instance_of', (['ApplicationApplicationVersionLifecycleConfig'], {}), '(ApplicationApplicationVersionLifecycleConfig)\n', (19747, 19793), False, 'import attr\n'), ((22511, 22568), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (22538, 22568), False, 'import attr\n'), ((22944, 23001), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (22971, 23001), False, 'import attr\n'), ((24077, 24134), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (24104, 24134), False, 'import attr\n'), ((24514, 24571), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (24541, 24571), False, 'import attr\n'), ((25075, 25144), 'attr.validators.instance_of', 'attr.validators.instance_of', (['ConfigurationTemplateSourceConfiguration'], {}), '(ConfigurationTemplateSourceConfiguration)\n', (25102, 25144), False, 'import attr\n'), ((26374, 26431), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (26401, 26431), False, 'import attr\n'), ((26765, 26822), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (26792, 26822), False, 'import attr\n'), ((27291, 27365), 'attr.validators.instance_of', 'attr.validators.instance_of', (['ApplicationApplicationResourceLifecycleConfig'], {}), '(ApplicationApplicationResourceLifecycleConfig)\n', (27318, 27365), False, 'import attr\n'), ((30471, 30528), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), 
'(TypeCheck.intrinsic_str_type)\n', (30498, 30528), False, 'import attr\n'), ((30870, 30927), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (30897, 30927), False, 'import attr\n'), ((31273, 31330), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (31300, 31330), False, 'import attr\n'), ((31672, 31729), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (31699, 31729), False, 'import attr\n'), ((32675, 32732), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (32702, 32732), False, 'import attr\n'), ((33080, 33137), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (33107, 33137), False, 'import attr\n'), ((33492, 33549), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (33519, 33549), False, 'import attr\n'), ((33946, 33990), 'attr.validators.instance_of', 'attr.validators.instance_of', (['EnvironmentTier'], {}), '(EnvironmentTier)\n', (33973, 33990), False, 'import attr\n'), ((34319, 34376), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (34346, 34376), False, 'import attr\n'), ((37300, 37357), 'attr.validators.instance_of', 'attr.validators.instance_of', (['TypeCheck.intrinsic_str_type'], {}), '(TypeCheck.intrinsic_str_type)\n', (37327, 37357), False, 'import attr\n'), ((23566, 23642), 'attr.validators.instance_of', 'attr.validators.instance_of', (['ConfigurationTemplateConfigurationOptionSetting'], {}), '(ConfigurationTemplateConfigurationOptionSetting)\n', (23593, 23642), False, 'import attr\n'), ((23663, 23696), 'attr.validators.instance_of', 'attr.validators.instance_of', (['list'], {}), '(list)\n', (23690, 23696), False, 'import attr\n'), ((32219, 32272), 'attr.validators.instance_of', 'attr.validators.instance_of', (['EnvironmentOptionSetting'], {}), '(EnvironmentOptionSetting)\n', (32246, 32272), False, 'import attr\n'), ((32293, 32326), 'attr.validators.instance_of', 'attr.validators.instance_of', (['list'], {}), '(list)\n', (32320, 32326), False, 'import attr\n'), ((34807, 34839), 'attr.validators.instance_of', 'attr.validators.instance_of', (['Tag'], {}), '(Tag)\n', (34834, 34839), False, 'import attr\n'), ((34860, 34893), 'attr.validators.instance_of', 'attr.validators.instance_of', (['list'], {}), '(list)\n', (34887, 34893), False, 'import attr\n')] |
"""
Efficient serialization of GPU arrays.
"""
import cupy
from .cuda import cuda_serialize, cuda_deserialize
class PatchedCudaArrayInterface:
"""This class do two things:
1) Makes sure that __cuda_array_interface__['strides']
behaves as specified in the protocol.
2) Makes sure that the cuda context is active
when deallocating the base cuda array.
    Note that this is only needed when the array to deserialize
isn't a native cupy array.
"""
def __init__(self, ary):
cai = ary.__cuda_array_interface__
cai_cupy_vsn = cupy.ndarray(0).__cuda_array_interface__["version"]
if cai.get("strides") is None and cai_cupy_vsn < 2:
cai.pop("strides", None)
self.__cuda_array_interface__ = cai
# Save a ref to ary so it won't go out of scope
self.base = ary
def __del__(self):
# Making sure that the cuda context is active
# when deallocating the base cuda array
try:
import numba.cuda
numba.cuda.current_context()
except ImportError:
pass
del self.base
@cuda_serialize.register(cupy.ndarray)
def serialize_cupy_ndarray(x):
# Making sure `x` is behaving
if not x.flags.c_contiguous:
x = cupy.array(x, copy=True)
header = x.__cuda_array_interface__.copy()
return header, [x]
@cuda_deserialize.register(cupy.ndarray)
def deserialize_cupy_array(header, frames):
(frame,) = frames
if not isinstance(frame, cupy.ndarray):
frame = PatchedCudaArrayInterface(frame)
arr = cupy.ndarray(
header["shape"], dtype=header["typestr"], memptr=cupy.asarray(frame).data
)
return arr
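# Hypothetical round-trip sketch for the two handlers above. It assumes a CUDA
# device and cupy are available; in practice the registries call these for you.
def _example_roundtrip():
    x = cupy.arange(6, dtype="float32").reshape(2, 3)
    header, frames = serialize_cupy_ndarray(x)
    y = deserialize_cupy_array(header, frames)
    assert (x == y).all()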
| [
"cupy.ndarray",
"cupy.array",
"cupy.asarray"
] | [((1298, 1322), 'cupy.array', 'cupy.array', (['x'], {'copy': '(True)'}), '(x, copy=True)\n', (1308, 1322), False, 'import cupy\n'), ((596, 611), 'cupy.ndarray', 'cupy.ndarray', (['(0)'], {}), '(0)\n', (608, 611), False, 'import cupy\n'), ((1677, 1696), 'cupy.asarray', 'cupy.asarray', (['frame'], {}), '(frame)\n', (1689, 1696), False, 'import cupy\n')] |
import autosar
ws = autosar.workspace("4.2.2")
components = ws.createPackage("ComponentTypes")
swc = components.createCompositionComponent("MyComposition")
print(swc.name)
| [
"autosar.workspace"
] | [((23, 49), 'autosar.workspace', 'autosar.workspace', (['"""4.2.2"""'], {}), "('4.2.2')\n", (40, 49), False, 'import autosar\n')] |
'''
Task
You are given a date. Your task is to find what the day is on that date.
Input Format
A single line of input containing the space-separated month, day and year, respectively, in MM DD YYYY format.
Constraints
* 2000<year<3000
Output Format
Output the correct day in capital letters.
Sample Input
08 05 2015
Sample Output
WEDNESDAY
Explanation
The day on August 5th 2015 was WEDNESDAY.
'''
# Enter your code here. Read input from STDIN. Print output to STDOUT
import calendar as cal
day={0:'MONDAY',1:'TUESDAY',2:'WEDNESDAY',3:'THURSDAY',4:'FRIDAY',5:'SATURDAY',6:'SUNDAY'}
n=list(map(int,input().split()))
if n[2] in range(2001,3000):
n1=cal.weekday(n[2],n[0],n[1])
for i in day:
if i==n1:
print(day[i])
'''
output:
08 05 2015
WEDNESDAY
'''
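# A more direct, equivalent lookup (a sketch, checked here against the sample
# date only): calendar.day_name maps weekday() indices 0-6 to Monday-Sunday.
assert cal.day_name[cal.weekday(2015, 8, 5)].upper() == 'WEDNESDAY'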
| [
"calendar.weekday"
] | [((653, 682), 'calendar.weekday', 'cal.weekday', (['n[2]', 'n[0]', 'n[1]'], {}), '(n[2], n[0], n[1])\n', (664, 682), True, 'import calendar as cal\n')] |
import os, json
import cmd
import asyncio
from fitbit import Fitbit
from flask import Flask, render_template, url_for, session, redirect
from authlib.integrations.flask_client import OAuth
from azure.keyvault.secrets import SecretClient
from azure.identity import DefaultAzureCredential
from azure.core.exceptions import ResourceExistsError
from azure.eventhub.aio import EventHubProducerClient
from azure.eventhub import EventData
app = Flask(__name__)
app.config.from_object('config')
app.secret_key = '!secret'
oauth = OAuth(app)
client = oauth.register(name="fitbit")
# Step 1: Bring user to homepage to offer sync service with device cloud (fitbit in this example)
@app.route('/')
def home():
return render_template("index.html")
@app.route("/login")
def login():
redirect_uri = url_for('auth', _external=True)
    return oauth.fitbit.authorize_redirect(redirect_uri)
@app.route('/auth')
def auth():
token = oauth.fitbit.authorize_access_token()
secretName = session["user"] = token["user_id"]
secretValue = token["refresh_token"]
app.secret_key = token["access_token"]
client = SecretClient(vault_url=app.config["VAULT_URL"], credential=DefaultAzureCredential())
try:
client.set_secret(secretName, secretValue)
except ResourceExistsError:
        # assume the user has re-enabled the service; reset the key
client.begin_delete_secret(secretName)
# sync data with FHIR API using Io[M]T Connector
loop = asyncio.new_event_loop()
loop.run_until_complete(sync())
return "Successful Sync"
@app.route('/sync')
async def sync():
fit_client = Fitbit(user=session["user"], access_token=app.secret_key)
result = fit_client.init_sync()
# Create a producer client to send messages to the event hub.
# Specify a connection string to your event hubs namespace and
# the event hub name.
producer = EventHubProducerClient.from_connection_string(conn_str=app.config["EVENT_HUB_CONN_STR"])
async with producer:
# Create a batch.
event_data_batch = await producer.create_batch()
for item in result:
print(item)
event_data_batch.add(EventData(json.dumps(item, indent = 4)))
# Send the batch of events to the event hub.
await producer.send_batch(event_data_batch)
if __name__ == '__main__':
app.run()
| [
"azure.eventhub.aio.EventHubProducerClient.from_connection_string",
"flask.Flask",
"json.dumps",
"flask.url_for",
"flask.render_template",
"fitbit.Fitbit",
"authlib.integrations.flask_client.OAuth",
"azure.identity.DefaultAzureCredential",
"asyncio.new_event_loop"
] | [((439, 454), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (444, 454), False, 'from flask import Flask, render_template, url_for, session, redirect\n'), ((524, 534), 'authlib.integrations.flask_client.OAuth', 'OAuth', (['app'], {}), '(app)\n', (529, 534), False, 'from authlib.integrations.flask_client import OAuth\n'), ((713, 742), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (728, 742), False, 'from flask import Flask, render_template, url_for, session, redirect\n'), ((797, 828), 'flask.url_for', 'url_for', (['"""auth"""'], {'_external': '(True)'}), "('auth', _external=True)\n", (804, 828), False, 'from flask import Flask, render_template, url_for, session, redirect\n'), ((1466, 1490), 'asyncio.new_event_loop', 'asyncio.new_event_loop', ([], {}), '()\n', (1488, 1490), False, 'import asyncio\n'), ((1613, 1670), 'fitbit.Fitbit', 'Fitbit', ([], {'user': "session['user']", 'access_token': 'app.secret_key'}), "(user=session['user'], access_token=app.secret_key)\n", (1619, 1670), False, 'from fitbit import Fitbit\n'), ((1881, 1974), 'azure.eventhub.aio.EventHubProducerClient.from_connection_string', 'EventHubProducerClient.from_connection_string', ([], {'conn_str': "app.config['EVENT_HUB_CONN_STR']"}), "(conn_str=app.config[\n 'EVENT_HUB_CONN_STR'])\n", (1926, 1974), False, 'from azure.eventhub.aio import EventHubProducerClient\n'), ((1169, 1193), 'azure.identity.DefaultAzureCredential', 'DefaultAzureCredential', ([], {}), '()\n', (1191, 1193), False, 'from azure.identity import DefaultAzureCredential\n'), ((2174, 2200), 'json.dumps', 'json.dumps', (['item'], {'indent': '(4)'}), '(item, indent=4)\n', (2184, 2200), False, 'import os, json\n')] |
"""Meteo-France generic test utils."""
from unittest.mock import patch
import pytest
@pytest.fixture(autouse=True)
def patch_requests():
"""Stub out services that makes requests."""
patch_client = patch("homeassistant.components.meteo_france.meteofranceClient")
patch_weather_alert = patch(
"homeassistant.components.meteo_france.VigilanceMeteoFranceProxy"
)
with patch_client, patch_weather_alert:
yield
| [
"unittest.mock.patch",
"pytest.fixture"
] | [((89, 117), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (103, 117), False, 'import pytest\n'), ((208, 272), 'unittest.mock.patch', 'patch', (['"""homeassistant.components.meteo_france.meteofranceClient"""'], {}), "('homeassistant.components.meteo_france.meteofranceClient')\n", (213, 272), False, 'from unittest.mock import patch\n'), ((299, 371), 'unittest.mock.patch', 'patch', (['"""homeassistant.components.meteo_france.VigilanceMeteoFranceProxy"""'], {}), "('homeassistant.components.meteo_france.VigilanceMeteoFranceProxy')\n", (304, 371), False, 'from unittest.mock import patch\n')] |
import os
from contextlib import contextmanager
from pyspark.sql import SparkSession
from .log import logger
@contextmanager
def spark_session(config=None):
    pre_spark = SparkSession.builder \
        .appName('science-papers-ml') \
        .master(f"spark://{os.environ.get('SPARK_MASTER_HOST', 'spark-master')}:"
                f"{os.environ.get('SPARK_MASTER_PORT', '7077')}")
if config is not None:
for key, value in config.items():
pre_spark = pre_spark.config(key, value)
spark = pre_spark.getOrCreate()
logger.info("Created Spark session")
try:
yield spark
finally:
logger.info("Stopping Spark Session")
spark.stop()
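# Hypothetical usage sketch for the context manager above; running it needs a
# reachable Spark master, so it is kept inside a function rather than at import.
def _example_job():
    with spark_session({"spark.executor.memory": "2g"}) as spark:
        spark.range(10).show()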
| [
"os.environ.get",
"pyspark.sql.SparkSession.builder.appName"
] | [((177, 226), 'pyspark.sql.SparkSession.builder.appName', 'SparkSession.builder.appName', (['"""science-papers-ml"""'], {}), "('science-papers-ml')\n", (205, 226), False, 'from pyspark.sql import SparkSession\n'), ((267, 318), 'os.environ.get', 'os.environ.get', (['"""SPARK_MASTER_HOST"""', '"""spark-master"""'], {}), "('SPARK_MASTER_HOST', 'spark-master')\n", (281, 318), False, 'import os\n'), ((341, 384), 'os.environ.get', 'os.environ.get', (['"""SPARK_MASTER_PORT"""', '"""7077"""'], {}), "('SPARK_MASTER_PORT', '7077')\n", (355, 384), False, 'import os\n')] |