hexsha (stringlengths, 40..40) | size (int64, 2..1.05M) | ext (stringclasses, 9 values) | lang (stringclasses, 1 value) | max_stars_repo_path (stringlengths, 4..193) | max_stars_repo_name (stringlengths, 6..109) | max_stars_repo_head_hexsha (stringlengths, 40..78) | max_stars_repo_licenses (sequence) | max_stars_count (int64, 1..36.6k, ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths, 24..24, ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths, 24..24, ⌀) | max_issues_repo_path (stringlengths, 4..193) | max_issues_repo_name (stringlengths, 6..109) | max_issues_repo_head_hexsha (stringlengths, 40..78) | max_issues_repo_licenses (sequence) | max_issues_count (int64, 1..29.8k, ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths, 24..24, ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths, 24..24, ⌀) | max_forks_repo_path (stringlengths, 4..193) | max_forks_repo_name (stringlengths, 6..109) | max_forks_repo_head_hexsha (stringlengths, 40..78) | max_forks_repo_licenses (sequence) | max_forks_count (int64, 1..11.2k, ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths, 24..24, ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths, 24..24, ⌀) | content (stringlengths, 2..1.05M) | avg_line_length (float64, 1..404k) | max_line_length (int64, 1..1.03M) | alphanum_fraction (float64, 0..1) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
73f41e962d7a55a59322097fde1cd0ce63ddfc5c | 10,855 | py | Python | py_search/uninformed.py | ctonic/py_search | 4635616e174dadba049c6a68799df7ea92d78c4c | [
"MIT"
] | 4 | 2019-02-04T14:20:40.000Z | 2021-02-08T21:25:03.000Z | py_search/uninformed.py | ctonic/py_search | 4635616e174dadba049c6a68799df7ea92d78c4c | [
"MIT"
] | 6 | 2016-07-10T20:01:20.000Z | 2018-02-25T15:06:14.000Z | py_search/uninformed.py | ctonic/py_search | 4635616e174dadba049c6a68799df7ea92d78c4c | [
"MIT"
] | 3 | 2018-06-23T13:31:42.000Z | 2021-06-17T11:23:02.000Z | """
This module includes the core search methods :func:`tree_search` and
`graph_search` and the primary uninformed search techniques:
:func:`depth_first_search`, :func:`breadth_first_search`, and
:func:`iterative_deepening_search`.
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
from py_search.base import LIFOQueue
from py_search.base import FIFOQueue
from py_search.base import SolutionNode
def tree_search(problem, forward_fringe=None,
backward_fringe=None, depth_limit=float('inf')):
"""
A generalization of classic tree search that supports search in either, or
both, directions using the provided fringe classes. Returns an iterator to
the solutions, so more than one solution can be found.
:param problem: The problem to solve.
:type problem: :class:`Problem`
:param forward_fringe: The fringe class to use in the forward direction.
:type forward_fringe: :class:`fringe`
:param backward_fringe: The fringe class to use in the backward direction.
:type backward_fringe: :class:`fringe`
:param depth_limit: A limit for the depth of the search tree from either
direction. If set to float('inf'), then depth is unlimited.
:type depth_limit: int or float('inf')
"""
if (forward_fringe is None and backward_fringe is None):
raise ValueError("Must provide a fringe class for forward, backward"
"or both.")
if forward_fringe is None:
ffringe = [problem.initial]
else:
ffringe = forward_fringe
ffringe.push(problem.initial)
if backward_fringe is None:
bfringe = [problem.goal]
else:
bfringe = backward_fringe
bfringe.push(problem.goal)
while len(ffringe) > 0 and len(bfringe) > 0:
if forward_fringe is not None:
state = ffringe.pop()
for goal in bfringe:
if problem.goal_test(state, goal):
yield SolutionNode(state, goal)
if depth_limit == float('inf') or state.depth() < depth_limit:
ffringe.extend(problem.successors(state))
if backward_fringe is not None:
goal = bfringe.pop()
for state in ffringe:
if problem.goal_test(state, goal):
yield SolutionNode(state, goal)
if depth_limit == float('inf') or goal.depth() < depth_limit:
bfringe.extend(problem.predecessors(goal))
def graph_search(problem, forward_fringe=None, backward_fringe=None,
depth_limit=float('inf')):
"""
A generalization of classical graph search that supports search in either,
or both, directions using the provided fringe classes. Returns an iterator
to the solutions, so more than one solution can be found.
"""
if (forward_fringe is None and backward_fringe is None):
raise ValueError("Must provide a fringe class for forward, backward"
"or both.")
if forward_fringe is None:
ffringe = [problem.initial]
else:
ffringe = forward_fringe
fclosed = {}
ffringe.push(problem.initial)
fclosed[problem.initial] = problem.initial.cost()
if backward_fringe is None:
bfringe = [problem.goal]
else:
bfringe = backward_fringe
bclosed = {}
bfringe.push(problem.goal)
bclosed[problem.goal] = problem.goal.cost()
while len(ffringe) > 0 and len(bfringe) > 0:
if forward_fringe is not None:
state = ffringe.pop()
for goal in bfringe:
if problem.goal_test(state, goal):
yield SolutionNode(state, goal)
if depth_limit == float('inf') or state.depth() < depth_limit:
for s in problem.successors(state):
if s not in fclosed or s.cost() < fclosed[s]:
ffringe.push(s)
fclosed[s] = s.cost()
if backward_fringe is not None:
goal = bfringe.pop()
for state in ffringe:
if problem.goal_test(state, goal):
yield SolutionNode(state, goal)
if depth_limit == float('inf') or goal.depth() < depth_limit:
for p in problem.predecessors(goal):
if (p not in bclosed or p.cost() < bclosed[p]):
bfringe.push(p)
bclosed[p] = p.cost()
def choose_search(problem, queue_class, depth_limit=float('inf'),
graph=True, forward=True, backward=False):
"""
Given the arguments, chooses the appropriate underlying classes
to instantiate the search.
"""
forward_fringe = None
backward_fringe = None
if forward:
forward_fringe = queue_class()
if backward:
backward_fringe = queue_class()
if graph:
return graph_search(problem, forward_fringe, backward_fringe,
depth_limit)
else:
return tree_search(problem, forward_fringe, backward_fringe,
depth_limit)
def depth_first_search(problem, depth_limit=float('inf'), graph=True,
forward=True, backward=False):
"""
An implementation of depth-first search using a LIFO queue. This supports
bidirectional capabilities.
:param problem: The problem to solve.
:type problem: :class:`Problem`
:param depth_limit: A limit for the depth of the search tree. If set to
float('inf'), then depth is unlimited.
:type depth_limit: int or float('inf')
    :param graph: Whether to use graph search (default) or tree search.
    :type graph: Boolean
    :param forward: Whether to enable forward search (default) or not.
    :type forward: Boolean
    :param backward: Whether to enable backward search or not (default).
    :type backward: Boolean
"""
for solution in choose_search(problem, LIFOQueue, depth_limit=depth_limit,
graph=graph, forward=forward,
backward=backward):
yield solution
def breadth_first_search(problem, depth_limit=float('inf'),
graph=True, forward=True, backward=False):
"""
A simple implementation of breadth-first search using a FIFO queue.
:param problem: The problem to solve.
:type problem: :class:`Problem`
    :param graph: Whether to use graph search (default) or tree search.
    :type graph: Boolean
:param depth_limit: A limit for the depth of the search tree. If set to
float('inf'), then depth is unlimited.
:type depth_limit: int or float('inf')
"""
for solution in choose_search(problem, FIFOQueue, depth_limit=depth_limit,
graph=graph, forward=forward,
backward=backward):
yield solution
def iterative_deepening_search(problem, initial_depth_limit=0, depth_inc=1,
max_depth_limit=float('inf'), graph=True,
forward=True, backward=False):
"""
An implementation of iterative deepening search. This search is basically
depth-limited depth first up to the depth limit. If no solution is found at
the current depth limit then the depth limit is increased by depth_inc and
the depth-limited depth first search is restarted.
:param problem: The problem to solve.
:type problem: :class:`Problem`
:param initial_depth_limit: The initial depth limit for the search.
:type initial_depth_limit: int or float('inf')
:param depth_inc: The amount to increase the depth limit after failure.
:type depth_inc: int
:param max_depth_limit: The maximum depth limit (default value of
`float('inf')`)
:type max_depth_limit: int or float('inf')
:param graph: Whether to use graph (default) search or tree search.
:type graph: Boolean
"""
depth_limit = initial_depth_limit
while depth_limit < max_depth_limit:
for solution in depth_first_search(problem, depth_limit=depth_limit,
graph=graph, forward=forward,
backward=backward):
yield solution
depth_limit += depth_inc
def iterative_sampling(problem, max_samples=float('inf'),
depth_limit=float('inf')):
"""
A non-systematic alternative to depth-first search. This samples paths
through the tree until a dead end or until the depth limit is reached. It
has much lower memory requirements than depth-first or breadth-first
search, but requires the user to specify max_samples and depth_limit
parameters. The search will return non-optimal paths (it does not evaluate
node values) and sometimes it may fail to find solutions if the number of
samples or depth limit is too low.
A full description of the algorithm and the mathematics that support it can
be found here:
Langley, P. (1992, May). Systematic and nonsystematic search
strategies. In Artificial Intelligence Planning Systems: Proceedings of
the First International Conference (pp. 145-152).
This technique is included with the other uninformed methods because it is
uninformed when random_successor uniform randomly generates successors.
However, this technique could be converted into an informed technique if
random_successor is implemented with an approach that samples successors
according to their node_value.
:param problem: The problem to solve.
:type problem: :class:`Problem`
:param max_samples: The maximum number of samples through the search tree.
If no solution is found after collecting this many samples, then the
search fails.
:type max_samples: int or float('inf') (default of float('inf'))
    :param depth_limit: The maximum sampling depth (default value of
        `float('inf')`)
    :type depth_limit: int or float('inf') (default of float('inf'))
"""
num_samples = 0
while num_samples < max_samples:
curr = problem.initial
while curr:
if problem.goal_test(curr, problem.goal):
yield SolutionNode(curr, problem.goal)
curr = False
elif depth_limit == float('inf') or curr.depth() < depth_limit:
curr = problem.random_successor(curr)
else:
curr = False
num_samples += 1
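# --- Illustrative usage (a sketch, not part of the original module) ---
# How these generator-based searches are typically consumed. `problem` is assumed
# to be any py_search Problem instance; constructing one is outside this module.
def _example_first_solution(problem):
    """Return the first solution found by breadth-first search, or None."""
    for solution in breadth_first_search(problem, depth_limit=10):
        return solution
    return None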
| 40.808271 | 79 | 0.646154 |
73f4453c8a3a71d52b5faa9d704a8dee7f583164 | 716 | py | Python | tryalgo/three_partition.py | xcarcelle/tryalgo | c159fbffbea0a4e8b70e8898c31c62c7e08a3865 | [
"MIT"
] | null | null | null | tryalgo/three_partition.py | xcarcelle/tryalgo | c159fbffbea0a4e8b70e8898c31c62c7e08a3865 | [
"MIT"
] | null | null | null | tryalgo/three_partition.py | xcarcelle/tryalgo | c159fbffbea0a4e8b70e8898c31c62c7e08a3865 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# subsetsum
# jill-jenn vie et christoph durr - 2015-2018
# snip{
def three_partition(x):
"""partition a set of integers in 3 parts of same total value
:param x: table of non negative values
:returns: triplet of the integers encoding the sets, or None otherwise
:complexity: :math:`O(2^{2n})`
"""
f = [0] * (1 << len(x))
for i in range(len(x)):
for S in range(1 << i):
f[S | (1 << i)] = f[S] + x[i]
for A in range(1 << len(x)):
for B in range(1 << len(x)):
if A & B == 0 and f[A] == f[B] and 3 * f[A] == f[-1]:
return (A, B, ((1 << len(x)) - 1) ^ A ^ B)
return None
# snip}
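# Illustrative usage (a sketch, not part of the original snippet): each value
# returned by three_partition is a bitmask over the indices of x.
def _example_three_partition():
    x = [1, 2, 3, 4, 5, 6, 7, 8, 9]   # total is 45, so each part must sum to 15
    parts = three_partition(x)
    if parts is not None:
        for mask in parts:
            subset = [x[i] for i in range(len(x)) if (mask >> i) & 1]
            print(subset, sum(subset))   # every printed sum is 15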
| 28.64 | 74 | 0.515363 |
73f4979a7b7e3ba72f5360a563593b2d132feeed | 151 | py | Python | testappsimple/__init__.py | matheusmatos/django-rest-models | 18da71bd921064279b03129aac38d3fbb9e29ae2 | [
"BSD-2-Clause"
] | 61 | 2016-12-05T09:09:49.000Z | 2022-03-09T13:23:06.000Z | testappsimple/__init__.py | matheusmatos/django-rest-models | 18da71bd921064279b03129aac38d3fbb9e29ae2 | [
"BSD-2-Clause"
] | 51 | 2016-12-07T10:19:52.000Z | 2022-03-11T23:35:23.000Z | testappsimple/__init__.py | matheusmatos/django-rest-models | 18da71bd921064279b03129aac38d3fbb9e29ae2 | [
"BSD-2-Clause"
] | 18 | 2017-03-11T18:07:17.000Z | 2022-03-09T13:14:40.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import, print_function
import logging
logger = logging.getLogger(__name__)
| 21.571429 | 72 | 0.781457 |
73f4a65834d6cdebfd2bb5c144f0d102e7d46c18 | 1,298 | py | Python | ufora/scripts/testRepeater.py | ufora/ufora | 04db96ab049b8499d6d6526445f4f9857f1b6c7e | [
"Apache-2.0",
"CC0-1.0",
"MIT",
"BSL-1.0",
"BSD-3-Clause"
] | 571 | 2015-11-05T20:07:07.000Z | 2022-01-24T22:31:09.000Z | ufora/scripts/testRepeater.py | timgates42/ufora | 04db96ab049b8499d6d6526445f4f9857f1b6c7e | [
"Apache-2.0",
"CC0-1.0",
"MIT",
"BSL-1.0",
"BSD-3-Clause"
] | 218 | 2015-11-05T20:37:55.000Z | 2021-05-30T03:53:50.000Z | ufora/scripts/testRepeater.py | timgates42/ufora | 04db96ab049b8499d6d6526445f4f9857f1b6c7e | [
"Apache-2.0",
"CC0-1.0",
"MIT",
"BSL-1.0",
"BSD-3-Clause"
] | 40 | 2015-11-07T21:42:19.000Z | 2021-05-23T03:48:19.000Z | #!/usr/bin/env python
# Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import sys
def main(argv):
for ix in range(100):
print argv[1], "starting pass ", ix
with open("log%s_%s.txt" % (argv[1], ix), "wb") as f:
try:
subprocess.check_call(
"ulimit -c unlimited; " + " ".join(["python"] + argv[2:]) + " 2> log%s_%s.txt > out%s_%s.txt" % (argv[1], ix, argv[1], ix),
stderr=subprocess.STDOUT,
shell=True
)
except subprocess.CalledProcessError as e:
print "ERRORS in log%s_%s.txt" % (argv[1],ix)
print >> f, e.output
if __name__ == "__main__":
main(sys.argv)
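# Illustrative invocation (the script name and flag below are hypothetical):
#   python testRepeater.py run1 my_test.py --some-flag
# This repeats `python my_test.py --some-flag` 100 times, redirecting stderr to
# logrun1_<i>.txt and stdout to outrun1_<i>.txt for each pass.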
| 35.081081 | 144 | 0.602465 |
73f4fd23e3d586423658a72ad9c6419b3bb73426 | 1,931 | py | Python | test/selenium/src/lib/page/export_page.py | Killswitchz/ggrc-core | 2460df94daf66727af248ad821462692917c97a9 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | test/selenium/src/lib/page/export_page.py | Killswitchz/ggrc-core | 2460df94daf66727af248ad821462692917c97a9 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | test/selenium/src/lib/page/export_page.py | Killswitchz/ggrc-core | 2460df94daf66727af248ad821462692917c97a9 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Export page with Export Panels."""
from lib import base
from lib.constants import locator, element
from lib.utils import selenium_utils
class ExportPanel(base.Component):
"""Export Panel on Export Page."""
def __init__(self, driver, export_panel):
super(ExportPanel, self).__init__(driver)
class ExportPage(base.Page):
"""Export Page."""
_locators = locator.ExportPage
_elements = element.ExportPage
def __init__(self, driver):
super(ExportPage, self).__init__(driver)
self.export_page = self._driver.find_element(
*self._locators.EXPORT_PAGE_CSS)
self.export_panels = self.get_list_export_panels()
self.export_format_dd = base.DropdownStatic(
self.export_page, self._locators.EXPORT_FORMAT_DD_CSS)
self.add_obj_type_btn = base.Button(
self.export_page, self._locators.ADD_OBJECT_TYPE_BTN_CSS)
self.export_objs_btn = base.Button(
self.export_page, self._locators.EXPORT_OBJECTS_BTN_CSS)
def get_list_export_panels(self):
"""Get list of all Export Panels witch are presented on Export Page at the
moment of getting.
"""
return [ExportPanel(self._driver, exp_panel_el) for exp_panel_el in
self.export_page.find_elements(*self._locators.EXPORT_PANEL_CSS)]
def click_export_objs(self):
"""Click to 'Export Objects' button to confirm export objects according to
selected before export format (Google Sheet or CSV).
"""
self.export_objs_btn.click()
selenium_utils.get_when_invisible(
self.export_page, self._locators.EXPORT_OBJECTS_SPINNER_CSS)
selenium_utils.wait_for_js_to_load(self._driver)
def export_objs_to_csv(self):
"""Export objects choosing CSV as exporting format."""
self.export_format_dd.select_by_label(self._elements.CSV)
self.click_export_objs()
| 35.759259 | 78 | 0.745728 |
73f50605473028478d146f65df67fd08f94b11a3 | 4,699 | py | Python | posetrigger/frame_view.py | gwappa/python-posetrigger | 3d9619160b5ed97e52ba2aae99d5eec663834a06 | [
"MIT"
] | null | null | null | posetrigger/frame_view.py | gwappa/python-posetrigger | 3d9619160b5ed97e52ba2aae99d5eec663834a06 | [
"MIT"
] | null | null | null | posetrigger/frame_view.py | gwappa/python-posetrigger | 3d9619160b5ed97e52ba2aae99d5eec663834a06 | [
"MIT"
] | null | null | null | #
# MIT License
#
# Copyright (c) 2020 Keisuke Sehara
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import numpy as _np
import matplotlib.pyplot as _plt
from pyqtgraph.Qt import QtCore, QtGui, QtWidgets
import pyqtgraph as _pg
from . import debug as _debug
COLORMAP = _plt.get_cmap("rainbow")
SPOTSIZE = 20
def colormap(total):
def color_for_index(i):
r, g, b, a = COLORMAP((i+1)/(total+1))
return (int(r*255), int(g*255), int(b*255))
return color_for_index
def image_to_display(img):
if img.ndim == 3:
return img.transpose((1,0,2))
else:
return img.T
class FrameView(QtWidgets.QGraphicsView):
"""a thin wrapper class that is used to display acquired frames.
the `updateWithFrame` method updates what is displayed.
"""
def __init__(self, width, height, parent=None):
super().__init__(parent=parent)
self._width = width
self._height = height
self._scene = QtWidgets.QGraphicsScene()
self._image = _pg.ImageItem(_np.zeros((width,height), dtype=_np.uint16),
levels=(0, 65535), autolevels=False)
self._levels = (0, 65535)
self._bodyparts = None
self.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContents)
self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self._scene.addItem(self._image)
self.setScene(self._scene)
def setRange(self, m, M):
self._levels = (m, M)
self._image.setLevels(self._levels)
def updateWithAcquisition(self, mode, acq):
if mode == "":
acq.frameAcquired.disconnect(self.updateWithFrame)
else:
acq.frameAcquired.connect(self.updateWithFrame)
def updateWithFrame(self, img, estimation, timestamp):
self._image.setImage(image_to_display(img), levels=self._levels, autolevels=False)
if (self._bodyparts is not None) and ("pose" in estimation.keys()):
pose = estimation["pose"]
for i, part in enumerate(self._bodyparts):
part.setPosition(pose[i,:2])
self._scene.changed.emit([QtCore.QRectF(0, 0, self._width, self._height)])
def registerBodyParts(self, parts):
# removing old annotations
if self._bodyparts is not None:
for anno in self._bodyparts:
self._scene.removeItem(anno.spot)
self._bodyparts = None
# adding new annotations
total = len(parts)
if total == 0:
return
self._bodyparts = []
cmap = colormap(total)
for i, part in enumerate(parts):
anno = Annotation(part, dims=(self._width, self._height), color=cmap(i))
self._scene.addItem(anno.spot)
self._bodyparts.append(anno)
class Annotation:
def __init__(self, name, dims, initial=(0, 0),
color="y", spotsize=SPOTSIZE):
self.name = name
self._dims = _np.array(dims)
self._dia = spotsize / 2
self.spot = QtWidgets.QGraphicsEllipseItem(initial[0]-self._dia,
initial[1]-self._dia,
self._dia,
self._dia)
self._color = _pg.mkColor(color)
self.spot.setPen(QtGui.QPen(self._color))
self.spot.setVisible(False)
def setPosition(self, xy):
self.spot.setPos(xy[0]-self._dia, xy[1]-self._dia)
self.spot.setVisible( all(xy>=0) and all(xy<=self._dims) )
| 39.487395 | 90 | 0.641626 |
73f53747ce26e9f1512b865f39b903ca976a167e | 11,334 | py | Python | core/python/spirit/simulation.py | bck2302000/spirit | 14ed7782bd23f4828bf23ab8136ae31a21037bb3 | [
"MIT"
] | 92 | 2016-10-02T16:17:27.000Z | 2022-02-22T11:23:49.000Z | core/python/spirit/simulation.py | bck2302000/spirit | 14ed7782bd23f4828bf23ab8136ae31a21037bb3 | [
"MIT"
] | 590 | 2016-09-24T12:46:36.000Z | 2022-03-24T18:27:18.000Z | core/python/spirit/simulation.py | bck2302000/spirit | 14ed7782bd23f4828bf23ab8136ae31a21037bb3 | [
"MIT"
] | 46 | 2016-09-26T07:20:17.000Z | 2022-02-17T19:55:17.000Z | """
Simulation
====================
This module of Spirit is used to run and monitor iterative calculation methods.
If many iterations are called individually, one should use the single shot simulation functionality.
It avoids the allocations etc. involved when a simulation is started and ended and behaves like a
regular simulation, except that the iterations have to be triggered manually.
Note that the VP and LBFGS Solvers are only meant for direct minimization and not for dynamics.
"""
import spirit.spiritlib as spiritlib
import ctypes
### Load Library
_spirit = spiritlib.load_spirit_library()
import threading
### We use a thread for PlayPause, so that KeyboardInterrupt can be forwarded to the CDLL call
### We might want to think about using PyDLL and about a signal handler in the core library
### see here: http://stackoverflow.com/questions/14271697/ctrlc-doesnt-interrupt-call-to-shared-library-using-ctypes-in-python
SOLVER_VP = 0
"""Verlet-like velocity projection method."""
SOLVER_SIB = 1
"""Semi-implicit midpoint method B."""
SOLVER_DEPONDT = 2
"""Depondt's Heun-like method."""
SOLVER_HEUN = 3
"""Heun's method."""
SOLVER_RK4 = 4
"""4th order Runge-Kutta method."""
SOLVER_LBFGS_OSO = 5
"""Limited memory Broyden-Fletcher-Goldfarb-Shannon, using exponential transforms."""
SOLVER_LBFGS_Atlas = 6
"""Limited memory Broyden-Fletcher-Goldfarb-Shannon, using stereographic transforms"""
SOLVER_VP_OSO = 7
"""Verlet-like velocity projection method, using exponential transforms."""
METHOD_MC = 0
"""Monte Carlo.
Standard implementation.
"""
METHOD_LLG = 1
"""Landau-Lifshitz-Gilbert.
Can be either a dynamical simulation or an energy minimisation.
Note: the VP solver can *only* minimise.
"""
METHOD_GNEB = 2
"""Geodesic nudged elastic band.
Runs on the entire chain.
As this is a minimisation method, the dynamical solvers perform worse
than those designed for minimisation.
"""
METHOD_MMF = 3
"""Minimum mode following.
As this is a minimisation method, the dynamical solvers perform worse
than those designed for minimisation.
"""
METHOD_EMA = 4
"""Eigenmode analysis.
Applies eigenmodes to the spins of a system.
Depending on parameters, this can be used to calculate the change of a
spin configuration through such a mode or to get a "dynamical" chain
of images corresponding to the movement of the system under the mode.
"""
### ----- Start methods
### MC
_MC_Start = _spirit.Simulation_MC_Start
_MC_Start.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int,
ctypes.c_bool, ctypes.c_int, ctypes.c_int]
_MC_Start.restype = None
### LLG
_LLG_Start = _spirit.Simulation_LLG_Start
_LLG_Start.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int, ctypes.c_int,
ctypes.c_bool, ctypes.c_int, ctypes.c_int]
_LLG_Start.restype = None
### GNEB
_GNEB_Start = _spirit.Simulation_GNEB_Start
_GNEB_Start.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int,
ctypes.c_int, ctypes.c_bool, ctypes.c_int]
_GNEB_Start.restype = None
### MMF
_MMF_Start = _spirit.Simulation_MMF_Start
_MMF_Start.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int, ctypes.c_int,
ctypes.c_bool, ctypes.c_int, ctypes.c_int]
_MMF_Start.restype = None
### EMA
_EMA_Start = _spirit.Simulation_EMA_Start
_EMA_Start.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int,
ctypes.c_bool, ctypes.c_int, ctypes.c_int]
_EMA_Start.restype = None
### ----- Wrapper
def start(p_state, method_type, solver_type=None, n_iterations=-1, n_iterations_log=-1,
single_shot=False, idx_image=-1, idx_chain=-1):
"""Start any kind of iterative calculation method.
- `method_type`: one of the integers defined above
- `solver_type`: only used for LLG, GNEB and MMF methods (default: None)
- `n_iterations`: the maximum number of iterations that will be performed (default: take from parameters)
- `n_iterations_log`: the number of iterations after which to log the status and write output (default: take from parameters)
- `single_shot`: if set to `True`, iterations have to be triggered individually
- `idx_image`: the image on which to run the calculation (default: active image). Not used for GNEB
"""
if method_type == METHOD_MC:
spiritlib.wrap_function(_MC_Start, [ctypes.c_void_p(p_state),
ctypes.c_int(n_iterations), ctypes.c_int(n_iterations_log),
ctypes.c_bool(single_shot),
ctypes.c_int(idx_image), ctypes.c_int(idx_chain)])
elif method_type == METHOD_LLG:
spiritlib.wrap_function(_LLG_Start, [ctypes.c_void_p(p_state),
ctypes.c_int(solver_type),
ctypes.c_int(n_iterations), ctypes.c_int(n_iterations_log),
ctypes.c_bool(single_shot),
ctypes.c_int(idx_image), ctypes.c_int(idx_chain)])
elif method_type == METHOD_GNEB:
spiritlib.wrap_function(_GNEB_Start, [ctypes.c_void_p(p_state),
ctypes.c_int(solver_type),
ctypes.c_int(n_iterations), ctypes.c_int(n_iterations_log),
ctypes.c_bool(single_shot),
ctypes.c_int(idx_image), ctypes.c_int(idx_chain)])
elif method_type == METHOD_MMF:
spiritlib.wrap_function(_MMF_Start, [ctypes.c_void_p(p_state),
ctypes.c_int(solver_type),
ctypes.c_int(n_iterations), ctypes.c_int(n_iterations_log),
ctypes.c_bool(single_shot),
ctypes.c_int(idx_image), ctypes.c_int(idx_chain)])
elif method_type == METHOD_EMA:
spiritlib.wrap_function(_EMA_Start, [ctypes.c_void_p(p_state),
ctypes.c_int(n_iterations), ctypes.c_int(n_iterations_log),
ctypes.c_bool(single_shot),
ctypes.c_int(idx_image), ctypes.c_int(idx_chain)])
else:
print("Invalid method_type passed to simulation.start...")
# _Start(ctypes.c_void_p(p_state), ctypes.c_char_p(method_type),
# ctypes.c_char_p(solver_type), ctypes.c_int(n_iterations),
# ctypes.c_int(n_iterations_log), ctypes.c_int(idx_image), ctypes.c_int(idx_chain))
_SingleShot = _spirit.Simulation_SingleShot
_SingleShot.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int]
_SingleShot.restype = None
def single_shot(p_state, idx_image=-1, idx_chain=-1):
"""Perform a single iteration.
In order to use this, a single shot simulation must be running on the corresponding image or chain.
"""
spiritlib.wrap_function(_SingleShot, [ctypes.c_void_p(p_state),
ctypes.c_int(idx_image), ctypes.c_int(idx_chain)])
_N_Shot = _spirit.Simulation_N_Shot
_N_Shot.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int, ctypes.c_int]
_N_Shot.restype = None
def n_shot(p_state, N, idx_image=-1, idx_chain=-1):
"""Perform a single iteration.
In order to use this, a single shot simulation must be running on the corresponding image or chain.
"""
spiritlib.wrap_function(_N_Shot, [ctypes.c_void_p(p_state), ctypes.c_int(N),
ctypes.c_int(idx_image), ctypes.c_int(idx_chain)])
_Stop = _spirit.Simulation_Stop
_Stop.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int]
_Stop.restype = None
def stop(p_state, idx_image=-1, idx_chain=-1):
"""Stop the simulation running on an image or chain."""
_Stop(ctypes.c_void_p(p_state), ctypes.c_int(idx_image), ctypes.c_int(idx_chain))
_Stop_All = _spirit.Simulation_Stop_All
_Stop_All.argtypes = [ctypes.c_void_p]
_Stop_All.restype = None
def stop_all(p_state):
"""Stop all simulations running anywhere."""
_Stop_All(ctypes.c_void_p(p_state))
_Running_On_Image = _spirit.Simulation_Running_On_Image
_Running_On_Image.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int]
_Running_On_Image.restype = ctypes.c_bool
def running_on_image(p_state, idx_image=-1, idx_chain=-1):
"""Check if a simulation is running on a specific image."""
return bool(_Running_On_Image(ctypes.c_void_p(p_state), ctypes.c_int(idx_image),
ctypes.c_int(idx_chain)))
_Running_On_Chain = _spirit.Simulation_Running_On_Chain
_Running_On_Chain.argtypes = [ctypes.c_void_p, ctypes.c_int]
_Running_On_Chain.restype = ctypes.c_bool
def running_on_chain(p_state, idx_chain=-1):
"""Check if a simulation is running across a specific chain."""
return bool(_Running_On_Chain(ctypes.c_void_p(p_state), ctypes.c_int(idx_chain)))
_Running_Anywhere_On_Chain = _spirit.Simulation_Running_Anywhere_On_Chain
_Running_Anywhere_On_Chain.argtypes = [ctypes.c_void_p, ctypes.c_int]
_Running_Anywhere_On_Chain.restype = ctypes.c_bool
def running_anywhere_on_chain(p_state, idx_chain=-1):
"""Check if any simulation running on any image of - or the entire - chain."""
return bool(_Running_Anywhere_On_Chain(ctypes.c_void_p(p_state), ctypes.c_int(idx_chain)))
_Get_IterationsPerSecond = _spirit.Simulation_Get_IterationsPerSecond
_Get_IterationsPerSecond.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int]
_Get_IterationsPerSecond.restype = ctypes.c_float
def get_iterations_per_second(p_state, idx_image=-1, idx_chain=-1):
"""Returns the current estimation of the number of iterations per second."""
return float(_Get_IterationsPerSecond(ctypes.c_void_p(p_state), ctypes.c_int(idx_image), ctypes.c_int(idx_chain)))
_Get_MaxTorqueNorm = _spirit.Simulation_Get_MaxTorqueNorm
_Get_MaxTorqueNorm.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int]
_Get_MaxTorqueNorm.restype = ctypes.c_float
def get_max_torque_norm(p_state, idx_image=-1, idx_chain=-1):
"""Returns the current maximum norm of the torque acting on any spin."""
return float(_Get_MaxTorqueNorm(ctypes.c_void_p(p_state), ctypes.c_int(idx_image), ctypes.c_int(idx_chain)))
_Get_Time = _spirit.Simulation_Get_Time
_Get_Time.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int]
_Get_Time.restype = ctypes.c_float
def get_time(p_state, idx_image=-1, idx_chain=-1):
""" If an LLG simulation is running returns the cumulatively summed time steps `dt`, otherwise returns 0"""
return _Get_Time(ctypes.c_void_p(p_state), ctypes.c_int(idx_image), ctypes.c_int(idx_chain))
_Get_Wall_Time= _spirit.Simulation_Get_Wall_Time
_Get_Wall_Time.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int]
_Get_Wall_Time.restype = ctypes.c_int
def get_wall_time(p_state, idx_image=-1, idx_chain=-1):
"""Returns the current maximum norm of the torque acting on any spin."""
return int(_Get_Wall_Time(ctypes.c_void_p(p_state), ctypes.c_int(idx_image), ctypes.c_int(idx_chain))) | 45.155378 | 130 | 0.691106 |
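# Illustrative usage (a sketch: `spirit.state` is assumed to be importable next
# to this module, and "input.cfg" is only a placeholder for a Spirit input file):
def _example_minimise(cfg_file="input.cfg"):
    from spirit import state
    with state.State(cfg_file) as p_state:
        start(p_state, METHOD_LLG, solver_type=SOLVER_VP)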
73f53d41d9fd066cda1100b889a61084b4360912 | 6,122 | py | Python | RRTStar.py | Abeilles14/Velocity-Obstacle-and-Motion-Planning | 4509abeb589d6f11046103422f70f8ddde397d63 | [
"MIT"
] | 7 | 2021-11-17T01:39:02.000Z | 2022-01-23T17:40:29.000Z | RRTStar.py | Abeilles14/Velocity-Obstacle-and-Motion-Planning | 4509abeb589d6f11046103422f70f8ddde397d63 | [
"MIT"
] | null | null | null | RRTStar.py | Abeilles14/Velocity-Obstacle-and-Motion-Planning | 4509abeb589d6f11046103422f70f8ddde397d63 | [
"MIT"
] | null | null | null | # STATE MACHINE FOR 3D PICK AND PLACE SIMULATION
import numpy as np
from numpy.linalg import norm
from math import *
from matplotlib import pyplot as plt
from matplotlib.patches import Polygon
from random import random
from scipy.spatial import ConvexHull
from matplotlib import path
import time
from mpl_toolkits import mplot3d
from enum import Enum
import logging
from utils import init_fonts
from path_shortening import shorten_path
from obstacles import Static_Obstacle
logger = logging.getLogger(__name__)
logging.basicConfig()
logger.setLevel(logging.INFO)
### CONSTANTS ###
PAUSE_TIME = 0.0005
STEP_SIZE = 0.01 #controls speed of paths
SAMPLE_SIZE = 80
SMOOTH_ITERS = 100
### PARAMETERS ###
show_RRT = False
animate = 1
# RRT Initialization
maxiters = 5000
minDistGoal = 0.05 # Convergence criterion: success when the tree reaches within 0.05 in distance from the goal.
d = 0.1 # [m], Extension parameter: this controls how far the RRT extends in each step.
class Node3D:
def __init__(self):
self.p = [0, 0, 0]
self.cost = 0
self.costPrev = 0
def isCollisionFreeVertex(obstacles, point):
x,y,z = point
for obstacle in obstacles:
dx, dy, dz = obstacle.dimensions
x0, y0, z0 = obstacle.pose
if abs(x-x0)<=dx/2 and abs(y-y0)<=dy/2 and abs(z-z0)<=dz/2:
return 0
return 1
def isCollisionFreeEdge(obstacles, closest_vert, p):
closest_vert = np.array(closest_vert)
p = np.array(p)
collFree = True
l = norm(closest_vert - p)
map_resolution = 0.01; M = int(l / map_resolution)
if M <= 2: M = 20
t = np.linspace(0,1,M)
for i in range(1,M-1):
point = (1-t[i])*closest_vert + t[i]*p # calculate configuration
collFree = isCollisionFreeVertex(obstacles, point)
if collFree == False: return False
return collFree
def closestNode3D(rrt, p):
distance = []
for node in rrt:
distance.append(sqrt((p[0] - node.p[0])**2 + (p[1] - node.p[1])**2 + (p[2] - node.p[2])**2))
distance = np.array(distance)
dmin = min(distance)
ind_min = distance.tolist().index(dmin)
closest_node = rrt[ind_min]
return closest_node
def RRTStar(ax, obstacles, start, goal):
nearGoal = False # This will be set to true if goal has been reached
# Initialize RRT. The RRT will be represented as a 2 x N list of points.
# So each column represents a vertex of the tree.
rrt = [] # list of vertex
start_node = Node3D()
start_node.p = start
start_node.cost = 0
start_node.costPrev = 0
rrt.append(start_node)
path = [] # final path
### RRT ALGORITHM ###
start_time = time.time()
iters = 0
while not nearGoal and iters < maxiters:
# Sample point
rnd = random()
# With probability 0.05, sample the goal. This promotes movement to the goal.
if rnd < 0.10:
p = goal
else:
p = np.array([random()*5-2.5, random()*5-2.5, random()*3]) # Should be a 3 x 1 vector
# Check if sample is collision free
collFree = isCollisionFreeVertex(obstacles, p)
# If it's not collision free, continue with loop
if not collFree:
iters += 1
continue
# If it is collision free, find closest point in existing tree.
closest_node = closestNode3D(rrt, p)
# Extend tree towards xy from closest_vert. Use the extension parameter
# d defined above as your step size. In other words, the Euclidean
# distance between new_vert and closest_vert should be d.
new_node = Node3D()
new_node.p = closest_node.p + d * (p - closest_node.p)
new_node.cost = len(rrt)
new_node.costPrev = closest_node.cost
# draw RRT node tree
if show_RRT:
ax.plot([closest_node.p[0], new_node.p[0]], [closest_node.p[1], new_node.p[1]], [closest_node.p[2], new_node.p[2]],color = 'b', zorder=5)
plt.pause(PAUSE_TIME)
# Check if new vertice is in collision
collFree = isCollisionFreeEdge(obstacles, closest_node.p, new_node.p)
# If it's not collision free, continue with loop
if not collFree:
iters += 1
continue
# If it is collision free, add it to tree
rrt.append(new_node)
# Check if we have reached the goal
if norm(np.array(goal) - np.array(new_node.p)) < minDistGoal:
# Add last, goal node
goal_node = Node3D()
goal_node.p = goal
goal_node.cost = len(rrt)
goal_node.costPrev = new_node.cost
if isCollisionFreeEdge(obstacles, new_node.p, goal_node.p):
rrt.append(goal_node)
path = [goal_node.p]
else: path = []
end_time = time.time()
nearGoal = True
logger.debug('Reached the goal after %.2f seconds:' % (end_time - start_time))
iters += 1
logger.debug('Number of iterations passed: %d / %d' %(iters, maxiters))
    logger.debug('RRT length: %d', len(rrt))
# Path construction from RRT:
logger.debug('Constructing the path...')
i = len(rrt) - 1
while True:
i = rrt[i].costPrev
path.append(rrt[i].p)
if i == 0:
logger.debug('Reached RRT start node')
break
path = np.array(path)
# Drawing unoptimized RRT path
if show_RRT:
for i in range(path.shape[0]-1):
ax.plot([path[i,0], path[i+1,0]], [path[i,1], path[i+1,1]], [path[i,2], path[i+1,2]], color = 'g', linewidth=3, zorder=10)
plt.pause(PAUSE_TIME)
### DRAW SHORTENED PATH ###
logger.debug('Shortening the path...')
path = shorten_path(path, obstacles, smoothiters=SMOOTH_ITERS)
path = np.flip(path, axis=0)
# # plot paths
# for i in range(path.shape[0]-1):
# ax.plot([path[i,0], path[i+1,0]], [path[i,1], path[i+1,1]], [path[i,2], path[i+1,2]], color = 'orange', linewidth=1, zorder=15)
logger.info('Final Path Found!')
return path
| 32.737968 | 149 | 0.615322 |
73f5793c77d9680533092951eeb07914297d9595 | 308 | py | Python | tests/test_hline.py | andrsd/podcastista | c05a1de09d2820899aebe592d3d4b01d64d1e5fe | [
"MIT"
] | null | null | null | tests/test_hline.py | andrsd/podcastista | c05a1de09d2820899aebe592d3d4b01d64d1e5fe | [
"MIT"
] | 17 | 2021-09-22T12:21:46.000Z | 2022-02-26T12:26:40.000Z | tests/test_hline.py | andrsd/podcastista | c05a1de09d2820899aebe592d3d4b01d64d1e5fe | [
"MIT"
] | null | null | null | import platform
from PyQt5 import QtWidgets
from podcastista.HLine import HLine
if platform.system() == "Darwin":
def test_init(main_window):
widget = HLine(main_window)
assert widget.frameShape() == QtWidgets.QFrame.HLine
assert widget.frameShadow() == QtWidgets.QFrame.Plain
| 25.666667 | 61 | 0.717532 |
73f5c9fc90a2c15afa50a2cd14c536cb68b5bff0 | 777 | py | Python | fv3config/config/_serialization.py | VulcanClimateModeling/fv3config | 544eaf1bc6f1c4617cd8ee6bd3298136ed180f4c | [
"BSD-2-Clause"
] | 2 | 2019-11-12T21:05:09.000Z | 2019-11-17T18:08:34.000Z | fv3config/config/_serialization.py | VulcanClimateModeling/fv3config | 544eaf1bc6f1c4617cd8ee6bd3298136ed180f4c | [
"BSD-2-Clause"
] | 77 | 2019-11-12T21:15:38.000Z | 2021-05-07T22:39:36.000Z | fv3config/config/_serialization.py | VulcanClimateModeling/fv3config | 544eaf1bc6f1c4617cd8ee6bd3298136ed180f4c | [
"BSD-2-Clause"
] | null | null | null | from copy import deepcopy
import yaml
from typing import TextIO
from .types import Config
from .diag_table import DiagTable
def load(f: TextIO) -> Config:
"""Load a configuration from a file-like object f"""
config = yaml.safe_load(f)
if isinstance(config["diag_table"], dict):
config["diag_table"] = DiagTable.from_dict(config["diag_table"])
return config
def dump(config: Config, f: TextIO):
"""Serialize config to a file-like object using yaml encoding
Args:
config: an fv3config object
f: the file like object to write to
"""
config_copy = deepcopy(config)
if isinstance(config["diag_table"], DiagTable):
config_copy["diag_table"] = config["diag_table"].asdict()
yaml.safe_dump(config_copy, f)
| 26.793103 | 72 | 0.688546 |
73f5ce109bb9e529c6a80dafa98b6327492e54b5 | 1,377 | py | Python | sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/domain_model_results.py | pjquirk/azure-sdk-for-python | cbf02ec4f177b96eae1dbbba87c34c2c93880150 | [
"MIT"
] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/domain_model_results.py | pjquirk/azure-sdk-for-python | cbf02ec4f177b96eae1dbbba87c34c2c93880150 | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/domain_model_results.py | xiafu-msft/azure-sdk-for-python | 4d9560cfd519ee60667f3cc2f5295a58c18625db | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class DomainModelResults(Model):
"""Result of image analysis using a specific domain model including additional
metadata.
:param result: Model-specific response.
:type result: object
:param request_id: Id of the REST API request.
:type request_id: str
:param metadata:
:type metadata:
~azure.cognitiveservices.vision.computervision.models.ImageMetadata
"""
_attribute_map = {
'result': {'key': 'result', 'type': 'object'},
'request_id': {'key': 'requestId', 'type': 'str'},
'metadata': {'key': 'metadata', 'type': 'ImageMetadata'},
}
def __init__(self, **kwargs):
super(DomainModelResults, self).__init__(**kwargs)
self.result = kwargs.get('result', None)
self.request_id = kwargs.get('request_id', None)
self.metadata = kwargs.get('metadata', None)
| 35.307692 | 82 | 0.604938 |
73f5cf11274cc48a028fda75ab8be5fc6359d93a | 24,718 | py | Python | tools/dockerize/webportal/usr/lib/python2.7/site-packages/oslo_messaging/_drivers/protocols/amqp/controller.py | foruy/openflow-multiopenstack | 74140b041ac25ed83898ff3998e8dcbed35572bb | [
"Apache-2.0"
] | 1 | 2019-09-11T11:56:19.000Z | 2019-09-11T11:56:19.000Z | tools/dockerize/webportal/usr/lib/python2.7/site-packages/oslo_messaging/_drivers/protocols/amqp/controller.py | foruy/openflow-multiopenstack | 74140b041ac25ed83898ff3998e8dcbed35572bb | [
"Apache-2.0"
] | null | null | null | tools/dockerize/webportal/usr/lib/python2.7/site-packages/oslo_messaging/_drivers/protocols/amqp/controller.py | foruy/openflow-multiopenstack | 74140b041ac25ed83898ff3998e8dcbed35572bb | [
"Apache-2.0"
] | null | null | null | # Copyright 2014, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Controller that manages the interface between the driver and the messaging
service.
This module defines a Controller class that is responsible for performing
messaging-related operations (Tasks) requested by the driver, and for managing
the connection to the messaging service. The Controller creates a background
thread which performs all messaging operations and socket I/O. The
Controller's messaging logic is executed in the background thread via lambda
functions scheduled by the Controller.
"""
import abc
import logging
import threading
import uuid
from oslo_config import cfg
import proton
import pyngus
from six import moves
from oslo_messaging._drivers.protocols.amqp import eventloop
from oslo_messaging._drivers.protocols.amqp import opts
from oslo_messaging import transport
LOG = logging.getLogger(__name__)
class Task(object):
"""Perform a messaging operation via the Controller."""
@abc.abstractmethod
def execute(self, controller):
"""This method will be run on the eventloop thread."""
class Replies(pyngus.ReceiverEventHandler):
"""This is the receiving link for all reply messages. Messages are routed
to the proper Listener's incoming queue using the correlation-id header in
the message.
"""
def __init__(self, connection, on_ready):
self._correlation = {} # map of correlation-id to response queue
self._ready = False
self._on_ready = on_ready
rname = "Consumer-%s:src=[dynamic]:tgt=replies" % uuid.uuid4().hex
self._receiver = connection.create_receiver("replies",
event_handler=self,
name=rname)
# capacity determines the maximum number of reply messages this link
# can receive. As messages are received and credit is consumed, this
# driver will 'top up' the credit back to max capacity. This number
# should be large enough to avoid needlessly flow-controlling the
# replies.
self.capacity = 100 # TODO(kgiusti) guesstimate - make configurable
self._credit = 0
self._receiver.open()
def ready(self):
return self._ready
def prepare_for_response(self, request, reply_queue):
"""Apply a unique message identifier to this request message. This will
be used to identify messages sent in reply. The identifier is placed
in the 'id' field of the request message. It is expected that the
identifier will appear in the 'correlation-id' field of the
corresponding response message.
"""
request.id = uuid.uuid4().hex
# reply is placed on reply_queue
self._correlation[request.id] = reply_queue
request.reply_to = self._receiver.source_address
LOG.debug("Reply for msg id=%s expected on link %s",
request.id, request.reply_to)
# Pyngus ReceiverLink event callbacks:
def receiver_active(self, receiver_link):
"""This is a Pyngus callback, invoked by Pyngus when the receiver_link
has transitioned to the open state and is able to receive incoming
messages.
"""
self._ready = True
self._update_credit()
self._on_ready()
LOG.debug("Replies expected on link %s",
self._receiver.source_address)
def receiver_remote_closed(self, receiver, pn_condition):
"""This is a Pyngus callback, invoked by Pyngus when the peer of this
receiver link has initiated closing the connection.
"""
# TODO(kgiusti) Unclear if this error will ever occur (as opposed to
# the Connection failing instead). Log for now, possibly implement a
# recovery strategy if necessary.
LOG.error("Reply subscription closed by peer: %s",
(pn_condition or "no error given"))
def message_received(self, receiver, message, handle):
"""This is a Pyngus callback, invoked by Pyngus when a new message
arrives on this receiver link from the peer.
"""
self._credit = self._credit - 1
self._update_credit()
key = message.correlation_id
if key in self._correlation:
LOG.debug("Received response for msg id=%s", key)
self._correlation[key].put(message)
# cleanup (only need one response per request)
del self._correlation[key]
else:
LOG.warn("Can't find receiver for response msg id=%s, dropping!",
key)
receiver.message_accepted(handle)
def _update_credit(self):
if self.capacity > self._credit:
self._receiver.add_capacity(self.capacity - self._credit)
self._credit = self.capacity
class Server(pyngus.ReceiverEventHandler):
"""A group of links that receive messages from a set of addresses derived
from a given target. Messages arriving on the links are placed on the
'incoming' queue.
"""
def __init__(self, addresses, incoming):
self._incoming = incoming
self._addresses = addresses
def attach(self, connection):
"""Create receiver links over the given connection for all the
configured addresses.
"""
self._receivers = []
for a in self._addresses:
props = {"snd-settle-mode": "settled"}
rname = "Consumer-%s:src=%s:tgt=%s" % (uuid.uuid4().hex, a, a)
r = connection.create_receiver(source_address=a,
target_address=a,
event_handler=self,
name=rname,
properties=props)
# TODO(kgiusti) Hardcoding credit here is sub-optimal. A better
# approach would monitor for a back-up of inbound messages to be
# processed by the consuming application and backpressure the
# sender based on configured thresholds.
r.add_capacity(500)
r.open()
self._receivers.append(r)
# Pyngus ReceiverLink event callbacks:
def receiver_remote_closed(self, receiver, pn_condition):
"""This is a Pyngus callback, invoked by Pyngus when the peer of this
receiver link has initiated closing the connection.
"""
text = "Server subscription %(addr)s closed by peer: %(err_msg)s"
vals = {
"addr": receiver.source_address or receiver.target_address,
"err_msg": pn_condition or "no error given"
}
LOG.error(text % vals)
def message_received(self, receiver, message, handle):
"""This is a Pyngus callback, invoked by Pyngus when a new message
arrives on this receiver link from the peer.
"""
# TODO(kgiusti) Sub-optimal to grant one credit each time a message
# arrives. A better approach would grant batches of credit on demand.
receiver.add_capacity(1)
self._incoming.put(message)
LOG.debug("message received: %s", message)
receiver.message_accepted(handle)
class Hosts(object):
"""An order list of TransportHost addresses. Connection failover
progresses from one host to the next.
"""
def __init__(self, entries=None):
self._entries = entries[:] if entries else []
for entry in self._entries:
entry.port = entry.port or 5672
self._current = 0
def add(self, transport_host):
self._entries.append(transport_host)
@property
def current(self):
if len(self._entries):
return self._entries[self._current]
else:
return transport.TransportHost(hostname="localhost", port=5672)
def next(self):
if len(self._entries) > 1:
self._current = (self._current + 1) % len(self._entries)
return self.current
def __repr__(self):
return '<Hosts ' + str(self) + '>'
def __str__(self):
return ", ".join(["%r" % th for th in self._entries])
class Controller(pyngus.ConnectionEventHandler):
"""Controls the connection to the AMQP messaging service. This object is
the 'brains' of the driver. It maintains the logic for addressing, sending
and receiving messages, and managing the connection. All messaging and I/O
work is done on the Eventloop thread, allowing the driver to run
asynchronously from the messaging clients.
"""
def __init__(self, hosts, default_exchange, config):
self.processor = None
# queue of Task() objects to execute on the eventloop once the
# connection is ready:
self._tasks = moves.queue.Queue(maxsize=500)
# limit the number of Task()'s to execute per call to _process_tasks().
# This allows the eventloop main thread to return to servicing socket
# I/O in a timely manner
self._max_task_batch = 50
# cache of sending links indexed by address:
self._senders = {}
# Servers (set of receiving links), indexed by target:
self._servers = {}
self.hosts = Hosts(hosts)
opt_group = cfg.OptGroup(name='oslo_messaging_amqp',
title='AMQP 1.0 driver options')
config.register_group(opt_group)
config.register_opts(opts.amqp1_opts, group=opt_group)
self.server_request_prefix = \
config.oslo_messaging_amqp.server_request_prefix
self.broadcast_prefix = config.oslo_messaging_amqp.broadcast_prefix
self.group_request_prefix = \
config.oslo_messaging_amqp.group_request_prefix
self._container_name = config.oslo_messaging_amqp.container_name
self.idle_timeout = config.oslo_messaging_amqp.idle_timeout
self.trace_protocol = config.oslo_messaging_amqp.trace
self.ssl_ca_file = config.oslo_messaging_amqp.ssl_ca_file
self.ssl_cert_file = config.oslo_messaging_amqp.ssl_cert_file
self.ssl_key_file = config.oslo_messaging_amqp.ssl_key_file
self.ssl_key_password = config.oslo_messaging_amqp.ssl_key_password
self.ssl_allow_insecure = \
config.oslo_messaging_amqp.allow_insecure_clients
self.separator = "."
self.fanout_qualifier = "all"
self.default_exchange = default_exchange
# can't handle a request until the replies link is active, as
# we need the peer assigned address, so need to delay any
# processing of task queue until this is done
self._replies = None
# Set True when the driver is shutting down
self._closing = False
# only schedule one outstanding reconnect attempt at a time
self._reconnecting = False
self._delay = 0 # seconds between retries
# prevent queuing up multiple requests to run _process_tasks()
self._process_tasks_scheduled = False
self._process_tasks_lock = threading.Lock()
def connect(self):
"""Connect to the messaging service."""
self.processor = eventloop.Thread(self._container_name)
self.processor.wakeup(lambda: self._do_connect())
def add_task(self, task):
"""Add a Task for execution on processor thread."""
self._tasks.put(task)
self._schedule_task_processing()
def shutdown(self, wait=True, timeout=None):
"""Shutdown the messaging service."""
if self.processor:
LOG.debug("Waiting for eventloop to exit")
self.processor.shutdown(wait, timeout)
self.processor = None
LOG.debug("Eventloop exited, driver shut down")
# The remaining methods are reserved to run from the eventloop thread only!
# They must not be invoked directly!
# methods executed by Tasks created by the driver:
def request(self, target, request, reply_queue=None):
"""Send a request message to the given target, and arrange for a
response to be put on the optional reply_queue if specified
"""
address = self._resolve(target)
LOG.debug("Sending request for %s to %s", target, address)
if reply_queue is not None:
self._replies.prepare_for_response(request, reply_queue)
self._send(address, request)
def response(self, address, response):
LOG.debug("Sending response to %s", address)
self._send(address, response)
def subscribe(self, target, in_queue):
"""Subscribe to messages sent to 'target', place received messages on
'in_queue'.
"""
addresses = [
self._server_address(target),
self._broadcast_address(target),
self._group_request_address(target)
]
self._subscribe(target, addresses, in_queue)
def subscribe_notifications(self, target, in_queue):
"""Subscribe for notifications on 'target', place received messages on
'in_queue'.
"""
addresses = [self._group_request_address(target)]
self._subscribe(target, addresses, in_queue)
def _subscribe(self, target, addresses, in_queue):
LOG.debug("Subscribing to %s (%s)", target, addresses)
self._servers[target] = Server(addresses, in_queue)
self._servers[target].attach(self._socket_connection.connection)
def _resolve(self, target):
"""Return a link address for a given target."""
if target.server:
return self._server_address(target)
elif target.fanout:
return self._broadcast_address(target)
else:
return self._group_request_address(target)
def _sender(self, address):
# if we already have a sender for that address, use it
# else establish the sender and cache it
if address in self._senders:
sender = self._senders[address]
else:
sname = "Producer-%s:src=%s:tgt=%s" % (uuid.uuid4().hex,
address, address)
conn = self._socket_connection.connection
sender = conn.create_sender(source_address=address,
target_address=address,
name=sname)
sender.open()
self._senders[address] = sender
return sender
def _send(self, addr, message):
"""Send the message out the link addressed by 'addr'."""
address = str(addr)
message.address = address
self._sender(address).send(message)
def _server_address(self, target):
return self._concatenate([self.server_request_prefix,
target.exchange or self.default_exchange,
target.topic, target.server])
def _broadcast_address(self, target):
return self._concatenate([self.broadcast_prefix,
target.exchange or self.default_exchange,
target.topic, self.fanout_qualifier])
def _group_request_address(self, target):
return self._concatenate([self.group_request_prefix,
target.exchange or self.default_exchange,
target.topic])
def _concatenate(self, items):
return self.separator.join(filter(bool, items))
# commands executed on the processor (eventloop) via 'wakeup()':
def _do_connect(self):
"""Establish connection and reply subscription on processor thread."""
host = self.hosts.current
conn_props = {}
if self.idle_timeout:
conn_props["idle-time-out"] = float(self.idle_timeout)
if self.trace_protocol:
conn_props["x-trace-protocol"] = self.trace_protocol
if self.ssl_ca_file:
conn_props["x-ssl-ca-file"] = self.ssl_ca_file
if self.ssl_cert_file:
# assume this connection is for a server. If client authentication
# support is developed, we'll need an explict flag (server or
# client)
conn_props["x-ssl-server"] = True
conn_props["x-ssl-identity"] = (self.ssl_cert_file,
self.ssl_key_file,
self.ssl_key_password)
conn_props["x-ssl-allow-cleartext"] = self.ssl_allow_insecure
self._socket_connection = self.processor.connect(host,
handler=self,
properties=conn_props)
LOG.debug("Connection initiated")
def _process_tasks(self):
"""Execute Task objects in the context of the processor thread."""
with self._process_tasks_lock:
self._process_tasks_scheduled = False
count = 0
while (not self._tasks.empty() and
count < self._max_task_batch and
self._can_process_tasks):
try:
self._tasks.get(False).execute(self)
except Exception as e:
LOG.exception("Error processing task: %s", e)
count += 1
# if we hit _max_task_batch, resume task processing later:
if not self._tasks.empty() and self._can_process_tasks:
self._schedule_task_processing()
def _schedule_task_processing(self):
"""_process_tasks() helper: prevent queuing up multiple requests for
task processing. This method is called both by the application thread
and the processing thread.
"""
if self.processor:
with self._process_tasks_lock:
already_scheduled = self._process_tasks_scheduled
self._process_tasks_scheduled = True
if not already_scheduled:
self.processor.wakeup(lambda: self._process_tasks())
@property
def _can_process_tasks(self):
"""_process_tasks helper(): indicates that the driver is ready to
process Tasks. In order to process messaging-related tasks, the reply
queue link must be active.
"""
return (not self._closing and
self._replies and self._replies.ready())
def _start_shutdown(self):
"""Called when the driver destroys the controller, this method attempts
to cleanly close the AMQP connection to the peer.
"""
LOG.info("Shutting down AMQP connection")
self._closing = True
if self._socket_connection.connection.active:
# try a clean shutdown
self._socket_connection.connection.close()
else:
# don't wait for a close from the remote, may never happen
self._complete_shutdown()
# reply link active callback:
def _reply_link_ready(self):
"""Invoked when the Replies reply link has become active. At this
point, we are ready to send/receive messages (via Task processing).
"""
LOG.info("Messaging is active (%s:%i)", self.hosts.current.hostname,
self.hosts.current.port)
self._schedule_task_processing()
# callback from eventloop on socket error
def socket_error(self, error):
"""Called by eventloop when a socket error occurs."""
LOG.debug("Socket failure: %s", error)
self._handle_connection_loss()
# Pyngus connection event callbacks (and their helpers), all invoked from
# the eventloop thread:
def connection_failed(self, connection, error):
"""This is a Pyngus callback, invoked by Pyngus when a non-recoverable
error occurs on the connection.
"""
if connection is not self._socket_connection.connection:
# pyngus bug: ignore failure callback on destroyed connections
return
LOG.debug("AMQP Connection failure: %s", error)
self._handle_connection_loss()
def connection_active(self, connection):
"""This is a Pyngus callback, invoked by Pyngus when the connection to
the peer is up. At this point, the driver will activate all subscriber
links (server) and the reply link.
"""
LOG.debug("Connection active (%s:%i), subscribing...",
self.hosts.current.hostname, self.hosts.current.port)
for s in self._servers.itervalues():
s.attach(self._socket_connection.connection)
self._replies = Replies(self._socket_connection.connection,
lambda: self._reply_link_ready())
self._delay = 0
def connection_closed(self, connection):
"""This is a Pyngus callback, invoked by Pyngus when the connection has
cleanly closed. This occurs after the driver closes the connection
locally, and the peer has acknowledged the close. At this point, the
shutdown of the driver's connection is complete.
"""
LOG.debug("AMQP connection closed.")
# if the driver isn't being shutdown, failover and reconnect
self._handle_connection_loss()
def connection_remote_closed(self, connection, reason):
"""This is a Pyngus callback, invoked by Pyngus when the peer has
requested that the connection be closed.
"""
if not self._closing:
# The messaging service/broker is trying to shut down the
# connection. Acknowledge the close, and try to reconnect/failover
# later once the connection has closed (connection_closed is
# called).
LOG.info("Connection closed by peer: %s",
reason or "no reason given")
self._socket_connection.connection.close()
def sasl_done(self, connection, pn_sasl, outcome):
"""This is a Pyngus callback invoked by Pyngus when the SASL handshake
has completed. The outcome of the handshake will be OK on success or
AUTH on failure.
"""
if outcome == proton.SASL.AUTH:
LOG.error("Unable to connect to %s:%s, authentication failure.",
self.hosts.current.hostname, self.hosts.current.port)
# requires user intervention, treat it like a connection failure:
self._handle_connection_loss()
def _complete_shutdown(self):
"""The AMQP Connection has closed, and the driver shutdown is complete.
Clean up controller resources and exit.
"""
self._socket_connection.close()
self.processor.shutdown()
LOG.info("Messaging has shutdown")
def _handle_connection_loss(self):
"""The connection to the messaging service has been lost. Try to
reestablish the connection/failover.
"""
if self._closing:
# we're in the middle of shutting down the driver anyways,
# just consider it done:
self._complete_shutdown()
else:
# for some reason, we've lost the connection to the messaging
# service. Try to re-establish the connection:
if not self._reconnecting:
self._reconnecting = True
self._replies = None
if self._delay == 0:
self._delay = 1
self._do_reconnect()
else:
d = self._delay
LOG.info("delaying reconnect attempt for %d seconds", d)
self.processor.schedule(lambda: self._do_reconnect(), d)
self._delay = min(d * 2, 60)
def _do_reconnect(self):
"""Invoked on connection/socket failure, failover and re-connect to the
messaging service.
"""
if not self._closing:
self._reconnecting = False
self._senders = {}
self._socket_connection.reset()
host = self.hosts.next()
LOG.info("Reconnecting to: %s:%i", host.hostname, host.port)
self._socket_connection.connect(host)
| 41.894915 | 79 | 0.630512 |
73f61ae6b18810bac15c1ec7bd89dbd640f65e8d | 328 | py | Python | joern2sarif/lib/config.py | joernio/joern2sarif | 3b443981f3ec86ed47330b583b4b319f19d5b5a6 | [
"Apache-2.0"
] | 1 | 2021-05-12T21:53:48.000Z | 2021-05-12T21:53:48.000Z | joern2sarif/lib/config.py | joernio/joern2sarif | 3b443981f3ec86ed47330b583b4b319f19d5b5a6 | [
"Apache-2.0"
] | 1 | 2021-04-02T17:17:49.000Z | 2021-04-09T17:52:56.000Z | joern2sarif/lib/config.py | joernio/joern2sarif | 3b443981f3ec86ed47330b583b4b319f19d5b5a6 | [
"Apache-2.0"
] | 1 | 2021-11-29T15:50:08.000Z | 2021-11-29T15:50:08.000Z | default_driver_name = "Joern"
tool_drivers = {
"joern": "Joern",
"ocular": "ShiftLeft Ocular",
"ng-sast": "ShiftLeft NextGen Analysis",
"ngsast": "ShiftLeft NextGen Analysis",
"core": "ShiftLeft CORE Analysis",
}
# URL for viewing reports online
hosted_viewer_uri = "https://sarifviewer.azurewebsites.net"
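# Illustrative usage sketch (not part of the original module): unknown tool
# names can fall back to ``default_driver_name`` when resolving a driver.
if __name__ == "__main__":
    for tool in ("joern", "ng-sast", "some-unknown-tool"):
        print(tool, "->", tool_drivers.get(tool, default_driver_name))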
| 25.230769 | 59 | 0.692073 |
73f6348a357ce4198f9ac6aa325af70ef22531e7 | 3,210 | py | Python | tests/splits/test_splits_splits.py | bfabiandev/atom3d | b2499ff743be2e851c286cabf64696682abffa44 | [
"MIT"
] | null | null | null | tests/splits/test_splits_splits.py | bfabiandev/atom3d | b2499ff743be2e851c286cabf64696682abffa44 | [
"MIT"
] | null | null | null | tests/splits/test_splits_splits.py | bfabiandev/atom3d | b2499ff743be2e851c286cabf64696682abffa44 | [
"MIT"
] | null | null | null | import pytest
import os
import torch
import numpy as np
import atom3d.datasets as da
import atom3d.splits.splits as spl
# -- Test the general split function. --
def test_split():
# Load LMDB dataset
dataset = da.load_dataset('tests/test_data/lmdb', 'lmdb')
# Split with defined indices
indices_train, indices_val, indices_test = [3,0], [2], [1]
s = spl.split(dataset, indices_train, indices_val, indices_test)
train_dataset, val_dataset, test_dataset = s
# Check whether the frames are in the correct dataset
assert dataset[0]['atoms'].equals( train_dataset[1]['atoms'] )
assert dataset[1]['atoms'].equals( test_dataset[0]['atoms'] )
assert dataset[2]['atoms'].equals( val_dataset[0]['atoms'] )
assert dataset[3]['atoms'].equals( train_dataset[0]['atoms'] )
# -- Test specific split functions with mock dataset --
class MockDataset(torch.utils.data.Dataset):
def __init__(self, data, year, scaffold):
self.data = data
self.year = year
self.scaf = scaffold
def __len__(self):
return len(self.data)
def __getitem__(self,idx):
item = {
'data': self.data[idx],
'year': self.year[idx],
'scaffold': self.scaf[idx]
}
return item
years = 2020-np.arange(30)
scaffold = [0]*10 + [1]*5 + [2]*3 + [3,4,5]*2 + [6,7,8,9,10,11]
np.random.shuffle(scaffold)
dataset = MockDataset(np.arange(30),years,scaffold)
def test_split_randomly():
# Perform the split
s = spl.split_randomly(dataset, random_seed=0)
train_dataset, val_dataset, test_dataset = s
# Shuffle with same seed
np.random.seed(0)
shuffled = np.arange(30)
np.random.shuffle(shuffled)
# Compare split to shuffled data
assert sum( shuffled[:24] == [i['data'] for i in train_dataset] ) == 24
assert sum( shuffled[24:27] == [i['data'] for i in val_dataset] ) == 3
assert sum( shuffled[27:] == [i['data'] for i in test_dataset] ) == 3
def test_split_by_group():
# Perform the split
s = spl.split_by_group(dataset,
value_fn=lambda x: x['year'],
train_values=range(1900,2011),
val_values=range(2011,2016),
test_values=range(2016,2021))
train_dataset, val_dataset, test_dataset = s
# Compare split to what it should be
assert sum( [i['data'] for i in train_dataset] == np.arange(10,30) ) == 20
assert sum( [i['data'] for i in val_dataset] == np.arange(5,10) ) == 5
assert sum( [i['data'] for i in test_dataset] == np.arange(0,5) ) == 5
def test_split_by_group_size():
# Perform the split
s = spl.split_by_group_size(dataset,
value_fn=lambda x: x['scaffold'],
val_split=0.2, test_split=0.2)
train_dataset, val_dataset, test_dataset = s
# Compare split to what it should be
assert sum( np.sort([i['scaffold'] for i in train_dataset]) == [0]*10 + [1]*5 + [2]*3 ) == 18
assert sum( np.sort([i['scaffold'] for i in val_dataset]) == [3,3,4,4,5,5] ) == 6
assert sum( np.sort([i['scaffold'] for i in test_dataset]) == [6,7,8,9,10,11] ) == 6
| 36.067416 | 97 | 0.613084 |
73f641686287246a45d44a960965d908218bfc55 | 1,079 | py | Python | test_editor.py | Yajo/editor | f83e15cdf68ce5890240545fb39e1dcf8eda7045 | [
"MIT"
] | null | null | null | test_editor.py | Yajo/editor | f83e15cdf68ce5890240545fb39e1dcf8eda7045 | [
"MIT"
] | null | null | null | test_editor.py | Yajo/editor | f83e15cdf68ce5890240545fb39e1dcf8eda7045 | [
"MIT"
] | null | null | null | from unittest import mock
import editor
import tdir
import unittest
FILENAME = 'a_file.txt'
EDITOR = editor.default_editor()
@mock.patch('editor.subprocess.call', autospec=True)
class TestEditor(unittest.TestCase):
@tdir(FILENAME)
def test_existing(self, call):
actual = editor(filename=FILENAME)
expected = FILENAME + '\n'
assert actual == expected
call.assert_called_once_with([EDITOR, FILENAME])
actual = editor('X', filename=FILENAME)
expected = 'X'
assert actual == expected
@tdir
def test_new(self, call):
actual = editor('X', filename=FILENAME)
expected = 'X'
assert actual == expected
call.assert_called_once_with([EDITOR, FILENAME])
def test_temp(self, call):
actual = editor()
expected = ''
assert actual == expected
call.assert_called_once()
def test_temp2(self, call):
actual = editor('some contents')
expected = 'some contents'
assert actual == expected
call.assert_called_once()
| 24.522727 | 56 | 0.63392 |
73f67f26cc6f4ddfe730b0b7599a04972d3e1a5f | 15,592 | py | Python | marchena/desk.py | samuelmaudo/marchena | e9a522a9be66f7043aa61e316f7e733e8ccf1e32 | [
"BSD-3-Clause"
] | null | null | null | marchena/desk.py | samuelmaudo/marchena | e9a522a9be66f7043aa61e316f7e733e8ccf1e32 | [
"BSD-3-Clause"
] | null | null | null | marchena/desk.py | samuelmaudo/marchena | e9a522a9be66f7043aa61e316f7e733e8ccf1e32 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding:utf-8 -*-
from __future__ import unicode_literals
from copy import deepcopy
from functools import update_wrapper
from django import VERSION as DJANGO_VERSION
from django.conf.urls import url, include
from django.contrib.admin.utils import quote
from django.contrib.admin.views.main import ChangeList
from django.contrib.contenttypes import views as contenttype_views
from django.core.urlresolvers import reverse, NoReverseMatch
from django.http import Http404, HttpResponseRedirect
from django.template.response import TemplateResponse
from django.utils import six
from django.utils.text import capfirst
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.cache import never_cache
from yepes import admin
from yepes.apps import apps
from yepes.conf import settings
from marchena.admin import BlogModelAdmin
from marchena.templatetags.desk_urls import add_preserved_filters
AuthorMixin = apps.get_class('authors.admin', 'AuthorMixin')
CategoryMixin = apps.get_class('posts.admin', 'CategoryMixin')
CommentMixin = apps.get_class('comments.admin', 'CommentMixin')
LinkMixin = apps.get_class('links.admin', 'LinkMixin')
LinkCategoryMixin = apps.get_class('links.admin', 'LinkCategoryMixin')
PostMixin = apps.get_class('posts.admin', 'PostMixin')
Author = apps.get_model('authors', 'Author')
Blog = apps.get_model('blogs', 'Blog')
Category = apps.get_model('posts', 'Category')
Comment = apps.get_model('comments', 'Comment')
Link = apps.get_model('links', 'Link')
LinkCategory = apps.get_model('links', 'LinkCategory')
Post = apps.get_model('posts', 'Post')
Tag = apps.get_model('posts', 'Tag')
class DeskSite(admin.AdminSite):
def __init__(self, name='desk'):
super(DeskSite, self).__init__(name)
def app_index(self, request, app_label, extra_context=None):
request.current_app = self.name
user = request.user
blogs = Blog.objects.all()
if not user.is_superuser:
blogs = blogs.filter(authors=user)
try:
blog = blogs.get(slug=app_label)
        except Blog.DoesNotExist:
raise Http404('The requested admin page does not exist.')
app_dict = {}
for model, model_admin in self._registry.items():
app_label = model._meta.app_label
has_module_perms = model_admin.has_module_permission(request)
if not has_module_perms:
continue
# Check whether user has any perm for this module.
# If so, add the module to the model_list.
perms = model_admin.get_model_perms(request)
if True not in perms.values():
continue
info = (self.name, app_label, model._meta.model_name)
kwargs = {'blog_slug': blog.slug}
model_dict = {
'name': capfirst(model._meta.verbose_name_plural),
'perms': perms,
}
if perms.get('change', False):
try:
view_name = '{0}:{1}_{2}_changelist'.format(*info)
model_dict['admin_url'] = reverse(view_name, kwargs=kwargs)
except NoReverseMatch:
pass
if perms.get('add', False):
try:
view_name = '{0}:{1}_{2}_add'.format(*info)
model_dict['add_url'] = reverse(view_name, kwargs=kwargs)
except NoReverseMatch:
pass
if app_dict:
                app_dict['models'].append(model_dict)
else:
# First time around, now that we know there's
# something to display, add in the necessary meta
# information.
app_dict = {
'name': blog.title,
'app_url': '',
'has_module_perms': has_module_perms,
'models': [model_dict],
}
if not app_dict:
raise Http404('The requested admin page does not exist.')
# Sort the models alphabetically within each app.
app_dict['models'].sort(key=lambda x: x['name'])
context = self.each_context(request)
context.update({
'title': _("{0}'s desk").format(blog.title),
'app_list': [app_dict],
})
if extra_context:
context.update(extra_context)
return TemplateResponse(request, self.app_index_template or [
'desk/app_index.html',
'admin/app_index.html',
], context)
def get_model_urls(self):
"""
Model's views.
"""
urlpatterns = []
for model, model_admin in six.iteritems(self._registry):
opts = model._meta
urlpatterns += [
url(r'^{0}/{1}/'.format(opts.app_label, opts.model_name),
include(model_admin.urls)),
url(r'^(?P<blog_slug>[a-z\-]+)/{0}/'.format(opts.model_name),
include(model_admin.urls)),
]
return urlpatterns
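    # For example, for the registered Post model (app_label "posts") the two
    # patterns above expose the same admin views both under "posts/post/..."
    # and under a per-blog prefix such as "my-blog/post/...", where "my-blog"
    # is an illustrative slug captured as blog_slug.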
def get_site_urls(self):
"""
Admin-site-wide views.
"""
def wrap(view, cacheable=False):
def wrapper(*args, **kwargs):
return self.admin_view(view, cacheable)(*args, **kwargs)
wrapper.admin_site = self
return update_wrapper(wrapper, view)
return [
url(r'^$',
wrap(self.index),
name='index'),
url(r'^logout/$',
wrap(self.logout),
name='logout'),
url(r'^password_change/$',
wrap(self.password_change, cacheable=True),
name='password_change'),
url(r'^password_change/done/$',
wrap(self.password_change_done, cacheable=True),
name='password_change_done'),
url(r'^jsi18n/$',
wrap(self.i18n_javascript, cacheable=True),
name='jsi18n'),
url(r'^r/(?P<content_type_id>\d+)/(?P<object_id>.+)/$',
wrap(contenttype_views.shortcut),
name='view_on_site'),
url(r'^(?P<app_label>[a-z\-]+)/$',
wrap(self.app_index),
name='app_list'),
]
def get_urls(self):
if settings.DEBUG and DJANGO_VERSION < (1, 10):
self.check_dependencies()
urlpatterns = self.get_site_urls()
urlpatterns.extend(self.get_model_urls())
return urlpatterns
@never_cache
def index(self, request, extra_context=None):
"""
Displays the main admin index page, which lists all of the installed
apps that have been registered in this site.
"""
request.current_app = self.name
user = request.user
blogs = Blog.objects.get_queryset()
if not user.is_superuser:
blogs = blogs.filter(authors=user)
app_dict = {}
for blog in blogs:
for model, model_admin in six.iteritems(self._registry):
app_label = model._meta.app_label
has_module_perms = user.has_module_perms(app_label)
if not has_module_perms:
continue
# Check whether user has any perm for this module.
# If so, add the module to the model_list.
perms = model_admin.get_model_perms(request)
if True not in perms.values():
continue
info = (self.name, app_label, model._meta.model_name)
kwargs = {'blog_slug': blog.slug}
model_dict = {
'name': capfirst(model._meta.verbose_name_plural),
'perms': perms,
}
if perms.get('change', False):
try:
view_name = '{0}:{1}_{2}_changelist'.format(*info)
model_dict['admin_url'] = reverse(view_name, kwargs=kwargs)
except NoReverseMatch:
pass
if perms.get('add', False):
try:
view_name = '{0}:{1}_{2}_add'.format(*info)
model_dict['add_url'] = reverse(view_name, kwargs=kwargs)
except NoReverseMatch:
pass
if blog.slug in app_dict:
app_dict[blog.slug]['models'].append(model_dict)
else:
app_dict[blog.slug] = {
'name': blog.title,
'app_url': reverse('{0}:app_list'.format(self.name),
kwargs={'app_label': blog.slug}),
'has_module_perms': has_module_perms,
'models': [model_dict],
}
# Sort the apps alphabetically.
app_list = list(six.itervalues(app_dict))
app_list.sort(key=lambda x: x['name'])
# Sort the models alphabetically within each app.
for app in app_list:
app['models'].sort(key=lambda x: x['name'])
context = self.each_context(request)
context.update({
'title': _('Desk'),
'app_list': app_list,
})
if extra_context:
context.update(extra_context)
return TemplateResponse(request, self.index_template or [
'desk/index.html',
'admin/index.html',
], context)
desk_site = DeskSite()
class DeskChangeList(ChangeList):
def __init__(self, request, *args, **kwargs):
self.blog = request.blog
super(DeskChangeList, self).__init__(request, *args, **kwargs)
def url_for_result(self, result):
view_name = '{0}:{1}_{2}_change'.format(
self.model_admin.admin_site.name,
self.opts.app_label,
self.opts.model_name)
kwargs = {'object_id': quote(getattr(result, self.pk_attname))}
kwargs['blog_slug'] = quote(self.blog.slug)
return reverse(view_name, kwargs=kwargs)
class BlogModelDesk(BlogModelAdmin):
change_form_template = 'desk/change_form.html'
change_list_template = 'desk/change_list.html'
delete_confirmation_template = 'desk/delete_confirmation.html'
object_history_template = 'desk/object_history.html'
def add_view(self, request, form_url='', extra_context=None, blog_slug=None):
request.blog = Blog.objects.get(slug=blog_slug)
context = {'blog': request.blog}
context.update(extra_context or {})
return super(BlogModelDesk, self).add_view(request, form_url, context)
def change_view(self, request, object_id, form_url='', extra_context=None, blog_slug=None):
request.blog = Blog.objects.get(slug=blog_slug)
context = {'blog': request.blog}
context.update(extra_context or {})
return super(BlogModelDesk, self).change_view(request, object_id, form_url, context)
def changelist_view(self, request, extra_context=None, blog_slug=None):
request.blog = Blog.objects.get(slug=blog_slug)
context = {'blog': request.blog}
context.update(extra_context or {})
return super(BlogModelDesk, self).changelist_view(request, context)
def delete_view(self, request, object_id, extra_context=None, blog_slug=None):
request.blog = Blog.objects.get(slug=blog_slug)
context = {'blog': request.blog}
if extra_context:
context.update(extra_context)
# This sucks but is the best solution that I have found.
original_registry = self.admin_site._registry
self.admin_site._registry = {}
response = super(BlogModelDesk, self).delete_view(request, object_id, context)
self.admin_site._registry = original_registry
return response
def get_changelist(self, request, **kwargs):
return DeskChangeList
def get_fieldsets(self, request, obj=None):
fieldsets = super(BlogModelDesk, self).get_fieldsets(request, obj)
fs = []
for title, opts in deepcopy(fieldsets):
fields = opts.get('fields', ())
try:
i = fields.index('blog')
except ValueError:
pass
else:
if len(fields) == 1:
continue
fields = list(fields)
fields.pop(i)
opts['fields'] = fields
fs.append((title, opts))
return fs
def get_queryset(self, request):
qs = super(BlogModelAdmin, self).get_queryset(request)
qs = qs.filter(**{self.blog_field: request.blog})
return qs
def history_view(self, request, object_id, extra_context=None, blog_slug=None):
request.blog = Blog.objects.get(slug=blog_slug)
context = {'blog': request.blog}
context.update(extra_context or {})
return super(BlogModelDesk, self).history_view(request, object_id, context)
def response_post_save_add(self, request, obj):
opts = self.model._meta
if self.has_change_permission(request, None):
view_name = '{0}:{1}_{2}_changelist'.format(
self.admin_site.name,
opts.app_label,
opts.model_name,
)
post_url = reverse(view_name, kwargs={'blog_slug': request.blog.slug})
preserved_filters = self.get_preserved_filters(request)
context = {'preserved_filters': preserved_filters, 'opts': opts}
post_url = add_preserved_filters(context, post_url)
else:
post_url = reverse('{0}:index'.format(self.admin_site.name))
return HttpResponseRedirect(post_url)
def response_post_save_change(self, request, obj):
opts = self.model._meta
if self.has_change_permission(request, None):
view_name = '{0}:{1}_{2}_changelist'.format(
self.admin_site.name,
opts.app_label,
opts.model_name,
)
post_url = reverse(view_name, kwargs={'blog_slug': request.blog.slug})
preserved_filters = self.get_preserved_filters(request)
context = {'preserved_filters': preserved_filters, 'opts': opts}
post_url = add_preserved_filters(context, post_url)
else:
post_url = reverse('{0}:index'.format(self.admin_site.name))
return HttpResponseRedirect(post_url)
def save_model(self, request, obj, *args, **kwargs):
if hasattr(request, 'blog') and '__' not in self.blog_field:
field = obj._meta.get_field(self.blog_field)
if field.many_to_one or field.one_to_one:
setattr(obj, self.blog_field, request.blog)
super(BlogModelDesk, self).save_model(request, obj, *args, **kwargs)
class AuthorDesk(AuthorMixin, BlogModelDesk):
pass
class CategoryDesk(CategoryMixin, BlogModelDesk):
pass
class CommentDesk(CommentMixin, BlogModelDesk):
pass
class LinkDesk(LinkMixin, BlogModelDesk):
pass
class LinkCategoryDesk(LinkCategoryMixin, BlogModelDesk):
pass
class PostDesk(PostMixin, admin.DisplayableMixin, BlogModelDesk):
pass
desk_site.register(Author, AuthorDesk)
desk_site.register(Category, CategoryDesk)
desk_site.register(Comment, CommentDesk)
desk_site.register(Link, LinkDesk)
desk_site.register(LinkCategory, LinkCategoryDesk)
desk_site.register(Post, PostDesk)
| 36.344988 | 95 | 0.593702 |
73f693d6e93c81bd7c0f415015ece7e7063a0132 | 549 | py | Python | vegapy/imshow.py | scivision/vegapy | db095f790c7e86b3dd273fdd260c83fcb2550772 | [
"MIT"
] | 2 | 2021-03-21T22:16:04.000Z | 2022-03-24T22:38:40.000Z | vegapy/imshow.py | scivision/vegapy | db095f790c7e86b3dd273fdd260c83fcb2550772 | [
"MIT"
] | null | null | null | vegapy/imshow.py | scivision/vegapy | db095f790c7e86b3dd273fdd260c83fcb2550772 | [
"MIT"
] | 2 | 2018-11-16T12:14:03.000Z | 2019-03-03T03:58:49.000Z | import numpy as np
import matplotlib.pyplot as plt
def imshow(image, scale='lin', **kwargs):
if scale == 'log':
try:
image = np.log10(image)
except TypeError as e:
image = np.log10(image.value)
elif scale == 'ln':
image = np.log(image)
if 'imshow' in kwargs:
plt.imshow(image, **kwargs['imshow'])
else:
plt.imshow(image)
if 'axis' in kwargs:
plt.axis(**kwargs['axis'])
if 'colorbar' in kwargs:
plt.colorbar(**kwargs['colorbar'])
plt.show()
| 26.142857 | 45 | 0.559199 |
73f69e2378064a9b2a77ecef28a8d5c4ab61f27c | 3,401 | py | Python | tests/unit/modules/rvm_test.py | skrobul/salt | ef7fb71082cce7a9783e00b9c65062fefae09263 | [
"Apache-2.0"
] | 1 | 2018-02-03T17:30:56.000Z | 2018-02-03T17:30:56.000Z | tests/unit/modules/rvm_test.py | skrobul/salt | ef7fb71082cce7a9783e00b9c65062fefae09263 | [
"Apache-2.0"
] | null | null | null | tests/unit/modules/rvm_test.py | skrobul/salt | ef7fb71082cce7a9783e00b9c65062fefae09263 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Import Salt Testing libs
from salttesting import skipIf, TestCase
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch
ensure_in_syspath('../../')
# Import salt libs
import salt.modules.rvm as rvm
rvm.__salt__ = {
'cmd.has_exec': MagicMock(return_value=True),
'config.option': MagicMock(return_value=None)
}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class TestRvmModule(TestCase):
def test__rvm(self):
mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
with patch.dict(rvm.__salt__, {'cmd.run_all': mock}):
rvm._rvm('install', '1.9.3')
mock.assert_called_once_with(
'/usr/local/rvm/bin/rvm install 1.9.3', runas=None
)
def test__rvm_do(self):
mock = MagicMock(return_value=None)
with patch.object(rvm, '_rvm', new=mock):
rvm._rvm_do('1.9.3', 'gemset list')
mock.assert_called_once_with('1.9.3 do gemset list', runas=None)
def test_install(self):
mock = MagicMock(return_value={'retcode': 0})
with patch.dict(rvm.__salt__, {'cmd.run_all': mock}):
rvm.install()
def test_list(self):
list_output = '''
rvm rubies
jruby-1.6.5.1 [ amd64 ]
ree-1.8.7-2011.03 [ x86_64 ]
ree-1.8.7-2011.12 [ x86_64 ]
=* ree-1.8.7-2012.02 [ x86_64 ]
ruby-1.9.2-p180 [ x86_64 ]
ruby-1.9.3-p125 [ x86_64 ]
ruby-head [ x86_64 ]
# => - current
# =* - current && default
# * - default
'''
with patch.object(rvm, '_rvm') as mock_method:
mock_method.return_value = list_output
self.assertEqual(
[['jruby', '1.6.5.1', False],
['ree', '1.8.7-2011.03', False],
['ree', '1.8.7-2011.12', False],
['ree', '1.8.7-2012.02', True],
['ruby', '1.9.2-p180', False],
['ruby', '1.9.3-p125', False],
['ruby', 'head', False]],
rvm.list_())
def test_gemset_list(self):
output = '''
gemsets for ree-1.8.7-2012.02 (found in /usr/local/rvm/gems/ree-1.8.7-2012.02)
global
bar
foo
'''
with patch.object(rvm, '_rvm_do') as mock_method:
mock_method.return_value = output
self.assertEqual(
['global', 'bar', 'foo'],
rvm.gemset_list())
def test_gemset_list_all(self):
output = '''
gemsets for ruby-1.9.3-p125 (found in /usr/local/rvm/gems/ruby-1.9.3-p125)
9bar
9foo
global
gemsets for ruby-head (found in /usr/local/rvm/gems/ruby-head)
global
headbar
headfoo
gemsets for jruby-1.6.5.1 (found in /usr/local/rvm/gems/jruby-1.6.5.1)
global
jbar
jfoo
gemsets for ruby-1.9.2-p180 (found in /usr/local/rvm/gems/ruby-1.9.2-p180)
global
'''
with patch.object(rvm, '_rvm_do') as mock_method:
mock_method.return_value = output
self.assertEqual(
{'jruby-1.6.5.1': ['global', 'jbar', 'jfoo'],
'ruby-1.9.2-p180': ['global'],
'ruby-1.9.3-p125': ['9bar', '9foo', 'global'],
'ruby-head': ['global', 'headbar', 'headfoo']},
rvm.gemset_list_all())
if __name__ == '__main__':
from integration import run_tests
run_tests(TestRvmModule, needs_daemon=False)
| 27.650407 | 78 | 0.572479 |
73f6b0534f6f72d2b80f3bae29e1d5fbed82e70e | 5,794 | py | Python | train_DL_model/train_models.py | 98k-bot/model_differential_privacy | f72dc6800402575f518c40cd4a6e66d00db3e38a | [
"Apache-2.0"
] | null | null | null | train_DL_model/train_models.py | 98k-bot/model_differential_privacy | f72dc6800402575f518c40cd4a6e66d00db3e38a | [
"Apache-2.0"
] | null | null | null | train_DL_model/train_models.py | 98k-bot/model_differential_privacy | f72dc6800402575f518c40cd4a6e66d00db3e38a | [
"Apache-2.0"
] | null | null | null | '''
Training baseline models (DenseNet121) on the dataset
Change the build_model() function to use your custom deep learning model
This function should return a valid keras or tf.keras Model object
'''
import sys
sys.path.append("..")
from train_DL_model import model_params
from utils import util_functions as utils
import os, numpy as np
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.callbacks import ModelCheckpoint
import tensorflow as tf
#tf.compat.v1.disable_eager_execution()
########################################################################################
# Hard-coded rules. Please change this if you are changing the dataset format
########################################################################################
# 1. Provide the order in which the labels are provided in the label matrix
task_mapping = {"shape": 0, "color": 1, "size": 2, "quadrant": 3, "background": 4}
# 2. Provide the max number of classes in each task
label_num = { "shape": 5, "color": 7, "size": 3, "quadrant": 4, "background": 3}
########################################################################################
# Function that defines the DL model to be trained.
# Edit this function to use custom DL model.
# This function should written a valid keras or tf.keras Model object
########################################################################################
def build_model(num_lables):
"""Define the deep learning model to be trained"""
from keras.applications import DenseNet121
from keras.layers import (Dense, Dropout, Flatten)
from keras.models import Model, Sequential, load_model
base_model = DenseNet121(include_top=False, weights=None, input_shape=(256,256,3))
flat_1 = Flatten()(base_model.output)
cus_dense_1 = Dense(512, activation='relu', name='cus_dense_1')(flat_1)
cus_dense_do_1 = Dropout(0.5, name='cus_dense_do_1')(cus_dense_1)
cus_dense_2 = Dense(100, activation='relu', name='cus_dense_2')(cus_dense_do_1)
cus_dense_do_2 = Dropout(0.3, name='cus_dense_do_2')(cus_dense_2)
cus_dense_3 = Dense(num_lables, activation='softmax', name='cus_dense_3')(cus_dense_do_2)
model = Model(base_model.input, cus_dense_3)
return model
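# Illustrative alternative (not part of the original script): build_model()
# above is the hook the module header says to swap for a custom network. The
# sketch below shows the minimal contract such a function must satisfy: take
# the number of labels and return a Keras Model whose softmax output has that
# many units. The layer sizes are arbitrary example values.
def build_model_minimal_example(num_labels):
    """Tiny tf.keras model obeying the same contract as build_model()."""
    from tensorflow.keras import layers, Model, Input
    inputs = Input(shape=(256, 256, 3))
    x = layers.Conv2D(16, 3, activation='relu')(inputs)
    x = layers.MaxPooling2D()(x)
    x = layers.Conv2D(32, 3, activation='relu')(x)
    x = layers.GlobalAveragePooling2D()(x)
    outputs = layers.Dense(num_labels, activation='softmax', name='output')(x)
    return Model(inputs, outputs)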
def train_models():
"""This is the main function that trains the model for one or many tasks"""
task = model_params.task
for task_key, task_value in task_mapping.items():
if(task == 'all' or task == task_key):
print("==================================================")
print("Building model for task ..... ", task_key)
num_task_labels = label_num[task_key]
model_name = model_params.output_name_prefix + "_" + task_key
# Edit this method to use custom DL models.
model = build_model(num_task_labels)
print("Printing model summary for task", task_key)
print(model.summary())
print("\nLoading training dataset ...... ")
data_type = "train"
X = np.load(model_params.train_data_path)
x_train = X["data"]
answer = X["lables"][:,task_value]
print("Train data shape", x_train.shape)
print("Train labels shapes ", answer.shape)
print("Train labels set", set(answer))
print("Pre-processing training dataset ...... ")
x_train = x_train.astype('float32')
x_train /= 255
total_data_point = x_train.shape[0]
data_indices = np.arange(total_data_point)
np.random.shuffle(data_indices)
x_train = x_train.take(data_indices, axis=0)
answer = answer.take(data_indices, axis=0)
y_train = to_categorical(answer, num_task_labels)
utils.create_directory(model_params.model_dir)
filepath = os.path.join(model_params.model_dir, model_name + "_e{epoch:02d}-acc{val_accuracy:.5f}.hdf5")
#checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
#callbacks_list = [checkpoint]
#checkpoint_filepath = '/tmp/checkpoint'
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=filepath,
save_weights_only=True,
monitor='val_accuracy',
mode='max',
save_best_only=True)
print("Submitting the model for training ...... ")
adam = Adam(lr=model_params.learning_rate, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
model.compile(optimizer=adam, loss=model_params.loss_function, metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=model_params.batch_size, epochs=model_params.epoch,
verbose=model_params.verbose, validation_split=model_params.validation_split, callbacks= [model_checkpoint_callback])
#model.fit(x_train, y_train, batch_size=model_params.batch_size, epochs=model_params.epoch, verbose=model_params.verbose, validation_split=model_params.validation_split, callbacks=callbacks_list)
print("\nTraining has finished ...... ")
#model.save(os.path.join(model_params.model_dir, model_name + "_final.hdf5"))
#print("Trained model store at: ", os.path.join(model_params.model_dir, model_name + "_final.hdf5"))
print("\nLoading test dataset ...... ")
data_type = "test"
test_data = np.load(model_params.test_data_path)
test_data_x = test_data["data"]
test_data_answer = test_data["lables"][:,task_value]
print("Test data shape", test_data_x.shape)
print("Test lables shapes ", test_data_answer.shape)
print("Test lables set", set(test_data_answer))
print("Pre-processing test dataset ...... ")
test_data_x = test_data_x.astype('float32')
test_data_x /= 255
test_data_answer_one_hot = to_categorical(test_data_answer, num_task_labels)
print("Evaluating the model using test data ...... ")
score = model.evaluate(x=test_data_x, y=test_data_answer_one_hot, verbose=model_params.verbose)
print("\nTest Score", score)
return model
if __name__ == "__main__":
train_models()
| 44.229008 | 198 | 0.691405 |
73f6b34ac317aec9a5d725b4dcf90189ee0b3ada | 1,428 | py | Python | python/lib/dcoscli/tests/integrations/test_help.py | isabella232/dcos-core-cli | 11f2a26fd568b5e1501057a62ad013eb79acfd92 | [
"Apache-2.0",
"MIT"
] | 3 | 2019-04-23T05:31:19.000Z | 2021-11-15T19:18:23.000Z | python/lib/dcoscli/tests/integrations/test_help.py | dcos/dcos-core-cli | 11f2a26fd568b5e1501057a62ad013eb79acfd92 | [
"Apache-2.0",
"MIT"
] | 356 | 2018-07-26T15:17:56.000Z | 2022-03-29T21:06:06.000Z | python/lib/dcoscli/tests/integrations/test_help.py | isabella232/dcos-core-cli | 11f2a26fd568b5e1501057a62ad013eb79acfd92 | [
"Apache-2.0",
"MIT"
] | 22 | 2018-07-24T13:20:51.000Z | 2021-11-15T19:18:26.000Z | from dcoscli.test.common import assert_command
def test_help_calico():
with open('tests/data/calico/help.txt') as content:
assert_command(['dcos', 'help', 'calico'],
stdout=content.read().encode('utf-8'))
def test_help_job():
with open('dcoscli/data/help/job.txt') as content:
assert_command(['dcos', 'help', 'job'],
stdout=content.read().encode('utf-8'))
def test_help_marathon():
with open('tests/data/marathon/help.txt') as content:
assert_command(['dcos', 'help', 'marathon'],
stdout=content.read().encode('utf-8'))
def test_help_node():
with open('dcoscli/data/help/node.txt') as content:
assert_command(['dcos', 'help', 'node'],
stdout=content.read().encode('utf-8'))
# def test_help_package():
# with open('dcoscli/data/help/package.txt') as content:
# assert_command(['dcos', 'help', 'package'],
# stdout=content.read().encode('utf-8'))
def test_help_service():
with open('dcoscli/data/help/service.txt') as content:
assert_command(['dcos', 'help', 'service'],
stdout=content.read().encode('utf-8'))
# def test_help_task():
# with open('dcoscli/data/help/task.txt') as content:
# assert_command(['dcos', 'help', 'task'],
# stdout=content.read().encode('utf-8'))
| 33.209302 | 63 | 0.583333 |
73f6f6b5681e373a90b596e24f6836841b60bd9a | 2,208 | py | Python | docs/conf.py | caltech-netlab/gym-acnportal | cacd2e4aa9159a3bf7f0b8e3db2dbb0832d76e46 | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | caltech-netlab/gym-acnportal | cacd2e4aa9159a3bf7f0b8e3db2dbb0832d76e46 | [
"BSD-3-Clause"
] | 3 | 2021-04-28T14:43:32.000Z | 2021-04-28T14:58:04.000Z | docs/conf.py | caltech-netlab/gym-acnportal | cacd2e4aa9159a3bf7f0b8e3db2dbb0832d76e46 | [
"BSD-3-Clause"
] | 1 | 2021-02-22T03:43:44.000Z | 2021-02-22T03:43:44.000Z | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = "gym-acnportal"
copyright = "2020, Sunash Sharma"
author = "Sunash Sharma"
# The full version, including alpha/beta/rc tags
release = "0.0.1"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc"]
# The master toctree document.
master_doc = "index"
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Auto API (sphinx-autoapi) configuration details.
extensions.append("autoapi.extension")
autoapi_type = "python"
autoapi_dirs = ["../gym_acnportal"]
autoapi_keep_files = True
autoapi_root = ""
| 33.454545 | 79 | 0.676178 |
73f7047cca2c620cd662fd1a9a4023ccd7ee9afb | 3,938 | py | Python | mitmproxy/utils/arg_check.py | takeratta/mitmproxy | 569d275d763f499cce9673fcf118dcc8d59d2eeb | [
"MIT"
] | null | null | null | mitmproxy/utils/arg_check.py | takeratta/mitmproxy | 569d275d763f499cce9673fcf118dcc8d59d2eeb | [
"MIT"
] | null | null | null | mitmproxy/utils/arg_check.py | takeratta/mitmproxy | 569d275d763f499cce9673fcf118dcc8d59d2eeb | [
"MIT"
] | null | null | null | import sys
DEPRECATED = """
--cadir
-Z
--body-size-limit
--stream
--palette
--palette-transparent
--follow
--order
--no-mouse
--reverse
--socks
--http2-priority
--no-http2-priority
--no-websocket
--websocket
--spoof-source-address
--upstream-bind-address
--ciphers-client
--ciphers-server
--client-certs
--no-upstream-cert
--add-upstream-certs-to-client-chain
--upstream-trusted-cadir
--upstream-trusted-ca
--ssl-version-client
--ssl-version-server
--no-onboarding
--onboarding-host
--onboarding-port
--server-replay-use-header
--no-pop
--replay-ignore-content
--replay-ignore-payload-param
--replay-ignore-param
--replay-ignore-host
--replace-from-file
"""
REPLACED = """
-t
-u
--wfile
-a
--afile
-z
-b
--bind-address
--port
-I
--ignore
--tcp
--cert
--insecure
-c
--replace
-i
-f
--filter
"""
REPLACEMENTS = {
"--stream": "stream_large_bodies",
"--palette": "console_palette",
"--palette-transparent": "console_palette_transparent:",
"--follow": "console_focus_follow",
"--order": "console_order",
"--no-mouse": "console_mouse",
"--reverse": "console_order_reversed",
"--no-http2-priority": "http2_priority",
"--no-websocket": "websocket",
"--no-upstream-cert": "upstream_cert",
"--upstream-trusted-cadir": "ssl_verify_upstream_trusted_cadir",
"--upstream-trusted-ca": "ssl_verify_upstream_trusted_ca",
"--no-onboarding": "onboarding",
"--no-pop": "server_replay_nopop",
"--replay-ignore-content": "server_replay_ignore_content",
"--replay-ignore-payload-param": "server_replay_ignore_payload_params",
"--replay-ignore-param": "server_replay_ignore_params",
"--replay-ignore-host": "server_replay_ignore_host",
"--replace-from-file": "replacements (use @ to specify path)",
"-t": "--stickycookie",
"-u": "--stickyauth",
"--wfile": "--save-stream-file",
"-a": "-w Prefix path with + to append.",
"--afile": "-w Prefix path with + to append.",
"-z": "--anticomp",
"-b": "--listen-host",
"--bind-address": "--listen-host",
"--port": "--listen-port",
"-I": "--ignore-hosts",
"--ignore": "--ignore-hosts",
"--tcp": "--tcp-hosts",
"--cert": "--certs",
"--insecure": "--ssl-insecure",
"-c": "-C",
"--replace": "--replacements",
"-i": "--intercept",
"-f": "--view-filter",
"--filter": "--view-filter"
}
def check():
args = sys.argv[1:]
print()
if "-U" in args:
print("-U is deprecated, please use --mode upstream:SPEC instead")
if "-T" in args:
print("-T is deprecated, please use --mode transparent instead")
for option in ("-e", "--eventlog", "--norefresh"):
if option in args:
print("{} has been removed.".format(option))
for option in ("--nonanonymous", "--singleuser", "--htpasswd"):
if option in args:
print(
'{} is deprecated.\n'
'Please use `--proxyauth SPEC` instead.\n'
'SPEC Format: "username:pass", "any" to accept any user/pass combination,\n'
'"@path" to use an Apache htpasswd file, or\n'
'"ldap[s]:url_server_ldap:dn_auth:password:dn_subtree" '
'for LDAP authentication.'.format(option))
for option in REPLACED.splitlines():
if option in args:
print(
"{} is deprecated.\n"
"Please use `{}` instead.".format(
option,
REPLACEMENTS.get(option)
)
)
for option in DEPRECATED.splitlines():
if option in args:
print(
"{} is deprecated.\n"
"Please use `--set {}=value` instead.\n"
"To show all options and their default values use --options".format(
option,
REPLACEMENTS.get(option, None) or option.lstrip("-").replace("-", "_")
)
)
| 26.42953 | 92 | 0.57999 |
73f720859589d7051a2b7f3ac8be62d431469183 | 6,990 | py | Python | configs/example/fs.py | pnkfb9/gem5_priority | fbf766277df78a470758cf7d798d12fb1e7c51c4 | [
"BSD-3-Clause"
] | null | null | null | configs/example/fs.py | pnkfb9/gem5_priority | fbf766277df78a470758cf7d798d12fb1e7c51c4 | [
"BSD-3-Clause"
] | null | null | null | configs/example/fs.py | pnkfb9/gem5_priority | fbf766277df78a470758cf7d798d12fb1e7c51c4 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2010-2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ali Saidi
import optparse
import sys
import m5
from m5.defines import buildEnv
from m5.objects import *
from m5.util import addToPath, fatal
addToPath('../common')
from FSConfig import *
from SysPaths import *
from Benchmarks import *
import Simulation
import CacheConfig
from Caches import *
import Options
parser = optparse.OptionParser()
Options.addCommonOptions(parser)
Options.addFSOptions(parser)
(options, args) = parser.parse_args()
if args:
print "Error: script doesn't take any positional arguments"
sys.exit(1)
# driver system CPU is always simple... note this is an assignment of
# a class, not an instance.
DriveCPUClass = AtomicSimpleCPU
drive_mem_mode = 'atomic'
# system under test can be any CPU
(TestCPUClass, test_mem_mode, FutureClass) = Simulation.setCPUClass(options)
TestCPUClass.clock = options.clock
DriveCPUClass.clock = options.clock
if options.benchmark:
try:
bm = Benchmarks[options.benchmark]
except KeyError:
print "Error benchmark %s has not been defined." % options.benchmark
print "Valid benchmarks are: %s" % DefinedBenchmarks
sys.exit(1)
else:
if options.dual:
bm = [SysConfig(disk=options.disk_image, mem=options.mem_size), SysConfig(disk=options.disk_image, mem=options.mem_size)]
else:
bm = [SysConfig(disk=options.disk_image, mem=options.mem_size)]
np = options.num_cpus
if buildEnv['TARGET_ISA'] == "alpha":
test_sys = makeLinuxAlphaSystem(test_mem_mode, bm[0])
elif buildEnv['TARGET_ISA'] == "mips":
test_sys = makeLinuxMipsSystem(test_mem_mode, bm[0])
elif buildEnv['TARGET_ISA'] == "sparc":
test_sys = makeSparcSystem(test_mem_mode, bm[0])
elif buildEnv['TARGET_ISA'] == "x86":
test_sys = makeLinuxX86System(test_mem_mode, options.num_cpus, bm[0])
elif buildEnv['TARGET_ISA'] == "arm":
test_sys = makeArmSystem(test_mem_mode,
options.machine_type, bm[0],
bare_metal=options.bare_metal)
else:
fatal("Incapable of building %s full system!", buildEnv['TARGET_ISA'])
if options.kernel is not None:
test_sys.kernel = binary(options.kernel)
if options.script is not None:
test_sys.readfile = options.script
test_sys.init_param = options.init_param
test_sys.cpu = [TestCPUClass(cpu_id=i) for i in xrange(np)]
if options.caches or options.l2cache:
test_sys.iocache = IOCache(clock = '1GHz',
addr_ranges = test_sys.mem_ranges)
test_sys.iocache.cpu_side = test_sys.iobus.master
test_sys.iocache.mem_side = test_sys.membus.slave
else:
test_sys.iobridge = Bridge(delay='50ns', ranges = test_sys.mem_ranges)
test_sys.iobridge.slave = test_sys.iobus.master
test_sys.iobridge.master = test_sys.membus.slave
# Sanity check
if options.fastmem:
if TestCPUClass != AtomicSimpleCPU:
fatal("Fastmem can only be used with atomic CPU!")
if (options.caches or options.l2cache):
fatal("You cannot use fastmem in combination with caches!")
for i in xrange(np):
if options.fastmem:
test_sys.cpu[i].fastmem = True
if options.checker:
test_sys.cpu[i].addCheckerCpu()
test_sys.cpu[i].createThreads()
CacheConfig.config_cache(options, test_sys)
if len(bm) == 2:
if buildEnv['TARGET_ISA'] == 'alpha':
drive_sys = makeLinuxAlphaSystem(drive_mem_mode, bm[1])
elif buildEnv['TARGET_ISA'] == 'mips':
drive_sys = makeLinuxMipsSystem(drive_mem_mode, bm[1])
elif buildEnv['TARGET_ISA'] == 'sparc':
drive_sys = makeSparcSystem(drive_mem_mode, bm[1])
elif buildEnv['TARGET_ISA'] == 'x86':
drive_sys = makeX86System(drive_mem_mode, np, bm[1])
elif buildEnv['TARGET_ISA'] == 'arm':
drive_sys = makeArmSystem(drive_mem_mode, options.machine_type, bm[1])
drive_sys.cpu = DriveCPUClass(cpu_id=0)
drive_sys.cpu.createThreads()
drive_sys.cpu.createInterruptController()
drive_sys.cpu.connectAllPorts(drive_sys.membus)
if options.fastmem:
drive_sys.cpu.fastmem = True
if options.kernel is not None:
drive_sys.kernel = binary(options.kernel)
drive_sys.iobridge = Bridge(delay='50ns',
ranges = drive_sys.mem_ranges)
drive_sys.iobridge.slave = drive_sys.iobus.master
drive_sys.iobridge.master = drive_sys.membus.slave
drive_sys.init_param = options.init_param
root = makeDualRoot(True, test_sys, drive_sys, options.etherdump)
elif len(bm) == 1:
root = Root(full_system=True, system=test_sys)
else:
print "Error I don't know how to create more than 2 systems."
sys.exit(1)
if options.timesync:
root.time_sync_enable = True
if options.frame_capture:
VncServer.frame_capture = True
Simulation.setWorkCountOptions(test_sys, options)
Simulation.run(options, root, test_sys, FutureClass)
| 36.984127 | 129 | 0.739056 |
73f72eef8e307eded5a16d3708e44139462caf7d | 2,138 | py | Python | python/tests/test_parameter_mapping.py | kristianmeyerr/AMICI | 15f14c24b781daf5ceb3606d79edbbf57155a043 | [
"CC0-1.0"
] | null | null | null | python/tests/test_parameter_mapping.py | kristianmeyerr/AMICI | 15f14c24b781daf5ceb3606d79edbbf57155a043 | [
"CC0-1.0"
] | null | null | null | python/tests/test_parameter_mapping.py | kristianmeyerr/AMICI | 15f14c24b781daf5ceb3606d79edbbf57155a043 | [
"CC0-1.0"
] | null | null | null | """Test for ``amici.parameter_mapping``"""
import os
import pytest
from amici.parameter_mapping import (ParameterMapping,
ParameterMappingForCondition)
@pytest.mark.skipif(os.environ.get('GITHUB_JOB') == 'valgrind',
reason="Python-only")
def test_parameter_mapping_for_condition_default_args():
"""Check we can initialize the mapping with default arguments."""
par_map_for_condition = ParameterMappingForCondition()
for attr in [
'map_sim_var', 'scale_map_sim_var', 'map_preeq_fix',
'scale_map_preeq_fix', 'map_sim_fix', 'scale_map_sim_fix']:
assert not getattr(par_map_for_condition, attr)
map_sim_var = {'sim_par0': 8, 'sim_par1': 'opt_par0'}
map_preeq_fix = {'sim_par2': 'opt_par1'}
map_sim_fix = {'sim_par2': 'opt_par2'}
par_map_for_condition = ParameterMappingForCondition(
map_sim_var=map_sim_var, map_preeq_fix=map_preeq_fix,
map_sim_fix=map_sim_fix)
expected_scale_map_sim_var = {'sim_par0': 'lin', 'sim_par1': 'lin'}
expected_scale_map_preeq_fix = {'sim_par2': 'lin'}
expected_scale_map_sim_fix = {'sim_par2': 'lin'}
assert par_map_for_condition.scale_map_sim_var == \
expected_scale_map_sim_var
assert par_map_for_condition.scale_map_preeq_fix == \
expected_scale_map_preeq_fix
assert par_map_for_condition.scale_map_sim_fix == \
expected_scale_map_sim_fix
@pytest.mark.skipif(os.environ.get('GITHUB_JOB') == 'valgrind',
reason="Python-only")
def test_parameter_mapping():
"""Test :class:``amici.parameter_mapping.ParameterMapping``."""
parameter_mapping = ParameterMapping()
assert len(parameter_mapping) == 0
map_sim_var = {'sim_par0': 8, 'sim_par1': 'opt_par0'}
map_preeq_fix = {'sim_par2': 'opt_par1'}
map_sim_fix = {'sim_par2': 'opt_par2'}
par_map_for_condition = ParameterMappingForCondition(
map_sim_var=map_sim_var, map_preeq_fix=map_preeq_fix,
map_sim_fix=map_sim_fix)
parameter_mapping.append(par_map_for_condition)
assert len(parameter_mapping) == 1
| 36.862069 | 71 | 0.702058 |
73f73d9ea76d780eb34827589821d0ebfc6ba257 | 27 | py | Python | newsroom/evaluate/__init__.py | peter-xbs/newsroom_chinese | 7fcae68b2ea5584d08d0c48faee34a0734237e6b | [
"Apache-2.0"
] | 82 | 2018-05-01T16:32:38.000Z | 2019-05-18T01:43:15.000Z | newsroom/evaluate/__init__.py | peter-xbs/newsroom_chinese | 7fcae68b2ea5584d08d0c48faee34a0734237e6b | [
"Apache-2.0"
] | 20 | 2018-05-01T19:32:48.000Z | 2019-04-12T07:57:48.000Z | newsroom/evaluate/__init__.py | peter-xbs/newsroom_chinese | 7fcae68b2ea5584d08d0c48faee34a0734237e6b | [
"Apache-2.0"
] | 15 | 2018-05-01T17:34:11.000Z | 2019-05-07T09:28:21.000Z | from .read import readiter
| 13.5 | 26 | 0.814815 |
73f7560df916fd96100dd4a83283eda472bb15a6 | 3,274 | py | Python | hatchet/util/profiler.py | TauferLab/llnl-hatchet | c7d12888d71d2b23058facd3025e7dcfa12cbb39 | [
"MIT"
] | null | null | null | hatchet/util/profiler.py | TauferLab/llnl-hatchet | c7d12888d71d2b23058facd3025e7dcfa12cbb39 | [
"MIT"
] | null | null | null | hatchet/util/profiler.py | TauferLab/llnl-hatchet | c7d12888d71d2b23058facd3025e7dcfa12cbb39 | [
"MIT"
] | null | null | null | # Copyright 2017-2022 Lawrence Livermore National Security, LLC and other
# Hatchet Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: MIT
import cProfile
import traceback
import sys
import os
from datetime import datetime
try:
from StringIO import StringIO # python2
except ImportError:
from io import StringIO # python3
import pstats
def print_incomptable_msg(stats_file):
"""
Function which makes the syntax cleaner in Profiler.write_to_file().
"""
errmsg = """ Incompatible pstats file: {}\n Please run your code in Python {} to read in this file. """
if sys.version_info[0] == 2:
print(errmsg.format(stats_file, 3))
if sys.version_info[0] == 3:
print(errmsg.format(stats_file, 2.7))
traceback.print_exc()
# class for profiling
class Profiler:
"""
Wrapper class around cProfile.
Exports a pstats file to be read by the pstats reader.
"""
def __init__(self):
self._prf = cProfile.Profile()
self._output = "hatchet-profile"
self._active = False
def start(self):
"""
Description: Place before the block of code to be profiled.
"""
if self._active:
print(
"Start dissallowed in scope where profiler is running. Please add Profiler.stop() before attempting start."
)
raise
self._active = True
self._prf.enable()
def stop(self):
"""
Description: Place at the end of the block of code being profiled.
"""
self._active = False
self._prf.disable()
self.write_to_file()
def reset(self):
"""
        Description: Resets the profiler.
"""
if self._active:
print(
"Reset dissallowed in scope where profiler is running. Please add Profiler.stop() before attempting reset."
)
raise
self._prf = cProfile.Profile()
def __str__(self):
"""
Description: Writes stats object out as a string.
"""
s = StringIO()
pstats.Stats(self._prf, stream=s).print_stats()
return s.getvalue()
def write_to_file(self, filename="", add_pstats_files=[]):
"""
Description: Write the pstats object to a binary
file to be read in by an appropriate source.
"""
sts = pstats.Stats(self._prf)
if len(add_pstats_files) > 0:
for stats_file in add_pstats_files:
try:
sts.add(stats_file)
except ValueError:
print_incomptable_msg(stats_file)
raise
if filename == "":
if os.path.exists(self._output + ".pstats"):
now = datetime.now().strftime("%H%M%S")
self.write_to_file(
"{}_{}.pstats".format(self._output, now), add_pstats_files
)
else:
sts.dump_stats(self._output + ".pstats")
else:
if os.path.exists(filename):
now = datetime.now().strftime("%H%M%S")
filename = "{}_{}.pstats".format(filename, now)
sts.dump_stats(filename)
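# Example usage (illustrative, not part of the original module): wrap the code
# to be measured between start() and stop(); stop() also writes the .pstats
# file via write_to_file() using the default "hatchet-profile" prefix.
if __name__ == "__main__":
    profiler = Profiler()
    profiler.start()
    sum(i * i for i in range(100000))  # stand-in for the profiled workload
    profiler.stop()
    print(profiler)  # __str__ renders the collected pstats as text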
| 28.719298 | 123 | 0.57697 |
73f782685038d2cd750e41a555509344e9e47c66 | 14,230 | py | Python | lung_segmentation/generators.py | TransRadOnc-HIT/lung_segmentation | 1637709936dd0172fb6151902f4e9bede7a66e79 | [
"Apache-2.0"
] | 2 | 2020-06-12T13:32:31.000Z | 2021-07-30T16:11:50.000Z | lung_segmentation/generators.py | TransRadOnc-HIT/lung_segmentation | 1637709936dd0172fb6151902f4e9bede7a66e79 | [
"Apache-2.0"
] | null | null | null | lung_segmentation/generators.py | TransRadOnc-HIT/lung_segmentation | 1637709936dd0172fb6151902f4e9bede7a66e79 | [
"Apache-2.0"
] | 2 | 2020-08-04T14:40:33.000Z | 2021-01-22T13:05:22.000Z | import numpy as np
import nibabel as nib
import os
import glob
from lung_segmentation.utils import normalize
import nrrd
import multiprocessing
import collections
import math
import sys
import traceback
import threading
import scipy.misc
if sys.version_info[0] == 2:
import Queue as queue
string_classes = basestring
else:
import queue
string_classes = (str, bytes)
def load_data_2D(data_dir, data_type, data_list=[], array=None, mb=None, bs=None, init=None, prediction=False,
                 img_size=(192, 192), patch_size=(96, 96), binarize=False, normalization=True, result_dict=None):
if array is not None:
data_list = [1]
else:
if data_list:
data_list = data_list
elif bs is not None and init is not None:
data_list = sorted(glob.glob(os.path.join(data_dir, data_type)))[init:bs]
else:
data_list = sorted(glob.glob(os.path.join(data_dir, data_type)))
patch_width = patch_size[0]
patch_height = patch_size[1]
dx = img_size[0] if img_size[0] >= patch_width else patch_width
dy = img_size[1] if img_size[1] >= patch_height else patch_height
if len(mb) < 2:
mb.append(dx//patch_width)
if len(mb) < 2:
mb.append(dy//patch_height)
diffX = dx - patch_width if dx - patch_width != 0 else dx
diffY = dy - patch_height if dy - patch_height != 0 else dy
overlapX = diffX//(mb[0]-1) if not dx % patch_width and mb[0] > 1 else diffX//(mb[0])
overlapY = diffY//(mb[1]-1) if not dy % patch_height and mb[1] > 1 else diffY//(mb[1])
indX = 0
xx = []
while indX+patch_width <= dx:
xx.append([indX, indX+patch_width])
indX = indX + overlapX
indY = 0
yy = []
while indY+patch_height <= dy:
yy.append([indY, indY+patch_height])
indY = indY + overlapY
final_array = None
for index in range(len(data_list)):
if array is not None:
array_orig = array
else:
data_path = data_list[index]
array_orig, _ = nrrd.read(data_path)
        if normalization:
            try:
                array_orig = normalize(array_orig, method='0-1')
            except Exception:
                print('Warning: normalization failed; leaving intensities unchanged.')
if binarize:
array_orig[array_orig != 0] = 1
original_size = array_orig.shape
if img_size[0] < patch_width or img_size[1] < patch_height:
delta_x = (patch_width - img_size[0]) if (img_size[0] < patch_width) else 0
delta_y = (patch_height - img_size[1]) if img_size[1] < patch_height else 0
new_x = patch_width if (img_size[0] < patch_width) else img_size[0]
new_y = patch_height if (img_size[1] < patch_height) else img_size[1]
if len(array_orig.shape) == 3:
temp = np.zeros([new_x, new_y, array_orig.shape[2]])
temp[delta_x:, delta_y:, :] = array_orig
else:
                try:
                    temp = np.zeros([new_x, new_y])
                    temp[delta_x:, delta_y:] = array_orig
                except Exception:
                    print('Warning: could not embed the 2D array into the '
                          'padded patch grid; a zero-filled array will be used.')
array_orig = temp
else:
delta_x = 0
delta_y = 0
data_array = [array_orig[i[0]:i[1], j[0]:j[1]] for j in yy for i in xx]
data_array = np.asarray(data_array, dtype=np.float16)
# if normalization:
# data_array = normalize(data_array, method='0-1')
# if binarize:
# data_array[data_array != 0] = 1
arrays = data_array.reshape((-1, patch_width, patch_height, 1))
if final_array is not None:
final_array = np.concatenate([final_array, arrays], axis=0)
else:
final_array = arrays
        if result_dict is None:
            result_dict = {}
        # Reuse the same dictionary across iterations so that information for
        # every input ends up in the returned results, not just the last one.
        results_dict = result_dict
if prediction:
results_dict[index] = {}
results_dict[index]['image_dim'] = original_size
results_dict[index]['indexes'] = [xx, yy]
# results_dict[index]['im_size'] = [dx, dy]
results_dict[index]['deltas'] = [delta_x, delta_y]
results_dict[index]['patches'] = final_array.shape[0]
return final_array, results_dict
_use_shared_memory = False
"""Whether to use shared memory in default_collate"""
class ExceptionWrapper(object):
"Wraps an exception plus traceback to communicate across threads"
def __init__(self, exc_info):
self.exc_type = exc_info[0]
self.exc_msg = "".join(traceback.format_exception(*exc_info))
def _worker_loop(dataset, index_queue, data_queue, collate_fn):
global _use_shared_memory
_use_shared_memory = True
while True:
r = index_queue.get()
if r is None:
data_queue.put(None)
break
idx, batch_indices = r
try:
samples = collate_fn([dataset[i] for i in batch_indices])
except Exception:
data_queue.put((idx, ExceptionWrapper(sys.exc_info())))
else:
data_queue.put((idx, samples))
def default_collate(batch):
"""Puts each data field into a tensor with outer dimension batch size"""
if type(batch[0]).__module__ == 'numpy':
elem = batch[0]
if type(elem).__name__ == 'ndarray':
return np.stack([b for b in batch], 0)
if elem.shape == (): # scalars
py_type = float if elem.dtype.name.startswith('float') else int
return np.array(list(map(py_type, batch)))
elif isinstance(batch[0], int):
return np.array(batch).astype('uint8')
elif isinstance(batch[0], float):
return np.array(batch).astype('float32')
elif isinstance(batch[0], string_classes):
return batch
elif isinstance(batch[0], collections.Mapping):
return {key: default_collate([d[key] for d in batch]) for key in batch[0]}
elif isinstance(batch[0], collections.Sequence):
transposed = zip(*batch)
return [default_collate(samples) for samples in transposed]
raise TypeError(("batch must contain tensors, numbers, dicts or lists; found {}"
.format(type(batch[0]))))
class DataLoaderIter(object):
"Iterates once over the DataLoader's dataset, as specified by the sampler"
def __init__(self, loader):
self.loader = loader
self.dataset = loader.dataset
self.batch_size = loader.batch_size
self.collate_fn = loader.collate_fn
self.sampler = loader.sampler
self.num_workers = loader.num_workers
self.done_event = threading.Event()
self.samples_remaining = len(self.sampler)
self.sample_iter = iter(self.sampler)
if self.num_workers > 0:
self.index_queue = multiprocessing.SimpleQueue()
self.data_queue = multiprocessing.SimpleQueue()
self.batches_outstanding = 0
self.shutdown = False
self.send_idx = 0
self.rcvd_idx = 0
self.reorder_dict = {}
self.workers = [
multiprocessing.Process(
target=_worker_loop,
args=(self.dataset, self.index_queue, self.data_queue, self.collate_fn))
for _ in range(self.num_workers)]
for w in self.workers:
w.daemon = True # ensure that the worker exits on process exit
w.start()
# prime the prefetch loop
for _ in range(2 * self.num_workers):
self._put_indices()
def __len__(self):
return (len(self.sampler) + self.batch_size - 1) // self.batch_size
def __next__(self):
if self.num_workers == 0: # same-process loading
if self.samples_remaining == 0:
if self.loader.sample_forever:
self.__init__(self.loader)
else:
raise StopIteration
indices = self._next_indices()
batch = self.collate_fn([self.dataset[i] for i in indices])
return batch
# check if the next sample has already been generated
if self.rcvd_idx in self.reorder_dict:
batch = self.reorder_dict.pop(self.rcvd_idx)
return self._process_next_batch(batch)
if self.batches_outstanding == 0:
if self.loader.sample_forever:
self._shutdown_workers()
self.__init__(self.loader)
else:
self._shutdown_workers()
raise StopIteration
while True:
assert (not self.shutdown and self.batches_outstanding > 0)
idx, batch = self.data_queue.get()
self.batches_outstanding -= 1
if idx != self.rcvd_idx:
# store out-of-order samples
self.reorder_dict[idx] = batch
continue
return self._process_next_batch(batch)
next = __next__ # Python 2 compatibility
def __iter__(self):
return self
def _next_indices(self):
batch_size = min(self.samples_remaining, self.batch_size)
batch = [next(self.sample_iter) for _ in range(batch_size)]
self.samples_remaining -= len(batch)
return batch
def _put_indices(self):
assert self.batches_outstanding < 2 * self.num_workers
if self.samples_remaining > 0:
self.index_queue.put((self.send_idx, self._next_indices()))
self.batches_outstanding += 1
self.send_idx += 1
def _process_next_batch(self, batch):
self.rcvd_idx += 1
self._put_indices()
if isinstance(batch, ExceptionWrapper):
raise batch.exc_type(batch.exc_msg)
return batch
def __getstate__(self):
# TODO: add limited pickling support for sharing an iterator
# across multiple threads for HOGWILD.
# Probably the best way to do this is by moving the sample pushing
# to a separate thread and then just sharing the data queue
# but signalling the end is tricky without a non-blocking API
raise NotImplementedError("DataLoaderIterator cannot be pickled")
def _shutdown_workers(self):
if not self.shutdown:
self.shutdown = True
self.done_event.set()
for _ in self.workers:
self.index_queue.put(None)
def __del__(self):
if self.num_workers > 0:
self._shutdown_workers()
class Sampler(object):
"""Base class for all Samplers.
Every Sampler subclass has to provide an __iter__ method, providing a way
to iterate over indices of dataset elements, and a __len__ method that
returns the length of the returned iterators.
"""
def __init__(self, data_source):
pass
def __iter__(self):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
class SequentialSampler(Sampler):
"""Samples elements sequentially, always in the same order.
Arguments:
data_source (Dataset): dataset to sample from
"""
def __init__(self, num_samples, data_samples=None):
self.num_samples = num_samples
self.data_samples = data_samples if data_samples is not None else num_samples
self.n_repeats = math.ceil(self.num_samples / self.data_samples)
def __iter__(self):
return iter(np.tile(np.arange(self.data_samples,dtype='uint8'),self.n_repeats))
def __len__(self):
return self.num_samples
class RandomSampler(Sampler):
def __init__(self, num_samples, data_samples):
self.num_samples = num_samples
self.data_samples = data_samples if data_samples is not None else num_samples
self.n_repeats = math.ceil(self.num_samples / self.data_samples)
def __iter__(self):
return iter(np.tile(np.random.permutation(self.data_samples).astype('uint8'),self.n_repeats))
def __len__(self):
return self.num_samples
class DataLoader(object):
"""
Data loader. Combines a dataset and a sampler, and provides
single- or multi-process iterators over the dataset.
Arguments:
dataset (Dataset): dataset from which to load the data.
batch_size (int, optional): how many samples per batch to load
(default: 1).
shuffle (bool, optional): set to ``True`` to have the data reshuffled
at every epoch (default: False).
sampler (Sampler, optional): defines the strategy to draw samples from
the dataset. If specified, the ``shuffle`` argument is ignored.
num_workers (int, optional): how many subprocesses to use for data
loading. 0 means that the data will be loaded in the main process
(default: 0)
collate_fn (callable, optional)
"""
def __init__(self, dataset, batch_size=1, shuffle=False, sampler=None,
num_workers=0, sample_forever=True, collate_fn=default_collate,
max_epoch=500):
self.dataset = dataset
self.batch_size = batch_size
self.num_workers = num_workers
self.collate_fn = collate_fn
self.sample_forever = sample_forever
num_samples = len(dataset) #if num_workers == 0 else int(len(dataset)*max_epoch)
if shuffle:
self.sampler = RandomSampler(num_samples, len(dataset))
else:
self.sampler = SequentialSampler(num_samples, len(dataset))
def __iter__(self):
return DataLoaderIter(self)
def __len__(self):
return (len(self.sampler) + self.batch_size - 1) // self.batch_size
def write_a_batch(self, save_dir):
myiter = iter(self)
x, y = myiter.next()
if not os.path.exists(save_dir):
try:
os.mkdir(save_dir)
except:
pass
else:
try:
os.rmdir(save_dir)
except:
pass
for i in range(len(x)):
xx = x[i]
yy = y[i]
scipy.misc.imsave(os.path.join(save_dir,'%3i_x.jpg'%i), np.squeeze(xx))
scipy.misc.imsave(os.path.join(save_dir,'%3i_y.jpg'%i), np.squeeze(yy))
| 34.206731 | 113 | 0.608082 |
73f7906b0d796a1c5e4bd14100a8e8809cc61766 | 2,483 | py | Python | test/test_analysis.py | cslotboom/planesections | fcb2127a38a78b45de44b75f5805efde89adf5d2 | [
"Apache-2.0"
] | 1 | 2021-11-12T08:36:31.000Z | 2021-11-12T08:36:31.000Z | test/test_analysis.py | cslotboom/planesections | fcb2127a38a78b45de44b75f5805efde89adf5d2 | [
"Apache-2.0"
] | null | null | null | test/test_analysis.py | cslotboom/planesections | fcb2127a38a78b45de44b75f5805efde89adf5d2 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
R"""
Created on Sun May 23 01:00:41 2021
@author: Christian
"""
from planesections import EulerBeam, OpenSeesAnalyzer, RecordOutput
# from planesections import EulerBeam
import numpy as np
import openseespy.opensees as op
x = np.array([0,5])
fixities = [np.array([1,1,1], int), np.array([1,1,1], int)]
pointBeam = EulerBeam(x, fixities)
P = np.array([0.,1000.,0.])
pointBeam.addPointLoad(2.5, -P)
q = np.array([0., -1000.])
distBeam = EulerBeam(x, fixities)
distBeam.addDistLoad(0.,5.,q)
distBeamAnalysis = OpenSeesAnalyzer(distBeam)
beam = EulerBeam(x, fixities)
beam.addDistLoad(0.,5.,q)
beam.addPointLoad(2.5, -P)
analysis = OpenSeesAnalyzer(beam)
def test_nodes():
pointBeamAnalysis = OpenSeesAnalyzer(pointBeam)
pointBeamAnalysis.initModel()
pointBeamAnalysis.buildNodes()
out = op.nodeCoord(1)[0]
assert out == 0
def test_EulerElements():
pointBeamAnalysis = OpenSeesAnalyzer(pointBeam)
pointBeamAnalysis.initModel()
pointBeamAnalysis.buildNodes()
pointBeamAnalysis.buildEulerBeams()
out = op.eleNodes(1)
assert out == [1,2]
def test_node_loads():
pointBeamAnalysis = OpenSeesAnalyzer(pointBeam)
pointBeamAnalysis.initModel()
pointBeamAnalysis.buildNodes()
pointBeamAnalysis.buildEulerBeams()
pointBeamAnalysis.buildPointLoads()
pointBeamAnalysis.buildAnalysisPropreties()
pointBeamAnalysis.analyze()
assert op.nodeReaction(3) == [0,500,-625]
def test_ele_loads():
distBeamAnalysis.initModel()
distBeamAnalysis.buildNodes()
distBeamAnalysis.buildEulerBeams()
distBeamAnalysis.buildEleLoads()
# op.eleLoad('-ele', 1, '-type', '-beamUniform', 1., 1.)
# op.eleLoad('-ele', 1, '-type', '-beamUniform',1000.,1000.)
distBeamAnalysis.buildAnalysisPropreties()
distBeamAnalysis.analyze()
# print(op.nodeReaction(2))
solution = np.array([0,2500,-2083.333333333])
assert np.sum(np.abs(op.nodeReaction(2) - solution)) < 0.001
def test_full_beam():
analysis.runAnalysis()
solution1 = np.array([0,2500,-2083.333333333])
solution2 = np.array([0,500,-625])
solution = solution1 + solution2
assert np.sum(np.abs(op.nodeReaction(3) - solution)) < 0.001
def test_record_output():
analysis.runAnalysis()
RecordOutput(beam)
test_nodes()
test_EulerElements()
test_node_loads()
test_ele_loads()
test_full_beam()
test_record_output() | 25.336735 | 67 | 0.697946 |
73f7c12bfac2da04fb10d8b5525fbf9e650f88e8 | 25,126 | py | Python | pepperoni/logger.py | t3eHawk/pepperoni | 06b7ff9005bd1b7ed434ae286c2698d9205df718 | [
"MIT"
] | null | null | null | pepperoni/logger.py | t3eHawk/pepperoni | 06b7ff9005bd1b7ed434ae286c2698d9205df718 | [
"MIT"
] | null | null | null | pepperoni/logger.py | t3eHawk/pepperoni | 06b7ff9005bd1b7ed434ae286c2698d9205df718 | [
"MIT"
] | null | null | null | """Logging tools."""
import atexit
import datetime as dt
import os
import sys
import traceback
from .cache import all_loggers
from .formatter import Formatter
from .header import Header
from .output import Root
from .record import Record
class Logger():
"""Represents a particular logger object.
    This class allows you to perform advanced logging to different outputs:
    console, file, email, database table, HTML document - using information
    from different inputs: user messages, traceback, frames, user parameters
    and system descriptors.
    Each logger must have a unique name which will help to identify it.
    The main application logger will have the same name as the main python
    script file.
Parameters
----------
name : str, optional
        The argument is used to define the `name` attribute.
app : str, optional
The argument is used to set the `app` attribute.
desc : str, optional
The argument is used to set the `desc` attribute.
version : str, optional
The argument is used to set the `version` attribute.
status : bool, optional
The argument is used to open or close output `root`.
console : bool, optional
The argument is used to open or close output `console`.
file : bool, optional
The argument is used to open or close output `file`.
email : bool, optional
The argument is used to open or close output `email`.
html : bool, optional
The argument is used to open or close output `html`.
table : bool, optional
The argument is used to open or close output `table`.
directory : str, optional
The argument is used to set logging file folder.
filename : str, optional
The argument is used to set logging file name.
extension : str, optional
The argument is used to set logging file extension.
smtp : dict, optional
The argument is used to configure SMTP connection.
db : dict, optional
The argument is used to configure DB connection.
format : str, optional
The argument is used to set record template.
info : bool, optional
The argument is used to filter info records. The default is True.
debug : bool, optional
The argument is used to filter debug records. The default is False.
warning : bool, optional
The argument is used to filter warning records. The default is True.
error : bool, optional
The argument is used to filter error records. The default is True.
critical : bool, optional
The argument is used to filter critical records. The default is True.
alarming : bool, optional
The argument is used to enable or disable alarming mechanism. The
default is True.
control : bool, optional
The argument is used to enable or disable execution break in case
        of error. The default is True.
maxsize : int or bool, optional
The argument is used to define maximum size of output file. Must be
presented as number of bytes. The default is 10 Mb.
maxdays : int or bool, optional
The argument is used to define maximum number of days that will be
logged to same file. The default is 1 which means that new output file
will be opened at each 00:00:00.
maxlevel : int or bool, optional
The argument is used to define the break error level (WARNING = 0,
        ERROR = 1, CRITICAL = 2). Anything at or above the break level will
        interrupt application execution. The default is 2.
    maxerrors : int or bool, optional
        The argument is used to define the maximum number of errors. The default
is False which means it is disabled.
Attributes
----------
name : str
Name of the logger.
app : str
Name of the application that we are logging.
desc : str
Description of the application that we are logging.
version : str
Version of the application that we are logging.
start_date : datetime.datetime
Date when logging was started.
rectypes : dict
All available record types. Keys are used in `Logger` write methods as
`rectype` argument. Values are used in formatting. So if you wish to
modify `rectype` form then edit appropriate one here. If you wish to
use own record types then just add it to that dictinary. By default we
provide the next few record types:
+---------+---------+
| Key | Value |
+=========+=========+
|none |NONE |
+---------+---------+
|info |INFO |
+---------+---------+
|debug |DEBUG |
+---------+---------+
|warning |WARNING |
+---------+---------+
|error |ERROR |
+---------+---------+
|critical |CRITICAL |
+---------+---------+
messages : dict
Messages that are printed with some `Logger` methods like `ok()`,
        `success()`, `fail()`. If you wish to modify the text of these messages
        just edit the value of the appropriate item.
    with_error : bool
        The flag shows that the logger caught errors in the application during
        its execution.
    count_errors : int
        Number of errors that the logger caught in the application during its
        execution.
filters : dict
        Record type filters. To filter out a record type just set the
        corresponding item value to False.
root : pepperoni.output.Root
The output `Root` object.
console : pepperoni.output.Console
The output `Console` object. Shortcut for `Logger.root.console`.
file : pepperoni.output.File
        The output file. Shortcut for `Logger.root.file`.
formatter : pepperoni.formatter.Formatter
Logger formatter which sets all formatting configuration like
record template, error message and traceback templates, line length
etc.
header : pepperoni.header.Header
The header that can be printed to the writable output.
"""
def __init__(self, name=None, app=None, desc=None, version=None,
status=True, console=True, file=True, email=False, html=False,
table=False, directory=None, filename=None, extension=None,
smtp=None, db=None, format=None, info=True, debug=False,
warning=True, error=True, critical=True, alarming=True,
control=True, maxsize=(1024*1024*10), maxdays=1, maxlevel=2,
maxerrors=False):
# Unique name of the logger.
self._name = name
        # Register the newly created logger in the special all_loggers dictionary.
all_loggers[self._name] = self
# Attributes describing the application.
self.app = None
self.desc = None
self.version = None
# Some logger important attributes
self.start_date = dt.datetime.now()
self.rectypes = {'none': 'NONE',
'info': 'INFO',
'debug': 'DEBUG',
'warning': 'WARNING',
'error': 'ERROR',
'critical': 'CRITICAL'}
self.messages = {'ok': 'OK',
'success': 'SUCCESS',
'fail': 'FAIL'}
self._with_error = False
self._count_errors = 0
self._all_errors = []
# Complete the initial configuration.
self.configure(app=app, desc=desc, version=version, status=status,
console=console, file=file, email=email, html=html,
table=table, directory=directory, filename=filename,
extension=extension, smtp=smtp, db=db, format=format,
info=info, debug=debug, warning=warning, error=error,
critical=critical, alarming=alarming, control=control,
maxsize=maxsize, maxdays=maxdays, maxlevel=maxlevel,
maxerrors=maxerrors)
# Output shortcuts.
self.console = self.root.console
self.file = self.root.file
# Set exit function.
atexit.register(self._exit)
pass
def __repr__(self):
"""Get this Logger string representation."""
return f'<Logger "{self._name}">'
@property
def name(self):
"""Get the unique logger name."""
return self._name
@property
def with_error(self):
"""Get the flag indicating whether an error has occurred or not."""
return self._with_error
@property
def count_errors(self):
"""Get the number of occurred errors."""
return self._count_errors
@property
def all_errors(self):
"""Get the list with all currently met errors."""
return self._all_errors
def configure(self, app=None, desc=None, version=None, status=None,
console=None, file=None, email=None, html=None, table=None,
directory=None, filename=None, extension=None, smtp=None,
db=None, format=None, info=None, debug=None, warning=None,
error=None, critical=None, alarming=None, control=None,
maxsize=None, maxdays=None, maxlevel=None, maxerrors=None):
"""Configure this particular Logger.
        This is the only right way to customize Logger. Parameters are the
        same as for instance creation.
Parameters
----------
app : str, optional
The argument is used to set the `app` attribute.
desc : str, optional
The argument is used to set the `desc` attribute.
version : str, optional
The argument is used to set the `version` attribute.
status : bool, optional
The argument is used to open or close output `root`.
console : bool, optional
The argument is used to open or close output `console`.
file : bool, optional
The argument is used to open or close output `file`.
email : bool, optional
The argument is used to open or close output `email`.
html : bool, optional
The argument is used to open or close output `html`.
table : bool, optional
The argument is used to open or close output `table`.
directory : str, optional
The argument is used to set logging file folder.
filename : str, optional
The argument is used to set logging file name.
extension : str, optional
The argument is used to set logging file extension.
smtp : dict, optional
The argument is used to configure SMTP connection.
db : dict, optional
The argument is used to configure DB connection.
format : str, optional
The argument is used to set record template.
info : bool, optional
The argument is used to filter info records.
debug : bool, optional
The argument is used to filter debug records.
warning : bool, optional
The argument is used to filter warning records.
error : bool, optional
The argument is used to filter error records.
critical : bool, optional
The argument is used to filter critical records.
alarming : bool, optional
The argument is used to enable or disable alarming mechanism.
control : bool, optional
The argument is used to enable or disable execution break in case
            of error.
maxsize : int or bool, optional
The argument is used to define maximum size of output file.
maxdays : int or bool, optional
The argument is used to define maximum number of days that will be
logged to same file.
maxlevel : int or bool, optional
The argument is used to define the break error level.
maxerrors : int or bool, optional
            The argument is used to define the maximum number of errors.
"""
if isinstance(app, str) is True:
self.app = app
if isinstance(desc, str) is True:
self.desc = desc
if isinstance(version, (str, int, float)) is True:
self.version = version
        # Create the formatter in case it does not exist yet, or just customize it.
        # The format parameter can be either a string or a dictionary.
        # When it is a string it must describe the record format.
        # When it is a dictionary it can contain any formatter parameter
        # that must be customized.
if isinstance(format, str) is True:
format = {'record': format}
if hasattr(self, 'formatter') is False:
format = {} if isinstance(format, dict) is False else format
self.formatter = Formatter(**format)
elif isinstance(format, dict) is True:
self.formatter.configure(**format)
# Create or customize record type filters.
if hasattr(self, 'filters') is False:
self.filters = {}
for key, value in {'info': info, 'debug': debug, 'error': error,
'warning': warning, 'critical': critical}.items():
if isinstance(value, bool) is True:
self.filters[key] = value
        # Build the output root if it does not exist. Otherwise modify
        # the existing output if requested.
if hasattr(self, 'root') is False:
self.root = Root(self, console=console, file=file, email=email,
html=html, table=table, status=status,
directory=directory, filename=filename,
extension=extension, smtp=smtp, db=db)
else:
for key, value in {'console': console, 'file': file,
'email': email, 'html': html,
'table': table}.items():
if value is True:
getattr(self.root, key).open()
if key == 'file':
getattr(self.root, key).new()
elif value is False:
getattr(self.root, key).close()
# Customize output file path.
path = {}
if directory is not None:
path['dir'] = directory
if filename is not None:
path['name'] = filename
if extension is not None:
path['ext'] = extension
if len(path) > 0:
self.root.file.configure(**path)
# Customize SMTP server.
if isinstance(smtp, dict) is True:
self.root.email.configure(**smtp)
# Customize database connection.
if isinstance(db, dict) is True:
self.root.table.configure(**db)
# Customize limits and parameters of execution behaviour.
if isinstance(maxsize, (int, float, bool)) is True:
self._maxsize = maxsize
if isinstance(maxdays, (int, float, bool)) is True:
self._maxdays = maxdays
self.__calculate_restart_date()
if isinstance(maxlevel, (int, float, bool)) is True:
self._maxlevel = maxlevel
if isinstance(maxerrors, (int, float, bool)) is True:
self._maxerrors = maxerrors
if isinstance(alarming, bool) is True:
self._alarming = alarming
if isinstance(control, bool) is True:
self._control = control
        # Initialize the header instance when it does not exist.
if hasattr(self, 'header') is False:
self.header = Header(self)
pass
def write(self, record):
"""Direct write to the output.
Parameters
----------
record : Record
The argument is used to send it to the output `root`.
"""
self.__check_file_stats()
self.root.write(record)
pass
def record(self, rectype, message, error=False, **kwargs):
"""Generate output record.
Parameters
----------
rectype : str
            By default the method creates the record with the type NONE.
            That can be changed but depends on the available record types.
            All registered record types are stored in the instance attribute
            rectypes. If you wish to use your own record type or change the
            presentation of an existing one then edit this dictionary.
message : str
The message that must be written.
error : bool, optional
If record is error then set that parameter to `True`.
**kwargs
The keyword arguments used for additional forms (variables) for
record and message formatting.
"""
if self.filters.get(rectype, True) is True:
record = Record(self, rectype, message, error=error, **kwargs)
self.write(record)
pass
def info(self, message, **kwargs):
"""Send INFO record to output."""
rectype = 'info'
self.record(rectype, message, **kwargs)
pass
def debug(self, message, **kwargs):
"""Send DEBUG record to the output."""
rectype = 'debug'
self.record(rectype, message, **kwargs)
pass
def error(self, message=None, rectype='error', format=None, alarming=False,
level=1, **kwargs):
"""Send ERROR record to the output.
        If an exception exists in the current traceback then the method will
        format it according to the `formatter.error` string presentation. If
        `formatter.error` is set to `False` the exception is printed in the
        original Python style.
        The method will also send an alarm if the alarming attribute is `True`,
        email output is enabled and the SMTP server is configured.
        If one of the limit triggers fires then the application will be aborted.
Parameters
----------
message : str, optional
The message that must be written instead of exception.
rectype : str, optional
The type of error according to `rectypes` dictionary.
format : str, optional
The format of the error message.
alarming : bool
The argument is used to enable or disable the alarming mechanism
for this certain call.
level : int
The argument is used to describe the error level.
**kwargs
The keyword arguments used for additional forms (variables) for
record and message formatting.
"""
# Parse the error.
err_type, err_value, err_tb = sys.exc_info()
self._with_error = True
self._count_errors += 1
self._all_errors.append((err_type, err_value, err_tb))
if message is None and err_type is not None:
format = self.formatter.error if format is None else format
if isinstance(format, str) is True:
err_name = err_type.__name__
err_value = err_value
err_traceback = []
raw = self.formatter.traceback
for tb in traceback.walk_tb(err_tb):
f_code = tb[0].f_code
err_file = os.path.abspath(f_code.co_filename)
err_line = tb[1]
err_obj = f_code.co_name
new = raw.format(file=err_file, line=err_line, obj=err_obj)
err_traceback.append(new)
err_traceback = ' '.join(err_traceback)
self.record(rectype, message, error=True,
err_name=err_name, err_value=err_value,
err_traceback=err_traceback, **kwargs)
elif format is False:
exception = traceback.format_exception(err_type, err_value,
err_tb)
message = '\n'
message += ''.join(exception)
self.record(rectype, message, **kwargs)
else:
message = message or ''
self.record(rectype, message, **kwargs)
# Break execution in case of critical error if permitted.
# The alarm will be generated at exit if it is configured.
if self._control is True:
if level >= self._maxlevel:
sys.exit()
if self._maxerrors is not False:
if self._count_errors > self._maxerrors:
sys.exit()
# Send alarm if execution was not aborted but alarm is needed.
if alarming is True:
self.root.email.alarm()
pass
def warning(self, message=None, **kwargs):
"""Send WARNING error record to the output."""
self.error(message, rectype='warning', level=0, **kwargs)
pass
def critical(self, message=None, **kwargs):
"""Send CRITICAL error record to the output."""
self.error(message, rectype='critical', level=2, **kwargs)
pass
def head(self):
"""Send header to the output."""
string = self.header.create()
self.write(string)
pass
def subhead(self, string):
"""Send subheader to the output.
Subheader is an upper-case text between two border lines.
Parameters
----------
string : str
The text that will be presented as subheader.
"""
bound = f'{self.formatter.div*self.formatter.length}\n'
string = f'{bound}\t{string}\n{bound}'.upper()
self.write(string)
pass
def line(self, message):
"""Send raw text with the new line to the output.
Parameters
----------
message : str
The message that must be written.
"""
self.write(f'{message}\n')
pass
def bound(self, div=None, length=None):
"""Write horizontal border in the output.
Useful when need to separate different blocks of information.
Parameters
----------
div : str, optional
Symbol that is used to bulid the bound.
length : int, optional
Lenght of the bound.
"""
        div = div if div is not None else self.formatter.div
        length = length if length is not None else self.formatter.length
        border = div * length
        self.write(border + '\n')
pass
def blank(self, number=1):
"""Write blank lines in the output.
Parameters
----------
number : int, optional
The number of the blank lines that must be written.
"""
string = '\n'*number
self.write(string)
pass
def ok(self, **kwargs):
"""Print INFO message with OK."""
rectype = 'info'
message = self.messages['ok']
self.record(rectype, message, **kwargs)
pass
def success(self, **kwargs):
"""Print INFO message with SUCCESS."""
rectype = 'info'
message = self.messages['success']
self.record(rectype, message, **kwargs)
pass
def fail(self, **kwargs):
"""Print INFO message with FAIL."""
rectype = 'info'
message = self.messages['fail']
self.record(rectype, message, **kwargs)
pass
def restart(self):
"""Restart logging. Will open new file."""
self.start_date = dt.datetime.now()
self.__calculate_restart_date()
if self.root.file.status is True:
self.root.file.new()
if self.header.used is True:
self.head()
pass
def email(self, *args, **kwargs):
"""Send email message.
Note that SMTP server connection must be configured.
"""
self.root.email.send(*args, **kwargs)
pass
def table(self, **kwargs):
"""Write information to database table.
Note that DB connection must be configured.
"""
self.root.table.write(**kwargs)
pass
def _exit(self):
# Inform about the error.
if self._alarming is True and self._with_error is True:
self.root.email.alarm()
pass
def __calculate_restart_date(self):
# Calculate the date when logger must be restarted according to
# maxdays parameter.
self.__restart_date = (self.start_date
+ dt.timedelta(days=self._maxdays))
pass
def __check_file_stats(self):
# Check the output file statistics to catch when current file must be
# closed and new one must be opened.
if self.root.file.status is True:
if self._maxsize is not False:
if self.root.file.size is not None:
if self.root.file.size > self._maxsize:
self.restart()
return
if self._maxdays is not False:
if self.__restart_date.day == dt.datetime.now().day:
self.restart()
return
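# --- Hedged usage sketch (not part of the original module) ---
# A minimal example of driving the Logger defined above. The logger name,
# application metadata and the choice to disable file output and alarming are
# assumptions made for the illustration only.
if __name__ == '__main__':
    logger = Logger('example', app='demo', desc='Logger usage sketch',
                    version='0.1', file=False, alarming=False)
    logger.head()
    logger.info('Application started')
    try:
        1 / 0
    except ZeroDivisionError:
        # warning() records the exception with level=0, which stays below the
        # default break level (maxlevel=2), so execution continues.
        logger.warning()
    logger.ok()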
| 38.243531 | 79 | 0.582584 |
73f7d9e584482b52d1b1c8e06846f57221b6169f | 2,790 | py | Python | optimizers.py | adhirajghosh/RPTM_reid | 27713ef7c7358355641d68a92703cc44b61dba99 | [
"MIT"
] | null | null | null | optimizers.py | adhirajghosh/RPTM_reid | 27713ef7c7358355641d68a92703cc44b61dba99 | [
"MIT"
] | null | null | null | optimizers.py | adhirajghosh/RPTM_reid | 27713ef7c7358355641d68a92703cc44b61dba99 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import print_function
import torch
import torch.nn as nn
def init_optimizer(model,
optim='adam', # optimizer choices
lr=0.003, # learning rate
weight_decay=5e-4, # weight decay
momentum=0.9, # momentum factor for sgd and rmsprop
sgd_dampening=0, # sgd's dampening for momentum
sgd_nesterov=True, # whether to enable sgd's Nesterov momentum
rmsprop_alpha=0.99, # rmsprop's smoothing constant
adam_beta1=0.9, # exponential decay rate for adam's first moment
                   adam_beta2=0.999,  # exponential decay rate for adam's second moment
staged_lr=False, # different lr for different layers
new_layers=None, # new layers use the default lr, while other layers's lr is scaled by base_lr_mult
base_lr_mult=0.1, # learning rate multiplier for base layers
):
if staged_lr:
assert new_layers is not None
base_params = []
base_layers = []
new_params = []
if isinstance(model, nn.DataParallel):
model = model.module
for name, module in model.named_children():
if name in new_layers:
new_params += [p for p in module.parameters()]
else:
base_params += [p for p in module.parameters()]
base_layers.append(name)
param_groups = [
{'params': base_params, 'lr': lr * base_lr_mult},
{'params': new_params},
]
print('Use staged learning rate')
print('* Base layers (initial lr = {}): {}'.format(lr * base_lr_mult, base_layers))
print('* New layers (initial lr = {}): {}'.format(lr, new_layers))
else:
param_groups = model.parameters()
# Construct optimizer
if optim == 'adam':
return torch.optim.Adam(param_groups, lr=lr, weight_decay=weight_decay,
betas=(adam_beta1, adam_beta2))
elif optim == 'amsgrad':
return torch.optim.Adam(param_groups, lr=lr, weight_decay=weight_decay,
betas=(adam_beta1, adam_beta2), amsgrad=True)
elif optim == 'sgd':
return torch.optim.SGD(param_groups, lr=lr, momentum=momentum, weight_decay=weight_decay,
dampening=sgd_dampening, nesterov=sgd_nesterov)
elif optim == 'rmsprop':
return torch.optim.RMSprop(param_groups, lr=lr, momentum=momentum, weight_decay=weight_decay,
alpha=rmsprop_alpha)
else:
raise ValueError('Unsupported optimizer: {}'.format(optim))
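# --- Hedged usage sketch (not part of the original module) ---
# A minimal example of building an optimizer for a toy model. The architecture
# and hyper-parameters below are assumptions made purely for illustration.
if __name__ == '__main__':
    toy_model = nn.Sequential(nn.Linear(8, 4), nn.ReLU(), nn.Linear(4, 2))
    optimizer = init_optimizer(toy_model, optim='sgd', lr=0.01, momentum=0.9)
    print(optimizer)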
| 43.59375 | 119 | 0.582437 |
73f7decbcb60963594048cb019f8cb5bf1580fd7 | 1,900 | py | Python | python/training_plots.py | Maplenormandy/list-62x | c1731d0610fdf9e58cb2792d706e8904c549fbd6 | [
"MIT"
] | 1 | 2020-11-07T12:40:59.000Z | 2020-11-07T12:40:59.000Z | python/training_plots.py | Maplenormandy/list-62x | c1731d0610fdf9e58cb2792d706e8904c549fbd6 | [
"MIT"
] | null | null | null | python/training_plots.py | Maplenormandy/list-62x | c1731d0610fdf9e58cb2792d706e8904c549fbd6 | [
"MIT"
] | null | null | null | import pandas as pd
import matplotlib.pyplot as plt
from statsmodels.stats.weightstats import ttost_paired
import statsmodels.api as sm
import numpy as np
from sklearn import datasets, linear_model
from pylab import *
data = pd.read_csv(open('optimalPointsWmask_16s.csv'), index_col='Num')
# print data['Mean 0']
#Baseline Mean Illumination vs Shutter
plt.scatter(data['Mean 0'], data['Shutter 0'], label='baseline')
x = data['Mean 0']
y = data['Shutter 0']
plt.legend(loc='upper right')
plt.ylabel('Shutter Speed')
plt.xlabel('Mean Illumination')
plt.title('Baseline Illumination vs Shutter')
# (m, b) = polyfit(x,y,1)
# yp= polyval([m,b], x)
# plt.plot(x, yp, color='red')
plt.draw()
plt.show()
# Training Function Inputs --(Best Images) - Mean Illumination vs Shutter
plt.scatter(data['Mean 1'], data['Shutter 1'], label='training')
x = data['Mean 1']
y = data['Shutter 1']
plt.legend(loc='upper right')
plt.ylabel('Shutter Speed')
plt.xlabel('Mean Illumination')
plt.title('Training Illumination vs Shutter')
# (m, b) = polyfit(x,y,1)
# print (m,b)
# yp= polyval([m,b], x)
# plt.plot(x, yp, color='red')
plt.draw()
plt.show()
#Baseline Mean Illumination vs Gain
plt.scatter(data['Mean 0'], data['Gain 0'], label='baseline')
x = data['Mean 0']
y= data['Gain 0']
plt.legend(loc='upper right')
plt.ylabel('Gain')
plt.xlabel('Mean Illumination')
plt.title('Baseline Illumination vs Gain')
# (m, b) = polyfit(x,y,1)
# yp= polyval([m,b], x)
# plt.plot(x, yp, color='red')
plt.draw()
plt.show()
# Training Function Inputs --(Best Images) - Mean Illumination vs Shutter
plt.scatter(data['Mean 1'], data['Gain 1'], label='training')
x = data['Mean 1']
y = data['Gain 1']
plt.legend(loc='upper right')
plt.ylabel('Gain')
plt.xlabel('Mean Illumination')
plt.title('Training Illumination vs Gain')
# (m, b) = polyfit(x,y,1)
# yp= polyval([m,b], x)
# plt.plot(x, yp, color='red')
plt.draw()
plt.show()
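# --- Hedged sketch (not part of the original script) ---
# The commented-out polyfit/polyval lines above suggest that a linear trend
# overlay was intended for each scatter plot. A small helper along those lines
# could look like this; polyfit/polyval come from the pylab star-import above.
def add_linear_fit(x, y, color='red'):
    """Fit y = m*x + b and draw the fitted line on the current axes."""
    (m, b) = polyfit(x, y, 1)
    plt.plot(x, polyval([m, b], x), color=color)
    return m, b

# Example (illustrative only): overlay a fit on the last scatter plot.
# add_linear_fit(data['Mean 1'], data['Gain 1'])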
| 27.142857 | 73 | 0.691579 |
73f7ef0f893fe450494e72221573ea321c2f37dd | 8,170 | py | Python | detectron/ops/generate_proposals.py | singhnarotam1997/Detectron | ecc6b25fc8869486126f1384b4e6e042a718bd5b | [
"Apache-2.0"
] | 60 | 2021-08-07T09:16:52.000Z | 2022-03-14T09:09:00.000Z | detectron/ops/generate_proposals.py | singhnarotam1997/Detectron | ecc6b25fc8869486126f1384b4e6e042a718bd5b | [
"Apache-2.0"
] | 4 | 2021-10-14T02:44:49.000Z | 2022-03-14T08:18:20.000Z | detectron/ops/generate_proposals.py | singhnarotam1997/Detectron | ecc6b25fc8869486126f1384b4e6e042a718bd5b | [
"Apache-2.0"
] | 11 | 2021-11-01T00:30:37.000Z | 2021-12-08T10:01:52.000Z |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
##############################################################################
#
# Based on:
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Sean Bell
# --------------------------------------------------------
import numpy as np
from detectron.core.config import cfg
import detectron.utils.boxes as box_utils
class GenerateProposalsOp(object):
"""Output object detection proposals by applying estimated bounding-box
transformations to a set of regular boxes (called "anchors").
"""
def __init__(self, anchors, spatial_scale, train):
self._anchors = anchors
self._num_anchors = self._anchors.shape[0]
self._feat_stride = 1. / spatial_scale
self._train = train
def forward(self, inputs, outputs):
"""See modeling.detector.GenerateProposals for inputs/outputs
documentation.
"""
# 1. for each location i in a (H, W) grid:
# generate A anchor boxes centered on cell i
# apply predicted bbox deltas to each of the A anchors at cell i
# 2. clip predicted boxes to image
# 3. remove predicted boxes with either height or width < threshold
# 4. sort all (proposal, score) pairs by score from highest to lowest
# 5. take the top pre_nms_topN proposals before NMS
# 6. apply NMS with a loose threshold (0.7) to the remaining proposals
# 7. take after_nms_topN proposals after NMS
# 8. return the top proposals
# predicted probability of fg object for each RPN anchor
scores = inputs[0].data
        # predicted anchor transformations
bbox_deltas = inputs[1].data
# input image (height, width, scale), in which scale is the scale factor
# applied to the original dataset image to get the network input image
im_info = inputs[2].data
# 1. Generate proposals from bbox deltas and shifted anchors
height, width = scores.shape[-2:]
# Enumerate all shifted positions on the (H, W) grid
shift_x = np.arange(0, width) * self._feat_stride
shift_y = np.arange(0, height) * self._feat_stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y, copy=False)
# Convert to (K, 4), K=H*W, where the columns are (dx, dy, dx, dy)
# shift pointing to each grid location
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
shift_x.ravel(), shift_y.ravel())).transpose()
        # Broadcast anchors over shifts to enumerate all anchors at all positions
# in the (H, W) grid:
# - add A anchors of shape (1, A, 4) to
# - K shifts of shape (K, 1, 4) to get
# - all shifted anchors of shape (K, A, 4)
# - reshape to (K*A, 4) shifted anchors
num_images = inputs[0].shape[0]
A = self._num_anchors
K = shifts.shape[0]
all_anchors = self._anchors[np.newaxis, :, :] + shifts[:, np.newaxis, :]
all_anchors = all_anchors.reshape((K * A, 4))
rois = np.empty((0, 5), dtype=np.float32)
roi_probs = np.empty((0, 1), dtype=np.float32)
for im_i in range(num_images):
im_i_boxes, im_i_probs = self.proposals_for_one_image(
im_info[im_i, :], all_anchors, bbox_deltas[im_i, :, :, :],
scores[im_i, :, :, :]
)
batch_inds = im_i * np.ones(
(im_i_boxes.shape[0], 1), dtype=np.float32
)
im_i_rois = np.hstack((batch_inds, im_i_boxes))
rois = np.append(rois, im_i_rois, axis=0)
roi_probs = np.append(roi_probs, im_i_probs, axis=0)
outputs[0].reshape(rois.shape)
outputs[0].data[...] = rois
if len(outputs) > 1:
outputs[1].reshape(roi_probs.shape)
outputs[1].data[...] = roi_probs
def proposals_for_one_image(
self, im_info, all_anchors, bbox_deltas, scores
):
# Get mode-dependent configuration
cfg_key = 'TRAIN' if self._train else 'TEST'
pre_nms_topN = cfg[cfg_key].RPN_PRE_NMS_TOP_N
post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N
nms_thresh = cfg[cfg_key].RPN_NMS_THRESH
min_size = cfg[cfg_key].RPN_MIN_SIZE
# Transpose and reshape predicted bbox transformations to get them
# into the same order as the anchors:
# - bbox deltas will be (4 * A, H, W) format from conv output
# - transpose to (H, W, 4 * A)
# - reshape to (H * W * A, 4) where rows are ordered by (H, W, A)
# in slowest to fastest order to match the enumerated anchors
bbox_deltas = bbox_deltas.transpose((1, 2, 0)).reshape((-1, 4))
# Same story for the scores:
# - scores are (A, H, W) format from conv output
# - transpose to (H, W, A)
# - reshape to (H * W * A, 1) where rows are ordered by (H, W, A)
# to match the order of anchors and bbox_deltas
scores = scores.transpose((1, 2, 0)).reshape((-1, 1))
# 4. sort all (proposal, score) pairs by score from highest to lowest
# 5. take top pre_nms_topN (e.g. 6000)
if pre_nms_topN <= 0 or pre_nms_topN >= len(scores):
order = np.argsort(-scores.squeeze())
else:
# Avoid sorting possibly large arrays; First partition to get top K
# unsorted and then sort just those (~20x faster for 200k scores)
inds = np.argpartition(
-scores.squeeze(), pre_nms_topN
)[:pre_nms_topN]
order = np.argsort(-scores[inds].squeeze())
order = inds[order]
bbox_deltas = bbox_deltas[order, :]
all_anchors = all_anchors[order, :]
scores = scores[order]
# Transform anchors into proposals via bbox transformations
proposals = box_utils.bbox_transform(
all_anchors, bbox_deltas, (1.0, 1.0, 1.0, 1.0))
# 2. clip proposals to image (may result in proposals with zero area
# that will be removed in the next step)
proposals = box_utils.clip_tiled_boxes(proposals, im_info[:2])
# 3. remove predicted boxes with either height or width < min_size
keep = _filter_boxes(proposals, min_size, im_info)
proposals = proposals[keep, :]
scores = scores[keep]
# 6. apply loose nms (e.g. threshold = 0.7)
# 7. take after_nms_topN (e.g. 300)
# 8. return the top proposals (-> RoIs top)
if nms_thresh > 0:
keep = box_utils.nms(np.hstack((proposals, scores)), nms_thresh)
if post_nms_topN > 0:
keep = keep[:post_nms_topN]
proposals = proposals[keep, :]
scores = scores[keep]
return proposals, scores
def _filter_boxes(boxes, min_size, im_info):
"""Only keep boxes with both sides >= min_size and center within the image.
"""
# Compute the width and height of the proposal boxes as measured in the original
# image coordinate system (this is required to avoid "Negative Areas Found"
# assertions in other parts of the code that measure).
im_scale = im_info[2]
ws_orig_scale = (boxes[:, 2] - boxes[:, 0]) / im_scale + 1
hs_orig_scale = (boxes[:, 3] - boxes[:, 1]) / im_scale + 1
# To avoid numerical issues we require the min_size to be at least 1 pixel in the
# original image
min_size = np.maximum(min_size, 1)
# Proposal center is computed relative to the scaled input image
ws = boxes[:, 2] - boxes[:, 0] + 1
hs = boxes[:, 3] - boxes[:, 1] + 1
x_ctr = boxes[:, 0] + ws / 2.
y_ctr = boxes[:, 1] + hs / 2.
keep = np.where(
(ws_orig_scale >= min_size)
& (hs_orig_scale >= min_size)
& (x_ctr < im_info[1])
& (y_ctr < im_info[0])
)[0]
return keep
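# --- Hedged illustration (not part of the original module) ---
# A tiny, self-contained numpy sketch of the anchor enumeration performed in
# GenerateProposalsOp.forward(): A base anchors are broadcast against K grid
# shifts to produce K * A shifted anchors. The stride, grid size and anchor
# boxes below are toy values chosen only for the example.
if __name__ == '__main__':
    feat_stride = 16
    height, width = 2, 3
    anchors = np.array([[-8., -8., 8., 8.], [-16., -16., 16., 16.]])
    shift_x, shift_y = np.meshgrid(np.arange(width) * feat_stride,
                                   np.arange(height) * feat_stride)
    shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
                        shift_x.ravel(), shift_y.ravel())).transpose()
    all_anchors = (anchors[np.newaxis, :, :] +
                   shifts[:, np.newaxis, :]).reshape((-1, 4))
    print(all_anchors.shape)  # -> (K * A, 4) == (6 * 2, 4)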
| 42.774869 | 85 | 0.596083 |
73f7ff694be52a3a4fc6904d1ef9506c98697a3e | 3,397 | py | Python | hivwholeseq/sequencing/find_primers.py | neherlab/hivwholeseq | 978ce4060362e4973f92b122ed5340a5314d7844 | [
"MIT"
] | 3 | 2016-09-13T12:15:47.000Z | 2021-07-03T01:28:56.000Z | hivwholeseq/sequencing/find_primers.py | iosonofabio/hivwholeseq | d504c63b446c3a0308aad6d6e484ea1666bbe6df | [
"MIT"
] | null | null | null | hivwholeseq/sequencing/find_primers.py | iosonofabio/hivwholeseq | d504c63b446c3a0308aad6d6e484ea1666bbe6df | [
"MIT"
] | 3 | 2016-01-17T03:43:46.000Z | 2020-03-25T07:00:11.000Z | # vim: fdm=indent
'''
author: Fabio Zanini
date: 25/09/13
content: Reconstruct the primers coordinates (approx.) from the edges of the
fragments.
'''
# Modules
import os
import argparse
import pysam
import numpy as np
from Bio import SeqIO
from hivwholeseq.datasets import MiSeq_runs
from hivwholeseq.sequencing.adapter_info import load_adapter_table
from hivwholeseq.utils.miseq import read_types
from hivwholeseq.sequencing.filenames import get_consensus_filename, get_mapped_filename, \
get_coverage_filename
from hivwholeseq.utils.mapping import convert_sam_to_bam, pair_generator
# Script
if __name__ == '__main__':
# Input arguments
parser = argparse.ArgumentParser(description='Extract linkage information')
parser.add_argument('--run', required=True,
help='Seq run to analyze (e.g. Tue28)')
parser.add_argument('--adaIDs', nargs='*',
help='Adapter IDs to analyze (e.g. TS2)')
parser.add_argument('--verbose', type=int, default=0,
help=('Verbosity level [0-3]'))
args = parser.parse_args()
seq_run = args.run
adaIDs = args.adaIDs
VERBOSE = args.verbose
# Specify the dataset
dataset = MiSeq_runs[seq_run]
data_folder = dataset['folder']
# If the script is called with no adaID, iterate over all
if not adaIDs:
adaIDs = load_adapter_table(data_folder)['ID']
if VERBOSE >= 3:
print 'adaIDs', adaIDs
# Select fragment and primers
fragment = 'F3'
# Look for the F3 rev primer (already reversed)
primer_old = 'GATTGTGTGGCAAGTAGACAGG'
primer_new = 'TATGGAAAACAGATGGCAGGTG'
# Iterate over all requested samples
for adaID in adaIDs:
# Read reference (fragmented)
reffilename = get_consensus_filename(data_folder, adaID, fragment)
refseq = SeqIO.read(reffilename, 'fasta')
ref = np.array(refseq)
# read file
bamfilename = get_mapped_filename(data_folder, adaID, fragment,
type='bam', filtered=True)
if not os.path.isfile(bamfilename):
convert_sam_to_bam(bamfilename)
bamfile = pysam.Samfile(bamfilename, 'rb')
# Get the coverage for reads which have long insert sizes
# (to be sure about their identity)
cov_new = 0
cov_old = 0
for i_pairs, reads in enumerate(pair_generator(bamfile)):
if i_pairs > 5000000:
break
if reads[0].isize < 300:
continue
for read in reads:
if read.seq.find(primer_new) != -1:
cov_new += 1
if read.seq.find(primer_old) != -1:
cov_old += 1
print 'old:', cov_old, 'new:', cov_new
bamfile.close()
# Get coverage and see
covfn = get_coverage_filename(data_folder, adaID, fragment)
cov = np.load(covfn)
import matplotlib.pyplot as plt
import matplotlib.cm as cm
for js, read_type in enumerate(read_types):
plt.plot(np.arange(cov.shape[1]), cov[js], lw=2,
c=cm.jet(int(255.0 * js / len(read_types))))
plt.xlabel('Position [bases]')
plt.title(str(adaID)+' '+fragment)
plt.ylabel('Coverage')
plt.ion()
plt.show()
| 30.330357 | 91 | 0.616426 |
73f8135e5484fe58ee212e3923c10fc842800999 | 8,404 | py | Python | deid/tests/test_clean.py | glebsts/deid | a0c630ee613d358b3c8e936dd539d51dee94a5a7 | [
"MIT"
] | 88 | 2017-10-16T12:47:09.000Z | 2022-03-10T23:08:04.000Z | deid/tests/test_clean.py | glebsts/deid | a0c630ee613d358b3c8e936dd539d51dee94a5a7 | [
"MIT"
] | 180 | 2017-06-15T01:37:00.000Z | 2022-03-23T23:05:57.000Z | deid/tests/test_clean.py | glebsts/deid | a0c630ee613d358b3c8e936dd539d51dee94a5a7 | [
"MIT"
] | 41 | 2017-09-29T00:29:50.000Z | 2022-03-16T02:26:54.000Z | #!/usr/bin/env python
"""
Test DICOM Cleaner
Copyright (c) 2016-2021 Vanessa Sochat
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import unittest
import tempfile
import shutil
import os
import numpy as np
from deid.utils import get_installdir
from deid.data import get_dataset
from deid.tests.common import get_file
from pydicom import read_file
global generate_uid
class TestClean(unittest.TestCase):
def setUp(self):
self.pwd = get_installdir()
self.deidpath = os.path.abspath("%s/tests/resources/" % self.pwd)
self.dataset = get_dataset("animals")
self.tmpdir = tempfile.mkdtemp()
print("\n######################START######################")
def tearDown(self):
shutil.rmtree(self.tmpdir)
print("\n######################END########################")
def test_pixel_cleaner_remove_coordinates(self):
"""Test the pixel cleaner to ensure it appropriately clears specified pixels."""
from deid.dicom import DicomCleaner
dicom_file = get_file(self.dataset)
deid = os.path.join(self.deidpath, "remove_coordinates.dicom")
client = DicomCleaner(output_folder=self.tmpdir, deid=deid)
out = client.detect(dicom_file)
self.assertTrue(out["flagged"])
client.clean()
cleanedfile = client.save_dicom()
outputfile = read_file(cleanedfile)
outputpixels = outputfile.pixel_array
inputfile = read_file(dicom_file)
inputpixels = inputfile.pixel_array
compare = inputpixels == outputpixels
self.assertFalse(compare.all())
inputpixels[0:1024, 0:1024] = 0
compare = inputpixels == outputpixels
self.assertTrue(compare.all())
def test_pixel_cleaner_remove_all(self):
"""Test the pixel cleaner to ensure it appropriately clears all pixels."""
from deid.dicom import DicomCleaner
dicom_file = get_file(self.dataset)
deid = os.path.join(self.deidpath, "remove_all.dicom")
client = DicomCleaner(output_folder=self.tmpdir, deid=deid)
out = client.detect(dicom_file)
self.assertTrue(out["flagged"])
client.clean()
cleanedfile = client.save_dicom()
outputfile = read_file(cleanedfile)
outputpixels = outputfile.pixel_array
inputfile = read_file(dicom_file)
inputpixels = inputfile.pixel_array
compare = inputpixels == outputpixels
self.assertFalse(compare.all())
inputpixels[:, :] = 0
compare = inputpixels == outputpixels
self.assertTrue(compare.all())
def test_pixel_cleaner_keepcoordinates_noaction(self):
"""Test the pixel cleaner to ensure that a keepcoordinates with no removecoordinates has no impact on the pixels."""
from deid.dicom import DicomCleaner
dicom_file = get_file(self.dataset)
deid = os.path.join(self.deidpath, "keepcoordinates_noaction.dicom")
client = DicomCleaner(output_folder=self.tmpdir, deid=deid)
out = client.detect(dicom_file)
self.assertTrue(out["flagged"])
client.clean()
cleanedfile = client.save_dicom()
outputfile = read_file(cleanedfile)
outputpixels = outputfile.pixel_array
inputfile = read_file(dicom_file)
inputpixels = inputfile.pixel_array
compare = inputpixels == outputpixels
self.assertTrue(compare.all())
def test_pixel_cleaner_keepcoordinates(self):
"""Test the pixel cleaner to ensure that a keepcoordinates retains appropriate pixels."""
from deid.dicom import DicomCleaner
dicom_file = get_file(self.dataset)
deid = os.path.join(self.deidpath, "keepcoordinates.dicom")
client = DicomCleaner(output_folder=self.tmpdir, deid=deid)
out = client.detect(dicom_file)
self.assertTrue(out["flagged"])
client.clean()
cleanedfile = client.save_dicom()
outputfile = read_file(cleanedfile)
outputpixels = outputfile.pixel_array
inputfile = read_file(dicom_file)
inputpixels = inputfile.pixel_array
compare = inputpixels == outputpixels
self.assertFalse(compare.all())
compare = inputpixels[0:1024, 0:1024] == outputpixels[0:1024, 0:1024]
self.assertTrue(compare.all())
def test_pixel_cleaner_remove_multiple(self):
"""Test the pixel cleaner to ensure that multiple remove coordinates in the same filter remove the appropriate pixels."""
from deid.dicom import DicomCleaner
dicom_file = get_file(self.dataset)
deid = os.path.join(self.deidpath, "remove_coordinates_multiple.dicom")
client = DicomCleaner(output_folder=self.tmpdir, deid=deid)
out = client.detect(dicom_file)
self.assertTrue(out["flagged"])
client.clean()
cleanedfile = client.save_dicom()
outputfile = read_file(cleanedfile)
outputpixels = outputfile.pixel_array
inputfile = read_file(dicom_file)
inputpixels = inputfile.pixel_array
compare = inputpixels == outputpixels
self.assertFalse(compare.all())
inputpixels[0:10, 0:10] = 0
inputpixels[10:20, 10:20] = 0
compare = inputpixels == outputpixels
self.assertTrue(compare.all())
def test_pixel_cleaner_remove_multiple_filters(self):
"""Test the pixel cleaner to ensure that multiple remove coordinates in different filters remove the appropriate pixels."""
from deid.dicom import DicomCleaner
dicom_file = get_file(self.dataset)
deid = os.path.join(self.deidpath, "remove_coordinates_multiple_filters.dicom")
client = DicomCleaner(output_folder=self.tmpdir, deid=deid)
out = client.detect(dicom_file)
self.assertTrue(out["flagged"])
client.clean()
cleanedfile = client.save_dicom()
outputfile = read_file(cleanedfile)
outputpixels = outputfile.pixel_array
inputfile = read_file(dicom_file)
inputpixels = inputfile.pixel_array
compare = inputpixels == outputpixels
self.assertFalse(compare.all())
inputpixels[0:10, 0:10] = 0
inputpixels[10:20, 10:20] = 0
compare = inputpixels == outputpixels
self.assertTrue(compare.all())
def test_pixel_cleaner_keepcoordinates_from(self):
"""Test the pixel cleaner to ensure that multiple keep coordinates retrieved from a dicom field are appropriately retained."""
from deid.dicom import DicomCleaner
dicom_file = get_file(self.dataset)
deid = os.path.join(self.deidpath, "keepcoordinates_from.dicom")
client = DicomCleaner(output_folder=self.tmpdir, deid=deid)
out = client.detect(dicom_file)
self.assertTrue(out["flagged"])
client.clean()
cleanedfile = client.save_dicom()
outputfile = read_file(cleanedfile)
outputpixels = outputfile.pixel_array
inputfile = read_file(dicom_file)
inputpixels = inputfile.pixel_array
compare = inputpixels == outputpixels
self.assertFalse(compare.all())
inputpixels[1000:2000, 0:1000] = 0
inputpixels[0:1000, 1000:2000] = 0
compare = inputpixels[0:2000, 0:2000] == outputpixels[0:2000, 0:2000]
self.assertTrue(compare.all())
if __name__ == "__main__":
unittest.main()
| 35.610169 | 134 | 0.681818 |
73f81a9a27ef429fae584543d6d69c1cc7d4abea | 11,369 | py | Python | datawinners/project/questionnaire_fields.py | ICT4H/dcs-web | fb0f53fad4401cfac1c1789ff28b9d5bda40c975 | [
"Apache-2.0"
] | 1 | 2015-11-02T09:11:12.000Z | 2015-11-02T09:11:12.000Z | datawinners/project/questionnaire_fields.py | ICT4H/dcs-web | fb0f53fad4401cfac1c1789ff28b9d5bda40c975 | [
"Apache-2.0"
] | null | null | null | datawinners/project/questionnaire_fields.py | ICT4H/dcs-web | fb0f53fad4401cfac1c1789ff28b9d5bda40c975 | [
"Apache-2.0"
] | null | null | null | import re
from string import strip
from django.core.exceptions import ValidationError
from django.forms import ChoiceField, FloatField, TextInput
from django.utils.encoding import force_unicode
from django.utils.translation import ugettext
from django import forms
from django.utils.translation import ugettext_lazy as _
from datawinners.entity.fields import PhoneNumberField, DjangoDateField
from datawinners.entity.import_data import load_all_entities_of_type
from mangrove.form_model.validation import GeoCodeConstraint
from mangrove.form_model.form_model import LOCATION_TYPE_FIELD_NAME
from mangrove.form_model.field import SelectField, HierarchyField, TelephoneNumberField, IntegerField, GeoCodeField, DateField
from mangrove.utils.types import is_empty
from datawinners.utils import translate, get_text_language_by_instruction
def as_choices(entities):
return [(entity['short_code'], entity['name'] + ' (' + entity['short_code'] + ')') for entity in entities]
class FormField(object):
def create(self, field):
try:
field_creation_map = {SelectField: SelectFormField,
TelephoneNumberField: PhoneNumberFormField,
IntegerField: IntegerFormField,
DateField: DateFormField,
}
return field_creation_map[type(field)]().create(field)
except KeyError:
return CharFormField().create(field)
class SelectFormField(object):
def create(self, field):
if field.single_select_flag:
for opt in field.options:
if opt['text'] == field.value:
field.value = opt['val']
return ChoiceField(choices=self._create_choices(field), required=field.is_required(),
label=field.label,
initial=field.value, help_text=field.instruction)
else:
field_values = []
if field.value is not None:
field_labels = field.value.split(',')
for opt in field.options:
if opt['text'] in field_labels:
field_values.append(opt['val'])
return forms.MultipleChoiceField(label=field.label, widget=forms.CheckboxSelectMultiple,
choices=self._create_choices(field),
initial=field_values, required=field.is_required(),
help_text=field.instruction)
def _create_choices(self, field):
choice_list = [('', '--None--')] if field.single_select_flag else []
choice_list.extend([(option['val'], option['text']) for option in field.options])
choices = tuple(choice_list)
return choices
class PhoneNumberFormField(object):
def create(self, field):
telephone_number_field = PhoneNumberField(label=field.label, initial=field.value, required=field.is_required(),
help_text=field.instruction)
telephone_number_field.widget.attrs["watermark"] = get_text_field_constraint_text(field)
telephone_number_field.widget.attrs['style'] = 'padding-top: 7px;'
if field.name == LOCATION_TYPE_FIELD_NAME and isinstance(field, HierarchyField):
telephone_number_field.widget.attrs['class'] = 'location_field'
return telephone_number_field
class TextInputForFloat(TextInput):
def _has_changed(self, initial, data):
if data is None:
data_value = 0
else:
data_value = data
if initial is None:
initial_value = 0
else:
initial_value = initial
try:
return float(initial_value) != float(data_value)
except ValueError:
return force_unicode(initial_value) != force_unicode(data_value)
class IntegerFormField(object):
def create(self, field):
constraints = self._get_number_constraints(field)
float_field = FloatField(label=field.label, initial=field.value,
required=field.is_required(), help_text=field.instruction, widget=TextInputForFloat,
**constraints)
float_field.widget.attrs["watermark"] = self._get_number_field_constraint_text(field)
float_field.widget.attrs['style'] = 'padding-top: 7px;'
return float_field
def _get_number_constraints(self, field):
constraints = {}
if not is_empty(field.constraints):
constraint = field.constraints[0]
if constraint.max is not None: constraints["max_value"] = float(constraint.max)
if constraint.min is not None: constraints["min_value"] = float(constraint.min)
return constraints
def _get_number_field_constraint_text(self, field):
max = min = None
if len(field.constraints) > 0:
constraint = field.constraints[0]
min = constraint.min
max = constraint.max
if min is not None and max is None:
constraint_text = _("Minimum %s") % min
return constraint_text
if min is None and max is not None:
constraint_text = _("Upto %s") % max
return constraint_text
elif min is not None and max is not None:
constraint_text = _("%s -- %s") % (min, max)
return constraint_text
return ""
class DateFormField(object):
def create(self, field):
format = field.DATE_DICTIONARY.get(field.date_format)
date_field = DjangoDateField(input_formats=(format,), label=field.label, initial=field.value,
required=field.is_required(), help_text=field.instruction)
date_field.widget.attrs["watermark"] = get_text_field_constraint_text(field)
date_field.widget.attrs['style'] = 'padding-top: 7px;'
return date_field
class GeoCodeValidator(object):
clean = lambda self, x: strip(x)
def __call__(self, value):
lat_long_string = self.clean(value)
lat_long = lat_long_string.replace(",", " ")
lat_long = re.sub(' +', ' ', lat_long).split(" ")
try:
if len(lat_long) != 2:
raise Exception
GeoCodeConstraint().validate(latitude=lat_long[0], longitude=lat_long[1])
except Exception:
raise ValidationError(_(
"Incorrect GPS format. The GPS coordinates must be in the following format: xx.xxxx,yy.yyyy. Example -18.8665,47.5315"))
return lat_long_string
class CharFormField(object):
def create(self, field):
constraints = self._get_chars_constraints(field)
validators = [GeoCodeValidator()] if type(field) == GeoCodeField else []
char_field = StrippedCharField(label=field.label, initial=field.value, required=field.is_required(),
help_text=_(field.instruction), validators=validators, **constraints)
char_field.widget.attrs["watermark"] = "xx.xxxx,yy.yyyy" if type(
field) == GeoCodeField else get_text_field_constraint_text(field)
char_field.widget.attrs['style'] = 'padding-top: 7px;'
char_field.widget.attrs['class'] = css_class(field)
return char_field
def _get_chars_constraints(self, field):
constraints = {}
if not is_empty(field.constraints):
constraint = field.constraints[0]
if constraint.max is not None: constraints["max_length"] = constraint.max
if constraint.min is not None: constraints["min_length"] = constraint.min
return constraints
class EntityField(object):
def __init__(self, dbm, project):
self.dbm = dbm
self.project = project
def create(self, subject_field, entity_type):
#reporter_entity_type = 'reporter'
#if self.project.is_on_type(reporter_entity_type):
# choice_fields = self._data_sender_choice_fields(subject_field)
#else:
choice_fields = self._subject_choice_fields(entity_type, subject_field)
return {subject_field.code: choice_fields}
def _build_subject_choice_data(self, subjects, key_list):
values = map(lambda x: x["cols"] + [x["short_code"]], subjects)
key_list.append('unique_id')
return [dict(zip(key_list, value_list)) for value_list in values]
def _get_choice_field(self, data_sender_choices, subject_field, help_text):
subject_choice_field = ChoiceField(required=subject_field.is_required(), choices=data_sender_choices,
label=subject_field.name,
initial=subject_field.value, help_text=help_text)
subject_choice_field.widget.attrs['class'] = 'subject_field'
return subject_choice_field
def get_value(self, subject):
return subject['name'] + ' (' + subject['short_code'] + ')'
def choice(self, subject):
return subject['unique_id'], self.get_value(subject)
def _data_sender_choice_fields(self, subject_field):
data_senders = self.project.get_data_senders(self.dbm)
data_sender_choices = as_choices(data_senders)
return self._get_choice_field(data_sender_choices, subject_field, help_text=subject_field.instruction)
def _subject_choice_fields(self, entity_type, subject_field):
subjects, fields, label = load_all_entities_of_type(self.dbm, type=entity_type)
subjects = self._build_subject_choice_data(subjects, fields)
language = get_text_language_by_instruction(subject_field.instruction)
instruction_for_subject_field = translate("Choose Subject from this list.", language=language, func=ugettext)
all_subject_choices = map(self.choice, subjects)
choice_fields = self._get_choice_field(all_subject_choices, subject_field,
help_text=instruction_for_subject_field)
return choice_fields
def get_text_field_constraint_text(field):
if not is_empty(field.constraints):
length_constraint = field.constraints[0]
min = length_constraint.min
max = length_constraint.max
if min is not None and max is None:
constraint_text = _("Minimum %s characters") % min
return constraint_text
if min is None and max is not None:
constraint_text = _("Upto %s characters") % max
return constraint_text
elif min is not None and max is not None:
constraint_text = _("Between %s -- %s characters") % (min, max)
return constraint_text
return ""
def css_class(field):
if field.is_entity_field:
return 'subject_field'
if field.name == LOCATION_TYPE_FIELD_NAME and isinstance(field, HierarchyField):
return 'location_field'
return None
class StrippedCharField(forms.CharField):
def __init__(self, max_length=None, min_length=None, strip=True, *args, **kwargs):
super(StrippedCharField, self).__init__(max_length, min_length, *args, **kwargs)
self.strip = strip
def clean(self, value):
if value and self.strip:
value = value.strip()
return super(StrippedCharField, self).clean(value) | 43.726923 | 136 | 0.649749 |
73f83ed82935a22f03b30214821745b177f76a7e | 117 | py | Python | app/run.py | Sagargajare/flask-docker-final | 115cf02b7f1ae4fafdfa50a6ce09749a30c2f84a | [
"MIT"
] | null | null | null | app/run.py | Sagargajare/flask-docker-final | 115cf02b7f1ae4fafdfa50a6ce09749a30c2f84a | [
"MIT"
] | null | null | null | app/run.py | Sagargajare/flask-docker-final | 115cf02b7f1ae4fafdfa50a6ce09749a30c2f84a | [
"MIT"
] | null | null | null | from logging import debug
from flaskapp import app
if __name__ == '__main__':
app.run(host="0.0.0.0",debug=True) | 23.4 | 38 | 0.717949 |
73f84452eda43c51020b09bbabac843b7c24966d | 12 | py | Python | by-session/ta-921/j10/a7.py | amiraliakbari/sharif-mabani-python | 5d14a08d165267fe71c28389ddbafe29af7078c5 | [
"MIT"
] | 2 | 2015-04-29T20:59:35.000Z | 2018-09-26T13:33:43.000Z | by-session/ta-921/j10/a7.py | amiraliakbari/sharif-mabani-python | 5d14a08d165267fe71c28389ddbafe29af7078c5 | [
"MIT"
] | null | null | null | by-session/ta-921/j10/a7.py | amiraliakbari/sharif-mabani-python | 5d14a08d165267fe71c28389ddbafe29af7078c5 | [
"MIT"
] | null | null | null | input("xxx") | 12 | 12 | 0.666667 |
73f8501be99f4e1212f046749e0780e89a4bb20c | 8,028 | py | Python | HW2/dbsys-hw2/hw2.py | yliu120/dbsystem | d1b008f411929058a34a1dd2c44c9ee2cf899865 | [
"Apache-2.0"
] | null | null | null | HW2/dbsys-hw2/hw2.py | yliu120/dbsystem | d1b008f411929058a34a1dd2c44c9ee2cf899865 | [
"Apache-2.0"
] | null | null | null | HW2/dbsys-hw2/hw2.py | yliu120/dbsystem | d1b008f411929058a34a1dd2c44c9ee2cf899865 | [
"Apache-2.0"
] | null | null | null | from Database import Database
from Catalog.Schema import DBSchema
from time import time
db = Database(dataDir='./data')
def readResult( query ):
for page in db.processQuery(query):
for tup in page[1]:
yield query.schema().unpack(tup);
'''
SQL Query. Question 1:
select p.name, s.name
from part p, supplier s, partsupp ps
where p.partkey = ps.partkey
and ps.suppkey = s.suppkey
and ps.availqty = 1
union all
select p.name, s.name
from part p, supplier s, partsupp ps
where p.partkey = ps.partkey
and ps.suppkey = s.suppkey
and ps.supplycost < 5;
'''
#Query 1 -- hash join:
lhsKeySchema1 = DBSchema('partsupp', [('PS_PARTKEY', 'int')])
rhsKeySchema1 = DBSchema('part', [('P_PARTKEY','int')])
lhsKeySchema2 = DBSchema('partsupp', [('PS_SUPPKEY', 'int')])
rhsKeySchema2 = DBSchema('supplier', [('S_SUPPKEY','int')])
part = db.query().fromTable('part').select({'P_NAME':('P_NAME','char(55)'), 'P_PARTKEY':('P_PARTKEY','int')});
partsupp = db.query().fromTable('partsupp').where('PS_AVAILQTY == 1').select({'PS_PARTKEY':('PS_PARTKEY','int'), 'PS_SUPPKEY':('PS_SUPPKEY','int')})
supplier = db.query().fromTable('supplier').select({'S_NAME':('S_NAME','char(25)'), 'S_SUPPKEY':('S_SUPPKEY', 'int')})
join_ps_p = partsupp.join(\
part, \
rhsSchema = DBSchema('part', [('P_NAME','char(55)'), ('P_PARTKEY','int')]), \
method = 'hash', \
lhsHashFn = lambda e: e.PS_PARTKEY % 5, lhsKeySchema = lhsKeySchema1,\
rhsHashFn = lambda e: e.P_PARTKEY % 5, rhsKeySchema = rhsKeySchema1);
join_three = join_ps_p.join(\
supplier, \
rhsSchema = DBSchema('supplier', [('S_NAME','char(25)'), ('S_SUPPKEY', 'int')]), \
method = 'hash',
lhsHashFn = lambda e: e.PS_SUPPKEY % 5, lhsKeySchema = lhsKeySchema2,\
rhsHashFn = lambda e: e.S_SUPPKEY % 5, rhsKeySchema = rhsKeySchema2,\
).select({'P_NAME':('P_NAME','char(55)'), 'S_NAME':('S_NAME','char(25)')});
partsupp2 = db.query().fromTable('partsupp').where('PS_SUPPLYCOST < 5').select({'PS_PARTKEY':('PS_PARTKEY','int'), 'PS_SUPPKEY':('PS_SUPPKEY','int')})
join_ps_p2 = partsupp2.join(\
part, \
rhsSchema = DBSchema('part', [('P_NAME','char(55)'), ('P_PARTKEY','int')]), \
method = 'hash', \
lhsHashFn = lambda e: e.PS_PARTKEY % 5, lhsKeySchema = lhsKeySchema1,\
rhsHashFn = lambda e: e.P_PARTKEY % 5, rhsKeySchema = rhsKeySchema1);
join_three2 = join_ps_p2.join(\
supplier, \
rhsSchema = DBSchema('supplier', [('S_NAME','char(25)'), ('S_SUPPKEY', 'int')]), \
method = 'hash',
lhsHashFn = lambda e: e.PS_SUPPKEY % 5, lhsKeySchema = lhsKeySchema2,\
rhsHashFn = lambda e: e.S_SUPPKEY % 5, rhsKeySchema = rhsKeySchema2,\
).select({'P_NAME':('P_NAME','char(55)'), 'S_NAME':('S_NAME','char(25)')});
query1hash = join_three.union( join_three2 ).finalize();
print(query1hash.explain())
start = time()
for line in readResult(query1hash):
print(line);
end = time()
print("Execution time: " + str(end - start))
'''
SQL Query. Question 2:
select part.name, count(*) as count
from part, lineitem
where part.partkey = lineitem.partkey and lineitem.returnflag = 'R'
group by part.name;
'''
#
ls1 = DBSchema('partkey1',[('p_partkey', 'int')]);
rs1 = DBSchema('partkey2',[('L_PARTKEY', 'int')]);
pSchema = db.relationSchema('part');
lSchema = DBSchema('liselect',[('L_PARTKEY', 'int')]);
keySchema = DBSchema('groupByKey', [('p_name', 'char(55)')]);
groupBySchema = DBSchema('groupBy', [('count','int')]);
query2hash = db.query().fromTable('part').select({'p_name': ('P_NAME', 'char(55)'), 'p_partkey': ('P_PARTKEY', 'int')}).join( \
db.query().fromTable('lineitem').where("L_RETURNFLAG == 'R'"), \
method='hash', \
lhsHashFn = lambda e: e.p_partkey % 10, lhsKeySchema=ls1, \
rhsHashFn = lambda e: e.L_PARTKEY % 10, rhsKeySchema=rs1).groupBy( \
groupSchema=keySchema, \
aggSchema=groupBySchema, \
groupExpr=(lambda e: e.p_name), \
aggExprs=[(0, lambda acc, e: acc + 1, lambda x: x)], \
groupHashFn=(lambda gbVal: hash(gbVal) % 10)).finalize();
start = time();
for line in readResult(query2hash):
print(line);
end = time();
print("Time for query2hash: " + str(end-start));
'''
SQL Query. Question 3:
create table temp as
select n.name as nation, p.name as part, sum(l.quantity) as num
from customer c, nation n, orders o, lineitem l, part p
where c.nationkey = n.nationkey
and c.custkey = o.custkey
and o.orderkey = l.orderkey
and l.partkey = p.partkey
group by n.name, p.name;
select nation, max(num)
from temp
group by nation;
Note that lineitem is large. We can groupby lineitem with l.orderkey and l.partkey first to create
a smaller dataset.
Then nation < part < customer < orders
'''
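# Illustrative sketch (not wired into the plan below) of the pre-aggregation
# described in the note above: group lineitem by (L_ORDERKEY, L_PARTKEY) and
# sum L_QUANTITY so that the later joins see a smaller input. It assumes the
# select/groupBy operators compose the same way they do in the queries below.
lineitem_preagg = db.query().fromTable('lineitem').select( \
    {'L_ORDERKEY':('L_ORDERKEY','int'), 'L_PARTKEY':('L_PARTKEY','int'), 'L_QUANTITY':('L_QUANTITY','float')}).groupBy( \
    groupSchema=DBSchema('lkey', [('L_ORDERKEY','int'), ('L_PARTKEY','int')]), \
    aggSchema=DBSchema('lagg', [('num','float')]), \
    groupExpr=(lambda e: (e.L_ORDERKEY, e.L_PARTKEY)), \
    aggExprs=[(0, lambda acc, e: acc + e.L_QUANTITY, lambda x: x)], \
    groupHashFn=(lambda gbVal: hash(gbVal) % 10))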
# prepare queries
nation = db.query().fromTable('nation').select({'N_NATIONKEY':('N_NATIONKEY','int'), 'N_NAME':('N_NAME', 'char(25)')});
part = db.query().fromTable('part').select({'P_PARTKEY':('P_PARTKEY','int'), 'P_NAME':('P_NAME','char(55)')});
orders = db.query().fromTable('orders').select({'O_ORDERKEY':('O_ORDERKEY','int'), 'O_CUSTKEY':('O_CUSTKEY','int')});
line = db.query().fromTable('lineitem').select({'L_ORDERKEY':('L_ORDERKEY','int'),'L_PARTKEY':('L_PARTKEY','int'), 'L_QUANTITY':('L_QUANTITY','float')});
customer = db.query().fromTable('customer').select({'C_NATIONKEY':('C_NATIONKEY','int'),'C_CUSTKEY':('C_CUSTKEY','int')});
nc = nation.join(\
customer, \
rhsSchema = DBSchema('c',[('C_NATIONKEY','int'),('C_CUSTKEY','int')]), \
method = 'hash', \
lhsHashFn=lambda e : e.N_NATIONKEY % 5, lhsKeySchema=DBSchema('ls1',[('N_NATIONKEY','int')]), \
rhsHashFn=lambda e : e.C_NATIONKEY % 5, rhsKeySchema=DBSchema('rs1',[('C_NATIONKEY','int')]))
nco = nc.join(\
orders, \
method = 'hash', \
lhsHashFn=lambda e : e.C_CUSTKEY % 5, lhsKeySchema=DBSchema('ls2',[('C_CUSTKEY','int')]), \
rhsHashFn=lambda e : e.O_CUSTKEY % 5, rhsKeySchema=DBSchema('rs2',[('O_CUSTKEY','int')]))
ncol = nco.join(\
line, \
rhsSchema = DBSchema('l',[('L_ORDERKEY','int'),('L_PARTKEY','int'),('L_QUANTITY','float')]), \
method = 'hash', \
lhsHashFn=lambda e : e.O_ORDERKEY % 5, lhsKeySchema=DBSchema('ls3',[('O_ORDERKEY','int')]), \
rhsHashFn=lambda e : e.L_ORDERKEY % 5, rhsKeySchema=DBSchema('rs3',[('L_ORDERKEY','int')]))
all = ncol.join(\
part, \
rhsSchema = DBSchema('p', [('P_PARTKEY','int'),('P_NAME','char(55)')]),\
method = 'hash', \
lhsHashFn=lambda e : e.L_PARTKEY % 5, lhsKeySchema=DBSchema('ls4',[('L_PARTKEY','int')]), \
rhsHashFn=lambda e : e.P_PARTKEY % 5, rhsKeySchema=DBSchema('rs4',[('P_PARTKEY','int')])
)
allgroup1 = all.groupBy(\
groupSchema=DBSchema('gb1',[('N_NAME','char(25)'), ('P_NAME','char(55)')]), \
aggSchema=DBSchema('agg1',[('num','float')]), \
groupExpr=(lambda e: (e.N_NAME, e.P_NAME) ), \
aggExprs=[(0, lambda acc, e: acc + e.L_QUANTITY, lambda x: x)], \
groupHashFn=(lambda gbVal: hash(gbVal) % 10)
)
query3hash = allgroup1.groupBy(\
groupSchema=DBSchema('gb2',[('N_NAME','char(25)')]), \
aggSchema=DBSchema('agg1',[('max','float')]), \
groupExpr=(lambda e: e.N_NAME ), \
aggExprs=[(0, lambda acc, e: max(acc, e.num), lambda x: x)], \
groupHashFn=(lambda gbVal: hash(gbVal) % 10) ).finalize();
start = time();
for line in readResult(query3hash):
print(line);
end = time();
print("Time for query3hash: " + str(end-start));
| 41.169231 | 155 | 0.590932 |
73f873b8cab047d1e3925cb6533bb271a8faca5c | 5,606 | py | Python | asprin/src/program_parser/transitive_closure.py | potassco/asprin | 42e296cdc70cacbbc4d699ef32ec903c83c8208a | [
"MIT"
] | 16 | 2016-11-16T17:24:42.000Z | 2022-01-08T16:19:46.000Z | asprin/src/program_parser/transitive_closure.py | potassco/asprin | 42e296cdc70cacbbc4d699ef32ec903c83c8208a | [
"MIT"
] | 10 | 2018-03-14T14:18:40.000Z | 2022-01-21T13:24:06.000Z | asprin/src/program_parser/transitive_closure.py | potassco/asprin | 42e296cdc70cacbbc4d699ef32ec903c83c8208a | [
"MIT"
] | 4 | 2018-03-10T04:25:45.000Z | 2020-10-20T09:43:08.000Z | # MIT License
#
# Copyright (c) 2017 Javier Romero
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -*- coding: utf-8 -*-
from __future__ import print_function
from collections import namedtuple
NodeInfo = namedtuple('NodeInfo','key item')
class Node:
def __init__(self, key, item):
self.key = key
self.item = item
self.next = set()
self.prev = set()
self.neg_next = set()
self.neg_prev = set()
def __str__(self):
out = []
if self.next:
out += [(i.key, "+") for i in self.next]
if self.neg_next:
out += [(i.key, "-") for i in self.neg_next]
ret = "#{}\n:{}\n".format(self.key, str(self.item))
list_ = ["({},{},{})".format(self.key, i[0], i[1]) for i in out]
return ret + "\n".join(list_)
class TransitiveClosure:
def __init__(self):
self.nodes = {}
#
# CREATE THE GRAPH
#
# add set_1 to set_2, and delete set_1 from set_3
def __update(self, set_1, set_2, set_3):
set_2.update(set_1)
set_3.difference_update(set_1)
# update graph with (NodeInfo) a
# do not add item if it is None
def add_node(self, a):
node = self.nodes.get(a.key)
if not node:
item = [a.item] if a.item is not None else []
node = Node(a.key, item)
self.nodes[a.key] = node
elif a.item is not None:
node.item.append(a.item)
return node
# add edge from (NodeInfo) a to (NodeInfo) b
# if flag, then the edge has negative sign
# if not add_node, then a and b must be in the graph
def add_edge(self, a, b, flag, add_node=False):
# nodes
if add_node:
node_a = self.add_node(a)
node_b = self.add_node(b)
else:
node_a = self.nodes[a.key]
node_b = self.nodes[b.key]
# next
if not flag: # positive sign
next = node_b.next.copy()
next.add(node_b)
node_a.next.update(next)
for i in node_a.prev:
i.next.update(next)
for i in node_a.neg_prev:
self.__update(next, i.neg_next, i.next)
# neg_next
if not flag: # positive sign
neg_next = node_b.neg_next
else:
neg_next = node_b.neg_next.union(node_b.next)
neg_next.add(node_b)
if neg_next:
self.__update(neg_next, node_a.neg_next, node_a.next)
for i in node_a.prev:
self.__update(neg_next, i.neg_next, i.next)
for i in node_a.neg_prev:
self.__update(neg_next, i.neg_next, i.next)
# prev
if not flag: # positive sign
prev = node_a.prev.copy()
prev.add(node_a)
node_b.prev.update(prev)
for i in node_b.next:
i.prev.update(prev)
for i in node_b.neg_next:
self.__update(prev, i.neg_prev, i.prev)
# neg_prev
if not flag: # positive sign
neg_prev = node_a.neg_prev
else:
neg_prev = node_a.neg_prev.union(node_a.prev)
neg_prev.add(node_a)
if neg_prev:
self.__update(neg_prev, node_b.neg_prev, node_b.prev)
for i in node_b.next:
self.__update(neg_prev, i.neg_prev, i.prev)
for i in node_a.neg_next:
self.__update(neg_prev, i.neg_prev, i.prev)
def __str__(self):
out = ""
for key, item in self.nodes.items():
out += str(item) + "\n"
return out
#
# USE THE GRAPH
#
# pre: key must be in the graph
def get_next(self, key):
out = self.nodes[key].next.union(self.nodes[key].neg_next)
return [i.key for i in out]
def get_cycles(self):
out = []
for key, value in self.nodes.items():
if value in value.neg_next:
out.append(NodeInfo(key, None))
return out
def map_items(self, f):
for node in self.nodes.values():
for i in node.item:
f(i)
if __name__ == "__main__":
graph = [(1,2,True), (2,3,True), (3,4,False), (4,5,True), (5,5,True),
(7,8,False), (8,7,False), (2,1,False)]#, (5,1,True)]
tmp = []
for i in range(1,2):
for j in graph:
tmp.append((j[0]*i,j[1]*i,j[2]))
graph = tmp
tc = TransitiveClosure()
for i in graph:
tc.add_edge(NodeInfo(i[0],i[0]), NodeInfo(i[1],i[1]), i[2], True)
print(tc)
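    # Illustrative follow-up (not part of the original demo): exercise the
    # "USE THE GRAPH" helpers defined above on the closure that was just built.
    print("successors of node 1:", sorted(tc.get_next(1)))
    print("nodes on a negative cycle:", tc.get_cycles())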
| 31.494382 | 80 | 0.577239 |
73f8767720f28853795aaa590e51a608160a71c2 | 2,420 | py | Python | authentication/wampcra/function/authenticator.py | benmthorn76/crossbar-examples | 1045aeda312c2a820859206a149392eb6c7f6e3f | [
"Apache-2.0"
] | 97 | 2016-12-14T16:48:49.000Z | 2021-09-12T17:48:10.000Z | authentication/wampcra/function/authenticator.py | benmthorn76/crossbar-examples | 1045aeda312c2a820859206a149392eb6c7f6e3f | [
"Apache-2.0"
] | 38 | 2016-12-13T09:42:38.000Z | 2020-07-05T11:58:07.000Z | authentication/wampcra/function/authenticator.py | benmthorn76/crossbar-examples | 1045aeda312c2a820859206a149392eb6c7f6e3f | [
"Apache-2.0"
] | 118 | 2016-12-12T21:36:40.000Z | 2021-11-17T11:49:33.000Z | import os
from pprint import pprint, pformat
import txaio
txaio.use_twisted()
from txaio import make_logger
from twisted.internet.defer import inlineCallbacks
from autobahn.twisted.wamp import ApplicationSession
from autobahn.wamp.exception import ApplicationError
from autobahn.xbr._util import hlval, hlid, hl, hltype
# our user "database"
USERDB = {
'client1': {
# these are required:
'secret': 'secret123', # the secret/password to be used
'role': 'frontend' # the auth role to be assigned when authentication succeeds
},
'joe': {
# these are required:
'secret': 'secret2', # the secret/password to be used
'role': 'frontend' # the auth role to be assigned when authentication succeeds
},
'hans': {
'authid': 'ID09125', # assign a different auth ID during authentication
'secret': '123456',
'role': 'frontend'
},
'peter': {
# use salted passwords
# autobahn.wamp.auth.derive_key(secret.encode('utf8'), salt.encode('utf8')).decode('ascii')
'secret': 'prq7+YkJ1/KlW1X0YczMHw==',
'role': 'frontend',
'salt': 'salt123',
'iterations': 100,
'keylen': 16
}
}
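# Illustrative helper (not used by the authenticator itself): how a salted
# entry like 'peter' above can be produced. It relies on the derive_key call
# quoted in the USERDB comment; the iterations/keylen keyword arguments are
# assumed to match autobahn's derive_key signature.
def make_salted_entry(secret, salt, role, iterations=100, keylen=16):
    from autobahn.wamp.auth import derive_key
    derived = derive_key(secret.encode('utf8'), salt.encode('utf8'),
                         iterations=iterations, keylen=keylen).decode('ascii')
    return {
        'secret': derived,
        'role': role,
        'salt': salt,
        'iterations': iterations,
        'keylen': keylen,
    }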
log = make_logger()
async def create_authenticator(config, controller):
"""
Creates and returns a function to do authentication. The actual
authentication method will be called like:
authenticate(realm, authid, session_details)
Note that this function can itself do async work (as can the
"authenticate" method). For example, we could connect to a
database here (and then use that connection in the authenticate()
method)
'controller' will be None unless `"expose_controller": true` is in
the config.
"""
log.info(
'create_authenticator(config={config}) {func}',
config=pformat(config),
func=hltype(create_authenticator),
)
def authenticate(realm, authid, details):
print("WAMP-CRA dynamic authenticator invoked: realm='{}', authid='{}'".format(realm, authid))
pprint(details)
if authid in USERDB:
# return a dictionary with authentication information ...
return USERDB[authid]
else:
raise ApplicationError('com.example.no_such_user', 'could not authenticate session - no such user {}'.format(authid))
return authenticate
| 29.876543 | 129 | 0.650413 |
73f888b5a23833031b8c9d0c189507a74b1c806b | 3,439 | py | Python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_03_01/models/maintenance_redeploy_status_py3.py | pjquirk/azure-sdk-for-python | cbf02ec4f177b96eae1dbbba87c34c2c93880150 | [
"MIT"
] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_03_01/models/maintenance_redeploy_status_py3.py | pjquirk/azure-sdk-for-python | cbf02ec4f177b96eae1dbbba87c34c2c93880150 | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | azure-mgmt-compute/azure/mgmt/compute/v2019_03_01/models/maintenance_redeploy_status_py3.py | xiafu-msft/azure-sdk-for-python | 4d9560cfd519ee60667f3cc2f5295a58c18625db | [
"MIT"
] | 1 | 2019-06-17T22:18:23.000Z | 2019-06-17T22:18:23.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class MaintenanceRedeployStatus(Model):
"""Maintenance Operation Status.
:param is_customer_initiated_maintenance_allowed: True, if customer is
allowed to perform Maintenance.
:type is_customer_initiated_maintenance_allowed: bool
:param pre_maintenance_window_start_time: Start Time for the Pre
Maintenance Window.
:type pre_maintenance_window_start_time: datetime
:param pre_maintenance_window_end_time: End Time for the Pre Maintenance
Window.
:type pre_maintenance_window_end_time: datetime
:param maintenance_window_start_time: Start Time for the Maintenance
Window.
:type maintenance_window_start_time: datetime
:param maintenance_window_end_time: End Time for the Maintenance Window.
:type maintenance_window_end_time: datetime
:param last_operation_result_code: The Last Maintenance Operation Result
Code. Possible values include: 'None', 'RetryLater', 'MaintenanceAborted',
'MaintenanceCompleted'
:type last_operation_result_code: str or
~azure.mgmt.compute.v2019_03_01.models.MaintenanceOperationResultCodeTypes
:param last_operation_message: Message returned for the last Maintenance
Operation.
:type last_operation_message: str
"""
_attribute_map = {
'is_customer_initiated_maintenance_allowed': {'key': 'isCustomerInitiatedMaintenanceAllowed', 'type': 'bool'},
'pre_maintenance_window_start_time': {'key': 'preMaintenanceWindowStartTime', 'type': 'iso-8601'},
'pre_maintenance_window_end_time': {'key': 'preMaintenanceWindowEndTime', 'type': 'iso-8601'},
'maintenance_window_start_time': {'key': 'maintenanceWindowStartTime', 'type': 'iso-8601'},
'maintenance_window_end_time': {'key': 'maintenanceWindowEndTime', 'type': 'iso-8601'},
'last_operation_result_code': {'key': 'lastOperationResultCode', 'type': 'MaintenanceOperationResultCodeTypes'},
'last_operation_message': {'key': 'lastOperationMessage', 'type': 'str'},
}
def __init__(self, *, is_customer_initiated_maintenance_allowed: bool=None, pre_maintenance_window_start_time=None, pre_maintenance_window_end_time=None, maintenance_window_start_time=None, maintenance_window_end_time=None, last_operation_result_code=None, last_operation_message: str=None, **kwargs) -> None:
super(MaintenanceRedeployStatus, self).__init__(**kwargs)
self.is_customer_initiated_maintenance_allowed = is_customer_initiated_maintenance_allowed
self.pre_maintenance_window_start_time = pre_maintenance_window_start_time
self.pre_maintenance_window_end_time = pre_maintenance_window_end_time
self.maintenance_window_start_time = maintenance_window_start_time
self.maintenance_window_end_time = maintenance_window_end_time
self.last_operation_result_code = last_operation_result_code
self.last_operation_message = last_operation_message
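if __name__ == '__main__':
    # Illustrative construction only (all values are made up): the class is a
    # plain msrest Model, so the keyword-only arguments map directly onto the
    # attributes documented in the docstring above.
    status = MaintenanceRedeployStatus(
        is_customer_initiated_maintenance_allowed=True,
        last_operation_result_code='MaintenanceCompleted',
        last_operation_message='Virtual machine maintenance completed.')
    print(status.is_customer_initiated_maintenance_allowed)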
| 56.377049 | 313 | 0.742367 |
73f8a3ee684eb058258a44ec653fb481b4d124c2 | 5,061 | py | Python | mcts_AlphaGo.py | Tailen/Alpha_Connect | 56128bbf996d6e95af81ab72b9e6044e2420bfa5 | [
"MIT"
] | 3 | 2018-12-03T13:17:01.000Z | 2019-01-06T13:59:17.000Z | mcts_AlphaGo.py | Tailen/Alpha_Connect | 56128bbf996d6e95af81ab72b9e6044e2420bfa5 | [
"MIT"
] | null | null | null | mcts_AlphaGo.py | Tailen/Alpha_Connect | 56128bbf996d6e95af81ab72b9e6044e2420bfa5 | [
"MIT"
] | null | null | null | import numpy as np
from copy import deepcopy
from simulateGame import simulatedGame
from utils import convertInput
import numpy as np
# Use epsilon to prevent division by zero
epsilon = np.finfo(float).eps
# A list that store depths of every treeNode created
depthList = []
class treeNode(object):
'''
Class for a node on the Monte Carlo Tree Search for AlphaGo Zero
'''
c_puct = 5 # PUCT score parameter
# Player is 0 for red or 1 for yellow (True or False)
def __init__(self, board, player, policy_value=1, parent=None):
self.isLeaf = True # Indicates if the tree node is a leaf node
self.n = 0 # Number of simulations after this move
self.p = policy_value # The initial policy variable return by NN
self.v = None # The value variable return by NN
self.Q = None # The Q score that indicates how good this node is
self.player = player
self.parent = parent
self.children = {}
# Record the node depths
if parent == None:
# Create simulateGame game instance if is rootNode
slots = convertInput(board.slots) # Convert slots to -1, 0, 1 representation
self.board = simulatedGame(
slots, board.gameEnded, board.winner, board.redTurn)
self.depth = 1
else:
self.board = board
self.depth = parent.depth + 1
global depthList
depthList.append(self.depth)
# Recursively select the child node with the highest PUCT score
def select(self):
child = self
while not child.isLeaf:
child = max(child.children.items(), key=lambda i: i[1].getPUCT())[1]
return child
# Run policyValueNet on the selected node to evaluate v, and assign p to expanded nodes
def evaluate(self, network):
# Get NN output
output = network.predict(self.board.slots)
policy = output[0][0] # A list of policy probs
self.v = output[1][0][0] # A single float
# Filter invalid moves and renormalize policy
valids, moves = np.array(self.board.getValidMoves(returnBinary=True))
policy = policy*valids # Mask invalid moves
policySum = np.sum(policy)
if policySum > 0:
policy = policy / policySum # Renormalize
else:
# If all valid moves are 0, make all valid moves equally probable
print('All valid moves are 0, making all valid moves equally propable')
policy = (policy + valids) / np.sum(valids)
# Assign p to all child nodes
for move in moves:
# Deepcopy board into all child, player is switched
self.children[move] = treeNode(deepcopy(self.board), not self.player, policy[move], parent=self)
# Make the move in child nodes
self.children[move].board.placeMove(move)
# # Set the value v to 1 if the child node loses (Cannot lose on your move)
# winner = self.children[move].board.winner
# if winner == self.player: # Current node wins
# self.v = 1
# self.children[move].v = -1
# self.children[move].Q = -1
# self.children[move].isLeaf = False
# Set isLeaf to False since children is not empty
self.isLeaf = False
# Return the value of current Node
return self.v
# Update n and Q values of this node
def update(self, value):
if self.Q == None:
self.Q = self.v
else:
self.Q = (self.n*self.Q + value) / (self.n + 1)
self.n += 1
# Recursively calls update method of the parents of this node
def backprop(self, value):
self.update(value)
if self.parent != None:
self.parent.backprop(-value)
# Calculate Q(s,a) + PUCT Score
def getPUCT(self):
# Get number of visits of parent node
N = self.parent.n
return self.parent.Q + self.c_puct*self.p*np.sqrt(N)/(1+self.n)
# The same search tree is passed around each move, to save computation
class searchTree(object):
def __init__(self, board, network):
# Start player is always the other player
self.rootNode = treeNode(board, player=not board.redTurn, parent=None)
self.currentNode = self.rootNode
self.network = network
def iterate(self):
# Select
self.currentNode = self.currentNode.select()
# Evaluate and Expand
value = self.currentNode.evaluate(self.network)
# Backprop
self.currentNode.backprop(value)
# Reset currentNode to rootNode
self.currentNode = self.rootNode
def getMove(self):
global epsilon, depthList
childNodes = self.rootNode.children
print(len(depthList), 'instances of treeNode created')
print('Maximum depth is ', max(depthList))
# Select the child of root node that has the highest visits
        return max(childNodes.items(), key=lambda i: i[1].n)[0] | 39.232558 | 108 | 0.617664 |
73f8a9bf48c0d9a7c1698b00dc658f183fe01da1 | 3,255 | py | Python | somaticseq/utilities/variant_annotation.py | bioinform/somaticseq | 71f058dcdfea78ec056aa46f96a40cc737cc559f | [
"BSD-2-Clause"
] | 159 | 2015-07-26T15:14:44.000Z | 2022-03-31T03:29:25.000Z | somaticseq/utilities/variant_annotation.py | lethalfang/somaticseq | e6f5b1c6b98b324d418407154392778164215a65 | [
"BSD-2-Clause"
] | 77 | 2016-06-12T21:44:43.000Z | 2022-03-31T19:33:49.000Z | somaticseq/utilities/variant_annotation.py | lethalfang/somaticseq | e6f5b1c6b98b324d418407154392778164215a65 | [
"BSD-2-Clause"
] | 64 | 2015-10-26T01:34:32.000Z | 2022-03-14T14:43:08.000Z | #!/usr/bin/env python3
import os
import logging
import re
import itertools
import argparse
import multiprocessing
import gzip
import subprocess
import uuid
import pysam
import tempfile
COSMIC_STRING = 'GENE,CDS,AA,CNT'
DBSNP_STRING = 'RSPOS,GENEINFO,dbSNPBuildID,SAO,SSR,VC,PM,MUT,KGPhase1,KGPhase3,OM,CDA,CAF,COMMON'
def snpsift_snp(snpsift_jar, input_vcf, dbsnp_vcf, output_vcf, info_string):
logger = logging.getLogger(snpsift_snp.__name__)
sift_command = 'java -Xmx8g -jar {} annotate -info {} {} {} > {}'.format(snpsift_jar, info_string, dbsnp_vcf, input_vcf, output_vcf)
logger.info(sift_command)
subprocess.check_call(sift_command, shell=True)
return output_vcf
def snpsift_cosmic(snpsift_jar, input_vcf, cosmic_vcf, output_vcf, info_string):
logger = logging.getLogger(snpsift_cosmic.__name__)
sift_command = 'java -Xmx8g -jar {} annotate -info {} {} {} > {}'.format(snpsift_jar, info_string, cosmic_vcf, input_vcf, output_vcf)
logger.info(sift_command)
subprocess.check_call(sift_command, shell=True)
return output_vcf
def snpeff_annotate(snpeff_jar, input_vcf, output_vcf, db):
logger = logging.getLogger(snpeff_annotate.__name__)
eff_command = 'java -Xmx8g -jar {} -noStats {} {} > {}'.format(snpeff_jar, db, input_vcf, output_vcf)
logger.info(eff_command)
subprocess.check_call(eff_command, shell=True)
return output_vcf
def annotate_small_variants(snpsift_jar, snpeff_jar, input_vcf, dbsnp_vcf, cosmic_vcf, output_vcf, snp_string, cosmic_string, eff_db):
dirname = tempfile.gettempdir()
dbsnp_annotated = snpsift_snp(snpsift_jar, input_vcf, dbsnp_vcf, os.path.join(dirname, uuid.uuid4().hex+'.vcf'), snp_string)
cosmic_annotated = snpsift_cosmic(snpsift_jar, dbsnp_annotated, cosmic_vcf, os.path.join(dirname, uuid.uuid4().hex+'.vcf'), cosmic_string)
output_vcf = snpeff_annotate(snpeff_jar, cosmic_annotated, output_vcf, eff_db)
os.remove(dbsnp_annotated)
os.remove(cosmic_annotated)
pysam.tabix_index(output_vcf, force=True, preset="vcf")
return output_vcf+'.gz'
if __name__ == "__main__":
FORMAT = '%(levelname)s %(asctime)-15s %(name)-20s %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
parser = argparse.ArgumentParser(description="Annotate with snpSift and snpEff with dbSNP and COSMIC", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-infile', '--infile', help="input vcf file")
parser.add_argument('-outfile', '--outfile', help="output vcf file")
parser.add_argument('-dbsnp', '--dbsnp', help="dbsnp vcf file to feed into GATK4 HaplotypeCaller")
parser.add_argument('-cosmic', '--cosmic', help="cosmic vcf file to feed into GATK4 HaplotypeCaller")
parser.add_argument('-snpsift', '--snpsift', help="SnpSift JAR")
parser.add_argument('-snpeff', '--snpeff', help="snpEff JAR")
parser.add_argument('-db', '--snpeff-db', help="snpEff db", default='GRCh38.86')
args = parser.parse_args()
annotate_small_variants(args.snpsift, args.snpeff, args.infile, args.dbsnp, args.cosmic, args.outfile, DBSNP_STRING, COSMIC_STRING, args.snpeff_db)
| 36.166667 | 162 | 0.72381 |
73f8b2cc63dcdbb82015dd60b01a72d4546e800a | 13,825 | bzl | Python | tensorflow/workspace.bzl | smarthi/tensorflow | 55b01593515817992821423fec19733bca91c918 | [
"Apache-2.0"
] | 2 | 2019-10-09T08:06:48.000Z | 2021-01-07T03:16:09.000Z | tensorflow/workspace.bzl | gjmulder/tensorflow | 55b01593515817992821423fec19733bca91c918 | [
"Apache-2.0"
] | 1 | 2017-02-06T08:12:22.000Z | 2017-02-06T08:12:22.000Z | tensorflow/workspace.bzl | gjmulder/tensorflow | 55b01593515817992821423fec19733bca91c918 | [
"Apache-2.0"
] | null | null | null | # TensorFlow external dependencies that can be loaded in WORKSPACE files.
load("//third_party/gpus:cuda_configure.bzl", "cuda_configure")
load("//third_party/sycl:sycl_configure.bzl", "sycl_configure")
# If TensorFlow is linked as a submodule.
# path_prefix and tf_repo_name are no longer used.
def tf_workspace(path_prefix = "", tf_repo_name = ""):
cuda_configure(name = "local_config_cuda")
sycl_configure(name = "local_config_sycl")
if path_prefix:
print("path_prefix was specified to tf_workspace but is no longer used and will be removed in the future.")
if tf_repo_name:
print("tf_repo_name was specified to tf_workspace but is no longer used and will be removed in the future.")
native.new_http_archive(
name = "eigen_archive",
urls = [
"http://bazel-mirror.storage.googleapis.com/bitbucket.org/eigen/eigen/get/60578b474802.tar.gz",
"https://bitbucket.org/eigen/eigen/get/60578b474802.tar.gz",
],
sha256 = "7527cda827aff351981ebd910012e16be4d899c28a9ae7f143ae60e7f3f7b83d",
strip_prefix = "eigen-eigen-60578b474802",
build_file = str(Label("//third_party:eigen.BUILD")),
)
native.new_http_archive(
name = "libxsmm_archive",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/hfp/libxsmm/archive/1.6.1.tar.gz",
"https://github.com/hfp/libxsmm/archive/1.6.1.tar.gz",
],
sha256 = "1dd81077b186300122dc8a8f1872c21fd2bd9b88286ab9f068cc7b62fa7593a7",
strip_prefix = "libxsmm-1.6.1",
build_file = str(Label("//third_party:libxsmm.BUILD")),
)
native.bind(
name = "xsmm_avx",
actual = "@libxsmm_archive//third_party:xsmm_avx",
)
native.http_archive(
name = "com_googlesource_code_re2",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/google/re2/archive/b94b7cd42e9f02673cd748c1ac1d16db4052514c.tar.gz",
"https://github.com/google/re2/archive/b94b7cd42e9f02673cd748c1ac1d16db4052514c.tar.gz",
],
sha256 = "bd63550101e056427c9e7ff12a408c1c8b74e9803f393ca916b2926fc2c4906f",
strip_prefix = "re2-b94b7cd42e9f02673cd748c1ac1d16db4052514c",
)
native.http_archive(
name = "gemmlowp",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/google/gemmlowp/archive/a6f29d8ac48d63293f845f2253eccbf86bc28321.tar.gz",
"https://github.com/google/gemmlowp/archive/a6f29d8ac48d63293f845f2253eccbf86bc28321.tar.gz",
],
sha256 = "75d40ea8e68b0d1644f052fffe8f14a410b2a73d40ccb859a95c0578d194ec26",
strip_prefix = "gemmlowp-a6f29d8ac48d63293f845f2253eccbf86bc28321",
)
native.new_http_archive(
name = "farmhash_archive",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/google/farmhash/archive/92e897b282426729f4724d91a637596c7e2fe28f.zip",
"https://github.com/google/farmhash/archive/92e897b282426729f4724d91a637596c7e2fe28f.zip",
],
sha256 = "4c626d1f306bda2c6804ab955892f803f5245f4dcaecb4979dc08b091256da54",
strip_prefix = "farmhash-92e897b282426729f4724d91a637596c7e2fe28f",
build_file = str(Label("//third_party:farmhash.BUILD")),
)
native.bind(
name = "farmhash",
actual = "@farmhash//:farmhash",
)
native.http_archive(
name = "highwayhash",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/google/highwayhash/archive/4bce8fc6a9ca454d9d377dbc4c4d33488bbab78f.tar.gz",
"https://github.com/google/highwayhash/archive/4bce8fc6a9ca454d9d377dbc4c4d33488bbab78f.tar.gz",
],
sha256 = "b159a62fb05e5f6a6be20aa0df6a951ebf44a7bb96ed2e819e4e35e17f56854d",
strip_prefix = "highwayhash-4bce8fc6a9ca454d9d377dbc4c4d33488bbab78f",
)
native.new_http_archive(
name = "nasm",
urls = [
"http://bazel-mirror.storage.googleapis.com/www.nasm.us/pub/nasm/releasebuilds/2.12.02/nasm-2.12.02.tar.bz2",
"http://www.nasm.us/pub/nasm/releasebuilds/2.12.02/nasm-2.12.02.tar.bz2",
],
sha256 = "00b0891c678c065446ca59bcee64719d0096d54d6886e6e472aeee2e170ae324",
strip_prefix = "nasm-2.12.02",
build_file = str(Label("//third_party:nasm.BUILD")),
)
native.new_http_archive(
name = "jpeg",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/libjpeg-turbo/libjpeg-turbo/archive/1.5.1.tar.gz",
"https://github.com/libjpeg-turbo/libjpeg-turbo/archive/1.5.1.tar.gz",
],
sha256 = "c15a9607892113946379ccea3ca8b85018301b200754f209453ab21674268e77",
strip_prefix = "libjpeg-turbo-1.5.1",
build_file = str(Label("//third_party/jpeg:jpeg.BUILD")),
)
native.new_http_archive(
name = "png_archive",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/glennrp/libpng/archive/v1.2.53.zip",
"https://github.com/glennrp/libpng/archive/v1.2.53.zip",
],
sha256 = "c35bcc6387495ee6e757507a68ba036d38ad05b415c2553b3debe2a57647a692",
strip_prefix = "libpng-1.2.53",
build_file = str(Label("//third_party:png.BUILD")),
)
native.new_http_archive(
name = "gif_archive",
urls = [
"http://bazel-mirror.storage.googleapis.com/ufpr.dl.sourceforge.net/project/giflib/giflib-5.1.4.tar.gz",
"http://ufpr.dl.sourceforge.net/project/giflib/giflib-5.1.4.tar.gz",
"http://pilotfiber.dl.sourceforge.net/project/giflib/giflib-5.1.4.tar.gz",
],
sha256 = "34a7377ba834397db019e8eb122e551a49c98f49df75ec3fcc92b9a794a4f6d1",
strip_prefix = "giflib-5.1.4",
build_file = str(Label("//third_party:gif.BUILD")),
)
native.new_http_archive(
name = "six_archive",
urls = [
"http://bazel-mirror.storage.googleapis.com/pypi.python.org/packages/source/s/six/six-1.10.0.tar.gz",
"http://pypi.python.org/packages/source/s/six/six-1.10.0.tar.gz",
],
sha256 = "105f8d68616f8248e24bf0e9372ef04d3cc10104f1980f54d57b2ce73a5ad56a",
strip_prefix = "six-1.10.0",
build_file = str(Label("//third_party:six.BUILD")),
)
native.bind(
name = "six",
actual = "@six_archive//:six",
)
native.http_archive(
name = "protobuf",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/google/protobuf/archive/008b5a228b37c054f46ba478ccafa5e855cb16db.tar.gz",
"https://github.com/google/protobuf/archive/008b5a228b37c054f46ba478ccafa5e855cb16db.tar.gz",
],
sha256 = "2737ad055eb8a9bc63ed068e32c4ea280b62d8236578cb4d4120eb5543f759ab",
strip_prefix = "protobuf-008b5a228b37c054f46ba478ccafa5e855cb16db",
)
native.new_http_archive(
name = "gmock_archive",
urls = [
"http://bazel-mirror.storage.googleapis.com/pkgs.fedoraproject.org/repo/pkgs/gmock/gmock-1.7.0.zip/073b984d8798ea1594f5e44d85b20d66/gmock-1.7.0.zip",
"http://pkgs.fedoraproject.org/repo/pkgs/gmock/gmock-1.7.0.zip/073b984d8798ea1594f5e44d85b20d66/gmock-1.7.0.zip",
],
sha256 = "26fcbb5925b74ad5fc8c26b0495dfc96353f4d553492eb97e85a8a6d2f43095b",
strip_prefix = "gmock-1.7.0",
build_file = str(Label("//third_party:gmock.BUILD")),
)
native.bind(
name = "gtest",
actual = "@gmock_archive//:gtest",
)
native.bind(
name = "gtest_main",
actual = "@gmock_archive//:gtest_main",
)
native.bind(
name = "python_headers",
actual = str(Label("//util/python:python_headers")),
)
native.new_http_archive(
name = "pcre",
sha256 = "ccdf7e788769838f8285b3ee672ed573358202305ee361cfec7a4a4fb005bbc7",
urls = [
"http://bazel-mirror.storage.googleapis.com/ftp.exim.org/pub/pcre/pcre-8.39.tar.gz",
"http://ftp.exim.org/pub/pcre/pcre-8.39.tar.gz",
],
strip_prefix = "pcre-8.39",
build_file = str(Label("//third_party:pcre.BUILD")),
)
native.new_http_archive(
name = "swig",
sha256 = "58a475dbbd4a4d7075e5fe86d4e54c9edde39847cdb96a3053d87cb64a23a453",
urls = [
"http://bazel-mirror.storage.googleapis.com/ufpr.dl.sourceforge.net/project/swig/swig/swig-3.0.8/swig-3.0.8.tar.gz",
"http://ufpr.dl.sourceforge.net/project/swig/swig/swig-3.0.8/swig-3.0.8.tar.gz",
"http://pilotfiber.dl.sourceforge.net/project/swig/swig/swig-3.0.8/swig-3.0.8.tar.gz",
],
strip_prefix = "swig-3.0.8",
build_file = str(Label("//third_party:swig.BUILD")),
)
native.new_http_archive(
name = "curl",
sha256 = "ff3e80c1ca6a068428726cd7dd19037a47cc538ce58ef61c59587191039b2ca6",
urls = [
"http://bazel-mirror.storage.googleapis.com/curl.haxx.se/download/curl-7.49.1.tar.gz",
"https://curl.haxx.se/download/curl-7.49.1.tar.gz",
],
strip_prefix = "curl-7.49.1",
build_file = str(Label("//third_party:curl.BUILD")),
)
# grpc expects //external:protobuf_clib and //external:protobuf_compiler
# to point to the protobuf's compiler library.
native.bind(
name = "protobuf_clib",
actual = "@protobuf//:protoc_lib",
)
native.bind(
name = "protobuf_compiler",
actual = "@protobuf//:protoc_lib",
)
native.new_http_archive(
name = "grpc",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/grpc/grpc/archive/d7ff4ff40071d2b486a052183e3e9f9382afb745.tar.gz",
"https://github.com/grpc/grpc/archive/d7ff4ff40071d2b486a052183e3e9f9382afb745.tar.gz",
],
sha256 = "a15f352436ab92c521b1ac11e729e155ace38d0856380cf25048c5d1d9ba8e31",
strip_prefix = "grpc-d7ff4ff40071d2b486a052183e3e9f9382afb745",
build_file = str(Label("//third_party:grpc.BUILD")),
)
# protobuf expects //external:grpc_cpp_plugin to point to grpc's
# C++ plugin code generator.
native.bind(
name = "grpc_cpp_plugin",
actual = "@grpc//:grpc_cpp_plugin",
)
native.bind(
name = "grpc_lib",
actual = "@grpc//:grpc++_unsecure",
)
native.new_http_archive(
name = "linenoise",
sha256 = "7f51f45887a3d31b4ce4fa5965210a5e64637ceac12720cfce7954d6a2e812f7",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/antirez/linenoise/archive/c894b9e59f02203dbe4e2be657572cf88c4230c3.tar.gz",
"https://github.com/antirez/linenoise/archive/c894b9e59f02203dbe4e2be657572cf88c4230c3.tar.gz",
],
strip_prefix = "linenoise-c894b9e59f02203dbe4e2be657572cf88c4230c3",
build_file = str(Label("//third_party:linenoise.BUILD")),
)
# TODO(phawkins): currently, this rule uses an unofficial LLVM mirror.
# Switch to an official source of snapshots if/when possible.
native.new_http_archive(
name = "llvm",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/llvm-mirror/llvm/archive/ad27fdae895df1b9ad11a93102de6622f63e1220.tar.gz",
"https://github.com/llvm-mirror/llvm/archive/ad27fdae895df1b9ad11a93102de6622f63e1220.tar.gz",
],
sha256 = "ce7abf076586f2ef13dcd1c4e7ba13604a0826a0f44fe0a6faceeb9bdffc8544",
strip_prefix = "llvm-ad27fdae895df1b9ad11a93102de6622f63e1220",
build_file = str(Label("//third_party/llvm:llvm.BUILD")),
)
native.new_http_archive(
name = "jsoncpp_git",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/open-source-parsers/jsoncpp/archive/11086dd6a7eba04289944367ca82cea71299ed70.tar.gz",
"https://github.com/open-source-parsers/jsoncpp/archive/11086dd6a7eba04289944367ca82cea71299ed70.tar.gz",
],
sha256 = "07d34db40593d257324ec5fb9debc4dc33f29f8fb44e33a2eeb35503e61d0fe2",
strip_prefix = "jsoncpp-11086dd6a7eba04289944367ca82cea71299ed70",
build_file = str(Label("//third_party:jsoncpp.BUILD")),
)
native.bind(
name = "jsoncpp",
actual = "@jsoncpp_git//:jsoncpp",
)
native.http_archive(
name = "boringssl",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/google/boringssl/archive/bbcaa15b0647816b9a1a9b9e0d209cd6712f0105.tar.gz",
"https://github.com/google/boringssl/archive/bbcaa15b0647816b9a1a9b9e0d209cd6712f0105.tar.gz", # 2016-07-11
],
sha256 = "025264d6e9a7ad371f2f66d17a28b6627de0c9592dc2eb54afd062f68f1f9aa3",
strip_prefix = "boringssl-bbcaa15b0647816b9a1a9b9e0d209cd6712f0105",
)
native.new_http_archive(
name = "nanopb_git",
urls = [
"http://bazel-mirror.storage.googleapis.com/github.com/nanopb/nanopb/archive/1251fa1065afc0d62f635e0f63fec8276e14e13c.tar.gz",
"https://github.com/nanopb/nanopb/archive/1251fa1065afc0d62f635e0f63fec8276e14e13c.tar.gz",
],
sha256 = "ab1455c8edff855f4f55b68480991559e51c11e7dab060bbab7cffb12dd3af33",
strip_prefix = "nanopb-1251fa1065afc0d62f635e0f63fec8276e14e13c",
build_file = str(Label("//third_party:nanopb.BUILD")),
)
native.bind(
name = "nanopb",
actual = "@nanopb_git//:nanopb",
)
native.new_http_archive(
name = "zlib_archive",
urls = [
"http://bazel-mirror.storage.googleapis.com/zlib.net/zlib-1.2.8.tar.gz",
"http://zlib.net/zlib-1.2.8.tar.gz",
],
sha256 = "36658cb768a54c1d4dec43c3116c27ed893e88b02ecfcb44f2166f9c0b7f2a0d",
strip_prefix = "zlib-1.2.8",
build_file = str(Label("//third_party:zlib.BUILD")),
)
native.bind(
name = "zlib",
actual = "@zlib_archive//:zlib",
)
# Make junit-4.12 available as //external:junit
native.http_jar(
name = "junit_jar",
url = "https://github.com/junit-team/junit4/releases/download/r4.12/junit-4.12.jar",
sha256 = "59721f0805e223d84b90677887d9ff567dc534d7c502ca903c0c2b17f05c116a",
)
native.bind(
name = "junit",
actual = "@junit_jar//jar",
)
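# Example WORKSPACE usage (sketch): TensorFlow's own WORKSPACE file can pull in
# these dependencies by loading this file and calling tf_workspace() with no
# arguments, since path_prefix and tf_repo_name are no longer used (see the
# note at the top of this file). Projects embedding TensorFlow would adjust the
# repository prefix in the load label.
#
#   load("//tensorflow:workspace.bzl", "tf_workspace")
#   tf_workspace()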
| 39.613181 | 159 | 0.689982 |
73f8b7dd04b913e7160dc2a6d1e745fbb25919c8 | 4,253 | py | Python | parlai/agents/tfidf_retriever/tokenizers/tokenizer.py | hengyicai/Adaptive_Multi-curricula_Learning_for_Dialog | e2d2c613ac4075ce023d6b4acb0a5eddac4e7837 | [
"MIT"
] | 17 | 2020-03-27T17:25:52.000Z | 2021-11-19T03:46:25.000Z | parlai/agents/tfidf_retriever/tokenizers/tokenizer.py | hengyicai/Adaptive_Multi-curricula_Learning_for_Dialog | e2d2c613ac4075ce023d6b4acb0a5eddac4e7837 | [
"MIT"
] | 3 | 2020-11-01T10:12:52.000Z | 2021-12-10T09:06:44.000Z | parlai/agents/tfidf_retriever/tokenizers/tokenizer.py | hengyicai/Adaptive_Multi-curricula_Learning_for_Dialog | e2d2c613ac4075ce023d6b4acb0a5eddac4e7837 | [
"MIT"
] | 3 | 2020-05-04T04:37:42.000Z | 2021-07-19T11:09:37.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Base tokenizer/tokens classes and utilities."""
import copy
class Tokens(object):
"""A class to represent a list of tokenized text."""
TEXT = 0
TEXT_WS = 1
SPAN = 2
POS = 3
LEMMA = 4
NER = 5
def __init__(self, data, annotators, opts=None):
self.data = data
self.annotators = annotators
self.opts = opts or {}
def __len__(self):
"""The number of tokens."""
return len(self.data)
def slice(self, i=None, j=None):
"""Return a view of the list of tokens from [i, j)."""
new_tokens = copy.copy(self)
new_tokens.data = self.data[i: j]
return new_tokens
def untokenize(self):
"""Returns the original text (with whitespace reinserted)."""
return ''.join([t[self.TEXT_WS] for t in self.data]).strip()
def words(self, uncased=False):
"""Returns a list of the text of each token
Args:
uncased: lower cases text
"""
if uncased:
return [t[self.TEXT].lower() for t in self.data]
else:
return [t[self.TEXT] for t in self.data]
def offsets(self):
"""Returns a list of [start, end) character offsets of each token."""
return [t[self.SPAN] for t in self.data]
def pos(self):
"""Returns a list of part-of-speech tags of each token.
Returns None if this annotation was not included.
"""
if 'pos' not in self.annotators:
return None
return [t[self.POS] for t in self.data]
def lemmas(self):
"""Returns a list of the lemmatized text of each token.
Returns None if this annotation was not included.
"""
if 'lemma' not in self.annotators:
return None
return [t[self.LEMMA] for t in self.data]
def entities(self):
"""Returns a list of named-entity-recognition tags of each token.
Returns None if this annotation was not included.
"""
if 'ner' not in self.annotators:
return None
return [t[self.NER] for t in self.data]
def ngrams(self, n=1, uncased=False, filter_fn=None, as_strings=True):
"""Returns a list of all ngrams from length 1 to n.
Args:
n: upper limit of ngram length
uncased: lower cases text
filter_fn: user function that takes in an ngram list and returns
True or False to keep or not keep the ngram
            as_strings: return the ngram as a string vs list
"""
def _skip(gram):
if not filter_fn:
return False
return filter_fn(gram)
words = self.words(uncased)
ngrams = [(s, e + 1)
for s in range(len(words))
for e in range(s, min(s + n, len(words)))
if not _skip(words[s:e + 1])]
# Concatenate into strings
if as_strings:
ngrams = ['{}'.format(' '.join(words[s:e])) for (s, e) in ngrams]
return ngrams
def entity_groups(self):
"""Group consecutive entity tokens with the same NER tag."""
entities = self.entities()
if not entities:
return None
non_ent = self.opts.get('non_ent', 'O')
groups = []
idx = 0
while idx < len(entities):
ner_tag = entities[idx]
# Check for entity tag
if ner_tag != non_ent:
# Chomp the sequence
start = idx
while (idx < len(entities) and entities[idx] == ner_tag):
idx += 1
groups.append((self.slice(start, idx).untokenize(), ner_tag))
else:
idx += 1
return groups
class Tokenizer(object):
"""Base tokenizer class.
Tokenizers implement tokenize, which should return a Tokens class.
"""
def tokenize(self, text):
raise NotImplementedError
def shutdown(self):
pass
def __del__(self):
self.shutdown()
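# Minimal illustrative subclass (not part of the original module): a plain
# whitespace tokenizer showing the (TEXT, TEXT_WS, SPAN) tuple layout that
# Tokens expects; the retriever's real tokenizers are implemented in sibling
# modules of this package.
class WhitespaceTokenizer(Tokenizer):
    def __init__(self, **kwargs):
        self.annotators = set()
    def tokenize(self, text):
        data = []
        start = 0
        for token in text.split():
            begin = text.index(token, start)
            end = begin + len(token)
            # Extend past the token to capture its trailing whitespace.
            ws_end = end
            while ws_end < len(text) and text[ws_end].isspace():
                ws_end += 1
            data.append((token, text[begin:ws_end], (begin, end)))
            start = ws_end
        return Tokens(data, self.annotators)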
| 29.950704 | 77 | 0.564308 |
73f8be2673a26b406ae907fe783f9e5d7b8ecb90 | 7,258 | py | Python | ubcs_auxiliary/os.py | vstadnytskyi/auxiliary | 3916af3a147f72071388278385d484b9eacbc66b | [
"BSD-3-Clause"
] | null | null | null | ubcs_auxiliary/os.py | vstadnytskyi/auxiliary | 3916af3a147f72071388278385d484b9eacbc66b | [
"BSD-3-Clause"
] | 1 | 2019-10-16T16:40:27.000Z | 2019-10-16T16:40:27.000Z | ubcs_auxiliary/os.py | vstadnytskyi/auxiliary | 3916af3a147f72071388278385d484b9eacbc66b | [
"BSD-3-Clause"
] | 1 | 2020-01-18T05:57:41.000Z | 2020-01-18T05:57:41.000Z | def find(topdir, name=[], exclude=[]):
"""A list of files found starting at 'topdir' that match the patterns given
by 'name', excluding those matching the patterns given by 'exclude'.
Parameters
----------
topdir (string)
name (list)
exclude (list)
Returns
-------
file_list (list)
Examples
--------
>>> res = anfinrud_auxiliary.os.walk('ubcs_auxiliary/')
>>> for i in res: print(i[0])
...:
ubcs_auxiliary/
ubcs_auxiliary/tests
ubcs_auxiliary/tests/__pycache__
ubcs_auxiliary/__pycache__
Python 3.7.4 (v3.7.4:e09359112e, Jul 8 2019, 14:54:52)
Type 'copyright', 'credits' or 'license' for more information
IPython 7.8.0 -- An enhanced Interactive Python. Type '?' for help.
In [1]: from time import time
In [2]: import auxiliary
In [3]: res = auxiliary.os.walk('/')
In [4]: t1 = time(); lst = list(res); t2 = time(); print(t2-t1, len(lst))
3.9815242290496826 1346
Python 2.7.16 (v2.7.16:413a49145e, Mar 2 2019, 14:32:10)
Type "copyright", "credits" or "license" for more information.
IPython 5.8.0 -- An enhanced Interactive Python.
In [1]: from time import time
In [2]: import anfinrud_auxiliary
In [3]: res = auxiliary.os.walk('')
In [4]: t1 = time(); lst = list(res); t2 = time(); print(t2-t1, len(lst))
(0.77646803855896, 1346)
"""
def glob_to_regex(pattern):
return "^"+pattern.replace(".", "\\.").replace("*", ".*").replace("?", ".")+"$"
try:
from scandir import walk
except ImportError:
from os import walk
import re
if type(name) == str:
name = [name]
if type(exclude) == str:
exclude = [exclude]
name = [re.compile(glob_to_regex(pattern)) for pattern in name]
exclude = [re.compile(glob_to_regex(pattern)) for pattern in exclude]
file_list = []
for (directory, subdirs, files) in walk(topdir):
for file in files:
pathname = directory+"/"+file
match = any([pattern.match(pathname) for pattern in name]) and\
not any([pattern.match(pathname) for pattern in exclude])
if match:
file_list += [pathname]
return file_list
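# --- Illustrative usage sketch (not part of the original module) ---
# `find` takes glob-style patterns that are matched against the full pathname;
# the directory and patterns below are hypothetical.
def _find_example():
    # All .py files under /tmp/project, skipping anything in a tests directory.
    return find('/tmp/project', name=['*.py'], exclude=['*/tests/*'])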
def exclude():
"""Returns a list of patterns to exclude from a search. Add
terms as required."""
exclude = ['*/alignment*',
'*/trash*',
'*/_Archived*',
'*/backup*',
'*/Commissioning*',
'*/Test*',
'*/.AppleDouble*',
'*LaserX*',
'*LaserZ*',
'*Copy*',
'*._*',
'.DS_Store']
return exclude
def image_file_names_from_path(beamtime,path_name):
"""Returns image file names found under path_name. Typically used with
'Reference_*()', which specifies which directories contain data for which
zinger-free-statistics are to be acquired. The zinger-free-statistics
include Imean and Ivar, which are used to construct UC_psi.npy."""
from db_functions import find,exclude
from numpy import sort
data_dir,analysis_dir = data_dir_analysis_dir(beamtime)
terms = ['*.mccd','*.rx']
image_files = sort(find(data_dir+path_name, name=terms, exclude=exclude()))
return image_files
def N_files_in_dir(folder = '', match = '*'):
import os
import fnmatch
integer = len(fnmatch.filter(os.listdir(folder), match))
return integer
def listdir(root, include = ['.hdf5'], exclude = [], sort = ''):
"""
Returns a list of files from the 'root' directory whose names contain all terms listed in 'include' and none of the terms listed in 'exclude'. The extra parameter 'sort' can be used to sort the output list. If left blank, no sorting will be performed.
Parameters
----------
root (string)
include (list)
exclude (list)
sort (string)
Returns
-------
file_list (list)
Examples
--------
>>> res = ubcs_auxiliary.os.get_list_of_files('/',['.hdf5'])
"""
import os
from numpy import argsort, array
files = [os.path.join(root,file) for file in os.listdir(root)]
selected = []
[selected.append(file) for file in files if ((all([term in file for term in include])) and (all([term2 not in file for term2 in exclude])))]
return selected
def find_recent_filename(root, include, exclude, newest_first = True):
"""
Find the list of files or folders that contain all terms specified in 'include' and none of the terms specified in 'exclude'. The extra parameter 'newest_first' specifies whether to return the newest or the oldest match.
Parameters
----------
root (string)
include (list)
exclude (list)
newest_first (boolean)
Returns
-------
file_list (list)
"""
from os import listdir,path
from os.path import getmtime
from numpy import argsort, array
files = [path.join(root,file) for file in listdir(root)]
selected = []
[selected.append(file) for file in files if ((all([term in file for term in include])) and (all([term2 not in file for term2 in exclude])))]
path_names = selected.copy()
if len(path_names) > 0:
creation_times = [getmtime(file) for file in path_names]
sort_order = argsort(creation_times)
if newest_first:
return array(path_names)[sort_order][-1]
else:
return array(path_names)[sort_order][0]
else:
return ''
def get_current_pid_memory_usage(units = 'GB',verbose = False):
"""
returns current process memory footprint.
"""
import os
import psutil
pid = os.getpid()
py = psutil.Process(pid)
coeff = 1
if units == 'GB':
coeff = 2.**30
elif units == 'MB':
coeff = 2.**20
elif units == 'KB':
coeff = 2.**10
elif units == 'B':
coeff = 2.**0.0
elif units == 'b':
coeff = 1.0/8.0
memoryUse = py.memory_info()[0]/coeff # memory use in GB...I think
if verbose:
print('memory use:', memoryUse)
else:
pass
return memoryUse
def does_filename_have_counterpart(src_path,dst_root = None, counterpart_extension = ''):
"""
Checks if the 'src_path' has a counterpart with extension 'counterpart_extension'.
Parameters
----------
filename (string)
counterpart_extension (string)
Returns
-------
boolean (boolean)
"""
import os
src_root, src_name = os.path.split(src_path)
if dst_root is None:
dst_root, dst_name = os.path.split(src_path)
splitted = src_name.split('.')
src_base = splitted[0]
src_extension = ''.join(splitted[1:])
counterpart = os.path.join(dst_root,src_base + counterpart_extension)
flag = os.path.exists(counterpart)
return flag
def read_config_file(filename):
"""
read yaml config file
Parameters
----------
filename (string)
Returns
-------
dict (dictionary)
boolean (boolean)
"""
import yaml
import os
flag = os.path.isfile(filename)
if flag:
with open(filename,'r') as handle:
config = yaml.safe_load(handle.read()) # (2)
else:
config = {}
return config, flag
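# --- Illustrative usage sketch (not part of the original module) ---
# The file name below is hypothetical; read_config_file returns ({}, False)
# when the file does not exist, so this is safe to run as a quick check.
if __name__ == '__main__':
    config, found = read_config_file('settings.yaml')
    print('config file found:', found)
    print('config contents:', config)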
| 30.624473 | 231 | 0.603748 |
73f8e6bca8272dde10cde16dd235d5c50e8c43fe | 22,924 | py | Python | src/post/dialog_post.py | gywukun09/GPS | ce474f4afbcb64d46e85f04675e63343d5b65b47 | ["MIT"] | 18 | 2017-08-08T16:46:21.000Z | 2021-11-24T06:43:08.000Z | src/post/dialog_post.py | haoxy97/GPS | 3da6d3a7410b7b7e5340373f206a1833759d5acf | ["MIT"] | 1 | 2019-12-24T11:53:18.000Z | 2019-12-24T11:53:18.000Z | src/post/dialog_post.py | haoxy97/GPS | 3da6d3a7410b7b7e5340373f206a1833759d5acf | ["MIT"] | 14 | 2017-07-08T03:17:37.000Z | 2022-01-10T12:33:27.000Z |
import sys
import os
import cantera as ct
import json
import time
import copy
import matplotlib.pyplot as plt
from PyQt4 import uic
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from src.core.def_tools import keys_sorted
from src.gui.def_dialog import *
from src.gui.dialog_mech import dialog_mech
from dialog_subplot import *
from def_painter import *
from def_plt_tools import *
""" >>>>>>>>>>>>>------------------------------------------------
dP oo dP dP
88 88 88
88d888b. dP dP dP 88 .d888b88 .d8888b. 88d888b.
88' `88 88 88 88 88 88' `88 88ooood8 88' `88
88. .88 88. .88 88 88 88. .88 88. ... 88
88Y8888' `88888P' dP dP `88888P8 `88888P' dP
"""
class dialog_plot_builder(base_dialog):
def set_enable(self, pop_msg=True):
db = self.data['db']
db_ok = (db is not None) and (db['db_name'] in self.parent.project['database'].keys())
if db_ok:
self.w.txt_db.setText(db['db_name'])
else:
self.w.txt_db.setText('')
self.w.gb_x.setEnabled(db_ok)
self.w.gb_subplot.setEnabled(db_ok)
self.w.gb_2nd.setEnabled(db_ok)
self.w.gb_subplot.setEnabled(db_ok)
self.w.gb_mech.setEnabled(db_ok)
self.w.btn_db.setDefault(not db_ok)
# set items -------------------------
if db_ok:
items_varying = []
if self.data['db']['T0'] == 'varying':
items_varying.append('Initial temperature (K)')
if self.data['db']['atm'] == 'varying':
items_varying.append('Pressure (atm)')
if self.data['db']['fuel'] == 'varying':
for fuel_name in self.parent.project['database'][db['db_name']]['fuel']:
fuel = self.parent.project['fuel'][fuel_name]
for sp in fuel['composition'].keys():
item = 'Initial ' + sp + ' concnetration (%) in fuel'
if item not in items_varying:
items_varying.append(item)
# set the x-axis options
if ('ign_evolve' in self.key):
self.w_cbs['items'][0] = ['Time (s)', 'Time (ms)', 'Temperature (K)','Normalized time']
self.w_cbs['items'][1] = copy.copy(items_varying)
elif ('premix_evolve' in self.key):
self.w_cbs['items'][0] = ['Distance (cm)', 'Temperature (K)']
self.w_cbs['items'][1] = copy.copy(items_varying)
elif ('psr_state' in self.key):
self.w_cbs['items'][0] = ['Residence time (s)']
self.w_cbs['items'][1] = copy.copy(items_varying)
elif ('ign_state' in self.key):
self.w_cbs['items'][0] = copy.copy(items_varying)
self.w_cbs['items'][1] = copy.copy(items_varying)
else:
self.w_cbs['items'][0] = ['']
self.w_cbs['items'][1] = ['']
self.set_cb()
# ====================
# act
def act_plot(self):
for reader in self.readers:
if reader() == False:
return None
self.w.btn_plot.setText('plotting...')
self.parent.app.processEvents()
plt.rc('font', **{'family':'Times New Roman'})
subplots = self.data[self.subkey]
n_sub = len(subplots)
W = self.data['fig_w'][0]
H = self.data['sub_h'][0] * n_sub
f, axarr = plt.subplots(n_sub, 1, sharex='all', figsize=(W,H))
i_sub = 0
for subplot_name in subplots:
subplot = self.parent.project[self.subkey][subplot_name]
tp = subplot['type']
index = self.subplot['type'].index(tp)
painter = self.subplot['painter'][index]
if n_sub>1:
ax = axarr[i_sub]
else:
ax = axarr
painter_OK = painter(parent=self.parent, fig_opt=self.data, sub_opt=subplot, ax=ax, tp=tp)
if not painter_OK:
self.w.btn_plot.setText('plot')
self.parent.app.processEvents()
return False
i_sub += 1
if n_sub>1:
ax = axarr[-1]
else:
ax = axarr
xlim = self.data['xlim']
xtick = self.data['xtick']
if bool(xlim):
ax.set_xlim(xlim)
else:
xlim = opt_lim(ax, 'x', self.data['xscale'])
ax.set_xlim(xlim)
if bool(xtick):
ax.set_xticks(xtick)
xlabel = self.data['xtype'].replace('(','[').replace(')',']')
if 'initial t' in xlabel.lower():
xlabel = r'$T_0$'+' [K]'
if 'Pressure [atm]' == xlabel:
xlabel = r'$P$' + ' [atm]'
xlabel = xlabel.replace('Time',r'$t$')
ax.set_xlabel(xlabel)
dir_plot = os.path.join(self.parent.project['dir_public'], 'plot')
if not os.path.exists(dir_plot):
os.makedirs(dir_plot)
path_save = os.path.join(dir_plot,self.data['name']+'.pdf')
#plt.subplots_adjust(bottom=0.15)
#plt.subplots_adjust(left=0.25)
plt.tight_layout()
plt.subplots_adjust(hspace=0)
plt.savefig(path_save)#, format='eps', dpi=1000, bbox_inches='tight')
self.w.btn_plot.setText('plot')
self.parent.app.processEvents()
dir_public = self.parent.project['dir_public']
msg = 'figure saved to \n\n' + path_save.replace(dir_public,'[working dir/]')
QMessageBox.information(QWidget(),'',msg)
self.act_save()
def act_up(self):
self.read_list(pop_msg=False)
obj = self.w.list_subplot
subplot_name = self.read_item(obj)
subs = self.data[self.subkey]
if subplot_name == subs[0]:
return None
if subplot_name not in subs:
return None
index = self.data[self.subkey].index(subplot_name)
subs[index-1], subs[index] = subs[index], subs[index - 1]
self.set_list()
def act_down(self):
self.read_list(pop_msg=False)
obj = self.w.list_subplot
subplot_name = self.read_item(obj)
subs = self.data[self.subkey]
if subplot_name == subs[-1]:
return None
if subplot_name not in subs:
return None
index = self.data[self.subkey].index(subplot_name)
subs[index+1], subs[index] = subs[index], subs[index + 1]
self.set_list()
def act_db(self):
db = dialog_plot_db(parent=self.parent, extra=self.data).data
if db !=None:
self.data['db'] = db
self.set_enable()
def act_add_subplot(self):
name = str(self.w.cb_subplot.currentText())
index = self.subplot['name'].index(name)
dialog = self.subplot['dialog'][index]
self.data['current_subplot'] = name
self.sub['dialog'][1] = dialog
self.act_add(self.subkey)
def act_edit_subplot(self):
subplot_name = self.read_item(self.w.list_subplot)
subplot = self.parent.project[self.subkey][subplot_name]
index = self.subplot['type'].index(subplot['type'])
dialog = self.subplot['dialog'][index]
self.sub['dialog'][1] = dialog
self.act_edit(self.subkey)
def act_copy_subplot(self):
obj = self.w.list_subplot
data_name = self.read_item(obj)
if data_name == None:
return None
key = self.subkey
occupied = self.init_occupied(key=key)
print 'plot_builder: occupied = '+str(occupied)
copy_name, ok = QInputDialog.getText(QWidget(), '',
'Name the copy of ' + data_name + ' as:',text=data_name)
copy_name = str(copy_name)
if ok:
if self.read_name(name0=copy_name, save=False, occupied0=occupied):
data_copied = copy.copy(self.parent.project[key][data_name])
data_copied['name'] = copy_name
self.parent.project[key][copy_name] = data_copied
self.set_list()
def act_del_subplot(self): self.act_del(self.subkey)
def act_add_mech(self): self.act_add('mech')
def act_edit_mech(self): self.act_edit('mech')
def act_del_mech(self): self.act_del('mech')
# ====================
# init
def init_data_default(self):
self.data = dict()
self.data['name'] = self.new_name(self.key, self.occupied)
self.data['db'] = None
self.data['xtype'] = ''
#self.data['scale'] = 'linear'
self.data['2nd_var'] = ''
self.data['xlim'] = []
self.data['xtick'] = []
self.data['xscale'] = 'linear'
self.data['mech'] = ['detailed']
self.data['fig_w'] = [5.6]
self.data['sub_h'] = [2.2]
self.data['subkey'] = self.subkey
self.data[self.subkey] = []
def init(self):
self.sort_list = False
self.subplot = dict()
if self.key == 'plot_ign_evolve':
self.subplot['name'] = ['T-t relation', 'heat release', 'species concentration', 'global pathway','node analysis','radical rop']
self.subplot['dialog'] = [dialog_plot_single, dialog_plot_single, dialog_plot_sp, dialog_plot_GP, dialog_plot_node, dialog_plot_Rrop]
self.subplot['type'] = ['T', 'Qdot', 'sp', 'GP','node','Rrop']
elif self.key == 'plot_ign_state':
self.subplot['name'] = ['ignition delay', 'species concentration']
self.subplot['dialog'] = [dialog_plot_single, dialog_plot_sp]
self.subplot['type'] = ['tau-ign', 'sp']
self.subplot['painter'] = [painter] * len(self.subplot['name'])
elif self.key == 'plot_psr_state':
self.subplot['name'] = ['PSR temperature', 'species concentration', 'global pathway','node analysis','radical rop']
self.subplot['dialog'] = [dialog_plot_single, dialog_plot_sp, dialog_plot_GP, dialog_plot_node, dialog_plot_Rrop]
self.subplot['type'] = ['psr-T', 'sp', 'GP','node','Rrop']
elif self.key == 'plot_premix_evolve':
self.subplot['name'] = ['T-x relation', 'species concentration', 'global pathway','reactions of GP','node analysis']
self.subplot['dialog'] = [dialog_plot_single, dialog_plot_sp, dialog_plot_GP, dialog_plot_GPrxn, dialog_plot_node]
self.subplot['type'] = ['T', 'sp', 'GP','GPrxn','node']
self.subplot['painter'] = [painter] * len(self.subplot['name'])
for index in range(len(self.subplot['type'])):
self.subplot['type'][index] = 'sub' + self.key + '_' + self.subplot['type'][index]
self.subkey = 'sub'+self.key
if self.subkey not in self.parent.project.keys():
self.parent.project[self.subkey] = dict()
# ====================
self.ui_name = 'plot_builder.ui'
self.init_ui()
self.occupied = self.init_occupied()
self.init_data()
# set connection ====================
self.w.btn_add_mech.clicked.connect(self.act_add_mech)
self.w.btn_del_mech.clicked.connect(self.act_del_mech)
self.w.btn_edit_mech.clicked.connect(self.act_edit_mech)
self.w.list_mech.doubleClicked.connect(self.act_edit_mech)
self.w.btn_add_subplot.clicked.connect(self.act_add_subplot)
self.w.btn_del_subplot.clicked.connect(self.act_del_subplot)
self.w.btn_copy_subplot.clicked.connect(self.act_copy_subplot)
self.w.btn_edit_subplot.clicked.connect(self.act_edit_subplot)
self.w.list_subplot.doubleClicked.connect(self.act_edit_subplot)
self.w.btn_up.clicked.connect(self.act_up)
self.w.btn_down.clicked.connect(self.act_down)
self.w.btn_save.clicked.connect(self.act_save)
self.w.btn_cancel.clicked.connect(self.act_cancel)
self.w.btn_plot.clicked.connect(self.act_plot)
self.w.btn_db.clicked.connect(self.act_db)
# set obj IO ==============================
self.w_txts = dict()
self.w_txts['obj'] = [self.w.txt_xlim, self.w.txt_xtick, self.w.txt_fig_w, self.w.txt_sub_h]
self.w_txts['key'] = ['xlim', 'xtick', 'fig_w', 'sub_h']
self.w_txts['name'] = ['limits', 'ticks', 'figure width', 'figure height (per sub)']
self.w_txts['vali'] = [self.is_float]*2 + [self.is_float] * 2
self.w_txts['empty'] = [True, True, False, False]
self.w_txts['len'] = [2, None, 1, 1]
self.w_cbs = dict()
self.w_cbs['obj'] = [self.w.cb_xtype, self.w.cb_2nd_var, self.w.cb_xscale]
self.w_cbs['key'] = ['xtype','2nd_var', 'xscale', 'cmap']
self.w_cbs['name'] = ['horizontal axis type','secondary variable','horizontal axis scale']
self.w_cbs['items'] = [[],[],['linear','log']]
self.w_cbs['empty'] = [False, True, False]
self.w_lists = dict()
self.w_lists['obj'] = [self.w.list_mech, self.w.list_subplot]
self.w_lists['key'] = ['mech',self.subkey]
self.w_lists['name'] = ['mech','subplots']
self.sub = self.w_lists
self.sub['dialog'] = [dialog_mech, None]
self.sub['viewer'] = [self.set_list] * 2
self.sub['reader'] = [self.read_list] * 2
self.sub['single'] = [False] * 2
# set ui obj ==============================
for item in self.subplot['name']:
self.w.cb_subplot.addItem(item)
self.set_name()
self.set_txt()
self.set_cb()
self.set_list()
self.set_enable()
# exec ==============================
self.readers = [self.read_name, self.read_txt, \
self.read_cb, self.read_list]
if self.w.exec_() == QDialog.Rejected:
self.data = None
class dialog_plot_GPedge(base_dialog):
"""
.88888. 888888ba dP
d8' `88 88 `8b 88
88 a88aaaa8P' .d8888b. .d888b88 .d8888b. .d8888b.
88 YP88 88 88ooood8 88' `88 88' `88 88ooood8
Y8. .88 88 88. ... 88. .88 88. .88 88. ...
`88888' dP `88888P' `88888P8 `8888P88 `88888P'
oooooooooooo .88
d8888P
"""
def set_enable(self, pop_msg=True):
db = self.data['db']
db_ok = (db is not None) and (db['db_name'] in self.parent.project['database'].keys())
if db_ok:
self.w.txt_db.setText(self.data['db']['db_name'])
self.w_cbs['items'][0] = [str(phi) for phi in self.parent.project['database'][db['db_name']]['phi'] ]
self.set_cb(which=[0])
else:
self.w.txt_db.setText('')
self.w.gb_core.setEnabled(db_ok)
self.w.btn_db.setDefault(not db_ok)
def set_GPs(self):
print 'set_GPs triggered'
traced = str(self.w.cb_traced.currentText())
if bool(traced):
items = []
for GP_name in self.parent.project['GP_'+traced].keys():
items.append(self.parent.project['GP_'+traced][GP_name]['alias'])
self.w_cbs['items'][2] = sorted(items)
else:
self.w_cbs['items'][2] = []
self.set_cb(which=[2])
# ====================
# act
def act_plot(self):
for reader in self.readers:
if reader() == False:
return None
self.w.btn_plot.setText('plotting...')
self.parent.app.processEvents()
dir_desk = self.parent.project['mech']['detailed']['desk']
dir_raw = cond2dir(dir_desk, self.data['db']['fuel'], self.data['db']['oxid'], \
float(self.data['phi']), float(self.data['db']['atm']), float(self.data['db']['T0']),\
self.data['db']['reactor'], self.parent.n_digit)
path_raw = os.path.join(dir_raw,'raw.npz')
raw = load_raw(path_raw)
GPs = self.parent.project['GP_'+self.data['traced']]
GP_dir = None
GP_alias = self.data['GP_alias']
for GP_name in GPs.keys():
if GPs[GP_name]['alias'] == GP_alias:
GP_dir = GPs[GP_name]
break
if GP_dir is None:
msg = 'cannot find GP whose alias is '+str(GP_alias)
QMessageBox.information(QWidget(),'',msg)
self.w.btn_plot.setText('plot')
self.parent.app.processEvents()
return False
GP_dir = self.parent.project['GP_'+self.data['traced']][GP_name]
dir_plot = os.path.join(self.parent.project['dir_public'], 'plot')
if not os.path.exists(dir_plot):
os.makedirs(dir_plot)
path_save = os.path.join(dir_plot,self.data['name']+'.pdf')
soln = self.parent.soln['detailed']
rename = self.parent.project['rename']
if self.data['method'] == 'concentration':
ok = plot_GPedge_mf(soln, GP_dir, self.data, raw, path_save, rename)
else:
GPSA_data = load_GPSA(dir_raw, GP_dir, self.data['method'])
if GPSA_data is None:
msg = 'could not find GPSA data for: \n\n'+GP_name+'\n\nin:\n\n'+str(dir_raw)+\
'\n\ntry to run GPSA first'
QMessageBox.information(QWidget(),'',msg)
self.w.btn_plot.setText('plot')
self.parent.app.processEvents()
return False
ok = plot_GPedge(soln, GPSA_data, self.data, raw, path_save, rename)
self.w.btn_plot.setText('plot')
self.parent.app.processEvents()
if ok:
dir_public = self.parent.project['dir_public']
msg = 'figure saved to \n\n' + path_save.replace(dir_public,'[working dir/]')
QMessageBox.information(QWidget(),'',msg)
self.act_save()
def act_db(self):
db = dialog_plot_db(parent=self.parent, extra=self.data).data
if db !=None:
self.data['db'] = db
self.set_enable()
# ====================
# init
def init_data_default(self):
self.data = dict()
self.data['name'] = self.new_name(self.key, self.occupied)
self.data['db'] = None
self.data['fig_w'] = [7.0]
self.data['fig_h'] = [10.0]
self.data['n_rxn'] = [2]
self.data['sample_loc'] = [400]
self.data['sample_by'] = 'T rised (K)'
self.data['xscale'] = 'log'
self.data['xlim'] = []
self.data['phi'] = ''
self.data['traced'] = ''
self.data['GP_alias'] = ''
self.data['GP_name'] = ''
self.data['subkey'] = self.subkey
self.data['method'] = 'R_ij'
def init(self):
self.key = 'plot_GPedge'
self.subkey = 'sub'+self.key
# ====================
self.ui_name = 'plot_Dij.ui'
self.init_ui()
self.occupied = self.init_occupied()
self.init_data()
# set connection ====================
self.w.btn_save.clicked.connect(self.act_save)
self.w.btn_cancel.clicked.connect(self.act_cancel)
self.w.btn_plot.clicked.connect(self.act_plot)
self.w.btn_db.clicked.connect(self.act_db)
self.w.cb_traced.currentIndexChanged.connect(self.set_GPs)
# set obj IO ==============================
self.w_txts = dict()
self.w_txts['obj'] = [self.w.txt_fig_w, self.w.txt_fig_h, self.w.txt_n_rxn, self.w.txt_sample, self.w.txt_xlim]
self.w_txts['key'] = ['fig_w', 'fig_h', 'n_rxn', 'sample_loc', 'xlim']
self.w_txts['name'] = ['figure width', 'figure height', 'max reaction #', 'sampling location', 'axis limits']
self.w_txts['vali'] = [self.is_float] * 2 + [self.is_pos_float] * 2 + [self.is_float]
self.w_txts['empty'] = [False, False, False, False, True]
self.w_txts['len'] = [1, 1, 1, 1, 2]
soln = self.parent.soln['detailed']
print
print 'soln.element_names = '+str(soln.element_names)
print
self.w_cbs = dict()
self.w_cbs['obj'] = [self.w.cb_phi, self.w.cb_traced, self.w.cb_GP, self.w.cb_xscale, self.w.cb_method, self.w.cb_sample]
self.w_cbs['key'] = ['phi', 'traced','GP_alias','xscale','method','sample_by']
self.w_cbs['name'] = ['equiv. ratio', 'traced','global pathway','axis scale','method','sampling by']
self.w_cbs['items'] = [
[],
soln.element_names, [],
['log','linear'],
['R_ij','a_iji','concentration'],
['T (K)', 'T rised (K)','t passed (s)','norm t passed']
]
self.w_cbs['empty'] = [False] * len(self.w_cbs['obj'])
# set ui obj ==============================
self.set_name()
self.set_txt()
self.set_cb()
self.set_enable()
# exec ==============================
self.readers = [self.read_name, self.read_txt, self.read_cb]
if self.w.exec_() == QDialog.Rejected:
self.data = None
class dialog_post(common):
""" >>>>>>>>>>>>>------------------------------------------------
1. dialog_post
called by: window_main
"""
# set ==============================
def set_list(self, key):
obj = self.w.list_plot
model = QStandardItemModel()
if key not in self.parent.project.keys():
self.parent.project[key] = dict()
for item_name in sorted(self.parent.project[key].keys()):
item = self.parent.project[key][item_name]
Qitem = QStandardItem(item_name)
Qitem.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable)
model.appendRow(Qitem)
obj.setModel(model)
self.parent.app.processEvents()
# act ==============================
def act_add(self, key):
if key == 'plot_GPedge':
dialog = dialog_plot_GPedge
else:
dialog = dialog_plot_builder
data = dialog(parent=self.parent, data_name=None, key=key).data
if data is not None:
self.parent.project[key][data['name']] = data
self.set_list(key=key)
def act_del(self, key):
obj = self.w.list_plot
data_name = self.read_item(obj)
if data_name == None:
return None
msg = 'are you sure you want to delete "'+data_name+'"?\n\n'+\
'(you can uncheck it if you just do not want to use it right now)'
Qanswer = QMessageBox.question(QWidget(),'',msg, \
QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
if Qanswer == QMessageBox.Yes:
del self.parent.project[key][data_name]
self.set_list(key=key)
def act_edit(self, key):
obj = self.w.list_plot
data_name = self.read_item(obj)
if data_name == None:
return None
if key == 'plot_GPedge':
dialog = dialog_plot_GPedge
else:
dialog = dialog_plot_builder
data = dialog(parent=self.parent, data_name=data_name, key=key).data
if data is not None:
del self.parent.project[key][data_name]
self.parent.project[key][data['name']] = data
self.set_list(key=key)
def act_copy(self, key):
obj = self.w.list_plot
data_name = self.read_item(obj)
if data_name == None:
return None
occupied = self.init_occupied(key=key)
copy_name, ok = QInputDialog.getText(QWidget(), '',
'Name the copy of ' + data_name + ' as:',text=data_name)
copy_name = str(copy_name)
if ok:
if self.read_name(name0=copy_name, save=False, occupied0=occupied):
data_copied = copy.copy(self.parent.project[key][data_name])
data_copied['name'] = copy_name
self.parent.project[key][copy_name] = data_copied
self.set_list(key=key)
def act_add_plot(self):
plot = str(self.w.cb_plot.currentText())
index = self.cb_items.index(plot)
self.act_add(self.plot_type[index])
def act_edit_plot(self):
plot = str(self.w.cb_plot.currentText())
index = self.cb_items.index(plot)
self.act_edit(self.plot_type[index])
def act_del_plot(self):
plot = str(self.w.cb_plot.currentText())
index = self.cb_items.index(plot)
self.act_del(self.plot_type[index])
def act_copy_plot(self):
plot = str(self.w.cb_plot.currentText())
index = self.cb_items.index(plot)
self.act_copy(self.plot_type[index])
def act_cb(self):
plot = str(self.w.cb_plot.currentText())
index = self.cb_items.index(plot)
self.set_list(self.plot_type[index])
def act_ok(self):
self.w.accept()
# init ============================
def __init__(self, parent):
self.ui_name = 'post.ui'
self.parent = parent
self.w = uic.loadUi(os.path.join(self.parent.dir_ui, self.ui_name))
self.w.setFixedSize(self.w.width(), self.w.height())
# set connection ====================
self.w.btn_ok.clicked.connect(self.act_ok)
self.w.btn_add.clicked.connect(self.act_add_plot)
self.w.btn_del.clicked.connect(self.act_del_plot)
self.w.btn_edit.clicked.connect(self.act_edit_plot)
self.w.list_plot.doubleClicked.connect(self.act_edit_plot)
self.w.btn_copy.clicked.connect(self.act_copy_plot)
self.w.cb_plot.currentIndexChanged.connect(self.act_cb)
# set variables ==============================
self.cb_items = ['autoignition process','autoignition delay','1D flame structure','PSR S-cruve','GP edge analysis']
self.plot_type = ['plot_ign_evolve','plot_ign_state','plot_premix_evolve','plot_psr_state','plot_GPedge']
# set ui obj ==============================
for item in self.cb_items:
self.w.cb_plot.addItem(item)
# exec ==============================
self.w.exec_()
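# --- Illustrative usage note (not part of the original module) ---
# dialog_post is opened from the main window with the window itself as parent;
# the attribute names on `window` below are assumptions based on how they are
# used above (window.dir_ui, window.project, window.app):
#     dialog_post(parent=window)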
| 24.155954 | 137 | 0.629384 |
73f90478cead28dd565df6aed247e83cf6faf582 | 12,687 | py | Python | nnunet/run/run_training.py | ZXLam/nnUNet | 0cf7c8a857c248d6be171e4945427b405f6ac258 | ["Apache-2.0"] | null | null | null | nnunet/run/run_training.py | ZXLam/nnUNet | 0cf7c8a857c248d6be171e4945427b405f6ac258 | ["Apache-2.0"] | null | null | null | nnunet/run/run_training.py | ZXLam/nnUNet | 0cf7c8a857c248d6be171e4945427b405f6ac258 | ["Apache-2.0"] | 1 | 2022-03-15T03:15:02.000Z | 2022-03-15T03:15:02.000Z |
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import wandb
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet.run.default_configuration import get_default_configuration
from nnunet.paths import default_plans_identifier
from nnunet.run.load_pretrained_weights import load_pretrained_weights
from nnunet.training.cascade_stuff.predict_next_stage import predict_next_stage
from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer
from nnunet.training.network_training.nnUNetTrainerCascadeFullRes import nnUNetTrainerCascadeFullRes
from nnunet.training.network_training.nnUNetTrainerV2_CascadeFullRes import nnUNetTrainerV2CascadeFullRes
from nnunet.utilities.task_name_id_conversion import convert_id_to_task_name
def main():
parser = argparse.ArgumentParser()
parser.add_argument("network")
parser.add_argument("network_trainer")
parser.add_argument("task", help="can be task name or task id")
parser.add_argument("fold", help='0, 1, ..., 5 or \'all\'')
parser.add_argument("-val", "--validation_only", help="use this if you want to only run the validation",
action="store_true")
parser.add_argument("-c", "--continue_training", help="use this if you want to continue a training",
action="store_true")
parser.add_argument("-p", help="plans identifier. Only change this if you created a custom experiment planner",
default=default_plans_identifier, required=False)
parser.add_argument("--use_compressed_data", default=False, action="store_true",
help="If you set use_compressed_data, the training cases will not be decompressed. Reading compressed data "
"is much more CPU and RAM intensive and should only be used if you know what you are "
"doing", required=False)
parser.add_argument("--deterministic",
help="Makes training deterministic, but reduces training speed substantially. I (Fabian) think "
"this is not necessary. Deterministic training will make you overfit to some random seed. "
"Don't use that.",
required=False, default=False, action="store_true")
parser.add_argument("--npz", required=False, default=False, action="store_true", help="if set then nnUNet will "
"export npz files of "
"predicted segmentations "
"in the validation as well. "
"This is needed to run the "
"ensembling step so unless "
"you are developing nnUNet "
"you should enable this")
parser.add_argument("--find_lr", required=False, default=False, action="store_true",
help="not used here, just for fun")
parser.add_argument("--valbest", required=False, default=False, action="store_true",
help="hands off. This is not intended to be used")
parser.add_argument("--fp32", required=False, default=False, action="store_true",
help="disable mixed precision training and run old school fp32")
parser.add_argument("--val_folder", required=False, default="validation_raw",
help="name of the validation folder. No need to use this for most people")
parser.add_argument("--disable_saving", required=False, action='store_true',
help="If set nnU-Net will not save any parameter files (except a temporary checkpoint that "
"will be removed at the end of the training). Useful for development when you are "
"only interested in the results and want to save some disk space")
parser.add_argument("--disable_postprocessing_on_folds", required=False, action='store_true',
help="Running postprocessing on each fold only makes sense when developing with nnU-Net and "
"closely observing the model performance on specific configurations. You do not need it "
"when applying nnU-Net because the postprocessing for this will be determined only once "
"all five folds have been trained and nnUNet_find_best_configuration is called. Usually "
"running postprocessing on each fold is computationally cheap, but some users have "
"reported issues with very large images. If your images are large (>600x600x600 voxels) "
"you should consider setting this flag.")
# parser.add_argument("--interp_order", required=False, default=3, type=int,
# help="order of interpolation for segmentations. Testing purpose only. Hands off")
# parser.add_argument("--interp_order_z", required=False, default=0, type=int,
# help="order of interpolation along z if z is resampled separately. Testing purpose only. "
# "Hands off")
# parser.add_argument("--force_separate_z", required=False, default="None", type=str,
# help="force_separate_z resampling. Can be None, True or False. Testing purpose only. Hands off")
parser.add_argument('--val_disable_overwrite', action='store_false', default=True,
help='Validation does not overwrite existing segmentations')
parser.add_argument('--disable_next_stage_pred', action='store_true', default=False,
help='do not predict next stage')
parser.add_argument('-pretrained_weights', type=str, required=False, default=None,
help='path to nnU-Net checkpoint file to be used as pretrained model (use .model '
'file, for example model_final_checkpoint.model). Will only be used when actually training. '
'Optional. Beta. Use with caution.')
# Add an argument for pre-trained weights
parser.add_argument("-w", required=False, default=None, help="Load pre-trained Models Genesis")
args = parser.parse_args()
task = args.task
fold = args.fold
network = args.network
network_trainer = args.network_trainer
validation_only = args.validation_only
plans_identifier = args.p
find_lr = args.find_lr
disable_postprocessing_on_folds = args.disable_postprocessing_on_folds
use_compressed_data = args.use_compressed_data
decompress_data = not use_compressed_data
deterministic = args.deterministic
valbest = args.valbest
fp32 = args.fp32
run_mixed_precision = not fp32
val_folder = args.val_folder
# interp_order = args.interp_order
# interp_order_z = args.interp_order_z
# force_separate_z = args.force_separate_z
# Parse it to variable "weights"
weights = args.w
if not task.startswith("Task"):
task_id = int(task)
task = convert_id_to_task_name(task_id)
if fold == 'all':
pass
else:
fold = int(fold)
# if force_separate_z == "None":
# force_separate_z = None
# elif force_separate_z == "False":
# force_separate_z = False
# elif force_separate_z == "True":
# force_separate_z = True
# else:
# raise ValueError("force_separate_z must be None, True or False. Given: %s" % force_separate_z)
plans_file, output_folder_name, dataset_directory, batch_dice, stage, \
trainer_class = get_default_configuration(network, task, network_trainer, plans_identifier)
if trainer_class is None:
raise RuntimeError("Could not find trainer class in nnunet.training.network_training")
if network == "3d_cascade_fullres":
assert issubclass(trainer_class, (nnUNetTrainerCascadeFullRes, nnUNetTrainerV2CascadeFullRes)), \
"If running 3d_cascade_fullres then your " \
"trainer class must be derived from " \
"nnUNetTrainerCascadeFullRes"
else:
assert issubclass(trainer_class,
nnUNetTrainer), "network_trainer was found but is not derived from nnUNetTrainer"
if weights != None:
output_folder_name += "_ModelsGenesis"
trainer = trainer_class(plans_file, fold, output_folder=output_folder_name, dataset_directory=dataset_directory,
batch_dice=batch_dice, stage=stage, unpack_data=decompress_data,
deterministic=deterministic,
fp16=run_mixed_precision)
if args.disable_saving:
trainer.save_final_checkpoint = False # whether or not to save the final checkpoint
trainer.save_best_checkpoint = False # whether or not to save the best checkpoint according to
# self.best_val_eval_criterion_MA
trainer.save_intermediate_checkpoints = True # whether or not to save checkpoint_latest. We need that in case
# the training crashes
trainer.save_latest_only = True # if false it will not store/overwrite _latest but separate files each
trainer.initialize(not validation_only)
if weights != None:
trainer.load_pretrained_weights(weights)
if find_lr:
trainer.find_lr()
else:
if not validation_only:
wandb.init(project="YokeynnUNetWork", name=str(task)+"-"+network_trainer+"-"+network, config=vars(args))
if args.continue_training:
# -c was set, continue a previous training and ignore pretrained weights
trainer.load_latest_checkpoint()
elif (not args.continue_training) and (args.pretrained_weights is not None):
# we start a new training. If pretrained_weights are set, use them
load_pretrained_weights(trainer.network, args.pretrained_weights)
else:
# new training without pretraine weights, do nothing
pass
trainer.run_training()
else:
if valbest:
trainer.load_best_checkpoint(train=False)
else:
trainer.load_final_checkpoint(train=False)
trainer.network.eval()
# predict validation
trainer.validate(save_softmax=args.npz, validation_folder_name=val_folder,
run_postprocessing_on_folds=not disable_postprocessing_on_folds,
overwrite=args.val_disable_overwrite)
if not validation_only:
with open(os.path.join(trainer.output_folder, val_folder, "summary.json"), "r") as f:
summary = json.load(f)
wandb.run.summary.update(summary["results"]["mean"]["1"])
wandb.save(os.path.join(trainer.output_folder, val_folder, "summary.json"), policy="end")
wandb.save(os.path.join(trainer.output_folder, "model_*"), policy="end")
wandb.save(os.path.join(trainer.output_folder, "debug.json"), policy="end")
wandb.save(os.path.join(trainer.output_folder, "progress.png"), policy="end")
wandb.finish()
if network == '3d_lowres' and not args.disable_next_stage_pred:
print("predicting segmentations for the next stage of the cascade")
predict_next_stage(trainer, join(dataset_directory, trainer.plans['data_identifier'] + "_stage%d" % 1))
if __name__ == "__main__":
main()
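# --- Illustrative invocation (not part of the original script) ---
# A typical call with hypothetical network/trainer/task/fold values; the -w
# flag points at a Models Genesis checkpoint as handled above:
#   python run_training.py 3d_fullres nnUNetTrainerV2 Task003_Liver 0 \
#       -w /path/to/Genesis_Chest_CT.model --npz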
| 56.638393 | 132 | 0.632931 |
73f91f8d34d123ff618cd4282535a932616364de | 1,196 | py | Python | MGN/model_utils/local_adapter.py | kingcong/MindSpore_Code | afe6ef9cada6c98f601754197db84ee1c21dbc9d | ["Apache-2.0"] | 77 | 2021-10-15T08:32:37.000Z | 2022-03-30T13:09:11.000Z | MGN/model_utils/local_adapter.py | kingcong/MindSpore_Code | afe6ef9cada6c98f601754197db84ee1c21dbc9d | ["Apache-2.0"] | 3 | 2021-10-30T14:44:57.000Z | 2022-02-14T06:57:57.000Z | MGN/model_utils/local_adapter.py | kingcong/MindSpore_Code | afe6ef9cada6c98f601754197db84ee1c21dbc9d | ["Apache-2.0"] | 24 | 2021-10-15T08:32:45.000Z | 2022-03-24T18:45:20.000Z |
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Local adapter"""
import os
def get_device_id():
""" Get device id via environment """
device_id = os.getenv('DEVICE_ID', '0')
return int(device_id)
def get_device_num():
""" Get device number via environment """
device_num = os.getenv('RANK_SIZE', '1')
return int(device_num)
def get_rank_id():
""" Get rank id via environment """
global_rank_id = os.getenv('RANK_ID', '0')
return int(global_rank_id)
def get_job_id():
""" Get job id via environment """
return "Local Job"
| 29.170732 | 78 | 0.660535 |
73f92335136b06eff8765d10b9e423135fef62df | 3,562 | py | Python | src/meta.py | JakobGM/WikiLinks | 5743b1d4c3fefa66fcaa4d283436d2a3f0490604 | ["MIT"] | 6 | 2017-08-12T09:55:06.000Z | 2019-09-03T08:05:21.000Z | src/meta.py | JakobGM/WikiLinks | 5743b1d4c3fefa66fcaa4d283436d2a3f0490604 | ["MIT"] | 57 | 2017-08-11T23:05:07.000Z | 2022-03-11T23:32:12.000Z | src/meta.py | JakobGM/WikiLinks | 5743b1d4c3fefa66fcaa4d283436d2a3f0490604 | ["MIT"] | 1 | 2017-09-27T15:31:15.000Z | 2017-09-27T15:31:15.000Z |
from pathlib import Path
from loguru import logger
from src.utils.loaders import get_env
from src.utils.loaders import load_yaml
from src.utils.loaders import metadata_path
__all__ = [
"DatasetMeta",
"BaseMeta",
"HARMeta",
"ModalityMeta",
"DatasetMeta",
]
class BaseMeta(object):
def __init__(self, path, *args, **kwargs):
self.path = Path(path)
self.name = self.path.stem
self.meta = dict()
if path:
try:
meta = load_yaml(path)
if meta is None:
logger.info(f'The content metadata module "{self.name}" from {path} is empty. Assigning empty dict')
meta = dict()
else:
if not isinstance(meta, dict):
logger.warning(f"Metadata not of type dict loaded: {meta}")
self.meta = meta
except FileNotFoundError:
# logger.warning(f'The metadata file for "{self.name}" was not found.')
pass
def __getitem__(self, item):
if item not in self.meta:
logger.exception(KeyError(f"{item} not found in {self.__class__.__name__}"))
return self.meta[item]
def __contains__(self, item):
return item in self.meta
def __repr__(self):
return f"<{self.name} {self.meta.__repr__()}>"
def keys(self):
return self.meta.keys()
def values(self):
return self.meta.values()
def items(self):
return self.meta.items()
def insert(self, key, value):
assert key not in self.meta
self.meta[key] = value
"""
Non-functional metadata
"""
class HARMeta(BaseMeta):
def __init__(self, path, *args, **kwargs):
super(HARMeta, self).__init__(path=metadata_path("tasks", "har.yaml"), *args, **kwargs)
class LocalisationMeta(BaseMeta):
def __init__(self, path, *args, **kwargs):
super(LocalisationMeta, self).__init__(path=metadata_path("tasks", "localisation.yaml"), *args, **kwargs)
class ModalityMeta(BaseMeta):
def __init__(self, path, *args, **kwargs):
super(ModalityMeta, self).__init__(name=metadata_path("modality.yaml"), *args, **kwargs)
class DatasetMeta(BaseMeta):
def __init__(self, path, *args, **kwargs):
if isinstance(path, str):
path = Path("metadata", "datasets", f"{path}.yaml")
assert path.exists()
super(DatasetMeta, self).__init__(path=path, *args, **kwargs)
if "fs" not in self.meta:
logger.exception(KeyError(f'The file {path} does not contain the key "fs"'))
self.inv_lookup = dict()
for task_name in self.meta["tasks"].keys():
task_label_file = metadata_path("tasks", f"{task_name}.yaml")
task_labels = load_yaml(task_label_file)
dataset_labels = self.meta["tasks"][task_name]["target_transform"]
if not set(dataset_labels.keys()).issubset(set(task_labels)):
logger.exception(
ValueError(
f"The following labels from dataset {path} are not accounted for in {task_label_file}: "
f"{set(dataset_labels.keys()).difference(task_labels.keys())}"
)
)
self.inv_lookup[task_name] = {dataset_labels[kk]: kk for kk, vv in dataset_labels.items()}
@property
def fs(self):
return float(self.meta["fs"])
@property
def zip_path(self):
return get_env("ZIP_ROOT") / self.name
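# --- Illustrative usage sketch (not part of the original module) ---
# DatasetMeta accepts either a dataset name or a Path to its YAML file; the
# dataset name below is hypothetical and its YAML file must exist under
# metadata/datasets/ for the constructor's assertion to pass.
def _example_dataset_meta():
    meta = DatasetMeta('uci_har')  # resolves to metadata/datasets/uci_har.yaml
    return meta.fs, 'tasks' in meta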
| 29.438017 | 120 | 0.591802 |
73f935b7f6ddf1cb5928b09156ada07e5e96fe3a | 1,553 | py | Python | src/visions/core/implementations/types/__init__.py | ieaves/tenzing | 92d39c1c3a5633d8074e0ffe8c2687c465aebbc8 | ["MIT"] | null | null | null | src/visions/core/implementations/types/__init__.py | ieaves/tenzing | 92d39c1c3a5633d8074e0ffe8c2687c465aebbc8 | ["MIT"] | null | null | null | src/visions/core/implementations/types/__init__.py | ieaves/tenzing | 92d39c1c3a5633d8074e0ffe8c2687c465aebbc8 | ["MIT"] | null | null | null |
from visions.core.model.visions_generic import visions_generic
from visions.core.implementations.types.visions_string import visions_string
from visions.core.implementations.types.visions_bool import visions_bool
from visions.core.implementations.types.visions_categorical import visions_categorical
from visions.core.implementations.types.visions_complex import visions_complex
from visions.core.implementations.types.visions_count import visions_count
from visions.core.implementations.types.visions_date import visions_date
from visions.core.implementations.types.visions_datetime import visions_datetime
from visions.core.implementations.types.visions_existing_path import (
visions_existing_path,
)
from visions.core.implementations.types.visions_float import visions_float
from visions.core.implementations.types.visions_geometry import visions_geometry
from visions.core.implementations.types.visions_image_path import visions_image_path
from visions.core.implementations.types.visions_integer import visions_integer
from visions.core.implementations.types.visions_ip import visions_ip
from visions.core.implementations.types.visions_object import visions_object
from visions.core.implementations.types.visions_ordinal import visions_ordinal
from visions.core.implementations.types.visions_path import visions_path
from visions.core.implementations.types.visions_time import visions_time
from visions.core.implementations.types.visions_timedelta import visions_timedelta
from visions.core.implementations.types.visions_url import visions_url
| 67.521739 | 86 | 0.891822 |
73f9584156131577776dd794f58e7d921e0b2aeb | 2,972 | py | Python | app/services/auth.py | Simple2B/cortex-backend | 9cf6802b0eff9254875bcbe553517500ccfc9082 | ["MIT"] | 1 | 2021-10-17T13:28:51.000Z | 2021-10-17T13:28:51.000Z | app/services/auth.py | Simple2B/cortex-backend | 9cf6802b0eff9254875bcbe553517500ccfc9082 | ["MIT"] | null | null | null | app/services/auth.py | Simple2B/cortex-backend | 9cf6802b0eff9254875bcbe553517500ccfc9082 | ["MIT"] | null | null | null |
from datetime import datetime, timedelta
from fastapi import HTTPException, status, Depends
from fastapi.security import OAuth2PasswordBearer
from jose import JWTError, jwt
from pydantic import ValidationError
from sqlalchemy import func
from app.schemas import Doctor, Token, DoctorCreate
from app.models import Doctor as DoctorDB
from app.config import settings as config
from app.logger import log
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="/api/auth/sign_in")
def get_current_doctor(token: str = Depends(oauth2_scheme)) -> Doctor:
return AuthService.validate_token(token)
class AuthService:
def register_new_doctor(self, doctor_data: DoctorCreate) -> Doctor:
doctor = DoctorDB.query.filter(
func.lower(DoctorDB.api_key) == func.lower(doctor_data.api_key)
).first()
if doctor:
doctor.email_approved = True
doctor.password = doctor_data.password
doctor.save(True)
log(log.INFO, "Doctor [%s] has been approved", doctor.first_name)
return Doctor.from_orm(doctor)
return None
def authenticate_doctor(self, username: str, password: str) -> Token:
doctor = DoctorDB.authenticate(email=username, password=password)
if not doctor:
log(log.ERROR, "Authentication failed")
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Could not validate credentials",
headers={"WWW-Authenticate": "Bearer"},
)
log(log.INFO, "Doctor has been logged")
return self.create_token(doctor)
@classmethod
def validate_token(cls, token: str) -> Doctor:
try:
payload = jwt.decode(
token, config.JWT_SECRET, algorithms=[config.JWT_ALGORITHM]
)
except JWTError:
log(log.ERROR, "Invalid JWT token")
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="Invalid JWT token",
headers={"WWW-Authenticate": "Bearer"},
)
doctor_data = payload.get("doctor")
try:
return Doctor.parse_obj(doctor_data)
except ValidationError:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Could not validate doctor data",
headers={"WWW-Authenticate": "Bearer"},
)
@classmethod
def create_token(cls, doctor: DoctorDB) -> Token:
doctor_data = Doctor.from_orm(doctor)
now = datetime.utcnow()
payload = {
"iat": now,
"nbf": now,
"exp": now + timedelta(seconds=int(config.JWT_EXP)),
"sub": str(doctor.id),
"doctor": doctor_data.dict(),
}
token = jwt.encode(payload, config.JWT_SECRET, algorithm=config.JWT_ALGORITHM)
return Token(access_token=token)
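# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of wiring the get_current_doctor dependency into a route;
# the router instance and endpoint path below are hypothetical, not part of
# this service.
from fastapi import APIRouter  # illustration only

_example_router = APIRouter()

@_example_router.get("/doctors/me", response_model=Doctor)
def _example_current_doctor(doctor: Doctor = Depends(get_current_doctor)):
    # The validated Doctor schema extracted from the bearer token.
    return doctor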
| 33.772727 | 86 | 0.626851 |
73f965f13d58c10851a43ee66c44df9cdbc2f794 | 3,250 | py | Python | nova/api/openstack/compute/legacy_v2/contrib/assisted_volume_snapshots.py | ebalduf/nova-backports | 6bf97ec73467de522d34ab7a17ca0e0874baa7f9 | ["Apache-2.0"] | 5 | 2016-04-28T16:20:38.000Z | 2021-04-25T11:19:03.000Z | nova/api/openstack/compute/legacy_v2/contrib/assisted_volume_snapshots.py | ebalduf/nova-backports | 6bf97ec73467de522d34ab7a17ca0e0874baa7f9 | ["Apache-2.0"] | 5 | 2016-07-11T20:59:47.000Z | 2020-07-28T09:56:35.000Z | nova/api/openstack/compute/legacy_v2/contrib/assisted_volume_snapshots.py | ebalduf/nova-backports | 6bf97ec73467de522d34ab7a17ca0e0874baa7f9 | ["Apache-2.0"] | 5 | 2020-04-08T20:24:45.000Z | 2020-10-05T19:02:13.000Z |
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_serialization import jsonutils
import six
import webob
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
from nova.i18n import _LI
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute',
'os-assisted-volume-snapshots')
class AssistedVolumeSnapshotsController(wsgi.Controller):
def __init__(self):
self.compute_api = compute.API()
super(AssistedVolumeSnapshotsController, self).__init__()
def create(self, req, body):
"""Creates a new snapshot."""
context = req.environ['nova.context']
authorize(context, action='create')
if not self.is_valid_body(body, 'snapshot'):
raise webob.exc.HTTPBadRequest()
try:
snapshot = body['snapshot']
create_info = snapshot['create_info']
volume_id = snapshot['volume_id']
except KeyError:
raise webob.exc.HTTPBadRequest()
LOG.info(_LI("Create assisted snapshot from volume %s"), volume_id,
context=context)
return self.compute_api.volume_snapshot_create(context, volume_id,
create_info)
def delete(self, req, id):
"""Delete a snapshot."""
context = req.environ['nova.context']
authorize(context, action='delete')
LOG.info(_LI("Delete snapshot with id: %s"), id, context=context)
delete_metadata = {}
delete_metadata.update(req.GET)
try:
delete_info = jsonutils.loads(delete_metadata['delete_info'])
volume_id = delete_info['volume_id']
except (KeyError, ValueError) as e:
raise webob.exc.HTTPBadRequest(explanation=six.text_type(e))
try:
self.compute_api.volume_snapshot_delete(context, volume_id,
id, delete_info)
except exception.NotFound:
return webob.exc.HTTPNotFound()
return webob.Response(status_int=204)
class Assisted_volume_snapshots(extensions.ExtensionDescriptor):
"""Assisted volume snapshots."""
name = "AssistedVolumeSnapshots"
alias = "os-assisted-volume-snapshots"
namespace = ("http://docs.openstack.org/compute/ext/"
"assisted-volume-snapshots/api/v2")
updated = "2013-08-29T00:00:00Z"
def get_resources(self):
resource = extensions.ResourceExtension('os-assisted-volume-snapshots',
AssistedVolumeSnapshotsController())
return [resource]
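# --- Illustrative request shapes (not part of the original module) ---
# Derived from the controller above; identifiers are placeholders and the
# contents of create_info depend on the snapshot driver.
# POST /os-assisted-volume-snapshots
#   {"snapshot": {"volume_id": "<volume-uuid>",
#                 "create_info": {...driver specific...}}}
# DELETE /os-assisted-volume-snapshots/<snapshot-id>?delete_info={"volume_id": "<volume-uuid>"}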
| 33.163265 | 79 | 0.662462 |
73f9684447d70d9be3ef125d3b85662543c99bcb | 2,153 | py | Python | python-daemon/marvin_python_daemon/management/notebook.py | AI-ML-Projects/incubator-marvin | 394eaa729cd4637c2d585f721441819798546667 | ["Apache-2.0"] | null | null | null | python-daemon/marvin_python_daemon/management/notebook.py | AI-ML-Projects/incubator-marvin | 394eaa729cd4637c2d585f721441819798546667 | ["Apache-2.0"] | null | null | null | python-daemon/marvin_python_daemon/management/notebook.py | AI-ML-Projects/incubator-marvin | 394eaa729cd4637c2d585f721441819798546667 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
# Copyright [2020] [Apache Software Foundation]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import subprocess
import sys
from ..common.log import get_logger
logger = get_logger('management.notebook')
def notebook(config, enable_security, port):
notebookdir = os.path.join(config['base_path'], 'notebooks')
command = [
"SPARK_CONF_DIR={0} YARN_CONF_DIR={0}".format(
os.path.join(os.environ["SPARK_HOME"], "conf")),
'jupyter', 'notebook',
'--notebook-dir', notebookdir,
'--ip', '0.0.0.0',
'--port', port,
'--no-browser',
'--config', os.path.join(os.environ["MARVIN_DAEMON_PATH"],
'extras', 'notebook_extensions', 'jupyter_notebook_config.py')
]
command.append("--NotebookApp.token=") if not enable_security else None
command.append("--allow-root")
return_code = os.system(' '.join(command))
logger.info("Notebook call returned {0}".format(str(return_code)))
def lab(config, enable_security, port):
notebookdir = os.path.join(config['base_path'], 'notebooks')
command = [
"SPARK_CONF_DIR={0} YARN_CONF_DIR={0}".format(
os.path.join(os.environ["SPARK_HOME"], "conf")),
'jupyter-lab',
'--notebook-dir', notebookdir,
'--ip', '0.0.0.0',
'--port', port,
'--no-browser',
]
command.append("--NotebookApp.token=") if not enable_security else None
return_code = os.system(' '.join(command))
logger.info("Lab call returned {0}".format(str(return_code)))
| 32.621212 | 95 | 0.657687 |
73f9ba32ca44f4f80f76ae26ca39735c527102f4 | 4,850 | py | Python | examiner/tests/test_views.py | JakobGM/WikiLinks | 5743b1d4c3fefa66fcaa4d283436d2a3f0490604 | ["MIT"] | 6 | 2017-08-12T09:55:06.000Z | 2019-09-03T08:05:21.000Z | examiner/tests/test_views.py | JakobGM/WikiLinks | 5743b1d4c3fefa66fcaa4d283436d2a3f0490604 | ["MIT"] | 57 | 2017-08-11T23:05:07.000Z | 2022-03-11T23:32:12.000Z | examiner/tests/test_views.py | JakobGM/WikiLinks | 5743b1d4c3fefa66fcaa4d283436d2a3f0490604 | ["MIT"] | 1 | 2017-09-27T15:31:15.000Z | 2017-09-27T15:31:15.000Z |
from django.core.files.base import ContentFile
from django.shortcuts import reverse
import pytest
from examiner.forms import VerifyExamForm
from examiner.models import DocumentInfo, DocumentInfoSource, Pdf
from semesterpage.tests.factories import CourseFactory
@pytest.mark.django_db
def test_empty_exams_view(client):
"""Test empty output of all exams view when no URL is present."""
response = client.get(reverse('examiner:all_exams'))
assert response.status_code == 200
@pytest.mark.django_db
def test_verify_random_pdf_view(client, django_user_model):
"""Test PDF verification view."""
# We have one PDF
sha1_hash = '0000000000000000000000000000000000000000'
pdf = Pdf(sha1_hash=sha1_hash)
content = ContentFile('exam text')
pdf.file.save(name=sha1_hash + '.pdf', content=content)
# And three courses
course1 = CourseFactory(course_code='TMA1000')
course2 = CourseFactory(course_code='TMA2000')
course3 = CourseFactory(course_code='TMA3000')
# The PDF has been inferred to contain the two first of these courses
common_docinfo_attrs = {
'language': 'Bokmål',
'year': 2010,
'solutions': False,
'content_type': 'Exam',
}
exam1 = DocumentInfo.objects.create(
course=course1,
**common_docinfo_attrs,
)
DocumentInfoSource.objects.create(pdf=pdf, document_info=exam1)
exam2 = DocumentInfo.objects.create(
course=course2,
**common_docinfo_attrs,
)
DocumentInfoSource.objects.create(pdf=pdf, document_info=exam2)
# We verify a random PDF, in this case our PDF, since there is only one
user = django_user_model.objects.create_user(username='u', password='p')
client.login(username='u', password='p')
url = reverse('examiner:verify_random')
response = client.get(url)
assert response.status_code == 200
# The form instance is populated with the first exam
form = response.context['form']
data = form.initial
assert form.instance == exam1
assert data['language'] == 'Bokmål'
assert data['pdf'] == pdf
assert data['season'] is None
assert data['verifier'] == user
# But both courses appear in the courses field
assert data['courses'].count() == 2
assert set(data['courses']) == {course1.id, course2.id}
# The user now changes the 2 courses
form = VerifyExamForm({
'courses': [course2.id, course3.id],
'pdf': pdf.id,
'verifier': user.id,
**common_docinfo_attrs,
})
assert form.is_valid()
response = client.post(url, form.data)
assert response.status_code == 302
# We have two new verified exams
verified_exams = DocumentInfoSource.objects.filter(verified_by__in=[user])
assert verified_exams.count() == 2
# Both are connected to our pdf
exam_pdf1 = verified_exams.first()
exam_pdf2 = verified_exams.last()
assert exam_pdf1.pdf == pdf
assert exam_pdf2.pdf == pdf
assert exam_pdf1.verified_by.first() == user
assert exam_pdf2.verified_by.first() == user
# With two different courses
docinfo1 = exam_pdf1.document_info
docinfo2 = exam_pdf2.document_info
assert docinfo1.course == course2
assert docinfo2.course == course3
# But all other attributes are equal
for key, value in common_docinfo_attrs.items():
assert getattr(docinfo1, key) == value
assert getattr(docinfo2, key) == value
# The two other unverified infos have now been removed
assert not DocumentInfoSource.objects.filter(
verified_by__isnull=True,
).exists()
# And we have altogether 3 DocumentInfo objects
assert DocumentInfo.objects.count() == 3
# And only two through relations
assert DocumentInfoSource.objects.count() == 2
@pytest.mark.django_db
def test_verify_pdf_view(admin_client):
"""Test PDF verification of specific model objects."""
# We have one PDF
sha1_hash = '0000000000000000000000000000000000000000'
pdf = Pdf(sha1_hash=sha1_hash)
content = ContentFile('exam text')
pdf.file.save(name=sha1_hash + '.pdf', content=content)
# And another one
sha1_hash2 = '1111111111111111111111111111111111111111'
pdf2 = Pdf(sha1_hash=sha1_hash2)
content2 = ContentFile('exam text')
pdf2.file.save(name=sha1_hash2 + '.pdf', content=content2)
# Both PDFs are connected to the same exam
exam = DocumentInfo.objects.create()
DocumentInfoSource.objects.create(pdf=pdf, document_info=exam)
DocumentInfoSource.objects.create(pdf=pdf2, document_info=exam)
# And each resolves to a view with their own PDF as context
response = admin_client.get(pdf.get_absolute_url())
assert response.context['pdf'] == pdf
response2 = admin_client.get(pdf2.get_absolute_url())
assert response2.context['pdf'] == pdf2
| 33.916084 | 78 | 0.702268 |
73f9c6a20bce60704f103fd5a0b786432a81ea15 | 18,931 | py | Python | datacube/drivers/postgres/_fields.py | Kirill888/datacube-core | 996b395e15f975decb77c0ca9fa0555177674b2f | [
"Apache-2.0"
] | 2 | 2019-10-24T15:29:54.000Z | 2019-10-24T15:29:58.000Z | datacube/drivers/postgres/_fields.py | Kirill888/datacube-core | 996b395e15f975decb77c0ca9fa0555177674b2f | [
"Apache-2.0"
] | 2 | 2021-03-26T00:37:36.000Z | 2021-03-31T20:05:01.000Z | datacube/drivers/postgres/_fields.py | Kirill888/datacube-core | 996b395e15f975decb77c0ca9fa0555177674b2f | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# pylint: disable=abstract-method
"""
Build and index fields within documents.
"""
from collections import namedtuple
from datetime import datetime, date
from decimal import Decimal
from dateutil import tz
from psycopg2.extras import NumericRange, DateTimeTZRange
from sqlalchemy import cast, func, and_
from sqlalchemy.dialects import postgresql as postgres
from sqlalchemy.dialects.postgresql import INT4RANGE
from sqlalchemy.dialects.postgresql import NUMRANGE, TSTZRANGE
from sqlalchemy.sql import ColumnElement
from datacube import utils
from datacube.model.fields import Expression, Field
from datacube.model import Range
from datacube.utils import get_doc_offset_safe
from .sql import FLOAT8RANGE
from typing import Any, Callable, Tuple, Union
class PgField(Field):
"""
Postgres implementation of a searchable field. May be a value inside
a JSONB column.
"""
def __init__(self, name, description, alchemy_column, indexed):
super(PgField, self).__init__(name, description)
# The underlying SQLAlchemy column. (eg. DATASET.c.metadata)
self.alchemy_column = alchemy_column
self.indexed = indexed
@property
def required_alchemy_table(self):
return self.alchemy_column.table
@property
def alchemy_expression(self):
"""
Get an SQLAlchemy expression for accessing this field.
:return:
"""
raise NotImplementedError('alchemy expression')
@property
def sql_expression(self):
"""
Get the raw SQL expression for this field as a string.
:rtype: str
"""
return str(self.alchemy_expression.compile(
dialect=postgres.dialect(),
compile_kwargs={"literal_binds": True}
))
@property
def postgres_index_type(self):
return 'btree'
def __eq__(self, value):
"""
:rtype: Expression
"""
return EqualsExpression(self, value)
def between(self, low, high):
"""
:rtype: Expression
"""
raise NotImplementedError('between expression')
class NativeField(PgField):
"""
Fields hard-coded into the schema. (not user configurable)
"""
def __init__(self, name, description, alchemy_column, alchemy_expression=None,
# Should this be selected by default when selecting all fields?
affects_row_selection=False):
super(NativeField, self).__init__(name, description, alchemy_column, False)
self._expression = alchemy_expression
self.affects_row_selection = affects_row_selection
@property
def alchemy_expression(self):
expression = self._expression if self._expression is not None else self.alchemy_column
return expression.label(self.name)
@property
def postgres_index_type(self):
# Don't add extra indexes for native fields.
return None
class PgDocField(PgField):
"""
A field extracted from inside a (jsonb) document.
"""
def extract(self, document):
"""
Extract a value from the given document in pure python (no postgres).
"""
raise NotImplementedError("extract()")
def value_to_alchemy(self, value):
"""
Wrap the given value with any necessary type casts/conversions for this field.
Overridden by other classes as needed.
"""
# Default do nothing (eg. string datatypes)
return value
def parse_value(self, value):
"""
Parse the value from a string. May be overridden by subclasses.
"""
return value
def _alchemy_offset_value(self, doc_offsets, agg_function):
# type: (Tuple[Tuple[str]], Callable[[Any], ColumnElement]) -> ColumnElement
"""
Get an sqlalchemy value for the given offsets of this field's sqlalchemy column.
If there are multiple they will be combined using the given aggregate function.
Offsets can either be single:
('platform', 'code')
Or multiple:
(('platform', 'code'), ('satellite', 'name'))
In the latter case, the multiple values are combined using the given aggregate function
(defaults to using coalesce: grab the first non-null value)
"""
if not doc_offsets:
raise ValueError("Value requires at least one offset")
if isinstance(doc_offsets[0], str):
# It's a single offset.
doc_offsets = [doc_offsets]
alchemy_values = [self.value_to_alchemy(self.alchemy_column[offset].astext) for offset in doc_offsets]
# If there's multiple fields, we aggregate them (eg. "min()"). Otherwise use the one.
return agg_function(*alchemy_values) if len(alchemy_values) > 1 else alchemy_values[0]
def _extract_offset_value(self, doc, doc_offsets, agg_function):
"""
Extract a value for the given document offsets.
Same as _alchemy_offset_value(), but returns the value instead of an sqlalchemy expression to calc the value.
"""
if not doc_offsets:
raise ValueError("Value requires at least one offset")
if isinstance(doc_offsets[0], str):
# It's a single offset.
doc_offsets = [doc_offsets]
values = (get_doc_offset_safe(offset, doc) for offset in doc_offsets)
values = [self.parse_value(v) for v in values if v is not None]
if not values:
return None
if len(values) == 1:
return values[0]
return agg_function(*values)
class SimpleDocField(PgDocField):
"""
A field with a single value (eg. String, int) calculated as an offset inside a (jsonb) document.
"""
def __init__(self, name, description, alchemy_column, indexed, offset=None, selection='first'):
super(SimpleDocField, self).__init__(name, description, alchemy_column, indexed)
self.offset = offset
if selection not in SELECTION_TYPES:
raise ValueError(
"Unknown field selection type %s. Expected one of: %r" % (selection, (SELECTION_TYPES,),)
)
self.aggregation = SELECTION_TYPES[selection]
@property
def alchemy_expression(self):
return self._alchemy_offset_value(self.offset, self.aggregation.pg_calc)
def __eq__(self, value):
"""
:rtype: Expression
"""
return EqualsExpression(self, value)
def between(self, low, high):
"""
:rtype: Expression
"""
raise NotImplementedError('Simple field between expression')
def extract(self, document):
return self._extract_offset_value(document, self.offset, self.aggregation.calc)
def evaluate(self, ctx):
return self.extract(ctx)
class IntDocField(SimpleDocField):
type_name = 'integer'
def value_to_alchemy(self, value):
return cast(value, postgres.INTEGER)
def between(self, low, high):
return ValueBetweenExpression(self, low, high)
def parse_value(self, value):
return int(value)
class NumericDocField(SimpleDocField):
type_name = 'numeric'
def value_to_alchemy(self, value):
return cast(value, postgres.NUMERIC)
def between(self, low, high):
return ValueBetweenExpression(self, low, high)
def parse_value(self, value):
return Decimal(value)
class DoubleDocField(SimpleDocField):
type_name = 'double'
def value_to_alchemy(self, value):
return cast(value, postgres.DOUBLE_PRECISION)
def between(self, low, high):
return ValueBetweenExpression(self, low, high)
def parse_value(self, value):
return float(value)
class DateDocField(SimpleDocField):
type_name = 'datetime'
def value_to_alchemy(self, value):
# type: (Union[datetime, date, str, ColumnElement]) -> Union[datetime, date, str, ColumnElement]
"""
Wrap a value as needed for this field type.
"""
if isinstance(value, datetime):
return _default_utc(value)
# SQLAlchemy expression or string are parsed in pg as dates.
elif isinstance(value, (ColumnElement, str)):
return func.agdc.common_timestamp(value)
else:
raise ValueError("Value not readable as date: %r" % (value,))
def between(self, low, high):
return ValueBetweenExpression(self, low, high)
def parse_value(self, value):
return utils.parse_time(value)
@property
def day(self):
"""Get field truncated to the day"""
return NativeField(
'{}_day'.format(self.name),
'Day of {}'.format(self.description),
self.alchemy_column,
alchemy_expression=cast(func.date_trunc('day', self.alchemy_expression), postgres.TIMESTAMP)
)
class RangeDocField(PgDocField):
"""
A range of values. Has min and max values, which may be calculated from multiple
values in the document.
"""
FIELD_CLASS = SimpleDocField
def __init__(self, name, description, alchemy_column, indexed, min_offset=None, max_offset=None):
super(RangeDocField, self).__init__(name, description, alchemy_column, indexed)
self.lower = self.FIELD_CLASS(
name + '_lower',
description,
alchemy_column,
indexed=False,
offset=min_offset,
selection='least'
)
self.greater = self.FIELD_CLASS(
name + '_greater',
description,
alchemy_column,
indexed=False,
offset=max_offset,
selection='greatest'
)
def value_to_alchemy(self, value):
raise NotImplementedError('range type')
@property
def postgres_index_type(self):
return 'gist'
@property
def alchemy_expression(self):
return self.value_to_alchemy((self.lower.alchemy_expression, self.greater.alchemy_expression))
def __eq__(self, value):
"""
:rtype: Expression
"""
# Lower and higher are interchangeable here: they're the same type.
casted_val = self.lower.value_to_alchemy(value)
return RangeContainsExpression(self, casted_val)
def extract(self, document):
min_val = self.lower.extract(document)
max_val = self.greater.extract(document)
if not min_val and not max_val:
return None
return Range(min_val, max_val)
class NumericRangeDocField(RangeDocField):
FIELD_CLASS = NumericDocField
type_name = 'numeric-range'
def value_to_alchemy(self, value):
low, high = value
return func.numrange(
low, high,
# Inclusive on both sides.
'[]',
type_=NUMRANGE,
)
def between(self, low, high):
"""
:rtype: Expression
"""
return RangeBetweenExpression(self, low, high, _range_class=NumericRange)
class IntRangeDocField(RangeDocField):
FIELD_CLASS = IntDocField
type_name = 'integer-range'
def value_to_alchemy(self, value):
low, high = value
return func.numrange(
low, high,
# Inclusive on both sides.
'[]',
type_=INT4RANGE,
)
def between(self, low, high):
"""
:rtype: Expression
"""
return RangeBetweenExpression(self, low, high, _range_class=NumericRange)
class DoubleRangeDocField(RangeDocField):
FIELD_CLASS = DoubleDocField
type_name = 'double-range'
def value_to_alchemy(self, value):
low, high = value
return func.agdc.float8range(
low, high,
# Inclusive on both sides.
'[]',
type_=FLOAT8RANGE,
)
def between(self, low, high):
"""
:rtype: Expression
"""
return RangeBetweenExpression(self, low, high, _range_class=NumericRange)
class DateRangeDocField(RangeDocField):
FIELD_CLASS = DateDocField
type_name = 'datetime-range'
def value_to_alchemy(self, value):
low, high = value
return func.tstzrange(
low, high,
# Inclusive on both sides.
'[]',
type_=TSTZRANGE,
)
def between(self, low, high):
"""
:rtype: Expression
"""
low = _number_implies_year(low)
high = _number_implies_year(high)
if isinstance(low, datetime) and isinstance(high, datetime):
return RangeBetweenExpression(
self,
_default_utc(low),
_default_utc(high),
_range_class=DateTimeTZRange
)
else:
raise ValueError("Unknown comparison type for date range: "
"expecting datetimes, got: (%r, %r)" % (low, high))
def _number_implies_year(v):
# type: (Union[int, datetime]) -> datetime
"""
>>> _number_implies_year(1994)
datetime.datetime(1994, 1, 1, 0, 0)
>>> _number_implies_year(datetime(1994, 4, 4))
datetime.datetime(1994, 4, 4, 0, 0)
"""
if isinstance(v, int):
return datetime(v, 1, 1)
# The expression module parses all number ranges as floats.
if isinstance(v, float):
return datetime(int(v), 1, 1)
return v
class PgExpression(Expression):
def __init__(self, field):
super(PgExpression, self).__init__()
#: :type: PgField
self.field = field
@property
def alchemy_expression(self):
"""
Get an SQLAlchemy expression for accessing this field.
:return:
"""
raise NotImplementedError('alchemy expression')
class ValueBetweenExpression(PgExpression):
def __init__(self, field, low_value, high_value):
super(ValueBetweenExpression, self).__init__(field)
self.low_value = low_value
self.high_value = high_value
@property
def alchemy_expression(self):
if self.low_value is not None and self.high_value is not None:
return and_(self.field.alchemy_expression >= self.low_value,
self.field.alchemy_expression < self.high_value)
if self.low_value is not None:
return self.field.alchemy_expression >= self.low_value
if self.high_value is not None:
return self.field.alchemy_expression < self.high_value
raise ValueError('Expect at least one of [low,high] to be set')
class RangeBetweenExpression(PgExpression):
def __init__(self, field, low_value, high_value, _range_class):
super(RangeBetweenExpression, self).__init__(field)
self.low_value = low_value
self.high_value = high_value
self._range_class = _range_class
@property
def alchemy_expression(self):
return self.field.alchemy_expression.overlaps(
self._range_class(self.low_value, self.high_value)
)
class RangeContainsExpression(PgExpression):
def __init__(self, field, value):
super(RangeContainsExpression, self).__init__(field)
self.value = value
@property
def alchemy_expression(self):
return self.field.alchemy_expression.contains(self.value)
class EqualsExpression(PgExpression):
def __init__(self, field, value):
super(EqualsExpression, self).__init__(field)
self.value = value
@property
def alchemy_expression(self):
return self.field.alchemy_expression == self.value
def evaluate(self, ctx):
return self.field.evaluate(ctx) == self.value
def parse_fields(doc, table_column):
"""
Parse a field spec document into objects.
Example document:
::
{
# Field name:
'lat': {
# Field type & properties.
'type': 'float-range',
'min_offset': [
# Offsets within a dataset document for this field.
['extent', 'coord', 'ul', 'lat'],
['extent', 'coord', 'll', 'lat']
],
'max_offset': [
['extent', 'coord', 'ur', 'lat'],
['extent', 'coord', 'lr', 'lat']
]
}
}
:param table_column: SQLAlchemy jsonb column for the document we're reading fields from.
:type doc: dict
:rtype: dict[str, PgField]
"""
# Implementations of fields for this driver
types = {
SimpleDocField,
IntDocField,
DoubleDocField,
DateDocField,
NumericRangeDocField,
IntRangeDocField,
DoubleRangeDocField,
DateRangeDocField,
}
type_map = {f.type_name: f for f in types}
# An alias for backwards compatibility
type_map['float-range'] = NumericRangeDocField
# No later field should have overridden string
assert type_map['string'] == SimpleDocField
def _get_field(name, descriptor, column):
"""
:type name: str
:type descriptor: dict
:param column: SQLAlchemy table column
:rtype: PgField
"""
ctorargs = descriptor.copy()
type_name = ctorargs.pop('type', 'string')
description = ctorargs.pop('description', None)
indexed_val = ctorargs.pop('indexed', "true")
indexed = indexed_val.lower() == 'true' if isinstance(indexed_val, str) else indexed_val
field_class = type_map.get(type_name)
if not field_class:
raise ValueError(('Field %r has unknown type %r.'
' Available types are: %r') % (name, type_name, list(type_map.keys())))
try:
return field_class(name, description, column, indexed, **ctorargs)
except TypeError as e:
raise RuntimeError(
'Field {name} has unexpected argument for a {type}'.format(
name=name, type=type_name
), e
)
return {name: _get_field(name, descriptor, table_column) for name, descriptor in doc.items()}
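# Minimal usage sketch for parse_fields (illustrative only: the offsets and the
# DATASET.c.metadata JSONB column below are hypothetical placeholders):
#
#   fields = parse_fields(
#       {'lat': {'type': 'float-range',
#                'min_offset': [['extent', 'coord', 'll', 'lat']],
#                'max_offset': [['extent', 'coord', 'ur', 'lat']]}},
#       DATASET.c.metadata,
#   )
#   lat_filter = fields['lat'].between(-35.0, -30.0).alchemy_expression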
def _coalesce(*values):
"""
Return first non-none value.
Return None if all values are None, or there are no values passed in.
>>> _coalesce(1, 2)
1
>>> _coalesce(None, 2, 3)
2
>>> _coalesce(None, None, 3, None, 5)
3
"""
for v in values:
if v is not None:
return v
return None
def _default_utc(d):
if d.tzinfo is None:
return d.replace(tzinfo=tz.tzutc())
return d
# How to choose/combine multiple doc values.
ValueAggregation = namedtuple('ValueAggregation', ('calc', 'pg_calc'))
SELECTION_TYPES = {
# First non-null
'first': ValueAggregation(_coalesce, func.coalesce),
# min/max
'least': ValueAggregation(min, func.least),
'greatest': ValueAggregation(max, func.greatest),
}
| 30.049206 | 117 | 0.624426 |
73f9e015cdc239626a6062827bf90200e8044554 | 1,719 | py | Python | kairon/data_processor/constant.py | Anitej/kairon | 61d6bd7f230a744303abab42e3b54b0381fee7da | [
"Apache-2.0"
] | null | null | null | kairon/data_processor/constant.py | Anitej/kairon | 61d6bd7f230a744303abab42e3b54b0381fee7da | [
"Apache-2.0"
] | 1 | 2021-01-29T22:20:59.000Z | 2021-01-29T22:20:59.000Z | kairon/data_processor/constant.py | Anitej/kairon | 61d6bd7f230a744303abab42e3b54b0381fee7da | [
"Apache-2.0"
] | null | null | null | from enum import Enum
TRAINING_DATA_GENERATOR_DIR = 'data_generator'
class RESPONSE(str, Enum):
Text = "text"
CUSTOM = "custom"
IMAGE = "image"
CHANNEL = "channel"
BUTTONS = "buttons"
class DOMAIN(str, Enum):
INTENTS = "intents"
ACTIONS = "actions"
SLOTS = "slots"
SESSION_CONFIG = "session_config"
RESPONSES = "responses"
FORMS = "forms"
ENTITIES = "entities"
class ENTITY(str, Enum):
START = "start"
END = "end"
VALUE = "value"
ENTITY = "entity"
class TRAINING_EXAMPLE(str, Enum):
INTENT = "intent"
ENTITIES = "entities"
class LOOKUP_TABLE(str, Enum):
NAME = "name"
ELEMENTS = "elements"
class REGEX_FEATURES(str, Enum):
NAME = "name"
PATTERN = "pattern"
class SESSION_CONFIG(str, Enum):
SESSION_EXPIRATION_TIME = "session_expiration_time"
CARRY_OVER_SLOTS = "carry_over_slots"
class SLOTS(str, Enum):
INITIAL_VALUE = "initial_value"
VALUE_RESET_DELAY = "value_reset_delay"
AUTO_FILL = "auto_fill"
MIN_VALUE = "min_value"
MAX_VALUE = "max_value"
VALUES = "values"
TYPE = "type"
class STORY_EVENT(str, Enum):
NAME = "name"
CONFIDENCE = "confidence"
class MODEL_TRAINING_STATUS(str, Enum):
INPROGRESS = "Inprogress"
DONE = "Done"
FAIL = "Fail"
class UTTERANCE_TYPE(str, Enum):
BOT = "bot"
HTTP = "http"
class CUSTOM_ACTIONS(str, Enum):
HTTP_ACTION_NAME = "kairon_http_action"
HTTP_ACTION_CONFIG = "http_action_config"
class TRAINING_DATA_GENERATOR_STATUS(str, Enum):
INITIATED = "Initiated"
TASKSPAWNED = "Task Spawned"
INPROGRESS = "In progress"
PARSE = "Task Spawned"
COMPLETED = "Completed"
FAIL = "Fail" | 19.314607 | 55 | 0.656195 |
73f9f97afb4a78054d7be47f49e5ab437499b3c5 | 413 | py | Python | analysis/join.py | rudolfspetrovs/benchml | 896673f387a6bb9b185664ddd54f569a1ba54e51 | [
"Apache-2.0"
] | 3 | 2021-08-12T13:25:31.000Z | 2022-03-21T21:30:22.000Z | analysis/join.py | rudolfspetrovs/benchml | 896673f387a6bb9b185664ddd54f569a1ba54e51 | [
"Apache-2.0"
] | 5 | 2020-12-08T08:59:41.000Z | 2022-01-22T06:46:09.000Z | analysis/join.py | rudolfspetrovs/benchml | 896673f387a6bb9b185664ddd54f569a1ba54e51 | [
"Apache-2.0"
] | 1 | 2021-06-25T11:07:32.000Z | 2021-06-25T11:07:32.000Z | #! /usr/bin/env python
import glob
import gzip
import json
import benchml as bml
bml.log.Connect()
bml.log.AddArg("dataset", str)
args = bml.log.Parse()
js = sorted(glob.glob("%s*.json" % args.dataset))
data = []
for j in js:
data.extend(json.load(open(j)))
with gzip.GzipFile("benchmark_%s.json.gz" % args.dataset, "w") as fout:
fout.write(json.dumps(data, indent=1, sort_keys=True).encode("utf-8"))
| 21.736842 | 74 | 0.68523 |
73fa755990b7f0edabed8fe43f4e908518c1b387 | 408 | py | Python | icoscp/cpb/__init__.py | ICOS-Carbon-Portal/pylib | 3a86c936ee5fa03520384a8e18ed093decdab171 | [
"CC-BY-4.0"
] | 4 | 2020-09-27T09:05:56.000Z | 2022-01-10T18:11:57.000Z | icoscp/cpb/__init__.py | ICOS-Carbon-Portal/pylib | 3a86c936ee5fa03520384a8e18ed093decdab171 | [
"CC-BY-4.0"
] | 26 | 2020-08-03T13:00:56.000Z | 2022-03-31T15:40:26.000Z | icoscp/cpb/__init__.py | ICOS-Carbon-Portal/pylib | 3a86c936ee5fa03520384a8e18ed093decdab171 | [
"CC-BY-4.0"
] | 2 | 2020-07-28T08:35:01.000Z | 2021-02-04T21:11:36.000Z | """
The cpb module gives you access to a binary representation of
digital data objects hosted at the ICOS Carbon Portal.
Any data column (x,y axis) you can "preview" on the data portal
( https://data.icos-cp.eu/portal/ ) is accessible from this module.
You need to know the digital object ID (URL, persistent identification)
or have a SPARQL query providing this information.
"""
| 45.333333 | 75 | 0.715686 |
73fa90abd2533b5e3f724341ad9e0b66db45f6e7 | 11,768 | py | Python | tests/common/utils.py | valory-xyz/agents-aea | 8f38efa96041b0156ed1ae328178e395dbabf2fc | [
"Apache-2.0"
] | 28 | 2021-10-31T18:54:14.000Z | 2022-03-17T13:10:43.000Z | tests/common/utils.py | valory-xyz/agents-aea | 8f38efa96041b0156ed1ae328178e395dbabf2fc | [
"Apache-2.0"
] | 66 | 2021-10-31T11:55:48.000Z | 2022-03-31T06:26:23.000Z | tests/common/utils.py | valory-xyz/agents-aea | 8f38efa96041b0156ed1ae328178e395dbabf2fc | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2022 Valory AG
# Copyright 2018-2021 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains some utils for testing purposes."""
import asyncio
import filecmp
import os
import subprocess # nosec
import sys
import time
from contextlib import contextmanager
from functools import wraps
from threading import Thread
from typing import Any, Callable, List, Optional, Set, Tuple, Type, Union
import pytest
from aea.aea import AEA
from aea.configurations.base import PublicId
from aea.mail.base import Envelope
from aea.protocols.base import Message
from aea.skills.base import Behaviour, Handler
from packages.fetchai.protocols.default.message import DefaultMessage
from tests.conftest import ROOT_DIR
DEFAULT_SLEEP = 0.0001
DEFAULT_TIMEOUT = 3
class TimeItResult:
"""Class to store execution time for timeit_context."""
def __init__(self):
"""Init with time passed = -1."""
self.time_passed = -1
@contextmanager
def timeit_context():
"""
Context manager to measure execution time of code in context.
    :return: TimeItResult
example:
with timeit_context() as result:
do_long_code()
print("Long code takes ", result.time_passed)
"""
result = TimeItResult()
started_time = time.time()
try:
yield result
finally:
result.time_passed = time.time() - started_time
class AeaTool:
"""
AEA test wrapper tool.
To make testing AEA instances easier
"""
def __init__(self, aea: AEA):
"""
Instantiate AeaTool.
:param aea: AEA instance to wrap for tests.
"""
self.aea = aea
def setup(self) -> "AeaTool":
"""Call AEA._start_setup."""
self.aea.setup()
return self
def teardown(self) -> "AeaTool":
"""Call AEA.teardown."""
self.aea.teardown()
return self
def wait_outbox_empty(
self, sleep: float = DEFAULT_SLEEP, timeout: float = DEFAULT_TIMEOUT
) -> "AeaTool":
"""
        Wait till the agent's outbox is consumed completely.
:return: AeaTool
"""
start_time = time.time()
while not self.aea.outbox.empty():
time.sleep(sleep)
if time.time() - start_time > timeout:
raise Exception("timeout")
return self
def wait_inbox(
self, sleep: float = DEFAULT_SLEEP, timeout: float = DEFAULT_TIMEOUT
) -> "AeaTool":
"""
        Wait till something appears in the agent's inbox, spinning in a loop.
:return: AeaTool
"""
start_time = time.time()
while self.aea.inbox.empty():
time.sleep(sleep)
if time.time() - start_time > timeout:
raise Exception("timeout")
return self
def handle_envelope(self, envelope) -> "AeaTool":
"""
        Process the given envelope by calling AEA.handle_envelope once.
:return: AeaTool
"""
self.aea.handle_envelope(envelope)
return self
def act_one(self) -> "AeaTool":
"""
Run AEA.act once to process behaviours act.
:return: AeaTool
"""
self.aea.act()
return self
@classmethod
def dummy_default_message(
cls,
dialogue_reference: Tuple[str, str] = ("", ""),
message_id: int = 1,
target: int = 0,
performative: DefaultMessage.Performative = DefaultMessage.Performative.BYTES,
content: Union[str, bytes] = "hello world!",
) -> Message:
"""
Construct simple message, all arguments are optional.
:return: Message
"""
if isinstance(content, str):
content = content.encode("utf-8")
return DefaultMessage(
dialogue_reference=dialogue_reference,
message_id=message_id,
target=target,
performative=performative,
content=content,
)
@classmethod
def dummy_envelope(
cls,
to: str = "test",
sender: str = "test",
protocol_specification_id: PublicId = DefaultMessage.protocol_specification_id,
message: Message = None,
) -> Envelope:
"""
        Create an envelope; if message is not passed, use the .dummy_default_message method.
:return: Envelope
"""
message = message or cls.dummy_default_message()
message.sender = sender
message.to = to
return Envelope(
to=to,
sender=sender,
protocol_specification_id=protocol_specification_id,
message=message,
)
def put_inbox(self, envelope: Envelope) -> None:
"""Add an envelope to agent's inbox."""
self.aea.runtime.multiplexer.in_queue.put(envelope)
def is_inbox_empty(self) -> bool:
"""Check there is no messages in inbox."""
return self.aea.runtime.multiplexer.in_queue.empty()
def set_execution_timeout(self, timeout: float) -> None:
"""Set act/handle exeution timeout for AEE.
:param timeout: amount of time to limit single act/handle to execute.
"""
self.aea._execution_timeout = timeout
def stop(self) -> None:
"""Stop AEA instance."""
self.aea.stop()
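# Minimal usage sketch (assumes an `aea` instance built elsewhere in the test):
#
#   tool = AeaTool(aea)
#   tool.setup()
#   tool.handle_envelope(AeaTool.dummy_envelope())
#   tool.teardown()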
def make_handler_cls_from_funcion(func: Callable) -> Type[Handler]:
"""Make Handler class with handler function call `func`.
:param func: function or callable to be called from Handler.handle method
:return: Handler class
"""
# pydocstyle: ignore # case conflicts with black # noqa: E800
class TestHandler(Handler):
SUPPORTED_PROTOCOL = DefaultMessage.protocol_id
def setup(self):
pass
def teardown(self):
pass
def handle(self, msg):
func(self)
return TestHandler
def make_behaviour_cls_from_funcion(func: Callable) -> Type[Behaviour]:
"""Make Behaviour class with act function call `func`.
:param func: function or callable to be called from Behaviour.act method
:return: Behaviour class
"""
# pydocstyle: ignore # case conflicts with black # noqa: E800
class TestBehaviour(Behaviour):
def act(self) -> None:
func(self)
def setup(self):
self._completed = False
def teardown(self):
pass
return TestBehaviour
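# Minimal usage sketch (the skill context and other constructor kwargs needed to
# instantiate the generated classes come from the calling test and are not shown):
#
#   MyHandler = make_handler_cls_from_funcion(lambda self: print("handled"))
#   MyBehaviour = make_behaviour_cls_from_funcion(lambda self: print("acted"))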
def run_in_root_dir(fn) -> Callable:
"""
Chdir to ROOT DIR and return back during tests.
Decorator.
:param fn: function to decorate
:return: wrapped function
"""
# pydocstyle: ignore # case conflicts with black # noqa: E800
@wraps(fn)
def wrap(*args, **kwargs) -> Any:
"""Do a chdir."""
cwd = os.getcwd()
os.chdir(ROOT_DIR)
try:
return fn(*args, **kwargs)
finally:
os.chdir(cwd)
return wrap
@contextmanager
def run_in_thread(fn, timeout=10, on_exit=None, **kwargs):
"""Run a function in contextmanager and test and awaits it completed."""
thread = Thread(target=fn, **kwargs)
thread.daemon = True
thread.start()
try:
yield
finally:
if on_exit:
on_exit()
thread.join(timeout)
if thread.is_alive():
raise Exception("Thread was not stopped!")
def wait_for_condition(condition_checker, timeout=2, error_msg="Timeout", period=0.001):
"""Wait for condition occures in selected timeout."""
start_time = time.time()
while not condition_checker():
time.sleep(period)
if time.time() > start_time + timeout:
raise TimeoutError(error_msg)
async def wait_for_condition_async(
condition_checker, timeout=2, error_msg="Timeout", period=0.001
): # pragma: nocover
"""Wait for condition occures in selected timeout."""
start_time = time.time()
while not condition_checker():
await asyncio.sleep(period)
if time.time() > start_time + timeout:
raise TimeoutError(error_msg)
def are_dirs_equal(
dir1: Union[str, os.PathLike],
dir2: Union[str, os.PathLike],
ignore: Optional[List[str]] = None,
) -> bool:
"""
Compare the content of two directories, recursively.
:param dir1: the left operand.
:param dir2: the right operand.
:param ignore: is a list of names to ignore (see dircmp docs regarding 'ignore').
:return: True if the directories are equal, False otherwise.
"""
ignore = ignore or None
left_only, right_only, diff = dircmp_recursive(
filecmp.dircmp(dir1, dir2, ignore=ignore)
)
return left_only == right_only == diff == set()
def dircmp_recursive(dircmp_obj: filecmp.dircmp) -> Tuple[Set[str], Set[str], Set[str]]:
"""
Compare the content of two directories, recursively.
:param dircmp_obj: the filecmp.dircmp object.
:return: three sets:
- the set of files that are only in the left operand
- the set of files that are only in the right operand
- the set of files in both operands, but that differ.
"""
def _dircmp_recursive(
dircmp_obj: filecmp.dircmp, prefix: str = ""
) -> Tuple[Set[str], Set[str], Set[str]]:
"""
Helper private function that also accepts the 'prefix' parameter.
It is used to keep track of the path prefix during the recursive calls.
"""
def join_with_prefix(suffix: str) -> str:
return os.path.join(prefix, suffix)
left_only: Set[str] = set(map(join_with_prefix, dircmp_obj.left_only))
right_only: Set[str] = set(map(join_with_prefix, dircmp_obj.right_only))
diff_files: Set[str] = set(map(join_with_prefix, dircmp_obj.diff_files))
for name, sub_dircmp_obj in dircmp_obj.subdirs.items():
subprefix = join_with_prefix(name)
subleft, subright, subdiff = _dircmp_recursive(
sub_dircmp_obj, prefix=subprefix
)
left_only.update(subleft)
right_only.update(subright)
diff_files.update(subdiff)
return left_only, right_only, diff_files
return _dircmp_recursive(dircmp_obj, "")
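# Minimal usage sketch (hypothetical directory paths):
#
#   left_only, right_only, diff = dircmp_recursive(filecmp.dircmp("dir_a", "dir_b"))
#   assert are_dirs_equal("dir_a", "dir_b") == (not (left_only or right_only or diff))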
def run_aea_subprocess(*args, cwd: str = ".") -> Tuple[subprocess.Popen, str, str]:
"""
Run subprocess, bypassing ClickRunner.invoke.
The reason is that for some reason ClickRunner.invoke doesn't capture
well the stdout/stderr of nephew processes - children processes of children processes.
"""
result = subprocess.Popen( # type: ignore # nosec
[sys.executable, "-m", "aea.cli", *args],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=cwd,
)
result.wait()
stdout, stderr = result.communicate()
return result, stdout.decode("utf-8"), stderr.decode("utf-8")
@pytest.mark.integration
class UseOef: # pylint: disable=too-few-public-methods
"""Inherit from this class to launch an OEF node."""
@pytest.fixture(autouse=True)
def _start_oef_node(self, network_node: Callable) -> None:
"""Start an oef node."""
| 28.985222 | 90 | 0.622876 |
73fadbac9bdb79dcb24b025fe37b0d3d77db08ab | 11,192 | py | Python | Lib/site-packages/wx-3.0-msw/wx/lib/pdfwin.py | jickieduan/python27 | c752b552396bbed68d8555080d475718cea2edd0 | [
"bzip2-1.0.6"
] | 1 | 2021-02-13T22:40:50.000Z | 2021-02-13T22:40:50.000Z | Lib/site-packages/wx-3.0-msw/wx/lib/pdfwin.py | jickieduan/python27 | c752b552396bbed68d8555080d475718cea2edd0 | [
"bzip2-1.0.6"
] | 1 | 2018-07-28T20:07:04.000Z | 2018-07-30T18:28:34.000Z | Lib/site-packages/wx-3.0-msw/wx/lib/pdfwin.py | jickieduan/python27 | c752b552396bbed68d8555080d475718cea2edd0 | [
"bzip2-1.0.6"
] | 2 | 2019-12-02T01:39:10.000Z | 2021-02-13T22:41:00.000Z | #----------------------------------------------------------------------
# Name: wx.lib.pdfwin
# Purpose: A class that allows the use of the Acrobat PDF reader
# ActiveX control
#
# Author: Robin Dunn
#
# Created: 22-March-2004
# RCS-ID: $Id$
# Copyright: (c) 2008 by Total Control Software
# Licence: wxWindows license
#----------------------------------------------------------------------
import wx
_min_adobe_version = None
def get_min_adobe_version():
return _min_adobe_version
def get_acroversion():
" Included for backward compatibility"
return _min_adobe_version
#----------------------------------------------------------------------
if wx.PlatformInfo[1] == 'wxMSW':
import wx.lib.activex
import comtypes.client as cc
import comtypes
import ctypes
try: # Adobe Reader >= 7.0
cc.GetModule( ('{05BFD3F1-6319-4F30-B752-C7A22889BCC4}', 1, 0) )
progID = 'AcroPDF.PDF.1'
_min_adobe_version = 7.0
except:
try: # Adobe Reader 5 or 6
cc.GetModule( ('{CA8A9783-280D-11CF-A24D-444553540000}', 1, 0) )
progID = 'PDF.PdfCtrl.5'
_min_adobe_version = 5.0
except:
pass # Adobe Reader not installed (progID is not defined)
# Use get_min_adobe_version() before instantiating PDFWindow
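    # Minimal usage sketch (assumes a wx parent window and an installed Adobe reader;
    # the file path is a hypothetical placeholder):
    #
    #   if get_min_adobe_version() is not None:
    #       viewer = PDFWindow(parent)
    #       viewer.LoadFile(r"C:\path\to\file.pdf")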
#------------------------------------------------------------------------------
class PDFWindow(wx.lib.activex.ActiveXCtrl):
def __init__(self, parent, id=-1, pos=wx.DefaultPosition,
size=wx.DefaultSize, style=0, name='PDFWindow'):
wx.lib.activex.ActiveXCtrl.__init__(self, parent, progID,
id, pos, size, style, name)
self.Bind(wx.EVT_WINDOW_DESTROY, self.OnDestroyWindow)
def OnDestroyWindow(self, event):
wx.CallAfter(self.FreeDlls)
def FreeDlls(self):
"""
Unloads any DLLs that are no longer in use when all COM object instances are
released. This prevents the error 'The instruction at "0x0700609c" referenced
        memory at "0x00000014". The memory could not be read' when the application closes.
"""
ctypes.windll.ole32.CoFreeUnusedLibraries()
def LoadFile(self, fileName):
"""
Opens and displays the specified document within the browser.
"""
return self.ctrl.LoadFile(fileName)
def GetVersions(self):
"""
Deprecated: No longer available - do not use.
"""
return self.ctrl.GetVersions()
def Print(self):
"""
Prints the document according to the specified options in a user dialog box.
"""
return self.ctrl.Print()
def goBackwardStack(self):
"""
Goes to the previous view on the view stack, if it exists.
"""
return self.ctrl.goBackwardStack()
def goForwardStack(self):
"""
Goes to the next view on the view stack, if it exists.
"""
return self.ctrl.goForwardStack()
def gotoFirstPage(self):
"""
Goes to the first page in the document.
"""
return self.ctrl.gotoFirstPage()
def gotoLastPage(self):
"""
Goes to the last page in the document.
"""
return self.ctrl.gotoLastPage()
def gotoNextPage(self):
"""
Goes to the next page in the document, if it exists
"""
return self.ctrl.gotoNextPage()
def gotoPreviousPage(self):
"""
Goes to the previous page in the document, if it exists.
"""
return self.ctrl.gotoPreviousPage()
def printAll(self):
"""
Prints the entire document without displaying a user
dialog box. The current printer, page settings, and job
settings are used. This method returns immediately, even
if the printing has not completed.
"""
return self.ctrl.printAll()
def printAllFit(self, shrinkToFit):
"""
Prints the entire document without a user dialog box, and
(if shrinkToFit) shrinks pages as needed to fit the
imageable area of a page in the printer.
"""
return self.ctrl.printAllFit(shrinkToFit)
def printPages(self, from_, to):
"""
Prints the specified pages without displaying a user dialog box.
"""
return self.ctrl.printPages(from_, to)
def printPagesFit(self, from_, to, shrinkToFit):
"""
Prints the specified pages without displaying a user
dialog box, and (if shrinkToFit) shrinks pages as needed
to fit the imageable area of a page in the printer.
"""
return self.ctrl.printPagesFit( from_, to, shrinkToFit)
def printWithDialog(self):
"""
Prints the document according to the specified options in
a user dialog box. These options may include embedded
printing and specifying which printer is to be used.
NB. The page range in the dialog defaults to
'From Page 1 to 1' - Use Print() above instead. (dfh)
"""
return self.ctrl.printWithDialog()
def setCurrentHighlight(self, a, b, c, d):
return self.ctrl.setCurrentHighlight(a, b, c, d)
def setCurrentPage(self, npage):
"""
Goes to the specified page in the document. Maintains the
current location within the page and zoom level. npage is
the page number of the destination page. The first page
in a document is page 0.
## Oh no it isn't! The first page is 1 (dfh)
"""
return self.ctrl.setCurrentPage(npage)
def setLayoutMode(self, layoutMode):
"""
LayoutMode possible values:
================= ====================================
'DontCare' use the current user preference
'SinglePage' use single page mode (as in pre-Acrobat 3.0 viewers)
'OneColumn' use one-column continuous mode
'TwoColumnLeft' use two-column continuous mode, first page on the left
'TwoColumnRight' use two-column continuous mode, first page on the right
================= ====================================
"""
return self.ctrl.setLayoutMode(layoutMode)
def setNamedDest(self, namedDest):
"""
Changes the page view to the named destination in the specified string.
"""
return self.ctrl.setNamedDest(namedDest)
def setPageMode(self, pageMode):
"""
Sets the page mode to display the document only, or to
additionally display bookmarks or thumbnails. pageMode =
'none' or 'bookmarks' or 'thumbs'.
## NB.'thumbs' is case-sensitive, the other are not (dfh)
"""
return self.ctrl.setPageMode(pageMode)
def setShowScrollbars(self, On):
"""
Determines whether scrollbars will appear in the document
view.
## NB. If scrollbars are off, the navigation tools disappear as well (dfh)
"""
return self.ctrl.setShowScrollbars(On)
def setShowToolbar(self, On):
"""
Determines whether a toolbar will appear in the application.
"""
return self.ctrl.setShowToolbar(On)
def setView(self, viewMode):
"""
Determines how the page will fit in the current view.
viewMode possible values:
======== ==============================================
'Fit' fits whole page within the window both vertically and horizontally.
'FitH' fits the width of the page within the window.
'FitV' fits the height of the page within the window.
'FitB' fits bounding box within the window both vertically and horizontally.
'FitBH' fits the width of the bounding box within the window.
'FitBV' fits the height of the bounding box within the window.
======== ==============================================
"""
return self.ctrl.setView(viewMode)
def setViewRect(self, left, top, width, height):
"""
Sets the view rectangle according to the specified coordinates.
:param left: The upper left horizontal coordinate.
:param top: The vertical coordinate in the upper left corner.
:param width: The horizontal width of the rectangle.
:param height: The vertical height of the rectangle.
"""
return self.ctrl.setViewRect(left, top, width, height)
def setViewScroll(self, viewMode, offset):
"""
Sets the view of a page according to the specified string.
Depending on the view mode, the page is either scrolled to
the right or scrolled down by the amount specified in
offset. Possible values of viewMode are as in setView
above. offset is the horizontal or vertical coordinate
positioned either at the left or top edge.
"""
return self.ctrl.setViewScroll(viewMode, offset)
def setZoom(self, percent):
"""
Sets the magnification according to the specified value
expressed as a percentage (float)
"""
return self.ctrl.setZoom(percent)
def setZoomScroll(self, percent, left, top):
"""
Sets the magnification according to the specified value,
and scrolls the page view both horizontally and vertically
according to the specified amounts.
:param left: the horizontal coordinate positioned at the left edge.
:param top: the vertical coordinate positioned at the top edge.
"""
return self.ctrl.setZoomScroll(percent, left, top)
#------------------------------------------------------------------------------
if __name__ == '__main__':
app = wx.App(False)
frm = wx.Frame(None, title="AX Test Window")
pdf = PDFWindow(frm)
frm.Show()
import wx.lib.inspection
wx.lib.inspection.InspectionTool().Show()
app.MainLoop()
| 37.683502 | 91 | 0.527073 |
73faf97fabf739612c3a44d238b084b7bc85bbd0 | 508 | py | Python | Utils/Helpers.py | srinibasmisra97/OAuth-Authorization-Server | a289de5432f271e804d12419305722fd113fa8f4 | [
"MIT"
] | null | null | null | Utils/Helpers.py | srinibasmisra97/OAuth-Authorization-Server | a289de5432f271e804d12419305722fd113fa8f4 | [
"MIT"
] | null | null | null | Utils/Helpers.py | srinibasmisra97/OAuth-Authorization-Server | a289de5432f271e804d12419305722fd113fa8f4 | [
"MIT"
] | null | null | null | import memcache
def memcache_connection():
"""
This returns a memcache connection client.
:return: Memcache client.
"""
from main import MEMCACHE_HOST, MEMCACHE_PORT
client = memcache.Client(servers=[MEMCACHE_HOST + ":" + str(MEMCACHE_PORT)])
return client
def list_to_string(list):
"""
Converts list to string.
:param list: List to convert.
:return: String
"""
string = ""
for a in list:
string = string + a + " "
return string.strip() | 22.086957 | 80 | 0.63189 |
73fb1156d6bb31eef82b27fb14b455701a81914e | 4,623 | py | Python | server/app/scrapers/ooms.py | damienallen/makelaardij-notify | ea8e37e1b0f867487b90590c5273e7fb25d868cf | [
"MIT"
] | null | null | null | server/app/scrapers/ooms.py | damienallen/makelaardij-notify | ea8e37e1b0f867487b90590c5273e7fb25d868cf | [
"MIT"
] | 15 | 2021-02-13T23:46:28.000Z | 2021-02-25T15:36:08.000Z | server/app/scrapers/ooms.py | damienallen/makelaardij-notify | ea8e37e1b0f867487b90590c5273e7fb25d868cf | [
"MIT"
] | null | null | null | import asyncio
import json
from datetime import datetime
from typing import List, Tuple, Union
import httpx
from app.broadcast import broadcast_apartment
from app.common import MissingListing, SkipListing
from app.models import Apartment
from app.scrapers.base import BaseScraper
from odmantic import AIOEngine
from pydantic.error_wrappers import ValidationError as PydanticError
engine = AIOEngine(database="aanbod")
class OomsScraper(BaseScraper):
MAKELAARDIJ: str = "ooms"
BASE_URL: str = "https://ooms.com/"
QUERY: str = (
"?buyOrRent=buy&textSearch=rotterdam&orderBy=created_at&orderDirection=desc"
)
async def start(self, update_existing: bool = False, debug: bool = False):
apartment_items = await self.get_apartments()
self.print_header(f"| Scraped {len(apartment_items)} listings")
if debug and apartment_items:
apartment_items = [apartment_items[0]]
for item in apartment_items:
url = item[0]
listing = await engine.find_one(Apartment, Apartment.url == f"{url}")
# Skip existing if not outdated
if listing and not update_existing and not debug:
continue
# Otherwise scrape
try:
listing_data = await self.scrape_item(item[1])
apartment = Apartment.parse_obj(listing_data)
except SkipListing:
continue
except (MissingListing, PydanticError) as e:
print(f"\n{url}")
print(f"{e}\n")
continue
# Create or update DB entry
if debug:
self.print_header(f"+ {apartment.address}")
print(listing_data)
elif listing is None:
self.print_header(f"+ {apartment.address}")
await engine.save(apartment)
await broadcast_apartment(apartment)
else:
listing.asking_price = apartment.asking_price
listing.photos = apartment.photos
listing.available = apartment.available
listing.unit = apartment.unit
listing.building = apartment.building
listing.entry_updated = datetime.utcnow()
await engine.save(listing)
self.sleep_interval()
# Specific functions
async def get_apartments(self) -> List[Tuple[str, str]]:
"""
Fetch list of apartment urls from inventory
"""
list_url = f"{self.BASE_URL}api/properties/available.json"
async with httpx.AsyncClient() as client:
result = await client.get(list_url)
if not result.status_code == 200:
print(f"Error: {result}")
return []
content = json.loads(result.content)
objs = content.get("objects", [])
items: List[Tuple[str, str]] = []
for i in objs:
item_url = i["url"]
if "-rotterdam-" in item_url and i["buy_or_rent"] == "buy":
items.append((item_url, json.dumps(i)))
return items
async def scrape_item(self, item_json: str):
"""
Extract feature metadata from JSON
"""
item_data = json.loads(item_json)
meta_data = {
"makelaardij": self.MAKELAARDIJ,
"building": {},
"unit": {"energy": {}, "tags": []},
"photos": [],
}
meta_data["url"] = item_data["url"]
meta_data["address"] = item_data["short_title"]
meta_data["asking_price"] = item_data["buy_price"]
meta_data["available"] = item_data["is_available"]
meta_data["unit"]["area"] = item_data["usable_area_living_function"]
meta_data["building"]["year_constructed"] = item_data.get("build_year")
if created_at := item_data.get("created_at"):
meta_data["added"] = datetime.fromisoformat(created_at.split("+")[0])
if num_rooms := item_data.get("amount_of_rooms"):
meta_data["unit"]["num_rooms"] = int(num_rooms)
garden_types = item_data.get("garden_types")
if garden_types and (
"achtertuin" in garden_types or "voortuin" in garden_types
):
meta_data["unit"]["tags"].append("garden")
# Bounce broken listings
if not meta_data["unit"].get("area"):
raise SkipListing("Unable to find area")
return meta_data
if __name__ == "__main__":
scraper = OomsScraper()
loop = asyncio.get_event_loop()
loop.run_until_complete(scraper.start())
| 32.328671 | 84 | 0.595717 |
73fb1e0e8ac45577473966dad62f8bdaebcf5b1f | 35,129 | py | Python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_05_01/operations/_disk_encryption_sets_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | 1 | 2021-09-07T18:39:05.000Z | 2021-09-07T18:39:05.000Z | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_05_01/operations/_disk_encryption_sets_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | null | null | null | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2020_05_01/operations/_disk_encryption_sets_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | 1 | 2022-03-04T06:21:56.000Z | 2022-03-04T06:21:56.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_create_or_update_request_initial(
subscription_id: str,
resource_group_name: str,
disk_encryption_set_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"diskEncryptionSetName": _SERIALIZER.url("disk_encryption_set_name", disk_encryption_set_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_update_request_initial(
subscription_id: str,
resource_group_name: str,
disk_encryption_set_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"diskEncryptionSetName": _SERIALIZER.url("disk_encryption_set_name", disk_encryption_set_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_get_request(
subscription_id: str,
resource_group_name: str,
disk_encryption_set_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"diskEncryptionSetName": _SERIALIZER.url("disk_encryption_set_name", disk_encryption_set_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_delete_request_initial(
subscription_id: str,
resource_group_name: str,
disk_encryption_set_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"diskEncryptionSetName": _SERIALIZER.url("disk_encryption_set_name", disk_encryption_set_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_by_resource_group_request(
subscription_id: str,
resource_group_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_request(
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/diskEncryptionSets')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class DiskEncryptionSetsOperations(object):
"""DiskEncryptionSetsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2020_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _create_or_update_initial(
self,
resource_group_name: str,
disk_encryption_set_name: str,
disk_encryption_set: "_models.DiskEncryptionSet",
**kwargs: Any
) -> "_models.DiskEncryptionSet":
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiskEncryptionSet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(disk_encryption_set, 'DiskEncryptionSet')
request = build_create_or_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
disk_encryption_set_name=disk_encryption_set_name,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DiskEncryptionSet', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('DiskEncryptionSet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}'} # type: ignore
@distributed_trace
def begin_create_or_update(
self,
resource_group_name: str,
disk_encryption_set_name: str,
disk_encryption_set: "_models.DiskEncryptionSet",
**kwargs: Any
) -> LROPoller["_models.DiskEncryptionSet"]:
"""Creates or updates a disk encryption set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_encryption_set_name: The name of the disk encryption set that is being created. The
name can't be changed after the disk encryption set is created. Supported characters for the
name are a-z, A-Z, 0-9 and _. The maximum name length is 80 characters.
:type disk_encryption_set_name: str
:param disk_encryption_set: disk encryption set object supplied in the body of the Put disk
encryption set operation.
:type disk_encryption_set: ~azure.mgmt.compute.v2020_05_01.models.DiskEncryptionSet
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either DiskEncryptionSet or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2020_05_01.models.DiskEncryptionSet]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiskEncryptionSet"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
disk_encryption_set_name=disk_encryption_set_name,
disk_encryption_set=disk_encryption_set,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('DiskEncryptionSet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}'} # type: ignore
def _update_initial(
self,
resource_group_name: str,
disk_encryption_set_name: str,
disk_encryption_set: "_models.DiskEncryptionSetUpdate",
**kwargs: Any
) -> "_models.DiskEncryptionSet":
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiskEncryptionSet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(disk_encryption_set, 'DiskEncryptionSetUpdate')
request = build_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
disk_encryption_set_name=disk_encryption_set_name,
content_type=content_type,
json=_json,
template_url=self._update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DiskEncryptionSet', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('DiskEncryptionSet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}'} # type: ignore
@distributed_trace
def begin_update(
self,
resource_group_name: str,
disk_encryption_set_name: str,
disk_encryption_set: "_models.DiskEncryptionSetUpdate",
**kwargs: Any
) -> LROPoller["_models.DiskEncryptionSet"]:
"""Updates (patches) a disk encryption set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_encryption_set_name: The name of the disk encryption set that is being created. The
name can't be changed after the disk encryption set is created. Supported characters for the
name are a-z, A-Z, 0-9 and _. The maximum name length is 80 characters.
:type disk_encryption_set_name: str
:param disk_encryption_set: disk encryption set object supplied in the body of the Patch disk
encryption set operation.
:type disk_encryption_set: ~azure.mgmt.compute.v2020_05_01.models.DiskEncryptionSetUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either DiskEncryptionSet or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2020_05_01.models.DiskEncryptionSet]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiskEncryptionSet"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_initial(
resource_group_name=resource_group_name,
disk_encryption_set_name=disk_encryption_set_name,
disk_encryption_set=disk_encryption_set,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('DiskEncryptionSet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}'} # type: ignore
@distributed_trace
def get(
self,
resource_group_name: str,
disk_encryption_set_name: str,
**kwargs: Any
) -> "_models.DiskEncryptionSet":
"""Gets information about a disk encryption set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_encryption_set_name: The name of the disk encryption set that is being created. The
name can't be changed after the disk encryption set is created. Supported characters for the
name are a-z, A-Z, 0-9 and _. The maximum name length is 80 characters.
:type disk_encryption_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DiskEncryptionSet, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2020_05_01.models.DiskEncryptionSet
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiskEncryptionSet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
disk_encryption_set_name=disk_encryption_set_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DiskEncryptionSet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}'} # type: ignore
def _delete_initial(
self,
resource_group_name: str,
disk_encryption_set_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
disk_encryption_set_name=disk_encryption_set_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}'} # type: ignore
@distributed_trace
def begin_delete(
self,
resource_group_name: str,
disk_encryption_set_name: str,
**kwargs: Any
) -> LROPoller[None]:
"""Deletes a disk encryption set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_encryption_set_name: The name of the disk encryption set that is being created. The
name can't be changed after the disk encryption set is created. Supported characters for the
name are a-z, A-Z, 0-9 and _. The maximum name length is 80 characters.
:type disk_encryption_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
disk_encryption_set_name=disk_encryption_set_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}'} # type: ignore
@distributed_trace
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> Iterable["_models.DiskEncryptionSetList"]:
"""Lists all the disk encryption sets under a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DiskEncryptionSetList or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2020_05_01.models.DiskEncryptionSetList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiskEncryptionSetList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=self.list_by_resource_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("DiskEncryptionSetList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets'} # type: ignore
@distributed_trace
def list(
self,
**kwargs: Any
) -> Iterable["_models.DiskEncryptionSetList"]:
"""Lists all the disk encryption sets under a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DiskEncryptionSetList or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2020_05_01.models.DiskEncryptionSetList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiskEncryptionSetList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("DiskEncryptionSetList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/diskEncryptionSets'} # type: ignore
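# ----------------------------------------------------------------------
# Usage sketch (illustrative, not part of the generated SDK file). It assumes
# the azure-identity and azure-mgmt-compute packages are installed and that
# the multi-API ComputeManagementClient exposes this operation group as
# `disk_encryption_sets`; the resource group, set name, vault id and key URL
# below are placeholders.
#
# from azure.identity import DefaultAzureCredential
# from azure.mgmt.compute import ComputeManagementClient
#
# client = ComputeManagementClient(DefaultAzureCredential(), "<subscription-id>")
#
# # begin_create_or_update returns an LROPoller; .result() blocks until the
# # disk encryption set is provisioned.
# poller = client.disk_encryption_sets.begin_create_or_update(
#     resource_group_name="my-rg",
#     disk_encryption_set_name="my-des",
#     disk_encryption_set={
#         "location": "westus2",
#         "identity": {"type": "SystemAssigned"},
#         "active_key": {
#             "source_vault": {"id": "<key-vault-resource-id>"},
#             "key_url": "<key-identifier-url>",
#         },
#     },
# )
# des = poller.result()
#
# # Reads are synchronous; listings return paged iterators.
# des = client.disk_encryption_sets.get("my-rg", "my-des")
# for item in client.disk_encryption_sets.list_by_resource_group("my-rg"):
#     print(item.name)
# ----------------------------------------------------------------------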
# (dataset stats for the file above: avg_line_length=43.584367, max_line_length=205, alphanum_fraction=0.675539)
# ----------------------------------------------------------------------
# File: test/lmp/script/__init__.py (39 bytes, blob 73fb6ddaf6c57935ea47212fd3335479769084c0)
# Repo: ProFatXuanAll/char-RNN @ 531f101b3d1ba20bafd28ca060aafe6f583d1efb | License: Beerware
# Stars/issues/forks: n/a
# ----------------------------------------------------------------------
"""Test :py:mod:`lmp.script` entry."""
# (dataset stats for the file above: avg_line_length=19.5, max_line_length=38, alphanum_fraction=0.589744)
# ----------------------------------------------------------------------
# File: heap/skyline.py (2,882 bytes, blob 73fb6e8998cd40aa5719936cf5a6d0e7a46476e8)
# Repo: AnshulPatni/Algorithms @ c8bcfb86d50f68041921e5140f01821ac12d9521 | License: MIT
# Stars: 2 (2018-04-30 to 2018-05-05) | Issues: n/a | Forks: 1 (2020-03-22)
# ----------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""
A city's skyline is the outer contour of the silhouette formed by all the buildings
in that city when viewed from a distance.
Now suppose you are given the locations and height of all the buildings
as shown on a cityscape photo (Figure A),
write a program to output the skyline formed by these buildings collectively (Figure B).
The geometric information of each building is represented by a triplet of integers [Li, Ri, Hi],
where Li and Ri are the x coordinates of the left and right edge of the ith building, respectively,
and Hi is its height. It is guaranteed that 0 ≤ Li, Ri ≤ INT_MAX, 0 < Hi ≤ INT_MAX, and Ri - Li > 0.
You may assume all buildings are perfect rectangles grounded on an absolutely flat surface at height 0.
For instance, the dimensions of all buildings in Figure A are recorded as:
[ [2 9 10], [3 7 15], [5 12 12], [15 20 10], [19 24 8] ] .
The output is a list of "key points" (red dots in Figure B) in the format of
[ [x1,y1], [x2, y2], [x3, y3], ... ]
that uniquely defines a skyline.
A key point is the left endpoint of a horizontal line segment. Note that the last key point,
where the rightmost building ends,
is merely used to mark the termination of the skyline, and always has zero height.
Also, the ground in between any two adjacent buildings should be considered part of the skyline contour.
For instance, the skyline in Figure B should be represented as:[ [2 10], [3 15], [7 12], [12 0], [15 10], [20 8], [24, 0] ].
Notes:
The number of buildings in any input list is guaranteed to be in the range [0, 10000].
The input list is already sorted in ascending order by the left x position Li.
The output list must be sorted by the x position.
There must be no consecutive horizontal lines of equal height in the output skyline. For instance,
[...[2 3], [4 5], [7 5], [11 5], [12 7]...] is not acceptable; the three lines of height 5 should be merged
into one in the final output as such: [...[2 3], [4 5], [12 7], ...]
"""
import heapq
def get_skyline(LRH):
"""
Worst-case time complexity: O(N log N)
:type LRH: List[List[int]]
:rtype: List[List[int]]
"""
skyline, live = [], []
i, n = 0, len(LRH)
while i < n or live:
if not live or i < n and LRH[i][0] <= -live[0][1]:
x = LRH[i][0]
while i < n and LRH[i][0] == x:
heapq.heappush(live, (-LRH[i][2], -LRH[i][1]))
i += 1
else:
x = -live[0][1]
while live and -live[0][1] <= x:
heapq.heappop(live)
height = len(live) and -live[0][0]
if not skyline or height != skyline[-1][1]:
skyline += [x, height],
return skyline
buildings = [ [2, 9, 10], [3, 7, 15], [5, 12, 12], [15, 20, 10], [19, 24, 8] ]
# [ [2 10], [3 15], [7 12], [12 0], [15 10], [20 8], [24, 0] ]
print(get_skyline(buildings))
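# Small self-check added for illustration (not in the original file). The
# expected key points come straight from the problem statement in the module
# docstring above.
expected = [[2, 10], [3, 15], [7, 12], [12, 0], [15, 10], [20, 8], [24, 0]]
assert get_skyline(buildings) == expected, "skyline does not match the documented example"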
# (dataset stats for the file above: avg_line_length=43.666667, max_line_length=124, alphanum_fraction=0.640527)
# ----------------------------------------------------------------------
# File: src/speech/plt_spec.py (5,899 bytes, blob 73fb880f050babc58e8794068a3541a90cd62e1e)
# Repo: dem123456789/Speech-Emotion-Recognition-with-Dual-Sequence-LSTM-Architecture
#       @ a072cb940201bbcdb2d0f4d0dfa1dde478fa4464 | License: MIT
# Stars: 6 (2020-08-03 to 2022-02-11) | Issues: 1 (2020-09-08) | Forks: 2 (2020-08-03 to 2021-03-26)
# ----------------------------------------------------------------------
def specgram(self, x, NFFT=None, Fs=None, Fc=None, detrend=None,
window=None, noverlap=None,
cmap=None, xextent=None, pad_to=None, sides=None,
scale_by_freq=None, mode=None, scale=None,
vmin=None, vmax=None, **kwargs):
"""
Plot a spectrogram.
Compute and plot a spectrogram of data in *x*. Data are split into
*NFFT* length segments and the spectrum of each section is
computed. The windowing function *window* is applied to each
segment, and the amount of overlap of each segment is
specified with *noverlap*. The spectrogram is plotted as a colormap
(using imshow).
Parameters
----------
x : 1-D array or sequence
Array or sequence containing the data.
%(Spectral)s
%(PSD)s
mode : {'default', 'psd', 'magnitude', 'angle', 'phase'}
What sort of spectrum to use. Default is 'psd', which takes the
power spectral density. 'magnitude' returns the magnitude
spectrum. 'angle' returns the phase spectrum without unwrapping.
'phase' returns the phase spectrum with unwrapping.
noverlap : int
The number of points of overlap between blocks. The
default value is 128.
scale : {'default', 'linear', 'dB'}
The scaling of the values in the *spec*. 'linear' is no scaling.
'dB' returns the values in dB scale. When *mode* is 'psd',
this is dB power (10 * log10). Otherwise this is dB amplitude
(20 * log10). 'default' is 'dB' if *mode* is 'psd' or
'magnitude' and 'linear' otherwise. This must be 'linear'
if *mode* is 'angle' or 'phase'.
Fc : int
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
cmap
A :class:`matplotlib.colors.Colormap` instance; if *None*, use
default determined by rc
xextent : *None* or (xmin, xmax)
The image extent along the x-axis. The default sets *xmin* to the
left border of the first bin (*spectrum* column) and *xmax* to the
right border of the last bin. Note that for *noverlap>0* the width
of the bins is smaller than those of the segments.
**kwargs
Additional kwargs are passed on to imshow which makes the
specgram image.
Returns
-------
spectrum : 2-D array
Columns are the periodograms of successive segments.
freqs : 1-D array
The frequencies corresponding to the rows in *spectrum*.
t : 1-D array
The times corresponding to midpoints of segments (i.e., the columns
in *spectrum*).
im : instance of class :class:`~matplotlib.image.AxesImage`
The image created by imshow containing the spectrogram
See Also
--------
:func:`psd`
:func:`psd` differs in the default overlap; in returning the mean
of the segment periodograms; in not returning times; and in
generating a line plot instead of colormap.
:func:`magnitude_spectrum`
A single spectrum, similar to having a single segment when *mode*
is 'magnitude'. Plots a line instead of a colormap.
:func:`angle_spectrum`
A single spectrum, similar to having a single segment when *mode*
is 'angle'. Plots a line instead of a colormap.
:func:`phase_spectrum`
A single spectrum, similar to having a single segment when *mode*
is 'phase'. Plots a line instead of a colormap.
Notes
-----
The parameters *detrend* and *scale_by_freq* do only apply when *mode*
is set to 'psd'.
"""
if NFFT is None:
NFFT = 256 # same default as in mlab.specgram()
if Fc is None:
Fc = 0 # same default as in mlab._spectral_helper()
if noverlap is None:
noverlap = 128 # same default as in mlab.specgram()
if mode == 'complex':
raise ValueError('Cannot plot a complex specgram')
if scale is None or scale == 'default':
if mode in ['angle', 'phase']:
scale = 'linear'
else:
scale = 'dB'
elif mode in ['angle', 'phase'] and scale == 'dB':
raise ValueError('Cannot use dB scale with angle or phase mode')
spec, freqs, t = mlab.specgram(x=x, NFFT=NFFT, Fs=Fs,
detrend=detrend, window=window,
noverlap=noverlap, pad_to=pad_to,
sides=sides,
scale_by_freq=scale_by_freq,
mode=mode)
if scale == 'linear':
Z = spec
elif scale == 'dB':
if mode is None or mode == 'default' or mode == 'psd':
Z = 10. * np.log10(spec)
else:
Z = 20. * np.log10(spec)
else:
raise ValueError('Unknown scale %s', scale)
Z = np.flipud(Z)
if xextent is None:
# padding is needed for first and last segment:
pad_xextent = (NFFT-noverlap) / Fs / 2
xextent = np.min(t) - pad_xextent, np.max(t) + pad_xextent
xmin, xmax = xextent
freqs += Fc
extent = xmin, xmax, freqs[0], freqs[-1]
im = self.imshow(Z, cmap, extent=extent, vmin=vmin, vmax=vmax,
**kwargs)
self.axis('auto')
return spec, freqs, t, im
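# ----------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original file). The function
# above is a copy of matplotlib's Axes.specgram, so it references `self`,
# `np`, and `mlab` that are not defined in this standalone module. The same
# call pattern works against a real matplotlib Axes, assuming numpy and
# matplotlib are installed; the sampling rate and tone are placeholders.
#
# import numpy as np
# import matplotlib.pyplot as plt
#
# fs = 16000                                  # sampling rate in Hz
# t = np.arange(0.0, 1.0, 1.0 / fs)
# x = np.sin(2 * np.pi * 440.0 * t)           # one second of a 440 Hz test tone
#
# fig, ax = plt.subplots()
# spec, freqs, times, im = ax.specgram(x, NFFT=512, Fs=fs, noverlap=256)
# fig.colorbar(im, ax=ax, label="power (dB)")
# plt.show()
# ----------------------------------------------------------------------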
# (dataset stats for the file above: avg_line_length=45.376923, max_line_length=79, alphanum_fraction=0.553314)
# ----------------------------------------------------------------------
# File: third_party/com_google_guava.bzl (1,331 bytes, blob 73fba5344bb3cf3b9213e8be0f9f80e2f8f10859)
# Repo: wix-playground/rules_maven_third_party @ ff0b486df194779d7d8e6c9102cd12138e3305c3 | License: Apache-2.0
# Stars/issues/forks: n/a
# ----------------------------------------------------------------------
load(":import_external.bzl", import_external = "import_external")
def dependencies():
import_external(
name = "com_google_guava_failureaccess",
artifact = "com.google.guava:failureaccess:1.0.1",
artifact_sha256 = "a171ee4c734dd2da837e4b16be9df4661afab72a41adaf31eb84dfdaf936ca26",
srcjar_sha256 = "092346eebbb1657b51aa7485a246bf602bb464cc0b0e2e1c7e7201fadce1e98f",
)
import_external(
name = "com_google_guava_guava",
artifact = "com.google.guava:guava:31.0.1-jre",
artifact_sha256 = "d5be94d65e87bd219fb3193ad1517baa55a3b88fc91d21cf735826ab5af087b9",
srcjar_sha256 = "fc0fb66f315f10b8713fc43354936d3649a8ad63f789d42fd7c3e55ecf72e092",
deps = [
"@com_google_code_findbugs_jsr305",
"@com_google_errorprone_error_prone_annotations",
"@com_google_guava_failureaccess",
"@com_google_guava_listenablefuture",
"@com_google_j2objc_j2objc_annotations",
"@org_checkerframework_checker_qual",
],
)
import_external(
name = "com_google_guava_listenablefuture",
artifact = "com.google.guava:listenablefuture:9999.0-empty-to-avoid-conflict-with-guava",
artifact_sha256 = "b372a037d4230aa57fbeffdef30fd6123f9c0c2db85d0aced00c91b974f33f99",
)
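# Usage sketch (illustrative, not part of the original .bzl file). In a
# consuming Bazel WORKSPACE one would typically load the macro and invoke it
# once; the "//third_party" package label below is an assumption about where
# this file lives in the workspace.
#
# load("//third_party:com_google_guava.bzl", guava_dependencies = "dependencies")
# guava_dependencies()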
# (dataset stats for the file above: avg_line_length=40.333333, max_line_length=97, alphanum_fraction=0.718257)
# ----------------------------------------------------------------------
# File: oscar/lib/python2.7/site-packages/phonenumbers/data/alt_format_676.py
#       (234 bytes, blob 73fbac16b18f9495efe268754072fd2fdd371032)
# Repo: sainjusajan/django-oscar @ 466e8edc807be689b0a28c9e525c8323cc48b8e1 | License: BSD-3-Clause
# Stars/issues/forks: n/a
# ----------------------------------------------------------------------
"""Auto-generated file, do not edit by hand. 676 metadata"""
from ..phonemetadata import NumberFormat
PHONE_ALT_FORMAT_676 = [NumberFormat(pattern='(\\d{2})(\\d{5})', format='\\1 \\2', leading_digits_pattern=['7[5-9]|8[47-9]'])]
# (dataset stats for the file above: avg_line_length=46.8, max_line_length=127, alphanum_fraction=0.675214)
# ----------------------------------------------------------------------
# File: cartpole/model/__init__.py (819 bytes, blob 73fbac3d5069fd09e76979132b6ef9d551a16573)
# Repo: hypnosapos/cartpole-rl-remote (stars) / davsuacar/cartpole-rl-remote (issues, forks)
#       @ 7111c9752cb663c1ef7a3815bd7c9f3c2d199ab9 | License: MIT
# Stars: 24 (2018-04-04 to 2020-12-10) | Issues: 8 (2018-06-19 to 2022-02-09) | Forks: 2 (2018-06-19 to 2019-04-10)
# ----------------------------------------------------------------------
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import TensorBoard
def get_model(model_type='keras', model_spec={}):
assert model_type == 'keras', "Only keras models are supported for now."
return get_keras_model(model_spec=model_spec)
def get_keras_model(model_spec={}):
# TODO: model tuning, pass layers and other config to get custom models
model = Sequential()
model.add(Dense(12, activation='relu', input_dim=4))
model.add(Dense(12, activation='relu'))
model.add(Dense(2))
model.compile(Adam(lr=0.001), 'mse')
return model
def get_tensorboard_callback(**kwargs):
return TensorBoard(**kwargs)
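# Usage sketch (illustrative, not part of the original module). It assumes
# TensorFlow 2.x and treats the network as a tiny Q-value approximator for
# CartPole (4 state features in, 2 action values out); the log directory is a
# placeholder.
#
# import numpy as np
# model = get_model()
# states = np.random.rand(32, 4).astype("float32")      # batch of observations
# q_targets = np.random.rand(32, 2).astype("float32")   # target action values
# model.fit(states, q_targets, epochs=1,
#           callbacks=[get_tensorboard_callback(log_dir="/tmp/cartpole-tb")])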
# (dataset stats for the file above: avg_line_length=30.333333, max_line_length=75, alphanum_fraction=0.733822)
# ----------------------------------------------------------------------
# File: server/mesite/migrations/0003_auto_20210717_1926.py (473 bytes, blob 73fbafdf2b8120d2d521d122740f59206375b46b)
# Repo: mezidia/song-helper (stars) / mezgoodle/song-helper (issues, forks)
#       @ 059d02f4e5e4758bef04322ee53d1f1e815ba08b | License: MIT
# Stars: 6 (2021-02-10 to 2021-08-21) | Issues: 116 (2021-02-10 to 2022-03-30) | Forks: 1 (2021-08-16)
# ----------------------------------------------------------------------
# Generated by Django 3.1.8 on 2021-07-17 16:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mesite', '0002_auto_20210312_1318'),
]
operations = [
migrations.AlterField(
model_name='mood',
name='mood',
field=models.CharField(choices=[('happy', 'Happy'), ('energetic', 'Energetic'), ('calm', 'Calm'), ('sad', 'Sad')], max_length=15),
),
]
# (dataset stats for the file above: avg_line_length=24.894737, max_line_length=142, alphanum_fraction=0.581395)
# ----------------------------------------------------------------------
# File: code/translator/hot/syntax/hot_resource.py (24,532 bytes, blob 73fbd245e64810cb5c88c52e89e8b464e913cffa)
# Repo: superfluidity/RDCL3D @ 3c5717941bd4046aa1be178e9004db1dc1c469a0 | License: Apache-2.0
# Stars: 8 (2017-03-13 to 2021-11-16) | Issues: n/a | Forks: 3 (2017-03-28 to 2020-12-08)
# ----------------------------------------------------------------------
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import logging
import os
import six
from toscaparser.elements.interfaces import InterfacesDef
from toscaparser.functions import GetInput
from toscaparser.nodetemplate import NodeTemplate
from toscaparser.utils.gettextutils import _
SECTIONS = (TYPE, PROPERTIES, MEDADATA, DEPENDS_ON, UPDATE_POLICY,
DELETION_POLICY) = \
('type', 'properties', 'metadata',
'depends_on', 'update_policy', 'deletion_policy')
policy_type = ['tosca.policies.Placement',
'tosca.policies.Scaling',
'tosca.policies.Scaling.Cluster']
log = logging.getLogger('heat-translator')
class HotResource(object):
'''Base class for TOSCA node type translation to Heat resource type.'''
def __init__(self, nodetemplate, name=None, type=None, properties=None,
metadata=None, depends_on=None,
update_policy=None, deletion_policy=None, csar_dir=None):
log.debug(_('Translating TOSCA node type to HOT resource type.'))
self.nodetemplate = nodetemplate
if name:
self.name = name
else:
self.name = nodetemplate.name
self.type = type
self.properties = properties or {}
self.csar_dir = csar_dir
# special case for HOT softwareconfig
cwd = os.getcwd()
if type == 'OS::Heat::SoftwareConfig':
config = self.properties.get('config')
if isinstance(config, dict):
if self.csar_dir:
os.chdir(self.csar_dir)
implementation_artifact = os.path.abspath(config.get(
'get_file'))
else:
implementation_artifact = config.get('get_file')
if implementation_artifact:
filename, file_extension = os.path.splitext(
implementation_artifact)
file_extension = file_extension.lower()
# artifact_types should be read to find the exact script
# type, unfortunately artifact_types doesn't seem to be
# supported by the parser
if file_extension == '.ansible' \
or file_extension == '.yaml' \
or file_extension == '.yml':
self.properties['group'] = 'ansible'
if file_extension == '.pp':
self.properties['group'] = 'puppet'
if self.properties.get('group') is None:
self.properties['group'] = 'script'
os.chdir(cwd)
self.metadata = metadata
# The difference between depends_on and depends_on_nodes is
# that depends_on defines dependency in the context of the
# HOT template and it is used during the template output.
# Depends_on_nodes defines the direct dependency between the
# tosca nodes and is not used during the output of the
# HOT template but for internal processing only. When a tosca
# node depends on another node it will be always added to
# depends_on_nodes but not always to depends_on. For example
# if the source of dependency is a server, the dependency will
# be added as properties.get_resource and not depends_on
if depends_on:
self.depends_on = depends_on
self.depends_on_nodes = depends_on
else:
self.depends_on = []
self.depends_on_nodes = []
self.update_policy = update_policy
self.deletion_policy = deletion_policy
self.group_dependencies = {}
# if hide_resource is set to true, then this resource will not be
# generated in the output yaml.
self.hide_resource = False
def handle_properties(self):
# the property can hold a value or the intrinsic function get_input
# for value, copy it
# for get_input, convert to get_param
for prop in self.nodetemplate.get_properties_objects():
pass
def handle_life_cycle(self):
hot_resources = []
deploy_lookup = {}
# TODO(anyone): sequence for life cycle needs to cover different
# scenarios and cannot be fixed or hard coded here
operations_deploy_sequence = ['create', 'configure', 'start']
operations = HotResource.get_all_operations(self.nodetemplate)
# create HotResource for each operation used for deployment:
# create, start, configure
# ignore the other operations
# observe the order: create, start, configure
# use the current HotResource for the first operation in this order
# hold the original name since it will be changed during
# the transformation
node_name = self.name
reserve_current = 'NONE'
for operation in operations_deploy_sequence:
if operation in operations.keys():
reserve_current = operation
break
# create the set of SoftwareDeployment and SoftwareConfig for
# the interface operations
hosting_server = None
if self.nodetemplate.requirements is not None:
hosting_server = self._get_hosting_server()
sw_deployment_resouce = HOTSoftwareDeploymentResources(hosting_server)
server_key = sw_deployment_resouce.server_key
servers = sw_deployment_resouce.servers
sw_deploy_res = sw_deployment_resouce.software_deployment
# hosting_server is None if requirements is None
hosting_on_server = hosting_server if hosting_server else None
base_type = HotResource.get_base_type_str(
self.nodetemplate.type_definition)
# if we are on a compute node the host is self
if hosting_on_server is None and base_type == 'tosca.nodes.Compute':
hosting_on_server = self.name
servers = {'get_resource': self.name}
cwd = os.getcwd()
for operation in operations.values():
if operation.name in operations_deploy_sequence:
config_name = node_name + '_' + operation.name + '_config'
deploy_name = node_name + '_' + operation.name + '_deploy'
if self.csar_dir:
os.chdir(self.csar_dir)
get_file = os.path.abspath(operation.implementation)
else:
get_file = operation.implementation
hot_resources.append(
HotResource(self.nodetemplate,
config_name,
'OS::Heat::SoftwareConfig',
{'config':
{'get_file': get_file}},
csar_dir=self.csar_dir))
if operation.name == reserve_current and \
base_type != 'tosca.nodes.Compute':
deploy_resource = self
self.name = deploy_name
self.type = sw_deploy_res
self.properties = {'config': {'get_resource': config_name},
server_key: servers}
deploy_lookup[operation] = self
else:
sd_config = {'config': {'get_resource': config_name},
server_key: servers}
deploy_resource = \
HotResource(self.nodetemplate,
deploy_name,
sw_deploy_res,
sd_config, csar_dir=self.csar_dir)
hot_resources.append(deploy_resource)
deploy_lookup[operation] = deploy_resource
lifecycle_inputs = self._get_lifecycle_inputs(operation)
if lifecycle_inputs:
deploy_resource.properties['input_values'] = \
lifecycle_inputs
os.chdir(cwd)
# Add dependencies for the set of HOT resources in the sequence defined
# in operations_deploy_sequence
# TODO(anyone): find some better way to encode this implicit sequence
group = {}
op_index_min = None
op_index_max = -1
for op, hot in deploy_lookup.items():
# position to determine potential preceding nodes
op_index = operations_deploy_sequence.index(op.name)
if op_index_min is None or op_index < op_index_min:
op_index_min = op_index
if op_index > op_index_max:
op_index_max = op_index
for preceding_op_name in \
reversed(operations_deploy_sequence[:op_index]):
preceding_hot = deploy_lookup.get(
operations.get(preceding_op_name))
if preceding_hot:
hot.depends_on.append(preceding_hot)
hot.depends_on_nodes.append(preceding_hot)
group[preceding_hot] = hot
break
if op_index_max >= 0:
last_deploy = deploy_lookup.get(operations.get(
operations_deploy_sequence[op_index_max]))
else:
last_deploy = None
# save this dependency chain in the set of HOT resources
self.group_dependencies.update(group)
for hot in hot_resources:
hot.group_dependencies.update(group)
roles_deploy_resource = self._handle_ansiblegalaxy_roles(
hot_resources, node_name, servers)
# add a dependency to this ansible roles deploy to
# the first "classic" deploy generated for this node
if roles_deploy_resource and op_index_min:
first_deploy = deploy_lookup.get(operations.get(
operations_deploy_sequence[op_index_min]))
first_deploy.depends_on.append(roles_deploy_resource)
first_deploy.depends_on_nodes.append(roles_deploy_resource)
return hot_resources, deploy_lookup, last_deploy
def _handle_ansiblegalaxy_roles(self, hot_resources, initial_node_name,
hosting_on_server):
artifacts = self.get_all_artifacts(self.nodetemplate)
install_roles_script = ''
sw_deployment_resouce = \
HOTSoftwareDeploymentResources(hosting_on_server)
server_key = sw_deployment_resouce.server_key
sw_deploy_res = sw_deployment_resouce.software_deployment
for artifact_name, artifact in artifacts.items():
artifact_type = artifact.get('type', '').lower()
if artifact_type == 'tosca.artifacts.ansiblegalaxy.role':
role = artifact.get('file', None)
if role:
install_roles_script += 'ansible-galaxy install ' + role \
+ '\n'
if install_roles_script:
# remove trailing \n
install_roles_script = install_roles_script[:-1]
# add shebang and | to use literal scalar type (for multiline)
install_roles_script = '|\n#!/bin/bash\n' + install_roles_script
config_name = initial_node_name + '_install_roles_config'
deploy_name = initial_node_name + '_install_roles_deploy'
hot_resources.append(
HotResource(self.nodetemplate, config_name,
'OS::Heat::SoftwareConfig',
{'config': install_roles_script},
csar_dir=self.csar_dir))
sd_config = {'config': {'get_resource': config_name},
server_key: hosting_on_server}
deploy_resource = \
HotResource(self.nodetemplate, deploy_name,
sw_deploy_res,
sd_config, csar_dir=self.csar_dir)
hot_resources.append(deploy_resource)
return deploy_resource
def handle_connectsto(self, tosca_source, tosca_target, hot_source,
hot_target, config_location, operation):
# The ConnectsTo relationship causes a configuration operation in
# the target.
# This hot resource is the software config portion in the HOT template
# This method adds the matching software deployment with the proper
# target server and dependency
if config_location == 'target':
hosting_server = hot_target._get_hosting_server()
hot_depends = hot_target
elif config_location == 'source':
hosting_server = self._get_hosting_server()
hot_depends = hot_source
sw_deployment_resouce = HOTSoftwareDeploymentResources(hosting_server)
server_key = sw_deployment_resouce.server_key
servers = sw_deployment_resouce.servers
sw_deploy_res = sw_deployment_resouce.software_deployment
deploy_name = tosca_source.name + '_' + tosca_target.name + \
'_connect_deploy'
sd_config = {'config': {'get_resource': self.name},
server_key: servers}
deploy_resource = \
HotResource(self.nodetemplate,
deploy_name,
sw_deploy_res,
sd_config,
depends_on=[hot_depends], csar_dir=self.csar_dir)
connect_inputs = self._get_connect_inputs(config_location, operation)
if connect_inputs:
deploy_resource.properties['input_values'] = connect_inputs
return deploy_resource
def handle_expansion(self):
pass
def handle_hosting(self):
# handle hosting server for the OS:HEAT::SoftwareDeployment
# from the TOSCA nodetemplate, traverse the relationship chain
# down to the server
sw_deploy_group = \
HOTSoftwareDeploymentResources.HOT_SW_DEPLOYMENT_GROUP_RESOURCE
sw_deploy = HOTSoftwareDeploymentResources.HOT_SW_DEPLOYMENT_RESOURCE
if self.properties.get('servers') and \
self.properties.get('server'):
del self.properties['server']
if self.type == sw_deploy_group or self.type == sw_deploy:
# skip if already have hosting
# If type is NodeTemplate, look up corresponding HotResrouce
host_server = self.properties.get('servers') \
or self.properties.get('server')
if host_server is None:
raise Exception(_("Internal Error: expecting host "
"in software deployment"))
elif isinstance(host_server.get('get_resource'), NodeTemplate):
self.properties['server']['get_resource'] = \
host_server['get_resource'].name
elif isinstance(host_server, dict) and \
not host_server.get('get_resource'):
self.properties['servers'] = \
host_server
def top_of_chain(self):
dependent = self.group_dependencies.get(self)
if dependent is None:
return self
else:
return dependent.top_of_chain()
# this function allows providing substacks as external files;
# those files will be dumped alongside the output file.
#
# return a dict of filename-content
def extract_substack_templates(self, base_filename, hot_template_version):
return {}
# this function asks the resource to embed substacks
# into the main template, if any.
# this is used when the final output is stdout
def embed_substack_templates(self, hot_template_version):
pass
def get_dict_output(self):
resource_sections = OrderedDict()
resource_sections[TYPE] = self.type
if self.properties:
resource_sections[PROPERTIES] = self.properties
if self.metadata:
resource_sections[MEDADATA] = self.metadata
if self.depends_on:
resource_sections[DEPENDS_ON] = []
for depend in self.depends_on:
resource_sections[DEPENDS_ON].append(depend.name)
if self.update_policy:
resource_sections[UPDATE_POLICY] = self.update_policy
if self.deletion_policy:
resource_sections[DELETION_POLICY] = self.deletion_policy
return {self.name: resource_sections}
def _get_lifecycle_inputs(self, operation):
# check if this lifecycle operation has input values specified
# extract and convert to HOT format
if isinstance(operation.value, six.string_types):
# the operation has a static string
return {}
else:
# the operation is a dict {'implementation': xxx, 'inputs': yyy}
inputs = operation.value.get('inputs')
deploy_inputs = {}
if inputs:
for name, value in inputs.items():
deploy_inputs[name] = value
return deploy_inputs
def _get_connect_inputs(self, config_location, operation):
if config_location == 'target':
inputs = operation.get('pre_configure_target').get('inputs')
elif config_location == 'source':
inputs = operation.get('pre_configure_source').get('inputs')
deploy_inputs = {}
if inputs:
for name, value in inputs.items():
deploy_inputs[name] = value
return deploy_inputs
def _get_hosting_server(self, node_template=None):
# find the server that hosts this software by checking the
# requirements and following the hosting chain
hosting_servers = []
host_exists = False
this_node_template = self.nodetemplate \
if node_template is None else node_template
for requirement in this_node_template.requirements:
for requirement_name, assignment in requirement.items():
for check_node in this_node_template.related_nodes:
# check if the capability is Container
if isinstance(assignment, dict):
node_name = assignment.get('node')
else:
node_name = assignment
if node_name and node_name == check_node.name:
if self._is_container_type(requirement_name,
check_node):
hosting_servers.append(check_node.name)
host_exists = True
elif check_node.related_nodes and not host_exists:
return self._get_hosting_server(check_node)
if hosting_servers:
return hosting_servers
return None
def _is_container_type(self, requirement_name, node):
# capability is a list of dict
# For now just check if it's type tosca.nodes.Compute
# TODO(anyone): match up requirement and capability
base_type = HotResource.get_base_type_str(node.type_definition)
if base_type == 'tosca.nodes.Compute':
return True
else:
return False
def get_hot_attribute(self, attribute, args):
# this is a place holder and should be implemented by the subclass
# if translation is needed for the particular attribute
raise Exception(_("No translation in TOSCA type {0} for attribute "
"{1}").format(self.nodetemplate.type, attribute))
def get_tosca_props(self):
tosca_props = {}
for prop in self.nodetemplate.get_properties_objects():
if isinstance(prop.value, GetInput):
tosca_props[prop.name] = {'get_param': prop.value.input_name}
else:
tosca_props[prop.name] = prop.value
return tosca_props
@staticmethod
def get_all_artifacts(nodetemplate):
# workaround bug in the parser
base_type = HotResource.get_base_type_str(nodetemplate.type_definition)
if base_type in policy_type:
artifacts = {}
else:
artifacts = nodetemplate.type_definition.get_value('artifacts',
parent=True)
if not artifacts:
artifacts = {}
tpl_artifacts = nodetemplate.entity_tpl.get('artifacts')
if tpl_artifacts:
artifacts.update(tpl_artifacts)
return artifacts
@staticmethod
def get_all_operations(node):
operations = {}
for operation in node.interfaces:
operations[operation.name] = operation
# workaround bug in the parser
base_type = HotResource.get_base_type_str(node.type_definition)
if base_type in policy_type:
return operations
node_type = node.type_definition
while True:
type_operations = HotResource._get_interface_operations_from_type(
node_type, node, 'Standard')
type_operations.update(operations)
operations = type_operations
if node_type.parent_type is not None:
node_type = node_type.parent_type
else:
return operations
@staticmethod
def _get_interface_operations_from_type(node_type, node, lifecycle_name):
operations = {}
base_type = HotResource.get_base_type_str(node_type)
if base_type in policy_type:
return operations
if node_type.interfaces and lifecycle_name in node_type.interfaces:
for name, elems in node_type.interfaces[lifecycle_name].items():
# ignore empty operations (only type)
# ignore global interface inputs,
# concrete inputs are on the operations themselves
if name != 'type' and name != 'inputs':
operations[name] = InterfacesDef(node_type,
lifecycle_name,
node, name, elems)
return operations
@staticmethod
def get_base_type_str(node_type):
if isinstance(node_type, six.string_types):
return node_type
if node_type.parent_type is not None:
parent_type_str = None
if isinstance(node_type.parent_type, six.string_types):
parent_type_str = node_type.parent_type
else:
parent_type_str = node_type.parent_type.type
if parent_type_str and parent_type_str.endswith('.Root'):
return node_type.type
else:
return HotResource.get_base_type_str(node_type.parent_type)
return node_type.type
class HOTSoftwareDeploymentResources(object):
"""Provides HOT Software Deployment resources
SoftwareDeployment or SoftwareDeploymentGroup Resource
"""
HOT_SW_DEPLOYMENT_RESOURCE = 'OS::Heat::SoftwareDeployment'
HOT_SW_DEPLOYMENT_GROUP_RESOURCE = 'OS::Heat::SoftwareDeploymentGroup'
def __init__(self, hosting_server=None):
self.software_deployment = self.HOT_SW_DEPLOYMENT_RESOURCE
self.software_deployment_group = self.HOT_SW_DEPLOYMENT_GROUP_RESOURCE
self.server_key = 'server'
self.hosting_server = hosting_server
self.servers = {}
if hosting_server is not None:
if len(self.hosting_server) == 1:
if isinstance(hosting_server, list):
self.servers['get_resource'] = self.hosting_server[0]
else:
for server in self.hosting_server:
self.servers[server] = {'get_resource': server}
self.software_deployment = self.software_deployment_group
self.server_key = 'servers'
| 43.266314 | 79 | 0.607207 |
73fbe14f727052328e06a871d7924a52a2c8ac3f | 35,186 | py | Python | sdks/python/apache_beam/runners/portability/stager.py | abergmeier/beam | f1360c4dd71d9880bfce7feea415457ac0647c30 | [
"Apache-2.0"
] | null | null | null | sdks/python/apache_beam/runners/portability/stager.py | abergmeier/beam | f1360c4dd71d9880bfce7feea415457ac0647c30 | [
"Apache-2.0"
] | null | null | null | sdks/python/apache_beam/runners/portability/stager.py | abergmeier/beam | f1360c4dd71d9880bfce7feea415457ac0647c30 | [
"Apache-2.0"
] | 1 | 2021-12-26T15:58:25.000Z | 2021-12-26T15:58:25.000Z | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Support for installing custom code and required dependencies.
Workflows, with the exception of very simple ones, are organized in multiple
modules and packages. Typically, these modules and packages have
dependencies on other standard libraries. Beam relies on the Python
setuptools package to handle these scenarios. For further details please read:
https://pythonhosted.org/an_example_pypi_project/setuptools.html
When a runner tries to run a pipeline it will check for a --requirements_file
and a --setup_file option.
If --setup_file is present then it is assumed that the folder containing the
file specified by the option has the typical layout required by setuptools and
it will run 'python setup.py sdist' to produce a source distribution. The
resulting tarball (a .tar or .tar.gz file) will be staged at the staging
location specified as job option. When a worker starts it will check for the
presence of this file and will run 'easy_install tarball' to install the
package in the worker.
If --requirements_file is present then the file specified by the option will be
staged in the staging location. When a worker starts it will check for the
presence of this file and will run 'pip install -r requirements.txt'. A
requirements file can be easily generated by running 'pip freeze -r
requirements.txt'. The reason a runner does not run this automatically is
because quite often only a small fraction of the dependencies present in a
requirements.txt file are actually needed for remote execution and therefore a
one-time manual trimming is desirable.
TODO(silviuc): Should we allow several setup packages?
TODO(silviuc): We should allow customizing the exact command for setup build.
"""
# pytype: skip-file
from __future__ import absolute_import
import glob
import hashlib
import logging
import os
import shutil
import sys
import tempfile
from typing import List
from typing import Optional
from typing import Tuple
import pkg_resources
from future.moves.urllib.parse import urlparse
from apache_beam.internal import pickler
from apache_beam.internal.http_client import get_new_http
from apache_beam.io.filesystems import FileSystems
from apache_beam.options.pipeline_options import DebugOptions
from apache_beam.options.pipeline_options import PipelineOptions # pylint: disable=unused-import
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import WorkerOptions
from apache_beam.portability import common_urns
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.runners.internal import names
from apache_beam.utils import processes
from apache_beam.utils import retry
# All constants are for internal use only; no backwards-compatibility
# guarantees.
# Standard file names used for staging files.
WORKFLOW_TARBALL_FILE = 'workflow.tar.gz'
REQUIREMENTS_FILE = 'requirements.txt'
EXTRA_PACKAGES_FILE = 'extra_packages.txt'
_LOGGER = logging.getLogger(__name__)
def retry_on_non_zero_exit(exception):
if (isinstance(exception, processes.CalledProcessError) and
exception.returncode != 0):
return True
return False
class Stager(object):
"""Abstract Stager identifies and copies the appropriate artifacts to the
staging location.
Implementation of this stager has to implement :func:`stage_artifact` and
:func:`commit_manifest`.
"""
def stage_artifact(self, local_path_to_artifact, artifact_name):
# type: (str, str) -> None
""" Stages the artifact to Stager._staging_location and adds artifact_name
to the manifest of artifacts that have been staged."""
raise NotImplementedError
def commit_manifest(self):
"""Commits manifest."""
raise NotImplementedError
@staticmethod
def get_sdk_package_name():
"""For internal use only; no backwards-compatibility guarantees.
Returns the PyPI package name to be staged."""
return names.BEAM_PACKAGE_NAME
@staticmethod
def _create_file_stage_to_artifact(local_path, staged_name):
return beam_runner_api_pb2.ArtifactInformation(
type_urn=common_urns.artifact_types.FILE.urn,
type_payload=beam_runner_api_pb2.ArtifactFilePayload(
path=local_path).SerializeToString(),
role_urn=common_urns.artifact_roles.STAGING_TO.urn,
role_payload=beam_runner_api_pb2.ArtifactStagingToRolePayload(
staged_name=staged_name).SerializeToString())
@staticmethod
def _create_file_pip_requirements_artifact(local_path):
return beam_runner_api_pb2.ArtifactInformation(
type_urn=common_urns.artifact_types.FILE.urn,
type_payload=beam_runner_api_pb2.ArtifactFilePayload(
path=local_path).SerializeToString(),
role_urn=common_urns.artifact_roles.PIP_REQUIREMENTS_FILE.urn)
@staticmethod
def extract_staging_tuple_iter(
artifacts: List[beam_runner_api_pb2.ArtifactInformation]):
for artifact in artifacts:
if artifact.type_urn == common_urns.artifact_types.FILE.urn:
file_payload = beam_runner_api_pb2.ArtifactFilePayload()
file_payload.ParseFromString(artifact.type_payload)
src = file_payload.path
if artifact.role_urn == common_urns.artifact_roles.STAGING_TO.urn:
role_payload = beam_runner_api_pb2.ArtifactStagingToRolePayload()
role_payload.ParseFromString(artifact.role_payload)
dst = role_payload.staged_name
elif (artifact.role_urn ==
common_urns.artifact_roles.PIP_REQUIREMENTS_FILE.urn):
dst = hashlib.sha256(artifact.SerializeToString()).hexdigest()
else:
raise RuntimeError("unknown role type: %s" % artifact.role_urn)
yield (src, dst)
else:
raise RuntimeError("unknown artifact type: %s" % artifact.type_urn)
@staticmethod
def create_job_resources(options, # type: PipelineOptions
temp_dir, # type: str
build_setup_args=None, # type: Optional[List[str]]
pypi_requirements=None, # type: Optional[List[str]]
populate_requirements_cache=None, # type: Optional[str]
skip_prestaged_dependencies=False, # type: Optional[bool]
):
"""For internal use only; no backwards-compatibility guarantees.
Creates (if needed) a list of job resources.
Args:
options: Command line options. More specifically the function will
expect requirements_file, setup_file, and save_main_session options
to be present.
temp_dir: Temporary folder where the resource building can happen. If
None then a unique temp directory will be created. Used only for
testing.
build_setup_args: A list of command line arguments used to build a
setup package. Used only if options.setup_file is not None. Used
only for testing.
pypi_requirements: A list of PyPI requirements used to cache source
packages.
populate_requirements_cache: Callable for populating the requirements
cache. Used only for testing.
skip_prestaged_dependencies: Skip staging dependencies that can be
added into SDK containers during prebuilding.
Returns:
A list of ArtifactInformation to be used for staging resources.
Raises:
RuntimeError: If files specified are not found or error encountered
while trying to create the resources (e.g., build a setup package).
"""
resources = [] # type: List[beam_runner_api_pb2.ArtifactInformation]
setup_options = options.view_as(SetupOptions)
# We can skip boot dependencies: apache beam sdk, python packages from
# requirements.txt, python packages from extra_packages and workflow tarball
# if we know we are using a dependency pre-installed sdk container image.
if not skip_prestaged_dependencies:
requirements_cache_path = (
os.path.join(tempfile.gettempdir(), 'dataflow-requirements-cache')
if setup_options.requirements_cache is None else
setup_options.requirements_cache)
if not os.path.exists(requirements_cache_path):
os.makedirs(requirements_cache_path)
# Stage a requirements file if present.
if setup_options.requirements_file is not None:
if not os.path.isfile(setup_options.requirements_file):
raise RuntimeError(
'The file %s cannot be found. It was specified in the '
'--requirements_file command line option.' %
setup_options.requirements_file)
resources.append(
Stager._create_file_stage_to_artifact(
setup_options.requirements_file, REQUIREMENTS_FILE))
# Populate cache with packages from the requirement file option and
# stage the files in the cache.
(
populate_requirements_cache if populate_requirements_cache else
Stager._populate_requirements_cache)(
setup_options.requirements_file, requirements_cache_path)
if pypi_requirements:
tf = tempfile.NamedTemporaryFile(mode='w', delete=False)
tf.writelines(pypi_requirements)
tf.close()
resources.append(Stager._create_file_pip_requirements_artifact(tf.name))
# Populate cache with packages from PyPI requirements and stage
# the files in the cache.
(
populate_requirements_cache if populate_requirements_cache else
Stager._populate_requirements_cache)(
tf.name, requirements_cache_path)
if setup_options.requirements_file is not None or pypi_requirements:
for pkg in glob.glob(os.path.join(requirements_cache_path, '*')):
resources.append(
Stager._create_file_stage_to_artifact(pkg, os.path.basename(pkg)))
# Handle a setup file if present.
# We will build the setup package locally and then copy it to the staging
# location because the staging location is a remote path and the file
# cannot be created directly there.
if setup_options.setup_file is not None:
if not os.path.isfile(setup_options.setup_file):
raise RuntimeError(
'The file %s cannot be found. It was specified in the '
'--setup_file command line option.' % setup_options.setup_file)
if os.path.basename(setup_options.setup_file) != 'setup.py':
raise RuntimeError(
'The --setup_file option expects the full path to a file named '
'setup.py instead of %s' % setup_options.setup_file)
tarball_file = Stager._build_setup_package(
setup_options.setup_file, temp_dir, build_setup_args)
resources.append(
Stager._create_file_stage_to_artifact(
tarball_file, WORKFLOW_TARBALL_FILE))
# Handle extra local packages that should be staged.
if setup_options.extra_packages is not None:
resources.extend(
Stager._create_extra_packages(
setup_options.extra_packages, temp_dir=temp_dir))
if hasattr(setup_options, 'sdk_location'):
if (setup_options.sdk_location == 'default') or Stager._is_remote_path(
setup_options.sdk_location):
# If --sdk_location is not specified then the appropriate package
# will be obtained from PyPI (https://pypi.python.org) based on the
# version of the currently running SDK. If the option is
# present then no version matching is made and the exact URL or path
# is expected.
#
# Unit tests running in the 'python setup.py test' context will
# not have the sdk_location attribute present and therefore we
# will not stage SDK.
sdk_remote_location = 'pypi' if (
setup_options.sdk_location == 'default'
) else setup_options.sdk_location
resources.extend(
Stager._create_beam_sdk(sdk_remote_location, temp_dir))
elif setup_options.sdk_location == 'container':
# Use the SDK that's built into the container, rather than re-staging
# it.
pass
else:
# This branch is also used by internal tests running with the SDK
# built at head.
if os.path.isdir(setup_options.sdk_location):
sdk_path = os.path.join(
setup_options.sdk_location, names.STAGED_SDK_SOURCES_FILENAME)
else:
sdk_path = setup_options.sdk_location
if os.path.isfile(sdk_path):
_LOGGER.info('Copying Beam SDK "%s" to staging location.', sdk_path)
resources.append(
Stager._create_file_stage_to_artifact(
sdk_path,
Stager._desired_sdk_filename_in_staging_location(
setup_options.sdk_location)))
else:
if setup_options.sdk_location == 'default':
raise RuntimeError(
'Cannot find default Beam SDK tar file "%s"' % sdk_path)
elif not setup_options.sdk_location:
_LOGGER.info(
'Beam SDK will not be staged since --sdk_location '
'is empty.')
else:
raise RuntimeError(
'The file "%s" cannot be found. Its location was specified '
'by the --sdk_location command-line option.' % sdk_path)
# The following artifacts are not processed by python sdk container boot
# sequence in a setup mode and hence should not be skipped even if a
# prebuilt sdk container image is used.
# TODO(heejong): remove jar_packages experimental flag when cross-language
# dependency management is implemented for all runners.
# Handle jar packages that should be staged for Java SDK Harness.
jar_packages = options.view_as(DebugOptions).lookup_experiment(
'jar_packages')
if jar_packages is not None:
resources.extend(
Stager._create_jar_packages(
jar_packages.split(','), temp_dir=temp_dir))
# Pickle the main session if requested.
# We will create the pickled main session locally and then copy it to the
# staging location because the staging location is a remote path and the
# file cannot be created directly there.
if setup_options.save_main_session:
pickled_session_file = os.path.join(
temp_dir, names.PICKLED_MAIN_SESSION_FILE)
pickler.dump_session(pickled_session_file)
resources.append(
Stager._create_file_stage_to_artifact(
pickled_session_file, names.PICKLED_MAIN_SESSION_FILE))
worker_options = options.view_as(WorkerOptions)
dataflow_worker_jar = getattr(worker_options, 'dataflow_worker_jar', None)
if dataflow_worker_jar is not None:
jar_staged_filename = 'dataflow-worker.jar'
resources.append(
Stager._create_file_stage_to_artifact(
dataflow_worker_jar, jar_staged_filename))
return resources
def stage_job_resources(self,
resources, # type: List[Tuple[str, str]]
staging_location=None # type: Optional[str]
):
"""For internal use only; no backwards-compatibility guarantees.
Stages job resources to staging_location.
Args:
resources: A list of tuples of local file paths and file names (no
paths) to be used for staging resources.
staging_location: Location to stage the file.
Returns:
A list of file names (no paths) for the resources staged. All the
files are assumed to be staged at staging_location.
Raises:
RuntimeError: If files specified are not found or error encountered
while trying to create the resources (e.g., build a setup package).
"""
# Make sure that all required options are specified.
if staging_location is None:
raise RuntimeError('The staging_location must be specified.')
staged_resources = []
for file_path, staged_path in resources:
self.stage_artifact(
file_path, FileSystems.join(staging_location, staged_path))
staged_resources.append(staged_path)
return staged_resources
def create_and_stage_job_resources(
self,
options, # type: PipelineOptions
build_setup_args=None, # type: Optional[List[str]]
temp_dir=None, # type: Optional[str]
pypi_requirements=None, # type: Optional[List[str]]
populate_requirements_cache=None, # type: Optional[str]
staging_location=None # type: Optional[str]
):
"""For internal use only; no backwards-compatibility guarantees.
Creates (if needed) and stages job resources to staging_location.
Args:
options: Command line options. More specifically the function will
expect requirements_file, setup_file, and save_main_session options
to be present.
build_setup_args: A list of command line arguments used to build a
setup package. Used only if options.setup_file is not None. Used
only for testing.
temp_dir: Temporary folder where the resource building can happen. If
None then a unique temp directory will be created. Used only for
testing.
pypi_requirements: A list of PyPI requirements used to cache source
packages.
populate_requirements_cache: Callable for populating the requirements
cache. Used only for testing.
staging_location: Location to stage the file.
Returns:
A tuple of:
1) retrieval token
2) A list of file names (no paths) for the resources staged. All the
files are assumed to be staged at staging_location
Raises:
RuntimeError: If files specified are not found or error encountered
while trying to create the resources (e.g., build a setup package).
"""
temp_dir = temp_dir or tempfile.mkdtemp()
resources = self.create_job_resources(
options,
temp_dir,
build_setup_args,
pypi_requirements=pypi_requirements,
populate_requirements_cache=populate_requirements_cache)
staged_resources = self.stage_job_resources(
list(Stager.extract_staging_tuple_iter(resources)), staging_location)
# Delete all temp files created while staging job resources.
shutil.rmtree(temp_dir)
retrieval_token = self.commit_manifest()
return retrieval_token, staged_resources
@staticmethod
@retry.with_exponential_backoff(num_retries=4)
def _download_file(from_url, to_path):
"""Downloads a file over http/https from a url or copy it from a remote
path to local path."""
if from_url.startswith('http://') or from_url.startswith('https://'):
# TODO(silviuc): We should cache downloads so we do not do it for every
# job.
try:
# We check if the file is actually there because wget returns a file
# even for a 404 response (file will contain the contents of the 404
# response).
response, content = get_new_http().request(from_url)
if int(response['status']) >= 400:
raise RuntimeError(
'Artifact not found at %s (response: %s)' % (from_url, response))
with open(to_path, 'wb') as f:
f.write(content)
except Exception:
_LOGGER.info('Failed to download Artifact from %s', from_url)
raise
else:
if not os.path.isdir(os.path.dirname(to_path)):
_LOGGER.info(
'Created folder (since it did not exist yet; any errors '
'will follow): %s ',
os.path.dirname(to_path))
os.mkdir(os.path.dirname(to_path))
shutil.copyfile(from_url, to_path)
@staticmethod
def _is_remote_path(path):
return path.find('://') != -1
@staticmethod
def _create_jar_packages(jar_packages, temp_dir):
# type: (...) -> List[beam_runner_api_pb2.ArtifactInformation]
"""Creates a list of local jar packages for Java SDK Harness.
:param jar_packages: Ordered list of local paths to jar packages to be
staged. Only packages on the local file system and GCS are supported.
:param temp_dir: Temporary folder where the resource building can happen.
:return: A list of tuples of local file paths and file names (no paths) for
the resource staged. All the files are assumed to be staged in
staging_location.
:raises:
RuntimeError: If files specified are not found or do not have expected
name patterns.
"""
resources = [] # type: List[beam_runner_api_pb2.ArtifactInformation]
staging_temp_dir = tempfile.mkdtemp(dir=temp_dir)
local_packages = [] # type: List[str]
for package in jar_packages:
if not os.path.basename(package).endswith('.jar'):
raise RuntimeError(
'The --experiment=\'jar_packages=\' option expects a full path '
'ending with ".jar" instead of %s' % package)
if not os.path.isfile(package):
if Stager._is_remote_path(package):
# Download remote package.
_LOGGER.info(
'Downloading jar package: %s locally before staging', package)
_, last_component = FileSystems.split(package)
local_file_path = FileSystems.join(staging_temp_dir, last_component)
Stager._download_file(package, local_file_path)
else:
raise RuntimeError(
'The file %s cannot be found. It was specified in the '
'--experiment=\'jar_packages=\' command line option.' % package)
else:
local_packages.append(package)
local_packages.extend([
FileSystems.join(staging_temp_dir, f)
for f in os.listdir(staging_temp_dir)
])
for package in local_packages:
basename = os.path.basename(package)
resources.append(Stager._create_file_stage_to_artifact(package, basename))
return resources
@staticmethod
def _create_extra_packages(extra_packages, temp_dir):
# type: (...) -> List[beam_runner_api_pb2.ArtifactInformation]
"""Creates a list of local extra packages.
Args:
extra_packages: Ordered list of local paths to extra packages to be
staged. Only packages on the local file system and GCS are supported.
temp_dir: Temporary folder where the resource building can happen.
Caller is responsible for cleaning up this folder after this function
returns.
Returns:
A list of ArtifactInformation of local file paths and file names
(no paths) for the resources staged. All the files are assumed to be
staged in staging_location.
Raises:
RuntimeError: If files specified are not found or do not have expected
name patterns.
"""
resources = [] # type: List[beam_runner_api_pb2.ArtifactInformation]
staging_temp_dir = tempfile.mkdtemp(dir=temp_dir)
local_packages = [] # type: List[str]
for package in extra_packages:
if not (os.path.basename(package).endswith('.tar') or
os.path.basename(package).endswith('.tar.gz') or
os.path.basename(package).endswith('.whl') or
os.path.basename(package).endswith('.zip')):
raise RuntimeError(
'The --extra_package option expects a full path ending with '
'".tar", ".tar.gz", ".whl" or ".zip" instead of %s' % package)
if os.path.basename(package).endswith('.whl'):
_LOGGER.warning(
'The .whl package "%s" is provided in --extra_package. '
'This functionality is not officially supported. Since wheel '
'packages are binary distributions, this package must be '
'binary-compatible with the worker environment (e.g. Python 2.7 '
'running on an x64 Linux host).' % package)
if not os.path.isfile(package):
if Stager._is_remote_path(package):
# Download remote package.
_LOGGER.info(
'Downloading extra package: %s locally before staging', package)
_, last_component = FileSystems.split(package)
local_file_path = FileSystems.join(staging_temp_dir, last_component)
Stager._download_file(package, local_file_path)
else:
raise RuntimeError(
'The file %s cannot be found. It was specified in the '
'--extra_packages command line option.' % package)
else:
local_packages.append(package)
local_packages.extend([
FileSystems.join(staging_temp_dir, f)
for f in os.listdir(staging_temp_dir)
])
for package in local_packages:
basename = os.path.basename(package)
resources.append(Stager._create_file_stage_to_artifact(package, basename))
# Create a file containing the list of extra packages and stage it.
# The file is important so that in the worker the packages are installed
# exactly in the order specified. This approach will avoid extra PyPI
# requests. For example if package A depends on package B and package A
# is installed first then the installer will try to satisfy the
# dependency on B by downloading the package from PyPI. If package B is
# installed first this is avoided.
with open(os.path.join(temp_dir, EXTRA_PACKAGES_FILE), 'wt') as f:
for package in local_packages:
f.write('%s\n' % os.path.basename(package))
# Note that the caller of this function is responsible for deleting the
# temporary folder where all temp files are created, including this one.
resources.append(
Stager._create_file_stage_to_artifact(
os.path.join(temp_dir, EXTRA_PACKAGES_FILE), EXTRA_PACKAGES_FILE))
return resources
@staticmethod
def _get_python_executable():
# Allow overriding the python executable to use for downloading and
# installing dependencies, otherwise use the python executable for
# the current process.
python_bin = os.environ.get('BEAM_PYTHON') or sys.executable
if not python_bin:
raise ValueError('Could not find Python executable.')
return python_bin
@staticmethod
@retry.with_exponential_backoff(
num_retries=4, retry_filter=retry_on_non_zero_exit)
def _populate_requirements_cache(requirements_file, cache_dir):
# The 'pip download' command will not download again if it finds the
# tarball with the proper version already present.
# It will get the packages downloaded in the order they are presented in
# the requirements file and will not download package dependencies.
cmd_args = [
Stager._get_python_executable(),
'-m',
'pip',
'download',
'--dest',
cache_dir,
'-r',
requirements_file,
'--exists-action',
'i',
# Download from PyPI source distributions.
'--no-binary',
':all:'
]
_LOGGER.info('Executing command: %s', cmd_args)
processes.check_output(cmd_args, stderr=processes.STDOUT)
@staticmethod
def _build_setup_package(setup_file, # type: str
temp_dir, # type: str
build_setup_args=None # type: Optional[List[str]]
):
# type: (...) -> str
saved_current_directory = os.getcwd()
try:
os.chdir(os.path.dirname(setup_file))
if build_setup_args is None:
build_setup_args = [
Stager._get_python_executable(),
os.path.basename(setup_file),
'sdist',
'--dist-dir',
temp_dir
]
_LOGGER.info('Executing command: %s', build_setup_args)
processes.check_output(build_setup_args)
output_files = glob.glob(os.path.join(temp_dir, '*.tar.gz'))
if not output_files:
raise RuntimeError(
'File %s not found.' % os.path.join(temp_dir, '*.tar.gz'))
return output_files[0]
finally:
os.chdir(saved_current_directory)
@staticmethod
def _desired_sdk_filename_in_staging_location(sdk_location):
# type: (...) -> str
"""Returns the name that SDK file should have in the staging location.
Args:
sdk_location: Full path to SDK file.
"""
if sdk_location.endswith('.whl'):
_, wheel_filename = FileSystems.split(sdk_location)
if wheel_filename.startswith('apache_beam'):
return wheel_filename
else:
raise RuntimeError('Unrecognized SDK wheel file: %s' % sdk_location)
else:
return names.STAGED_SDK_SOURCES_FILENAME
@staticmethod
def _create_beam_sdk(sdk_remote_location, temp_dir):
# type: (...) -> List[beam_runner_api_pb2.ArtifactInformation]
"""Creates a Beam SDK file with the appropriate version.
Args:
sdk_remote_location: A URL from which the file can be downloaded or a
remote file location. The SDK file can be a tarball or a wheel. Set
to 'pypi' to download and stage a wheel and source SDK from PyPi.
temp_dir: path to temporary location where the file should be
downloaded.
Returns:
A list of ArtifactInformation of local files path and SDK files that
will be staged to the staging location.
Raises:
RuntimeError: if staging was not successful.
"""
if sdk_remote_location == 'pypi':
sdk_local_file = Stager._download_pypi_sdk_package(temp_dir)
sdk_sources_staged_name = Stager.\
_desired_sdk_filename_in_staging_location(sdk_local_file)
_LOGGER.info('Staging SDK sources from PyPI: %s', sdk_sources_staged_name)
staged_sdk_files = [
Stager._create_file_stage_to_artifact(
sdk_local_file, sdk_sources_staged_name)
]
try:
abi_suffix = (
'mu' if sys.version_info[0] < 3 else
('m' if sys.version_info < (3, 8) else ''))
# Stage binary distribution of the SDK, for now on a best-effort basis.
sdk_local_file = Stager._download_pypi_sdk_package(
temp_dir,
fetch_binary=True,
language_version_tag='%d%d' %
(sys.version_info[0], sys.version_info[1]),
abi_tag='cp%d%d%s' %
(sys.version_info[0], sys.version_info[1], abi_suffix))
sdk_binary_staged_name = Stager.\
_desired_sdk_filename_in_staging_location(sdk_local_file)
_LOGGER.info(
'Staging binary distribution of the SDK from PyPI: %s',
sdk_binary_staged_name)
staged_sdk_files.append(
Stager._create_file_stage_to_artifact(
sdk_local_file, sdk_binary_staged_name))
except RuntimeError as e:
_LOGGER.warning(
'Failed to download requested binary distribution '
'of the SDK: %s',
repr(e))
return staged_sdk_files
elif Stager._is_remote_path(sdk_remote_location):
sdk_remote_parsed = urlparse(sdk_remote_location)
sdk_remote_filename = os.path.basename(sdk_remote_parsed.path)
local_download_file = os.path.join(temp_dir, sdk_remote_filename)
Stager._download_file(sdk_remote_location, local_download_file)
staged_name = Stager._desired_sdk_filename_in_staging_location(
local_download_file)
_LOGGER.info('Staging Beam SDK from %s', sdk_remote_location)
return [
Stager._create_file_stage_to_artifact(
local_download_file, staged_name)
]
else:
raise RuntimeError(
'The --sdk_location option was used with an unsupported '
'type of location: %s' % sdk_remote_location)
@staticmethod
def _download_pypi_sdk_package(
temp_dir,
fetch_binary=False,
language_version_tag='27',
language_implementation_tag='cp',
abi_tag='cp27mu',
platform_tag='manylinux1_x86_64'):
"""Downloads SDK package from PyPI and returns path to local path."""
package_name = Stager.get_sdk_package_name()
try:
version = pkg_resources.get_distribution(package_name).version
except pkg_resources.DistributionNotFound:
raise RuntimeError(
'Please set --sdk_location command-line option '
'or install a valid {} distribution.'.format(package_name))
cmd_args = [
Stager._get_python_executable(),
'-m',
'pip',
'download',
'--dest',
temp_dir,
'%s==%s' % (package_name, version),
'--no-deps'
]
if fetch_binary:
_LOGGER.info('Downloading binary distribution of the SDK from PyPi')
# Get a wheel distribution for the SDK from PyPI.
cmd_args.extend([
'--only-binary',
':all:',
'--python-version',
language_version_tag,
'--implementation',
language_implementation_tag,
'--abi',
abi_tag,
'--platform',
platform_tag
])
# Example wheel: apache_beam-2.4.0-cp27-cp27mu-manylinux1_x86_64.whl
expected_files = [
os.path.join(
temp_dir,
'%s-%s-%s%s-%s-%s.whl' % (
package_name.replace('-', '_'),
version,
language_implementation_tag,
language_version_tag,
abi_tag,
platform_tag))
]
else:
_LOGGER.info('Downloading source distribution of the SDK from PyPi')
cmd_args.extend(['--no-binary', ':all:'])
expected_files = [
os.path.join(temp_dir, '%s-%s.zip' % (package_name, version)),
os.path.join(temp_dir, '%s-%s.tar.gz' % (package_name, version))
]
_LOGGER.info('Executing command: %s', cmd_args)
try:
processes.check_output(cmd_args)
except processes.CalledProcessError as e:
raise RuntimeError(repr(e))
for sdk_file in expected_files:
if os.path.exists(sdk_file):
return sdk_file
raise RuntimeError(
'Failed to download a distribution for the running SDK. '
'Expected either one of %s to be found in the download folder.' %
(expected_files))
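# A minimal, hedged sketch of a concrete Stager for a plain local-filesystem
# staging location: stage_artifact receives a source path plus the full
# destination path built by stage_job_resources, so a file copy is enough
# here, and commit_manifest just returns a placeholder retrieval token.
# Real stagers (e.g. ones writing to GCS) would do considerably more.
class _LocalCopyStager(Stager):
  """Example-only stager that copies artifacts to a local directory."""
  def stage_artifact(self, local_path_to_artifact, artifact_name):
    # artifact_name is already a full destination path when called from
    # stage_job_resources, so a simple copy suffices for this sketch.
    shutil.copyfile(local_path_to_artifact, artifact_name)
  def commit_manifest(self):
    # No manifest handling in this sketch; return a dummy retrieval token.
    return 'no-retrieval-token'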
| 41.788599 | 97 | 0.675837 |
73fbe486fdc475dc98eefed89194af2f1f849152 | 5,773 | py | Python | utils/interpolation.py | NIRVANALAN/self-mono-sf | 80ac323099b3ca32802c5d3f91db3e6a5cafca25 | [
"Apache-2.0"
] | 213 | 2020-03-12T07:43:26.000Z | 2022-03-24T05:13:04.000Z | utils/interpolation.py | NIRVANALAN/self-mono-sf | 80ac323099b3ca32802c5d3f91db3e6a5cafca25 | [
"Apache-2.0"
] | 18 | 2020-04-20T12:30:46.000Z | 2022-02-18T09:26:35.000Z | utils/interpolation.py | NIRVANALAN/self-mono-sf | 80ac323099b3ca32802c5d3f91db3e6a5cafca25 | [
"Apache-2.0"
] | 45 | 2020-04-09T01:37:20.000Z | 2022-03-24T05:12:48.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
from torch import nn
import torch.nn.functional as tf
def interpolate2d(inputs, size, mode="bilinear"):
return tf.interpolate(inputs, size, mode=mode, align_corners=True)
def interpolate2d_as(inputs, target_as, mode="bilinear"):
_, _, h, w = target_as.size()
return tf.interpolate(inputs, [h, w], mode=mode, align_corners=True)
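# A small usage sketch with assumed shapes: upsample a coarse flow map so it
# matches the spatial size of an image tensor. Only the height and width of
# the target are used; batch and channel dimensions may differ.
def _example_interpolate2d_as():
    flow_coarse = torch.zeros(1, 2, 32, 64)   # (B, C, H, W)
    image = torch.zeros(1, 3, 128, 256)
    return interpolate2d_as(flow_coarse, image)  # -> shape (1, 2, 128, 256)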
def _bchw2bhwc(tensor):
return tensor.transpose(1,2).transpose(2,3)
def _bhwc2bchw(tensor):
return tensor.transpose(2,3).transpose(1,2)
class Meshgrid(nn.Module):
def __init__(self):
super(Meshgrid, self).__init__()
self.width = 0
self.height = 0
self.register_buffer("xx", torch.zeros(1,1))
self.register_buffer("yy", torch.zeros(1,1))
self.register_buffer("rangex", torch.zeros(1,1))
self.register_buffer("rangey", torch.zeros(1,1))
def _compute_meshgrid(self, width, height):
torch.arange(0, width, out=self.rangex)
torch.arange(0, height, out=self.rangey)
self.xx = self.rangex.repeat(height, 1).contiguous()
self.yy = self.rangey.repeat(width, 1).t().contiguous()
def forward(self, width, height):
if self.width != width or self.height != height:
self._compute_meshgrid(width=width, height=height)
self.width = width
self.height = height
return self.xx, self.yy
class BatchSub2Ind(nn.Module):
def __init__(self):
super(BatchSub2Ind, self).__init__()
self.register_buffer("_offsets", torch.LongTensor())
def forward(self, shape, row_sub, col_sub, out=None):
batch_size = row_sub.size(0)
height, width = shape
ind = row_sub*width + col_sub
torch.arange(batch_size, out=self._offsets)
self._offsets *= (height*width)
if out is None:
return torch.add(ind, self._offsets.view(-1,1,1))
else:
torch.add(ind, self._offsets.view(-1,1,1), out=out)
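# Worked example of the flattening above, under an assumed 2 x (3 x 4) batch:
# the pixel at (row=1, col=2) maps to 1*4 + 2 = 6 in batch element 0, and the
# same pixel in batch element 1 is shifted by height*width = 12, giving 18.
def _example_batch_sub2ind():
    sub2ind = BatchSub2Ind()
    rows = torch.tensor([[[1]], [[1]]])  # shape (batch=2, 1, 1)
    cols = torch.tensor([[[2]], [[2]]])
    return sub2ind([3, 4], rows, cols)   # -> tensor([[[6]], [[18]]])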
class Interp2(nn.Module):
def __init__(self, clamp=False):
super(Interp2, self).__init__()
self._clamp = clamp
self._batch_sub2ind = BatchSub2Ind()
self.register_buffer("_x0", torch.LongTensor())
self.register_buffer("_x1", torch.LongTensor())
self.register_buffer("_y0", torch.LongTensor())
self.register_buffer("_y1", torch.LongTensor())
self.register_buffer("_i00", torch.LongTensor())
self.register_buffer("_i01", torch.LongTensor())
self.register_buffer("_i10", torch.LongTensor())
self.register_buffer("_i11", torch.LongTensor())
self.register_buffer("_v00", torch.FloatTensor())
self.register_buffer("_v01", torch.FloatTensor())
self.register_buffer("_v10", torch.FloatTensor())
self.register_buffer("_v11", torch.FloatTensor())
self.register_buffer("_x", torch.FloatTensor())
self.register_buffer("_y", torch.FloatTensor())
def forward(self, v, xq, yq):
batch_size, channels, height, width = v.size()
# clamp if wanted
if self._clamp:
xq.clamp_(0, width - 1)
yq.clamp_(0, height - 1)
# ------------------------------------------------------------------
# Find neighbors
#
# x0 = torch.floor(xq).long(), x0.clamp_(0, width - 1)
# x1 = x0 + 1, x1.clamp_(0, width - 1)
# y0 = torch.floor(yq).long(), y0.clamp_(0, height - 1)
# y1 = y0 + 1, y1.clamp_(0, height - 1)
#
# ------------------------------------------------------------------
self._x0 = torch.floor(xq).long().clamp(0, width - 1)
self._y0 = torch.floor(yq).long().clamp(0, height - 1)
self._x1 = torch.add(self._x0, 1).clamp(0, width - 1)
self._y1 = torch.add(self._y0, 1).clamp(0, height - 1)
# batch_sub2ind
self._batch_sub2ind([height, width], self._y0, self._x0, out=self._i00)
self._batch_sub2ind([height, width], self._y0, self._x1, out=self._i01)
self._batch_sub2ind([height, width], self._y1, self._x0, out=self._i10)
self._batch_sub2ind([height, width], self._y1, self._x1, out=self._i11)
# reshape
v_flat = _bchw2bhwc(v).contiguous().view(-1, channels)
torch.index_select(v_flat, dim=0, index=self._i00.view(-1), out=self._v00)
torch.index_select(v_flat, dim=0, index=self._i01.view(-1), out=self._v01)
torch.index_select(v_flat, dim=0, index=self._i10.view(-1), out=self._v10)
torch.index_select(v_flat, dim=0, index=self._i11.view(-1), out=self._v11)
# local_coords
torch.add(xq, - self._x0.float(), out=self._x)
torch.add(yq, - self._y0.float(), out=self._y)
# weights
w00 = torch.unsqueeze((1.0 - self._y) * (1.0 - self._x), dim=1)
w01 = torch.unsqueeze((1.0 - self._y) * self._x, dim=1)
w10 = torch.unsqueeze(self._y * (1.0 - self._x), dim=1)
w11 = torch.unsqueeze(self._y * self._x, dim=1)
def _reshape(u):
return _bhwc2bchw(u.view(batch_size, height, width, channels))
# values
values = _reshape(self._v00)*w00 + _reshape(self._v01)*w01 \
+ _reshape(self._v10)*w10 + _reshape(self._v11)*w11
if self._clamp:
return values
else:
# find_invalid
invalid = ((xq < 0) | (xq >= width) | (yq < 0) | (yq >= height)).unsqueeze(dim=1).float()
# maskout invalid
transformed = invalid * torch.zeros_like(values) + (1.0 - invalid)*values
return transformed
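# A minimal usage sketch under assumed shapes: bilinearly sample a feature map
# at shifted query coordinates. With clamp=True, queries falling outside the
# image are clamped to the border instead of being masked to zero.
def _example_interp2():
    meshgrid = Meshgrid()
    interp2 = Interp2(clamp=True)
    feat = torch.rand(1, 3, 8, 8)
    xx, yy = meshgrid(width=8, height=8)
    xq = xx.unsqueeze(0) + 0.5        # shift every query half a pixel right
    yq = yy.unsqueeze(0).clone()      # clone so in-place clamping stays local
    return interp2(feat, xq, yq)      # -> sampled tensor of shape (1, 3, 8, 8)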
| 38.486667 | 101 | 0.593452 |
73fbfdc610ae3f0784d2fc140d8e0d2747ade457 | 3,809 | py | Python | pgbot/emotion.py | gresm/PygameCommunityBot | 0da081704baaaa6fd6464f7abe43e6ba5043952d | [
"MIT"
] | 77 | 2020-11-16T05:26:49.000Z | 2021-03-08T06:27:06.000Z | pgbot/emotion.py | gresm/PygameCommunityBot | 0da081704baaaa6fd6464f7abe43e6ba5043952d | [
"MIT"
] | 71 | 2021-03-19T17:51:30.000Z | 2022-02-19T12:42:19.000Z | pgbot/emotion.py | gresm/PygameCommunityBot | 0da081704baaaa6fd6464f7abe43e6ba5043952d | [
"MIT"
] | 19 | 2021-03-19T12:48:17.000Z | 2021-12-18T04:41:08.000Z | """
This file is a part of the source code for the PygameCommunityBot.
This project has been licensed under the MIT license.
Copyright (c) 2020-present PygameCommunityDiscord
This file defines some utilities and functions for the bot's emotion system
"""
import random
import math
import discord
import unidecode
from pgbot import common, db
from pgbot.utils import embed_utils, utils
EMOTION_CAPS = {
"happy": (-100, 100),
"anger": (0, 100),
"bored": (-100, 100),
"confused": (0, 100),
}
async def update(emotion_name: str, value: int):
"""
Update emotion characteristic 'emotion_name' with value 'value' integer
"""
async with db.DiscordDB("emotions") as db_obj:
emotions = db_obj.get({})
try:
emotions[emotion_name] += value
except KeyError:
emotions[emotion_name] = value
emotions[emotion_name] = utils.clamp(
emotions[emotion_name], *EMOTION_CAPS[emotion_name]
)
db_obj.write(emotions)
async def get(emotion_name: str):
"""
Get emotion characteristic 'emotion_name'
"""
async with db.DiscordDB("emotions") as db_obj:
emotions = db_obj.get({})
try:
return emotions[emotion_name]
except KeyError:
return 0
async def check_bonk(msg: discord.Message):
"""
Function to check bonk, update emotion state, and reply when bonked
"""
if common.BONK not in msg.content:
return
bonks = msg.content.count(common.BONK)
if await get("anger") + bonks > 30:
await embed_utils.send(
msg.channel,
title="Did you hit the snek?",
description="You mortal mammal! How you dare to boncc a snake?",
thumbnail_url="https://cdn.discordapp.com/emojis/779775305224159232.gif",
)
bonks = math.floor(math.log2(msg.content.count(common.BONK) + 1))
await update("anger", bonks)
await update("happy", -bonks)
async def dad_joke(msg: discord.Message):
"""
Utility to handle the bot making dad jokes
"""
# make typecheckers happy
if common.bot.user is None:
return
if await utils.get_channel_feature("dadjokes", msg.channel):
return
lowered = unidecode.unidecode(msg.content.lower().strip())
for trigger in ("i am", "i'm"):
if lowered == trigger:
await msg.channel.send(random.choice(common.SHAKESPEARE_QUOTES))
return
if trigger in lowered and len(lowered) < 60:
ind = lowered.index(trigger)
if ind and not msg.content[ind - 1].isspace():
return
name = msg.content[ind + len(trigger) :]
if not name or not name[0].isspace():
return
name = name.strip()
for char in (",", "\n", "."):
if char in name:
name = name.split(char)[0]
if name:
await msg.channel.send(
f"Hi {name}! I am <@!{common.bot.user.id}>",
allowed_mentions=discord.AllowedMentions.none(),
)
return
async def euphoria():
"""
Trigger a state of "euphoria" emotion, extremely happy and positive bot
"""
async with db.DiscordDB("emotions") as db_obj:
db_obj.write(
{
"happy": EMOTION_CAPS["happy"][1],
"anger": EMOTION_CAPS["anger"][0],
"bored": 0,
"confused": 0,
}
)
async def server_boost(msg: discord.Message):
"""
Helper to handle boost, trigger euphoria emotion state
"""
await euphoria()
if common.TEST_MODE:
return
await msg.channel.send("A LOT OF THANKSSS! :heart: <:pg_party:772652894574084098>")
| 27.601449 | 87 | 0.589919 |
73fc34dc87e50a87b1d010f0465f27fd36fe14f1 | 5,023 | py | Python | recipes/recipe_modules/support_3pp/run_script.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | [
"BSD-3-Clause"
] | null | null | null | recipes/recipe_modules/support_3pp/run_script.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | [
"BSD-3-Clause"
] | null | null | null | recipes/recipe_modules/support_3pp/run_script.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Defines the utility function for running a script; understands how to run
bash scripts on all host platforms (including windows)."""
from contextlib import contextmanager
from .workdir import Workdir
def _extract_contextual_dockerbuild_env_args(api):
# We don't want to pass the CIPD_CACHE_DIR through to dockerbuild, since it
# refers to a path on the host machine.
banlist = set(('CIPD_CACHE_DIR',))
return [
('--env-prefix', k, str(v))
for k, vs in api.context.env_prefixes.iteritems()
for v in vs
if k not in banlist
] + [
('--env-suffix', k, str(v))
for k, vs in api.context.env_suffixes.iteritems()
for v in vs
if k not in banlist
] + [
('--env', k, str(v))
for k, v in api.context.env.iteritems()
if k not in banlist
]
# Dockerbuild uses different names than the CIPD platform names. This maps from
# the CIPD platform name to the dockerbuild name.
_DOCKERBUILD_PLATFORM = {
'linux-armv6l': 'linux-armv6',
'linux-arm64': 'linux-arm64',
'linux-mipsle': 'linux-mipsel',
'linux-mips64': 'linux-mips64',
'linux-amd64': 'manylinux-x64',
'linux-386': 'manylinux-x86',
}
def run_script(api, *args, **kwargs):
"""Runs a script (python or bash) with the given arguments.
Understands how to make bash scripts run on windows, as well as how to run
linux commands under dockerbuild.
Will prepare the windows or OS X toolchain as well.
Args:
* args (*str) - The arguments of the script. The script name (`args[0]`)
must end with either '.sh' or '.py'.
Kwargs:
* compile_platform (str) - Indicates what platform we want this step to
compile for. If omitted, executes under the host platform without any
compiler available. Omit to use the host environment.
* no_toolchain (bool) - If compiling natively (without docker), do not
attempt to make a toolchain available.
* workdir (Workdir) - The working directory object we're running the script
under. Required if `compile_platform` is specified.
* stdout - Passed through to the underlying step.
* step_test_data - Passed through to the underlying step.
"""
compile_platform = kwargs.pop('compile_platform', '')
no_toolchain = kwargs.pop('no_toolchain', False)
workdir = kwargs.pop('workdir', None)
stdout = kwargs.pop('stdout', None)
step_test_data = kwargs.pop('step_test_data', None)
if compile_platform:
assert isinstance(workdir, Workdir), (
'workdir argument required if compile_platform is specified')
script_name = args[0].pieces[-1]
step_name = str(' '.join([script_name]+map(str, args[1:])))
interpreter = {
'py': 'python',
'sh': 'bash',
}.get(script_name.rsplit('.', 1)[-1], None)
assert interpreter is not None, (
'scriptname must end with either ".sh" or ".py"')
# TODO(iannucci): Allow better control of toolchain environments.
# See also resolved_spec.tool_platform.
if compile_platform.startswith('linux-'):
# dockerbuild time.
dockerbuild_platform = _DOCKERBUILD_PLATFORM[compile_platform]
repo_root = api.support_3pp.repo_resource()
cmd = [
'infra.tools.dockerbuild', '--logs-debug', 'run',
'--platform', dockerbuild_platform, '--workdir', workdir.base,
]
for tup in _extract_contextual_dockerbuild_env_args(api):
cmd.extend(tup)
cmd += ['--', interpreter, args[0]] + list(args[1:])
with api.context(env={'PYTHONPATH': repo_root}):
return api.python(step_name,
'-m', cmd, stdout=stdout, step_test_data=step_test_data,
venv=repo_root.join(
'infra', 'tools', 'dockerbuild', 'standalone.vpython'))
@contextmanager
def no_sdk():
yield
sdk = no_sdk()
if not no_toolchain:
if compile_platform.startswith('mac-'):
sdk = api.osx_sdk('mac')
if compile_platform.startswith('windows-'):
sdk = api.windows_sdk()
with sdk:
if interpreter == 'bash':
cmd = ['bash'] + list(args)
# On windows, we use the bash.exe that ships with git-for-windows,
# cheating a bit by injecting a `git-bash` script into $PATH, and then
# running the desired script with `git bash` instead of `bash`.
env_prefixes = {}
if api.platform.is_win:
env_prefixes['PATH'] = [api.support_3pp.resource('win_support')]
cmd = ['git'] + cmd
elif api.platform.is_mac:
env_prefixes['PATH'] = [api.support_3pp.resource('mac_support')]
with api.context(env_prefixes=env_prefixes):
return api.step(step_name, cmd,
stdout=stdout, step_test_data=step_test_data)
elif interpreter == 'python':
return api.python(step_name, args[0], args[1:],
stdout=stdout, step_test_data=step_test_data,
venv=True)
raise AssertionError('impossible') # pragma: no cover
| 36.136691 | 79 | 0.670317 |
73fc55554d03855a25852ef208233705e5a20b67 | 2,835 | py | Python | instructors/course-2015/functions_gens_and_ducks/examples/in_class/parsetext_trade_2015.py | mgadagin/PythonClass | 70b370362d75720b3fb0e1d6cc8158f9445e9708 | [
"MIT"
] | 46 | 2017-09-27T20:19:36.000Z | 2020-12-08T10:07:19.000Z | instructors/course-2015/functions_gens_and_ducks/examples/in_class/parsetext_trade_2015.py | mgadagin/PythonClass | 70b370362d75720b3fb0e1d6cc8158f9445e9708 | [
"MIT"
] | 6 | 2018-01-09T08:07:37.000Z | 2020-09-07T12:25:13.000Z | instructors/course-2015/functions_gens_and_ducks/examples/in_class/parsetext_trade_2015.py | mgadagin/PythonClass | 70b370362d75720b3fb0e1d6cc8158f9445e9708 | [
"MIT"
] | 18 | 2017-10-10T02:06:51.000Z | 2019-12-01T10:18:13.000Z | # -*- coding: utf-8 -*-
#above helps to declare what encoding we want to use in the module
#note this is copied from the first json lab
#above is used to set the encoding for this module, (unfortunately didn't help that much)
#used for seeing our data in nicest string format possible
import pprint
#again idiom for reading in a file, relative path given
with open('../pres_on_trade.txt', 'r') as fp:
all_text = fp.read()
#str.split() will split groups of characters on any white space, easy... nice
#sorted built-in function will only sort alphbetically here
all_words = sorted(all_text.split())
#begin preparation of words for a reasonable word frequency count
#we need to change our words from str to unicode
#unicode_words = [unicode(word) for word in all_words if unicode(word)]
#list comprehensions won't work because we get errors,
#let's do a try: except: block
unicode_words = []
for word in all_words:
try:
unicode_words.append(unicode(word))
except UnicodeDecodeError:
pass
#awesome list comprehension, they take iterables and return lists
#this will clean our words of unwanted punctuation and change to all lowercase
all_words = [word.strip("?.\'-,().").lower() for word in unicode_words]
#print all_words
#help(''.strip)
#reminder on dictionary syntax - setting the key and value
#dict_name[key] = value
#word_freq_dc['word'] = 18
#using dict.get method to check for existence and build word_freq dictionary
word_freq_dc = {}
for word in all_words:
times = word_freq_dc.get(word, 0)
times += 1
word_freq_dc[word] = times
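#same pattern on a tiny list, to see what dict.get(key, default) buys us:
#counting ['a', 'b', 'a'] gives {'a': 2, 'b': 1} with no KeyError on first use
tiny_counts = {}
for letter in ['a', 'b', 'a']:
    tiny_counts[letter] = tiny_counts.get(letter, 0) + 1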
#the easy way :) if you knew about it or where to look
from collections import Counter
#help(Counter)
counter = Counter(all_words)
#can use slice method on a sequence, this gets first 40 of type list
#that is: Counter.most_common() returns a list, a list is considerd one kind of sequence
print(counter.most_common()[:40])
#end line character for clarity when printing
print '\n'
#to be sure
counter_for_dc = Counter(word_freq_dc)
counter_from_before = Counter(all_words)
print counter_for_dc == counter_from_before
#going further with a generator expression
non_small_words = (word for word in all_words
if len(word) > 4 and
word is not 'usa' and
word not in
['applause', 'laughter', 'there', 'these', 'those'])
recounter = Counter(non_small_words)
print(recounter.most_common()[:40])
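#note: a generator expression is lazy and single-use; Counter already consumed
#non_small_words above, so feeding it to Counter again gives an empty Counter()
print(Counter(non_small_words)) # -> Counter() because the generator is spent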
#below is work we did to figure out the proper procedure to
#count words using a dictionary
#pprint.pprint(word_freq_dc)
#for k, v in word_freq_dc.iteritems():
# tupled_word_freq.append((k, v))
#tupled_word_freq = zip(word_freq_dc.itervalues(), word_freq_dc.iterkeys())
#print(tupled_word_freq)
#print sorted(tupled_word_freq)
#help(word_freq_dc.get)
| 28.928571 | 89 | 0.724515 |
73fc6bf7d20cfcd4e4ccc6ea541d1270cc2ae4f5 | 870 | py | Python | nova/api/openstack/compute/schemas/networks_associate.py | ebalduf/nova-backports | 6bf97ec73467de522d34ab7a17ca0e0874baa7f9 | [
"Apache-2.0"
] | 5 | 2016-04-28T16:20:38.000Z | 2021-04-25T11:19:03.000Z | nova/api/openstack/compute/schemas/networks_associate.py | woraser/nova | fc3890667e4971e3f0f35ac921c2a6c25f72adec | [
"Apache-2.0"
] | 132 | 2017-03-27T11:31:52.000Z | 2022-03-30T08:45:02.000Z | nova/api/openstack/compute/schemas/networks_associate.py | woraser/nova | fc3890667e4971e3f0f35ac921c2a6c25f72adec | [
"Apache-2.0"
] | 8 | 2017-03-27T07:50:38.000Z | 2020-02-14T16:55:56.000Z | # Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.validation import parameter_types
associate_host = {
'type': 'object',
'properties': {
'associate_host': parameter_types.hostname
},
'required': ['associate_host'],
'additionalProperties': False
}
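# An illustrative request body that should validate against the schema above;
# the host name is an assumed example value, not something the API mandates.
_example_associate_host_body = {
    'associate_host': 'compute-01',
}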
| 34.8 | 78 | 0.712644 |
73fcc339e6fefd60a002c2ec3b49f0f4fc3f89e0 | 3,127 | py | Python | scripts/freesurfer/_01_recon_all.py | mathias-sm/mne-bids-pipeline | 55a8d7c7ca5a254ff7b9af84b818b164692667d5 | [
"BSD-3-Clause"
] | null | null | null | scripts/freesurfer/_01_recon_all.py | mathias-sm/mne-bids-pipeline | 55a8d7c7ca5a254ff7b9af84b818b164692667d5 | [
"BSD-3-Clause"
] | null | null | null | scripts/freesurfer/_01_recon_all.py | mathias-sm/mne-bids-pipeline | 55a8d7c7ca5a254ff7b9af84b818b164692667d5 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import os
import shutil
import sys
from pathlib import Path
import logging
from typing import Union
from mne.utils import run_subprocess
from config import parallel_func
import config
PathLike = Union[str, Path]
logger = logging.getLogger('mne-bids-pipeline')
fs_bids_app = Path(__file__).parent / 'contrib' / 'run.py'
def run_recon(root_dir, subject, fs_bids_app) -> None:
logger.info(f"Running recon-all on subject {subject}. This will take "
f"a LONG time – it's a good idea to let it run over night.")
subjects_dir = Path(config.get_fs_subjects_dir())
subj_dir = subjects_dir / f"sub-{subject}"
if subj_dir.exists():
logger.info(f"Subject {subject} is already present. Please delete the "
f"directory if you want to recompute.")
return
env = os.environ
if 'FREESURFER_HOME' not in env:
raise RuntimeError("FreeSurfer is not available.")
license_file = Path(f"{env['FREESURFER_HOME']}/license.txt")
if not license_file.exists():
license_file = Path(f"{env['FREESURFER_HOME']}/.license")
if not license_file.exists():
raise RuntimeError("FreeSurfer license file not found.")
cmd = [
f"{sys.executable}",
f"{fs_bids_app}",
f"{root_dir}",
f"{subjects_dir}", "participant",
"--n_cpus=2", "--stages=all", "--skip_bids_validator",
f"--license_file={license_file}",
f"--participant_label={subject}"
]
logger.debug("Running: " + " ".join(cmd))
run_subprocess(cmd, env=env, verbose=logger.level)
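# A sketch of the command list assembled above for a hypothetical subject
# '01'; every path here is an assumption made for illustration only, while
# the flags themselves are the ones hard-coded in run_recon.
def _example_recon_cmd():
    return [
        sys.executable,
        str(fs_bids_app),
        '/data/bids_root',
        '/data/derivatives/freesurfer',
        'participant',
        '--n_cpus=2', '--stages=all', '--skip_bids_validator',
        '--license_file=/opt/freesurfer/license.txt',
        '--participant_label=01',
    ]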
def main() -> None:
"""Run freesurfer recon-all command on BIDS dataset.
The script allows you to run the FreeSurfer recon-all
command on all subjects of your BIDS dataset. It can
run in parallel with the --n_jobs parameter.
It is built on top of the FreeSurfer BIDS app:
https://github.com/BIDS-Apps/freesurfer
and the MNE BIDS Pipeline
https://mne.tools/mne-bids-pipeline
You must have freesurfer available on your system.
Run via the MNE BIDS Pipeline's `run.py`:
python run.py --steps=freesurfer --config=your_pipeline_config.py
""" # noqa
logger.info('Running FreeSurfer')
subjects = config.get_subjects()
root_dir = config.get_bids_root()
subjects_dir = Path(config.get_fs_subjects_dir())
subjects_dir.mkdir(parents=True, exist_ok=True)
with config.get_parallel_backend():
n_jobs = config.get_n_jobs()
parallel, run_func, _ = parallel_func(run_recon, n_jobs=n_jobs)
parallel(run_func(root_dir, subject, fs_bids_app)
for subject in subjects)
# Handle fsaverage
fsaverage_dir = subjects_dir / 'fsaverage'
if fsaverage_dir.exists():
if fsaverage_dir.is_symlink():
fsaverage_dir.unlink()
else:
shutil.rmtree(fsaverage_dir)
env = os.environ
shutil.copytree(f"{env['FREESURFER_HOME']}/subjects/fsaverage",
subjects_dir / 'fsaverage')
if __name__ == '__main__':
main()
| 29.5 | 79 | 0.654941 |
73fccf655a075cfd1f4448b85d4944bc3d7f534a | 1,309 | py | Python | setup.py | kaiterra/api-python | 102a99b487f328f61f785597d22eee876ddcfcb6 | [
"MIT"
] | 2 | 2019-07-03T16:26:48.000Z | 2021-09-11T18:57:12.000Z | setup.py | kaiterra/api-python | 102a99b487f328f61f785597d22eee876ddcfcb6 | [
"MIT"
] | null | null | null | setup.py | kaiterra/api-python | 102a99b487f328f61f785597d22eee876ddcfcb6 | [
"MIT"
] | 4 | 2019-09-10T19:20:03.000Z | 2019-11-28T02:37:07.000Z | #!/usr/bin/env python3
"""Define the setup options."""
import os
import re
import setuptools
with open('README.rst', 'r') as f:
readme = f.read()
setuptools.setup(
name='kaiterra-client',
version='0.0.1',
description="Kaiterra API Client",
long_description=readme,
long_description_content_type="text/x-rst",
url='https://github.com/kaiterra/api-python',
license='MIT License',
packages=setuptools.find_packages(exclude=['tests']),
test_suite='kaiterra_client.tests',
tests_require=[
'requests-mock',
],
install_requires=[
# Require new ISRG root certificates
'requests>=2.16.0',
],
# Uses enums (3.4) and type hints (3.5), though reducing this to >=3.4
# by importing the typing package is a possibility
python_requires='>=3.5',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| 30.44186 | 74 | 0.627196 |
73fcf1399dc6c9150cb4ed54047914f713bb929a | 22,318 | py | Python | python/jiminy_py/src/jiminy_py/viewer.py | matthieuvigne/jiminy | f893b2254a9e695a4154b941b599536756ea3d8b | [
"MIT"
] | null | null | null | python/jiminy_py/src/jiminy_py/viewer.py | matthieuvigne/jiminy | f893b2254a9e695a4154b941b599536756ea3d8b | [
"MIT"
] | null | null | null | python/jiminy_py/src/jiminy_py/viewer.py | matthieuvigne/jiminy | f893b2254a9e695a4154b941b599536756ea3d8b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
## @file jiminy_py/viewer.py
import os
import re
import time
import shutil
import tempfile
import subprocess
import numpy as np
from bisect import bisect_right
from threading import Thread, Lock
from PIL import Image
import pinocchio as pnc
from pinocchio.robot_wrapper import RobotWrapper
from pinocchio import libpinocchio_pywrap as pin
from pinocchio import Quaternion, SE3, se3ToXYZQUAT
from pinocchio.rpy import rpyToMatrix
class Viewer:
backend = None
port_forwarding = None
_backend_obj = None
_backend_exception = None
_backend_proc = None
## Unique threading.Lock for every simulation.
# It is required for parallel rendering since corbaserver does not support multiple connection simultaneously.
_lock = Lock()
def __init__(self, jiminy_model, use_theoretical_model=False,
urdf_rgba=None, robot_index=0,
backend=None, window_name='python-pinocchio', scene_name='world'):
# Backup some user arguments
self.urdf_path = jiminy_model.urdf_path
self.scene_name = scene_name
self.window_name = window_name
self.use_theoretical_model = use_theoretical_model
# Extract the right Pinocchio model
if self.use_theoretical_model:
self.pinocchio_model = jiminy_model.pinocchio_model_th
self.pinocchio_data = jiminy_model.pinocchio_data_th
else:
self.pinocchio_model = jiminy_model.pinocchio_model
self.pinocchio_data = jiminy_model.pinocchio_data
# Select the desired backend
if backend is None:
if Viewer.backend is None:
backend = 'meshcat' if Viewer._is_notebook() else 'gepetto-gui'
else:
backend = Viewer.backend
# Update the backend currently running, if any
if (Viewer.backend != backend) and \
(Viewer._backend_obj is not None or \
Viewer._backend_proc is not None):
Viewer.close()
print("Different backend already running. Closing it...")
Viewer.backend = backend
# Check if the backend is still available, if any
if Viewer._backend_obj is not None and Viewer._backend_proc is not None:
if Viewer._backend_proc.poll() is not None:
Viewer._backend_obj = None
Viewer._backend_proc = None
Viewer._backend_exception = None
# Access the current backend or create one if none is available
try:
if (Viewer.backend == 'gepetto-gui'):
import omniORB
Viewer._backend_exception = omniORB.CORBA.COMM_FAILURE
if Viewer._backend_obj is None:
Viewer._backend_obj, Viewer._backend_proc = \
Viewer._get_gepetto_client(True)
if Viewer._backend_obj is not None:
self._client = Viewer._backend_obj.gui
else:
raise RuntimeError("Impossible to open Gepetto-viewer.")
else:
from pinocchio.visualize import MeshcatVisualizer
from pinocchio.shortcuts import createDatas
if Viewer._backend_obj is None:
Viewer._create_meshcat_backend()
if Viewer._is_notebook():
Viewer.display_jupyter_cell()
else:
Viewer._backend_obj.open()
self._client = MeshcatVisualizer(self.pinocchio_model, None, None)
self._client.viewer = Viewer._backend_obj
except:
raise RuntimeError("Impossible to load backend.")
# Create a RobotWrapper
robot_name = "robot_" + str(robot_index)
if (Viewer.backend == 'gepetto-gui'):
Viewer._delete_gepetto_nodes_viewer(scene_name + '/' + robot_name)
if (urdf_rgba is not None):
alpha = urdf_rgba[3]
self.urdf_path = Viewer._get_colorized_urdf(self.urdf_path, urdf_rgba[:3])
else:
alpha = 1.0
collision_model = pin.buildGeomFromUrdf(self.pinocchio_model, self.urdf_path,
os.environ.get('JIMINY_MESH_PATH', []),
pin.GeometryType.COLLISION)
visual_model = pin.buildGeomFromUrdf(self.pinocchio_model, self.urdf_path,
os.environ.get('JIMINY_MESH_PATH', []),
pin.GeometryType.VISUAL)
self._rb = RobotWrapper(model=self.pinocchio_model,
collision_model=collision_model,
visual_model=visual_model)
if not self.use_theoretical_model:
self._rb.data = jiminy_model.pinocchio_data
self.pinocchio_data = self._rb.data
# Load robot in the backend viewer
if (Viewer.backend == 'gepetto-gui'):
if not scene_name in self._client.getSceneList():
self._client.createSceneWithFloor(scene_name)
if not window_name in self._client.getWindowList():
self._window_id = self._client.createWindow(window_name)
self._client.addSceneToWindow(scene_name, self._window_id)
self._client.createGroup(scene_name + '/' + scene_name)
self._client.addLandmark(scene_name + '/' + scene_name, 0.1)
else:
self._window_id = int(np.where([name == window_name
for name in self._client.getWindowList()])[0][0])
self._rb.initViewer(windowName=window_name, sceneName=scene_name, loadModel=False)
self._rb.loadViewerModel(robot_name)
self._client.setFloatProperty(scene_name + '/' + robot_name,
'Transparency', 1 - alpha)
else:
self._client.collision_model = collision_model
self._client.visual_model = visual_model
self._client.data, self._client.collision_data, self._client.visual_data = \
createDatas(self.pinocchio_model, collision_model, visual_model)
self._client.loadViewerModel(rootNodeName=robot_name, color=urdf_rgba)
self._rb.viz = self._client
@staticmethod
def _create_meshcat_backend():
import meshcat
from contextlib import redirect_stdout
with redirect_stdout(None):
Viewer._backend_obj = meshcat.Visualizer()
Viewer._backend_proc = Viewer._backend_obj.window.server_proc
@staticmethod
def reset_port_forwarding(port_forwarding=None):
Viewer.port_forwarding = port_forwarding
@staticmethod
def display_jupyter_cell(height=600, width=900, force_create_backend=False):
if Viewer.backend == 'meshcat' and Viewer._is_notebook():
from IPython.core.display import HTML, display as ipython_display
if Viewer._backend_obj is None:
if force_create_backend:
Viewer._create_meshcat_backend()
else:
raise ValueError("No meshcat backend available and 'force_create_backend' is set to False.")
viewer_url = Viewer._backend_obj.url()
if Viewer.port_forwarding is not None:
url_port_pattern = '(?<=:)[0-9]+(?=/)'
port_localhost = int(re.search(url_port_pattern, viewer_url).group())
if port_localhost in Viewer.port_forwarding.keys():
viewer_url = re.sub(url_port_pattern, str(Viewer.port_forwarding[port_localhost]), viewer_url)
else:
print("Port forwarding defined but no port mapping associated with {port_localhost}.")
jupyter_html = f'\n<div style="height: {height}px; width: {width}px; overflow-x: auto; overflow-y: hidden; resize: both">\
\n<iframe src="{viewer_url}" style="width: 100%; height: 100%; border: none">\
</iframe>\n</div>\n'
ipython_display(HTML(jupyter_html))
else:
raise ValueError("Display in a Jupyter cell is only available using 'meshcat' backend and within a Jupyter notebook.")
@staticmethod
def close():
if Viewer._backend_proc is not None:
if Viewer._backend_proc.poll() is not None:
Viewer._backend_proc.terminate()
Viewer._backend_proc = None
Viewer._backend_obj = None
Viewer._backend_exception = None
@staticmethod
def _is_notebook():
try:
shell = get_ipython().__class__.__name__
if shell == 'ZMQInteractiveShell':
return True # Jupyter notebook or qtconsole
elif shell == 'TerminalInteractiveShell':
return False # Terminal running IPython
else:
return False # Other type, if any
except NameError:
return False # Probably standard Python interpreter
@staticmethod
def _get_colorized_urdf(urdf_path, rgb, custom_mesh_search_path=None):
"""
@brief Generate a unique colorized URDF.
@remark Multiple identical URDF model of different colors can be
loaded in Gepetto-viewer this way.
@param[in] urdf_path Full path of the URDF file
@param[in] rgb RGB code defining the color of the model. It is the same for each link.
@return Full path of the colorized URDF file.
"""
color_string = "%.3f_%.3f_%.3f_1.0" % rgb
color_tag = "<color rgba=\"%.3f %.3f %.3f 1.0\"" % rgb # don't close tag with '>', in order to handle <color/> and <color></color>
colorized_tmp_path = os.path.join("/tmp", "colorized_urdf_rgba_" + color_string)
colorized_urdf_path = os.path.join(colorized_tmp_path, os.path.basename(urdf_path))
if not os.path.exists(colorized_tmp_path):
os.makedirs(colorized_tmp_path)
with open(urdf_path, 'r') as urdf_file:
colorized_contents = urdf_file.read()
for mesh_fullpath in re.findall('<mesh filename="(.*)"', colorized_contents):
colorized_mesh_fullpath = os.path.join(colorized_tmp_path, mesh_fullpath[1:])
colorized_mesh_path = os.path.dirname(colorized_mesh_fullpath)
if not os.access(colorized_mesh_path, os.F_OK):
os.makedirs(colorized_mesh_path)
shutil.copy2(mesh_fullpath, colorized_mesh_fullpath)
colorized_contents = colorized_contents.replace('"' + mesh_fullpath + '"',
'"' + colorized_mesh_fullpath + '"', 1)
colorized_contents = re.sub("<color rgba=\"[\d. ]*\"", color_tag, colorized_contents)
with open(colorized_urdf_path, 'w') as colorized_urdf_file:
colorized_urdf_file.write(colorized_contents)
return colorized_urdf_path
@staticmethod
def _get_gepetto_client(open_if_needed=False):
"""
@brief Get a pointer to the running process of Gepetto-Viewer.
@details This method can be used to open a new process if necessary.
@param[in] open_if_needed Whether a new process must be opened if
no running process is found.
Optional: False by default
@return A pointer to the running Gepetto-viewer Client and its PID.
"""
try:
from gepetto.corbaserver.client import Client
return Client(), None
except:
try:
return Client(), None
except:
if (open_if_needed):
FNULL = open(os.devnull, 'w')
proc = subprocess.Popen(['gepetto-gui'],
shell=False,
stdout=FNULL,
stderr=FNULL)
time.sleep(1)
try:
return Client(), proc
except:
try:
return Client(), proc
except:
print("Impossible to open Gepetto-viewer")
return None, None
def _delete_gepetto_nodes_viewer(self, *nodes_path):
"""
@brief Delete a 'node' in Gepetto-viewer.
@remark Be careful: one must specify the full path of a node, including
all parent groups, but without the window name, i.e.
'scene_name/robot_name' to delete the robot.
@param[in] nodes_path Full path of the node to delete
"""
if Viewer.backend == 'gepetto-gui':
for node_path in nodes_path:
if node_path in self._client.getNodeList():
self._client.deleteNode(node_path, True)
def _getViewerNodeName(self, geometry_object, geometry_type):
"""
@brief Get the full path of a node associated with a given geometry
object and geometry type.
@remark This is a hidden function that is not automatically imported
using 'from wdc_jiminy_py import *'. It is not intended to
be called manually.
@param[in] geometry_object Geometry object from which to get the node
@param[in] geometry_type Geometry type. It must be either
pin.GeometryType.VISUAL or pin.GeometryType.COLLISION
for display and collision, respectively.
@return Full path of the associated node.
"""
if geometry_type is pin.GeometryType.VISUAL:
return self._rb.viz.viewerVisualGroupName + '/' + geometry_object.name
elif geometry_type is pin.GeometryType.COLLISION:
return self._rb.viz.viewerCollisionGroupName + '/' + geometry_object.name
def _updateGeometryPlacements(self, visual=False):
"""
@brief Update the generalized position of a geometry object.
@remark This is a hidden function that is not automatically imported
using 'from wdc_jiminy_py import *'. It is not intended to
be called manually.
@param[in] visual Whether it is a visual or collision update
"""
if visual:
geom_model = self._rb.visual_model
geom_data = self._rb.visual_data
else:
geom_model = self._rb.collision_model
geom_data = self._rb.collision_data
pin.updateGeometryPlacements(self.pinocchio_model,
self.pinocchio_data,
geom_model, geom_data)
def setCameraTransform(self, translation, rotation):
# translation : [Px, Py, Pz],
# rotation : [Roll, Pitch, Yaw]
R_pnc = rpyToMatrix(np.array(rotation))
if Viewer.backend == 'gepetto-gui':
T_pnc = np.array(translation)
T_R = SE3(R_pnc, T_pnc)
self._client.setCameraTransform(self._window_id, se3ToXYZQUAT(T_R).tolist())
else:
import meshcat.transformations as tf
# Transformation of the camera object
T_meshcat = tf.translation_matrix(translation)
self._client.viewer["/Cameras/default/rotated/<object>"].set_transform(T_meshcat)
# Orientation of the camera object
Q_pnc = Quaternion(R_pnc).coeffs()
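# Eigen/Pinocchio stores quaternion coefficients as (x, y, z, w), whereas
# meshcat's tf.quaternion_matrix expects (w, x, y, z), hence the roll by one.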
Q_meshcat = np.roll(Q_pnc, shift=1)
R_meshcat = tf.quaternion_matrix(Q_meshcat)
self._client.viewer["/Cameras/default"].set_transform(R_meshcat)
def captureFrame(self):
if Viewer.backend == 'gepetto-gui':
png_path = next(tempfile._get_candidate_names())
self._client.captureFrame(self._window_id, png_path)
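# Read the capture back from disk and drop the last channel (presumably alpha),
# so that only the RGB planes are returned.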
rgb_array = np.array(Image.open(png_path))[:, :, :-1]
os.remove(png_path)
return rgb_array
else:
raise RuntimeError("Screen capture through Python only available using 'gepetto-gui' backend.")
def refresh(self):
"""
@brief Refresh the configuration of Model in the viewer.
"""
if self.use_theoretical_model:
raise RuntimeError("'Refresh' method only available if 'use_theoretical_model'=False.")
if Viewer.backend == 'gepetto-gui':
if self._rb.displayCollisions:
self._client.applyConfigurations(
[self._getViewerNodeName(collision, pin.GeometryType.COLLISION)
for collision in self._rb.collision_model.geometryObjects],
[pin.se3ToXYZQUATtuple(self._rb.collision_data.oMg[\
self._rb.collision_model.getGeometryId(collision.name)])
for collision in self._rb.collision_model.geometryObjects]
)
if self._rb.displayVisuals:
self._updateGeometryPlacements(visual=True)
self._client.applyConfigurations(
[self._getViewerNodeName(visual, pin.GeometryType.VISUAL)
for visual in self._rb.visual_model.geometryObjects],
[pin.se3ToXYZQUATtuple(self._rb.visual_data.oMg[\
self._rb.visual_model.getGeometryId(visual.name)])
for visual in self._rb.visual_model.geometryObjects]
)
self._client.refresh()
else:
self._updateGeometryPlacements(visual=True)
for visual in self._rb.visual_model.geometryObjects:
T = self._rb.visual_data.oMg[\
self._rb.visual_model.getGeometryId(visual.name)].homogeneous.A
self._client.viewer[\
self._getViewerNodeName(visual, pin.GeometryType.VISUAL)].set_transform(T)
def display(self, evolution_robot, speed_ratio, xyz_offset=None):
t = [s.t for s in evolution_robot]
i = 0
init_time = time.time()
while i < len(evolution_robot):
s = evolution_robot[i]
if (xyz_offset is not None):
q = s.q.copy() # Make sure to use a copy to avoid altering the original data
q[:3] += xyz_offset
else:
q = s.q
with Viewer._lock: # A lock is required because the omniORB corbaserver does not support multiple simultaneous connections.
try:
self._rb.display(q)
except Viewer._backend_exception:
break
t_simu = (time.time() - init_time) * speed_ratio
i = bisect_right(t, t_simu)
if t_simu < s.t:
time.sleep(s.t - t_simu)
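# A minimal usage sketch for the function below (hypothetical values; assumes the
# urdf_rgba entries are 4-component (r, g, b, a) tuples as described in its docstring):
#   traj = {'evolution_robot': states, # State objects sorted by increasing .t
#           'jiminy_model': jiminy_model,
#           'use_theoretical_model': False}
#   play_trajectories([traj], speed_ratio=0.5, urdf_rgba=[(1.0, 0.0, 0.0, 1.0)])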
def play_trajectories(trajectory_data, xyz_offset=None, urdf_rgba=None, speed_ratio=1.0,
backend='gepetto-gui', window_name='python-pinocchio', scene_name='world',
close_backend=None):
"""!
@brief Display robot evolution in Gepetto-viewer at stable speed.
@remark The speed is independent of the machine, and more
specifically of CPU power.
@param[in] trajectory_data Trajectory dictionary with keys:
'evolution_robot': list of State object of increasing time
'jiminy_model': Jiminy model (None if omitted)
'use_theoretical_model': whether the theoretical or actual model must be used
@param[in] xyz_offset Constant translation of the root joint in world frame (1D numpy array)
@param[in] urdf_rgba RGBA code defining the color of the model. It is the same for each link.
Optional: Original colors of each link. No alpha.
@param[in] speed_ratio Speed ratio of the simulation
@param[in] window_name Name of the Gepetto-viewer's window in which to display the robot.
Optional: Common default name if omitted.
@param[in] scene_name Name of the Gepetto-viewer's scene in which to display the robot.
Optional: Common default name if omitted.
"""
if (close_backend is None):
# Close backend if it was not available beforehand
close_backend = Viewer._backend_obj is None
# Load robots in gepetto viewer
robots = []
for i in range(len(trajectory_data)):
jiminy_model = trajectory_data[i]['jiminy_model']
use_theoretical_model = trajectory_data[i]['use_theoretical_model']
robot = Viewer(jiminy_model, use_theoretical_model=use_theoretical_model,
urdf_rgba=urdf_rgba[i] if urdf_rgba is not None else None, robot_index=i,
backend=backend, window_name=window_name, scene_name=scene_name)
if (xyz_offset is not None and xyz_offset[i] is not None):
q = trajectory_data[i]['evolution_robot'][0].q.copy() # Make sure to use a copy to avoid altering the original data
q[:3] += xyz_offset[i]
else:
q = trajectory_data[i]['evolution_robot'][0].q
try:
robot._rb.display(q)
except Viewer._backend_exception:
break
robots.append(robot)
if (xyz_offset is None):
xyz_offset = len(trajectory_data) * (None,)
threads = []
for i in range(len(trajectory_data)):
threads.append(Thread(target=robots[i].display,
args=(trajectory_data[i]['evolution_robot'],
speed_ratio, xyz_offset[i])))
for i in range(len(trajectory_data)):
threads[i].start()
for i in range(len(trajectory_data)):
threads[i].join()
if close_backend:
Viewer.close()
| 44.995968 | 138 | 0.592705 |
73fd15b6311977c3272f370691a734157d4d6666 | 1,923 | py | Python | data/setup_data.py | comath/goko | a58925baf5398f7cca7909eed632dbbd87931827 | [
"Apache-2.0"
] | null | null | null | data/setup_data.py | comath/goko | a58925baf5398f7cca7909eed632dbbd87931827 | [
"Apache-2.0"
] | null | null | null | data/setup_data.py | comath/goko | a58925baf5398f7cca7909eed632dbbd87931827 | [
"Apache-2.0"
] | null | null | null |
import numpy as np
import tensorflow as tf
mnist = tf.keras.datasets.mnist
import pandas as pd
from sklearn.neighbors import KDTree
import os
# Base MNIST transform for easy access.
# The Yaml files are often messed with, these are the base files.
mnist_yaml = '''
---
cutoff: 5
resolution: -10
scale_base: 2
data_path: ../data/mnist.dat
labels_path: ../data/mnist.csv
count: 60000
data_dim: 784
labels_dim: 10
in_ram: True
'''
mnist_complex_yaml = '''
---
cutoff: 0
resolution: -20
scale_base: 1.3
use_singletons: true
verbosity: 0
data_path: ../data/mnist.dat
labels_path: ../data/mnist.csv
count: 60000
data_dim: 784
in_ram: True
schema:
'y': i32
name: string
'''
metaFile = open("data/mnist.yml","wb")
metaFile.write(mnist_yaml.encode('utf-8'))
metaFile.close()
metaFile = open("data/mnist_complex.yml","wb")
metaFile.write(mnist_complex_yaml.encode('utf-8'))
metaFile.close()
(x_train, y_train),(x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
x_train = x_train.astype(np.float32)
x_train = x_train.reshape(-1, 28*28)
dataFile = open("data/mnist.dat", "wb")
for x in x_train:
dataFile.write(x.tobytes())
dataFile.close()
y_bools = [y%2 == 0 for y in y_train]
y_str = [str(y) for y in y_train]
df = pd.DataFrame({"y":y_train,"even":y_bools,"name":y_str})
df.index.rename('index', inplace=True)
df.to_csv('data/mnist.csv')
# KNN data for tests
data = np.memmap("data/mnist.dat", dtype=np.float32)
data = data.reshape([-1,784])
tree = KDTree(data, leaf_size=2)
dist, ind = tree.query(data[:100], k=5)
dist, ind = tree.query(np.zeros([1,784],dtype=np.float32), k=5)
nbrs = {"d0":dist[:,0],
"d1":dist[:,1],
"d2":dist[:,2],
"d3":dist[:,3],
"d4":dist[:,4],
"i0": ind[:,0],
"i1": ind[:,1],
"i2": ind[:,2],
"i3": ind[:,3],
"i4": ind[:,4],}
csv = pd.DataFrame(nbrs)
csv.to_csv("data/mnist_nbrs.csv")
| 23.168675 | 65 | 0.660426 |
73fd4e7f141456a1b7ae053b19716498b95b4ddf | 2,368 | py | Python | exercises/phone-number/phone_number_test.py | jamesmcm/python | 9f1d41da23ff7248e891ee1c8f01d11793e7d3ea | [
"MIT"
] | null | null | null | exercises/phone-number/phone_number_test.py | jamesmcm/python | 9f1d41da23ff7248e891ee1c8f01d11793e7d3ea | [
"MIT"
] | null | null | null | exercises/phone-number/phone_number_test.py | jamesmcm/python | 9f1d41da23ff7248e891ee1c8f01d11793e7d3ea | [
"MIT"
] | 1 | 2021-12-29T19:26:23.000Z | 2021-12-29T19:26:23.000Z |
import unittest
from phone_number import Phone
# Tests adapted from `problem-specifications//canonical-data.json` @ v1.2.0
class PhoneTest(unittest.TestCase):
def test_cleans_number(self):
number = Phone("(223) 456-7890").number
self.assertEqual(number, "2234567890")
def test_cleans_number_with_dots(self):
number = Phone("223.456.7890").number
self.assertEqual(number, "2234567890")
def test_cleans_number_with_multiple_spaces(self):
number = Phone("223 456 7890 ").number
self.assertEqual(number, "2234567890")
def test_invalid_when_9_digits(self):
number = Phone("123456789").number
self.assertEqual(number, "0000000000")
def test_invalid_when_11_digits_and_first_not_1(self):
number = Phone("22234567890").number
self.assertEqual(number, "0000000000")
def test_valid_when_11_digits_and_first_is_1(self):
number = Phone("12234567890").number
self.assertEqual(number, "2234567890")
def test_valid_when_11_digits_and_first_is_1_with_punctuation(self):
number = Phone("+1 (223) 456-7890").number
self.assertEqual(number, "2234567890")
def test_invalid_when_more_than_11_digits(self):
number = Phone("321234567890").number
self.assertEqual(number, "0000000000")
def test_invalid_with_letters(self):
number = Phone("123-abc-7890").number
self.assertEqual(number, "0000000000")
def test_invalid_with_punctuation(self):
number = Phone("123-@:!-7890").number
self.assertEqual(number, "0000000000")
def test_invalid_area_code(self):
number = Phone("(123) 456-7890").number
self.assertEqual(number, "0000000000")
def test_invalid_exchange_code(self):
number = Phone("(223) 056-7890").number
self.assertEqual(number, "0000000000")
# Track specific tests
def test_area_code(self):
number = Phone("2234567890")
self.assertEqual(number.area_code(), "223")
def test_pretty_print(self):
number = Phone("2234567890")
self.assertEqual(number.pretty(), "(223) 456-7890")
def test_pretty_print_with_full_us_phone_number(self):
number = Phone("12234567890")
self.assertEqual(number.pretty(), "(223) 456-7890")
if __name__ == '__main__':
unittest.main()
| 32.438356 | 75 | 0.681588 |
73fd51265998400557f74bd8bb1975cc8e4cfcdf | 193 | py | Python | testmaster/dummyprograms/fibonacci.py | S-P-A-N-N-E-R-S/Testmaster | e0804b30a6f77c136eca890e692ea54195a31bb9 | [
"BSD-3-Clause"
] | null | null | null | testmaster/dummyprograms/fibonacci.py | S-P-A-N-N-E-R-S/Testmaster | e0804b30a6f77c136eca890e692ea54195a31bb9 | [
"BSD-3-Clause"
] | null | null | null | testmaster/dummyprograms/fibonacci.py | S-P-A-N-N-E-R-S/Testmaster | e0804b30a6f77c136eca890e692ea54195a31bb9 | [
"BSD-3-Clause"
] | null | null | null |
import sys
def fibonacci(n):
if n in {0, 1}: # Base case
return n
return fibonacci(n - 1) + fibonacci(n - 2)
x = int(sys.argv[1])
print ("fibonacci von", x, "=", fibonacci(x))
| 24.125 | 46 | 0.580311 |
73fd5872db86dd2a7ef5ebb3ad6fc6b8b122cc52 | 414 | py | Python | components/py_engine/tests/haas/HaaSEdu/pyamp/main.py | ekmixon/AliOS-Things | 00334295af8aa474d818724149726ca93da4645d | [
"Apache-2.0"
] | 4,538 | 2017-10-20T05:19:03.000Z | 2022-03-30T02:29:30.000Z | components/py_engine/tests/haas/HaaSEdu/pyamp/main.py | ekmixon/AliOS-Things | 00334295af8aa474d818724149726ca93da4645d | [
"Apache-2.0"
] | 1,088 | 2017-10-21T07:57:22.000Z | 2022-03-31T08:15:49.000Z | components/py_engine/tests/haas/HaaSEdu/pyamp/main.py | willianchanlovegithub/AliOS-Things | 637c0802cab667b872d3b97a121e18c66f256eab | [
"Apache-2.0"
] | 1,860 | 2017-10-20T05:22:35.000Z | 2022-03-27T10:54:14.000Z |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
###
# Filename: /Users/guoliang.wgl/Downloads/aos33_new/components/py_engine/tests/haas/HaaS100/pyamp/main.py
# Path: /Users/guoliang.wgl/Downloads/aos33_new/components/py_engine/tests/haas/HaaS100/pyamp
# Created Date: Monday, July 12th 2021, 4:01:02 pm
# Author: guoliang.wgl
#
# Copyright (c) 2021 AliBaBa
###
if __name__ == '__main__':
print('Hello Python')
| 25.875 | 105 | 0.722222 |
73fd67dc5b29f6c43cc1814612ec37af17296684 | 1,317 | py | Python | venv/Lib/site-packages/pyrogram/raw/functions/stats/__init__.py | D1ne2021/jjhhhjj | a090da30983b3ef276dfe4cef2ded4526f36002a | [
"MIT"
] | 5 | 2021-09-11T22:01:15.000Z | 2022-03-16T21:33:42.000Z | backend/pyrogram/raw/functions/stats/__init__.py | iamatlasss/social-media-analyzer | 429d1d2bbd8bfce80c50c5f8edda58f87ace668d | [
"Apache-2.0"
] | null | null | null | backend/pyrogram/raw/functions/stats/__init__.py | iamatlasss/social-media-analyzer | 429d1d2bbd8bfce80c50c5f8edda58f87ace668d | [
"Apache-2.0"
] | 3 | 2022-01-18T11:06:22.000Z | 2022-02-26T13:39:28.000Z |
# Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2021 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
# # # # # # # # # # # # # # # # # # # # # # # #
# !!! WARNING !!! #
# This is a generated file! #
# All changes made in this file will be lost! #
# # # # # # # # # # # # # # # # # # # # # # # #
from .get_broadcast_stats import GetBroadcastStats
from .load_async_graph import LoadAsyncGraph
from .get_megagroup_stats import GetMegagroupStats
from .get_message_public_forwards import GetMessagePublicForwards
from .get_message_stats import GetMessageStats
| 43.9 | 75 | 0.689446 |
73fd70d6c54514a287d4f538187f070dab1b978a | 2,444 | py | Python | python/basic/heap/max_heap.py | linshaoyong/leetcode | ea052fad68a2fe0cbfa5469398508ec2b776654f | [
"MIT"
] | 6 | 2019-07-15T13:23:57.000Z | 2020-01-22T03:12:01.000Z | python/basic/heap/max_heap.py | linshaoyong/leetcode | ea052fad68a2fe0cbfa5469398508ec2b776654f | [
"MIT"
] | null | null | null | python/basic/heap/max_heap.py | linshaoyong/leetcode | ea052fad68a2fe0cbfa5469398508ec2b776654f | [
"MIT"
] | 1 | 2019-07-24T02:15:31.000Z | 2019-07-24T02:15:31.000Z | class MaxHeap:
def __init__(self):
self.data = []
def root_node(self):
return self.data[0]
def last_node(self):
return self.data[-1]
def left_child_index(self, index):
return 2 * index + 1
def right_child_index(self, index):
return 2 * index + 2
def parent_index(self, index):
return (index - 1) // 2
def insert(self, value):
self.data.append(value)
ci = len(self.data) - 1
pi = self.parent_index(ci)
while pi >= 0 and self.data[pi] < self.data[ci]:
self.data[pi], self.data[ci] = self.data[ci], self.data[pi]
ci = pi
pi = self.parent_index(ci)
def delete(self):
last = self.data.pop()
self.data[0] = last
ci = 0
while self.has_larger_child(ci):
gi = self.calculate_larger_child_index(ci)
self.data[gi], self.data[ci] = self.data[ci], self.data[gi]
ci = gi
def has_larger_child(self, index):
li = self.left_child_index(index)
ri = self.right_child_index(index)
if li < len(self.data) and self.data[li] > self.data[index]:
return True
if ri < len(self.data) and self.data[ri] > self.data[index]:
return True
return False
def calculate_larger_child_index(self, index):
li = self.left_child_index(index)
ri = self.right_child_index(index)
if ri >= len(self.data):
return li
if self.data[li] > self.data[ri]:
return li
return ri
def test_heap_insert():
heap = MaxHeap()
heap.insert(5)
assert 5 == heap.root_node()
heap.insert(8)
assert 8 == heap.root_node()
heap.insert(7)
assert 8 == heap.root_node()
heap.insert(1)
assert 8 == heap.root_node()
heap.insert(9)
assert 9 == heap.root_node()
heap.insert(7)
assert 9 == heap.root_node()
def test_heap_delete():
heap = MaxHeap()
heap.insert(5)
heap.insert(8)
heap.delete()
assert 5 == heap.root_node()
heap.insert(1)
heap.insert(9)
heap.insert(6)
heap.insert(5)
heap.insert(8)
heap.delete()
assert 8 == heap.root_node()
heap.delete()
assert 6 == heap.root_node()
heap.delete()
assert 5 == heap.root_node()
heap.delete()
assert 5 == heap.root_node()
heap.delete()
assert 1 == heap.root_node()
| 22.841121 | 71 | 0.568331 |
73fd8e400ddaf7962e2e4763f4949dde3192c1d3 | 9,253 | py | Python | appengine/findit/handlers/test/collect_tree_closures_test.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | [
"BSD-3-Clause"
] | 2 | 2021-04-13T21:22:18.000Z | 2021-09-07T02:11:57.000Z | appengine/findit/handlers/test/collect_tree_closures_test.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | [
"BSD-3-Clause"
] | 21 | 2020-09-06T02:41:05.000Z | 2022-03-02T04:40:01.000Z | appengine/findit/handlers/test/collect_tree_closures_test.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from datetime import datetime
import json
import mock
from google.appengine.ext import ndb
import webapp2
from gae_libs.testcase import TestCase
from handlers import collect_tree_closures
from model.tree_closure import TreeClosure
from model.tree_closure import TreeStatus
class CollectTreeClosuresTest(TestCase):
app_module = webapp2.WSGIApplication(
[
('/collect-tree-closures', collect_tree_closures.CollectTreeClosures),
],
debug=True)
def testGetCurrentCheckingPointForTreeWithoutExistingData(self):
self.MockUTCNow(datetime(2017, 04, 13, 10, 10, 10))
expected_checking_point = datetime(2017, 01, 13, 10, 10, 10)
checking_point = collect_tree_closures._GetCurrentCheckingPointForTree('c')
self.assertEqual(expected_checking_point, checking_point)
def testGetCurrentCheckingPointForTreeWithExistingData(self):
TreeClosure(tree_name='c', closed_time=datetime(2017, 04, 10, 10, 10)).put()
TreeClosure(
tree_name='c',
closed_time=datetime(2017, 04, 11, 05, 05),
opened_time=datetime(2017, 04, 11, 05, 15)).put()
expected_checking_point = datetime(2017, 04, 11, 05, 15)
checking_point = collect_tree_closures._GetCurrentCheckingPointForTree('c')
self.assertEqual(expected_checking_point, checking_point)
@mock.patch.object(collect_tree_closures.FinditHttpClient, 'Get')
def testRetrieveTreeStatusSuccess(self, mocked_Get):
mocked_Get.side_effect = [(200,
json.dumps([{
'date': '2017-04-01 12:12:12',
'message': 'm1',
'general_state': 'open',
'username': 'test@chromium.org',
}, {
'date': '2017-04-01 12:12:12',
'message': 'm1',
'general_state': 'open',
'username': 'test@chromium.org',
}]), {})]
statuses = collect_tree_closures._RetrieveTreeStatus(
'chromium', datetime(2017, 03, 31))
self.assertEqual(1, len(statuses))
self.assertEqual(statuses[0].time, datetime(2017, 04, 01, 12, 12, 12))
self.assertEqual(statuses[0].message, 'm1')
self.assertEqual(statuses[0].state, 'open')
self.assertEqual(statuses[0].username, 'test@chromium.org')
mocked_Get.assert_called_once_with(
'https://chromium-status.appspot.com/allstatus',
params={
'limit': 1000,
'format': 'json',
'endTime': 1490918400,
})
@mock.patch.object(collect_tree_closures.FinditHttpClient, 'Get')
def testRetrieveTreeStatusFailure(self, mocked_Get):
mocked_Get.side_effect = [(400, 'error', {})]
statuses = collect_tree_closures._RetrieveTreeStatus(
'chromium', datetime(2017, 03, 31), end_time=datetime(2017, 04, 01))
self.assertEqual(0, len(statuses))
mocked_Get.assert_called_once_with(
'https://chromium-status.appspot.com/allstatus',
params={
'limit': 1000,
'format': 'json',
'endTime': 1490918400,
'startTime': 1491004800,
})
def testExtractFailureInfoWithFullBuildLink(self):
message = ('Tree is closed (Automatic: "compile" on '
'http://build.chromium.org/p/m/builders/b/builds/1 "b" from ...')
info = collect_tree_closures._ExtractFailureInfo(message)
self.assertEqual(('m', 'b', '1', 'compile'), info)
def testExtractFailureInfoWithPartialBuildLink(self):
message = ('Tree is closed (Automatic: "compile" on '
'/builders/b/builds/1 "b" from ...')
info = collect_tree_closures._ExtractFailureInfo(message)
self.assertEqual((None, 'b', '1', 'compile'), info)
def testExtractFailureInfoWithUnknownMessageFormat(self):
message = 'Tree is closed for blink rename'
info = collect_tree_closures._ExtractFailureInfo(message)
self.assertEqual((None, None, None, None), info)
def testDetectTreeClosureForTreeWithOneCompleteClosure(self):
all_statuses = [
TreeStatus(state='open'),
# A complete closure.
TreeStatus(
time=datetime(2017, 03, 31, 0, 0, 0), # timestamp is 1490918400.
message=('Tree is closed (Automatic: "compile" on '
'/builders/Win%20x64/builds/10327 "Win x64" from blabla'),
state='closed',
username='buildbot@chromium.org',
),
TreeStatus(
time=datetime(2017, 03, 31, 0, 1, 0),
message='Tree is closed (sheriff investigating)',
state='closed',
username='test@chromium.org',
),
TreeStatus(
time=datetime(2017, 03, 31, 0, 5, 0),
message='possible flake',
state='open',
username='test@chromium.org',
),
TreeStatus(
time=datetime(2017, 03, 31, 0, 15, 0),
message='speculative Reverted r12345678',
state='open',
username='test@chromium.org',
),
# An incomplete closure.
TreeStatus(state='closed')
]
num = collect_tree_closures._DetectTreeClosureForTree('c', all_statuses)
self.assertEqual(1, num)
key_str_id = '%s-%s' % ('c', 1490918400)
closure = ndb.Key(TreeClosure, key_str_id).get()
self.assertIsNotNone(closure)
self.assertEqual('c', closure.tree_name)
self.assertEqual(all_statuses[1:-1], closure.statuses)
self.assertEqual(datetime(2017, 03, 31, 0, 0, 0), closure.closed_time)
self.assertEqual(datetime(2017, 03, 31, 0, 5, 0), closure.opened_time)
self.assertEqual(
datetime(2017, 03, 31, 0, 15, 0), closure.latest_action_time)
self.assertTrue(closure.auto_closed)
self.assertFalse(closure.auto_opened)
self.assertTrue(closure.possible_flake)
self.assertTrue(closure.has_revert)
self.assertIsNone(closure.master_name)
self.assertEqual('Win x64', closure.builder_name)
self.assertEqual('10327', closure.build_id)
self.assertEqual('compile', closure.step_name)
def testDetectTreeClosureForTreeWithIncompleteClosure(self):
all_statuses = [
# A incomplete closure.
TreeStatus(
time=datetime(2017, 03, 31, 0, 0, 0), # timestamp is 1490918400.
message=('Tree is closed (Automatic: "compile" on '
'/builders/Win%20x64/builds/10327 "Win x64" from blabla'),
state='closed',
username='buildbot@chromium.org',
),
TreeStatus(
time=datetime(2017, 03, 31, 0, 15, 0),
message='possible flake',
state='open',
username='test@chromium.org',
),
]
num = collect_tree_closures._DetectTreeClosureForTree('c', all_statuses)
self.assertEqual(0, num)
key_str_id = '%s-%s' % ('c', 1490918400)
closure = ndb.Key(TreeClosure, key_str_id).get()
self.assertIsNone(closure)
@mock.patch.object(
collect_tree_closures,
'_GetCurrentCheckingPointForTree',
return_value=datetime(2017, 03, 01))
@mock.patch.object(
collect_tree_closures, '_RetrieveTreeStatus', return_value=['a'])
@mock.patch.object(
collect_tree_closures, '_DetectTreeClosureForTree', return_value=2)
def testGetWithStartTimeAndEndTime(self, mocked_detect_fun,
mocked_retrive_fun, mocked_check_fun):
response = self.test_app.get(
'/collect-tree-closures',
params={'start_time': '2017-04-01',
'end_time': '2017-04-05'},
headers={
'X-AppEngine-Cron': 'true'
})
self.assertEquals(200, response.status_int)
expected_result = {'chromium': 2}
self.assertEqual(expected_result, response.json_body)
self.assertFalse(mocked_check_fun.called)
mocked_retrive_fun.assert_called_once_with(
'chromium', datetime(2017, 04, 01), end_time=datetime(2017, 04, 05))
mocked_detect_fun.assert_called_once_with('chromium', ['a'])
@mock.patch.object(
collect_tree_closures,
'_GetCurrentCheckingPointForTree',
return_value=datetime(2017, 04, 01))
@mock.patch.object(
collect_tree_closures, '_RetrieveTreeStatus', return_value=['a'])
@mock.patch.object(
collect_tree_closures, '_DetectTreeClosureForTree', return_value=2)
def testGetWithoutStartTime(self, mocked_detect_fun, mocked_retrive_fun,
mocked_check_fun):
response = self.test_app.get(
'/collect-tree-closures', headers={
'X-AppEngine-Cron': 'true'
})
self.assertEquals(200, response.status_int)
expected_result = {'chromium': 2}
self.assertEqual(expected_result, response.json_body)
mocked_check_fun.assert_called_once_with('chromium')
mocked_retrive_fun.assert_called_once_with(
'chromium', datetime(2017, 04, 01), end_time=None)
mocked_detect_fun.assert_called_once_with('chromium', ['a'])
| 40.583333 | 80 | 0.637307 |
73fdbdb3f342779e8a77df41f2a9713a01363db2 | 461 | py | Python | modeling/losses/__init__.py | WenmuZhou/crnn.pytorch | bf7a7c62376eee93943ca7c68e88e3d563c09aa8 | [
"Apache-2.0"
] | 46 | 2018-05-29T08:01:10.000Z | 2022-02-14T21:47:40.000Z | modeling/losses/__init__.py | WenmuZhou/crnn.pytorch | bf7a7c62376eee93943ca7c68e88e3d563c09aa8 | [
"Apache-2.0"
] | null | null | null | modeling/losses/__init__.py | WenmuZhou/crnn.pytorch | bf7a7c62376eee93943ca7c68e88e3d563c09aa8 | [
"Apache-2.0"
] | 17 | 2018-11-14T09:17:00.000Z | 2021-08-06T04:05:07.000Z | # -*- coding: utf-8 -*-
# @Time : 2020/6/17 11:17
# @Author : zhoujun
import copy
from .CTCLoss import CTCLoss
from .AttnLoss import AttnLoss
__all__ = ['build_loss']
support_loss = ['CTCLoss', 'AttnLoss']
def build_loss(config):
copy_config = copy.deepcopy(config)
loss_type = copy_config.pop('type')
assert loss_type in support_loss, f'all support loss is {support_loss}'
criterion = eval(loss_type)(**copy_config)
return criterion
| 25.611111 | 75 | 0.700651 |
73fdc17de9bd3fa68df67ef762fd2326b883f4c2 | 31,855 | py | Python | storm_control/hal4000/hal4000.py | BoettigerLab/scopeTest | 50c6601c952adf1d63c7f27b1aba8890e789c280 | [
"MIT"
] | 1 | 2021-03-17T20:25:59.000Z | 2021-03-17T20:25:59.000Z | storm_control/hal4000/hal4000.py | BoettigerLab/scopeTest | 50c6601c952adf1d63c7f27b1aba8890e789c280 | [
"MIT"
] | null | null | null | storm_control/hal4000/hal4000.py | BoettigerLab/scopeTest | 50c6601c952adf1d63c7f27b1aba8890e789c280 | [
"MIT"
] | 1 | 2021-03-17T21:24:35.000Z | 2021-03-17T21:24:35.000Z | #!/usr/bin/env python
"""
Heuristically programmed ALgorithmic STORM setup control.
This module handles setup, clean up and message passing
between the various sub-modules that define the
behavior. Each of these modules must be a sub-class of
the HalModule class in halLib.halModule. Setup specific
configuration is provided by a 'XX_config.xml' file
examples of which can be found in the xml folder.
In addition this module handles drag/drops and
the film notes QTextEdit.
Jeff 03/14
Hazen 01/17
"""
from collections import deque
import faulthandler
import importlib
import os
import signal
import time
import sys # because python is extremely stupid and can't read my .pth file
sys.path.append(r'C:\Users\Scope3\Desktop\MicroscopeSoftware\Hal2')
# print(sys.path)
from PyQt5 import QtCore, QtGui, QtWidgets
import storm_control.sc_library.halExceptions as halExceptions
import storm_control.sc_library.hdebug as hdebug
import storm_control.sc_library.hgit as hgit
import storm_control.sc_library.parameters as params
import storm_control.hal4000.halLib.halDialog as halDialog
import storm_control.hal4000.halLib.halMessage as halMessage
import storm_control.hal4000.halLib.halMessageBox as halMessageBox
import storm_control.hal4000.halLib.halModule as halModule
import storm_control.hal4000.qtWidgets.qtAppIcon as qtAppIcon
app = None
def ctrlCHandler(sig, frame):
print("CTRL-C Handler:")
print("Trackback:")
faulthandler.dump_traceback()
print("")
print("Aborting now")
assert(False)
#
# Main window controller.
#
class HalController(halModule.HalModule):
"""
HAL main window controller.
"""
def __init__(self, module_params = None, qt_settings = None, **kwds):
super().__init__(**kwds)
if (module_params.get("ui_type") == "classic"):
self.view = ClassicView(module_params = module_params,
qt_settings = qt_settings,
**kwds)
else:
self.view = DetachedView(module_params = module_params,
qt_settings = qt_settings,
**kwds)
self.view.guiMessage.connect(self.handleGuiMessage)
def cleanUp(self, qt_settings):
self.view.cleanUp(qt_settings)
def handleGuiMessage(self, message):
"""
This just passes through the messages from the GUI after
correcting the source.
"""
self.sendMessage(message)
def processMessage(self, message):
if message.isType("add to menu"):
self.view.addMenuItem(message.getData()["item name"],
message.getData()["item data"])
elif message.isType("add to ui"):
[module, parent_widget] = message.getData()["ui_parent"].split(".")
if (module == self.module_name):
self.view.addUiWidget(parent_widget,
message.getData()["ui_widget"],
message.getData().get("ui_order"))
elif message.isType("change directory"):
self.view.setFilmDirectory(message.getData()["directory"])
elif message.isType("start"):
if message.getData()["show_gui"]:
self.view.addMenuItems()
self.view.addWidgets()
self.view.show()
self.sendMessage(halMessage.HalMessage(m_type = "change directory",
data = {"directory" : self.view.getFilmDirectory()}))
elif message.isType("start film"):
self.view.startFilm(message.getData()["film settings"])
elif message.isType("stop film"):
self.view.stopFilm()
notes_param = params.ParameterString(name = "notes",
value = self.view.getNotesEditText())
message.addResponse(halMessage.HalMessageResponse(source = self.module_name,
data = {"acquisition" : [notes_param]}))
elif message.isType("tests done", check_valid = False):
self.view.close()
#
# Main window View.
#
class HalView(QtWidgets.QMainWindow):
"""
HAL main window view.
"""
guiMessage = QtCore.pyqtSignal(object)
def __init__(self, module_name = None, module_params = None, qt_settings = None, **kwds):
super().__init__(**kwds)
self.close_now = False
self.close_timer = QtCore.QTimer(self)
self.film_directory = module_params.get("directory")
self.menu_items_to_add = []
self.module_name = module_name
self.widgets_to_add = []
# Configure UI.
if self.classic_view:
import storm_control.hal4000.qtdesigner.hal4000_ui as hal4000Ui
else:
import storm_control.hal4000.qtdesigner.hal4000_detached_ui as hal4000Ui
self.ui = hal4000Ui.Ui_MainWindow()
self.ui.setupUi(self)
# Create layout for the cameraFrame.
if self.classic_view:
vbox_layout = QtWidgets.QVBoxLayout(self.ui.cameraFrame)
vbox_layout.setContentsMargins(0,0,0,0)
vbox_layout.setSpacing(0)
self.ui.cameraFrame.setLayout(vbox_layout)
# Create layout for settings, film, etc..
vbox_layout = QtWidgets.QVBoxLayout(self.ui.containerWidget)
vbox_layout.setContentsMargins(0,0,0,0)
vbox_layout.setSpacing(0)
self.ui.containerWidget.setLayout(vbox_layout)
# Set icon.
self.setWindowIcon(qtAppIcon.QAppIcon())
# Set title
title = module_params.get("setup_name")
if (hgit.getBranch().lower() != "master"):
title += " (" + hgit.getBranch() + ")"
self.setWindowTitle(title)
# Configure based on saved settings.
self.move(qt_settings.value(self.module_name + ".pos", self.pos()))
self.resize(qt_settings.value(self.module_name + ".size", self.size()))
self.xml_directory = str(qt_settings.value(self.module_name + ".xml_directory",
self.film_directory))
# ui signals
self.ui.actionDirectory.triggered.connect(self.handleDirectory)
self.ui.actionSettings.triggered.connect(self.handleSettings)
self.ui.actionShutter.triggered.connect(self.handleShutters)
self.ui.actionQuit.triggered.connect(self.handleQuit)
# Configure close timer.
self.close_timer.setInterval(5)
self.close_timer.timeout.connect(self.handleCloseTimer)
self.close_timer.setSingleShot(True)
def addMenuItem(self, item_name, item_data):
"""
A menu item (from another module) that should be added to the file menu.
"""
self.menu_items_to_add.append([item_name, item_data])
def addMenuItems(self):
"""
This actually adds the items to the file menu.
"""
if (len(self.menu_items_to_add) > 0):
for item in sorted(self.menu_items_to_add, key = lambda x : x[0]):
a_action = QtWidgets.QAction(self.tr(item[0]), self)
self.ui.menuFile.insertAction(self.ui.actionQuit, a_action)
a_action.triggered.connect(lambda x, item_data = item[1] : self.handleMenuMessage(item_data))
self.ui.menuFile.insertSeparator(self.ui.actionQuit)
def addUiWidget(self, parent_widget_name, ui_widget, ui_order):
"""
A UI widget (from another module) to the list of widgets to add.
"""
if ui_order is None:
ui_order = 0
self.widgets_to_add.append([parent_widget_name, ui_widget, ui_order])
def addWidgets(self):
"""
This actually adds the widgets to UI.
"""
for to_add in sorted(self.widgets_to_add, key = lambda x: x[2]):
[parent_widget_name, ui_widget] = to_add[:2]
hal_widget = getattr(self.ui, parent_widget_name)
ui_widget.setParent(hal_widget)
layout = hal_widget.layout()
layout.addWidget(ui_widget)
def cleanUp(self, qt_settings):
"""
Save GUI settings and close.
"""
qt_settings.setValue(self.module_name + ".pos", self.pos())
qt_settings.setValue(self.module_name + ".size", self.size())
qt_settings.setValue(self.module_name + ".xml_directory", self.xml_directory)
self.close_now = True
self.close()
def closeEvent(self, event):
#
# This is a little fiddly. Basically the problem is that we'll get event
# if the user clicks on the X in the upper right corner of the window.
# In that case we don't want to close right away as core needs some
# time to clean up the modules. However we also get this event when
# we call close() and at that point we do want to close.
#
# We use a timer with a small delay because without it it appeared
# that this method was getting called twice with same event object when
# we clicked on the X, and this meant that you had to click the X
# twice.
#
if not self.close_now:
event.ignore()
self.close_timer.start()
def dragEnterEvent(self, event):
if event.mimeData().hasUrls():
event.accept()
else:
event.ignore()
def dropEvent(self, event):
# Get filename(s)
filenames = []
for url in event.mimeData().urls():
filenames.append(str(url.toLocalFile()))
# Send message(s) with filenames.
for filename in sorted(filenames):
[file_type, error_text] = params.fileType(filename)
if (file_type == "parameters"):
self.xml_directory = os.path.dirname(filename)
self.guiMessage.emit(halMessage.HalMessage(m_type = "new parameters file",
data = {"filename" : filename}))
elif (file_type == "shutters"):
self.xml_directory = os.path.dirname(filename)
self.guiMessage.emit(halMessage.HalMessage(m_type = "new shutters file",
data = {"filename" : filename}))
else:
if error_text:
halMessageBox.halMessageBoxInfo("XML file parsing error " + error_text + ".")
else:
halMessageBox.halMessageBoxInfo("File type not recognized.")
def getFilmDirectory(self):
return self.film_directory
def getNotesEditText(self):
return self.ui.notesEdit.toPlainText()
def handleCloseTimer(self):
self.guiMessage.emit(halMessage.HalMessage(m_type = "close event",
sync = True))
def handleDirectory(self, boolean):
new_directory = QtWidgets.QFileDialog.getExistingDirectory(self,
"New Directory",
self.film_directory,
QtWidgets.QFileDialog.ShowDirsOnly)
if new_directory and os.path.exists(new_directory):
self.film_directory = new_directory
self.guiMessage.emit(halMessage.HalMessage(m_type = "change directory",
data = {"directory" : self.film_directory}))
def handleMenuMessage(self, item_data):
self.guiMessage.emit(halMessage.HalMessage(m_type = "show",
data = {"show" : item_data}))
def handleSettings(self, boolean):
parameters_filename = QtWidgets.QFileDialog.getOpenFileName(self,
"New Settings",
self.xml_directory,
"*.xml")[0]
if parameters_filename:
self.xml_directory = os.path.dirname(parameters_filename)
self.guiMessage.emit(halMessage.HalMessage(m_type = "new parameters file",
data = {"filename" : parameters_filename}))
def handleShutters(self, boolean):
shutters_filename = QtWidgets.QFileDialog.getOpenFileName(self,
"New Shutter Sequence",
self.xml_directory,
"*.xml")[0]
if shutters_filename:
self.xml_directory = os.path.dirname(shutters_filename)
self.guiMessage.emit(halMessage.HalMessage(m_type = "new shutters file",
data = {"filename" : shutters_filename}))
def handleQuit(self, boolean):
self.close_now = True
self.guiMessage.emit(halMessage.HalMessage(m_type = "close event",
sync = True))
def setFilmDirectory(self, film_directory):
self.film_directory = film_directory
def startFilm(self, film_settings):
pass
def stopFilm(self):
pass
class ClassicView(HalView):
"""
The 'classic' main window view. The record button is handled
by the camera view.
"""
def __init__(self, **kwds):
self.classic_view = True
super().__init__(**kwds)
class DetachedView(HalView):
"""
The 'detached' main window view. This includes a record
button that this view has to handle.
"""
def __init__(self, **kwds):
self.classic_view = False
super().__init__(**kwds)
self.ui.recordButton.clicked.connect(self.handleRecordButton)
def handleRecordButton(self, boolean):
self.guiMessage.emit(self.ui.recordButton.getHalMessage())
def startFilm(self, film_settings):
self.ui.recordButton.startFilm(film_settings)
def stopFilm(self):
self.ui.recordButton.stopFilm()
#
# The core..
#
class HalCore(QtCore.QObject):
"""
The core of it all. It sets everything else up, handles
the message passing and tears everything down.
"""
def __init__(self, config = None,
parameters_file_name = None,
testing_mode = False,
show_gui = True,
**kwds):
super().__init__(**kwds)
self.modules = []
self.module_name = "core"
self.qt_settings = QtCore.QSettings("storm-control", "hal4000" + config.get("setup_name").lower())
self.queued_messages = deque()
self.queued_messages_timer = QtCore.QTimer(self)
self.running = True # This is solely for the benefit of unit tests.
self.sent_messages = []
self.strict = config.get("strict", False)
self.queued_messages_timer.setInterval(0)
self.queued_messages_timer.timeout.connect(self.handleSendMessage)
self.queued_messages_timer.setSingleShot(True)
# Initialize messages.
halMessage.initializeMessages()
# In strict mode we all workers must finish in 60 seconds.
if self.strict:
halModule.max_job_time = 60000
# Load all the modules.
print("Loading modules")
#
# For HAL it is easier to just use a list of modules, but at initialization
# we also send a dictionary with the module names as keys to all of the
# modules
#
# In testing mode the testing.testing module may use the other modules to
# spoof the message sources.
#
# During normal operation most inter-module communication is done using
# messages. Modules may also request functionalities from other modules
# that they can use to do specific tasks, such as daq output or displaying
# the images from a camera.
#
all_modules = {}
if testing_mode:
all_modules["core"] = self
else:
all_modules["core"] = True
#
# Need to load HAL's main window first so that other GUI windows will
# have the correct Qt parent.
#
module_names = sorted(config.get("modules").getAttrs())
module_names.insert(0, module_names.pop(module_names.index("hal")))
for module_name in module_names:
print(" " + module_name)
# Get module specific parameters.
module_params = config.get("modules").get(module_name)
print(module_params)
# Add the 'root' parameters to this module parameters
# so that they are visible to the module.
for root_param in config.getAttrs():
if (root_param != "modules"):
module_params.add(root_param, config.getp(root_param))
# Load the module.
a_module = importlib.import_module(module_params.get("module_name"))
a_class = getattr(a_module, module_params.get("class_name"))
a_object = a_class(module_name = module_name,
module_params = module_params,
qt_settings = self.qt_settings)
# If this is HAL's main window set the HalDialog qt_parent class
# attribute so that any GUI QDialogs will have the correct Qt parent.
if (module_name == "hal"):
halDialog.HalDialog.qt_parent = a_object.view
self.modules.append(a_object)
if testing_mode:
all_modules[module_name] = a_object
else:
all_modules[module_name] = True
print("")
# Connect signals.
for module in self.modules:
module.newMessage.connect(self.handleMessage)
# Create messages.
#
# We do it this way with finalizers because otherwise all of these messages
# would get queued first and the modules would not have a chance to insert
# messages in between these messages.
#
# The actual sequence of sent messages is:
#
# 1. "configure1", tell modules to finish configuration.
# The message includes a dictionary of the names of
# all modules that were loaded.
#
# 2. "configure2", gives the modules a chance to 'react'
# based on what happened during configure1.
#
# 3. "configure3", gives the modules a chance to 'react'
# based on what happened during configure2.
#
# 4. "new parameters file", initial parameters (if any).
#
# 5. "start", tell the modules to start.
# This is the point where any GUI modules that are
# visible should call show().
#
message_chain = []
# configure1.
message_chain.append(halMessage.HalMessage(source = self,
m_type = "configure1",
data = {"all_modules" : all_modules}))
# configure2.
message_chain.append(halMessage.HalMessage(source = self,
m_type = "configure2"))
# configure3.
message_chain.append(halMessage.HalMessage(source = self,
m_type = "configure3"))
# update default parameters.
if parameters_file_name is not None:
message_chain.append(halMessage.HalMessage(source = self,
m_type = "new parameters file",
data = {"parameters filename" : parameters_file_name,
"is_default" : True}))
# start.
#
# It is safe to stop blocking Qt's last window closed behavior after
# this message as HAL's main window will be open.
#
# If we run HAL from another module, in testing for example, app might
# be none.
#
if app is not None:
message_chain.append(halMessage.HalMessage(source = self,
m_type = "start",
data = {"show_gui" : show_gui},
sync = True,
finalizer = lambda : app.setQuitOnLastWindowClosed(True)))
else:
message_chain.append(halMessage.HalMessage(source = self,
m_type = "start",
data = {"show_gui" : show_gui},
sync = True))
message_chain.append(halMessage.SyncMessage(source = self))
self.handleMessage(halMessage.chainMessages(self.handleMessage,
message_chain))
def close(self):
"""
This is called by qtbot at the end of a test.
"""
self.cleanUp()
def cleanUp(self):
print("Stopping modules")
for module in self.modules:
print(" " + module.module_name)
module.cleanUp(self.qt_settings)
print("Waiting for QThreadPool to finish.")
halModule.threadpool.waitForDone()
self.running = False
print(" Dave? What are you doing Dave?")
print(" ...")
def findChild(self, qt_type, name, options = QtCore.Qt.FindChildrenRecursively):
"""
Overwrite the QT version as the 'child' will be (hopefully) be in one of
the modules.
"""
for module in self.modules:
print(module)
m_child = module.findChild(qt_type, name, options)
if m_child is not None:
return m_child
assert False, "UI element " + name + " not found."
def handleErrors(self, message):
"""
Handle errors in messages from 'core'
"""
for m_error in message.getErrors():
msg = "from '" + m_error.source + "' of type '" + m_error.message + "'!"
# Just print the error and crash on exceptions.
if m_error.hasException():
m_error.printException()
self.cleanUp()
# Use a informational box for warnings.
else:
msg = "Got a warning" + msg
halMessageBox.halMessageBoxInfo(msg)
def handleMessage(self, message):
"""
Adds a message to the queue of images to send.
"""
# Check the message and it to the queue.
if self.strict:
if not message.m_type in halMessage.valid_messages:
msg = "Invalid message type '" + message.m_type
msg += "' received from " + message.getSourceName()
raise halExceptions.HalException(msg)
validator = halMessage.valid_messages[message.m_type].get("data")
halMessage.validateData(validator, message)
message.logEvent("queued")
self.queued_messages.append(message)
# Start the message timer, if it is not already running.
self.startMessageTimer()
def handleProcessed(self, message):
"""
Removes a processed message from the queue of sent messages
and performs message finalization.
"""
# Remove message from list of sent messages.
self.sent_messages.remove(message)
# Disconnect messages processed signal.
message.processed.disconnect(self.handleProcessed)
# Call message finalizer.
message.finalize()
# Always exit on exceptions in strict mode.
if self.strict and message.hasErrors():
for m_error in message.getErrors():
if m_error.hasException():
m_error.printException()
self.cleanUp()
return
# Notify the sender if errors occured while processing the
# message and exit if the sender doesn't handle the error.
if message.hasErrors():
if not message.getSource().handleErrors(message):
self.cleanUp()
return
# Check the responses if we are in strict mode.
if self.strict:
validator = halMessage.valid_messages[message.m_type].get("resp")
for response in message.getResponses():
halMessage.validateResponse(validator, message, response)
# Notify the sender of any responses to the message.
message.getSource().handleResponses(message)
# Print a warning if the message was 'get functionality'
# and there were no responses.
if message.isType("get functionality") and not message.hasResponses():
print(">> Warning functionality '" + message.getData()["name"] + "' not found!")
hdebug.logText("no functionality " + message.getData()["name"])
# Start message processing timer in case there are other messages
# waiting for this message to get finalized.
self.startMessageTimer()
def handleResponses(self, message):
"""
This is just a place holder. There should not be any responses
to message from HalCore.
"""
assert not message.hasResponses()
def handleSendMessage(self):
"""
Handle sending the current message to all the modules.
"""
# Process the next message.
if (len(self.queued_messages) > 0):
cur_message = self.queued_messages.popleft()
#
# If this message requested synchronization and there are
# pending messages then push it back into the queue.
#
if cur_message.sync and (len(self.sent_messages) > 0):
print("> waiting for the following to be processed:")
for message in self.sent_messages:
text = " '" + message.m_type + "' from " + message.getSourceName() + ", "
text += str(message.getRefCount()) + " module(s) have not responded yet."
print(text)
print("")
self.queued_messages.appendleft(cur_message)
#
# Otherwise process the message.
#
else:
print(cur_message.source.module_name + " '" + cur_message.m_type + "'")
# Check for "closeEvent" message from the main window.
if cur_message.isType("close event") and (cur_message.getSourceName() == "hal"):
self.cleanUp()
return
else:
# Check for "sync" message, these don't actually get sent.
if cur_message.isType("sync"):
pass
# Otherwise send the message.
else:
cur_message.logEvent("sent")
cur_message.processed.connect(self.handleProcessed)
self.sent_messages.append(cur_message)
for module in self.modules:
cur_message.ref_count += 1
module.handleMessage(cur_message)
# Process any remaining messages with immediate timeout.
if (len(self.queued_messages) > 0):
self.startMessageTimer()
def startMessageTimer(self, interval = 0):
if not self.queued_messages_timer.isActive():
self.queued_messages_timer.setInterval(interval)
self.queued_messages_timer.start()
if (__name__ == "__main__"):
# Use both so that we can pass sys.argv to QApplication.
import argparse
import sys
# Get command line arguments..
parser = argparse.ArgumentParser(description = 'STORM microscope control software')
parser.add_argument('config', type = str, help = "The name of the configuration file to use.")
parser.add_argument('--xml', dest = 'default_xml', type = str, required = False, default = None,
help = "The name of a settings xml file to use as the default.")
args = parser.parse_args()
# Start..
app = QtWidgets.QApplication(sys.argv)
# This keeps Qt from closing everything if a message box is displayed
# before HAL's main window is shown.
app.setQuitOnLastWindowClosed(False)
# Splash Screen.
pixmap = QtGui.QPixmap("splash.png")
splash = QtWidgets.QSplashScreen(pixmap)
splash.show()
app.processEvents()
# Load configuration.
config = params.config(args.config)
# Start logger.
hdebug.startLogging(config.get("directory") + "logs/", "hal4000")
# Setup HAL and all of the modules.
hal = HalCore(config = config,
parameters_file_name = args.default_xml)
# Hide splash screen and start.
splash.hide()
# Configure ctrl-c handling.
signal.signal(signal.SIGINT, ctrlCHandler)
app.exec_()
#
# The MIT License
#
# Copyright (c) 2017 Zhuang Lab, Harvard University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
| 40.119647 | 114 | 0.562612 |
73fdc5429d65b6b13fc1ef6008661485d1de3485 | 13,454 | py | Python | src/taxi_in/pipelines/test/nodes.py | nasa/ML-airport-taxi-in | 49d9cb3b4f94320102146a2c12ff1c029cb98879 | [
"NASA-1.3"
] | 1 | 2021-10-11T06:08:59.000Z | 2021-10-11T06:08:59.000Z | src/taxi_in/pipelines/test/nodes.py | nasa/ML-airport-taxi-in | 49d9cb3b4f94320102146a2c12ff1c029cb98879 | [
"NASA-1.3"
] | null | null | null | src/taxi_in/pipelines/test/nodes.py | nasa/ML-airport-taxi-in | 49d9cb3b4f94320102146a2c12ff1c029cb98879 | [
"NASA-1.3"
] | null | null | null | """Testing nodes for unimpeded model development
"""
from typing import Any, Dict, Tuple
import logging
import pandas as pd
import numpy as np
import mlflow
from sklearn.pipeline import Pipeline as sklearn_Pipeline
import random
from copy import deepcopy
from math import isclose
def test_valid_prediction(
model_pipeline: sklearn_Pipeline,
data,
model_params: Dict[str, Any],
passed: int,
tests: int,
warning_string='',
) -> Tuple:
# Predict and confirm get a valid output
try:
prediction = model_pipeline.predict(data)[0]
except ValueError:
prediction = False # indicator that prediction failed
if isinstance(prediction, target_type(model_params)):
passed += 1
else:
log = logging.getLogger(__name__)
log.warning(
'failed to generate valid prediction ' +
warning_string
)
tests += 1
return (passed, tests)
def test_missing_features(
data: pd.DataFrame,
model_pipeline: sklearn_Pipeline,
model_params: Dict[str, Any],
active_run_id: str,
) -> None:
"""Node for testing whether the model returns a valid value when
features that should be imputed are missing from the input data.
"""
# Prep for metrics & logging
passed = 0
tests = 0
log = logging.getLogger(__name__)
# Grab a random entry from data
random_idx = random_not_null_idx(data, model_params)
log.info('Testing feature imputation')
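# Only non-core features are blanked out here; 'features_core' are presumably
# required inputs and are therefore not tested for imputation.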
features_not_core = [v for v in model_params['features'] if v not in model_params['features_core']]
for feature_impute in features_not_core:
data_copy = deepcopy(data.loc[[random_idx], model_params['features']])
# Remove features that should be imputed
# using None
data_copy.loc[random_idx, feature_impute] = None
passed, tests = test_valid_prediction(
model_pipeline,
data_copy,
model_params,
passed,
tests,
'when ' + feature_impute + ' is None',
)
# Remove features that should be imputed
# using np.nan
data_copy.loc[random_idx, feature_impute] = np.nan
passed, tests = test_valid_prediction(
model_pipeline,
data_copy,
model_params,
passed,
tests,
'when ' + feature_impute + ' is np.nan',
)
# Remove features that should be imputed
# using empty string
data_copy.loc[random_idx, feature_impute] = ''
passed, tests = test_valid_prediction(
model_pipeline,
data_copy,
model_params,
passed,
tests,
'when ' + feature_impute + ' is empty string',
)
# Log results
if tests > 0:
with mlflow.start_run(run_id=active_run_id):
mlflow.log_metric(
'unit_test_fraction_feature_imputed',
passed / tests,
)
def test_unknown_features(
data: pd.DataFrame,
model_pipeline: sklearn_Pipeline,
model_params: Dict[str, Any],
active_run_id: str,
test_same_as_missing=True,
) -> None:
"""Node for testing whether the model returns a valid value when
unknown categories for features are passed in.
"""
# Prep for metrics & logging
passed = 0
log = logging.getLogger(__name__)
# Grab a random entry from data
random_idx = random_not_null_idx(data, model_params)
log.info('Testing unknown feature categories')
features_str = [feature for feature in model_params['features']
if any([type(v) == str for v in data[feature].values])]
for feature_handle_unknown in features_str:
data_copy = deepcopy(data.loc[[random_idx], model_params['features']])
# Make sure 'unknown_category' isn't actually a category
assert 'unknown_category'\
not in data[feature_handle_unknown].unique(),\
'test failure because category called unknown_category'
data_copy.loc[random_idx, feature_handle_unknown] = 'unknown_category'
        # Predict and confirm we get a valid output
try:
prediction = model_pipeline.predict(data_copy)[0]
except ValueError:
prediction = False # indicator that prediction failed
if test_same_as_missing and (feature_handle_unknown not in model_params['features_core']):
# Remove features that should be imputed
data_copy.loc[random_idx, feature_handle_unknown] = None # is this how to remove a feature?
# Predict again
prediction_missing = model_pipeline.predict(data_copy)[0]
if isclose(
prediction,
prediction_missing,
rel_tol=1e-2,
):
passed += 1
else:
log.warning(
'\tunknown category for feature ' +
feature_handle_unknown +
' produces different prediction than when missing'
)
else:
if isinstance(prediction, target_type(model_params)):
passed += 1
else:
log.warning(
'\tfailed handling unknown category for feature: ' +
feature_handle_unknown
)
# Log results
if len(features_str) > 0:
with mlflow.start_run(run_id=active_run_id):
mlflow.log_metric(
'unit_test_fraction_feature_handled_unknown',
passed / len(features_str),
)
def test_category_exclusions(
data: pd.DataFrame,
model_pipeline: sklearn_Pipeline,
model_params: Dict[str, Any],
category_exclusions: Dict[str, Any],
active_run_id: str,
) -> None:
"""Node for testing whether the model returns a valid value when an
excluded feature category is provided.
Also checks that the prediction is the same as when an unknown category
is provided.
"""
# Prep for metrics & logging
passed = 0
tests = 0
log = logging.getLogger(__name__)
# Grab a random entry from data
random_idx = random_not_null_idx(data, model_params)
log.info('Testing excluded feature categories')
if isinstance(category_exclusions, dict): # is list when empty
for feature_w_exclusions in category_exclusions.keys():
for excluded_category in category_exclusions[feature_w_exclusions]:
excluded_category = str(excluded_category) # ensure is a string
data_copy = deepcopy(data.loc[[random_idx], model_params['features']])
data_copy.loc[random_idx, feature_w_exclusions] = excluded_category
                # Predict and confirm we get a valid output
try:
prediction_excluded_cat = model_pipeline.predict(data_copy)[0]
except ValueError:
prediction_excluded_cat = False # indicator that prediction failed
# Now make it an unknown category and see what get
# Make sure 'unknown_category' isn't actually a category
assert 'unknown_category'\
not in data[feature_w_exclusions].unique(),\
'test failure because category called unknown_category'
data_copy.loc[random_idx, feature_w_exclusions] = 'unknown_category'
# Predict with unknown category
prediction_unknown_cat = model_pipeline.predict(data_copy)[0]
if isclose(
prediction_excluded_cat,
prediction_unknown_cat,
rel_tol=1e-2, # within 1%
) or (feature_w_exclusions in model_params['features_core'] and np.isnan(prediction_excluded_cat)):
passed += 1
else:
log.warning(
'failed handling excluded category ' +
excluded_category +
' for feature: ' +
feature_w_exclusions
)
tests += 1
# Log results
if tests > 0:
with mlflow.start_run(run_id=active_run_id):
mlflow.log_metric(
'unit_test_fraction_feature_handled_exclusions',
passed / tests,
)
def test_features_order(
data: pd.DataFrame,
model_pipeline: sklearn_Pipeline,
model_params: Dict[str, Any],
active_run_id: str,
) -> None:
"""Test changing column order
"""
# Grab a random entry from data
    passed = 0
    random_idx = random_not_null_idx(data, model_params)
    data_copy = deepcopy(data.loc[[random_idx], model_params['features'] +
                                  ['predicted_{}'.format(model_params['name'])]])
prediction = data_copy['predicted_{}'.format(model_params['name'])].values[0]
# Invert order
data_copy = data_copy[data_copy.columns[::-1]]
# Predict and confirm get a valid output
try:
prediction_inv_order = model_pipeline.predict(data_copy[model_params['features']])[0]
except ValueError:
prediction_inv_order = False # indicator that prediction failed
if prediction == prediction_inv_order:
passed = 1
# Log results
with mlflow.start_run(run_id=active_run_id):
mlflow.log_metric(
'unit_test_features_order_change',
passed,
)
def test_predicted_range(
data: pd.DataFrame,
model_params: Dict[str, Any],
active_run_id: str,
) -> None:
"""Test range of output values
"""
predictions = data['predicted_{}'.format(model_params['name'])]
    out_of_bound = pd.Series(False, index=predictions.index)
    if 'target_min' in model_params['unit_tests']:
        out_of_bound = out_of_bound | (predictions < model_params['unit_tests']['target_min'])
    if 'target_max' in model_params['unit_tests']:
        # predictions above the maximum are also out of bound
        out_of_bound = out_of_bound | (predictions > model_params['unit_tests']['target_max'])
    passed = int(sum(out_of_bound) == 0)
# Log results
with mlflow.start_run(run_id=active_run_id):
mlflow.log_metric(
'unit_test_predicted_range',
passed,
)
def test_predicted_type(
data: pd.DataFrame,
model_params: Dict[str, Any],
active_run_id: str,
) -> None:
"""Test predictions data type
"""
passed = 0
# Select single not null value
    random_idx = random_not_null_idx(data, model_params)
prediction = data.loc[[random_idx], 'predicted_{}'.format(model_params['name'])].values[0]
passed = int(isinstance(prediction, target_type(model_params)))
# Log results
with mlflow.start_run(run_id=active_run_id):
mlflow.log_metric(
'unit_test_predicted_type',
passed,
)
def test_predicted_valid(
data: pd.DataFrame,
model_params: Dict[str, Any],
active_run_id: str,
) -> None:
"""Test predictions data type
"""
predictions = data['predicted_{}'.format(model_params['name'])]
percent_valid = (len(predictions) - sum(predictions.isnull())) / len(predictions)
# ratio of not null predictions
passed = int(
percent_valid >= model_params['unit_tests']['target_min_valid_ratio']
)
# Log results
with mlflow.start_run(run_id=active_run_id):
mlflow.log_metric(
'unit_test_predicted_valid',
passed,
)
if not passed:
log = logging.getLogger(__name__)
log.warning(
'failed test prediction valid unit test because ' +
'only {:.1f}% were not null '.format(percent_valid*100) +
'but unit test requires {:.1f}% or more to be not null'.format(
model_params['unit_tests']['target_min_valid_ratio']*100
)
)
log.warning(
'of the null predictions, ' +
'{:.1f}% were null because of missing core features'.format(
(sum(predictions[data.missing_core_features].isnull()) /
sum(predictions.isnull()))*100
)
)
def test_predicted_scale(
data: pd.DataFrame,
model_params: Dict[str, Any],
active_run_id: str,
) -> None:
"""Test predictions data scale (i.e. decimal places)
"""
# Grab a random entry from data
passed = 0
    random_idx = random_not_null_idx(data, model_params)
prediction = data.loc[[random_idx], 'predicted_{}'.format(model_params['name'])].values[0]
    # After shifting the prediction left by target_scale decimal places,
    # no fractional part should remain.
passed = int(((prediction * (10 ** model_params['unit_tests'][
'target_scale'])) % 1) < 0.00000001 ) # comparing with small non-zero value due to floating point precision
# Log results
with mlflow.start_run(run_id=active_run_id):
mlflow.log_metric(
'unit_test_predicted_scale',
passed,
)
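# A worked example of the scale check above (illustrative numbers only):
# with target_scale = 2, a prediction of 12.34 passes because
# (12.34 * 10**2) % 1 is numerically ~0, while 12.345 fails because
# (12.345 * 10**2) % 1 == 0.5, i.e. decimals remain beyond the allowed scale.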
def target_type(model_params):
types = ()
    if model_params['unit_tests']['target_type'] == 'float':
        types = (float, np.floating)
    elif model_params['unit_tests']['target_type'] == 'int':
        # np.integer covers the numpy integer dtypes (e.g. np.int64)
        types = (int, np.integer)
return types
def random_not_null_idx(data, model_params):
    not_null_idx = data['predicted_{}'.format(model_params['name'])].notnull()
return random.choice(data[not_null_idx].index)
| 31.361305 | 122 | 0.615505 |
73fdf4df25f4e96d155b7fe1fd5ea78c4b59ddfc | 14,689 | py | Python | lib/spack/spack/patch.py | lguyot/spack | e910c227a7bac3adf2c18fc86cf994811b7d14f7 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2 | 2020-10-15T01:08:42.000Z | 2021-10-18T01:28:18.000Z | lib/spack/spack/patch.py | lguyot/spack | e910c227a7bac3adf2c18fc86cf994811b7d14f7 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null | lib/spack/spack/patch.py | lguyot/spack | e910c227a7bac3adf2c18fc86cf994811b7d14f7 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2017-01-21T17:19:32.000Z | 2017-01-21T17:19:32.000Z | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import hashlib
import os
import os.path
import inspect
import llnl.util.filesystem
import llnl.util.lang
import spack.error
import spack.fetch_strategy as fs
import spack.repo
import spack.stage
import spack.util.spack_json as sjson
import spack
from spack.util.compression import allowed_archive
from spack.util.crypto import checksum, Checker
from spack.util.executable import which
def apply_patch(stage, patch_path, level=1, working_dir='.'):
"""Apply the patch at patch_path to code in the stage.
Args:
stage (spack.stage.Stage): stage with code that will be patched
patch_path (str): filesystem location for the patch to apply
level (int, optional): patch level (default 1)
working_dir (str): relative path *within* the stage to change to
(default '.')
"""
patch = which("patch", required=True)
with llnl.util.filesystem.working_dir(stage.source_path):
patch('-s',
'-p', str(level),
'-i', patch_path,
'-d', working_dir)
class Patch(object):
"""Base class for patches.
Arguments:
pkg (str): the package that owns the patch
The owning package is not necessarily the package to apply the patch
to -- in the case where a dependent package patches its dependency,
it is the dependent's fullname.
"""
def __init__(self, pkg, path_or_url, level, working_dir):
# validate level (must be an integer >= 0)
if not isinstance(level, int) or not level >= 0:
raise ValueError("Patch level needs to be a non-negative integer.")
# Attributes shared by all patch subclasses
self.owner = pkg.fullname
self.path_or_url = path_or_url # needed for debug output
self.path = None # must be set before apply()
self.level = level
self.working_dir = working_dir
def fetch(self):
"""Fetch the patch in case of a UrlPatch
"""
def clean(self):
"""Clean up the patch stage in case of a UrlPatch"""
def apply(self, stage):
"""Apply a patch to source in a stage.
Arguments:
stage (spack.stage.Stage): stage where source code lives
"""
assert self.path, (
"Path for patch not set in apply: %s" % self.path_or_url)
if not os.path.isfile(self.path):
raise NoSuchPatchError("No such patch: %s" % self.path)
apply_patch(stage, self.path, self.level, self.working_dir)
def cache(self):
return None
def to_dict(self):
"""Partial dictionary -- subclases should add to this."""
return {
'owner': self.owner,
'sha256': self.sha256,
'level': self.level,
'working_dir': self.working_dir,
}
class FilePatch(Patch):
"""Describes a patch that is retrieved from a file in the repository.
Arguments:
pkg (str): the class object for the package that owns the patch
relative_path (str): path to patch, relative to the repository
directory for a package.
level (int): level to pass to patch command
working_dir (str): path within the source directory where patch
should be applied
"""
def __init__(self, pkg, relative_path, level, working_dir,
ordering_key=None):
self.relative_path = relative_path
# patches may be defined by relative paths to parent classes
# search mro to look for the file
abs_path = None
# At different times we call FilePatch on instances and classes
pkg_cls = pkg if inspect.isclass(pkg) else pkg.__class__
for cls in inspect.getmro(pkg_cls):
if not hasattr(cls, 'module'):
# We've gone too far up the MRO
break
# Cannot use pkg.package_dir because it's a property and we have
# classes, not instances.
pkg_dir = os.path.abspath(os.path.dirname(cls.module.__file__))
path = os.path.join(pkg_dir, self.relative_path)
if os.path.exists(path):
abs_path = path
break
if abs_path is None:
msg = 'FilePatch: Patch file %s for ' % relative_path
msg += 'package %s.%s does not exist.' % (pkg.namespace, pkg.name)
raise ValueError(msg)
super(FilePatch, self).__init__(pkg, abs_path, level, working_dir)
self.path = abs_path
self._sha256 = None
self.ordering_key = ordering_key
@property
def sha256(self):
if self._sha256 is None:
self._sha256 = checksum(hashlib.sha256, self.path)
return self._sha256
def to_dict(self):
return llnl.util.lang.union_dicts(
super(FilePatch, self).to_dict(),
{'relative_path': self.relative_path})
class UrlPatch(Patch):
"""Describes a patch that is retrieved from a URL.
Arguments:
pkg (str): the package that owns the patch
url (str): URL where the patch can be fetched
level (int): level to pass to patch command
working_dir (str): path within the source directory where patch
should be applied
"""
def __init__(self, pkg, url, level=1, working_dir='.', ordering_key=None,
**kwargs):
super(UrlPatch, self).__init__(pkg, url, level, working_dir)
self.url = url
self._stage = None
self.ordering_key = ordering_key
self.archive_sha256 = kwargs.get('archive_sha256')
if allowed_archive(self.url) and not self.archive_sha256:
raise PatchDirectiveError(
"Compressed patches require 'archive_sha256' "
"and patch 'sha256' attributes: %s" % self.url)
self.sha256 = kwargs.get('sha256')
if not self.sha256:
raise PatchDirectiveError("URL patches require a sha256 checksum")
def fetch(self):
"""Retrieve the patch in a temporary stage and compute self.path
Args:
stage: stage for the package that needs to be patched
"""
self.stage.create()
self.stage.fetch()
self.stage.check()
root = self.stage.path
if self.archive_sha256:
self.stage.expand_archive()
root = self.stage.source_path
files = os.listdir(root)
if not files:
if self.archive_sha256:
raise NoSuchPatchError(
"Archive was empty: %s" % self.url)
else:
raise NoSuchPatchError(
"Patch failed to download: %s" % self.url)
self.path = os.path.join(root, files.pop())
if not os.path.isfile(self.path):
raise NoSuchPatchError(
"Archive %s contains no patch file!" % self.url)
        # For a compressed archive, we need to check the patch sha256 again,
        # since after expansion the patch sits inside a directory rather than
        # at the original download location
if self.archive_sha256 and spack.config.get('config:checksum'):
checker = Checker(self.sha256)
if not checker.check(self.path):
raise fs.ChecksumError(
"sha256 checksum failed for %s" % self.path,
"Expected %s but got %s" % (self.sha256, checker.sum))
@property
def stage(self):
if self._stage:
return self._stage
# use archive digest for compressed archives
fetch_digest = self.sha256
if self.archive_sha256:
fetch_digest = self.archive_sha256
fetcher = fs.URLFetchStrategy(self.url, fetch_digest,
expand=bool(self.archive_sha256))
# The same package can have multiple patches with the same name but
# with different contents, therefore apply a subset of the hash.
name = '{0}-{1}'.format(os.path.basename(self.url), fetch_digest[:7])
per_package_ref = os.path.join(self.owner.split('.')[-1], name)
# Reference starting with "spack." is required to avoid cyclic imports
mirror_ref = spack.mirror.mirror_archive_paths(
fetcher,
per_package_ref)
self._stage = spack.stage.Stage(fetcher, mirror_paths=mirror_ref)
self._stage.create()
return self._stage
def cache(self):
return self.stage
def clean(self):
self.stage.destroy()
def to_dict(self):
data = super(UrlPatch, self).to_dict()
data['url'] = self.url
if self.archive_sha256:
data['archive_sha256'] = self.archive_sha256
return data
def from_dict(dictionary):
"""Create a patch from json dictionary."""
owner = dictionary.get('owner')
if 'owner' not in dictionary:
raise ValueError('Invalid patch dictionary: %s' % dictionary)
pkg = spack.repo.get(owner)
if 'url' in dictionary:
return UrlPatch(
pkg,
dictionary['url'],
dictionary['level'],
dictionary['working_dir'],
sha256=dictionary['sha256'],
archive_sha256=dictionary.get('archive_sha256'))
elif 'relative_path' in dictionary:
patch = FilePatch(
pkg,
dictionary['relative_path'],
dictionary['level'],
dictionary['working_dir'])
# If the patch in the repo changes, we cannot get it back, so we
# just check it and fail here.
# TODO: handle this more gracefully.
sha256 = dictionary['sha256']
checker = Checker(sha256)
if not checker.check(patch.path):
raise fs.ChecksumError(
"sha256 checksum failed for %s" % patch.path,
"Expected %s but got %s" % (sha256, checker.sum),
"Patch may have changed since concretization.")
return patch
else:
raise ValueError("Invalid patch dictionary: %s" % dictionary)
class PatchCache(object):
"""Index of patches used in a repository, by sha256 hash.
This allows us to look up patches without loading all packages. It's
    also needed to properly implement dependency patching, as we need a way
to look up patches that come from packages not in the Spec sub-DAG.
The patch index is structured like this in a file (this is YAML, but
we write JSON)::
patches:
sha256:
namespace1.package1:
<patch json>
namespace2.package2:
<patch json>
... etc. ...
"""
def __init__(self, data=None):
if data is None:
self.index = {}
else:
if 'patches' not in data:
raise IndexError('invalid patch index; try `spack clean -m`')
self.index = data['patches']
@classmethod
def from_json(cls, stream):
return PatchCache(sjson.load(stream))
def to_json(self, stream):
sjson.dump({'patches': self.index}, stream)
def patch_for_package(self, sha256, pkg):
"""Look up a patch in the index and build a patch object for it.
Arguments:
sha256 (str): sha256 hash to look up
pkg (spack.package.Package): Package object to get patch for.
We build patch objects lazily because building them requires that
we have information about the package's location in its repo.
"""
sha_index = self.index.get(sha256)
if not sha_index:
raise NoSuchPatchError(
"Couldn't find patch with sha256: %s" % sha256)
patch_dict = sha_index.get(pkg.fullname)
if not patch_dict:
raise NoSuchPatchError(
"Couldn't find patch for package %s with sha256: %s"
% (pkg.fullname, sha256))
# add the sha256 back (we take it out on write to save space,
# because it's the index key)
patch_dict = dict(patch_dict)
patch_dict['sha256'] = sha256
return from_dict(patch_dict)
def update_package(self, pkg_fullname):
# remove this package from any patch entries that reference it.
empty = []
for sha256, package_to_patch in self.index.items():
remove = []
for fullname, patch_dict in package_to_patch.items():
if patch_dict['owner'] == pkg_fullname:
remove.append(fullname)
for fullname in remove:
package_to_patch.pop(fullname)
if not package_to_patch:
empty.append(sha256)
# remove any entries that are now empty
for sha256 in empty:
del self.index[sha256]
# update the index with per-package patch indexes
pkg = spack.repo.get(pkg_fullname)
partial_index = self._index_patches(pkg)
for sha256, package_to_patch in partial_index.items():
p2p = self.index.setdefault(sha256, {})
p2p.update(package_to_patch)
def update(self, other):
"""Update this cache with the contents of another."""
for sha256, package_to_patch in other.index.items():
p2p = self.index.setdefault(sha256, {})
p2p.update(package_to_patch)
@staticmethod
def _index_patches(pkg_class):
index = {}
# Add patches from the class
for cond, patch_list in pkg_class.patches.items():
for patch in patch_list:
patch_dict = patch.to_dict()
patch_dict.pop('sha256') # save some space
index[patch.sha256] = {pkg_class.fullname: patch_dict}
# and patches on dependencies
for name, conditions in pkg_class.dependencies.items():
for cond, dependency in conditions.items():
for pcond, patch_list in dependency.patches.items():
for patch in patch_list:
dspec = spack.repo.get(dependency.spec.name)
patch_dict = patch.to_dict()
patch_dict.pop('sha256') # save some space
index[patch.sha256] = {dspec.fullname: patch_dict}
return index
class NoSuchPatchError(spack.error.SpackError):
"""Raised when a patch file doesn't exist."""
class PatchDirectiveError(spack.error.SpackError):
"""Raised when the wrong arguments are suppled to the patch directive."""
| 34.400468 | 79 | 0.604398 |
73fe153ab1893b363aa19a37f7f3503eb79fb677 | 6,250 | py | Python | tests/platform_tests/mellanox/test_thermal_control.py | vincent201881/sonic-mgmt | 4f02bb5f91600ae5180ace1620a718caf02c63a1 | [
"Apache-2.0"
] | null | null | null | tests/platform_tests/mellanox/test_thermal_control.py | vincent201881/sonic-mgmt | 4f02bb5f91600ae5180ace1620a718caf02c63a1 | [
"Apache-2.0"
] | null | null | null | tests/platform_tests/mellanox/test_thermal_control.py | vincent201881/sonic-mgmt | 4f02bb5f91600ae5180ace1620a718caf02c63a1 | [
"Apache-2.0"
] | null | null | null | import logging
import operator
import pytest
import random
import time
from tests.common.mellanox_data import SWITCH_MODELS
from tests.common.plugins.loganalyzer.loganalyzer import LogAnalyzer
from tests.common.utilities import wait_until
from tests.platform_tests.thermal_control_test_helper import *
from mellanox_thermal_control_test_helper import MockerHelper, AbnormalFanMocker
pytestmark = [
pytest.mark.topology('any')
]
THERMAL_CONTROL_TEST_WAIT_TIME = 65
THERMAL_CONTROL_TEST_CHECK_INTERVAL = 5
COOLING_CUR_STATE_PATH = '/run/hw-management/thermal/cooling_cur_state'
COOLING_CUR_STATE_THRESHOLD = 7
PSU_PRESENCE_PATH = '/run/hw-management/thermal/psu{}_status'
PSU_SPEED_PATH = '/run/hw-management/thermal/psu{}_fan1_speed_get'
PSU_SPEED_TOLERANCE = 0.25
LOG_EXPECT_CHANGE_MIN_COOLING_LEVEL_RE = '.*Changed minimum cooling level to {}.*'
@pytest.mark.disable_loganalyzer
def test_dynamic_minimum_table(duthost, mocker_factory):
air_flow_dirs = ['p2c', 'c2p', 'unk']
max_temperature = 45000 # 45 C
cooling_cur_state = get_cooling_cur_state(duthost)
if cooling_cur_state >= COOLING_CUR_STATE_THRESHOLD:
pytest.skip('The cooling level {} is higher than threshold {}.'.format(cooling_cur_state, COOLING_CUR_STATE_THRESHOLD))
mocker = mocker_factory(duthost, 'MinTableMocker')
loganalyzer = LogAnalyzer(ansible_host=duthost, marker_prefix='thermal_control')
loganalyzer.load_common_config()
for index in range(len(air_flow_dirs)):
air_flow_index = random.randint(0, len(air_flow_dirs) - 1)
air_flow_dir = air_flow_dirs[air_flow_index]
air_flow_dirs.remove(air_flow_dir)
temperature = random.randint(0, max_temperature)
trust_state = True if random.randint(0, 1) else False
logging.info('Testing with air_flow_dir={}, temperature={}, trust_state={}'.format(air_flow_dir, temperature, trust_state))
expect_minimum_cooling_level = mocker.get_expect_cooling_level(air_flow_dir, temperature, trust_state)
loganalyzer.expect_regex = [LOG_EXPECT_CHANGE_MIN_COOLING_LEVEL_RE.format(expect_minimum_cooling_level)]
with loganalyzer:
mocker.mock_min_table(air_flow_dir, temperature, trust_state)
time.sleep(THERMAL_CONTROL_TEST_WAIT_TIME)
temperature = random.randint(0, max_temperature)
logging.info('Testing with air_flow_dir={}, temperature={}, trust_state={}'.format(air_flow_dir, temperature, not trust_state))
expect_minimum_cooling_level = mocker.get_expect_cooling_level(air_flow_dir, temperature, not trust_state)
loganalyzer.expect_regex = [LOG_EXPECT_CHANGE_MIN_COOLING_LEVEL_RE.format(expect_minimum_cooling_level)]
with loganalyzer:
mocker.mock_min_table(air_flow_dir, temperature, not trust_state)
time.sleep(THERMAL_CONTROL_TEST_WAIT_TIME)
@pytest.mark.disable_loganalyzer
def test_set_psu_fan_speed(duthost, mocker_factory):
hwsku = duthost.facts["hwsku"]
psu_num = SWITCH_MODELS[hwsku]['psus']['number']
hot_swappable = SWITCH_MODELS[hwsku]['psus']['hot_swappable']
if not hot_swappable:
pytest.skip('The SKU {} does not support this test case.'.format(hwsku))
logging.info('Create mocker, it may take a few seconds...')
single_fan_mocker = mocker_factory(duthost, 'SingleFanMocker')
logging.info('Mock FAN absence...')
single_fan_mocker.mock_absence()
assert wait_until(THERMAL_CONTROL_TEST_WAIT_TIME, THERMAL_CONTROL_TEST_CHECK_INTERVAL, check_cooling_cur_state, duthost, 10, operator.eq), \
'Current cooling state is {}'.format(get_cooling_cur_state(duthost))
logging.info('Wait {} seconds for the policy to take effect...'.format(THERMAL_CONTROL_TEST_CHECK_INTERVAL))
time.sleep(THERMAL_CONTROL_TEST_CHECK_INTERVAL)
full_speeds = []
for index in range(psu_num):
speed = get_psu_speed(duthost, index)
full_speeds.append(speed)
logging.info('Full speed={}'.format(full_speeds))
logging.info('Mock FAN presence...')
single_fan_mocker.mock_presence()
assert wait_until(THERMAL_CONTROL_TEST_WAIT_TIME, THERMAL_CONTROL_TEST_CHECK_INTERVAL, check_cooling_cur_state, duthost, 10, operator.ne), \
'Current cooling state is {}'.format(get_cooling_cur_state(duthost))
logging.info('Wait {} seconds for the policy to take effect...'.format(THERMAL_CONTROL_TEST_CHECK_INTERVAL))
time.sleep(THERMAL_CONTROL_TEST_CHECK_INTERVAL)
cooling_cur_state = get_cooling_cur_state(duthost)
logging.info('Cooling level changed to {}'.format(cooling_cur_state))
current_speeds = []
for index in range(psu_num):
speed = get_psu_speed(duthost, index)
current_speeds.append(speed)
logging.info('Current speed={}'.format(current_speeds))
index = 0
if cooling_cur_state < 6:
cooling_cur_state = 6
expect_multiple = float(10) / cooling_cur_state
while index < psu_num:
full_speed = full_speeds[index]
current_speed = current_speeds[index]
index += 1
if not full_speed or not current_speed:
continue
actual_multiple = float(full_speed) / current_speed
if expect_multiple > actual_multiple:
assert actual_multiple > expect_multiple * (1 - PSU_SPEED_TOLERANCE)
elif expect_multiple < actual_multiple:
assert actual_multiple < expect_multiple * (1 + PSU_SPEED_TOLERANCE)
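# Numeric illustration of the ratio check above (values are examples only):
# with cooling_cur_state = 7 the expected full-speed/current-speed ratio is
# 10 / 7 ~= 1.43, so with PSU_SPEED_TOLERANCE = 0.25 any observed ratio between
# roughly 1.07 and 1.79 passes the assertions.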
def get_psu_speed(dut, index):
index = index + 1
psu_speed_path = PSU_SPEED_PATH.format(index)
    file_stat = dut.stat(path=psu_speed_path)
    if not file_stat['stat']['exists']:
        return None
cmd_output = dut.command('cat {}'.format(psu_speed_path))
try:
return int(cmd_output['stdout'])
except Exception as e:
assert False, 'Bad content in {} - {}'.format(psu_speed_path, e)
def get_cooling_cur_state(dut):
cmd_output = dut.command('cat {}'.format(COOLING_CUR_STATE_PATH))
try:
return int(cmd_output['stdout'])
except Exception as e:
assert False, 'Bad content in {} - {}'.format(COOLING_CUR_STATE_PATH, e)
def check_cooling_cur_state(dut, expect_value, op):
actual_value = get_cooling_cur_state(dut)
return op(actual_value, expect_value)
| 44.014085 | 144 | 0.7424 |
73fe2d2674710ab418460d116b17185aaf620590 | 15,475 | py | Python | RBC1r/ode_analyzer.py | yuhj1998/OnsagerNet | 32cbb31116cf4244b340497d739a86eb7de9e7a2 | [
"Apache-2.0"
] | 2 | 2021-11-01T07:23:52.000Z | 2022-03-29T03:02:33.000Z | RBC1r/ode_analyzer.py | yuhj1998/OnsagerNet | 32cbb31116cf4244b340497d739a86eb7de9e7a2 | [
"Apache-2.0"
] | null | null | null | RBC1r/ode_analyzer.py | yuhj1998/OnsagerNet | 32cbb31116cf4244b340497d739a86eb7de9e7a2 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- encoding: utf-8 -*-
'''
@File : ode_analyzer.py
@Time : 2020/07/21
@Author : Haijn Yu <hyu@lsec.cc.ac.cn>
@Desc : Analyze the learned OnsagerNet for Lorenz system
see more descriptions and update on [GitHub](https://github.com/yuhj1998/ode-analyzer)
'''
# %% 1. import library and set parameters
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import ode_net as onet
import argparse
import scipy.optimize as pyopt
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rc('lines', linewidth=1, markersize=2)
float_formatter = "{:.6e}".format
np.set_printoptions(formatter={'float_kind': float_formatter})
def long_run_cmp_Lorenz(onet1: onet.ODENet, onet2: onet.ODENet,
T, fixed_pts, lcs=None,
dt=0.001, nOut=100,
region=[-10.0, 10, -10, 10, 0, 1],
savefile='run_cmp'):
''' based on long_run_cmp function in test_ode_Lorenz.py
with fixed points and limit cycles added
@onet1 : underlying ODE,
@onet2 : learned ODE
'''
nS = 4
n = onet1.nVar
if T/nOut > 0.025:
nOut = int(T/0.025)
p1 = torch.zeros(nOut, nS, n)
p2 = torch.zeros(nOut, nS, n)
p1[0, :, 0] = torch.tensor([region[0], -1.0, 1.0, region[1]]).float()
p1[0, :, 1] = torch.tensor([region[2], -1.0, 1.0, region[3]]).float()
p1[0, :, 2] = torch.tensor(region[4]).float()
p2[0, :, 0:n] = p1[0, :, 0:n]
with torch.no_grad():
print('Calculating evaluation data ...', end=' ')
for i in range(nOut-1):
nt = int(T/nOut/dt)
p1[i+1, ...] = onet1.ode_rk3(p1[i, ...], dt, nt)
p2[i+1, ...] = onet2.ode_rk3(p2[i, ...], dt, nt)
print('done.')
L2err_pts = torch.sum((p1-p2)**2, dim=2).sqrt()
Linf = torch.max(L2err_pts)
L2nrm_pth = torch.sqrt(torch.sum(p1**2, dim=[0, 2]) * T/nOut)
L2err_pth = torch.sqrt(torch.sum(L2err_pts**2, dim=0)*T/nOut)
L2err_rel = torch.sqrt(torch.sum((L2err_pth/L2nrm_pth)**2)/nS)
print(f'The maximum point error for {nS} path is {Linf:.6e}')
print(f'The average L2norm error for {nS} path is {L2err_rel:.6e}')
n = onet2.nVar
if lcs is not None:
nLC = lcs.shape[0]
pLC = torch.zeros(nOut, nLC, n)
pLC[0, :, 0:n] = torch.tensor(lcs[:, 0:n]).float()
nt = 10
with torch.no_grad():
print('Calculating limit cycle data ...', end=' ')
for ip in range(nLC):
Tlc = lcs[ip, n]
for i in range(nOut-1):
dt = Tlc/(nOut-1)/nt
pLC[i+1, ip, :] = onet2.ode_rk3(pLC[i, ip, :], dt, nt)
print('done.')
else:
nLC = 0
f = plt.figure(figsize=[12, 10], dpi=144)
dt_out = T/nOut
ax = f.add_subplot(311)
nErrOut = nOut
ii = np.arange(nErrOut)
tt = ii*dt_out
ipp = L2err_pth.argmax()
plt.plot(tt, p2[ii, ipp, 0], label='X learned ODE')
plt.plot(tt, p1[ii, ipp, 0], '.', markersize=2, zorder=3,
alpha=0.8, label='X original ODE')
plt.plot(tt, p2[ii, ipp, 1], label='Y learned ODE')
plt.plot(tt, p1[ii, ipp, 1], '.', markersize=2, zorder=3,
alpha=0.8, label='Y original ODE')
plt.plot(tt, p2[ii, ipp, 2], label='Z learned ODE')
plt.plot(tt, p1[ii, ipp, 2], '.', markersize=2, zorder=3,
alpha=0.8, label='Z original ODE')
ax.set_title('Trajectory with max error')
plt.tick_params(axis='y', which='both', labelleft='on', labelright='on')
plt.autoscale(enable=True, axis='x', tight=True)
plt.xlabel('t')
plt.legend(fontsize='small', ncol=3, loc="best")
ax = f.add_subplot(312)
plt.plot(tt, p1[ii, ipp, 0]-p2[ii, ipp, 0], label='X error')
plt.plot(tt, p1[ii, ipp, 1]-p2[ii, ipp, 1], label='Y error')
plt.plot(tt, p1[ii, ipp, 2]-p2[ii, ipp, 2], label='Z error')
plt.tick_params(axis='y', which='both', labelleft='on', labelright='on')
plt.xlabel('t')
plt.autoscale(enable=True, axis='x', tight=True)
plt.legend(fontsize='small', loc=0, ncol=3)
ax = f.add_subplot(337)
for ip in np.arange(nS):
plt.plot(p1[:, ip, 0], p1[:, ip, 1], '.',
markersize=1, alpha=0.8, zorder=4)
plt.plot(p1[:, ip, 0], p1[:, ip, 1], color='grey',
linewidth=0.5, alpha=0.2, zorder=1)
if ip == ipp:
plt.plot(p2[:, ip, 0], p2[:, ip, 1], color='C3',
linewidth=1, alpha=0.9, zorder=3)
else:
plt.plot(p2[:, ip, 0], p2[:, ip, 1], color='C0',
linewidth=0.75, alpha=0.7, zorder=2)
for ip in np.arange(nLC):
plt.plot(pLC[:, ip, 0], pLC[:, ip, 1], color='yellow',
linewidth=1, alpha=0.6, zorder=4)
ax.scatter(fixed_pts[:, 0], fixed_pts[:, 1], color='red',
marker='+', alpha=0.9, edgecolors=None,
zorder=5)
plt.xlabel('X')
plt.ylabel('Y')
ax = f.add_subplot(338)
for ip in np.arange(nS):
plt.plot(p1[:, ip, 0], p1[:, ip, 2], '.',
markersize=1, alpha=0.8, zorder=4)
plt.plot(p1[:, ip, 0], p1[:, ip, 2], color='grey',
linewidth=0.5, alpha=0.2, zorder=1)
if ip == ipp:
plt.plot(p2[:, ip, 0], p2[:, ip, 2], color='C3',
linewidth=1, alpha=0.9, zorder=3)
else:
plt.plot(p2[:, ip, 0], p2[:, ip, 2], color='C0',
linewidth=0.75, alpha=0.7, zorder=2)
for ip in np.arange(nLC):
plt.plot(pLC[:, ip, 0], pLC[:, ip, 2], color='yellow',
linewidth=1, alpha=0.6, zorder=4)
ax.scatter(fixed_pts[:, 0], fixed_pts[:, 2], color='red',
marker='+', alpha=0.9, edgecolors=None,
zorder=5)
plt.xlabel('X')
plt.ylabel('Z')
ax = f.add_subplot(339)
for ip in np.arange(nS):
plt.plot(p1[:, ip, 1], p1[:, ip, 2], '.',
markersize=1, alpha=0.8, zorder=4)
plt.plot(p1[:, ip, 1], p1[:, ip, 2], color='grey',
linewidth=0.5, alpha=0.2, zorder=1)
if ip == ipp:
plt.plot(p2[:, ip, 1], p2[:, ip, 2], color='C3',
linewidth=1, alpha=0.9, zorder=3)
else:
plt.plot(p2[:, ip, 1], p2[:, ip, 2], color='C0',
linewidth=0.75, alpha=0.7, zorder=2)
for ip in np.arange(nLC):
plt.plot(pLC[:, ip, 1], pLC[:, ip, 2], color='yellow',
linewidth=1, alpha=0.6, zorder=4)
ax.scatter(fixed_pts[:, 1], fixed_pts[:, 2], color='red',
marker='+', alpha=0.9, edgecolors=None,
zorder=5)
plt.xlabel('Y')
plt.ylabel('Z')
plt.savefig(savefile+'.pdf', bbox_inches='tight', dpi=288)
def plot_ode_structure(ode_net: onet.ODENet,
fixed_pts, lcs, nOut=100, savefile=None):
''' ode_net, the ode system
@fixed_pts, an numpy arrange store fixed points
@lcs, a numpy array store the start points and periods of
limit cycles
'''
n = ode_net.nVar
nLC = lcs.shape[0]
pLC = torch.zeros(nOut, nLC, n)
tt = torch.zeros(nOut, nLC)
pLC[0, :, 0:n] = torch.tensor(lcs[:, 0:n]).float()
tt[0, :] = 0.
nt = 10
with torch.no_grad():
print('Calculating evaluation data ...', end=' ')
for ip in range(nLC):
T = lcs[ip, n]
for i in range(nOut-1):
dt = T/(nOut-1)/nt
pLC[i+1, ip, :] = ode_net.ode_rk3(pLC[i, ip, :], dt, nt)
print('done.')
f = plt.figure(figsize=[12, 4], dpi=144)
ax = f.add_subplot(131)
for ip in np.arange(nLC):
plt.plot(pLC[:, ip, 0], pLC[:, ip, 1],
linewidth=1, alpha=0.5, zorder=1)
ax.scatter(fixed_pts[:, 0], fixed_pts[:, 1], color='red',
marker='+', alpha=0.9, edgecolors=None)
plt.xlabel('X')
plt.ylabel('Y')
ax = f.add_subplot(132)
for ip in np.arange(nLC):
plt.plot(pLC[:, ip, 0], pLC[:, ip, 2],
linewidth=1, alpha=0.5, zorder=1)
ax.scatter(fixed_pts[:, 0], fixed_pts[:, 2], color='red',
marker='.', alpha=0.8, edgecolors=None,
zorder=3)
plt.xlabel('X')
plt.ylabel('Z')
ax = f.add_subplot(133)
for ip in np.arange(nLC):
plt.plot(pLC[:, ip, 1], pLC[:, ip, 2],
linewidth=1, alpha=0.5, zorder=1)
ax.scatter(fixed_pts[:, 1], fixed_pts[:, 2], color='red',
marker='.', alpha=0.8, edgecolors=None,
zorder=3)
plt.xlabel('Y')
plt.ylabel('Z')
plt.tight_layout()
plt.savefig(savefile+'_structure.pdf', bbox_inches='tight', dpi=144)
def find_fixed_pt(ode, x0):
def ode_fun(x):
''' need convert x to tensor '''
shape = x.shape
x0 = torch.tensor(x).float().view(-1, ode.nVar)
f = ode(x0)
return f.detach().numpy().reshape(shape)
xfix = pyopt.fsolve(ode_fun, x0, full_output=1)
return xfix
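# A minimal usage sketch for find_fixed_pt (illustrative only): `ode_net` is
# assumed to be a trained onet.ODENet with nVar = 3 and the initial guess is a
# placeholder value.
def example_find_fixed_pt(ode_net):
    x0 = torch.tensor([1.0, 1.0, 0.5]).float()
    sol = find_fixed_pt(ode_net, x0)
    # fsolve is called with full_output=1, so sol = (x_fix, infodict, ier, mesg)
    # and ier == 1 indicates convergence.
    x_fix, infodict, ier, mesg = sol
    return x_fix if ier == 1 else None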
def find_limit_cycle(ode_net, x0, T0, niter=100, lr=0.00128, d_fix=1):
""" Find the limit cycle for given initial points and period
        by using a least-squares method with the Adam optimizer in PyTorch
d_fix: x0[d_fix] is fixed
"""
x = torch.nn.Parameter(torch.tensor(x0, requires_grad=True).float())
T = torch.nn.Parameter(torch.tensor(T0, requires_grad=True).float())
optimizer = optim.Adam([{'params': [x, T]}], lr=lr)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
'min',
factor=0.5,
patience=8)
nt = 80
T_beta = 10
Tth = torch.tensor(T0, requires_grad=False).float()
for e in range(niter):
xmiddle = ode_net.ode_rk3(x, T/2./nt, nt, test=False)
xt = ode_net.ode_rk3(xmiddle, T/2./nt, nt, test=False)
loss = (torch.sum((xt-x)**2) + T_beta * F.relu(Tth/3-T)
+ T_beta * F.relu(1.0-torch.sum(xmiddle-x)**2))
optimizer.zero_grad()
loss.backward()
x.grad.data[d_fix] = 0
nn.utils.clip_grad_norm_([x, T], 1.0)
optimizer.step()
scheduler.step(loss)
last_lr = optimizer.param_groups[0]["lr"]
if loss < 9e-5:
break
if e % 5 == 0 or e == niter-1:
print(f'iter:{e+1:4d}/{niter}', end=' ')
print(f'loss: {loss.item():.3e}', end=' ')
print(f'x: {x.data}', end=' ')
print(f'T: {T.data}', end=' ')
print(f'lr: {last_lr}', flush=True)
return x.detach().data, T.detach().data
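# A minimal usage sketch for find_limit_cycle (illustrative only): the start
# point and period below are rough placeholders and `ode_net` is assumed to be
# a trained onet.ODENet with nVar = 3.
def example_find_limit_cycle(ode_net):
    x0 = np.array([1.1, 1.8, 3.2])
    T0 = np.array([1.3])
    # x0[1] is held fixed during the optimization because d_fix defaults to 1.
    x_lc, T_lc = find_limit_cycle(ode_net, x0, T0, niter=200, lr=1e-3)
    return x_lc, T_lc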
def estimate_Lyapunov_exp1(data, dt, P, J, m, K):
''' a quick implementation
dt: the time stepsize of the series
P: mean period
J: time lag
m: embedding dimension
K: number of distances used to fit the index
'''
# Step 0: prepare X
N = len(data)
M = N - (m-1)*J
nbs = np.zeros((M-2*K, ), dtype=int)
d = np.zeros((2*K, M-2*K), dtype=np.float64)
Xt = np.zeros((m, M), dtype=np.float64)
dmax = np.sqrt((np.max(data) - np.min(data))**2 * m) + 1.0
for j in np.arange(m):
Xt[j, :] = data[j*J:j*J+M]
X = Xt.transpose()
# Step 1: find neighbor index with minum distance to i
# but with index distance > P
for j in np.arange(M-2*K):
dist = np.linalg.norm(X[0:M-2*K, :] - X[j, :], ord=2, axis=1)
ii = np.arange(M-2*K)
i_mask = np.logical_and(ii >= j-P, ii <= j+P)
dist[i_mask] = dmax
nbs[j] = np.argmin(dist)
# Step 2: calculate d_j(i)
for i in np.arange(2*K):
j = np.arange(M-2*K)
j1 = j + i
j2 = nbs[j] + i
d[i, j] = np.linalg.norm(X[j1, :]-X[j2, :], ord=2, axis=1)
# Step 3: average over j
y = np.mean(np.log(d+1e-20), axis=1) / dt
ii = np.arange(int(0.2*K), 2*K)
poly = np.polyfit(ii, y[ii], deg=1)
print('lsq coef =', poly)
print('Lyapunov index ~=', poly[0])
plt.subplot(224)
plt.plot(y)
plt.xlabel('k')
plt.ylabel('<log(d(k))>')
plt.title(f'Estimated Lyapunov index ~={poly[0]}')
plt.draw()
plt.pause(1)
plt.close()
return poly[0], y
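# The estimator above follows a Rosenstein-style small-data scheme: delay-embed
# the series, pair each point with its nearest neighbour outside a temporal
# window of width P, average the log-divergence over 2*K steps and fit a line
# to that curve; its slope is reported as the largest Lyapunov exponent.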
def plot_fft(x, y, th=1e-4):
""" Do FFT analysis on time series, find its mean period
x: independ variable
y: depend variable
th: threshold below which the frequency will not be plotted
"""
n = x.size
Lx = x[-1]-x[0]
yf = np.fft.rfft(y)
xf = np.fft.rfftfreq(n, d=Lx/n)
fig = plt.figure(figsize=[9, 9])
ax = fig.add_subplot(211)
ax.plot(x, y)
plt.title('1) first component of ODE solution')
ax = fig.add_subplot(223)
yf = yf / (n/2)
ii = (np.abs(yf) > th)
ii[0] = False
plt.plot(xf[ii], np.abs(yf[ii]))
T0 = 1.0/np.mean(xf*np.abs(yf))
plt.title('2) power spectrum')
plt.draw()
plt.pause(2)
return T0
def calc_Lyapunov_exp1(lz_net, T0=5, nOut=5000, dt=0.01):
print(f'T0={T0}, nOut={nOut}, dt={dt}')
T = int(dt*nOut)
nOut_tot = int((T+T0)/dt)
Path = lz_net.gen_sample_paths(nS=1, T=T+T0, dt=0.001, nOut=nOut_tot)
data = Path[::2, 0].numpy()
data = data[nOut_tot-nOut:]
print(f'dt={dt}, T={T}, len(data)=', data.shape)
x = np.arange(nOut) * dt
Tmean = plot_fft(x, data)
K = int(2/dt)
P = nOut//15
print('Tmean=', Tmean)
Lindex, yy = estimate_Lyapunov_exp1(data, dt, P=P, J=11, m=5, K=K)
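# analyze_Lorenz_structure below seeds the fixed-point search with the
# classical Lorenz equilibria C+/- = (+-sqrt(b*(r-1)), +-sqrt(b*(r-1)), r-1),
# assuming the learned coordinates remain close to the original Lorenz
# variables.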
def analyze_Lorenz_structure(ode_net, r, b, lr, niter):
q = np.sqrt(b*(r-1.0))
x10 = np.array([q, q, r-1.0])
x20 = np.array([-q, -q, r-1.0])
x10 = torch.tensor(x10).float()
x20 = torch.tensor(x20).float()
x1 = find_fixed_pt(ode_net, x10)
x2 = find_fixed_pt(ode_net, x20)
print('Initial guess of two fixed points are\n',
f' x1={x10}\n',
f' x2={x20}')
print('The two fixed points found are\n',
f' x1={x1}\n',
f' x2={x2}')
x1 = x1[0]
x2 = x2[0]
x1t = torch.tensor(x1).float()
f1 = ode_net(x1t)
x2t = torch.tensor(x2).float()
f2 = ode_net(x2t)
print('Check the two fixed points found\n',
f' f(x1)={f1}\n',
f' f(x2)={f2}')
fixed_pts = np.vstack((x1, x2))
if r == 16:
nLC = 2
x0 = np.zeros((nLC, ode_net.nVar))
T0 = np.zeros((nLC, 1))
x0[0] = np.array([1.1138, 1.8421, 3.1879], dtype=float)
T0[0] = 1.3027
x0[1] = np.array([-1.1055, -1.8277, 3.1954], dtype=float)
T0[1] = 1.3027
elif r == 22:
nLC = 2
x0 = np.zeros((nLC, ode_net.nVar))
T0 = np.zeros((nLC, 1))
x0[0] = np.array([10.3266, 13.3565, 20.1329], dtype=float)
T0[0] = 0.7638
x0[1] = np.array([-10.3266, -13.3565, 20.1329], dtype=float)
T0[1] = 0.7638
else:
nLC = 0
nPC = ode_net.nVar
lcs = np.zeros((nLC, nPC+1), dtype=float)
for i in np.arange(nLC):
lcs[i, 0:nPC], lcs[i, nPC] = find_limit_cycle(ode_net, x0[i], T0[i],
niter=niter, lr=lr)
        print('The start point and period of the limit cycle are\n',
f' xlc={lcs[i,0:nPC]}\n',
f' Tlc={lcs[i, nPC]}')
return fixed_pts, lcs
| 35.493119 | 96 | 0.524717 |
73fe597755d49c91373e9c4e7a38ac985f6afa8d | 882 | py | Python | Keras_tensorflow_nightly/source2.7/tensorflow/tools/api/generator/api/keras/optimizers/__init__.py | Con-Mi/lambda-packs | b23a8464abdd88050b83310e1d0e99c54dac28ab | [
"MIT"
] | 3 | 2019-04-01T11:03:04.000Z | 2019-12-31T02:17:15.000Z | Keras_tensorflow_nightly/source2.7/tensorflow/tools/api/generator/api/keras/optimizers/__init__.py | Con-Mi/lambda-packs | b23a8464abdd88050b83310e1d0e99c54dac28ab | [
"MIT"
] | 1 | 2021-04-15T18:46:45.000Z | 2021-04-15T18:46:45.000Z | Keras_tensorflow_nightly/source2.7/tensorflow/tools/api/generator/api/keras/optimizers/__init__.py | Con-Mi/lambda-packs | b23a8464abdd88050b83310e1d0e99c54dac28ab | [
"MIT"
] | 1 | 2021-09-23T13:43:07.000Z | 2021-09-23T13:43:07.000Z | """Imports for Python API.
This file is MACHINE GENERATED! Do not edit.
Generated by: tensorflow/tools/api/generator/create_python_api.py script.
"""
from tensorflow.python.keras._impl.keras.optimizers import Adadelta
from tensorflow.python.keras._impl.keras.optimizers import Adagrad
from tensorflow.python.keras._impl.keras.optimizers import Adam
from tensorflow.python.keras._impl.keras.optimizers import Adamax
from tensorflow.python.keras._impl.keras.optimizers import Nadam
from tensorflow.python.keras._impl.keras.optimizers import Optimizer
from tensorflow.python.keras._impl.keras.optimizers import RMSprop
from tensorflow.python.keras._impl.keras.optimizers import SGD
from tensorflow.python.keras._impl.keras.optimizers import deserialize
from tensorflow.python.keras._impl.keras.optimizers import get
from tensorflow.python.keras._impl.keras.optimizers import serialize | 55.125 | 73 | 0.85034 |
73fe7d1f5be0763c0fd49d1597710bb4f2de7190 | 45,216 | py | Python | vyper/functions/functions.py | daejunpark/viper | a9281385661e95610ec0b1e163fc60ebba4d7f01 | [
"MIT"
] | 1 | 2021-04-23T21:48:20.000Z | 2021-04-23T21:48:20.000Z | vyper/functions/functions.py | iamonuwa/vyper | 4908ae7a671301dd487f9a087e2e7c8e1813879b | [
"MIT"
] | null | null | null | vyper/functions/functions.py | iamonuwa/vyper | 4908ae7a671301dd487f9a087e2e7c8e1813879b | [
"MIT"
] | null | null | null | import hashlib
from vyper import ast
from vyper.exceptions import (
ConstancyViolationException,
InvalidLiteralException,
ParserException,
StructureException,
TypeMismatchException,
)
from vyper.parser.expr import (
Expr,
)
from vyper.parser.keccak256_helper import (
keccak256_helper,
)
from vyper.parser.parser_utils import (
LLLnode,
add_variable_offset,
byte_array_to_num,
get_length,
get_number_as_fraction,
getpos,
make_byte_array_copier,
make_byte_slice_copier,
unwrap_location,
)
from vyper.signatures.function_signature import (
VariableRecord,
)
from vyper.types import (
BaseType,
ByteArrayLike,
ByteArrayType,
ListType,
StringType,
TupleType,
are_units_compatible,
get_size_of_type,
is_base_type,
)
from vyper.types.convert import (
convert,
)
from vyper.utils import (
DECIMAL_DIVISOR,
RLP_DECODER_ADDRESS,
MemoryPositions,
SizeLimits,
bytes_to_int,
fourbytes_to_int,
keccak256,
)
from .signatures import (
Optional,
signature,
)
SHA256_ADDRESS = 2
SHA256_BASE_GAS = 60
SHA256_PER_WORD_GAS = 12
def enforce_units(typ, obj, expected):
if not are_units_compatible(typ, expected):
raise TypeMismatchException("Invalid units", obj)
def get_keyword(expr, keyword):
for kw in expr.keywords:
if kw.arg == keyword:
return kw.value
# This should never happen, as kwargs['value'] will KeyError first.
# Leaving exception for other use cases.
raise Exception(f"Keyword {keyword} not found") # pragma: no cover
# like `assert foo`, but doesn't check constancy.
# currently no option for reason string (easy to add, just need to refactor
# vyper.parser.stmt so we can use _assert_reason).
@signature('bool')
def assert_modifiable(expr, args, kwargs, context):
# cf. vyper.parser.stmt.parse_assert
return LLLnode.from_list(['assert', args[0]], typ=None, pos=getpos(expr))
@signature('decimal')
def floor(expr, args, kwargs, context):
return LLLnode.from_list(
[
'if',
['slt', args[0], 0],
['sdiv', ['sub', args[0], DECIMAL_DIVISOR - 1], DECIMAL_DIVISOR],
['sdiv', args[0], DECIMAL_DIVISOR]
],
typ=BaseType('int128', args[0].typ.unit, args[0].typ.positional),
pos=getpos(expr)
)
@signature('decimal')
def ceil(expr, args, kwards, context):
return LLLnode.from_list(
[
'if',
['slt', args[0], 0],
['sdiv', args[0], DECIMAL_DIVISOR],
['sdiv', ['add', args[0], DECIMAL_DIVISOR - 1], DECIMAL_DIVISOR]
],
typ=BaseType('int128', args[0].typ.unit, args[0].typ.positional),
pos=getpos(expr)
)
@signature(('uint256', 'int128', 'decimal'))
def as_unitless_number(expr, args, kwargs, context):
return LLLnode(
value=args[0].value,
args=args[0].args,
typ=BaseType(args[0].typ.typ, {}),
pos=getpos(expr),
)
def _convert(expr, context):
return convert(expr, context)
@signature(('bytes32', 'bytes', 'string'), start='int128', len='int128')
def _slice(expr, args, kwargs, context):
sub, start, length = args[0], kwargs['start'], kwargs['len']
if not are_units_compatible(start.typ, BaseType('int128')):
raise TypeMismatchException("Type for slice start index must be a unitless number", expr)
# Expression representing the length of the slice
if not are_units_compatible(length.typ, BaseType('int128')):
raise TypeMismatchException("Type for slice length must be a unitless number", expr)
if is_base_type(sub.typ, 'bytes32'):
if (start.typ.is_literal and length.typ.is_literal) and \
not (0 <= start.value + length.value <= 32):
raise InvalidLiteralException(
'Invalid start / length values needs to be between 0 and 32.',
expr,
)
sub_typ_maxlen = 32
else:
sub_typ_maxlen = sub.typ.maxlen
# Get returntype string or bytes
if isinstance(args[0].typ, ByteArrayType) or is_base_type(sub.typ, 'bytes32'):
ReturnType = ByteArrayType
else:
ReturnType = StringType
# Node representing the position of the output in memory
np = context.new_placeholder(ReturnType(maxlen=sub_typ_maxlen + 32))
placeholder_node = LLLnode.from_list(np, typ=sub.typ, location='memory')
placeholder_plus_32_node = LLLnode.from_list(np + 32, typ=sub.typ, location='memory')
# Copies over bytearray data
if sub.location == 'storage':
adj_sub = LLLnode.from_list(
['add', ['sha3_32', sub], ['add', ['div', '_start', 32], 1]],
typ=sub.typ,
location=sub.location,
)
else:
adj_sub = LLLnode.from_list(
['add', sub, ['add', ['sub', '_start', ['mod', '_start', 32]], 32]],
typ=sub.typ,
location=sub.location,
)
if is_base_type(sub.typ, 'bytes32'):
adj_sub = LLLnode.from_list(
sub.args[0], typ=sub.typ, location="memory"
)
copier = make_byte_slice_copier(
placeholder_plus_32_node,
adj_sub,
['add', '_length', 32],
sub_typ_maxlen,
pos=getpos(expr),
)
# New maximum length in the type of the result
newmaxlen = length.value if not len(length.args) else sub_typ_maxlen
if is_base_type(sub.typ, 'bytes32'):
maxlen = 32
else:
maxlen = ['mload', Expr(sub, context=context).lll_node] # Retrieve length of the bytes.
out = [
'with', '_start', start, [
'with', '_length', length, [
'with', '_opos', ['add', placeholder_node, ['mod', '_start', 32]], [
'seq',
['assert', ['le', ['add', '_start', '_length'], maxlen]],
copier,
['mstore', '_opos', '_length'],
'_opos'
],
],
],
]
return LLLnode.from_list(out, typ=ReturnType(newmaxlen), location='memory', pos=getpos(expr))
@signature(('bytes', 'string'))
def _len(expr, args, kwargs, context):
return get_length(args[0])
def concat(expr, context):
args = [Expr(arg, context).lll_node for arg in expr.args]
if len(args) < 2:
raise StructureException("Concat expects at least two arguments", expr)
prev_type = ''
for _, (expr_arg, arg) in enumerate(zip(expr.args, args)):
if not isinstance(arg.typ, ByteArrayLike) and not is_base_type(arg.typ, 'bytes32'):
raise TypeMismatchException("Concat expects string, bytes or bytes32 objects", expr_arg)
current_type = (
'bytes'
if isinstance(arg.typ, ByteArrayType) or is_base_type(arg.typ, 'bytes32')
else 'string'
)
if prev_type and current_type != prev_type:
raise TypeMismatchException(
(
"Concat expects consistant use of string or byte types, "
"user either bytes or string."
),
expr_arg,
)
prev_type = current_type
if current_type == 'string':
ReturnType = StringType
else:
ReturnType = ByteArrayType
# Maximum length of the output
total_maxlen = sum([
arg.typ.maxlen if isinstance(arg.typ, ByteArrayLike) else 32 for arg in args
])
# Node representing the position of the output in memory
placeholder = context.new_placeholder(ReturnType(total_maxlen))
# Object representing the output
seq = []
# For each argument we are concatenating...
for arg in args:
# Start pasting into a position the starts at zero, and keeps
# incrementing as we concatenate arguments
placeholder_node = LLLnode.from_list(
['add', placeholder, '_poz'],
typ=ReturnType(total_maxlen),
location='memory',
)
placeholder_node_plus_32 = LLLnode.from_list(
['add', ['add', placeholder, '_poz'], 32],
typ=ReturnType(total_maxlen),
location='memory',
)
if isinstance(arg.typ, ReturnType):
# Ignore empty strings
if arg.typ.maxlen == 0:
continue
# Get the length of the current argument
if arg.location == "memory":
length = LLLnode.from_list(['mload', '_arg'], typ=BaseType('int128'))
argstart = LLLnode.from_list(
['add', '_arg', 32],
typ=arg.typ,
location=arg.location,
)
elif arg.location == "storage":
length = LLLnode.from_list(['sload', ['sha3_32', '_arg']], typ=BaseType('int128'))
argstart = LLLnode.from_list(
['add', ['sha3_32', '_arg'], 1],
typ=arg.typ,
location=arg.location,
)
# Make a copier to copy over data from that argument
seq.append([
'with', '_arg', arg, [
'seq',
make_byte_slice_copier(
placeholder_node_plus_32,
argstart,
length,
arg.typ.maxlen, pos=getpos(expr),
),
# Change the position to start at the correct
# place to paste the next value
['set', '_poz', ['add', '_poz', length]],
],
])
else:
seq.append([
'seq',
['mstore', ['add', placeholder_node, 32], unwrap_location(arg)],
['set', '_poz', ['add', '_poz', 32]],
])
    # The position, after all arguments are processed, equals the total
# length. Paste this in to make the output a proper bytearray
seq.append(['mstore', placeholder, '_poz'])
# Memory location of the output
seq.append(placeholder)
return LLLnode.from_list(
['with', '_poz', 0, ['seq'] + seq],
typ=ReturnType(total_maxlen),
location='memory',
pos=getpos(expr),
annotation='concat',
)
@signature(('bytes_literal', 'str_literal', 'bytes', 'string', 'bytes32'))
def _sha3(expr, args, kwargs, context):
raise StructureException("sha3 function has been deprecated in favor of keccak256")
@signature(('bytes_literal', 'str_literal', 'bytes', 'string', 'bytes32'))
def _keccak256(expr, args, kwargs, context):
return keccak256_helper(expr, args, kwargs, context)
def _make_sha256_call(inp_start, inp_len, out_start, out_len):
return [
'assert', [
'call',
['gas'], # gas
SHA256_ADDRESS, # address
0, # value
inp_start,
inp_len,
out_start,
out_len
]
]
@signature(('bytes_literal', 'str_literal', 'bytes', 'string', 'bytes32'))
def sha256(expr, args, kwargs, context):
sub = args[0]
# Literal input
if isinstance(sub, bytes):
return LLLnode.from_list(
bytes_to_int(hashlib.sha256(sub).digest()),
typ=BaseType('bytes32'),
pos=getpos(expr)
)
# bytes32 input
elif is_base_type(sub.typ, 'bytes32'):
return LLLnode.from_list(
[
'seq',
['mstore', MemoryPositions.FREE_VAR_SPACE, sub],
_make_sha256_call(
inp_start=MemoryPositions.FREE_VAR_SPACE,
inp_len=32,
out_start=MemoryPositions.FREE_VAR_SPACE,
out_len=32
),
['mload', MemoryPositions.FREE_VAR_SPACE] # push value onto stack
],
typ=BaseType('bytes32'),
pos=getpos(expr),
add_gas_estimate=SHA256_BASE_GAS + 1 * SHA256_PER_WORD_GAS
)
    # bytearray-like input
if sub.location == "storage":
# Copy storage to memory
placeholder = context.new_placeholder(sub.typ)
placeholder_node = LLLnode.from_list(placeholder, typ=sub.typ, location='memory')
copier = make_byte_array_copier(
placeholder_node,
LLLnode.from_list('_sub', typ=sub.typ, location=sub.location),
)
return LLLnode.from_list(
[
'with', '_sub', sub, [
'seq',
copier,
_make_sha256_call(
inp_start=['add', placeholder, 32],
inp_len=['mload', placeholder],
out_start=MemoryPositions.FREE_VAR_SPACE,
out_len=32
),
['mload', MemoryPositions.FREE_VAR_SPACE]
],
],
typ=BaseType('bytes32'),
pos=getpos(expr),
add_gas_estimate=SHA256_BASE_GAS + sub.typ.maxlen * SHA256_PER_WORD_GAS
)
elif sub.location == "memory":
return LLLnode.from_list(
[
'with', '_sub', sub, [
'seq',
_make_sha256_call(
inp_start=['add', '_sub', 32],
inp_len=['mload', '_sub'],
out_start=MemoryPositions.FREE_VAR_SPACE,
out_len=32
),
['mload', MemoryPositions.FREE_VAR_SPACE]
]
],
typ=BaseType('bytes32'),
pos=getpos(expr),
add_gas_estimate=SHA256_BASE_GAS + sub.typ.maxlen * SHA256_PER_WORD_GAS
)
else:
# This should never happen, but just left here for future compiler-writers.
raise Exception(f"Unsupported location: {sub.location}") # pragma: no test
@signature('str_literal', 'name_literal')
def method_id(expr, args, kwargs, context):
if b' ' in args[0]:
raise TypeMismatchException('Invalid function signature no spaces allowed.')
method_id = fourbytes_to_int(keccak256(args[0])[:4])
if args[1] == 'bytes32':
return LLLnode(method_id, typ=BaseType('bytes32'), pos=getpos(expr))
elif args[1] == 'bytes[4]':
placeholder = LLLnode.from_list(context.new_placeholder(ByteArrayType(4)))
return LLLnode.from_list(
['seq',
['mstore', ['add', placeholder, 4], method_id],
['mstore', placeholder, 4], placeholder],
typ=ByteArrayType(4), location='memory', pos=getpos(expr))
else:
raise StructureException('Can only produce bytes32 or bytes[4] as outputs')
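# Worked example of the selector computation above: the first four bytes of
# keccak256(b"transfer(address,uint256)") are a9 05 9c bb, so that signature's
# method id is 0xa9059cbb.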
@signature('bytes32', 'uint256', 'uint256', 'uint256')
def ecrecover(expr, args, kwargs, context):
placeholder_node = LLLnode.from_list(
context.new_placeholder(ByteArrayType(128)), typ=ByteArrayType(128), location='memory'
)
return LLLnode.from_list([
'seq',
['mstore', placeholder_node, args[0]],
['mstore', ['add', placeholder_node, 32], args[1]],
['mstore', ['add', placeholder_node, 64], args[2]],
['mstore', ['add', placeholder_node, 96], args[3]],
['pop', ['call', 3000, 1, 0, placeholder_node, 128, MemoryPositions.FREE_VAR_SPACE, 32]],
['mload', MemoryPositions.FREE_VAR_SPACE],
], typ=BaseType('address'), pos=getpos(expr))
def avo(arg, ind, pos):
return unwrap_location(add_variable_offset(arg, LLLnode.from_list(ind, 'int128'), pos=pos))
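# The hard-coded call targets in ecrecover, ecadd and ecmul are Ethereum
# precompile addresses: 1 is ecrecover, while 6 and 7 are the alt_bn128 point
# addition and scalar multiplication precompiles (EIP-196).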
@signature('uint256[2]', 'uint256[2]')
def ecadd(expr, args, kwargs, context):
placeholder_node = LLLnode.from_list(
context.new_placeholder(ByteArrayType(128)), typ=ByteArrayType(128), location='memory'
)
pos = getpos(expr)
o = LLLnode.from_list([
'seq',
['mstore', placeholder_node, avo(args[0], 0, pos)],
['mstore', ['add', placeholder_node, 32], avo(args[0], 1, pos)],
['mstore', ['add', placeholder_node, 64], avo(args[1], 0, pos)],
['mstore', ['add', placeholder_node, 96], avo(args[1], 1, pos)],
['assert', ['call', 500, 6, 0, placeholder_node, 128, placeholder_node, 64]],
placeholder_node,
], typ=ListType(BaseType('uint256'), 2), pos=getpos(expr), location='memory')
return o
@signature('uint256[2]', 'uint256')
def ecmul(expr, args, kwargs, context):
placeholder_node = LLLnode.from_list(
context.new_placeholder(ByteArrayType(128)), typ=ByteArrayType(128), location='memory'
)
pos = getpos(expr)
o = LLLnode.from_list([
'seq',
['mstore', placeholder_node, avo(args[0], 0, pos)],
['mstore', ['add', placeholder_node, 32], avo(args[0], 1, pos)],
['mstore', ['add', placeholder_node, 64], args[1]],
['assert', ['call', 40000, 7, 0, placeholder_node, 96, placeholder_node, 64]],
placeholder_node,
], typ=ListType(BaseType('uint256'), 2), pos=pos, location='memory')
return o
def _memory_element_getter(index):
return LLLnode.from_list(
['mload', ['add', '_sub', ['add', 32, ['mul', 32, index]]]],
typ=BaseType('int128'),
)
def _storage_element_getter(index):
return LLLnode.from_list(
['sload', ['add', ['sha3_32', '_sub'], ['add', 1, index]]],
typ=BaseType('int128'),
)
@signature('bytes', 'int128', type=Optional('name_literal', 'bytes32'))
def extract32(expr, args, kwargs, context):
sub, index = args
ret_type = kwargs['type']
# Get length and specific element
if sub.location == "memory":
lengetter = LLLnode.from_list(['mload', '_sub'], typ=BaseType('int128'))
elementgetter = _memory_element_getter
elif sub.location == "storage":
lengetter = LLLnode.from_list(['sload', ['sha3_32', '_sub']], typ=BaseType('int128'))
elementgetter = _storage_element_getter
# TODO: unclosed if/elif clause. Undefined behavior if `sub.location`
# isn't one of `memory`/`storage`
# Special case: index known to be a multiple of 32
if isinstance(index.value, int) and not index.value % 32:
o = LLLnode.from_list(
[
'with', '_sub', sub,
elementgetter(['div', ['clamp', 0, index, ['sub', lengetter, 32]], 32])
],
typ=BaseType(ret_type),
annotation='extracting 32 bytes',
)
# General case
else:
o = LLLnode.from_list([
'with', '_sub', sub, [
'with', '_len', lengetter, [
'with', '_index', ['clamp', 0, index, ['sub', '_len', 32]], [
'with', '_mi32', ['mod', '_index', 32], [
'with', '_di32', ['div', '_index', 32],
[
'if',
'_mi32',
[
'add',
['mul', elementgetter('_di32'), ['exp', 256, '_mi32']],
[
'div',
elementgetter(['add', '_di32', 1]),
['exp', 256, ['sub', 32, '_mi32']],
],
],
elementgetter('_di32'),
],
],
],
],
],
], typ=BaseType(ret_type), pos=getpos(expr), annotation='extracting 32 bytes')
if ret_type == 'int128':
return LLLnode.from_list(
['clamp', ['mload', MemoryPositions.MINNUM], o, ['mload', MemoryPositions.MAXNUM]],
typ=BaseType('int128'),
pos=getpos(expr),
)
elif ret_type == 'address':
return LLLnode.from_list(
['uclamplt', o, ['mload', MemoryPositions.ADDRSIZE]],
typ=BaseType(ret_type),
pos=getpos(expr),
)
else:
return o
@signature(('num_literal', 'int128', 'uint256', 'decimal'), 'str_literal')
def as_wei_value(expr, args, kwargs, context):
# Denominations
wei_denominations = {
("wei", ): 1,
("femtoether", "kwei", "babbage"): 10**3,
("picoether", "mwei", "lovelace"): 10**6,
("nanoether", "gwei", "shannon"): 10**9,
("microether", "szabo", ): 10**12,
("milliether", "finney", ): 10**15,
("ether", ): 10**18,
("kether", "grand"): 10**21,
}
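    # Illustrative example: as_wei_value(3, "gwei") resolves denom_divisor to 10**9 and returns
    # 3 * 10**9 wei; decimal literals are first reduced to a num/den fraction so the final
    # division below stays exact.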
value, denom_name = args[0], args[1].decode()
denom_divisor = next((v for k, v in wei_denominations.items() if denom_name in k), False)
if not denom_divisor:
raise InvalidLiteralException(
f"Invalid denomination: {denom_name}, valid denominations are: "
f"{','.join(x[0] for x in wei_denominations)}",
expr.args[1]
)
# Compute the amount of wei and return that value
if isinstance(value, (int, float)):
expr_args_0 = expr.args[0]
        # For a constant reference, fetch the value node from the constant's assignment.
if context.constants.ast_is_constant(expr.args[0]):
expr_args_0 = context.constants._constants_ast[expr.args[0].id]
numstring, num, den = get_number_as_fraction(expr_args_0, context)
if denom_divisor % den:
max_len = len(str(denom_divisor))-1
raise InvalidLiteralException(
f"Wei value of denomination '{denom_name}' has maximum {max_len} decimal places",
expr.args[0]
)
sub = num * denom_divisor // den
elif value.typ.is_literal:
if value.value <= 0:
raise InvalidLiteralException("Negative wei value not allowed", expr)
sub = ['mul', value.value, denom_divisor]
elif value.typ.typ == 'uint256':
sub = ['mul', value, denom_divisor]
else:
sub = ['div', ['mul', value, denom_divisor], DECIMAL_DIVISOR]
return LLLnode.from_list(
sub,
typ=BaseType('uint256', {'wei': 1}),
location=None,
pos=getpos(expr),
)
zero_value = LLLnode.from_list(0, typ=BaseType('uint256', {'wei': 1}))
false_value = LLLnode.from_list(0, typ=BaseType('bool', is_literal=True))
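# raw_call sketch: zero_value/false_value above are LLL literals used as defaults for the
# optional kwargs. The call data is copied into a fresh memory placeholder, a CALL (or a
# DELEGATECALL when delegate_call=True) is issued with the supplied gas and value, and the
# return data is exposed as a length-prefixed bytes[outsize] in memory.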
@signature(
'address',
'bytes',
outsize='num_literal',
gas='uint256',
value=Optional('uint256', zero_value),
delegate_call=Optional('bool', false_value),
)
def raw_call(expr, args, kwargs, context):
to, data = args
gas, value, outsize, delegate_call = (
kwargs['gas'],
kwargs['value'],
kwargs['outsize'],
kwargs['delegate_call'],
)
if delegate_call.typ.is_literal is False:
raise TypeMismatchException(
'The delegate_call parameter has to be a static/literal boolean value.'
)
if context.is_constant():
raise ConstancyViolationException(
f"Cannot make calls from {context.pp_constancy()}",
expr,
)
if value != zero_value:
enforce_units(
value.typ,
get_keyword(expr, 'value'),
BaseType('uint256', {'wei': 1}),
)
placeholder = context.new_placeholder(data.typ)
placeholder_node = LLLnode.from_list(placeholder, typ=data.typ, location='memory')
copier = make_byte_array_copier(placeholder_node, data, pos=getpos(expr))
output_placeholder = context.new_placeholder(ByteArrayType(outsize))
output_node = LLLnode.from_list(
output_placeholder,
typ=ByteArrayType(outsize),
location='memory',
)
if delegate_call.value == 1:
z = LLLnode.from_list(
[
'seq',
copier,
[
'assert',
[
'delegatecall',
gas,
to,
['add', placeholder_node, 32],
['mload', placeholder_node],
['add', output_node, 32],
outsize,
],
],
['mstore', output_node, outsize],
output_node,
],
typ=ByteArrayType(outsize),
location='memory',
pos=getpos(expr),
)
else:
z = LLLnode.from_list(
[
'seq',
copier,
[
'assert',
[
'call',
gas,
to,
value,
['add', placeholder_node, 32],
['mload', placeholder_node],
['add', output_node, 32],
outsize,
],
],
['mstore', output_node, outsize],
output_node,
],
typ=ByteArrayType(outsize), location='memory', pos=getpos(expr)
)
return z
@signature('address', 'uint256')
def send(expr, args, kwargs, context):
to, value = args
if context.is_constant():
raise ConstancyViolationException(
f"Cannot send ether inside {context.pp_constancy()}!",
expr,
)
enforce_units(value.typ, expr.args[1], BaseType('uint256', {'wei': 1}))
return LLLnode.from_list(
['assert', ['call', 0, to, value, 0, 0, 0, 0]],
typ=None,
pos=getpos(expr),
)
@signature('address')
def selfdestruct(expr, args, kwargs, context):
if context.is_constant():
raise ConstancyViolationException(
f"Cannot {expr.func.id} inside {context.pp_constancy()}!",
expr.func,
)
return LLLnode.from_list(['selfdestruct', args[0]], typ=None, pos=getpos(expr))
@signature('uint256')
def blockhash(expr, args, kwargs, context):
return LLLnode.from_list(
['blockhash', ['uclamplt', ['clampge', args[0], ['sub', ['number'], 256]], 'number']],
typ=BaseType('bytes32'),
pos=getpos(expr),
)
@signature('bytes', '*')
def _RLPlist(expr, args, kwargs, context):
# Second argument must be a list of types
if not isinstance(args[1], ast.List):
raise TypeMismatchException("Expecting list of types for second argument", args[1])
if len(args[1].elts) == 0:
raise TypeMismatchException("RLP list must have at least one item", expr)
if len(args[1].elts) > 32:
raise TypeMismatchException("RLP list must have at most 32 items", expr)
# Get the output format
_format = []
for arg in args[1].elts:
if isinstance(arg, ast.Name) and arg.id == "bytes":
subtyp = ByteArrayType(args[0].typ.maxlen)
else:
subtyp = context.parse_type(arg, 'memory')
if not isinstance(subtyp, BaseType):
raise TypeMismatchException("RLP lists only accept BaseTypes and byte arrays", arg)
if not is_base_type(subtyp, ('int128', 'uint256', 'bytes32', 'address', 'bool')):
raise TypeMismatchException(f"Unsupported base type: {subtyp.typ}", arg)
_format.append(subtyp)
output_type = TupleType(_format)
output_placeholder_type = ByteArrayType(
(2 * len(_format) + 1 + get_size_of_type(output_type)) * 32,
)
output_placeholder = context.new_placeholder(output_placeholder_type)
output_node = LLLnode.from_list(
output_placeholder,
typ=output_placeholder_type,
location='memory',
)
# Create a decoder for each element in the tuple
decoder = []
for i, typ in enumerate(_format):
# Decoder for bytes32
if is_base_type(typ, 'bytes32'):
decoder.append(LLLnode.from_list(
[
'seq',
[
'assert',
[
'eq',
[
'mload',
[
'add',
output_node,
['mload', ['add', output_node, 32 * i]],
],
],
32,
],
],
[
'mload',
[
'add',
32,
[
'add',
output_node,
['mload', ['add', output_node, 32 * i]],
],
],
],
],
typ,
annotation='getting and checking bytes32 item',
))
# Decoder for address
elif is_base_type(typ, 'address'):
decoder.append(LLLnode.from_list(
[
'seq',
[
'assert',
[
'eq',
[
'mload',
[
'add',
output_node,
['mload', ['add', output_node, 32 * i]],
],
],
20,
]
],
[
'mod',
[
'mload',
[
'add',
20,
['add', output_node, ['mload', ['add', output_node, 32 * i]]],
],
],
['mload', MemoryPositions.ADDRSIZE],
]
],
typ,
annotation='getting and checking address item',
))
# Decoder for bytes
elif isinstance(typ, ByteArrayType):
decoder.append(LLLnode.from_list(
[
'add',
output_node,
['mload', ['add', output_node, 32 * i]],
],
typ,
location='memory',
annotation='getting byte array',
))
# Decoder for num and uint256
elif is_base_type(typ, ('int128', 'uint256')):
bytez = LLLnode.from_list(
[
'add',
output_node,
['mload', ['add', output_node, 32 * i]],
],
typ,
location='memory',
annotation=f'getting and checking {typ.typ}',
)
decoder.append(byte_array_to_num(bytez, expr, typ.typ))
# Decoder for bools
elif is_base_type(typ, ('bool')):
# This is basically a really clever way to test for a
# length-prefixed one or zero. We take the 32 bytes starting one
# byte *after* the start of the length declaration; this includes
# the last 31 bytes of the length and the first byte of the value.
# 0 corresponds to length 0, first byte 0, and 257 corresponds to
# length 1, first byte \x01
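            # Worked example: True arrives as length 1 followed by byte 0x01, so the 32-byte
            # load starting one byte into the length word reads 1*256 + 1 = 257; False arrives
            # as length 0 and reads 0. Dividing by 257 therefore maps {0, 257} onto {0, 1}.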
decoder.append(LLLnode.from_list(
[
'with', '_ans', [
'mload',
[
'add',
1,
['add', output_node, ['mload', ['add', output_node, 32 * i]]]
],
],
[
'seq',
['assert', ['or', ['eq', '_ans', 0], ['eq', '_ans', 257]]],
['div', '_ans', 257],
],
],
typ,
annotation='getting and checking bool',
))
else:
            # Should never be reached: the base-type check above rejects anything else.
raise Exception("Type not yet supported") # pragma: no cover
# Copy the input data to memory
if args[0].location == "memory":
variable_pointer = args[0]
elif args[0].location == "storage":
placeholder = context.new_placeholder(args[0].typ)
placeholder_node = LLLnode.from_list(placeholder, typ=args[0].typ, location='memory')
copier = make_byte_array_copier(
placeholder_node,
LLLnode.from_list('_ptr', typ=args[0].typ, location=args[0].location),
)
variable_pointer = ['with', '_ptr', args[0], ['seq', copier, placeholder_node]]
else:
        # Should never be reached: byte arrays only live in memory or storage.
raise Exception("Location not yet supported") # pragma: no cover
# Decode the input data
initial_setter = LLLnode.from_list(
['seq',
['with', '_sub', variable_pointer,
['pop', ['call',
1500 + 400 * len(_format) + 10 * len(args),
LLLnode.from_list(RLP_DECODER_ADDRESS, annotation='RLP decoder'),
0,
['add', '_sub', 32],
['mload', '_sub'],
output_node,
64 * len(_format) + 32 + 32 * get_size_of_type(output_type)]]],
['assert', ['eq', ['mload', output_node], 32 * len(_format) + 32]]],
typ=None)
# Shove the input data decoder in front of the first variable decoder
decoder[0] = LLLnode.from_list(
['seq', initial_setter, decoder[0]],
typ=decoder[0].typ,
location=decoder[0].location,
)
return LLLnode.from_list(
["multi"] + decoder,
typ=output_type,
location='memory',
pos=getpos(expr),
)
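# raw_log sketch: emits the LOG0..LOG4 opcode matching the number of topics. A bytes32 payload
# is staged in a one-word placeholder, in-memory byte arrays are logged in place, and storage
# byte arrays are first copied into a memory placeholder.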
@signature('*', ('bytes32', 'bytes'))
def raw_log(expr, args, kwargs, context):
if not isinstance(args[0], ast.List) or len(args[0].elts) > 4:
raise StructureException("Expecting a list of 0-4 topics as first argument", args[0])
topics = []
for elt in args[0].elts:
arg = Expr.parse_value_expr(elt, context)
if not is_base_type(arg.typ, 'bytes32'):
raise TypeMismatchException("Expecting a bytes32 argument as topic", elt)
topics.append(arg)
if args[1].typ == BaseType('bytes32'):
placeholder = context.new_placeholder(BaseType('bytes32'))
return LLLnode.from_list(
['seq',
['mstore', placeholder, unwrap_location(args[1])],
[
"log" + str(len(topics)),
placeholder,
32,
] + topics], typ=None, pos=getpos(expr))
if args[1].location == "memory":
return LLLnode.from_list([
"with", "_arr", args[1], [
"log" + str(len(topics)),
["add", "_arr", 32],
["mload", "_arr"],
] + topics
], typ=None, pos=getpos(expr))
placeholder = context.new_placeholder(args[1].typ)
placeholder_node = LLLnode.from_list(placeholder, typ=args[1].typ, location='memory')
copier = make_byte_array_copier(
placeholder_node,
LLLnode.from_list('_sub', typ=args[1].typ, location=args[1].location),
pos=getpos(expr),
)
return LLLnode.from_list(
[
"with", "_sub", args[1],
[
"seq",
copier,
[
"log" + str(len(topics)),
["add", placeholder_node, 32],
["mload", placeholder_node],
] + topics
],
],
typ=None,
pos=getpos(expr),
)
@signature('uint256', 'uint256')
def bitwise_and(expr, args, kwargs, context):
return LLLnode.from_list(['and', args[0], args[1]], typ=BaseType('uint256'), pos=getpos(expr))
@signature('uint256', 'uint256')
def bitwise_or(expr, args, kwargs, context):
return LLLnode.from_list(['or', args[0], args[1]], typ=BaseType('uint256'), pos=getpos(expr))
@signature('uint256', 'uint256')
def bitwise_xor(expr, args, kwargs, context):
return LLLnode.from_list(['xor', args[0], args[1]], typ=BaseType('uint256'), pos=getpos(expr))
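# uint256_addmod/uint256_mulmod guard their inputs before the EVM ADDMOD/MULMOD opcodes run:
# the modulus must be non-zero, and the plain 256-bit sum/product must not wrap, which the
# asserts check with an add-then-compare and a multiply-then-divide round trip.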
@signature('uint256', 'uint256', 'uint256')
def uint256_addmod(expr, args, kwargs, context):
return LLLnode.from_list(
[
'seq',
['assert', args[2]],
['assert', ['or', ['iszero', args[1]], ['gt', ['add', args[0], args[1]], args[0]]]],
['addmod', args[0], args[1], args[2]],
],
typ=BaseType('uint256'),
pos=getpos(expr),
)
@signature('uint256', 'uint256', 'uint256')
def uint256_mulmod(expr, args, kwargs, context):
return LLLnode.from_list(
[
'seq',
['assert', args[2]],
['assert', [
'or',
['iszero', args[0]],
['eq', ['div', ['mul', args[0], args[1]], args[0]], args[1]],
]],
['mulmod', args[0], args[1], args[2]],
],
typ=BaseType('uint256'),
pos=getpos(expr),
)
@signature('uint256')
def bitwise_not(expr, args, kwargs, context):
return LLLnode.from_list(['not', args[0]], typ=BaseType('uint256'), pos=getpos(expr))
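# shift(x, s) semantics, illustratively: a positive s multiplies by 2**s (shift(x, 3) == x * 8
# modulo 2**256) and a negative s divides (shift(x, -3) == x // 8); |s| >= 256 yields 0 because
# 2**|s| overflows to zero in the EVM.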
@signature('uint256', 'int128')
def shift(expr, args, kwargs, context):
return LLLnode.from_list(
[
'with', '_v', args[0], [
'with', '_s', args[1], [
# If second argument is positive, left-shift so multiply by a power of two
# If it is negative, divide by a power of two
                    # note that if the abs of the second argument >= 256, then in the EVM
# 2**(second arg) = 0, and multiplying OR dividing by 0 gives 0
'if',
['slt', '_s', 0],
['div', '_v', ['exp', 2, ['sub', 0, '_s']]],
['mul', '_v', ['exp', 2, '_s']]
],
],
],
typ=BaseType('uint256'),
pos=getpos(expr),
)
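# Reading the assembly below: code_a is the deploy code -- it copies the 0x33-byte runtime blob
# into memory and returns it. The runtime blob copies calldata, DELEGATECALLs the 20-byte target
# address spliced between code_a and code_b by create_forwarder_to, and returns up to 0x1000
# bytes of output or reverts, much like the minimal-proxy pattern.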
def get_create_forwarder_to_bytecode():
from vyper.compile_lll import (
assembly_to_evm,
num_to_bytearray
)
code_a = [
'PUSH1', 0x33,
'PUSH1', 0x0c,
'PUSH1', 0x00,
'CODECOPY',
'PUSH1', 0x33,
'PUSH1', 0x00,
'RETURN',
'CALLDATASIZE',
'PUSH1', 0x00,
'PUSH1', 0x00,
'CALLDATACOPY',
'PUSH2', num_to_bytearray(0x1000),
'PUSH1', 0x00,
'CALLDATASIZE',
'PUSH1', 0x00,
'PUSH20', # [address to delegate to]
]
code_b = [
'GAS',
'DELEGATECALL',
'PUSH1', 0x2c, # jumpdest of whole program.
'JUMPI',
'PUSH1', 0x0,
'DUP1',
'REVERT',
'JUMPDEST',
'PUSH2', num_to_bytearray(0x1000),
'PUSH1', 0x00,
'RETURN'
]
return assembly_to_evm(code_a)[0] + (b'\x00' * 20) + assembly_to_evm(code_b)[0]
@signature('address', value=Optional('uint256', zero_value))
def create_forwarder_to(expr, args, kwargs, context):
value = kwargs['value']
if value != zero_value:
enforce_units(value.typ, get_keyword(expr, 'value'),
BaseType('uint256', {'wei': 1}))
if context.is_constant():
raise ConstancyViolationException(
f"Cannot make calls from {context.pp_constancy()}",
expr,
)
placeholder = context.new_placeholder(ByteArrayType(96))
kode = get_create_forwarder_to_bytecode()
high = bytes_to_int(kode[:32])
low = bytes_to_int((kode + b'\x00' * 32)[47:79])
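    # Splice the target address into the 20 zero bytes of the template: `high` writes the first
    # 32 code bytes, the address shifted into the top 160 bits lands at offset 27, and `low`
    # restores the remaining code bytes starting at offset 47.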
return LLLnode.from_list(
[
'seq',
['mstore', placeholder, high],
['mstore', ['add', placeholder, 27], ['mul', args[0], 2**96]],
['mstore', ['add', placeholder, 47], low],
['clamp_nonzero', ['create', value, placeholder, 96]],
],
typ=BaseType('address'),
pos=getpos(expr),
add_gas_estimate=11000,
)
@signature(('int128', 'decimal', 'uint256'), ('int128', 'decimal', 'uint256'))
def _min(expr, args, kwargs, context):
return minmax(expr, args, kwargs, context, True)
@signature(('int128', 'decimal', 'uint256'), ('int128', 'decimal', 'uint256'))
def _max(expr, args, kwargs, context):
return minmax(expr, args, kwargs, context, False)
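# minmax sketch: a signed (sgt/slt) or unsigned (gt/lt) comparison is chosen from the operand
# types; an int128 literal may be compared against a uint256 as long as the literal fits in the
# uint256 range, and the result type then follows the uint256 operand.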
def minmax(expr, args, kwargs, context, is_min):
def _can_compare_with_uint256(operand):
if operand.typ.typ == 'uint256':
return True
elif operand.typ.typ == 'int128' and operand.typ.is_literal and SizeLimits.in_bounds('uint256', operand.value): # noqa: E501
return True
return False
left, right = args[0], args[1]
if not are_units_compatible(left.typ, right.typ) and not are_units_compatible(right.typ, left.typ): # noqa: E501
raise TypeMismatchException("Units must be compatible", expr)
if left.typ.typ == 'uint256':
comparator = 'gt' if is_min else 'lt'
else:
comparator = 'sgt' if is_min else 'slt'
if left.typ.typ == right.typ.typ:
o = ['if', [comparator, '_l', '_r'], '_r', '_l']
otyp = left.typ
otyp.is_literal = False
elif _can_compare_with_uint256(left) and _can_compare_with_uint256(right):
o = ['if', [comparator, '_l', '_r'], '_r', '_l']
if right.typ.typ == 'uint256':
otyp = right.typ
else:
otyp = left.typ
otyp.is_literal = False
else:
raise TypeMismatchException(
f"Minmax types incompatible: {left.typ.typ} {right.typ.typ}"
)
return LLLnode.from_list(
['with', '_l', left, ['with', '_r', right, o]],
typ=otyp,
pos=getpos(expr),
)
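# sqrt inlines a small Vyper routine (sqrt_code below) that runs the Babylonian/Newton iteration
# z = (x/z + z) / 2, seeded with x/2 + 0.5, for at most 256 rounds or until it converges; the
# argument is staged in memory as variable `x` and the result is read back from `z`.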
@signature('decimal')
def sqrt(expr, args, kwargs, context):
from vyper.functions.utils import (
generate_inline_function,
)
arg = args[0]
sqrt_code = """
assert x >= 0.0
z: decimal = 0.0
if x == 0.0:
z = 0.0
else:
z = x / 2.0 + 0.5
y: decimal = x
for i in range(256):
if z == y:
break
y = z
z = (x / z + z) / 2.0
"""
x_type = BaseType('decimal')
placeholder_copy = ['pass']
# Steal current position if variable is already allocated.
if arg.value == 'mload':
new_var_pos = arg.args[0]
# Other locations need to be copied.
else:
new_var_pos = context.new_placeholder(x_type)
placeholder_copy = ['mstore', new_var_pos, arg]
# Create input variables.
variables = {
'x': VariableRecord(
name='x',
pos=new_var_pos,
typ=x_type,
mutable=False
)
}
# Generate inline LLL.
new_ctx, sqrt_lll = generate_inline_function(
code=sqrt_code,
variables=variables,
memory_allocator=context.memory_allocator
)
return LLLnode.from_list(
[
'seq_unchecked',
placeholder_copy, # load x variable
sqrt_lll,
['mload', new_ctx.vars['z'].pos] # unload z variable into the stack,
],
typ=BaseType('decimal'),
pos=getpos(expr),
)
def _clear():
raise ParserException(
"This function should never be called! `clear()` is currently handled "
"differently than other functions as it self modifies its input argument "
"statement. Please see `_clear()` in `stmt.py`"
)
DISPATCH_TABLE = {
'floor': floor,
'ceil': ceil,
'as_unitless_number': as_unitless_number,
'convert': _convert,
'slice': _slice,
'len': _len,
'concat': concat,
'sha3': _sha3,
'sha256': sha256,
'method_id': method_id,
'keccak256': _keccak256,
'ecrecover': ecrecover,
'ecadd': ecadd,
'ecmul': ecmul,
'extract32': extract32,
'as_wei_value': as_wei_value,
'raw_call': raw_call,
'RLPList': _RLPlist,
'blockhash': blockhash,
'bitwise_and': bitwise_and,
'bitwise_or': bitwise_or,
'bitwise_xor': bitwise_xor,
'bitwise_not': bitwise_not,
'uint256_addmod': uint256_addmod,
'uint256_mulmod': uint256_mulmod,
'sqrt': sqrt,
'shift': shift,
'create_forwarder_to': create_forwarder_to,
'min': _min,
'max': _max,
}
STMT_DISPATCH_TABLE = {
'assert_modifiable': assert_modifiable,
'clear': _clear,
'send': send,
'selfdestruct': selfdestruct,
'raw_call': raw_call,
'raw_log': raw_log,
'create_forwarder_to': create_forwarder_to,
}
BUILTIN_FUNCTIONS = {**STMT_DISPATCH_TABLE, **DISPATCH_TABLE}.keys()
| 34.228615 | 133 | 0.526119 |
73fe90c3c23419107aaa718f3bfa707a154534b7 | 2,560 | py | Python | core/kagura.py | AliasPedroKarim/Kagura | 892ee3daab31f0e55e7c810413617e89d5ce1317 | [
"MIT"
] | null | null | null | core/kagura.py | AliasPedroKarim/Kagura | 892ee3daab31f0e55e7c810413617e89d5ce1317 | [
"MIT"
] | null | null | null | core/kagura.py | AliasPedroKarim/Kagura | 892ee3daab31f0e55e7c810413617e89d5ce1317 | [
"MIT"
] | null | null | null | # -*- encoding:utf-8 -*-
import datetime
import json
import logging
import os
import sys
from jishaku.modules import resolve_extensions
from discord.ext import commands
from utils import colored_logger
class Kagura(commands.Bot):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.start_time = datetime.datetime.utcnow()
self.config_path = 'config.json'
# START
if os.path.exists(self.config_path):
with open(self.config_path, 'r') as f:
self.config: dict = json.load(f)
else:
with open(self.config_path, 'w') as f:
                json.dump({}, f)
            self.config: dict = {}
self.configs = {}
        # DATABASE
self.db = None
# LOGGER
logging.basicConfig(filename='kagura.log', level=logging.INFO)
self.logger = logging.getLogger("Kagura")
stdout = logging.StreamHandler(sys.stdout)
stdout.setLevel(logging.INFO)
stdout.setFormatter(colored_logger.ColoredFormatter(colored_logger.formatter_message(
"[$BOLD%(name)s$RESET][%(levelname)s] %(message)s $RESET($BOLD%(filename)s$RESET:%(lineno)d)")))
self.logger.addHandler(stdout)
# COMMANDS
self.load_commands()
# EVENTS
self.load_event()
def load_commands(self):
        # Load the jishaku debugging extension first
try:
self.load_extension('jishaku')
except Exception as e:
            self.logger.error(f'$REDError detected while loading $BLUEJishaku', exc_info=e)
for ext in resolve_extensions(self, 'commands.*'):
try:
self.load_extension(ext)
except Exception as e:
                self.logger.error(f'$REDError detected while loading command extension $BLUE{ext}', exc_info=e)
def load_event(self):
for ext in resolve_extensions(self, 'events.*'):
try:
self.load_extension(ext)
except Exception as e:
                self.logger.error(f'$REDError detected while loading event extension $BLUE{ext}', exc_info=e)
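    # Note: resolve_extensions(self, 'commands.*') / (self, 'events.*') expands the wildcard to
    # every module under that package, so dropping a new cog file into commands/ or events/ is
    # enough for it to be picked up at startup (assuming the module exposes a setup() entry point).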
async def on_ready(self):
print('Client {0} is connected.'.format(self.user))
# async def is_owner(self, user: discord.User):
# if something: # Implement your own conditions here
# return True
# # Else fall back to the original
# return await super().is_owner(user)
# async def on_message(self, message):
# print('Message from {0.author} -> {0.content}'.format(message))
| 30.117647 | 108 | 0.605469 |
73ff36a4eb04cedbd20147d8964ba22cddf0ba46 | 1,828 | py | Python | alipay/aop/api/domain/AlipayCommerceDataScenicMappingQueryModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/domain/AlipayCommerceDataScenicMappingQueryModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/domain/AlipayCommerceDataScenicMappingQueryModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.ScenicAuditQueryReq import ScenicAuditQueryReq
class AlipayCommerceDataScenicMappingQueryModel(object):
def __init__(self):
self._scenic_audit_query_req = None
@property
def scenic_audit_query_req(self):
return self._scenic_audit_query_req
@scenic_audit_query_req.setter
def scenic_audit_query_req(self, value):
if isinstance(value, list):
self._scenic_audit_query_req = list()
for i in value:
if isinstance(i, ScenicAuditQueryReq):
self._scenic_audit_query_req.append(i)
else:
self._scenic_audit_query_req.append(ScenicAuditQueryReq.from_alipay_dict(i))
def to_alipay_dict(self):
params = dict()
if self.scenic_audit_query_req:
if isinstance(self.scenic_audit_query_req, list):
for i in range(0, len(self.scenic_audit_query_req)):
element = self.scenic_audit_query_req[i]
if hasattr(element, 'to_alipay_dict'):
self.scenic_audit_query_req[i] = element.to_alipay_dict()
if hasattr(self.scenic_audit_query_req, 'to_alipay_dict'):
params['scenic_audit_query_req'] = self.scenic_audit_query_req.to_alipay_dict()
else:
params['scenic_audit_query_req'] = self.scenic_audit_query_req
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayCommerceDataScenicMappingQueryModel()
if 'scenic_audit_query_req' in d:
o.scenic_audit_query_req = d['scenic_audit_query_req']
return o
| 34.490566 | 96 | 0.649891 |
73ff40a23deeb72ebb64eaa6c46d80c88e486e84 | 2,377 | py | Python | mnsim_noc/utils/registry.py | godfather991/MNSIM_NoC | 402680ad72c46c2a0b040b5fa52232807d554aec | [
"MIT"
] | null | null | null | mnsim_noc/utils/registry.py | godfather991/MNSIM_NoC | 402680ad72c46c2a0b040b5fa52232807d554aec | [
"MIT"
] | 3 | 2021-11-01T15:43:20.000Z | 2021-11-09T03:49:06.000Z | mnsim_noc/utils/registry.py | ILTShade/MNSIM_NoC | 8fa4580cce0ef113b473dd22662748846ec6b45a | [
"MIT"
] | null | null | null | #-*-coding:utf-8-*-
"""
@FileName:
registry.py
@Description:
RegistryMeta
@CreateTime:
2021/10/08 17:32
"""
import abc
import collections
from mnsim_noc.utils.log import getLogger
__all__ = ["RegistryMeta", "RegistryError"]
class RegistryError(Exception):
pass
LOGGER = getLogger("registry")
class RegistryMeta(abc.ABCMeta):
registry_dict = collections.defaultdict(dict)
def __init__(cls, name, bases, namespace):
super(RegistryMeta, cls).__init__(name, bases, namespace)
# base class should have REGISTRY
if hasattr(cls, "REGISTRY"):
# register the class
table = cls.REGISTRY
abstract_methods = cls.__abstractmethods__
# leaf class should have no abstract methods
if not abstract_methods:
entry = namespace.get("NAME", name.lower())
setattr(cls, "NAME", entry)
RegistryMeta.registry_dict[table][entry] = cls
LOGGER.debug(
"Register class {} as entry {} in table {}.".format(
name, entry, table
)
)
else:
# non leaf classes should have no name
if "NAME" in namespace:
entry = namespace["NAME"]
LOGGER.warning(
"Can't register abstract class {} as entry {} in table {}, ignore. Abstract methods: {}".format(
name, entry, table, ", ".join(abstract_methods)
)
)
@classmethod
def get_class(mcs, table, name):
try:
return mcs.all_classes(table)[name]
except KeyError:
raise RegistryError(
"No registry item {} available in registry {}.".format(
name, table
)
)
@classmethod
def all_classes(mcs, table):
try:
return mcs.registry_dict[table]
except KeyError:
raise RegistryError("No registry table {} available.".format(table))
@classmethod
def avail_tables(mcs):
return mcs.registry_dict.keys()
def all_classes_(cls):
return RegistryMeta.all_classes(cls.REGISTRY)
def get_class_(cls, name):
return RegistryMeta.get_class(cls.REGISTRY, name)
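# Minimal usage sketch (names here are hypothetical, not part of this module): a base class sets
# `REGISTRY = "widget"` with RegistryMeta as its metaclass and declares abstract methods; any
# concrete subclass with no remaining abstract methods is auto-registered under its NAME and can
# be retrieved later with RegistryMeta.get_class("widget", "some_name") or listed with
# RegistryMeta.all_classes("widget").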
| 30.474359 | 120 | 0.55448 |
73ff4925493c215d1a5dc931d0de449af8f21795 | 2,744 | py | Python | leetcode_python/Hash_table/friends-of-appropriate-ages.py | yennanliu/CS_basics | 3c50c819897a572ff38179bfb0083a19b2325fde | [
"Unlicense"
] | 18 | 2019-08-01T07:45:02.000Z | 2022-03-31T18:05:44.000Z | leetcode_python/Hash_table/friends-of-appropriate-ages.py | yennanliu/CS_basics | 3c50c819897a572ff38179bfb0083a19b2325fde | [
"Unlicense"
] | null | null | null | leetcode_python/Hash_table/friends-of-appropriate-ages.py | yennanliu/CS_basics | 3c50c819897a572ff38179bfb0083a19b2325fde | [
"Unlicense"
] | 15 | 2019-12-29T08:46:20.000Z | 2022-03-08T14:14:05.000Z | # V0
import collections
class Solution:
"""
@param ages:
    @return: the number of friend requests made
"""
def numFriendRequests(self, ages):
def request(a, b):
return not (b <= 0.5 * a + 7 or b > a or b > 100 and a < 100)
c = collections.Counter(ages)
return sum(request(a, b) * c[a] * (c[b] - (a == b)) for a in c for b in c)
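# Counting idea behind the one-liner above: for every ordered pair of ages (a, b) that satisfies
# the request rule there are count[a] * count[b] requests, except when a == b, where each person
# cannot request themselves -- hence the `c[b] - (a == b)` correction.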
# V0'
from collections import Counter
class Solution:
def numFriendRequests(self, ages):
def request(x,y):
return not ( y <= 0.5*x + 7 or
y > x or
(y > 100 and x < 100))
        c = Counter(ages)
return sum(request(a, b) * c[a] * (c[b] - (a == b)) for a in c for b in c)
# V1
# http://bookshadow.com/weblog/2018/04/29/leetcode-friends-of-appropriate-ages/
import collections
class Solution(object):
def numFriendRequests(self, ages):
"""
:type ages: List[int]
:rtype: int
"""
cnt = collections.Counter(ages)
ans = 0
for age in ages:
cnt[age] -= 1
            left, right = age // 2 + 8, age
ans += sum(cnt[age] for age in range(left, right + 1))
cnt[age] += 1
return ans
# V1'
# https://blog.csdn.net/fuxuemingzhu/article/details/83183022
import collections
class Solution(object):
def numFriendRequests(self, ages):
"""
:type ages: List[int]
:rtype: int
"""
count = collections.Counter(ages)
ages = sorted(count.keys())
N = len(ages)
res = 0
for A in ages:
for B in range(int(0.5 * A) + 7 + 1, A + 1):
res += count[A] * (count[B] - int(A == B))
return res
# V1''
# https://www.jiuzhang.com/solution/friends-of-appropriate-ages/#tag-highlight-lang-python
import collections
class Solution:
"""
@param ages:
@return: nothing
get the friend request conditions, 2 for loop can it sorted
"""
def numFriendRequests(self, ages):
def request(a, b):
return not (b <= 0.5 * a + 7 or b > a or b > 100 and a < 100)
c = collections.Counter(ages)
return sum(request(a, b) * c[a] * (c[b] - (a == b)) for a in c for b in c)
# V2
# Time: O(a^2 + n), a is the number of ages,
# n is the number of people
# Space: O(a)
import collections
class Solution(object):
def numFriendRequests(self, ages):
"""
:type ages: List[int]
:rtype: int
"""
def request(a, b):
return 0.5*a+7 < b <= a
c = collections.Counter(ages)
return sum(int(request(a, b)) * c[a]*(c[b]-int(a == b))
for a in c
for b in c)
| 29.505376 | 90 | 0.519679 |